The following changes since commit 3e08b2b9cb64bff2b73fa9128c0e49bfcde0dd40:

  Merge remote-tracking branch 'remotes/philmd-gitlab/tags/edk2-next-20200121' into staging (2020-01-21 15:29:25 +0000)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20200121

for you to fetch changes up to 75fa376cdab5e5db2c7fdd107358e16f95503ac6:

  scripts/git.orderfile: Display decodetree before C source (2020-01-21 15:26:09 -1000)

----------------------------------------------------------------
Remove another limit to NB_MMU_MODES.
Fix compilation using uclibc.
Fix defaulting of -accel parameters.
Tidy cputlb basic routines.
Adjust git.orderfile for decodetree.

----------------------------------------------------------------
Carlos Santos (1):
  util/cacheinfo: fix crash when compiling with uClibc

Philippe Mathieu-Daudé (1):
  scripts/git.orderfile: Display decodetree before C source

Richard Henderson (14):
  cputlb: Handle NB_MMU_MODES > TARGET_PAGE_BITS_MIN
  vl: Remove unused variable in configure_accelerators
  vl: Reduce scope of variables in configure_accelerators
  vl: Remove useless test in configure_accelerators
  vl: Only choose enabled accelerators in configure_accelerators
  cputlb: Merge tlb_table_flush_by_mmuidx into tlb_flush_one_mmuidx_locked
  cputlb: Make tlb_n_entries private to cputlb.c
  cputlb: Pass CPUTLBDescFast to tlb_n_entries and sizeof_tlb
  cputlb: Hoist tlb portions in tlb_mmu_resize_locked
  cputlb: Hoist tlb portions in tlb_flush_one_mmuidx_locked
  cputlb: Split out tlb_mmu_flush_locked
  cputlb: Partially merge tlb_dyn_init into tlb_init
  cputlb: Initialize tlbs as flushed
  cputlb: Hoist timestamp outside of loops over tlbs

 include/exec/cpu_ldst.h | 5 -
 accel/tcg/cputlb.c | 287 +++++++++++++++++++++++++++++++++---------------
 util/cacheinfo.c | 10 +-
 vl.c | 27 +++--
 scripts/git.orderfile | 3 +
 5 files changed, 223 insertions(+), 109 deletions(-)


The following changes since commit 035eed4c0d257c905a556fa0f4865a0c077b4e7f:

  Merge remote-tracking branch 'remotes/vivier/tags/q800-for-5.0-pull-request' into staging (2020-01-07 17:08:21 +0000)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20200108

for you to fetch changes up to 5e7ef51cbe47e726f76bfbc208e167085cf398c4:

  MAINTAINERS: Replace Claudio Fontana for tcg/aarch64 (2020-01-08 11:54:12 +1100)

----------------------------------------------------------------
Improve -static and -pie linking
Add cpu_{ld,st}*_mmuidx_ra
Remove MMU_MODE*_SUFFIX
Move tcg headers under include/

----------------------------------------------------------------
Philippe Mathieu-Daudé (4):
  tcg: Search includes from the project root source directory
  tcg: Search includes in the parent source directory
  tcg: Move TCG headers to include/tcg/
  configure: Remove tcg/ from the preprocessor include search list

Richard Henderson (37):
  configure: Drop adjustment of textseg
  tcg: Remove softmmu code_gen_buffer fixed address
  configure: Do not force pie=no for non-x86
  configure: Always detect -no-pie toolchain support
  configure: Unnest detection of -z,relro and -z,now
  configure: Override the os default with --disable-pie
  configure: Support -static-pie if requested
  target/xtensa: Use probe_access for itlb_hit_test
  cputlb: Use trace_mem_get_info instead of trace_mem_build_info
  trace: Remove trace_mem_build_info_no_se_[bl]e
  target/s390x: Include tcg.h in mem_helper.c
  target/arm: Include tcg.h in sve_helper.c
  accel/tcg: Include tcg.h in tcg-runtime.c
  linux-user: Include tcg.h in syscall.c
  linux-user: Include trace-root.h in syscall-trace.h
  plugins: Include trace/mem.h in api.c
  cputlb: Move body of cpu_ldst_template.h out of line
  translator: Use cpu_ld*_code instead of open-coding
  cputlb: Rename helper_ret_ld*_cmmu to cpu_ld*_code
  cputlb: Provide cpu_{ld,st}*_mmuidx_ra for user-only
  target/i386: Use cpu_*_mmuidx_ra instead of templates
  cputlb: Expand cpu_ldst_useronly_template.h in user-exec.c
  target/nios2: Remove MMU_MODE{0,1}_SUFFIX
  target/alpha: Remove MMU_MODE{0,1}_SUFFIX
  target/cris: Remove MMU_MODE{0,1}_SUFFIX
  target/i386: Remove MMU_MODE{0,1,2}_SUFFIX
  target/microblaze: Remove MMU_MODE{0,1,2}_SUFFIX
  target/sh4: Remove MMU_MODE{0,1}_SUFFIX
  target/unicore32: Remove MMU_MODE{0,1}_SUFFIX
  target/xtensa: Remove MMU_MODE{0,1,2,3}_SUFFIX
  target/m68k: Use cpu_*_mmuidx_ra instead of MMU_MODE{0,1}_SUFFIX
  target/mips: Use cpu_*_mmuidx_ra instead of MMU_MODE*_SUFFIX
  target/s390x: Use cpu_*_mmuidx_ra instead of MMU_MODE*_SUFFIX
  target/ppc: Use cpu_*_mmuidx_ra instead of MMU_MODE*_SUFFIX
  cputlb: Remove support for MMU_MODE*_SUFFIX
  cputlb: Expand cpu_ldst_template.h in cputlb.c
  MAINTAINERS: Replace Claudio Fontana for tcg/aarch64

 Makefile | 2 +-
 accel/tcg/atomic_template.h | 67 ++---
 include/exec/cpu_ldst.h | 446 +++++++++---------------------
 include/exec/cpu_ldst_template.h | 211 --------------
 include/exec/cpu_ldst_useronly_template.h | 159 -----------
 include/exec/translator.h | 48 +---
 {tcg => include/tcg}/tcg-gvec-desc.h | 0
 {tcg => include/tcg}/tcg-mo.h | 0
 {tcg => include/tcg}/tcg-op-gvec.h | 0
 {tcg => include/tcg}/tcg-op.h | 2 +-
 {tcg => include/tcg}/tcg-opc.h | 0
 {tcg => include/tcg}/tcg.h | 33 +--
 include/user/syscall-trace.h | 2 +
 target/alpha/cpu.h | 2 -
 target/cris/cpu.h | 2 -
 target/i386/cpu.h | 3 -
 target/m68k/cpu.h | 2 -
 target/microblaze/cpu.h | 3 -
 target/mips/cpu.h | 4 -
 target/nios2/cpu.h | 2 -
 target/ppc/cpu.h | 2 -
 target/s390x/cpu.h | 5 -
 target/sh4/cpu.h | 2 -
 target/unicore32/cpu.h | 2 -
 target/xtensa/cpu.h | 4 -
 tcg/i386/tcg-target.h | 2 +-
 trace/mem-internal.h | 17 --
 accel/tcg/cpu-exec.c | 2 +-
 accel/tcg/cputlb.c | 315 ++++++++++++++++-----
 accel/tcg/tcg-runtime-gvec.c | 2 +-
 accel/tcg/tcg-runtime.c | 1 +
 accel/tcg/translate-all.c | 39 +--
 accel/tcg/user-exec.c | 238 +++++++++++++++-
 bsd-user/main.c | 2 +-
 cpus.c | 2 +-
 exec.c | 2 +-
 linux-user/main.c | 2 +-
 linux-user/syscall.c | 1 +
 plugins/api.c | 1 +
 target/alpha/translate.c | 2 +-
 target/arm/helper-a64.c | 2 +-
 target/arm/sve_helper.c | 1 +
 target/arm/translate-a64.c | 4 +-
 target/arm/translate-sve.c | 6 +-
 target/arm/translate.c | 4 +-
 target/cris/translate.c | 2 +-
 target/hppa/translate.c | 2 +-
 target/i386/mem_helper.c | 2 +-
 target/i386/seg_helper.c | 56 ++--
 target/i386/translate.c | 2 +-
 target/lm32/translate.c | 2 +-
 target/m68k/op_helper.c | 77 ++++--
 target/m68k/translate.c | 2 +-
 target/microblaze/translate.c | 2 +-
 target/mips/op_helper.c | 182 ++++--------
 target/mips/translate.c | 2 +-
 target/moxie/translate.c | 2 +-
 target/nios2/translate.c | 2 +-
 target/openrisc/translate.c | 2 +-
 target/ppc/mem_helper.c | 13 +-
 target/ppc/translate.c | 4 +-
 target/riscv/cpu_helper.c | 2 +-
 target/riscv/translate.c | 2 +-
 target/s390x/mem_helper.c | 11 +-
 target/s390x/translate.c | 4 +-
 target/sh4/translate.c | 2 +-
 target/sparc/ldst_helper.c | 2 +-
 target/sparc/translate.c | 2 +-
 target/tilegx/translate.c | 2 +-
 target/tricore/translate.c | 2 +-
 target/unicore32/translate.c | 2 +-
 target/xtensa/mmu_helper.c | 5 +-
 target/xtensa/translate.c | 2 +-
 tcg/aarch64/tcg-target.inc.c | 4 +-
 tcg/arm/tcg-target.inc.c | 4 +-
 tcg/i386/tcg-target.inc.c | 4 +-
 tcg/mips/tcg-target.inc.c | 2 +-
 tcg/optimize.c | 2 +-
 tcg/ppc/tcg-target.inc.c | 4 +-
 tcg/riscv/tcg-target.inc.c | 4 +-
 tcg/s390/tcg-target.inc.c | 4 +-
 tcg/sparc/tcg-target.inc.c | 2 +-
 tcg/tcg-common.c | 2 +-
 tcg/tcg-op-gvec.c | 8 +-
 tcg/tcg-op-vec.c | 6 +-
 tcg/tcg-op.c | 6 +-
 tcg/tcg.c | 2 +-
 tcg/tci.c | 2 +-
 MAINTAINERS | 4 +-
 configure | 117 +++-----
 docs/devel/loads-stores.rst | 215 ++++++++++----
 91 files changed, 1075 insertions(+), 1357 deletions(-)
 delete mode 100644 include/exec/cpu_ldst_template.h
 delete mode 100644 include/exec/cpu_ldst_useronly_template.h
 rename {tcg => include/tcg}/tcg-gvec-desc.h (100%)
 rename {tcg => include/tcg}/tcg-mo.h (100%)
 rename {tcg => include/tcg}/tcg-op-gvec.h (100%)
 rename {tcg => include/tcg}/tcg-op.h (99%)
 rename {tcg => include/tcg}/tcg-opc.h (100%)
 rename {tcg => include/tcg}/tcg.h (96%)
New patch

This adjustment was random and unnecessary. The user mode
startup code in probe_guest_base() will choose a value for
guest_base that allows the host qemu binary to not conflict
with the guest binary.

With modern distributions, this isn't even used, as the default
is PIE, which does the same job in a more portable way.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Remove mention of config-host.ld from make distclean
---
Makefile | 2 +-
configure | 47 -----------------------------------------------
2 files changed, 1 insertion(+), 48 deletions(-)

diff --git a/Makefile b/Makefile
index XXXXXXX..XXXXXXX 100644
--- a/Makefile
+++ b/Makefile
@@ -XXX,XX +XXX,XX @@ rm -f $(MANUAL_BUILDDIR)/$1/objects.inv $(MANUAL_BUILDDIR)/$1/searchindex.js $(M
endef

distclean: clean
-    rm -f config-host.mak config-host.h* config-host.ld $(DOCS) qemu-options.texi qemu-img-cmds.texi qemu-monitor.texi qemu-monitor-info.texi
+    rm -f config-host.mak config-host.h* $(DOCS) qemu-options.texi qemu-img-cmds.texi qemu-monitor.texi qemu-monitor-info.texi
    rm -f tests/tcg/config-*.mak
    rm -f config-all-devices.mak config-all-disas.mak config.status
    rm -f $(SUBDIR_DEVICES_MAK)
diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ if test "$cpu" = "s390x" ; then
fi
fi

-# Probe for the need for relocating the user-only binary.
-if ( [ "$linux_user" = yes ] || [ "$bsd_user" = yes ] ) && [ "$pie" = no ]; then
- textseg_addr=
- case "$cpu" in
- arm | i386 | ppc* | s390* | sparc* | x86_64 | x32)
- # ??? Rationale for choosing this address
- textseg_addr=0x60000000
- ;;
- mips)
- # A 256M aligned address, high in the address space, with enough
- # room for the code_gen_buffer above it before the stack.
- textseg_addr=0x60000000
- ;;
- esac
- if [ -n "$textseg_addr" ]; then
- cat > $TMPC <<EOF
- int main(void) { return 0; }
-EOF
- textseg_ldflags="-Wl,-Ttext-segment=$textseg_addr"
- if ! compile_prog "" "$textseg_ldflags"; then
- # In case ld does not support -Ttext-segment, edit the default linker
- # script via sed to set the .text start addr. This is needed on FreeBSD
- # at least.
- if ! $ld --verbose >/dev/null 2>&1; then
- error_exit \
- "We need to link the QEMU user mode binaries at a" \
- "specific text address. Unfortunately your linker" \
- "doesn't support either the -Ttext-segment option or" \
- "printing the default linker script with --verbose." \
- "If you don't want the user mode binaries, pass the" \
- "--disable-user option to configure."
- fi
-
- $ld --verbose | sed \
- -e '1,/==================================================/d' \
- -e '/==================================================/,$d' \
- -e "s/[.] = [0-9a-fx]* [+] SIZEOF_HEADERS/. = $textseg_addr + SIZEOF_HEADERS/" \
- -e "s/__executable_start = [0-9a-fx]*/__executable_start = $textseg_addr/" > config-host.ld
- textseg_ldflags="-Wl,-T../config-host.ld"
- fi
- fi
-fi
-
# Check that the C++ compiler exists and works with the C compiler.
# All the QEMU_CXXFLAGS are based on QEMU_CFLAGS. Keep this at the end to don't miss any other that could be added.
if has $cxx; then
@@ -XXX,XX +XXX,XX @@ if test "$gprof" = "yes" ; then
fi
fi

-if test "$target_linux_user" = "yes" || test "$target_bsd_user" = "yes" ; then
- ldflags="$ldflags $textseg_ldflags"
-fi
-
# Newer kernels on s390 check for an S390_PGSTE program header and
# enable the pgste page table extensions in that case. This makes
# the vm.allocate_pgste sysctl unnecessary. We enable this program
--
2.20.1
New patch

The commentary talks about "in concert with the addresses
assigned in the relevant linker script", except there is no
linker script for softmmu, nor has there been for some time.

(Do not confuse the user-only linker script editing that was
removed in the previous patch, because user-only does not
use this code_gen_buffer allocation method.)

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/translate-all.c | 37 +++++--------------------------------
1 file changed, 5 insertions(+), 32 deletions(-)

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
-    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

-    /* Constrain the position of the buffer based on the host cpu.
-       Note that these addresses are chosen in concert with the
-       addresses assigned in the relevant linker script file. */
-# if defined(__PIE__) || defined(__PIC__)
-    /* Don't bother setting a preferred location if we're building
-       a position-independent executable. We're more likely to get
-       an address near the main executable if we let the kernel
-       choose the address. */
-# elif defined(__x86_64__) && defined(MAP_32BIT)
-    /* Force the memory down into low memory with the executable.
-       Leave the choice of exact location with the kernel. */
-    flags |= MAP_32BIT;
-    /* Cannot expect to map more than 800MB in low memory. */
-    if (size > 800u * 1024 * 1024) {
-        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
-    }
-# elif defined(__sparc__)
-    start = 0x40000000ul;
-# elif defined(__s390x__)
-    start = 0x90000000ul;
-# elif defined(__mips__)
-# if _MIPS_SIM == _ABI64
-    start = 0x128000000ul;
-# else
-    start = 0x08000000ul;
-# endif
-# endif
-
-    buf = mmap((void *)start, size, prot, flags, -1, 0);
+    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
-        /* Try again, with the original still mapped, to avoid re-acquiring
-           that 256mb crossing. This time don't specify an address. */
+        /*
+         * Try again, with the original still mapped, to avoid re-acquiring
+         * the same 256mb crossing.
+         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
--
2.20.1
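The point of the change above is that a NULL hint lets the kernel place the
buffer, rather than steering it to an arch-specific constant. A minimal,
self-contained sketch of that pattern (not QEMU code; the size and protection
flags here are arbitrary examples):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t size = 16 * 1024 * 1024;
        /* NULL hint: let the kernel choose where the buffer lands. */
        void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        printf("buffer mapped at %p\n", buf);
        munmap(buf, size);
        return 0;
    }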
New patch

PIE is supported on many other hosts besides x86.

The default for non-x86 is now the same as x86: pie is used
if supported, and may be forced via --enable/--disable-pie.

The original commit (40d6444e91c) said:

"Non-x86 are not changed, as they require TCG changes"

but I think that's wrong -- there's nothing about PIE that
affects TCG one way or another.

Tested on aarch64 (bionic) and ppc64le (centos 7) hosts.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
configure | 10 ----------
1 file changed, 10 deletions(-)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ if ! compile_prog "-Werror" "" ; then
    "Thread-Local Storage (TLS). Please upgrade to a version that does."
fi

-if test "$pie" = ""; then
- case "$cpu-$targetos" in
- i386-Linux|x86_64-Linux|x32-Linux|i386-OpenBSD|x86_64-OpenBSD)
- ;;
- *)
- pie="no"
- ;;
- esac
-fi
-
if test "$pie" != "no" ; then
cat > $TMPC << EOF
--
2.20.1
New patch

The CFLAGS_NOPIE and LDFLAGS_NOPIE variables are used
in pc-bios/optionrom/Makefile, which has nothing to do
with the PIE setting of the main qemu executables.

This overrides any operating system default to build
all executables as PIE, which is important for ROMs.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
configure | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ if ! compile_prog "-Werror" "" ; then
    "Thread-Local Storage (TLS). Please upgrade to a version that does."
fi

-if test "$pie" != "no" ; then
- cat > $TMPC << EOF
+cat > $TMPC << EOF

#ifdef __linux__
# define THREAD __thread
#else
# define THREAD
#endif
-
static THREAD int tls_var;
-
int main(void) { return tls_var; }
-
EOF
- # check we support --no-pie first...
- if compile_prog "-Werror -fno-pie" "-no-pie"; then
- CFLAGS_NOPIE="-fno-pie"
- LDFLAGS_NOPIE="-nopie"
- fi

+# Check we support --no-pie first; we will need this for building ROMs.
+if compile_prog "-Werror -fno-pie" "-no-pie"; then
+ CFLAGS_NOPIE="-fno-pie"
+ LDFLAGS_NOPIE="-no-pie"
+fi
+
+if test "$pie" != "no" ; then
if compile_prog "-fPIE -DPIE" "-pie"; then
QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
LDFLAGS="-pie $LDFLAGS"
--
2.20.1
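For illustration, the probe that configure performs here can be approximated
outside the build system with a trivial test program. This is a stand-alone
sketch, not QEMU's compile_prog helper; the compiler variable and temporary
paths are placeholders:

    #!/bin/sh
    # Try to link a trivial program with -fno-pie/-no-pie and record
    # whether the toolchain accepts the flags.
    CC=${CC:-cc}
    TMPC="${TMPDIR:-/tmp}/nopie_probe_$$.c"
    TMPE="${TMPDIR:-/tmp}/nopie_probe_$$"

    echo 'int main(void) { return 0; }' > "$TMPC"

    if "$CC" -Werror -fno-pie -no-pie -o "$TMPE" "$TMPC" 2>/dev/null; then
        CFLAGS_NOPIE="-fno-pie"
        LDFLAGS_NOPIE="-no-pie"
        echo "toolchain supports -fno-pie/-no-pie"
    else
        echo "toolchain does not support -fno-pie/-no-pie"
    fi
    rm -f "$TMPC" "$TMPE"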
Do not call get_clock_realtime() in tlb_mmu_resize_locked,
but hoist outside of any loop over a set of tlbs. This is
only two (indirect) callers, tlb_flush_by_mmuidx_async_work
and tlb_flush_page_locked, so not onerous.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)

There is nothing about these options that is related to PIE.
Use them unconditionally.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Fangrui Song <i@maskray.me>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Do not split into two tests.
---
configure | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
14
diff --git a/configure b/configure
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100755
16
--- a/accel/tcg/cputlb.c
16
--- a/configure
17
+++ b/accel/tcg/cputlb.c
17
+++ b/configure
18
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
18
@@ -XXX,XX +XXX,XX @@ if test "$pie" != "no" ; then
19
* high), since otherwise we are likely to have a significant amount of
19
QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
20
* conflict misses.
20
LDFLAGS="-pie $LDFLAGS"
21
*/
21
pie="yes"
22
-static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
22
- if compile_prog "" "-Wl,-z,relro -Wl,-z,now" ; then
23
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
23
- LDFLAGS="-Wl,-z,relro -Wl,-z,now $LDFLAGS"
24
+ int64_t now)
24
- fi
25
{
25
else
26
size_t old_size = tlb_n_entries(fast);
26
if test "$pie" = "yes"; then
27
size_t rate;
27
error_exit "PIE not available due to missing toolchain support"
28
size_t new_size = old_size;
28
@@ -XXX,XX +XXX,XX @@ if test "$pie" != "no" ; then
29
- int64_t now = get_clock_realtime();
29
fi
30
int64_t window_len_ms = 100;
30
fi
31
int64_t window_len_ns = window_len_ms * 1000 * 1000;
31
32
bool window_expired = now > desc->window_begin_ns + window_len_ns;
32
+# Detect support for PT_GNU_RELRO + DT_BIND_NOW.
33
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
33
+# The combination is known as "full relro", because .got.plt is read-only too.
34
memset(desc->vtable, -1, sizeof(desc->vtable));
34
+if compile_prog "" "-Wl,-z,relro -Wl,-z,now" ; then
35
}
35
+ LDFLAGS="-Wl,-z,relro -Wl,-z,now $LDFLAGS"
36
36
+fi
37
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
37
+
38
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
38
##########################################
39
+ int64_t now)
39
# __sync_fetch_and_and requires at least -march=i486. Many toolchains
40
{
40
# use i686 as default anyway, but for those that don't, an explicit
41
CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
42
CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
43
44
- tlb_mmu_resize_locked(desc, fast);
45
+ tlb_mmu_resize_locked(desc, fast, now);
46
tlb_mmu_flush_locked(desc, fast);
47
}
48
49
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
50
CPUArchState *env = cpu->env_ptr;
51
uint16_t asked = data.host_int;
52
uint16_t all_dirty, work, to_clean;
53
+ int64_t now = get_clock_realtime();
54
55
assert_cpu_is_self(cpu);
56
57
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
58
59
for (work = to_clean; work != 0; work &= work - 1) {
60
int mmu_idx = ctz32(work);
61
- tlb_flush_one_mmuidx_locked(env, mmu_idx);
62
+ tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
63
}
64
65
qemu_spin_unlock(&env_tlb(env)->c.lock);
66
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
67
tlb_debug("forcing full flush midx %d ("
68
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
69
midx, lp_addr, lp_mask);
70
- tlb_flush_one_mmuidx_locked(env, midx);
71
+ tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
72
} else {
73
if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
74
tlb_n_used_entries_dec(env, midx);
75
--
41
--
76
2.20.1
42
2.20.1
77
43
78
44
diff view generated by jsdifflib
New patch

Some distributions, e.g. Ubuntu 19.10, enable PIE by default.
If for some reason one wishes to build a non-pie binary, we
must provide additional options to override.

At the same time, reorg the code to an elif chain.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
configure | 25 ++++++++++++-------------
1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ if compile_prog "-Werror -fno-pie" "-no-pie"; then
LDFLAGS_NOPIE="-no-pie"
fi

-if test "$pie" != "no" ; then
- if compile_prog "-fPIE -DPIE" "-pie"; then
- QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
- LDFLAGS="-pie $LDFLAGS"
- pie="yes"
- else
- if test "$pie" = "yes"; then
- error_exit "PIE not available due to missing toolchain support"
- else
- echo "Disabling PIE due to missing toolchain support"
- pie="no"
- fi
- fi
+if test "$pie" = "no"; then
+ QEMU_CFLAGS="$CFLAGS_NOPIE $QEMU_CFLAGS"
+ LDFLAGS="$LDFLAGS_NOPIE $LDFLAGS"
+elif compile_prog "-fPIE -DPIE" "-pie"; then
+ QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
+ LDFLAGS="-pie $LDFLAGS"
+ pie="yes"
+elif test "$pie" = "yes"; then
+ error_exit "PIE not available due to missing toolchain support"
+else
+ echo "Disabling PIE due to missing toolchain support"
+ pie="no"
fi

# Detect support for PT_GNU_RELRO + DT_BIND_NOW.
--
2.20.1
New patch

Recent toolchains support static and pie at the same time.

As with normal dynamic builds, allow --static to default to PIE
if supported by the toolchain. Allow --enable/--disable-pie to
override the default.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Fix --disable-pie --static
---
configure | 19 ++++++++++++-------
1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ for opt do
;;
--static)
static="yes"
- LDFLAGS="-static $LDFLAGS"
QEMU_PKG_CONFIG_FLAGS="--static $QEMU_PKG_CONFIG_FLAGS"
;;
--mandir=*) mandir="$optarg"
@@ -XXX,XX +XXX,XX @@ if test "$static" = "yes" ; then
if test "$modules" = "yes" ; then
error_exit "static and modules are mutually incompatible"
fi
- if test "$pie" = "yes" ; then
- error_exit "static and pie are mutually incompatible"
- else
- pie="no"
- fi
fi

# Unconditional check for compiler __thread support
@@ -XXX,XX +XXX,XX @@ if compile_prog "-Werror -fno-pie" "-no-pie"; then
LDFLAGS_NOPIE="-no-pie"
fi

-if test "$pie" = "no"; then
+if test "$static" = "yes"; then
+ if test "$pie" != "no" && compile_prog "-fPIE -DPIE" "-static-pie"; then
+ QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS"
+ LDFLAGS="-static-pie $LDFLAGS"
+ pie="yes"
+ elif test "$pie" = "yes"; then
+ error_exit "-static-pie not available due to missing toolchain support"
+ else
+ LDFLAGS="-static $LDFLAGS"
+ pie="no"
+ fi
+elif test "$pie" = "no"; then
QEMU_CFLAGS="$CFLAGS_NOPIE $QEMU_CFLAGS"
LDFLAGS="$LDFLAGS_NOPIE $LDFLAGS"
elif compile_prog "-fPIE -DPIE" "-pie"; then
--
2.20.1
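Assuming the behaviour described above, the three cases can be selected from
the command line roughly as follows (the target list is only an example):

    # Default: --static alone now tries -static-pie first and falls back
    # to a plain -static link only if the toolchain lacks support.
    ./configure --static --target-list=x86_64-linux-user

    # Force a static PIE build; configure aborts if -static-pie is missing.
    ./configure --static --enable-pie --target-list=x86_64-linux-user

    # Force the old behaviour: a non-PIE static binary.
    ./configure --static --disable-pie --target-list=x86_64-linux-user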
There's little point in leaving these data structures half initialized,
and relying on a flush to be done during reset.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

We don't actually need the result of the read, only to probe that the
memory mapping exists. This is exactly what probe_access does.

This is also the only user of any cpu_ld*_code_ra function.
Removing this allows the interface to be removed shortly.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/xtensa/mmu_helper.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
16
diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c
12
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
18
--- a/target/xtensa/mmu_helper.c
14
+++ b/accel/tcg/cputlb.c
19
+++ b/target/xtensa/mmu_helper.c
15
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
20
@@ -XXX,XX +XXX,XX @@
16
fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
21
void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
17
fast->table = g_new(CPUTLBEntry, n_entries);
22
{
18
desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
23
/*
19
+ tlb_mmu_flush_locked(desc, fast);
24
- * Attempt the memory load; we don't care about the result but
25
+ * Probe the memory; we don't care about the result but
26
* only the side-effects (ie any MMU or other exception)
27
*/
28
- cpu_ldub_code_ra(env, vaddr, GETPC());
29
+ probe_access(env, vaddr, 1, MMU_INST_FETCH,
30
+ cpu_mmu_index(env, true), GETPC());
20
}
31
}
21
32
22
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
33
void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
23
@@ -XXX,XX +XXX,XX @@ void tlb_init(CPUState *cpu)
24
25
qemu_spin_init(&env_tlb(env)->c.lock);
26
27
- /* Ensure that cpu_reset performs a full flush. */
28
- env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
29
+ /* All tlbs are initialized flushed. */
30
+ env_tlb(env)->c.dirty = 0;
31
32
for (i = 0; i < NB_MMU_MODES; i++) {
33
tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
34
--
34
--
35
2.20.1
35
2.20.1
36
36
37
37
diff view generated by jsdifflib
New patch

In the cpu_ldst templates, we already require a MemOp, and it
is cleaner and clearer to pass that instead of 3 separate
arguments describing the memory operation.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu_ldst_template.h | 22 +++++++++++-----------
include/exec/cpu_ldst_useronly_template.h | 12 ++++++------
2 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu_ldst_template.h
17
+++ b/include/exec/cpu_ldst_template.h
18
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
19
RES_TYPE res;
20
target_ulong addr;
21
int mmu_idx = CPU_MMU_INDEX;
22
- TCGMemOpIdx oi;
23
+ MemOp op = MO_TE | SHIFT;
24
#if !defined(SOFTMMU_CODE_ACCESS)
25
- uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx);
26
+ uint16_t meminfo = trace_mem_get_info(op, mmu_idx, false);
27
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
28
#endif
29
30
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
31
entry = tlb_entry(env, mmu_idx, addr);
32
if (unlikely(entry->ADDR_READ !=
33
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
34
- oi = make_memop_idx(SHIFT, mmu_idx);
35
+ TCGMemOpIdx oi = make_memop_idx(op, mmu_idx);
36
res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr,
37
- oi, retaddr);
38
+ oi, retaddr);
39
} else {
40
uintptr_t hostaddr = addr + entry->addend;
41
res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
42
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
43
int res;
44
target_ulong addr;
45
int mmu_idx = CPU_MMU_INDEX;
46
- TCGMemOpIdx oi;
47
-#if !defined(SOFTMMU_CODE_ACCESS)
48
- uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx);
49
+ MemOp op = MO_TE | MO_SIGN | SHIFT;
50
+#ifndef SOFTMMU_CODE_ACCESS
51
+ uint16_t meminfo = trace_mem_get_info(op, mmu_idx, false);
52
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
53
#endif
54
55
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
56
entry = tlb_entry(env, mmu_idx, addr);
57
if (unlikely(entry->ADDR_READ !=
58
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
59
- oi = make_memop_idx(SHIFT, mmu_idx);
60
+ TCGMemOpIdx oi = make_memop_idx(op & ~MO_SIGN, mmu_idx);
61
res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX),
62
MMUSUFFIX)(env, addr, oi, retaddr);
63
} else {
64
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
65
CPUTLBEntry *entry;
66
target_ulong addr;
67
int mmu_idx = CPU_MMU_INDEX;
68
- TCGMemOpIdx oi;
69
+ MemOp op = MO_TE | SHIFT;
70
#if !defined(SOFTMMU_CODE_ACCESS)
71
- uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx);
72
+ uint16_t meminfo = trace_mem_get_info(op, mmu_idx, true);
73
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
74
#endif
75
76
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
77
entry = tlb_entry(env, mmu_idx, addr);
78
if (unlikely(tlb_addr_write(entry) !=
79
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
80
- oi = make_memop_idx(SHIFT, mmu_idx);
81
+ TCGMemOpIdx oi = make_memop_idx(op, mmu_idx);
82
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
83
retaddr);
84
} else {
85
diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
86
index XXXXXXX..XXXXXXX 100644
87
--- a/include/exec/cpu_ldst_useronly_template.h
88
+++ b/include/exec/cpu_ldst_useronly_template.h
89
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
90
ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
91
clear_helper_retaddr();
92
#else
93
- uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false,
94
- MMU_USER_IDX);
95
+ MemOp op = MO_TE | SHIFT;
96
+ uint16_t meminfo = trace_mem_get_info(op, MMU_USER_IDX, false);
97
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
98
ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
99
#endif
100
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
101
ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
102
clear_helper_retaddr();
103
#else
104
- uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false,
105
- MMU_USER_IDX);
106
+ MemOp op = MO_TE | MO_SIGN | SHIFT;
107
+ uint16_t meminfo = trace_mem_get_info(op, MMU_USER_IDX, false);
108
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
109
ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
110
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
111
@@ -XXX,XX +XXX,XX @@ static inline void
112
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
113
RES_TYPE v)
114
{
115
- uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true,
116
- MMU_USER_IDX);
117
+ MemOp op = MO_TE | SHIFT;
118
+ uint16_t meminfo = trace_mem_get_info(op, MMU_USER_IDX, true);
119
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
120
glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
121
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
122
--
123
2.20.1
124
125
diff view generated by jsdifflib
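A toy, self-contained illustration of why one MemOp value can replace the
three separate arguments: size, signedness and endianness are just bit fields
in a single integer. The constants and helper below are simplified stand-ins
patterned after, but not identical to, QEMU's real MemOp and make_memop_idx
definitions:

    #include <stdio.h>

    /* Simplified stand-ins: size shift in the low bits, plus flags. */
    typedef enum {
        MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
        MO_SIGN = 1 << 2,
        MO_BSWAP = 1 << 3,
    } MemOp;

    /* One value now carries what used to be several arguments. */
    static unsigned make_memop_idx(MemOp op, int mmu_idx)
    {
        return (op << 4) | (mmu_idx & 0xf);
    }

    int main(void)
    {
        MemOp op = MO_16 | MO_SIGN;              /* signed 16-bit access */
        unsigned oi = make_memop_idx(op & ~MO_SIGN, 1);

        printf("size = %d bytes, signed = %s, oi = %#x\n",
               1 << (op & 3), (op & MO_SIGN) ? "yes" : "no", oi);
        return 0;
    }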
There is only one caller for tlb_table_flush_by_mmuidx. Place
the result at the earlier line number, due to an expected user
in the near future.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 19 +++++++------------
1 file changed, 7 insertions(+), 12 deletions(-)

It is easy for the atomic helpers to use trace_mem_build_info
directly, without resorting to symbol pasting. For this usage,
we cannot use trace_mem_get_info, because the MemOp does not
support 16-byte accesses.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/atomic_template.h | 67 +++++++++++++------------------------
trace/mem-internal.h | 17 ----------
2 files changed, 24 insertions(+), 60 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
13
14
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
13
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/cputlb.c
16
--- a/accel/tcg/atomic_template.h
15
+++ b/accel/tcg/cputlb.c
17
+++ b/accel/tcg/atomic_template.h
16
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
18
@@ -XXX,XX +XXX,XX @@
17
}
19
the ATOMIC_NAME macro, and redefined below. */
20
#if DATA_SIZE == 1
21
# define END
22
-# define MEND _be /* either le or be would be fine */
23
#elif defined(HOST_WORDS_BIGENDIAN)
24
# define END _be
25
-# define MEND _be
26
#else
27
# define END _le
28
-# define MEND _le
29
#endif
30
31
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
32
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
33
ATOMIC_MMU_DECLS;
34
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
35
DATA_TYPE ret;
36
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
37
- ATOMIC_MMU_IDX);
38
+ uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
39
+ ATOMIC_MMU_IDX);
40
41
atomic_trace_rmw_pre(env, addr, info);
42
#if DATA_SIZE == 16
43
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
44
{
45
ATOMIC_MMU_DECLS;
46
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
47
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
48
- ATOMIC_MMU_IDX);
49
+ uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
50
+ ATOMIC_MMU_IDX);
51
52
atomic_trace_ld_pre(env, addr, info);
53
val = atomic16_read(haddr);
54
@@ -XXX,XX +XXX,XX @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
55
{
56
ATOMIC_MMU_DECLS;
57
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
58
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, true,
59
- ATOMIC_MMU_IDX);
60
+ uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
61
+ ATOMIC_MMU_IDX);
62
63
atomic_trace_st_pre(env, addr, info);
64
atomic16_set(haddr, val);
65
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
66
ATOMIC_MMU_DECLS;
67
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
68
DATA_TYPE ret;
69
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
70
- ATOMIC_MMU_IDX);
71
+ uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
72
+ ATOMIC_MMU_IDX);
73
74
atomic_trace_rmw_pre(env, addr, info);
75
ret = atomic_xchg__nocheck(haddr, val);
76
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
77
ATOMIC_MMU_DECLS; \
78
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
79
DATA_TYPE ret; \
80
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, \
81
- false, \
82
- ATOMIC_MMU_IDX); \
83
- \
84
+ uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
85
+ ATOMIC_MMU_IDX); \
86
atomic_trace_rmw_pre(env, addr, info); \
87
ret = atomic_##X(haddr, val); \
88
ATOMIC_MMU_CLEANUP; \
89
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
90
ATOMIC_MMU_DECLS; \
91
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
92
XDATA_TYPE cmp, old, new, val = xval; \
93
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, \
94
- false, \
95
- ATOMIC_MMU_IDX); \
96
- \
97
+ uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
98
+ ATOMIC_MMU_IDX); \
99
atomic_trace_rmw_pre(env, addr, info); \
100
smp_mb(); \
101
cmp = atomic_read__nocheck(haddr); \
102
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
103
#endif /* DATA SIZE >= 16 */
104
105
#undef END
106
-#undef MEND
107
108
#if DATA_SIZE > 1
109
110
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
111
within the ATOMIC_NAME macro. */
112
#ifdef HOST_WORDS_BIGENDIAN
113
# define END _le
114
-# define MEND _le
115
#else
116
# define END _be
117
-# define MEND _be
118
#endif
119
120
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
121
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
122
ATOMIC_MMU_DECLS;
123
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
124
DATA_TYPE ret;
125
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,
126
- false,
127
- ATOMIC_MMU_IDX);
128
+ uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
129
+ ATOMIC_MMU_IDX);
130
131
atomic_trace_rmw_pre(env, addr, info);
132
#if DATA_SIZE == 16
133
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
134
{
135
ATOMIC_MMU_DECLS;
136
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
137
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,
138
- false,
139
- ATOMIC_MMU_IDX);
140
+ uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
141
+ ATOMIC_MMU_IDX);
142
143
atomic_trace_ld_pre(env, addr, info);
144
val = atomic16_read(haddr);
145
@@ -XXX,XX +XXX,XX @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
146
{
147
ATOMIC_MMU_DECLS;
148
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
149
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,
150
- true,
151
- ATOMIC_MMU_IDX);
152
+ uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
153
+ ATOMIC_MMU_IDX);
154
155
val = BSWAP(val);
156
atomic_trace_st_pre(env, addr, info);
157
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
158
ATOMIC_MMU_DECLS;
159
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
160
ABI_TYPE ret;
161
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,
162
- false,
163
- ATOMIC_MMU_IDX);
164
+ uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
165
+ ATOMIC_MMU_IDX);
166
167
atomic_trace_rmw_pre(env, addr, info);
168
ret = atomic_xchg__nocheck(haddr, BSWAP(val));
169
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
170
ATOMIC_MMU_DECLS; \
171
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
172
DATA_TYPE ret; \
173
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, \
174
- false, \
175
- ATOMIC_MMU_IDX); \
176
- \
177
+ uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
178
+ false, ATOMIC_MMU_IDX); \
179
atomic_trace_rmw_pre(env, addr, info); \
180
ret = atomic_##X(haddr, BSWAP(val)); \
181
ATOMIC_MMU_CLEANUP; \
182
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
183
ATOMIC_MMU_DECLS; \
184
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
185
XDATA_TYPE ldo, ldn, old, new, val = xval; \
186
- uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, \
187
- false, \
188
- ATOMIC_MMU_IDX); \
189
- \
190
+ uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
191
+ false, ATOMIC_MMU_IDX); \
192
atomic_trace_rmw_pre(env, addr, info); \
193
smp_mb(); \
194
ldn = atomic_read__nocheck(haddr); \
195
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
196
#endif /* DATA_SIZE >= 16 */
197
198
#undef END
199
-#undef MEND
200
#endif /* DATA_SIZE > 1 */
201
202
#undef BSWAP
203
diff --git a/trace/mem-internal.h b/trace/mem-internal.h
204
index XXXXXXX..XXXXXXX 100644
205
--- a/trace/mem-internal.h
206
+++ b/trace/mem-internal.h
207
@@ -XXX,XX +XXX,XX @@ static inline uint16_t trace_mem_get_info(MemOp op,
208
mmu_idx);
18
}
209
}
19
210
20
-static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
211
-/* Used by the atomic helpers */
21
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
212
-static inline
22
{
213
-uint16_t trace_mem_build_info_no_se_be(int size_shift, bool store,
23
tlb_mmu_resize_locked(env, mmu_idx);
214
- TCGMemOpIdx oi)
24
- memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
25
env_tlb(env)->d[mmu_idx].n_used_entries = 0;
26
+ env_tlb(env)->d[mmu_idx].large_page_addr = -1;
27
+ env_tlb(env)->d[mmu_idx].large_page_mask = -1;
28
+ env_tlb(env)->d[mmu_idx].vindex = 0;
29
+ memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
30
+ memset(env_tlb(env)->d[mmu_idx].vtable, -1,
31
+ sizeof(env_tlb(env)->d[0].vtable));
32
}
33
34
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
35
@@ -XXX,XX +XXX,XX @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
36
*pelide = elide;
37
}
38
39
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
40
-{
215
-{
41
- tlb_table_flush_by_mmuidx(env, mmu_idx);
216
- return trace_mem_build_info(size_shift, false, MO_BE, store,
42
- env_tlb(env)->d[mmu_idx].large_page_addr = -1;
217
- get_mmuidx(oi));
43
- env_tlb(env)->d[mmu_idx].large_page_mask = -1;
44
- env_tlb(env)->d[mmu_idx].vindex = 0;
45
- memset(env_tlb(env)->d[mmu_idx].vtable, -1,
46
- sizeof(env_tlb(env)->d[0].vtable));
47
-}
218
-}
48
-
219
-
49
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
220
-static inline
50
{
221
-uint16_t trace_mem_build_info_no_se_le(int size_shift, bool store,
51
CPUArchState *env = cpu->env_ptr;
222
- TCGMemOpIdx oi)
223
-{
224
- return trace_mem_build_info(size_shift, false, MO_LE, store,
225
- get_mmuidx(oi));
226
-}
227
-
228
#endif /* TRACE__MEM_INTERNAL_H */
52
--
229
--
53
2.20.1
230
2.20.1
54
231
55
232
diff view generated by jsdifflib
New patch

Code movement in an upcoming patch will show that this file
was implicitly depending on tcg.h being included indirectly.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/s390x/mem_helper.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -XXX,XX +XXX,XX @@
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
+#include "tcg.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
--
2.20.1
New patch

Code movement in an upcoming patch will show that this file
was implicitly depending on tcg.h being included indirectly.

Cc: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/sve_helper.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
+#include "tcg.h"

/* Note that vector data is stored in host-endian 64-bit chunks,
--
2.20.1
New patch

Code movement in an upcoming patch will show that this file
was implicitly depending on tcg.h being included indirectly.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/tcg-runtime.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-runtime.c
+++ b/accel/tcg/tcg-runtime.c
@@ -XXX,XX +XXX,XX @@
#include "exec/tb-lookup.h"
#include "disas/disas.h"
#include "exec/log.h"
+#include "tcg.h"

/* 32-bit helpers */
--
2.20.1
New patch

Code movement in an upcoming patch will show that this file
was implicitly depending on tcg.h being included indirectly.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/syscall.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
+#include "tcg.h"

#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
--
2.20.1
New patch

Code movement in an upcoming patch will show that this file
was implicitly depending on trace-root.h being included beforehand.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/user/syscall-trace.h | 2 ++
1 file changed, 2 insertions(+)

diff --git a/include/user/syscall-trace.h b/include/user/syscall-trace.h
index XXXXXXX..XXXXXXX 100644
--- a/include/user/syscall-trace.h
+++ b/include/user/syscall-trace.h
@@ -XXX,XX +XXX,XX @@
#ifndef _SYSCALL_TRACE_H_
#define _SYSCALL_TRACE_H_

+#include "trace-root.h"
+
/*
 * These helpers just provide a common place for the various
 * subsystems that want to track syscalls to put their hooks in. We
--
2.20.1
New patch

Code movement in an upcoming patch will show that this file
was implicitly depending on trace/mem.h being included beforehand.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reported-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
plugins/api.c | 1 +
1 file changed, 1 insertion(+)

diff --git a/plugins/api.c b/plugins/api.c
index XXXXXXX..XXXXXXX 100644
--- a/plugins/api.c
+++ b/plugins/api.c
@@ -XXX,XX +XXX,XX @@
#include "qemu/plugin-memory.h"
#include "hw/boards.h"
#endif
+#include "trace/mem.h"

/* Uninstall and Reset handlers */
--
2.20.1
No functional change, but the smaller expressions make
the code easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)

With the tracing hooks, the inline functions are no longer
so simple. Once out-of-line, the current tlb_entry lookup
is redundant with the one in the main load/store_helper.

This also begins the introduction of a new target facing
interface, with suffix *_mmuidx_ra. This is not yet
official because the interface is not done for user-only.

Use abi_ptr instead of target_ulong in preparation for
user-only; the two types are identical for softmmu.

What remains in cpu_ldst_template.h are the expansions
for _code, _data, and MMU_MODE<N>_SUFFIX.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu_ldst.h | 25 ++++++-
include/exec/cpu_ldst_template.h | 125 +++++++------------------------
accel/tcg/cputlb.c | 116 ++++++++++++++++++++++++++++
3 files changed, 166 insertions(+), 100 deletions(-)
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/include/exec/cpu_ldst.h
26
+++ b/include/exec/cpu_ldst.h
27
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
28
29
#else
30
31
-/* The memory helpers for tcg-generated code need tcg_target_long etc. */
32
+/* Needed for TCG_OVERSIZED_GUEST */
33
#include "tcg.h"
34
35
static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
36
@@ -XXX,XX +XXX,XX @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
37
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
38
}
39
40
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
41
+ int mmu_idx, uintptr_t ra);
42
+uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
43
+ int mmu_idx, uintptr_t ra);
44
+uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
45
+ int mmu_idx, uintptr_t ra);
46
+uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
47
+ int mmu_idx, uintptr_t ra);
48
+
49
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
50
+ int mmu_idx, uintptr_t ra);
51
+int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
52
+ int mmu_idx, uintptr_t ra);
53
+
54
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
55
+ int mmu_idx, uintptr_t retaddr);
56
+void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
57
+ int mmu_idx, uintptr_t retaddr);
58
+void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
59
+ int mmu_idx, uintptr_t retaddr);
60
+void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
61
+ int mmu_idx, uintptr_t retaddr);
62
+
63
#ifdef MMU_MODE0_SUFFIX
64
#define CPU_MMU_INDEX 0
65
#define MEMSUFFIX MMU_MODE0_SUFFIX
66
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
67
index XXXXXXX..XXXXXXX 100644
68
--- a/include/exec/cpu_ldst_template.h
69
+++ b/include/exec/cpu_ldst_template.h
70
@@ -XXX,XX +XXX,XX @@
71
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
72
*/
73
74
-#if !defined(SOFTMMU_CODE_ACCESS)
75
-#include "trace-root.h"
76
-#endif
77
-
78
-#include "qemu/plugin.h"
79
-#include "trace/mem.h"
80
-
81
#if DATA_SIZE == 8
82
#define SUFFIX q
83
#define USUFFIX q
84
@@ -XXX,XX +XXX,XX @@
85
#define RES_TYPE uint32_t
86
#endif
87
88
+/* generic load/store macros */
89
+
90
#ifdef SOFTMMU_CODE_ACCESS
91
-#define ADDR_READ addr_code
92
-#define MMUSUFFIX _cmmu
93
-#define URETSUFFIX USUFFIX
94
-#define SRETSUFFIX glue(s, SUFFIX)
95
-#else
96
-#define ADDR_READ addr_read
97
-#define MMUSUFFIX _mmu
98
-#define URETSUFFIX USUFFIX
99
-#define SRETSUFFIX glue(s, SUFFIX)
100
+
101
+static inline RES_TYPE
102
+glue(glue(cpu_ld, USUFFIX), _code)(CPUArchState *env, target_ulong ptr)
103
+{
104
+ TCGMemOpIdx oi = make_memop_idx(MO_TE | SHIFT, CPU_MMU_INDEX);
105
+ return glue(glue(helper_ret_ld, USUFFIX), _cmmu)(env, ptr, oi, 0);
106
+}
107
+
108
+#if DATA_SIZE <= 2
109
+static inline int
110
+glue(glue(cpu_lds, SUFFIX), _code)(CPUArchState *env, target_ulong ptr)
111
+{
112
+ return (DATA_STYPE)glue(glue(cpu_ld, USUFFIX), _code)(env, ptr);
113
+}
114
#endif
115
116
-/* generic load/store macros */
117
+#else
118
119
static inline RES_TYPE
120
glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
121
target_ulong ptr,
122
uintptr_t retaddr)
123
{
124
- CPUTLBEntry *entry;
125
- RES_TYPE res;
126
- target_ulong addr;
127
- int mmu_idx = CPU_MMU_INDEX;
128
- MemOp op = MO_TE | SHIFT;
129
-#if !defined(SOFTMMU_CODE_ACCESS)
130
- uint16_t meminfo = trace_mem_get_info(op, mmu_idx, false);
131
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
132
-#endif
133
-
134
- addr = ptr;
135
- entry = tlb_entry(env, mmu_idx, addr);
136
- if (unlikely(entry->ADDR_READ !=
137
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
138
- TCGMemOpIdx oi = make_memop_idx(op, mmu_idx);
139
- res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr,
140
- oi, retaddr);
141
- } else {
142
- uintptr_t hostaddr = addr + entry->addend;
143
- res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
144
- }
145
-#ifndef SOFTMMU_CODE_ACCESS
146
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
147
-#endif
148
- return res;
149
+ return glue(glue(cpu_ld, USUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX,
150
+ retaddr);
151
}
152
153
static inline RES_TYPE
154
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
155
{
156
- return glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
157
+ return glue(glue(cpu_ld, USUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX, 0);
158
}
159
160
#if DATA_SIZE <= 2
161
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
162
target_ulong ptr,
163
uintptr_t retaddr)
164
{
165
- CPUTLBEntry *entry;
166
- int res;
167
- target_ulong addr;
168
- int mmu_idx = CPU_MMU_INDEX;
169
- MemOp op = MO_TE | MO_SIGN | SHIFT;
170
-#ifndef SOFTMMU_CODE_ACCESS
171
- uint16_t meminfo = trace_mem_get_info(op, mmu_idx, false);
172
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
173
-#endif
174
-
175
- addr = ptr;
176
- entry = tlb_entry(env, mmu_idx, addr);
177
- if (unlikely(entry->ADDR_READ !=
178
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
179
- TCGMemOpIdx oi = make_memop_idx(op & ~MO_SIGN, mmu_idx);
180
- res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX),
181
- MMUSUFFIX)(env, addr, oi, retaddr);
182
- } else {
183
- uintptr_t hostaddr = addr + entry->addend;
184
- res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
185
- }
186
-#ifndef SOFTMMU_CODE_ACCESS
187
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
188
-#endif
189
- return res;
190
+ return glue(glue(cpu_lds, SUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX,
191
+ retaddr);
192
}
193
194
static inline int
195
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
196
{
197
- return glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
198
+ return glue(glue(cpu_lds, SUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX, 0);
199
}
200
#endif
201
202
-#ifndef SOFTMMU_CODE_ACCESS
203
-
204
/* generic store macro */
205
206
static inline void
207
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
208
target_ulong ptr,
209
RES_TYPE v, uintptr_t retaddr)
210
{
211
- CPUTLBEntry *entry;
212
- target_ulong addr;
213
- int mmu_idx = CPU_MMU_INDEX;
214
- MemOp op = MO_TE | SHIFT;
215
-#if !defined(SOFTMMU_CODE_ACCESS)
216
- uint16_t meminfo = trace_mem_get_info(op, mmu_idx, true);
217
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
218
-#endif
219
-
220
- addr = ptr;
221
- entry = tlb_entry(env, mmu_idx, addr);
222
- if (unlikely(tlb_addr_write(entry) !=
223
- (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
224
- TCGMemOpIdx oi = make_memop_idx(op, mmu_idx);
225
- glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
226
- retaddr);
227
- } else {
228
- uintptr_t hostaddr = addr + entry->addend;
229
- glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
230
- }
231
-#ifndef SOFTMMU_CODE_ACCESS
232
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
233
-#endif
234
+ glue(glue(cpu_st, SUFFIX), _mmuidx_ra)(env, ptr, v, CPU_MMU_INDEX,
235
+ retaddr);
236
}
237
238
static inline void
239
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
240
RES_TYPE v)
241
{
242
- glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(env, ptr, v, 0);
243
+ glue(glue(cpu_st, SUFFIX), _mmuidx_ra)(env, ptr, v, CPU_MMU_INDEX, 0);
244
}
245
246
#endif /* !SOFTMMU_CODE_ACCESS */
247
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
248
#undef SUFFIX
249
#undef USUFFIX
250
#undef DATA_SIZE
251
-#undef MMUSUFFIX
252
-#undef ADDR_READ
253
-#undef URETSUFFIX
254
-#undef SRETSUFFIX
255
#undef SHIFT
12
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
256
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
13
index XXXXXXX..XXXXXXX 100644
257
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/cputlb.c
258
--- a/accel/tcg/cputlb.c
15
+++ b/accel/tcg/cputlb.c
259
+++ b/accel/tcg/cputlb.c
16
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
260
@@ -XXX,XX +XXX,XX @@
17
261
#include "qemu/atomic.h"
18
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
262
#include "qemu/atomic128.h"
19
{
263
#include "translate-all.h"
20
- tlb_mmu_resize_locked(&env_tlb(env)->d[mmu_idx], &env_tlb(env)->f[mmu_idx]);
264
+#include "trace-root.h"
21
- env_tlb(env)->d[mmu_idx].n_used_entries = 0;
265
+#include "qemu/plugin.h"
22
- env_tlb(env)->d[mmu_idx].large_page_addr = -1;
266
+#include "trace/mem.h"
23
- env_tlb(env)->d[mmu_idx].large_page_mask = -1;
267
#ifdef CONFIG_PLUGIN
24
- env_tlb(env)->d[mmu_idx].vindex = 0;
268
#include "qemu/plugin-memory.h"
25
- memset(env_tlb(env)->f[mmu_idx].table, -1,
269
#endif
26
- sizeof_tlb(&env_tlb(env)->f[mmu_idx]));
270
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
27
- memset(env_tlb(env)->d[mmu_idx].vtable, -1,
271
return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
28
- sizeof(env_tlb(env)->d[0].vtable));
272
}
29
+ CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
273
30
+ CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
274
+/*
31
+
275
+ * Load helpers for cpu_ldst.h.
32
+ tlb_mmu_resize_locked(desc, fast);
276
+ */
33
+ desc->n_used_entries = 0;
277
+
34
+ desc->large_page_addr = -1;
278
+static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
35
+ desc->large_page_mask = -1;
279
+ int mmu_idx, uintptr_t retaddr,
36
+ desc->vindex = 0;
280
+ MemOp op, FullLoadHelper *full_load)
37
+ memset(fast->table, -1, sizeof_tlb(fast));
281
+{
38
+ memset(desc->vtable, -1, sizeof(desc->vtable));
282
+ uint16_t meminfo;
39
}
283
+ TCGMemOpIdx oi;
40
284
+ uint64_t ret;
41
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
285
+
286
+ meminfo = trace_mem_get_info(op, mmu_idx, false);
287
+ trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
288
+
289
+ op &= ~MO_SIGN;
290
+ oi = make_memop_idx(op, mmu_idx);
291
+ ret = full_load(env, addr, oi, retaddr);
292
+
293
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
294
+
295
+ return ret;
296
+}
297
+
298
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
299
+ int mmu_idx, uintptr_t ra)
300
+{
301
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
302
+}
303
+
304
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
305
+ int mmu_idx, uintptr_t ra)
306
+{
307
+ return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
308
+ full_ldub_mmu);
309
+}
310
+
311
+uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
312
+ int mmu_idx, uintptr_t ra)
313
+{
314
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUW,
315
+ MO_TE == MO_LE
316
+ ? full_le_lduw_mmu : full_be_lduw_mmu);
317
+}
318
+
319
+int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
320
+ int mmu_idx, uintptr_t ra)
321
+{
322
+ return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_TESW,
323
+ MO_TE == MO_LE
324
+ ? full_le_lduw_mmu : full_be_lduw_mmu);
325
+}
326
+
327
+uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
328
+ int mmu_idx, uintptr_t ra)
329
+{
330
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUL,
331
+ MO_TE == MO_LE
332
+ ? full_le_ldul_mmu : full_be_ldul_mmu);
333
+}
334
+
335
+uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
336
+ int mmu_idx, uintptr_t ra)
337
+{
338
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEQ,
339
+ MO_TE == MO_LE
340
+ ? helper_le_ldq_mmu : helper_be_ldq_mmu);
341
+}
342
+
343
/*
344
* Store Helpers
345
*/
346
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
347
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
348
}
349
350
+/*
351
+ * Store Helpers for cpu_ldst.h
352
+ */
353
+
354
+static inline void QEMU_ALWAYS_INLINE
355
+cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
356
+ int mmu_idx, uintptr_t retaddr, MemOp op)
357
+{
358
+ TCGMemOpIdx oi;
359
+ uint16_t meminfo;
360
+
361
+ meminfo = trace_mem_get_info(op, mmu_idx, true);
362
+ trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
363
+
364
+ oi = make_memop_idx(op, mmu_idx);
365
+ store_helper(env, addr, val, oi, retaddr, op);
366
+
367
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
368
+}
369
+
370
+void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
371
+ int mmu_idx, uintptr_t retaddr)
372
+{
373
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
374
+}
375
+
376
+void cpu_stw_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
377
+ int mmu_idx, uintptr_t retaddr)
378
+{
379
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUW);
380
+}
381
+
382
+void cpu_stl_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
383
+ int mmu_idx, uintptr_t retaddr)
384
+{
385
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUL);
386
+}
387
+
388
+void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
389
+ int mmu_idx, uintptr_t retaddr)
390
+{
391
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ);
392
+}
393
+
394
/* First set of helpers allows passing in of OI and RETADDR. This makes
395
them callable from other helpers. */
396
42
--
2.20.1

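As a purely illustrative aside (not part of the series): with the cpu_{ld,st}*_mmuidx_ra helpers added above, a target helper that must access memory through an explicit MMU index can be a one-liner; the helper name and index argument below are hypothetical.

    /* Hypothetical helper: 32-bit load through an explicit MMU index.
     * Passing GETPC() lets a fault unwind the guest state correctly. */
    uint32_t HELPER(ldl_with_idx)(CPUArchState *env, target_ulong addr,
                                  uint32_t idx)
    {
        return cpu_ldl_mmuidx_ra(env, addr, idx, GETPC());
    }
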
The DO_LOAD macros replicate the distinction already performed
by the cpu_ldst.h functions. Use them.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst.h   | 11 ---------
 include/exec/translator.h | 48 +++++++++++----------------------------
 2 files changed, 13 insertions(+), 46 deletions(-)

diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/exec/cpu_ldst.h
15
+++ b/include/exec/cpu_ldst.h
16
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
17
#include "exec/cpu_ldst_useronly_template.h"
18
#undef MEMSUFFIX
19
20
-/*
21
- * Code access is deprecated in favour of translator_ld* functions
22
- * (see translator.h). However there are still users that need to
23
- * converted so for now these stay.
24
- */
25
#define MEMSUFFIX _code
26
#define CODE_ACCESS
27
#define DATA_SIZE 1
28
@@ -XXX,XX +XXX,XX @@ void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
29
#undef CPU_MMU_INDEX
30
#undef MEMSUFFIX
31
32
-/*
33
- * Code access is deprecated in favour of translator_ld* functions
34
- * (see translator.h). However there are still users that need to
35
- * converted so for now these stay.
36
- */
37
-
38
#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
39
#define MEMSUFFIX _code
40
#define SOFTMMU_CODE_ACCESS
41
diff --git a/include/exec/translator.h b/include/exec/translator.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/include/exec/translator.h
44
+++ b/include/exec/translator.h
45
@@ -XXX,XX +XXX,XX @@ void translator_loop_temp_check(DisasContextBase *db);
46
/*
47
* Translator Load Functions
48
*
49
- * These are intended to replace the old cpu_ld*_code functions and
50
- * are mandatory for front-ends that have been migrated to the common
51
- * translator_loop. These functions are only intended to be called
52
- * from the translation stage and should not be called from helper
53
- * functions. Those functions should be converted to encode the
54
- * relevant information at translation time.
55
+ * These are intended to replace the direct usage of the cpu_ld*_code
56
+ * functions and are mandatory for front-ends that have been migrated
57
+ * to the common translator_loop. These functions are only intended
58
+ * to be called from the translation stage and should not be called
59
+ * from helper functions. Those functions should be converted to encode
60
+ * the relevant information at translation time.
61
*/
62
63
-#ifdef CONFIG_USER_ONLY
64
-
65
-#define DO_LOAD(type, name, shift) \
66
- do { \
67
- set_helper_retaddr(1); \
68
- ret = name ## _p(g2h(pc)); \
69
- clear_helper_retaddr(); \
70
- } while (0)
71
-
72
-#else
73
-
74
-#define DO_LOAD(type, name, shift) \
75
- do { \
76
- int mmu_idx = cpu_mmu_index(env, true); \
77
- TCGMemOpIdx oi = make_memop_idx(shift, mmu_idx); \
78
- ret = helper_ret_ ## name ## _cmmu(env, pc, oi, 0); \
79
- } while (0)
80
-
81
-#endif
82
-
83
-#define GEN_TRANSLATOR_LD(fullname, name, type, shift, swap_fn) \
84
+#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
85
static inline type \
86
fullname ## _swap(CPUArchState *env, abi_ptr pc, bool do_swap) \
87
{ \
88
- type ret; \
89
- DO_LOAD(type, name, shift); \
90
- \
91
+ type ret = load_fn(env, pc); \
92
if (do_swap) { \
93
ret = swap_fn(ret); \
94
} \
95
@@ -XXX,XX +XXX,XX @@ void translator_loop_temp_check(DisasContextBase *db);
96
return fullname ## _swap(env, pc, false); \
97
}
98
99
-GEN_TRANSLATOR_LD(translator_ldub, ldub, uint8_t, 0, /* no swap */ )
100
-GEN_TRANSLATOR_LD(translator_ldsw, ldsw, int16_t, 1, bswap16)
101
-GEN_TRANSLATOR_LD(translator_lduw, lduw, uint16_t, 1, bswap16)
102
-GEN_TRANSLATOR_LD(translator_ldl, ldl, uint32_t, 2, bswap32)
103
-GEN_TRANSLATOR_LD(translator_ldq, ldq, uint64_t, 3, bswap64)
104
+GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */)
105
+GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16)
106
+GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16)
107
+GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32)
108
+GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
109
#undef GEN_TRANSLATOR_LD
110
111
#endif /* EXEC__TRANSLATOR_H */
112
--
2.20.1

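For illustration only (not taken from the series): after the conversion above, a front-end's translate_insn hook fetches opcodes through the translator loads rather than open-coding a _cmmu call; the function name and the use of pc_next below are a sketch, not any particular target.

    /* Sketch: fetch a 16-bit instruction word and advance the PC.
     * translator_lduw() wraps cpu_lduw_code() plus plugin/tracing hooks. */
    static void my_translate_insn(DisasContextBase *dcbase, CPUState *cs)
    {
        CPUArchState *env = cs->env_ptr;
        uint16_t insn = translator_lduw(env, dcbase->pc_next);

        dcbase->pc_next += 2;
        /* ... decode insn and emit TCG ops ... */
    }
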
No functional change, but the smaller expressions make
1
There are no uses of the *_cmmu names other than the bare wrapping
2
the code easier to read.
2
within the *_code inlines. Therefore rename the functions so we
3
3
can drop the inlines.
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Use abi_ptr instead of target_ulong in preparation for user-only;
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
the two types are identical for softmmu.
7
8
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
11
---
9
accel/tcg/cputlb.c | 35 +++++++++++++++++------------------
12
include/exec/cpu_ldst.h | 29 ++++------
10
1 file changed, 17 insertions(+), 18 deletions(-)
13
include/exec/cpu_ldst_template.h | 21 -------
11
14
tcg/tcg.h | 29 ----------
15
accel/tcg/cputlb.c | 94 ++++++++------------------------
16
docs/devel/loads-stores.rst | 4 +-
17
5 files changed, 36 insertions(+), 141 deletions(-)
18
19
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/include/exec/cpu_ldst.h
22
+++ b/include/exec/cpu_ldst.h
23
@@ -XXX,XX +XXX,XX @@ void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
24
#undef CPU_MMU_INDEX
25
#undef MEMSUFFIX
26
27
-#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
28
-#define MEMSUFFIX _code
29
-#define SOFTMMU_CODE_ACCESS
30
+uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
31
+uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
32
+uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
33
+uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);
34
35
-#define DATA_SIZE 1
36
-#include "exec/cpu_ldst_template.h"
37
+static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
38
+{
39
+ return (int8_t)cpu_ldub_code(env, addr);
40
+}
41
42
-#define DATA_SIZE 2
43
-#include "exec/cpu_ldst_template.h"
44
-
45
-#define DATA_SIZE 4
46
-#include "exec/cpu_ldst_template.h"
47
-
48
-#define DATA_SIZE 8
49
-#include "exec/cpu_ldst_template.h"
50
-
51
-#undef CPU_MMU_INDEX
52
-#undef MEMSUFFIX
53
-#undef SOFTMMU_CODE_ACCESS
54
+static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
55
+{
56
+ return (int16_t)cpu_lduw_code(env, addr);
57
+}
58
59
#endif /* defined(CONFIG_USER_ONLY) */
60
61
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
62
index XXXXXXX..XXXXXXX 100644
63
--- a/include/exec/cpu_ldst_template.h
64
+++ b/include/exec/cpu_ldst_template.h
65
@@ -XXX,XX +XXX,XX @@
66
67
/* generic load/store macros */
68
69
-#ifdef SOFTMMU_CODE_ACCESS
70
-
71
-static inline RES_TYPE
72
-glue(glue(cpu_ld, USUFFIX), _code)(CPUArchState *env, target_ulong ptr)
73
-{
74
- TCGMemOpIdx oi = make_memop_idx(MO_TE | SHIFT, CPU_MMU_INDEX);
75
- return glue(glue(helper_ret_ld, USUFFIX), _cmmu)(env, ptr, oi, 0);
76
-}
77
-
78
-#if DATA_SIZE <= 2
79
-static inline int
80
-glue(glue(cpu_lds, SUFFIX), _code)(CPUArchState *env, target_ulong ptr)
81
-{
82
- return (DATA_STYPE)glue(glue(cpu_ld, USUFFIX), _code)(env, ptr);
83
-}
84
-#endif
85
-
86
-#else
87
-
88
static inline RES_TYPE
89
glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
90
target_ulong ptr,
91
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
92
glue(glue(cpu_st, SUFFIX), _mmuidx_ra)(env, ptr, v, CPU_MMU_INDEX, 0);
93
}
94
95
-#endif /* !SOFTMMU_CODE_ACCESS */
96
-
97
#undef RES_TYPE
98
#undef DATA_TYPE
99
#undef DATA_STYPE
100
diff --git a/tcg/tcg.h b/tcg/tcg.h
101
index XXXXXXX..XXXXXXX 100644
102
--- a/tcg/tcg.h
103
+++ b/tcg/tcg.h
104
@@ -XXX,XX +XXX,XX @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
105
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
106
TCGMemOpIdx oi, uintptr_t retaddr);
107
108
-uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
109
- TCGMemOpIdx oi, uintptr_t retaddr);
110
-int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
111
- TCGMemOpIdx oi, uintptr_t retaddr);
112
-uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
113
- TCGMemOpIdx oi, uintptr_t retaddr);
114
-int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
115
- TCGMemOpIdx oi, uintptr_t retaddr);
116
-uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
117
- TCGMemOpIdx oi, uintptr_t retaddr);
118
-uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
119
- TCGMemOpIdx oi, uintptr_t retaddr);
120
-uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
121
- TCGMemOpIdx oi, uintptr_t retaddr);
122
-int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
123
- TCGMemOpIdx oi, uintptr_t retaddr);
124
-uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
125
- TCGMemOpIdx oi, uintptr_t retaddr);
126
-uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
127
- TCGMemOpIdx oi, uintptr_t retaddr);
128
-
129
/* Temporary aliases until backends are converted. */
130
#ifdef TARGET_WORDS_BIGENDIAN
131
# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
132
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
133
# define helper_ret_stw_mmu helper_be_stw_mmu
134
# define helper_ret_stl_mmu helper_be_stl_mmu
135
# define helper_ret_stq_mmu helper_be_stq_mmu
136
-# define helper_ret_lduw_cmmu helper_be_lduw_cmmu
137
-# define helper_ret_ldsw_cmmu helper_be_ldsw_cmmu
138
-# define helper_ret_ldl_cmmu helper_be_ldl_cmmu
139
-# define helper_ret_ldq_cmmu helper_be_ldq_cmmu
140
#else
141
# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
142
# define helper_ret_lduw_mmu helper_le_lduw_mmu
143
@@ -XXX,XX +XXX,XX @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
144
# define helper_ret_stw_mmu helper_le_stw_mmu
145
# define helper_ret_stl_mmu helper_le_stl_mmu
146
# define helper_ret_stq_mmu helper_le_stq_mmu
147
-# define helper_ret_lduw_cmmu helper_le_lduw_cmmu
148
-# define helper_ret_ldsw_cmmu helper_le_ldsw_cmmu
149
-# define helper_ret_ldl_cmmu helper_le_ldl_cmmu
150
-# define helper_ret_ldq_cmmu helper_le_ldq_cmmu
151
#endif
152
153
uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
12
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
154
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
13
index XXXXXXX..XXXXXXX 100644
155
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/cputlb.c
156
--- a/accel/tcg/cputlb.c
15
+++ b/accel/tcg/cputlb.c
157
+++ b/accel/tcg/cputlb.c
16
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)
158
@@ -XXX,XX +XXX,XX @@ void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
17
159
18
/**
160
/* Code access functions. */
19
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
161
20
- * @env: CPU that owns the TLB
162
-static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
21
- * @mmu_idx: MMU index of the TLB
163
+static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
22
+ * @desc: The CPUTLBDesc portion of the TLB
164
TCGMemOpIdx oi, uintptr_t retaddr)
23
+ * @fast: The CPUTLBDescFast portion of the same TLB
165
{
24
*
166
- return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
25
* Called with tlb_lock_held.
167
+ return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
26
*
168
}
27
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)
169
28
* high), since otherwise we are likely to have a significant amount of
170
-uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
29
* conflict misses.
171
- TCGMemOpIdx oi, uintptr_t retaddr)
30
*/
172
+uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
31
-static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
173
{
32
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
174
- return full_ldub_cmmu(env, addr, oi, retaddr);
33
{
175
+ TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
34
- CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
176
+ return full_ldub_code(env, addr, oi, 0);
35
- size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
177
}
36
+ size_t old_size = tlb_n_entries(fast);
178
37
size_t rate;
179
-int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
38
size_t new_size = old_size;
180
- TCGMemOpIdx oi, uintptr_t retaddr)
39
int64_t now = get_clock_realtime();
181
+static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
40
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
182
+ TCGMemOpIdx oi, uintptr_t retaddr)
41
return;
183
{
42
}
184
- return (int8_t) full_ldub_cmmu(env, addr, oi, retaddr);
43
185
+ return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
44
- g_free(env_tlb(env)->f[mmu_idx].table);
186
}
45
- g_free(env_tlb(env)->d[mmu_idx].iotlb);
187
46
+ g_free(fast->table);
188
-static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
47
+ g_free(desc->iotlb);
189
- TCGMemOpIdx oi, uintptr_t retaddr)
48
190
+uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
49
tlb_window_reset(desc, now, 0);
191
{
50
/* desc->n_used_entries is cleared by the caller */
192
- return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
51
- env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
193
- full_le_lduw_cmmu);
52
- env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
194
+ TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
53
- env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
195
+ return full_lduw_code(env, addr, oi, 0);
54
+ fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
196
}
55
+ fast->table = g_try_new(CPUTLBEntry, new_size);
197
56
+ desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
198
-uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
57
+
199
- TCGMemOpIdx oi, uintptr_t retaddr)
58
/*
200
+static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
59
* If the allocations fail, try smaller sizes. We just freed some
201
+ TCGMemOpIdx oi, uintptr_t retaddr)
60
* memory, so going back to half of new_size has a good chance of working.
202
{
61
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
203
- return full_le_lduw_cmmu(env, addr, oi, retaddr);
62
* allocations to fail though, so we progressively reduce the allocation
204
+ return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
63
* size, aborting if we cannot even allocate the smallest TLB we support.
205
}
64
*/
206
65
- while (env_tlb(env)->f[mmu_idx].table == NULL ||
207
-int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
66
- env_tlb(env)->d[mmu_idx].iotlb == NULL) {
208
- TCGMemOpIdx oi, uintptr_t retaddr)
67
+ while (fast->table == NULL || desc->iotlb == NULL) {
209
+uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
68
if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
210
{
69
error_report("%s: %s", __func__, strerror(errno));
211
- return (int16_t) full_le_lduw_cmmu(env, addr, oi, retaddr);
70
abort();
212
+ TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
71
}
213
+ return full_ldl_code(env, addr, oi, 0);
72
new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
214
}
73
- env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
215
74
+ fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
216
-static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
75
217
- TCGMemOpIdx oi, uintptr_t retaddr)
76
- g_free(env_tlb(env)->f[mmu_idx].table);
218
+static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
77
- g_free(env_tlb(env)->d[mmu_idx].iotlb);
219
+ TCGMemOpIdx oi, uintptr_t retaddr)
78
- env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
220
{
79
- env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
221
- return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
80
+ g_free(fast->table);
222
- full_be_lduw_cmmu);
81
+ g_free(desc->iotlb);
223
+ return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
82
+ fast->table = g_try_new(CPUTLBEntry, new_size);
224
}
83
+ desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
225
84
}
226
-uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
85
}
227
- TCGMemOpIdx oi, uintptr_t retaddr)
86
228
+uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
87
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
229
{
88
{
230
- return full_be_lduw_cmmu(env, addr, oi, retaddr);
89
- tlb_mmu_resize_locked(env, mmu_idx);
231
-}
90
+ tlb_mmu_resize_locked(&env_tlb(env)->d[mmu_idx], &env_tlb(env)->f[mmu_idx]);
232
-
91
env_tlb(env)->d[mmu_idx].n_used_entries = 0;
233
-int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
92
env_tlb(env)->d[mmu_idx].large_page_addr = -1;
234
- TCGMemOpIdx oi, uintptr_t retaddr)
93
env_tlb(env)->d[mmu_idx].large_page_mask = -1;
235
-{
236
- return (int16_t) full_be_lduw_cmmu(env, addr, oi, retaddr);
237
-}
238
-
239
-static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
240
- TCGMemOpIdx oi, uintptr_t retaddr)
241
-{
242
- return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
243
- full_le_ldul_cmmu);
244
-}
245
-
246
-uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
247
- TCGMemOpIdx oi, uintptr_t retaddr)
248
-{
249
- return full_le_ldul_cmmu(env, addr, oi, retaddr);
250
-}
251
-
252
-static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
253
- TCGMemOpIdx oi, uintptr_t retaddr)
254
-{
255
- return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
256
- full_be_ldul_cmmu);
257
-}
258
-
259
-uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
260
- TCGMemOpIdx oi, uintptr_t retaddr)
261
-{
262
- return full_be_ldul_cmmu(env, addr, oi, retaddr);
263
-}
264
-
265
-uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
266
- TCGMemOpIdx oi, uintptr_t retaddr)
267
-{
268
- return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
269
- helper_le_ldq_cmmu);
270
-}
271
-
272
-uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
273
- TCGMemOpIdx oi, uintptr_t retaddr)
274
-{
275
- return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
276
- helper_be_ldq_cmmu);
277
+ TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
278
+ return full_ldq_code(env, addr, oi, 0);
279
}
280
diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst
281
index XXXXXXX..XXXXXXX 100644
282
--- a/docs/devel/loads-stores.rst
283
+++ b/docs/devel/loads-stores.rst
284
@@ -XXX,XX +XXX,XX @@ more in line with the other memory access functions.
285
286
load: ``helper_{endian}_ld{sign}{size}_mmu(env, addr, opindex, retaddr)``
287
288
-load (code): ``helper_{endian}_ld{sign}{size}_cmmu(env, addr, opindex, retaddr)``
289
-
290
store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)``
291
292
``sign``
293
@@ -XXX,XX +XXX,XX @@ store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)``
294
- ``ret`` : target endianness
295
296
Regexes for git grep
297
- - ``\<helper_\(le\|be\|ret\)_ld[us]\?[bwlq]_c\?mmu\>``
298
+ - ``\<helper_\(le\|be\|ret\)_ld[us]\?[bwlq]_mmu\>``
299
- ``\<helper_\(le\|be\|ret\)_st[bwlq]_mmu\>``
300
301
``address_space_*``
94
--
2.20.1

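As a hedged illustration (not part of the patch): once helper_ret_ld*_cmmu is gone, a helper that needs to re-read an opcode calls the cpu_ld*_code functions directly, with no TCGMemOpIdx or retaddr to build by hand; the wrapper name below is hypothetical.

    /* Sketch: fetch one opcode byte at "pc".  There is no retaddr argument;
     * code accesses assume CPU state is already synchronized and report an
     * instruction-fetch fault rather than a data fault. */
    static uint32_t fetch_opcode_byte(CPUArchState *env, target_ulong pc)
    {
        return cpu_ldub_code(env, pc);
    }
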
This finishes the new interface begun with the previous patch.
Document the interface and deprecate MMU_MODE<N>_SUFFIX.

Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst.h     |  80 +++++++++++++-
 docs/devel/loads-stores.rst | 211 ++++++++++++++++++++++++++----------
 2 files changed, 230 insertions(+), 61 deletions(-)

diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/cpu_ldst.h
16
+++ b/include/exec/cpu_ldst.h
17
@@ -XXX,XX +XXX,XX @@
18
*
19
* The syntax for the accessors is:
20
*
21
- * load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
22
+ * load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
23
+ * cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr)
24
+ * cpu_ld{sign}{size}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
25
*
26
- * store: cpu_st{sign}{size}_{mmusuffix}(env, ptr, val)
27
+ * store: cpu_st{size}_{mmusuffix}(env, ptr, val)
28
+ * cpu_st{size}_{mmusuffix}_ra(env, ptr, val, retaddr)
29
+ * cpu_st{size}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
30
*
31
* sign is:
32
* (empty): for 32 and 64 bit sizes
33
@@ -XXX,XX +XXX,XX @@
34
* l: 32 bits
35
* q: 64 bits
36
*
37
- * mmusuffix is one of the generic suffixes "data" or "code", or
38
- * (for softmmu configs) a target-specific MMU mode suffix as defined
39
- * in target cpu.h.
40
+ * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
41
+ * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
42
+ * the index to use; the "data" and "code" suffixes take the index from
43
+ * cpu_mmu_index().
44
*/
45
#ifndef CPU_LDST_H
46
#define CPU_LDST_H
47
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
48
#undef MEMSUFFIX
49
#undef CODE_ACCESS
50
51
+/*
52
+ * Provide the same *_mmuidx_ra interface as for softmmu.
53
+ * The mmu_idx argument is ignored.
54
+ */
55
+
56
+static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
57
+ int mmu_idx, uintptr_t ra)
58
+{
59
+ return cpu_ldub_data_ra(env, addr, ra);
60
+}
61
+
62
+static inline uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
63
+ int mmu_idx, uintptr_t ra)
64
+{
65
+ return cpu_lduw_data_ra(env, addr, ra);
66
+}
67
+
68
+static inline uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
69
+ int mmu_idx, uintptr_t ra)
70
+{
71
+ return cpu_ldl_data_ra(env, addr, ra);
72
+}
73
+
74
+static inline uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
75
+ int mmu_idx, uintptr_t ra)
76
+{
77
+ return cpu_ldq_data_ra(env, addr, ra);
78
+}
79
+
80
+static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
81
+ int mmu_idx, uintptr_t ra)
82
+{
83
+ return cpu_ldsb_data_ra(env, addr, ra);
84
+}
85
+
86
+static inline int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
87
+ int mmu_idx, uintptr_t ra)
88
+{
89
+ return cpu_ldsw_data_ra(env, addr, ra);
90
+}
91
+
92
+static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
93
+ uint32_t val, int mmu_idx, uintptr_t ra)
94
+{
95
+ cpu_stb_data_ra(env, addr, val, ra);
96
+}
97
+
98
+static inline void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
99
+ uint32_t val, int mmu_idx, uintptr_t ra)
100
+{
101
+ cpu_stw_data_ra(env, addr, val, ra);
102
+}
103
+
104
+static inline void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
105
+ uint32_t val, int mmu_idx, uintptr_t ra)
106
+{
107
+ cpu_stl_data_ra(env, addr, val, ra);
108
+}
109
+
110
+static inline void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
111
+ uint64_t val, int mmu_idx, uintptr_t ra)
112
+{
113
+ cpu_stq_data_ra(env, addr, val, ra);
114
+}
115
+
116
#else
117
118
/* Needed for TCG_OVERSIZED_GUEST */
119
diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst
120
index XXXXXXX..XXXXXXX 100644
121
--- a/docs/devel/loads-stores.rst
122
+++ b/docs/devel/loads-stores.rst
123
@@ -XXX,XX +XXX,XX @@ Regexes for git grep
124
- ``\<ldn_\([hbl]e\)?_p\>``
125
- ``\<stn_\([hbl]e\)?_p\>``
126
127
-``cpu_{ld,st}_*``
128
-~~~~~~~~~~~~~~~~~
129
+``cpu_{ld,st}*_mmuidx_ra``
130
+~~~~~~~~~~~~~~~~~~~~~~~~~~
131
132
-These functions operate on a guest virtual address. Be aware
133
-that these functions may cause a guest CPU exception to be
134
-taken (e.g. for an alignment fault or MMU fault) which will
135
-result in guest CPU state being updated and control longjumping
136
-out of the function call. They should therefore only be used
137
-in code that is implementing emulation of the target CPU.
138
+These functions operate on a guest virtual address plus a context,
139
+known as a "mmu index" or ``mmuidx``, which controls how that virtual
140
+address is translated. The meaning of the indexes are target specific,
141
+but specifying a particular index might be necessary if, for instance,
142
+the helper requires an "always as non-privileged" access rather that
143
+the default access for the current state of the guest CPU.
144
145
-These functions may throw an exception (longjmp() back out
146
-to the top level TCG loop). This means they must only be used
147
-from helper functions where the translator has saved all
148
-necessary CPU state before generating the helper function call.
149
-It's usually better to use the ``_ra`` variants described below
150
-from helper functions, but these functions are the right choice
151
-for calls made from hooks like the CPU do_interrupt hook or
152
-when you know for certain that the translator had to save all
153
-the CPU state that ``cpu_restore_state()`` would restore anyway.
154
+These functions may cause a guest CPU exception to be taken
155
+(e.g. for an alignment fault or MMU fault) which will result in
156
+guest CPU state being updated and control longjmp'ing out of the
157
+function call. They should therefore only be used in code that is
158
+implementing emulation of the guest CPU.
159
+
160
+The ``retaddr`` parameter is used to control unwinding of the
161
+guest CPU state in case of a guest CPU exception. This is passed
162
+to ``cpu_restore_state()``. Therefore the value should either be 0,
163
+to indicate that the guest CPU state is already synchronized, or
164
+the result of ``GETPC()`` from the top level ``HELPER(foo)``
165
+function, which is a return address into the generated code.
166
167
Function names follow the pattern:
168
169
-load: ``cpu_ld{sign}{size}_{mmusuffix}(env, ptr)``
170
+load: ``cpu_ld{sign}{size}_mmuidx_ra(env, ptr, mmuidx, retaddr)``
171
172
-store: ``cpu_st{size}_{mmusuffix}(env, ptr, val)``
173
+store: ``cpu_st{size}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)``
174
175
``sign``
176
- (empty) : for 32 or 64 bit sizes
177
@@ -XXX,XX +XXX,XX @@ store: ``cpu_st{size}_{mmusuffix}(env, ptr, val)``
178
- ``l`` : 32 bits
179
- ``q`` : 64 bits
180
181
-``mmusuffix`` is one of the generic suffixes ``data`` or ``code``, or
182
-(for softmmu configs) a target-specific MMU mode suffix as defined
183
-in the target's ``cpu.h``.
184
+Regexes for git grep:
185
+ - ``\<cpu_ld[us]\?[bwlq]_mmuidx_ra\>``
186
+ - ``\<cpu_st[bwlq]_mmuidx_ra\>``
187
188
-Regexes for git grep
189
- - ``\<cpu_ld[us]\?[bwlq]_[a-zA-Z0-9]\+\>``
190
- - ``\<cpu_st[bwlq]_[a-zA-Z0-9]\+\>``
191
+``cpu_{ld,st}*_data_ra``
192
+~~~~~~~~~~~~~~~~~~~~~~~~
193
194
-``cpu_{ld,st}_*_ra``
195
-~~~~~~~~~~~~~~~~~~~~
196
-
197
-These functions work like the ``cpu_{ld,st}_*`` functions except
198
-that they also take a ``retaddr`` argument. This extra argument
199
-allows for correct unwinding of any exception that is taken,
200
-and should generally be the result of GETPC() called directly
201
-from the top level HELPER(foo) function (i.e. the return address
202
-in the generated code).
203
+These functions work like the ``cpu_{ld,st}_mmuidx_ra`` functions
204
+except that the ``mmuidx`` parameter is taken from the current mode
205
+of the guest CPU, as determined by ``cpu_mmu_index(env, false)``.
206
207
These are generally the preferred way to do accesses by guest
208
-virtual address from helper functions; see the documentation
209
-of the non-``_ra`` variants for when those would be better.
210
-
211
-Calling these functions with a ``retaddr`` argument of 0 is
212
-equivalent to calling the non-``_ra`` version of the function.
213
+virtual address from helper functions, unless the access should
214
+be performed with a context other than the default.
215
216
Function names follow the pattern:
217
218
-load: ``cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr)``
219
+load: ``cpu_ld{sign}{size}_data_ra(env, ptr, ra)``
220
221
-store: ``cpu_st{sign}{size}_{mmusuffix}_ra(env, ptr, val, retaddr)``
222
+store: ``cpu_st{size}_data_ra(env, ptr, val, ra)``
223
+
224
+``sign``
225
+ - (empty) : for 32 or 64 bit sizes
226
+ - ``u`` : unsigned
227
+ - ``s`` : signed
228
+
229
+``size``
230
+ - ``b`` : 8 bits
231
+ - ``w`` : 16 bits
232
+ - ``l`` : 32 bits
233
+ - ``q`` : 64 bits
234
+
235
+Regexes for git grep:
236
+ - ``\<cpu_ld[us]\?[bwlq]_data_ra\>``
237
+ - ``\<cpu_st[bwlq]_data_ra\>``
238
+
239
+``cpu_{ld,st}*_data``
240
+~~~~~~~~~~~~~~~~~~~~~
241
+
242
+These functions work like the ``cpu_{ld,st}_data_ra`` functions
243
+except that the ``retaddr`` parameter is 0, and thus does not
244
+unwind guest CPU state.
245
+
246
+This means they must only be used from helper functions where the
247
+translator has saved all necessary CPU state. These functions are
248
+the right choice for calls made from hooks like the CPU ``do_interrupt``
249
+hook or when you know for certain that the translator had to save all
250
+the CPU state anyway.
251
+
252
+Function names follow the pattern:
253
+
254
+load: ``cpu_ld{sign}{size}_data(env, ptr)``
255
+
256
+store: ``cpu_st{size}_data(env, ptr, val)``
257
+
258
+``sign``
259
+ - (empty) : for 32 or 64 bit sizes
260
+ - ``u`` : unsigned
261
+ - ``s`` : signed
262
+
263
+``size``
264
+ - ``b`` : 8 bits
265
+ - ``w`` : 16 bits
266
+ - ``l`` : 32 bits
267
+ - ``q`` : 64 bits
268
269
Regexes for git grep
270
- - ``\<cpu_ld[us]\?[bwlq]_[a-zA-Z0-9]\+_ra\>``
271
- - ``\<cpu_st[bwlq]_[a-zA-Z0-9]\+_ra\>``
272
+ - ``\<cpu_ld[us]\?[bwlq]_data\>``
273
+ - ``\<cpu_st[bwlq]_data\+\>``
274
275
-``helper_*_{ld,st}*mmu``
276
-~~~~~~~~~~~~~~~~~~~~~~~~
277
+``cpu_ld*_code``
278
+~~~~~~~~~~~~~~~~
279
+
280
+These functions perform a read for instruction execution. The ``mmuidx``
281
+parameter is taken from the current mode of the guest CPU, as determined
282
+by ``cpu_mmu_index(env, true)``. The ``retaddr`` parameter is 0, and
283
+thus does not unwind guest CPU state, because CPU state is always
284
+synchronized while translating instructions. Any guest CPU exception
285
+that is raised will indicate an instruction execution fault rather than
286
+a data read fault.
287
+
288
+In general these functions should not be used directly during translation.
289
+There are wrapper functions that are to be used which also take care of
290
+plugins for tracing.
291
+
292
+Function names follow the pattern:
293
+
294
+load: ``cpu_ld{sign}{size}_code(env, ptr)``
295
+
296
+``sign``
297
+ - (empty) : for 32 or 64 bit sizes
298
+ - ``u`` : unsigned
299
+ - ``s`` : signed
300
+
301
+``size``
302
+ - ``b`` : 8 bits
303
+ - ``w`` : 16 bits
304
+ - ``l`` : 32 bits
305
+ - ``q`` : 64 bits
306
+
307
+Regexes for git grep:
308
+ - ``\<cpu_ld[us]\?[bwlq]_code\>``
309
+
310
+``translator_ld*``
311
+~~~~~~~~~~~~~~~~~~
312
+
313
+These functions are a wrapper for ``cpu_ld*_code`` which also perform
314
+any actions required by any tracing plugins. They are only to be
315
+called during the translator callback ``translate_insn``.
316
+
317
+There is a set of functions ending in ``_swap`` which, if the parameter
318
+is true, returns the value in the endianness that is the reverse of
319
+the guest native endianness, as determined by ``TARGET_WORDS_BIGENDIAN``.
320
+
321
+Function names follow the pattern:
322
+
323
+load: ``translator_ld{sign}{size}(env, ptr)``
324
+
325
+swap: ``translator_ld{sign}{size}_swap(env, ptr, swap)``
326
+
327
+``sign``
328
+ - (empty) : for 32 or 64 bit sizes
329
+ - ``u`` : unsigned
330
+ - ``s`` : signed
331
+
332
+``size``
333
+ - ``b`` : 8 bits
334
+ - ``w`` : 16 bits
335
+ - ``l`` : 32 bits
336
+ - ``q`` : 64 bits
337
+
338
+Regexes for git grep
339
+ - ``\<translator_ld[us]\?[bwlq]\(_swap\)\?\>``
340
+
341
+``helper_*_{ld,st}*_mmu``
342
+~~~~~~~~~~~~~~~~~~~~~~~~~
343
344
These functions are intended primarily to be called by the code
345
generated by the TCG backend. They may also be called by target
346
-CPU helper function code. Like the ``cpu_{ld,st}_*_ra`` functions
347
-they perform accesses by guest virtual address; the difference is
348
-that these functions allow you to specify an ``opindex`` parameter
349
-which encodes (among other things) the mmu index to use for the
350
-access. This is necessary if your helper needs to make an access
351
-via a specific mmu index (for instance, an "always as non-privileged"
352
-access) rather than using the default mmu index for the current state
353
-of the guest CPU.
354
+CPU helper function code. Like the ``cpu_{ld,st}_mmuidx_ra`` functions
355
+they perform accesses by guest virtual address, with a given ``mmuidx``.
356
357
-The ``opindex`` parameter should be created by calling ``make_memop_idx()``.
358
+These functions specify an ``opindex`` parameter which encodes
359
+(among other things) the mmu index to use for the access. This parameter
360
+should be created by calling ``make_memop_idx()``.
361
362
The ``retaddr`` parameter should be the result of GETPC() called directly
363
from the top level HELPER(foo) function (or 0 if no guest CPU state
364
@@ -XXX,XX +XXX,XX @@ unwinding is required).
365
366
**TODO** The names of these functions are a bit odd for historical
367
reasons because they were originally expected to be called only from
368
-within generated code. We should rename them to bring them
369
-more in line with the other memory access functions.
370
+within generated code. We should rename them to bring them more in
371
+line with the other memory access functions. The explicit endianness
372
+is the only feature they have beyond ``*_mmuidx_ra``.
373
374
load: ``helper_{endian}_ld{sign}{size}_mmu(env, addr, opindex, retaddr)``
375
376
--
2.20.1

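To make the documented split concrete (an illustrative sketch, not from the series): a helper that runs in the guest's current context uses the _data_ra form, while one that must force a particular translation regime names the index explicitly; MMU_SOME_USER_IDX is a placeholder for a target-defined constant.

    /* Default context: the mmu index comes from cpu_mmu_index(env, false). */
    uint32_t v = cpu_ldl_data_ra(env, addr, GETPC());

    /* Explicit context: e.g. force an "always as unprivileged" access. */
    uint32_t w = cpu_ldl_mmuidx_ra(env, addr, MMU_SOME_USER_IDX, GETPC());
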
The accel_list and tmp variables are only used when manufacturing
1
Do not use exec/cpu_ldst_{,useronly_}template.h directly,
2
-machine accel, options based on -accel.
2
but instead use the functional interface.
3
3
4
Cc: Eduardo Habkost <ehabkost@redhat.com>
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
4
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
6
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
---
9
vl.c | 3 ++-
10
target/i386/seg_helper.c | 56 ++++++++++++++++++++--------------------
10
1 file changed, 2 insertions(+), 1 deletion(-)
11
1 file changed, 28 insertions(+), 28 deletions(-)
11
12
12
diff --git a/vl.c b/vl.c
13
diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c
13
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
14
--- a/vl.c
15
--- a/target/i386/seg_helper.c
15
+++ b/vl.c
16
+++ b/target/i386/seg_helper.c
16
@@ -XXX,XX +XXX,XX @@ static int do_configure_accelerator(void *opaque, QemuOpts *opts, Error **errp)
17
@@ -XXX,XX +XXX,XX @@
17
static void configure_accelerators(const char *progname)
18
# define LOG_PCALL_STATE(cpu) do { } while (0)
18
{
19
#endif
19
const char *accel;
20
20
- char **accel_list, **tmp;
21
-#ifdef CONFIG_USER_ONLY
21
bool init_failed = false;
22
-#define MEMSUFFIX _kernel
22
23
-#define DATA_SIZE 1
23
qemu_opts_foreach(qemu_find_opts("icount"),
24
-#include "exec/cpu_ldst_useronly_template.h"
24
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
25
+/*
25
26
+ * TODO: Convert callers to compute cpu_mmu_index_kernel once
26
accel = qemu_opt_get(qemu_get_machine_opts(), "accel");
27
+ * and use *_mmuidx_ra directly.
27
if (QTAILQ_EMPTY(&qemu_accel_opts.head)) {
28
+ */
28
+ char **accel_list, **tmp;
29
+#define cpu_ldub_kernel_ra(e, p, r) \
29
+
30
+ cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
30
if (accel == NULL) {
31
+#define cpu_lduw_kernel_ra(e, p, r) \
31
/* Select the default accelerator */
32
+ cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
32
if (!accel_find("tcg") && !accel_find("kvm")) {
33
+#define cpu_ldl_kernel_ra(e, p, r) \
34
+ cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
35
+#define cpu_ldq_kernel_ra(e, p, r) \
36
+ cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
37
38
-#define DATA_SIZE 2
39
-#include "exec/cpu_ldst_useronly_template.h"
40
+#define cpu_stb_kernel_ra(e, p, v, r) \
41
+ cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
42
+#define cpu_stw_kernel_ra(e, p, v, r) \
43
+ cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
44
+#define cpu_stl_kernel_ra(e, p, v, r) \
45
+ cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
46
+#define cpu_stq_kernel_ra(e, p, v, r) \
47
+ cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
48
49
-#define DATA_SIZE 4
50
-#include "exec/cpu_ldst_useronly_template.h"
51
+#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0)
52
+#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0)
53
+#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0)
54
+#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0)
55
56
-#define DATA_SIZE 8
57
-#include "exec/cpu_ldst_useronly_template.h"
58
-#undef MEMSUFFIX
59
-#else
60
-#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
61
-#define MEMSUFFIX _kernel
62
-#define DATA_SIZE 1
63
-#include "exec/cpu_ldst_template.h"
64
-
65
-#define DATA_SIZE 2
66
-#include "exec/cpu_ldst_template.h"
67
-
68
-#define DATA_SIZE 4
69
-#include "exec/cpu_ldst_template.h"
70
-
71
-#define DATA_SIZE 8
72
-#include "exec/cpu_ldst_template.h"
73
-#undef CPU_MMU_INDEX
74
-#undef MEMSUFFIX
75
-#endif
76
+#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0)
77
+#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0)
78
+#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0)
79
+#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0)
80
81
/* return non zero if error */
82
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
33
--
2.20.1

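For reference (not part of the patch itself): the new x86 wrappers above only pin the mmu index, so a call such as cpu_ldl_kernel_ra(env, ptr, ra) expands along the lines of:

    /* Equivalent expansion of the cpu_ldl_kernel_ra() macro defined above. */
    uint32_t val = cpu_ldl_mmuidx_ra(env, ptr, cpu_mmu_index_kernel(env), ra);
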
There are no users of this function outside cputlb.c,
1
With the tracing hooks, the inline functions are no longer
2
and its interface will change in the next patch.
2
so simple. Reduce the amount of preprocessor obfuscation
3
by expanding the text of each of the functions generated.
3
4
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
8
---
9
include/exec/cpu_ldst.h | 5 -----
9
include/exec/cpu_ldst.h | 54 +++--
10
accel/tcg/cputlb.c | 5 +++++
10
include/exec/cpu_ldst_useronly_template.h | 159 ---------------
11
2 files changed, 5 insertions(+), 5 deletions(-)
11
accel/tcg/user-exec.c | 236 ++++++++++++++++++++++
12
3 files changed, 262 insertions(+), 187 deletions(-)
13
delete mode 100644 include/exec/cpu_ldst_useronly_template.h
12
14
13
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
15
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/cpu_ldst.h
17
--- a/include/exec/cpu_ldst.h
16
+++ b/include/exec/cpu_ldst.h
18
+++ b/include/exec/cpu_ldst.h
17
@@ -XXX,XX +XXX,XX @@ static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
19
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
18
return (addr >> TARGET_PAGE_BITS) & size_mask;
20
21
/* In user-only mode we provide only the _code and _data accessors. */
22
23
-#define MEMSUFFIX _data
24
-#define DATA_SIZE 1
25
-#include "exec/cpu_ldst_useronly_template.h"
26
+uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
27
+uint32_t cpu_lduw_data(CPUArchState *env, abi_ptr ptr);
28
+uint32_t cpu_ldl_data(CPUArchState *env, abi_ptr ptr);
29
+uint64_t cpu_ldq_data(CPUArchState *env, abi_ptr ptr);
30
+int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
31
+int cpu_ldsw_data(CPUArchState *env, abi_ptr ptr);
32
33
-#define DATA_SIZE 2
34
-#include "exec/cpu_ldst_useronly_template.h"
35
+uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
36
+uint32_t cpu_lduw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
37
+uint32_t cpu_ldl_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
38
+uint64_t cpu_ldq_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
39
+int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
40
+int cpu_ldsw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
41
42
-#define DATA_SIZE 4
43
-#include "exec/cpu_ldst_useronly_template.h"
44
+void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
45
+void cpu_stw_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
46
+void cpu_stl_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
47
+void cpu_stq_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
48
49
-#define DATA_SIZE 8
50
-#include "exec/cpu_ldst_useronly_template.h"
51
-#undef MEMSUFFIX
52
-
53
-#define MEMSUFFIX _code
54
-#define CODE_ACCESS
55
-#define DATA_SIZE 1
56
-#include "exec/cpu_ldst_useronly_template.h"
57
-
58
-#define DATA_SIZE 2
59
-#include "exec/cpu_ldst_useronly_template.h"
60
-
61
-#define DATA_SIZE 4
62
-#include "exec/cpu_ldst_useronly_template.h"
63
-
64
-#define DATA_SIZE 8
65
-#include "exec/cpu_ldst_useronly_template.h"
66
-#undef MEMSUFFIX
67
-#undef CODE_ACCESS
68
+void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
69
+ uint32_t val, uintptr_t retaddr);
70
+void cpu_stw_data_ra(CPUArchState *env, abi_ptr ptr,
71
+ uint32_t val, uintptr_t retaddr);
72
+void cpu_stl_data_ra(CPUArchState *env, abi_ptr ptr,
73
+ uint32_t val, uintptr_t retaddr);
74
+void cpu_stq_data_ra(CPUArchState *env, abi_ptr ptr,
75
+ uint64_t val, uintptr_t retaddr);
76
77
/*
78
* Provide the same *_mmuidx_ra interface as for softmmu.
79
@@ -XXX,XX +XXX,XX @@ void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
80
#undef CPU_MMU_INDEX
81
#undef MEMSUFFIX
82
83
+#endif /* defined(CONFIG_USER_ONLY) */
84
+
85
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
86
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
87
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
88
@@ -XXX,XX +XXX,XX @@ static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
89
return (int16_t)cpu_lduw_code(env, addr);
19
}
90
}
20
91
21
-static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
92
-#endif /* defined(CONFIG_USER_ONLY) */
93
-
94
/**
95
* tlb_vaddr_to_host:
96
* @env: CPUArchState
97
diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
98
deleted file mode 100644
99
index XXXXXXX..XXXXXXX
100
--- a/include/exec/cpu_ldst_useronly_template.h
101
+++ /dev/null
102
@@ -XXX,XX +XXX,XX @@
103
-/*
104
- * User-only accessor function support
105
- *
106
- * Generate inline load/store functions for one data size.
107
- *
108
- * Generate a store function as well as signed and unsigned loads.
109
- *
110
- * Not used directly but included from cpu_ldst.h.
111
- *
112
- * Copyright (c) 2015 Linaro Limited
113
- *
114
- * This library is free software; you can redistribute it and/or
115
- * modify it under the terms of the GNU Lesser General Public
116
- * License as published by the Free Software Foundation; either
117
- * version 2 of the License, or (at your option) any later version.
118
- *
119
- * This library is distributed in the hope that it will be useful,
120
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
121
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
122
- * Lesser General Public License for more details.
123
- *
124
- * You should have received a copy of the GNU Lesser General Public
125
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
126
- */
127
-
128
-#if !defined(CODE_ACCESS)
129
-#include "trace-root.h"
130
-#endif
131
-
132
-#include "trace/mem.h"
133
-
134
-#if DATA_SIZE == 8
135
-#define SUFFIX q
136
-#define USUFFIX q
137
-#define DATA_TYPE uint64_t
138
-#define SHIFT 3
139
-#elif DATA_SIZE == 4
140
-#define SUFFIX l
141
-#define USUFFIX l
142
-#define DATA_TYPE uint32_t
143
-#define SHIFT 2
144
-#elif DATA_SIZE == 2
145
-#define SUFFIX w
146
-#define USUFFIX uw
147
-#define DATA_TYPE uint16_t
148
-#define DATA_STYPE int16_t
149
-#define SHIFT 1
150
-#elif DATA_SIZE == 1
151
-#define SUFFIX b
152
-#define USUFFIX ub
153
-#define DATA_TYPE uint8_t
154
-#define DATA_STYPE int8_t
155
-#define SHIFT 0
156
-#else
157
-#error unsupported data size
158
-#endif
159
-
160
-#if DATA_SIZE == 8
161
-#define RES_TYPE uint64_t
162
-#else
163
-#define RES_TYPE uint32_t
164
-#endif
165
-
166
-static inline RES_TYPE
167
-glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
22
-{
168
-{
23
- return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
169
- RES_TYPE ret;
170
-#ifdef CODE_ACCESS
171
- set_helper_retaddr(1);
172
- ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
173
- clear_helper_retaddr();
174
-#else
175
- MemOp op = MO_TE | SHIFT;
176
- uint16_t meminfo = trace_mem_get_info(op, MMU_USER_IDX, false);
177
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
178
- ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
179
-#endif
180
- return ret;
24
-}
181
-}
25
-
182
-
26
/* Find the TLB entry corresponding to the mmu_idx + address pair. */
183
-#ifndef CODE_ACCESS
27
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
184
-static inline RES_TYPE
28
target_ulong addr)
185
-glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
29
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
186
- abi_ptr ptr,
187
- uintptr_t retaddr)
188
-{
189
- RES_TYPE ret;
190
- set_helper_retaddr(retaddr);
191
- ret = glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(env, ptr);
192
- clear_helper_retaddr();
193
- return ret;
194
-}
195
-#endif
196
-
197
-#if DATA_SIZE <= 2
198
-static inline int
199
-glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
200
-{
201
- int ret;
202
-#ifdef CODE_ACCESS
203
- set_helper_retaddr(1);
204
- ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
205
- clear_helper_retaddr();
206
-#else
207
- MemOp op = MO_TE | MO_SIGN | SHIFT;
208
- uint16_t meminfo = trace_mem_get_info(op, MMU_USER_IDX, false);
209
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
210
- ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
211
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
212
-#endif
213
- return ret;
214
-}
215
-
216
-#ifndef CODE_ACCESS
217
-static inline int
218
-glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
219
- abi_ptr ptr,
220
- uintptr_t retaddr)
221
-{
222
- int ret;
223
- set_helper_retaddr(retaddr);
224
- ret = glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(env, ptr);
225
- clear_helper_retaddr();
226
- return ret;
227
-}
228
-#endif /* CODE_ACCESS */
229
-#endif /* DATA_SIZE <= 2 */
230
-
231
-#ifndef CODE_ACCESS
232
-static inline void
233
-glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
234
- RES_TYPE v)
235
-{
236
- MemOp op = MO_TE | SHIFT;
237
- uint16_t meminfo = trace_mem_get_info(op, MMU_USER_IDX, true);
238
- trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
239
- glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
240
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
241
-}
242
-
243
-static inline void
244
-glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
245
- abi_ptr ptr,
246
- RES_TYPE v,
247
- uintptr_t retaddr)
248
-{
249
- set_helper_retaddr(retaddr);
250
- glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(env, ptr, v);
251
- clear_helper_retaddr();
252
-}
253
-#endif
254
-
255
-#undef RES_TYPE
256
-#undef DATA_TYPE
257
-#undef DATA_STYPE
258
-#undef SUFFIX
259
-#undef USUFFIX
260
-#undef DATA_SIZE
261
-#undef SHIFT
262
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
30
index XXXXXXX..XXXXXXX 100644
263
index XXXXXXX..XXXXXXX 100644
31
--- a/accel/tcg/cputlb.c
264
--- a/accel/tcg/user-exec.c
32
+++ b/accel/tcg/cputlb.c
265
+++ b/accel/tcg/user-exec.c
33
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
266
@@ -XXX,XX +XXX,XX @@
34
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
267
#include "translate-all.h"
35
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
268
#include "exec/helper-proto.h"
36
269
#include "qemu/atomic128.h"
37
+static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
270
+#include "trace-root.h"
38
+{
271
+#include "trace/mem.h"
39
+ return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
272
40
+}
273
#undef EAX
41
+
274
#undef ECX
42
static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
275
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
43
{
276
44
return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
277
/* The softmmu versions of these helpers are in cputlb.c. */
278
279
+uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
280
+{
281
+ uint32_t ret;
282
+ uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false);
283
+
284
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
285
+ ret = ldub_p(g2h(ptr));
286
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
287
+ return ret;
288
+}
289
+
290
+int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
291
+{
292
+ int ret;
293
+ uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false);
294
+
295
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
296
+ ret = ldsb_p(g2h(ptr));
297
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
298
+ return ret;
299
+}
300
+
301
+uint32_t cpu_lduw_data(CPUArchState *env, abi_ptr ptr)
302
+{
303
+ uint32_t ret;
304
+ uint16_t meminfo = trace_mem_get_info(MO_TEUW, MMU_USER_IDX, false);
305
+
306
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
307
+ ret = lduw_p(g2h(ptr));
308
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
309
+ return ret;
310
+}
311
+
312
+int cpu_ldsw_data(CPUArchState *env, abi_ptr ptr)
313
+{
314
+ int ret;
315
+ uint16_t meminfo = trace_mem_get_info(MO_TESW, MMU_USER_IDX, false);
316
+
317
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
318
+ ret = ldsw_p(g2h(ptr));
319
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
320
+ return ret;
321
+}
322
+
323
+uint32_t cpu_ldl_data(CPUArchState *env, abi_ptr ptr)
324
+{
325
+ uint32_t ret;
326
+ uint16_t meminfo = trace_mem_get_info(MO_TEUL, MMU_USER_IDX, false);
327
+
328
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
329
+ ret = ldl_p(g2h(ptr));
330
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
331
+ return ret;
332
+}
333
+
334
+uint64_t cpu_ldq_data(CPUArchState *env, abi_ptr ptr)
335
+{
336
+ uint64_t ret;
337
+ uint16_t meminfo = trace_mem_get_info(MO_TEQ, MMU_USER_IDX, false);
338
+
339
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
340
+ ret = ldq_p(g2h(ptr));
341
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
342
+ return ret;
343
+}
344
+
345
+uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
346
+{
347
+ uint32_t ret;
348
+
349
+ set_helper_retaddr(retaddr);
350
+ ret = cpu_ldub_data(env, ptr);
351
+ clear_helper_retaddr();
352
+ return ret;
353
+}
354
+
355
+int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
356
+{
357
+ int ret;
358
+
359
+ set_helper_retaddr(retaddr);
360
+ ret = cpu_ldsb_data(env, ptr);
361
+ clear_helper_retaddr();
362
+ return ret;
363
+}
364
+
365
+uint32_t cpu_lduw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
366
+{
367
+ uint32_t ret;
368
+
369
+ set_helper_retaddr(retaddr);
370
+ ret = cpu_lduw_data(env, ptr);
371
+ clear_helper_retaddr();
372
+ return ret;
373
+}
374
+
375
+int cpu_ldsw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
376
+{
377
+ int ret;
378
+
379
+ set_helper_retaddr(retaddr);
380
+ ret = cpu_ldsw_data(env, ptr);
381
+ clear_helper_retaddr();
382
+ return ret;
383
+}
384
+
385
+uint32_t cpu_ldl_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
386
+{
387
+ uint32_t ret;
388
+
389
+ set_helper_retaddr(retaddr);
390
+ ret = cpu_ldl_data(env, ptr);
391
+ clear_helper_retaddr();
392
+ return ret;
393
+}
394
+
395
+uint64_t cpu_ldq_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
396
+{
397
+ uint64_t ret;
398
+
399
+ set_helper_retaddr(retaddr);
400
+ ret = cpu_ldq_data(env, ptr);
401
+ clear_helper_retaddr();
402
+ return ret;
403
+}
404
+
405
+void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
406
+{
407
+ uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true);
408
+
409
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
410
+ stb_p(g2h(ptr), val);
411
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
412
+}
413
+
414
+void cpu_stw_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
415
+{
416
+ uint16_t meminfo = trace_mem_get_info(MO_TEUW, MMU_USER_IDX, true);
417
+
418
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
419
+ stw_p(g2h(ptr), val);
420
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
421
+}
422
+
423
+void cpu_stl_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
424
+{
425
+ uint16_t meminfo = trace_mem_get_info(MO_TEUL, MMU_USER_IDX, true);
426
+
427
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
428
+ stl_p(g2h(ptr), val);
429
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
430
+}
431
+
432
+void cpu_stq_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
433
+{
434
+ uint16_t meminfo = trace_mem_get_info(MO_TEQ, MMU_USER_IDX, true);
435
+
436
+ trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
437
+ stq_p(g2h(ptr), val);
438
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
439
+}
440
+
441
+void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
442
+ uint32_t val, uintptr_t retaddr)
443
+{
444
+ set_helper_retaddr(retaddr);
445
+ cpu_stb_data(env, ptr, val);
446
+ clear_helper_retaddr();
447
+}
448
+
449
+void cpu_stw_data_ra(CPUArchState *env, abi_ptr ptr,
450
+ uint32_t val, uintptr_t retaddr)
451
+{
452
+ set_helper_retaddr(retaddr);
453
+ cpu_stw_data(env, ptr, val);
454
+ clear_helper_retaddr();
455
+}
456
+
457
+void cpu_stl_data_ra(CPUArchState *env, abi_ptr ptr,
458
+ uint32_t val, uintptr_t retaddr)
459
+{
460
+ set_helper_retaddr(retaddr);
461
+ cpu_stl_data(env, ptr, val);
462
+ clear_helper_retaddr();
463
+}
464
+
465
+void cpu_stq_data_ra(CPUArchState *env, abi_ptr ptr,
466
+ uint64_t val, uintptr_t retaddr)
467
+{
468
+ set_helper_retaddr(retaddr);
469
+ cpu_stq_data(env, ptr, val);
470
+ clear_helper_retaddr();
471
+}
472
+
473
+uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
474
+{
475
+ uint32_t ret;
476
+
477
+ set_helper_retaddr(1);
478
+ ret = ldub_p(g2h(ptr));
479
+ clear_helper_retaddr();
480
+ return ret;
481
+}
482
+
483
+uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
484
+{
485
+ uint32_t ret;
486
+
487
+ set_helper_retaddr(1);
488
+ ret = lduw_p(g2h(ptr));
489
+ clear_helper_retaddr();
490
+ return ret;
491
+}
492
+
493
+uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
494
+{
495
+ uint32_t ret;
496
+
497
+ set_helper_retaddr(1);
498
+ ret = ldl_p(g2h(ptr));
499
+ clear_helper_retaddr();
500
+ return ret;
501
+}
502
+
503
+uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
504
+{
505
+ uint64_t ret;
506
+
507
+ set_helper_retaddr(1);
508
+ ret = ldq_p(g2h(ptr));
509
+ clear_helper_retaddr();
510
+ return ret;
511
+}
512
+
513
/* Do not allow unaligned operations to proceed. Return the host address. */
514
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
515
int size, uintptr_t retaddr)
45
--
516
--
46
2.20.1
517
2.20.1
47
518
48
519
diff view generated by jsdifflib
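The new *_ra accessors in the user-exec.c hunk above all follow the same bracketing shape: publish the caller's return address in a thread-local before the raw host access and clear it afterwards (the real code also adds signal_barrier(), as the cpu_ldst.h hunk later in this series shows). A minimal, self-contained sketch of that pattern, using invented names rather than QEMU's:

    /* toy_retaddr.c -- stand-alone sketch of the set/clear_helper_retaddr
     * bracketing used by the *_ra accessors; build with `cc toy_retaddr.c`.
     */
    #include <stdint.h>
    #include <stdio.h>

    /* What a SIGSEGV handler would inspect to attribute the fault. */
    static __thread uintptr_t toy_helper_retaddr;

    static void toy_set_retaddr(uintptr_t ra) { toy_helper_retaddr = ra; }
    static void toy_clear_retaddr(void)       { toy_helper_retaddr = 0; }

    static uint32_t toy_ldl(const uint32_t *p) { return *p; }

    /* The _ra flavour: bracket the raw access with the bookkeeping. */
    static uint32_t toy_ldl_ra(const uint32_t *p, uintptr_t retaddr)
    {
        uint32_t ret;

        toy_set_retaddr(retaddr);
        ret = toy_ldl(p);
        toy_clear_retaddr();
        return ret;
    }

    int main(void)
    {
        uint32_t x = 42;

        printf("%u\n", toy_ldl_ra(&x, (uintptr_t)__builtin_return_address(0)));
        return 0;
    }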
New patch
1
The functions generated by these macros are unused.
1
2
3
Cc: Chris Wulff <crwulff@gmail.com>
4
Cc: Marek Vasut <marex@denx.de>
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
target/nios2/cpu.h | 2 --
10
1 file changed, 2 deletions(-)
11
12
diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/nios2/cpu.h
15
+++ b/target/nios2/cpu.h
16
@@ -XXX,XX +XXX,XX @@ void do_nios2_semihosting(CPUNios2State *env);
17
#define CPU_SAVE_VERSION 1
18
19
/* MMU modes definitions */
20
-#define MMU_MODE0_SUFFIX _kernel
21
-#define MMU_MODE1_SUFFIX _user
22
#define MMU_SUPERVISOR_IDX 0
23
#define MMU_USER_IDX 1
24
25
--
26
2.20.1
27
28
diff view generated by jsdifflib
New patch
1
The functions generated by these macros are unused.
1
2
3
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/alpha/cpu.h | 2 --
8
1 file changed, 2 deletions(-)
9
10
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/alpha/cpu.h
13
+++ b/target/alpha/cpu.h
14
@@ -XXX,XX +XXX,XX @@ enum {
15
PALcode cheats and usees the KSEG mapping for its code+data rather than
16
physical addresses. */
17
18
-#define MMU_MODE0_SUFFIX _kernel
19
-#define MMU_MODE1_SUFFIX _user
20
#define MMU_KERNEL_IDX 0
21
#define MMU_USER_IDX 1
22
#define MMU_PHYS_IDX 2
23
--
24
2.20.1
25
26
diff view generated by jsdifflib
New patch
1
The functions generated by these macros are unused.
1
2
3
Cc: Edgar E. Iglesias <edgar.iglesias@gmail.com>
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/cris/cpu.h | 2 --
9
1 file changed, 2 deletions(-)
10
11
diff --git a/target/cris/cpu.h b/target/cris/cpu.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/cris/cpu.h
14
+++ b/target/cris/cpu.h
15
@@ -XXX,XX +XXX,XX @@ enum {
16
#define cpu_signal_handler cpu_cris_signal_handler
17
18
/* MMU modes definitions */
19
-#define MMU_MODE0_SUFFIX _kernel
20
-#define MMU_MODE1_SUFFIX _user
21
#define MMU_USER_IDX 1
22
static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
23
{
24
--
25
2.20.1
26
27
diff view generated by jsdifflib
1
The result of g_strsplit is never NULL.
1
The functions generated by these macros are unused.
2
2
3
Cc: Eduardo Habkost <ehabkost@redhat.com>
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
3
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
5
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
8
---
9
vl.c | 2 +-
9
target/i386/cpu.h | 3 ---
10
1 file changed, 1 insertion(+), 1 deletion(-)
10
1 file changed, 3 deletions(-)
11
11
12
diff --git a/vl.c b/vl.c
12
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/vl.c
14
--- a/target/i386/cpu.h
15
+++ b/vl.c
15
+++ b/target/i386/cpu.h
16
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
16
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_get_tsc(CPUX86State *env);
17
17
#define cpu_list x86_cpu_list
18
accel_list = g_strsplit(accel, ":", 0);
18
19
19
/* MMU modes definitions */
20
- for (tmp = accel_list; tmp && *tmp; tmp++) {
20
-#define MMU_MODE0_SUFFIX _ksmap
21
+ for (tmp = accel_list; *tmp; tmp++) {
21
-#define MMU_MODE1_SUFFIX _user
22
/*
22
-#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
23
* Filter invalid accelerators here, to prevent obscenities
23
#define MMU_KSMAP_IDX 0
24
* such as "-machine accel=tcg,,thread=single".
24
#define MMU_USER_IDX 1
25
#define MMU_KNOSMAP_IDX 2
25
--
26
--
26
2.20.1
27
2.20.1
27
28
28
29
diff view generated by jsdifflib
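The vl.c side of the pair above relies on a documented GLib guarantee: g_strsplit() always returns a newly allocated, NULL-terminated vector, never NULL, even for an empty input string, which is why the `tmp &&` guard can be dropped. A quick stand-alone check of that guarantee (assumes the GLib development headers are installed):

    /* strsplit_check.c -- build with:
     *   cc strsplit_check.c $(pkg-config --cflags --libs glib-2.0)
     */
    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        gchar **v = g_strsplit("", ":", 0);   /* even the empty string...   */

        g_assert(v != NULL);                  /* ...yields a non-NULL vector */
        for (gchar **p = v; *p; p++) {        /* iterate with no NULL check  */
            printf("token: %s\n", *p);
        }
        g_strfreev(v);

        v = g_strsplit("tcg:kvm", ":", 0);
        for (gchar **p = v; *p; p++) {
            printf("token: %s\n", *p);
        }
        g_strfreev(v);
        return 0;
    }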
New patch
1
The functions generated by these macros are unused.
1
2
3
Cc: Edgar E. Iglesias <edgar.iglesias@gmail.com>
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/microblaze/cpu.h | 3 ---
9
1 file changed, 3 deletions(-)
10
11
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/microblaze/cpu.h
14
+++ b/target/microblaze/cpu.h
15
@@ -XXX,XX +XXX,XX @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,
16
#define cpu_signal_handler cpu_mb_signal_handler
17
18
/* MMU modes definitions */
19
-#define MMU_MODE0_SUFFIX _nommu
20
-#define MMU_MODE1_SUFFIX _kernel
21
-#define MMU_MODE2_SUFFIX _user
22
#define MMU_NOMMU_IDX 0
23
#define MMU_KERNEL_IDX 1
24
#define MMU_USER_IDX 2
25
--
26
2.20.1
27
28
diff view generated by jsdifflib
New patch
1
The functions generated by these macros are unused.
1
2
3
Cc: Aurelien Jarno <aurelien@aurel32.net>
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/sh4/cpu.h | 2 --
9
1 file changed, 2 deletions(-)
10
11
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sh4/cpu.h
14
+++ b/target/sh4/cpu.h
15
@@ -XXX,XX +XXX,XX @@ void cpu_load_tlb(CPUSH4State * env);
16
#define cpu_list sh4_cpu_list
17
18
/* MMU modes definitions */
19
-#define MMU_MODE0_SUFFIX _kernel
20
-#define MMU_MODE1_SUFFIX _user
21
#define MMU_USER_IDX 1
22
static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
23
{
24
--
25
2.20.1
26
27
diff view generated by jsdifflib
New patch
1
The functions generated by these macros are unused.
1
2
3
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
4
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/unicore32/cpu.h | 2 --
9
1 file changed, 2 deletions(-)
10
11
diff --git a/target/unicore32/cpu.h b/target/unicore32/cpu.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/unicore32/cpu.h
14
+++ b/target/unicore32/cpu.h
15
@@ -XXX,XX +XXX,XX @@ void cpu_asr_write(CPUUniCore32State *env1, target_ulong val, target_ulong mask)
16
int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
17
18
/* MMU modes definitions */
19
-#define MMU_MODE0_SUFFIX _kernel
20
-#define MMU_MODE1_SUFFIX _user
21
#define MMU_USER_IDX 1
22
static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch)
23
{
24
--
25
2.20.1
26
27
diff view generated by jsdifflib
New patch
1
The functions generated by these macros are unused.
1
2
3
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
4
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/xtensa/cpu.h | 4 ----
9
1 file changed, 4 deletions(-)
10
11
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/xtensa/cpu.h
14
+++ b/target/xtensa/cpu.h
15
@@ -XXX,XX +XXX,XX @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
16
}
17
18
/* MMU modes definitions */
19
-#define MMU_MODE0_SUFFIX _ring0
20
-#define MMU_MODE1_SUFFIX _ring1
21
-#define MMU_MODE2_SUFFIX _ring2
22
-#define MMU_MODE3_SUFFIX _ring3
23
#define MMU_USER_IDX 3
24
25
static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
26
--
27
2.20.1
28
29
diff view generated by jsdifflib
1
Merge into the only caller, but at the same time split
1
The generated *_user functions are unused. The *_kernel functions
2
out tlb_mmu_init to initialize a single tlb entry.
2
have a couple of users in op_helper.c; use *_mmuidx_ra instead,
3
with MMU_KERNEL_IDX.
3
4
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
---
9
accel/tcg/cputlb.c | 33 ++++++++++++++++-----------------
10
v2: Use *_mmuidx_ra directly, without intermediate macros.
10
1 file changed, 16 insertions(+), 17 deletions(-)
11
---
12
target/m68k/cpu.h | 2 --
13
target/m68k/op_helper.c | 77 +++++++++++++++++++++++++----------------
14
2 files changed, 47 insertions(+), 32 deletions(-)
11
15
12
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
16
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
13
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/cputlb.c
18
--- a/target/m68k/cpu.h
15
+++ b/accel/tcg/cputlb.c
19
+++ b/target/m68k/cpu.h
16
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
20
@@ -XXX,XX +XXX,XX @@ enum {
17
desc->window_max_entries = max_entries;
21
#define cpu_list m68k_cpu_list
22
23
/* MMU modes definitions */
24
-#define MMU_MODE0_SUFFIX _kernel
25
-#define MMU_MODE1_SUFFIX _user
26
#define MMU_KERNEL_IDX 0
27
#define MMU_USER_IDX 1
28
static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
29
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/m68k/op_helper.c
32
+++ b/target/m68k/op_helper.c
33
@@ -XXX,XX +XXX,XX @@ static void cf_rte(CPUM68KState *env)
34
uint32_t fmt;
35
36
sp = env->aregs[7];
37
- fmt = cpu_ldl_kernel(env, sp);
38
- env->pc = cpu_ldl_kernel(env, sp + 4);
39
+ fmt = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
40
+ env->pc = cpu_ldl_mmuidx_ra(env, sp + 4, MMU_KERNEL_IDX, 0);
41
sp |= (fmt >> 28) & 3;
42
env->aregs[7] = sp + 8;
43
44
@@ -XXX,XX +XXX,XX @@ static void m68k_rte(CPUM68KState *env)
45
46
sp = env->aregs[7];
47
throwaway:
48
- sr = cpu_lduw_kernel(env, sp);
49
+ sr = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
50
sp += 2;
51
- env->pc = cpu_ldl_kernel(env, sp);
52
+ env->pc = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
53
sp += 4;
54
if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
55
/* all except 68000 */
56
- fmt = cpu_lduw_kernel(env, sp);
57
+ fmt = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
58
sp += 2;
59
switch (fmt >> 12) {
60
case 0:
61
@@ -XXX,XX +XXX,XX @@ static void cf_interrupt_all(CPUM68KState *env, int is_hw)
62
/* ??? This could cause MMU faults. */
63
sp &= ~3;
64
sp -= 4;
65
- cpu_stl_kernel(env, sp, retaddr);
66
+ cpu_stl_mmuidx_ra(env, sp, retaddr, MMU_KERNEL_IDX, 0);
67
sp -= 4;
68
- cpu_stl_kernel(env, sp, fmt);
69
+ cpu_stl_mmuidx_ra(env, sp, fmt, MMU_KERNEL_IDX, 0);
70
env->aregs[7] = sp;
71
/* Jump to vector. */
72
- env->pc = cpu_ldl_kernel(env, env->vbr + vector);
73
+ env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
18
}
74
}
19
75
20
-static void tlb_dyn_init(CPUArchState *env)
76
static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp,
21
-{
77
@@ -XXX,XX +XXX,XX @@ static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp,
22
- int i;
78
switch (format) {
23
-
79
case 4:
24
- for (i = 0; i < NB_MMU_MODES; i++) {
80
*sp -= 4;
25
- CPUTLBDesc *desc = &env_tlb(env)->d[i];
81
- cpu_stl_kernel(env, *sp, env->pc);
26
- size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
82
+ cpu_stl_mmuidx_ra(env, *sp, env->pc, MMU_KERNEL_IDX, 0);
27
-
83
*sp -= 4;
28
- tlb_window_reset(desc, get_clock_realtime(), 0);
84
- cpu_stl_kernel(env, *sp, addr);
29
- desc->n_used_entries = 0;
85
+ cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
30
- env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
86
break;
31
- env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
87
case 3:
32
- env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
88
case 2:
33
- }
89
*sp -= 4;
34
-}
90
- cpu_stl_kernel(env, *sp, addr);
35
-
91
+ cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
36
/**
92
break;
37
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
93
}
38
* @desc: The CPUTLBDesc portion of the TLB
94
*sp -= 2;
39
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
95
- cpu_stw_kernel(env, *sp, (format << 12) + (cs->exception_index << 2));
40
tlb_mmu_flush_locked(desc, fast);
96
+ cpu_stw_mmuidx_ra(env, *sp, (format << 12) + (cs->exception_index << 2),
97
+ MMU_KERNEL_IDX, 0);
98
}
99
*sp -= 4;
100
- cpu_stl_kernel(env, *sp, retaddr);
101
+ cpu_stl_mmuidx_ra(env, *sp, retaddr, MMU_KERNEL_IDX, 0);
102
*sp -= 2;
103
- cpu_stw_kernel(env, *sp, sr);
104
+ cpu_stw_mmuidx_ra(env, *sp, sr, MMU_KERNEL_IDX, 0);
41
}
105
}
42
106
43
+static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
107
static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
44
+{
108
@@ -XXX,XX +XXX,XX @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
45
+ size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
109
cpu_abort(cs, "DOUBLE MMU FAULT\n");
110
}
111
env->mmu.fault = true;
112
+ /* push data 3 */
113
sp -= 4;
114
- cpu_stl_kernel(env, sp, 0); /* push data 3 */
115
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
116
+ /* push data 2 */
117
sp -= 4;
118
- cpu_stl_kernel(env, sp, 0); /* push data 2 */
119
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
120
+ /* push data 1 */
121
sp -= 4;
122
- cpu_stl_kernel(env, sp, 0); /* push data 1 */
123
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
124
+ /* write back 1 / push data 0 */
125
sp -= 4;
126
- cpu_stl_kernel(env, sp, 0); /* write back 1 / push data 0 */
127
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
128
+ /* write back 1 address */
129
sp -= 4;
130
- cpu_stl_kernel(env, sp, 0); /* write back 1 address */
131
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
132
+ /* write back 2 data */
133
sp -= 4;
134
- cpu_stl_kernel(env, sp, 0); /* write back 2 data */
135
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
136
+ /* write back 2 address */
137
sp -= 4;
138
- cpu_stl_kernel(env, sp, 0); /* write back 2 address */
139
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
140
+ /* write back 3 data */
141
sp -= 4;
142
- cpu_stl_kernel(env, sp, 0); /* write back 3 data */
143
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
144
+ /* write back 3 address */
145
sp -= 4;
146
- cpu_stl_kernel(env, sp, env->mmu.ar); /* write back 3 address */
147
+ cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
148
+ /* fault address */
149
sp -= 4;
150
- cpu_stl_kernel(env, sp, env->mmu.ar); /* fault address */
151
+ cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
152
+ /* write back 1 status */
153
sp -= 2;
154
- cpu_stw_kernel(env, sp, 0); /* write back 1 status */
155
+ cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
156
+ /* write back 2 status */
157
sp -= 2;
158
- cpu_stw_kernel(env, sp, 0); /* write back 2 status */
159
+ cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
160
+ /* write back 3 status */
161
sp -= 2;
162
- cpu_stw_kernel(env, sp, 0); /* write back 3 status */
163
+ cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
164
+ /* special status word */
165
sp -= 2;
166
- cpu_stw_kernel(env, sp, env->mmu.ssw); /* special status word */
167
+ cpu_stw_mmuidx_ra(env, sp, env->mmu.ssw, MMU_KERNEL_IDX, 0);
168
+ /* effective address */
169
sp -= 4;
170
- cpu_stl_kernel(env, sp, env->mmu.ar); /* effective address */
171
+ cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
46
+
172
+
47
+ tlb_window_reset(desc, now, 0);
173
do_stack_frame(env, &sp, 7, oldsr, 0, retaddr);
48
+ desc->n_used_entries = 0;
174
env->mmu.fault = false;
49
+ fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
175
if (qemu_loglevel_mask(CPU_LOG_INT)) {
50
+ fast->table = g_new(CPUTLBEntry, n_entries);
176
@@ -XXX,XX +XXX,XX @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
51
+ desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
177
52
+}
178
env->aregs[7] = sp;
53
+
179
/* Jump to vector. */
54
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
180
- env->pc = cpu_ldl_kernel(env, env->vbr + vector);
55
{
181
+ env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
56
env_tlb(env)->d[mmu_idx].n_used_entries++;
57
@@ -XXX,XX +XXX,XX @@ static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
58
void tlb_init(CPUState *cpu)
59
{
60
CPUArchState *env = cpu->env_ptr;
61
+ int64_t now = get_clock_realtime();
62
+ int i;
63
64
qemu_spin_init(&env_tlb(env)->c.lock);
65
66
/* Ensure that cpu_reset performs a full flush. */
67
env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
68
69
- tlb_dyn_init(env);
70
+ for (i = 0; i < NB_MMU_MODES; i++) {
71
+ tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
72
+ }
73
}
182
}
74
183
75
/* flush_all_helper: run fn across all cpus
184
static void do_interrupt_all(CPUM68KState *env, int is_hw)
76
--
185
--
77
2.20.1
186
2.20.1
78
187
79
188
diff view generated by jsdifflib
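The m68k conversion above is mechanical: every cpu_<size>_kernel(env, addr[, val]) becomes cpu_<size>_mmuidx_ra(env, addr[, val], MMU_KERNEL_IDX, 0), where the trailing 0 means no host return address is needed for unwinding. As a hedged sketch of what new code written directly against that interface might look like (a hypothetical helper, not part of this series; it only builds inside the QEMU tree):

    /* Hypothetical m68k-style helper using the cpu_*_mmuidx_ra interface
     * declared in the cpu_ldst.h hunks of this series. */
    static void demo_push_frame(CPUM68KState *env, uint32_t retaddr, uint32_t fmt)
    {
        uint32_t sp = env->aregs[7];

        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, retaddr, MMU_KERNEL_IDX, 0);
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, fmt, MMU_KERNEL_IDX, 0);
        env->aregs[7] = sp;
    }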
New patch
1
The separate suffixed functions were used to construct
2
some do_##insn function switched on mmu_idx. The interface
3
is exactly identical to the *_mmuidx_ra functions. Replace
4
them directly and remove the constructions.
1
5
6
Cc: Aurelien Jarno <aurelien@aurel32.net>
7
Cc: Aleksandar Rikalo <aleksandar.rikalo@rt-rk.com>
8
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
13
target/mips/cpu.h | 4 -
14
target/mips/op_helper.c | 182 +++++++++++++---------------------------
15
2 files changed, 60 insertions(+), 126 deletions(-)
16
17
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/mips/cpu.h
20
+++ b/target/mips/cpu.h
21
@@ -XXX,XX +XXX,XX @@ extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
22
* MMU modes definitions. We carefully match the indices with our
23
* hflags layout.
24
*/
25
-#define MMU_MODE0_SUFFIX _kernel
26
-#define MMU_MODE1_SUFFIX _super
27
-#define MMU_MODE2_SUFFIX _user
28
-#define MMU_MODE3_SUFFIX _error
29
#define MMU_USER_IDX 2
30
31
static inline int hflags_mmu_index(uint32_t hflags)
32
diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/mips/op_helper.c
35
+++ b/target/mips/op_helper.c
36
@@ -XXX,XX +XXX,XX @@ static void raise_exception(CPUMIPSState *env, uint32_t exception)
37
do_raise_exception(env, exception, 0);
38
}
39
40
-#if defined(CONFIG_USER_ONLY)
41
-#define HELPER_LD(name, insn, type) \
42
-static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
43
- int mem_idx, uintptr_t retaddr) \
44
-{ \
45
- return (type) cpu_##insn##_data_ra(env, addr, retaddr); \
46
-}
47
-#else
48
-#define HELPER_LD(name, insn, type) \
49
-static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
50
- int mem_idx, uintptr_t retaddr) \
51
-{ \
52
- switch (mem_idx) { \
53
- case 0: return (type) cpu_##insn##_kernel_ra(env, addr, retaddr); \
54
- case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr); \
55
- default: \
56
- case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr); \
57
- case 3: return (type) cpu_##insn##_error_ra(env, addr, retaddr); \
58
- } \
59
-}
60
-#endif
61
-HELPER_LD(lw, ldl, int32_t)
62
-#if defined(TARGET_MIPS64)
63
-HELPER_LD(ld, ldq, int64_t)
64
-#endif
65
-#undef HELPER_LD
66
-
67
-#if defined(CONFIG_USER_ONLY)
68
-#define HELPER_ST(name, insn, type) \
69
-static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
70
- type val, int mem_idx, uintptr_t retaddr) \
71
-{ \
72
- cpu_##insn##_data_ra(env, addr, val, retaddr); \
73
-}
74
-#else
75
-#define HELPER_ST(name, insn, type) \
76
-static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
77
- type val, int mem_idx, uintptr_t retaddr) \
78
-{ \
79
- switch (mem_idx) { \
80
- case 0: \
81
- cpu_##insn##_kernel_ra(env, addr, val, retaddr); \
82
- break; \
83
- case 1: \
84
- cpu_##insn##_super_ra(env, addr, val, retaddr); \
85
- break; \
86
- default: \
87
- case 2: \
88
- cpu_##insn##_user_ra(env, addr, val, retaddr); \
89
- break; \
90
- case 3: \
91
- cpu_##insn##_error_ra(env, addr, val, retaddr); \
92
- break; \
93
- } \
94
-}
95
-#endif
96
-HELPER_ST(sb, stb, uint8_t)
97
-HELPER_ST(sw, stl, uint32_t)
98
-#if defined(TARGET_MIPS64)
99
-HELPER_ST(sd, stq, uint64_t)
100
-#endif
101
-#undef HELPER_ST
102
-
103
/* 64 bits arithmetic for 32 bits hosts */
104
static inline uint64_t get_HILO(CPUMIPSState *env)
105
{
106
@@ -XXX,XX +XXX,XX @@ target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
107
} \
108
env->CP0_LLAddr = do_translate_address(env, arg, 0, GETPC()); \
109
env->lladdr = arg; \
110
- env->llval = do_##insn(env, arg, mem_idx, GETPC()); \
111
+ env->llval = cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC()); \
112
return env->llval; \
113
}
114
-HELPER_LD_ATOMIC(ll, lw, 0x3)
115
+HELPER_LD_ATOMIC(ll, ldl, 0x3)
116
#ifdef TARGET_MIPS64
117
-HELPER_LD_ATOMIC(lld, ld, 0x7)
118
+HELPER_LD_ATOMIC(lld, ldq, 0x7)
119
#endif
120
#undef HELPER_LD_ATOMIC
121
#endif
122
@@ -XXX,XX +XXX,XX @@ HELPER_LD_ATOMIC(lld, ld, 0x7)
123
void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
124
int mem_idx)
125
{
126
- do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
127
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
128
129
if (GET_LMASK(arg2) <= 2) {
130
- do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx,
131
- GETPC());
132
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16),
133
+ mem_idx, GETPC());
134
}
135
136
if (GET_LMASK(arg2) <= 1) {
137
- do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx,
138
- GETPC());
139
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8),
140
+ mem_idx, GETPC());
141
}
142
143
if (GET_LMASK(arg2) == 0) {
144
- do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx,
145
- GETPC());
146
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1,
147
+ mem_idx, GETPC());
148
}
149
}
150
151
void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
152
int mem_idx)
153
{
154
- do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
155
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
156
157
if (GET_LMASK(arg2) >= 1) {
158
- do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
159
- GETPC());
160
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
161
+ mem_idx, GETPC());
162
}
163
164
if (GET_LMASK(arg2) >= 2) {
165
- do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
166
- GETPC());
167
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
168
+ mem_idx, GETPC());
169
}
170
171
if (GET_LMASK(arg2) == 3) {
172
- do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
173
- GETPC());
174
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
175
+ mem_idx, GETPC());
176
}
177
}
178
179
@@ -XXX,XX +XXX,XX @@ void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
180
void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
181
int mem_idx)
182
{
183
- do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
184
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
185
186
if (GET_LMASK64(arg2) <= 6) {
187
- do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx,
188
- GETPC());
189
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48),
190
+ mem_idx, GETPC());
191
}
192
193
if (GET_LMASK64(arg2) <= 5) {
194
- do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx,
195
- GETPC());
196
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40),
197
+ mem_idx, GETPC());
198
}
199
200
if (GET_LMASK64(arg2) <= 4) {
201
- do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx,
202
- GETPC());
203
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32),
204
+ mem_idx, GETPC());
205
}
206
207
if (GET_LMASK64(arg2) <= 3) {
208
- do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx,
209
- GETPC());
210
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24),
211
+ mem_idx, GETPC());
212
}
213
214
if (GET_LMASK64(arg2) <= 2) {
215
- do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx,
216
- GETPC());
217
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16),
218
+ mem_idx, GETPC());
219
}
220
221
if (GET_LMASK64(arg2) <= 1) {
222
- do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx,
223
- GETPC());
224
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8),
225
+ mem_idx, GETPC());
226
}
227
228
if (GET_LMASK64(arg2) <= 0) {
229
- do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx,
230
- GETPC());
231
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1,
232
+ mem_idx, GETPC());
233
}
234
}
235
236
void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
237
int mem_idx)
238
{
239
- do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
240
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
241
242
if (GET_LMASK64(arg2) >= 1) {
243
- do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
244
- GETPC());
245
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
246
+ mem_idx, GETPC());
247
}
248
249
if (GET_LMASK64(arg2) >= 2) {
250
- do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
251
- GETPC());
252
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
253
+ mem_idx, GETPC());
254
}
255
256
if (GET_LMASK64(arg2) >= 3) {
257
- do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
258
- GETPC());
259
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
260
+ mem_idx, GETPC());
261
}
262
263
if (GET_LMASK64(arg2) >= 4) {
264
- do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx,
265
- GETPC());
266
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32),
267
+ mem_idx, GETPC());
268
}
269
270
if (GET_LMASK64(arg2) >= 5) {
271
- do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx,
272
- GETPC());
273
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40),
274
+ mem_idx, GETPC());
275
}
276
277
if (GET_LMASK64(arg2) >= 6) {
278
- do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx,
279
- GETPC());
280
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48),
281
+ mem_idx, GETPC());
282
}
283
284
if (GET_LMASK64(arg2) == 7) {
285
- do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx,
286
- GETPC());
287
+ cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56),
288
+ mem_idx, GETPC());
289
}
290
}
291
#endif /* TARGET_MIPS64 */
292
@@ -XXX,XX +XXX,XX @@ void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
293
294
for (i = 0; i < base_reglist; i++) {
295
env->active_tc.gpr[multiple_regs[i]] =
296
- (target_long)do_lw(env, addr, mem_idx, GETPC());
297
+ (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
298
addr += 4;
299
}
300
}
301
302
if (do_r31) {
303
- env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx,
304
- GETPC());
305
+ env->active_tc.gpr[31] =
306
+ (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
307
}
308
}
309
310
@@ -XXX,XX +XXX,XX @@ void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
311
target_ulong i;
312
313
for (i = 0; i < base_reglist; i++) {
314
- do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
315
- GETPC());
316
+ cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
317
+ mem_idx, GETPC());
318
addr += 4;
319
}
320
}
321
322
if (do_r31) {
323
- do_sw(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
324
+ cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
325
}
326
}
327
328
@@ -XXX,XX +XXX,XX @@ void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
329
target_ulong i;
330
331
for (i = 0; i < base_reglist; i++) {
332
- env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx,
333
- GETPC());
334
+ env->active_tc.gpr[multiple_regs[i]] =
335
+ cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
336
addr += 8;
337
}
338
}
339
340
if (do_r31) {
341
- env->active_tc.gpr[31] = do_ld(env, addr, mem_idx, GETPC());
342
+ env->active_tc.gpr[31] =
343
+ cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
344
}
345
}
346
347
@@ -XXX,XX +XXX,XX @@ void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
348
target_ulong i;
349
350
for (i = 0; i < base_reglist; i++) {
351
- do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
352
- GETPC());
353
+ cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
354
+ mem_idx, GETPC());
355
addr += 8;
356
}
357
}
358
359
if (do_r31) {
360
- do_sd(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
361
+ cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
362
}
363
}
364
#endif
365
--
366
2.20.1
367
368
diff view generated by jsdifflib
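The point of the MIPS patch above is that the generated do_##insn wrappers only existed to switch on mem_idx and pick a suffixed accessor; once the accessor itself takes the index, the dispatcher is pure overhead. A self-contained toy of that simplification (invented names, plain C):

    /* toy_dispatch.c -- why the do_##insn dispatchers became redundant. */
    #include <stdint.h>
    #include <stdio.h>

    enum { MODE_KERNEL, MODE_USER, MODE_COUNT };

    static uint32_t mem[MODE_COUNT][4] = { { 10, 11, 12, 13 },
                                           { 20, 21, 22, 23 } };

    /* Before: one accessor per mode... */
    static uint32_t ldl_kernel(unsigned idx) { return mem[MODE_KERNEL][idx]; }
    static uint32_t ldl_user(unsigned idx)   { return mem[MODE_USER][idx]; }

    /* ...plus a generated wrapper whose only job is to switch on the mode. */
    static uint32_t do_lw(int mode, unsigned idx)
    {
        switch (mode) {
        case MODE_KERNEL: return ldl_kernel(idx);
        default:          return ldl_user(idx);
        }
    }

    /* After: the accessor takes the mode itself, so do_lw() disappears. */
    static uint32_t ldl_mmuidx(unsigned idx, int mode)
    {
        return mem[mode][idx];
    }

    int main(void)
    {
        printf("%u %u\n", do_lw(MODE_USER, 2), ldl_mmuidx(2, MODE_USER));
        return 0;
    }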
1
By choosing "tcg:kvm" when kvm is not enabled, we generate
1
The generated functions aside from *_real are unused.
2
an incorrect warning: "invalid accelerator kvm".
2
The *_real functions have a couple of users in mem_helper.c;
3
use *_mmuidx_ra instead, with MMU_REAL_IDX.
3
4
4
At the same time, use g_str_has_suffix rather than open-coding
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
the same operation.
6
Reviewed-by: David Hildenbrand <david@redhat.com>
6
7
Presumably the inverse is also true with --disable-tcg.
8
9
Fixes: 28a0961757fc
10
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
9
---
15
vl.c | 21 +++++++++++++--------
10
v2: Use *_mmuidx_ra directly, without intermediate macros.
16
1 file changed, 13 insertions(+), 8 deletions(-)
11
---
12
target/s390x/cpu.h | 5 -----
13
target/s390x/mem_helper.c | 10 +++++-----
14
2 files changed, 5 insertions(+), 10 deletions(-)
17
15
18
diff --git a/vl.c b/vl.c
16
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
19
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
20
--- a/vl.c
18
--- a/target/s390x/cpu.h
21
+++ b/vl.c
19
+++ b/target/s390x/cpu.h
22
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
20
@@ -XXX,XX +XXX,XX @@
23
21
24
if (accel == NULL) {
22
#define TARGET_INSN_START_EXTRA_WORDS 2
25
/* Select the default accelerator */
23
26
- if (!accel_find("tcg") && !accel_find("kvm")) {
24
-#define MMU_MODE0_SUFFIX _primary
27
- error_report("No accelerator selected and"
25
-#define MMU_MODE1_SUFFIX _secondary
28
- " no default accelerator available");
26
-#define MMU_MODE2_SUFFIX _home
29
- exit(1);
27
-#define MMU_MODE3_SUFFIX _real
30
- } else {
28
-
31
- int pnlen = strlen(progname);
29
#define MMU_USER_IDX 0
32
- if (pnlen >= 3 && g_str_equal(&progname[pnlen - 3], "kvm")) {
30
33
+ bool have_tcg = accel_find("tcg");
31
#define S390_MAX_CPUS 248
34
+ bool have_kvm = accel_find("kvm");
32
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
35
+
33
index XXXXXXX..XXXXXXX 100644
36
+ if (have_tcg && have_kvm) {
34
--- a/target/s390x/mem_helper.c
37
+ if (g_str_has_suffix(progname, "kvm")) {
35
+++ b/target/s390x/mem_helper.c
38
/* If the program name ends with "kvm", we prefer KVM */
36
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
39
accel = "kvm:tcg";
37
real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;
40
} else {
38
41
accel = "tcg:kvm";
39
for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
42
}
40
- cpu_stq_real_ra(env, real_addr + i, 0, ra);
43
+ } else if (have_kvm) {
41
+ cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra);
44
+ accel = "kvm";
42
}
45
+ } else if (have_tcg) {
43
46
+ accel = "tcg";
44
return 0;
47
+ } else {
45
@@ -XXX,XX +XXX,XX @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
48
+ error_report("No accelerator selected and"
46
for (i = 0; i < entries; i++) {
49
+ " no default accelerator available");
47
/* addresses are not wrapped in 24/31bit mode but table index is */
50
+ exit(1);
48
raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
49
- entry = cpu_ldq_real_ra(env, raddr, ra);
50
+ entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra);
51
if (!(entry & REGION_ENTRY_I)) {
52
/* we are allowed to not store if already invalid */
53
entry |= REGION_ENTRY_I;
54
- cpu_stq_real_ra(env, raddr, entry, ra);
55
+ cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra);
51
}
56
}
52
}
57
}
53
-
58
}
54
accel_list = g_strsplit(accel, ":", 0);
59
@@ -XXX,XX +XXX,XX @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
55
60
pte_addr += VADDR_PAGE_TX(vaddr) * 8;
56
for (tmp = accel_list; *tmp; tmp++) {
61
62
/* Mark the page table entry as invalid */
63
- pte = cpu_ldq_real_ra(env, pte_addr, ra);
64
+ pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra);
65
pte |= PAGE_ENTRY_I;
66
- cpu_stq_real_ra(env, pte_addr, pte, ra);
67
+ cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra);
68
69
/* XXX we exploit the fact that Linux passes the exact virtual
70
address here - it's not obliged to! */
57
--
71
--
58
2.20.1
72
2.20.1
59
73
60
74
diff view generated by jsdifflib
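For the vl.c half of the interleaved pair above, the new default-accelerator choice boils down to a small decision over which accelerators were built in and how the binary is named. A stand-alone sketch that mirrors the hunk's logic, with a plain suffix compare standing in for g_str_has_suffix() and booleans standing in for accel_find():

    /* accel_default_sketch.c -- mirrors the selection logic in the vl.c hunk. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static const char *pick_default(bool have_tcg, bool have_kvm,
                                    const char *progname)
    {
        if (have_tcg && have_kvm) {
            size_t n = strlen(progname);
            /* A binary whose name ends in "kvm" prefers KVM, else TCG first. */
            if (n >= 3 && strcmp(progname + n - 3, "kvm") == 0) {
                return "kvm:tcg";
            }
            return "tcg:kvm";
        } else if (have_kvm) {
            return "kvm";
        } else if (have_tcg) {
            return "tcg";
        }
        return NULL;   /* no accelerator available: vl.c reports an error */
    }

    int main(void)
    {
        printf("%s\n", pick_default(true, true, "qemu-system-x86_64"));
        printf("%s\n", pick_default(true, true, "wrapper-kvm"));
        printf("%s\n", pick_default(false, true, "qemu-system-s390x"));
        return 0;
    }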
1
We will want to be able to flush a tlb without resizing.
1
There are only two uses. Within dcbz_common, the local variable
2
mmu_idx already contains the epid computation, and we can avoid
3
repeating it for the store. Within helper_icbiep, the usage is
4
trivially expanded using PPC_TLB_EPID_LOAD.
2
5
6
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Acked-by: David Gibson <david@gibson.dropbear.id.au>
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
10
---
8
accel/tcg/cputlb.c | 15 ++++++++++-----
11
target/ppc/cpu.h | 2 --
9
1 file changed, 10 insertions(+), 5 deletions(-)
12
target/ppc/mem_helper.c | 11 ++---------
13
2 files changed, 2 insertions(+), 11 deletions(-)
10
14
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
15
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
12
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
17
--- a/target/ppc/cpu.h
14
+++ b/accel/tcg/cputlb.c
18
+++ b/target/ppc/cpu.h
15
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
19
@@ -XXX,XX +XXX,XX @@ struct ppc_radix_page_info {
20
* + real/paged mode combinations. The other two modes are for
21
* external PID load/store.
22
*/
23
-#define MMU_MODE8_SUFFIX _epl
24
-#define MMU_MODE9_SUFFIX _eps
25
#define PPC_TLB_EPID_LOAD 8
26
#define PPC_TLB_EPID_STORE 9
27
28
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/ppc/mem_helper.c
31
+++ b/target/ppc/mem_helper.c
32
@@ -XXX,XX +XXX,XX @@ static void dcbz_common(CPUPPCState *env, target_ulong addr,
33
} else {
34
/* Slow path */
35
for (i = 0; i < dcbz_size; i += 8) {
36
- if (epid) {
37
-#if !defined(CONFIG_USER_ONLY)
38
- /* Does not make sense on USER_ONLY config */
39
- cpu_stq_eps_ra(env, addr + i, 0, retaddr);
40
-#endif
41
- } else {
42
- cpu_stq_data_ra(env, addr + i, 0, retaddr);
43
- }
44
+ cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
45
}
16
}
46
}
17
}
47
}
18
48
@@ -XXX,XX +XXX,XX @@ void helper_icbiep(CPUPPCState *env, target_ulong addr)
19
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
49
#if !defined(CONFIG_USER_ONLY)
20
+static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
50
/* See comments above */
21
{
51
addr &= ~(env->dcache_line_size - 1);
22
- CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
52
- cpu_ldl_epl_ra(env, addr, GETPC());
23
- CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
53
+ cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
24
-
54
#endif
25
- tlb_mmu_resize_locked(desc, fast);
26
desc->n_used_entries = 0;
27
desc->large_page_addr = -1;
28
desc->large_page_mask = -1;
29
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
30
memset(desc->vtable, -1, sizeof(desc->vtable));
31
}
55
}
32
56
33
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
34
+{
35
+ CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
36
+ CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
37
+
38
+ tlb_mmu_resize_locked(desc, fast);
39
+ tlb_mmu_flush_locked(desc, fast);
40
+}
41
+
42
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
43
{
44
env_tlb(env)->d[mmu_idx].n_used_entries++;
45
--
57
--
46
2.20.1
58
2.20.1
47
59
48
60
diff view generated by jsdifflib
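The cputlb half of the pair above ("flush a tlb without resizing") is a classic split of a combined operation into two composable steps, so that later patches can call the flush body on its own. A self-contained toy of that shape, with an invented structure rather than QEMU's CPUTLB:

    /* toy_split.c -- split "resize + flush" so flush can be reused alone. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
        size_t n_entries;
        int *table;
    } ToyTLB;

    /* Flush only: wipe the current table without touching its size. */
    static void toy_tlb_flush_locked(ToyTLB *tlb)
    {
        memset(tlb->table, -1, tlb->n_entries * sizeof(tlb->table[0]));
    }

    /* Resize (growth/shrink heuristics and error handling elided). */
    static void toy_tlb_resize_locked(ToyTLB *tlb, size_t new_entries)
    {
        tlb->table = realloc(tlb->table, new_entries * sizeof(tlb->table[0]));
        tlb->n_entries = new_entries;
    }

    /* The old combined entry point becomes a two-line composition. */
    static void toy_tlb_flush_one_locked(ToyTLB *tlb, size_t new_entries)
    {
        toy_tlb_resize_locked(tlb, new_entries);
        toy_tlb_flush_locked(tlb);
    }

    int main(void)
    {
        ToyTLB tlb = { 0, NULL };

        toy_tlb_flush_one_locked(&tlb, 8);   /* resize + flush together */
        toy_tlb_flush_locked(&tlb);          /* now possible: flush alone */
        printf("entries: %zu, first: %d\n", tlb.n_entries, tlb.table[0]);
        free(tlb.table);
        return 0;
    }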
New patch
1
1
All users have now been converted to cpu_*_mmuidx_ra.
2
3
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/exec/cpu_ldst.h | 230 ----------------------------------------
8
1 file changed, 230 deletions(-)
9
10
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/exec/cpu_ldst.h
13
+++ b/include/exec/cpu_ldst.h
14
@@ -XXX,XX +XXX,XX @@ void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
15
void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
16
int mmu_idx, uintptr_t retaddr);
17
18
-#ifdef MMU_MODE0_SUFFIX
19
-#define CPU_MMU_INDEX 0
20
-#define MEMSUFFIX MMU_MODE0_SUFFIX
21
-#define DATA_SIZE 1
22
-#include "exec/cpu_ldst_template.h"
23
-
24
-#define DATA_SIZE 2
25
-#include "exec/cpu_ldst_template.h"
26
-
27
-#define DATA_SIZE 4
28
-#include "exec/cpu_ldst_template.h"
29
-
30
-#define DATA_SIZE 8
31
-#include "exec/cpu_ldst_template.h"
32
-#undef CPU_MMU_INDEX
33
-#undef MEMSUFFIX
34
-#endif
35
-
36
-#if (NB_MMU_MODES >= 2) && defined(MMU_MODE1_SUFFIX)
37
-#define CPU_MMU_INDEX 1
38
-#define MEMSUFFIX MMU_MODE1_SUFFIX
39
-#define DATA_SIZE 1
40
-#include "exec/cpu_ldst_template.h"
41
-
42
-#define DATA_SIZE 2
43
-#include "exec/cpu_ldst_template.h"
44
-
45
-#define DATA_SIZE 4
46
-#include "exec/cpu_ldst_template.h"
47
-
48
-#define DATA_SIZE 8
49
-#include "exec/cpu_ldst_template.h"
50
-#undef CPU_MMU_INDEX
51
-#undef MEMSUFFIX
52
-#endif
53
-
54
-#if (NB_MMU_MODES >= 3) && defined(MMU_MODE2_SUFFIX)
55
-
56
-#define CPU_MMU_INDEX 2
57
-#define MEMSUFFIX MMU_MODE2_SUFFIX
58
-#define DATA_SIZE 1
59
-#include "exec/cpu_ldst_template.h"
60
-
61
-#define DATA_SIZE 2
62
-#include "exec/cpu_ldst_template.h"
63
-
64
-#define DATA_SIZE 4
65
-#include "exec/cpu_ldst_template.h"
66
-
67
-#define DATA_SIZE 8
68
-#include "exec/cpu_ldst_template.h"
69
-#undef CPU_MMU_INDEX
70
-#undef MEMSUFFIX
71
-#endif /* (NB_MMU_MODES >= 3) */
72
-
73
-#if (NB_MMU_MODES >= 4) && defined(MMU_MODE3_SUFFIX)
74
-
75
-#define CPU_MMU_INDEX 3
76
-#define MEMSUFFIX MMU_MODE3_SUFFIX
77
-#define DATA_SIZE 1
78
-#include "exec/cpu_ldst_template.h"
79
-
80
-#define DATA_SIZE 2
81
-#include "exec/cpu_ldst_template.h"
82
-
83
-#define DATA_SIZE 4
84
-#include "exec/cpu_ldst_template.h"
85
-
86
-#define DATA_SIZE 8
87
-#include "exec/cpu_ldst_template.h"
88
-#undef CPU_MMU_INDEX
89
-#undef MEMSUFFIX
90
-#endif /* (NB_MMU_MODES >= 4) */
91
-
92
-#if (NB_MMU_MODES >= 5) && defined(MMU_MODE4_SUFFIX)
93
-
94
-#define CPU_MMU_INDEX 4
95
-#define MEMSUFFIX MMU_MODE4_SUFFIX
96
-#define DATA_SIZE 1
97
-#include "exec/cpu_ldst_template.h"
98
-
99
-#define DATA_SIZE 2
100
-#include "exec/cpu_ldst_template.h"
101
-
102
-#define DATA_SIZE 4
103
-#include "exec/cpu_ldst_template.h"
104
-
105
-#define DATA_SIZE 8
106
-#include "exec/cpu_ldst_template.h"
107
-#undef CPU_MMU_INDEX
108
-#undef MEMSUFFIX
109
-#endif /* (NB_MMU_MODES >= 5) */
110
-
111
-#if (NB_MMU_MODES >= 6) && defined(MMU_MODE5_SUFFIX)
112
-
113
-#define CPU_MMU_INDEX 5
114
-#define MEMSUFFIX MMU_MODE5_SUFFIX
115
-#define DATA_SIZE 1
116
-#include "exec/cpu_ldst_template.h"
117
-
118
-#define DATA_SIZE 2
119
-#include "exec/cpu_ldst_template.h"
120
-
121
-#define DATA_SIZE 4
122
-#include "exec/cpu_ldst_template.h"
123
-
124
-#define DATA_SIZE 8
125
-#include "exec/cpu_ldst_template.h"
126
-#undef CPU_MMU_INDEX
127
-#undef MEMSUFFIX
128
-#endif /* (NB_MMU_MODES >= 6) */
129
-
130
-#if (NB_MMU_MODES >= 7) && defined(MMU_MODE6_SUFFIX)
131
-
132
-#define CPU_MMU_INDEX 6
133
-#define MEMSUFFIX MMU_MODE6_SUFFIX
134
-#define DATA_SIZE 1
135
-#include "exec/cpu_ldst_template.h"
136
-
137
-#define DATA_SIZE 2
138
-#include "exec/cpu_ldst_template.h"
139
-
140
-#define DATA_SIZE 4
141
-#include "exec/cpu_ldst_template.h"
142
-
143
-#define DATA_SIZE 8
144
-#include "exec/cpu_ldst_template.h"
145
-#undef CPU_MMU_INDEX
146
-#undef MEMSUFFIX
147
-#endif /* (NB_MMU_MODES >= 7) */
148
-
149
-#if (NB_MMU_MODES >= 8) && defined(MMU_MODE7_SUFFIX)
150
-
151
-#define CPU_MMU_INDEX 7
152
-#define MEMSUFFIX MMU_MODE7_SUFFIX
153
-#define DATA_SIZE 1
154
-#include "exec/cpu_ldst_template.h"
155
-
156
-#define DATA_SIZE 2
157
-#include "exec/cpu_ldst_template.h"
158
-
159
-#define DATA_SIZE 4
160
-#include "exec/cpu_ldst_template.h"
161
-
162
-#define DATA_SIZE 8
163
-#include "exec/cpu_ldst_template.h"
164
-#undef CPU_MMU_INDEX
165
-#undef MEMSUFFIX
166
-#endif /* (NB_MMU_MODES >= 8) */
167
-
168
-#if (NB_MMU_MODES >= 9) && defined(MMU_MODE8_SUFFIX)
169
-
170
-#define CPU_MMU_INDEX 8
171
-#define MEMSUFFIX MMU_MODE8_SUFFIX
172
-#define DATA_SIZE 1
173
-#include "exec/cpu_ldst_template.h"
174
-
175
-#define DATA_SIZE 2
176
-#include "exec/cpu_ldst_template.h"
177
-
178
-#define DATA_SIZE 4
179
-#include "exec/cpu_ldst_template.h"
180
-
181
-#define DATA_SIZE 8
182
-#include "exec/cpu_ldst_template.h"
183
-#undef CPU_MMU_INDEX
184
-#undef MEMSUFFIX
185
-#endif /* (NB_MMU_MODES >= 9) */
186
-
187
-#if (NB_MMU_MODES >= 10) && defined(MMU_MODE9_SUFFIX)
188
-
189
-#define CPU_MMU_INDEX 9
190
-#define MEMSUFFIX MMU_MODE9_SUFFIX
191
-#define DATA_SIZE 1
192
-#include "exec/cpu_ldst_template.h"
193
-
194
-#define DATA_SIZE 2
195
-#include "exec/cpu_ldst_template.h"
196
-
197
-#define DATA_SIZE 4
198
-#include "exec/cpu_ldst_template.h"
199
-
200
-#define DATA_SIZE 8
201
-#include "exec/cpu_ldst_template.h"
202
-#undef CPU_MMU_INDEX
203
-#undef MEMSUFFIX
204
-#endif /* (NB_MMU_MODES >= 10) */
205
-
206
-#if (NB_MMU_MODES >= 11) && defined(MMU_MODE10_SUFFIX)
207
-
208
-#define CPU_MMU_INDEX 10
209
-#define MEMSUFFIX MMU_MODE10_SUFFIX
210
-#define DATA_SIZE 1
211
-#include "exec/cpu_ldst_template.h"
212
-
213
-#define DATA_SIZE 2
214
-#include "exec/cpu_ldst_template.h"
215
-
216
-#define DATA_SIZE 4
217
-#include "exec/cpu_ldst_template.h"
218
-
219
-#define DATA_SIZE 8
220
-#include "exec/cpu_ldst_template.h"
221
-#undef CPU_MMU_INDEX
222
-#undef MEMSUFFIX
223
-#endif /* (NB_MMU_MODES >= 11) */
224
-
225
-#if (NB_MMU_MODES >= 12) && defined(MMU_MODE11_SUFFIX)
226
-
227
-#define CPU_MMU_INDEX 11
228
-#define MEMSUFFIX MMU_MODE11_SUFFIX
229
-#define DATA_SIZE 1
230
-#include "exec/cpu_ldst_template.h"
231
-
232
-#define DATA_SIZE 2
233
-#include "exec/cpu_ldst_template.h"
234
-
235
-#define DATA_SIZE 4
236
-#include "exec/cpu_ldst_template.h"
237
-
238
-#define DATA_SIZE 8
239
-#include "exec/cpu_ldst_template.h"
240
-#undef CPU_MMU_INDEX
241
-#undef MEMSUFFIX
242
-#endif /* (NB_MMU_MODES >= 12) */
243
-
244
-#if (NB_MMU_MODES > 12)
245
-#error "NB_MMU_MODES > 12 is not supported for now"
246
-#endif /* (NB_MMU_MODES > 12) */
247
-
248
/* these access are slower, they must be as rare as possible */
249
#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
250
#define MEMSUFFIX _data
251
--
252
2.20.1
253
254
diff view generated by jsdifflib
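The blocks deleted above are the last of the MMU_MODE*_SUFFIX machinery: repeatedly including exec/cpu_ldst_template.h with different CPU_MMU_INDEX/MEMSUFFIX values token-pasted a whole family of accessors into existence via glue(). For readers unfamiliar with that trick, a stand-alone toy (invented names, plain C, builds with any C compiler):

    /* toy_glue.c -- the token-pasting pattern behind the removed templates. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define xglue(a, b) a##b
    #define glue(a, b)  xglue(a, b)

    /* "Template": each expansion generates one loader named toy_ld<SUFFIX>. */
    #define DEFINE_TOY_LOAD(SUFFIX, TYPE)                  \
        static TYPE glue(toy_ld, SUFFIX)(const void *p)    \
        {                                                  \
            TYPE v;                                        \
            memcpy(&v, p, sizeof(v));                      \
            return v;                                      \
        }

    DEFINE_TOY_LOAD(ub, uint8_t)    /* expands to toy_ldub() */
    DEFINE_TOY_LOAD(uw, uint16_t)   /* expands to toy_lduw() */
    DEFINE_TOY_LOAD(l,  uint32_t)   /* expands to toy_ldl()  */

    int main(void)
    {
        uint32_t word = 0x11223344;   /* printed bytes are host-endian */

        printf("%x %x %x\n", (unsigned)toy_ldub(&word),
               (unsigned)toy_lduw(&word), (unsigned)toy_ldl(&word));
        return 0;
    }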
1
We do not need the entire CPUArchState to compute these values.
1
Reduce the amount of preprocessor obfuscation by expanding
2
the text of each of the functions generated. The result is
3
only slightly smaller than the original.
2
4
5
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
9
---
8
accel/tcg/cputlb.c | 15 ++++++++-------
10
include/exec/cpu_ldst.h | 67 +++++++-----------
9
1 file changed, 8 insertions(+), 7 deletions(-)
11
include/exec/cpu_ldst_template.h | 117 -------------------------------
12
accel/tcg/cputlb.c | 107 +++++++++++++++++++++++++++-
13
3 files changed, 130 insertions(+), 161 deletions(-)
14
delete mode 100644 include/exec/cpu_ldst_template.h
10
15
16
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/exec/cpu_ldst.h
19
+++ b/include/exec/cpu_ldst.h
20
@@ -XXX,XX +XXX,XX @@ typedef target_ulong abi_ptr;
21
#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
22
#endif
23
24
-#if defined(CONFIG_USER_ONLY)
25
-
26
-extern __thread uintptr_t helper_retaddr;
27
-
28
-static inline void set_helper_retaddr(uintptr_t ra)
29
-{
30
- helper_retaddr = ra;
31
- /*
32
- * Ensure that this write is visible to the SIGSEGV handler that
33
- * may be invoked due to a subsequent invalid memory operation.
34
- */
35
- signal_barrier();
36
-}
37
-
38
-static inline void clear_helper_retaddr(void)
39
-{
40
- /*
41
- * Ensure that previous memory operations have succeeded before
42
- * removing the data visible to the signal handler.
43
- */
44
- signal_barrier();
45
- helper_retaddr = 0;
46
-}
47
-
48
-/* In user-only mode we provide only the _code and _data accessors. */
49
-
50
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
51
uint32_t cpu_lduw_data(CPUArchState *env, abi_ptr ptr);
52
uint32_t cpu_ldl_data(CPUArchState *env, abi_ptr ptr);
53
@@ -XXX,XX +XXX,XX @@ void cpu_stl_data_ra(CPUArchState *env, abi_ptr ptr,
54
void cpu_stq_data_ra(CPUArchState *env, abi_ptr ptr,
55
uint64_t val, uintptr_t retaddr);
56
57
+#if defined(CONFIG_USER_ONLY)
58
+
59
+extern __thread uintptr_t helper_retaddr;
60
+
61
+static inline void set_helper_retaddr(uintptr_t ra)
62
+{
63
+ helper_retaddr = ra;
64
+ /*
65
+ * Ensure that this write is visible to the SIGSEGV handler that
66
+ * may be invoked due to a subsequent invalid memory operation.
67
+ */
68
+ signal_barrier();
69
+}
70
+
71
+static inline void clear_helper_retaddr(void)
72
+{
73
+ /*
74
+ * Ensure that previous memory operations have succeeded before
75
+ * removing the data visible to the signal handler.
76
+ */
77
+ signal_barrier();
78
+ helper_retaddr = 0;
79
+}
80
+
81
/*
82
* Provide the same *_mmuidx_ra interface as for softmmu.
83
* The mmu_idx argument is ignored.
84
@@ -XXX,XX +XXX,XX @@ void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
85
void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
86
int mmu_idx, uintptr_t retaddr);
87
88
-/* these access are slower, they must be as rare as possible */
89
-#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
90
-#define MEMSUFFIX _data
91
-#define DATA_SIZE 1
92
-#include "exec/cpu_ldst_template.h"
93
-
94
-#define DATA_SIZE 2
95
-#include "exec/cpu_ldst_template.h"
96
-
97
-#define DATA_SIZE 4
98
-#include "exec/cpu_ldst_template.h"
99
-
100
-#define DATA_SIZE 8
101
-#include "exec/cpu_ldst_template.h"
102
-#undef CPU_MMU_INDEX
103
-#undef MEMSUFFIX
104
-
105
#endif /* defined(CONFIG_USER_ONLY) */
106
107
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
108
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
109
deleted file mode 100644
110
index XXXXXXX..XXXXXXX
111
--- a/include/exec/cpu_ldst_template.h
112
+++ /dev/null
113
@@ -XXX,XX +XXX,XX @@
114
-/*
115
- * Software MMU support
116
- *
117
- * Generate inline load/store functions for one MMU mode and data
118
- * size.
119
- *
120
- * Generate a store function as well as signed and unsigned loads.
121
- *
122
- * Not used directly but included from cpu_ldst.h.
123
- *
124
- * Copyright (c) 2003 Fabrice Bellard
125
- *
126
- * This library is free software; you can redistribute it and/or
127
- * modify it under the terms of the GNU Lesser General Public
128
- * License as published by the Free Software Foundation; either
129
- * version 2 of the License, or (at your option) any later version.
130
- *
131
- * This library is distributed in the hope that it will be useful,
132
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
133
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
134
- * Lesser General Public License for more details.
135
- *
136
- * You should have received a copy of the GNU Lesser General Public
137
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
138
- */
139
-
140
-#if DATA_SIZE == 8
141
-#define SUFFIX q
142
-#define USUFFIX q
143
-#define DATA_TYPE uint64_t
144
-#define SHIFT 3
145
-#elif DATA_SIZE == 4
146
-#define SUFFIX l
147
-#define USUFFIX l
148
-#define DATA_TYPE uint32_t
149
-#define SHIFT 2
150
-#elif DATA_SIZE == 2
151
-#define SUFFIX w
152
-#define USUFFIX uw
153
-#define DATA_TYPE uint16_t
154
-#define DATA_STYPE int16_t
155
-#define SHIFT 1
156
-#elif DATA_SIZE == 1
157
-#define SUFFIX b
158
-#define USUFFIX ub
159
-#define DATA_TYPE uint8_t
160
-#define DATA_STYPE int8_t
161
-#define SHIFT 0
162
-#else
163
-#error unsupported data size
164
-#endif
165
-
166
-#if DATA_SIZE == 8
167
-#define RES_TYPE uint64_t
168
-#else
169
-#define RES_TYPE uint32_t
170
-#endif
171
-
172
-/* generic load/store macros */
173
-
174
-static inline RES_TYPE
175
-glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
176
- target_ulong ptr,
177
- uintptr_t retaddr)
178
-{
179
- return glue(glue(cpu_ld, USUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX,
180
- retaddr);
181
-}
182
-
183
-static inline RES_TYPE
184
-glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
185
-{
186
- return glue(glue(cpu_ld, USUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX, 0);
187
-}
188
-
189
-#if DATA_SIZE <= 2
190
-static inline int
191
-glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
192
- target_ulong ptr,
193
- uintptr_t retaddr)
194
-{
195
- return glue(glue(cpu_lds, SUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX,
196
- retaddr);
197
-}
198
-
199
-static inline int
200
-glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
201
-{
202
- return glue(glue(cpu_lds, SUFFIX), _mmuidx_ra)(env, ptr, CPU_MMU_INDEX, 0);
203
-}
204
-#endif
205
-
206
-/* generic store macro */
207
-
208
-static inline void
209
-glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
210
- target_ulong ptr,
211
- RES_TYPE v, uintptr_t retaddr)
212
-{
213
- glue(glue(cpu_st, SUFFIX), _mmuidx_ra)(env, ptr, v, CPU_MMU_INDEX,
214
- retaddr);
215
-}
216
-
217
-static inline void
218
-glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
219
- RES_TYPE v)
220
-{
221
- glue(glue(cpu_st, SUFFIX), _mmuidx_ra)(env, ptr, v, CPU_MMU_INDEX, 0);
222
-}
223
-
224
-#undef RES_TYPE
225
-#undef DATA_TYPE
226
-#undef DATA_STYPE
227
-#undef SUFFIX
228
-#undef USUFFIX
229
-#undef DATA_SIZE
230
-#undef SHIFT
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
231
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
12
index XXXXXXX..XXXXXXX 100644
232
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
233
--- a/accel/tcg/cputlb.c
14
+++ b/accel/tcg/cputlb.c
234
+++ b/accel/tcg/cputlb.c
15
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
235
@@ -XXX,XX +XXX,XX @@
16
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
236
#include "qemu/atomic128.h"
17
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
237
#include "translate-all.h"
18
238
#include "trace-root.h"
19
-static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
239
-#include "qemu/plugin.h"
20
+static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
240
#include "trace/mem.h"
21
{
241
#ifdef CONFIG_PLUGIN
22
- return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
242
#include "qemu/plugin-memory.h"
23
+ return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
243
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
244
? helper_le_ldq_mmu : helper_be_ldq_mmu);
24
}
245
}
25
246
26
-static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
247
+uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
27
+static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
248
+ uintptr_t retaddr)
28
{
249
+{
29
- return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
250
+ return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
30
+ return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
251
+}
252
+
253
+int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
254
+{
255
+ return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
256
+}
257
+
258
+uint32_t cpu_lduw_data_ra(CPUArchState *env, target_ulong ptr,
259
+ uintptr_t retaddr)
260
+{
261
+ return cpu_lduw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
262
+}
263
+
264
+int cpu_ldsw_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
265
+{
266
+ return cpu_ldsw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
267
+}
268
+
269
+uint32_t cpu_ldl_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
270
+{
271
+ return cpu_ldl_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
272
+}
273
+
274
+uint64_t cpu_ldq_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
275
+{
276
+ return cpu_ldq_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
277
+}
278
+
279
+uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
280
+{
281
+ return cpu_ldub_data_ra(env, ptr, 0);
282
+}
283
+
284
+int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
285
+{
286
+ return cpu_ldsb_data_ra(env, ptr, 0);
287
+}
288
+
289
+uint32_t cpu_lduw_data(CPUArchState *env, target_ulong ptr)
290
+{
291
+ return cpu_lduw_data_ra(env, ptr, 0);
292
+}
293
+
294
+int cpu_ldsw_data(CPUArchState *env, target_ulong ptr)
295
+{
296
+ return cpu_ldsw_data_ra(env, ptr, 0);
297
+}
298
+
299
+uint32_t cpu_ldl_data(CPUArchState *env, target_ulong ptr)
300
+{
301
+ return cpu_ldl_data_ra(env, ptr, 0);
302
+}
303
+
304
+uint64_t cpu_ldq_data(CPUArchState *env, target_ulong ptr)
305
+{
306
+ return cpu_ldq_data_ra(env, ptr, 0);
307
+}
308
+
309
/*
310
* Store Helpers
311
*/
312
@@ -XXX,XX +XXX,XX @@ void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
313
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ);
31
}
314
}
32
315
33
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
316
+void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
34
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)
317
+ uint32_t val, uintptr_t retaddr)
35
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
318
+{
36
{
319
+ cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
37
CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
320
+}
38
- size_t old_size = tlb_n_entries(env, mmu_idx);
321
+
39
+ size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
322
+void cpu_stw_data_ra(CPUArchState *env, target_ulong ptr,
40
size_t rate;
323
+ uint32_t val, uintptr_t retaddr)
41
size_t new_size = old_size;
324
+{
42
int64_t now = get_clock_realtime();
325
+ cpu_stw_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
43
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
326
+}
44
env_tlb(env)->d[mmu_idx].large_page_addr = -1;
327
+
45
env_tlb(env)->d[mmu_idx].large_page_mask = -1;
328
+void cpu_stl_data_ra(CPUArchState *env, target_ulong ptr,
46
env_tlb(env)->d[mmu_idx].vindex = 0;
329
+ uint32_t val, uintptr_t retaddr)
47
- memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
330
+{
48
+ memset(env_tlb(env)->f[mmu_idx].table, -1,
331
+ cpu_stl_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
49
+ sizeof_tlb(&env_tlb(env)->f[mmu_idx]));
332
+}
50
memset(env_tlb(env)->d[mmu_idx].vtable, -1,
333
+
51
sizeof(env_tlb(env)->d[0].vtable));
334
+void cpu_stq_data_ra(CPUArchState *env, target_ulong ptr,
52
}
335
+ uint64_t val, uintptr_t retaddr)
53
@@ -XXX,XX +XXX,XX @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
336
+{
54
qemu_spin_lock(&env_tlb(env)->c.lock);
337
+ cpu_stq_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
55
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
338
+}
56
unsigned int i;
339
+
57
- unsigned int n = tlb_n_entries(env, mmu_idx);
340
+void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
58
+ unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
341
+{
59
342
+ cpu_stb_data_ra(env, ptr, val, 0);
60
for (i = 0; i < n; i++) {
343
+}
61
tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
344
+
345
+void cpu_stw_data(CPUArchState *env, target_ulong ptr, uint32_t val)
346
+{
347
+ cpu_stw_data_ra(env, ptr, val, 0);
348
+}
349
+
350
+void cpu_stl_data(CPUArchState *env, target_ulong ptr, uint32_t val)
351
+{
352
+ cpu_stl_data_ra(env, ptr, val, 0);
353
+}
354
+
355
+void cpu_stq_data(CPUArchState *env, target_ulong ptr, uint64_t val)
356
+{
357
+ cpu_stq_data_ra(env, ptr, val, 0);
358
+}
359
+
360
/* First set of helpers allows passing in of OI and RETADDR. This makes
361
them callable from other helpers. */
362
62
--
363
--
63
2.20.1
364
2.20.1
64
365
65
366
diff view generated by jsdifflib
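The patch above replaces the glue()-generated cpu_ld*/cpu_st* templates with plainly written out-of-line wrappers. A minimal, self-contained sketch of that wrapper pattern, using a toy environment and hypothetical toy_* names rather than QEMU's real CPUArchState and helpers, might look like this:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins: none of these are QEMU types or functions. */
typedef struct ToyEnv { const uint8_t *mem; } ToyEnv;

static int toy_mmu_index(ToyEnv *env) { (void)env; return 0; }

/* The underlying accessor that takes an explicit mmu index and return address. */
static uint32_t toy_ldl_mmuidx_ra(ToyEnv *env, uintptr_t ptr,
                                  int mmu_idx, uintptr_t ra)
{
    (void)mmu_idx; (void)ra;
    const uint8_t *p = env->mem + ptr;
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/*
 * Instead of a macro template multiply-included once per data size,
 * each thin wrapper is written out in full: easier to read and grep.
 */
static uint32_t toy_ldl_data_ra(ToyEnv *env, uintptr_t ptr, uintptr_t ra)
{
    return toy_ldl_mmuidx_ra(env, ptr, toy_mmu_index(env), ra);
}

static uint32_t toy_ldl_data(ToyEnv *env, uintptr_t ptr)
{
    return toy_ldl_data_ra(env, ptr, 0);
}

int main(void)
{
    static const uint8_t ram[4] = { 0x78, 0x56, 0x34, 0x12 };
    ToyEnv env = { .mem = ram };
    printf("0x%08" PRIx32 "\n", toy_ldl_data(&env, 0)); /* prints 0x12345678 */
    return 0;
}

The point is only the shape of the change: one small readable function per access size, delegating to the *_mmuidx_ra form, instead of a preprocessor template.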
New patch
1
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
3
We currently search both the root and the tcg/ directories for tcg
4
files:
5
6
$ git grep '#include "tcg/' | wc -l
7
28
8
9
$ git grep '#include "tcg[^/]' | wc -l
10
94
11
12
To simplify the preprocessor search path, unify by spelling out the
13
tcg/ directory.
14
15
Patch created mechanically by running:
16
17
$ for x in \
18
tcg.h tcg-mo.h tcg-op.h tcg-opc.h \
19
tcg-op-gvec.h tcg-gvec-desc.h; do \
20
sed -i "s,#include \"$x\",#include \"tcg/$x\"," \
21
$(git grep -l "#include \"$x\""); \
22
done
23
24
Acked-by: David Gibson <david@gibson.dropbear.id.au> (ppc parts)
25
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
26
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
27
Reviewed-by: Stefan Weil <sw@weilnetz.de>
28
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
29
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
30
Message-Id: <20200101112303.20724-2-philmd@redhat.com>
31
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
32
---
33
include/exec/cpu_ldst.h | 2 +-
34
tcg/i386/tcg-target.h | 2 +-
35
tcg/tcg-op.h | 2 +-
36
tcg/tcg.h | 4 ++--
37
accel/tcg/cpu-exec.c | 2 +-
38
accel/tcg/tcg-runtime-gvec.c | 2 +-
39
accel/tcg/tcg-runtime.c | 2 +-
40
accel/tcg/translate-all.c | 2 +-
41
accel/tcg/user-exec.c | 2 +-
42
bsd-user/main.c | 2 +-
43
cpus.c | 2 +-
44
exec.c | 2 +-
45
linux-user/main.c | 2 +-
46
linux-user/syscall.c | 2 +-
47
target/alpha/translate.c | 2 +-
48
target/arm/helper-a64.c | 2 +-
49
target/arm/sve_helper.c | 2 +-
50
target/arm/translate-a64.c | 4 ++--
51
target/arm/translate-sve.c | 6 +++---
52
target/arm/translate.c | 4 ++--
53
target/cris/translate.c | 2 +-
54
target/hppa/translate.c | 2 +-
55
target/i386/mem_helper.c | 2 +-
56
target/i386/translate.c | 2 +-
57
target/lm32/translate.c | 2 +-
58
target/m68k/translate.c | 2 +-
59
target/microblaze/translate.c | 2 +-
60
target/mips/translate.c | 2 +-
61
target/moxie/translate.c | 2 +-
62
target/nios2/translate.c | 2 +-
63
target/openrisc/translate.c | 2 +-
64
target/ppc/mem_helper.c | 2 +-
65
target/ppc/translate.c | 4 ++--
66
target/riscv/cpu_helper.c | 2 +-
67
target/riscv/translate.c | 2 +-
68
target/s390x/mem_helper.c | 2 +-
69
target/s390x/translate.c | 4 ++--
70
target/sh4/translate.c | 2 +-
71
target/sparc/ldst_helper.c | 2 +-
72
target/sparc/translate.c | 2 +-
73
target/tilegx/translate.c | 2 +-
74
target/tricore/translate.c | 2 +-
75
target/unicore32/translate.c | 2 +-
76
target/xtensa/translate.c | 2 +-
77
tcg/optimize.c | 2 +-
78
tcg/tcg-common.c | 2 +-
79
tcg/tcg-op-gvec.c | 8 ++++----
80
tcg/tcg-op-vec.c | 6 +++---
81
tcg/tcg-op.c | 6 +++---
82
tcg/tcg.c | 2 +-
83
tcg/tci.c | 2 +-
84
51 files changed, 65 insertions(+), 65 deletions(-)
85
86
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
87
index XXXXXXX..XXXXXXX 100644
88
--- a/include/exec/cpu_ldst.h
89
+++ b/include/exec/cpu_ldst.h
90
@@ -XXX,XX +XXX,XX @@ static inline void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
91
#else
92
93
/* Needed for TCG_OVERSIZED_GUEST */
94
-#include "tcg.h"
95
+#include "tcg/tcg.h"
96
97
static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
98
{
99
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
100
index XXXXXXX..XXXXXXX 100644
101
--- a/tcg/i386/tcg-target.h
102
+++ b/tcg/i386/tcg-target.h
103
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
104
* The x86 has a pretty strong memory ordering which only really
105
* allows for some stores to be re-ordered after loads.
106
*/
107
-#include "tcg-mo.h"
108
+#include "tcg/tcg-mo.h"
109
110
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
111
112
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
113
index XXXXXXX..XXXXXXX 100644
114
--- a/tcg/tcg-op.h
115
+++ b/tcg/tcg-op.h
116
@@ -XXX,XX +XXX,XX @@
117
#ifndef TCG_TCG_OP_H
118
#define TCG_TCG_OP_H
119
120
-#include "tcg.h"
121
+#include "tcg/tcg.h"
122
#include "exec/helper-proto.h"
123
#include "exec/helper-gen.h"
124
125
diff --git a/tcg/tcg.h b/tcg/tcg.h
126
index XXXXXXX..XXXXXXX 100644
127
--- a/tcg/tcg.h
128
+++ b/tcg/tcg.h
129
@@ -XXX,XX +XXX,XX @@
130
#include "qemu/bitops.h"
131
#include "qemu/plugin.h"
132
#include "qemu/queue.h"
133
-#include "tcg-mo.h"
134
+#include "tcg/tcg-mo.h"
135
#include "tcg-target.h"
136
#include "qemu/int128.h"
137
138
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
139
140
typedef enum TCGOpcode {
141
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
142
-#include "tcg-opc.h"
143
+#include "tcg/tcg-opc.h"
144
#undef DEF
145
NB_OPS,
146
} TCGOpcode;
147
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
148
index XXXXXXX..XXXXXXX 100644
149
--- a/accel/tcg/cpu-exec.c
150
+++ b/accel/tcg/cpu-exec.c
151
@@ -XXX,XX +XXX,XX @@
152
#include "trace.h"
153
#include "disas/disas.h"
154
#include "exec/exec-all.h"
155
-#include "tcg.h"
156
+#include "tcg/tcg.h"
157
#include "qemu/atomic.h"
158
#include "sysemu/qtest.h"
159
#include "qemu/timer.h"
160
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
161
index XXXXXXX..XXXXXXX 100644
162
--- a/accel/tcg/tcg-runtime-gvec.c
163
+++ b/accel/tcg/tcg-runtime-gvec.c
164
@@ -XXX,XX +XXX,XX @@
165
#include "qemu/host-utils.h"
166
#include "cpu.h"
167
#include "exec/helper-proto.h"
168
-#include "tcg-gvec-desc.h"
169
+#include "tcg/tcg-gvec-desc.h"
170
171
172
/* Virtually all hosts support 16-byte vectors. Those that don't can emulate
173
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
174
index XXXXXXX..XXXXXXX 100644
175
--- a/accel/tcg/tcg-runtime.c
176
+++ b/accel/tcg/tcg-runtime.c
177
@@ -XXX,XX +XXX,XX @@
178
#include "exec/tb-lookup.h"
179
#include "disas/disas.h"
180
#include "exec/log.h"
181
-#include "tcg.h"
182
+#include "tcg/tcg.h"
183
184
/* 32-bit helpers */
185
186
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
187
index XXXXXXX..XXXXXXX 100644
188
--- a/accel/tcg/translate-all.c
189
+++ b/accel/tcg/translate-all.c
190
@@ -XXX,XX +XXX,XX @@
191
#include "trace.h"
192
#include "disas/disas.h"
193
#include "exec/exec-all.h"
194
-#include "tcg.h"
195
+#include "tcg/tcg.h"
196
#if defined(CONFIG_USER_ONLY)
197
#include "qemu.h"
198
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
199
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
200
index XXXXXXX..XXXXXXX 100644
201
--- a/accel/tcg/user-exec.c
202
+++ b/accel/tcg/user-exec.c
203
@@ -XXX,XX +XXX,XX @@
204
#include "cpu.h"
205
#include "disas/disas.h"
206
#include "exec/exec-all.h"
207
-#include "tcg.h"
208
+#include "tcg/tcg.h"
209
#include "qemu/bitops.h"
210
#include "exec/cpu_ldst.h"
211
#include "translate-all.h"
212
diff --git a/bsd-user/main.c b/bsd-user/main.c
213
index XXXXXXX..XXXXXXX 100644
214
--- a/bsd-user/main.c
215
+++ b/bsd-user/main.c
216
@@ -XXX,XX +XXX,XX @@
217
#include "qemu/module.h"
218
#include "cpu.h"
219
#include "exec/exec-all.h"
220
-#include "tcg.h"
221
+#include "tcg/tcg.h"
222
#include "qemu/timer.h"
223
#include "qemu/envlist.h"
224
#include "exec/log.h"
225
diff --git a/cpus.c b/cpus.c
226
index XXXXXXX..XXXXXXX 100644
227
--- a/cpus.c
228
+++ b/cpus.c
229
@@ -XXX,XX +XXX,XX @@
230
#include "qemu/bitmap.h"
231
#include "qemu/seqlock.h"
232
#include "qemu/guest-random.h"
233
-#include "tcg.h"
234
+#include "tcg/tcg.h"
235
#include "hw/nmi.h"
236
#include "sysemu/replay.h"
237
#include "sysemu/runstate.h"
238
diff --git a/exec.c b/exec.c
239
index XXXXXXX..XXXXXXX 100644
240
--- a/exec.c
241
+++ b/exec.c
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
#include "exec/exec-all.h"
245
#include "exec/target_page.h"
246
-#include "tcg.h"
247
+#include "tcg/tcg.h"
248
#include "hw/qdev-core.h"
249
#include "hw/qdev-properties.h"
250
#if !defined(CONFIG_USER_ONLY)
251
diff --git a/linux-user/main.c b/linux-user/main.c
252
index XXXXXXX..XXXXXXX 100644
253
--- a/linux-user/main.c
254
+++ b/linux-user/main.c
255
@@ -XXX,XX +XXX,XX @@
256
#include "qemu/plugin.h"
257
#include "cpu.h"
258
#include "exec/exec-all.h"
259
-#include "tcg.h"
260
+#include "tcg/tcg.h"
261
#include "qemu/timer.h"
262
#include "qemu/envlist.h"
263
#include "qemu/guest-random.h"
264
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
265
index XXXXXXX..XXXXXXX 100644
266
--- a/linux-user/syscall.c
267
+++ b/linux-user/syscall.c
268
@@ -XXX,XX +XXX,XX @@
269
#include "user/syscall-trace.h"
270
#include "qapi/error.h"
271
#include "fd-trans.h"
272
-#include "tcg.h"
273
+#include "tcg/tcg.h"
274
275
#ifndef CLONE_IO
276
#define CLONE_IO 0x80000000 /* Clone io context */
277
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/alpha/translate.c
280
+++ b/target/alpha/translate.c
281
@@ -XXX,XX +XXX,XX @@
282
#include "disas/disas.h"
283
#include "qemu/host-utils.h"
284
#include "exec/exec-all.h"
285
-#include "tcg-op.h"
286
+#include "tcg/tcg-op.h"
287
#include "exec/cpu_ldst.h"
288
#include "exec/helper-proto.h"
289
#include "exec/helper-gen.h"
290
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
291
index XXXXXXX..XXXXXXX 100644
292
--- a/target/arm/helper-a64.c
293
+++ b/target/arm/helper-a64.c
294
@@ -XXX,XX +XXX,XX @@
295
#include "exec/cpu_ldst.h"
296
#include "qemu/int128.h"
297
#include "qemu/atomic128.h"
298
-#include "tcg.h"
299
+#include "tcg/tcg.h"
300
#include "fpu/softfloat.h"
301
#include <zlib.h> /* For crc32 */
302
303
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
304
index XXXXXXX..XXXXXXX 100644
305
--- a/target/arm/sve_helper.c
306
+++ b/target/arm/sve_helper.c
307
@@ -XXX,XX +XXX,XX @@
308
#include "exec/helper-proto.h"
309
#include "tcg/tcg-gvec-desc.h"
310
#include "fpu/softfloat.h"
311
-#include "tcg.h"
312
+#include "tcg/tcg.h"
313
314
315
/* Note that vector data is stored in host-endian 64-bit chunks,
316
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
317
index XXXXXXX..XXXXXXX 100644
318
--- a/target/arm/translate-a64.c
319
+++ b/target/arm/translate-a64.c
320
@@ -XXX,XX +XXX,XX @@
321
322
#include "cpu.h"
323
#include "exec/exec-all.h"
324
-#include "tcg-op.h"
325
-#include "tcg-op-gvec.h"
326
+#include "tcg/tcg-op.h"
327
+#include "tcg/tcg-op-gvec.h"
328
#include "qemu/log.h"
329
#include "arm_ldst.h"
330
#include "translate.h"
331
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
332
index XXXXXXX..XXXXXXX 100644
333
--- a/target/arm/translate-sve.c
334
+++ b/target/arm/translate-sve.c
335
@@ -XXX,XX +XXX,XX @@
336
#include "qemu/osdep.h"
337
#include "cpu.h"
338
#include "exec/exec-all.h"
339
-#include "tcg-op.h"
340
-#include "tcg-op-gvec.h"
341
-#include "tcg-gvec-desc.h"
342
+#include "tcg/tcg-op.h"
343
+#include "tcg/tcg-op-gvec.h"
344
+#include "tcg/tcg-gvec-desc.h"
345
#include "qemu/log.h"
346
#include "arm_ldst.h"
347
#include "translate.h"
348
diff --git a/target/arm/translate.c b/target/arm/translate.c
349
index XXXXXXX..XXXXXXX 100644
350
--- a/target/arm/translate.c
351
+++ b/target/arm/translate.c
352
@@ -XXX,XX +XXX,XX @@
353
#include "internals.h"
354
#include "disas/disas.h"
355
#include "exec/exec-all.h"
356
-#include "tcg-op.h"
357
-#include "tcg-op-gvec.h"
358
+#include "tcg/tcg-op.h"
359
+#include "tcg/tcg-op-gvec.h"
360
#include "qemu/log.h"
361
#include "qemu/bitops.h"
362
#include "arm_ldst.h"
363
diff --git a/target/cris/translate.c b/target/cris/translate.c
364
index XXXXXXX..XXXXXXX 100644
365
--- a/target/cris/translate.c
366
+++ b/target/cris/translate.c
367
@@ -XXX,XX +XXX,XX @@
368
#include "cpu.h"
369
#include "disas/disas.h"
370
#include "exec/exec-all.h"
371
-#include "tcg-op.h"
372
+#include "tcg/tcg-op.h"
373
#include "exec/helper-proto.h"
374
#include "mmu.h"
375
#include "exec/cpu_ldst.h"
376
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
377
index XXXXXXX..XXXXXXX 100644
378
--- a/target/hppa/translate.c
379
+++ b/target/hppa/translate.c
380
@@ -XXX,XX +XXX,XX @@
381
#include "disas/disas.h"
382
#include "qemu/host-utils.h"
383
#include "exec/exec-all.h"
384
-#include "tcg-op.h"
385
+#include "tcg/tcg-op.h"
386
#include "exec/cpu_ldst.h"
387
#include "exec/helper-proto.h"
388
#include "exec/helper-gen.h"
389
diff --git a/target/i386/mem_helper.c b/target/i386/mem_helper.c
390
index XXXXXXX..XXXXXXX 100644
391
--- a/target/i386/mem_helper.c
392
+++ b/target/i386/mem_helper.c
393
@@ -XXX,XX +XXX,XX @@
394
#include "exec/cpu_ldst.h"
395
#include "qemu/int128.h"
396
#include "qemu/atomic128.h"
397
-#include "tcg.h"
398
+#include "tcg/tcg.h"
399
400
void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
401
{
402
diff --git a/target/i386/translate.c b/target/i386/translate.c
403
index XXXXXXX..XXXXXXX 100644
404
--- a/target/i386/translate.c
405
+++ b/target/i386/translate.c
406
@@ -XXX,XX +XXX,XX @@
407
#include "cpu.h"
408
#include "disas/disas.h"
409
#include "exec/exec-all.h"
410
-#include "tcg-op.h"
411
+#include "tcg/tcg-op.h"
412
#include "exec/cpu_ldst.h"
413
#include "exec/translator.h"
414
415
diff --git a/target/lm32/translate.c b/target/lm32/translate.c
416
index XXXXXXX..XXXXXXX 100644
417
--- a/target/lm32/translate.c
418
+++ b/target/lm32/translate.c
419
@@ -XXX,XX +XXX,XX @@
420
#include "exec/helper-proto.h"
421
#include "exec/exec-all.h"
422
#include "exec/translator.h"
423
-#include "tcg-op.h"
424
+#include "tcg/tcg-op.h"
425
#include "qemu/qemu-print.h"
426
427
#include "exec/cpu_ldst.h"
428
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
429
index XXXXXXX..XXXXXXX 100644
430
--- a/target/m68k/translate.c
431
+++ b/target/m68k/translate.c
432
@@ -XXX,XX +XXX,XX @@
433
#include "cpu.h"
434
#include "disas/disas.h"
435
#include "exec/exec-all.h"
436
-#include "tcg-op.h"
437
+#include "tcg/tcg-op.h"
438
#include "qemu/log.h"
439
#include "qemu/qemu-print.h"
440
#include "exec/cpu_ldst.h"
441
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
442
index XXXXXXX..XXXXXXX 100644
443
--- a/target/microblaze/translate.c
444
+++ b/target/microblaze/translate.c
445
@@ -XXX,XX +XXX,XX @@
446
#include "cpu.h"
447
#include "disas/disas.h"
448
#include "exec/exec-all.h"
449
-#include "tcg-op.h"
450
+#include "tcg/tcg-op.h"
451
#include "exec/helper-proto.h"
452
#include "microblaze-decode.h"
453
#include "exec/cpu_ldst.h"
454
diff --git a/target/mips/translate.c b/target/mips/translate.c
455
index XXXXXXX..XXXXXXX 100644
456
--- a/target/mips/translate.c
457
+++ b/target/mips/translate.c
458
@@ -XXX,XX +XXX,XX @@
459
#include "internal.h"
460
#include "disas/disas.h"
461
#include "exec/exec-all.h"
462
-#include "tcg-op.h"
463
+#include "tcg/tcg-op.h"
464
#include "exec/cpu_ldst.h"
465
#include "hw/mips/cpudevs.h"
466
467
diff --git a/target/moxie/translate.c b/target/moxie/translate.c
468
index XXXXXXX..XXXXXXX 100644
469
--- a/target/moxie/translate.c
470
+++ b/target/moxie/translate.c
471
@@ -XXX,XX +XXX,XX @@
472
#include "cpu.h"
473
#include "exec/exec-all.h"
474
#include "disas/disas.h"
475
-#include "tcg-op.h"
476
+#include "tcg/tcg-op.h"
477
#include "exec/cpu_ldst.h"
478
#include "qemu/qemu-print.h"
479
480
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
481
index XXXXXXX..XXXXXXX 100644
482
--- a/target/nios2/translate.c
483
+++ b/target/nios2/translate.c
484
@@ -XXX,XX +XXX,XX @@
485
486
#include "qemu/osdep.h"
487
#include "cpu.h"
488
-#include "tcg-op.h"
489
+#include "tcg/tcg-op.h"
490
#include "exec/exec-all.h"
491
#include "disas/disas.h"
492
#include "exec/helper-proto.h"
493
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
494
index XXXXXXX..XXXXXXX 100644
495
--- a/target/openrisc/translate.c
496
+++ b/target/openrisc/translate.c
497
@@ -XXX,XX +XXX,XX @@
498
#include "cpu.h"
499
#include "exec/exec-all.h"
500
#include "disas/disas.h"
501
-#include "tcg-op.h"
502
+#include "tcg/tcg-op.h"
503
#include "qemu/log.h"
504
#include "qemu/bitops.h"
505
#include "qemu/qemu-print.h"
506
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
507
index XXXXXXX..XXXXXXX 100644
508
--- a/target/ppc/mem_helper.c
509
+++ b/target/ppc/mem_helper.c
510
@@ -XXX,XX +XXX,XX @@
511
#include "exec/helper-proto.h"
512
#include "helper_regs.h"
513
#include "exec/cpu_ldst.h"
514
-#include "tcg.h"
515
+#include "tcg/tcg.h"
516
#include "internal.h"
517
#include "qemu/atomic128.h"
518
519
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
520
index XXXXXXX..XXXXXXX 100644
521
--- a/target/ppc/translate.c
522
+++ b/target/ppc/translate.c
523
@@ -XXX,XX +XXX,XX @@
524
#include "internal.h"
525
#include "disas/disas.h"
526
#include "exec/exec-all.h"
527
-#include "tcg-op.h"
528
-#include "tcg-op-gvec.h"
529
+#include "tcg/tcg-op.h"
530
+#include "tcg/tcg-op-gvec.h"
531
#include "qemu/host-utils.h"
532
#include "qemu/main-loop.h"
533
#include "exec/cpu_ldst.h"
534
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
535
index XXXXXXX..XXXXXXX 100644
536
--- a/target/riscv/cpu_helper.c
537
+++ b/target/riscv/cpu_helper.c
538
@@ -XXX,XX +XXX,XX @@
539
#include "qemu/main-loop.h"
540
#include "cpu.h"
541
#include "exec/exec-all.h"
542
-#include "tcg-op.h"
543
+#include "tcg/tcg-op.h"
544
#include "trace.h"
545
546
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
547
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
548
index XXXXXXX..XXXXXXX 100644
549
--- a/target/riscv/translate.c
550
+++ b/target/riscv/translate.c
551
@@ -XXX,XX +XXX,XX @@
552
#include "qemu/osdep.h"
553
#include "qemu/log.h"
554
#include "cpu.h"
555
-#include "tcg-op.h"
556
+#include "tcg/tcg-op.h"
557
#include "disas/disas.h"
558
#include "exec/cpu_ldst.h"
559
#include "exec/exec-all.h"
560
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
561
index XXXXXXX..XXXXXXX 100644
562
--- a/target/s390x/mem_helper.c
563
+++ b/target/s390x/mem_helper.c
564
@@ -XXX,XX +XXX,XX @@
565
#include "exec/cpu_ldst.h"
566
#include "qemu/int128.h"
567
#include "qemu/atomic128.h"
568
-#include "tcg.h"
569
+#include "tcg/tcg.h"
570
571
#if !defined(CONFIG_USER_ONLY)
572
#include "hw/s390x/storage-keys.h"
573
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
574
index XXXXXXX..XXXXXXX 100644
575
--- a/target/s390x/translate.c
576
+++ b/target/s390x/translate.c
577
@@ -XXX,XX +XXX,XX @@
578
#include "internal.h"
579
#include "disas/disas.h"
580
#include "exec/exec-all.h"
581
-#include "tcg-op.h"
582
-#include "tcg-op-gvec.h"
583
+#include "tcg/tcg-op.h"
584
+#include "tcg/tcg-op-gvec.h"
585
#include "qemu/log.h"
586
#include "qemu/host-utils.h"
587
#include "exec/cpu_ldst.h"
588
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
589
index XXXXXXX..XXXXXXX 100644
590
--- a/target/sh4/translate.c
591
+++ b/target/sh4/translate.c
592
@@ -XXX,XX +XXX,XX @@
593
#include "cpu.h"
594
#include "disas/disas.h"
595
#include "exec/exec-all.h"
596
-#include "tcg-op.h"
597
+#include "tcg/tcg-op.h"
598
#include "exec/cpu_ldst.h"
599
#include "exec/helper-proto.h"
600
#include "exec/helper-gen.h"
601
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
602
index XXXXXXX..XXXXXXX 100644
603
--- a/target/sparc/ldst_helper.c
604
+++ b/target/sparc/ldst_helper.c
605
@@ -XXX,XX +XXX,XX @@
606
607
#include "qemu/osdep.h"
608
#include "cpu.h"
609
-#include "tcg.h"
610
+#include "tcg/tcg.h"
611
#include "exec/helper-proto.h"
612
#include "exec/exec-all.h"
613
#include "exec/cpu_ldst.h"
614
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
615
index XXXXXXX..XXXXXXX 100644
616
--- a/target/sparc/translate.c
617
+++ b/target/sparc/translate.c
618
@@ -XXX,XX +XXX,XX @@
619
#include "disas/disas.h"
620
#include "exec/helper-proto.h"
621
#include "exec/exec-all.h"
622
-#include "tcg-op.h"
623
+#include "tcg/tcg-op.h"
624
#include "exec/cpu_ldst.h"
625
626
#include "exec/helper-gen.h"
627
diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c
628
index XXXXXXX..XXXXXXX 100644
629
--- a/target/tilegx/translate.c
630
+++ b/target/tilegx/translate.c
631
@@ -XXX,XX +XXX,XX @@
632
#include "exec/log.h"
633
#include "disas/disas.h"
634
#include "exec/exec-all.h"
635
-#include "tcg-op.h"
636
+#include "tcg/tcg-op.h"
637
#include "exec/cpu_ldst.h"
638
#include "linux-user/syscall_defs.h"
639
640
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
641
index XXXXXXX..XXXXXXX 100644
642
--- a/target/tricore/translate.c
643
+++ b/target/tricore/translate.c
644
@@ -XXX,XX +XXX,XX @@
645
#include "cpu.h"
646
#include "disas/disas.h"
647
#include "exec/exec-all.h"
648
-#include "tcg-op.h"
649
+#include "tcg/tcg-op.h"
650
#include "exec/cpu_ldst.h"
651
#include "qemu/qemu-print.h"
652
653
diff --git a/target/unicore32/translate.c b/target/unicore32/translate.c
654
index XXXXXXX..XXXXXXX 100644
655
--- a/target/unicore32/translate.c
656
+++ b/target/unicore32/translate.c
657
@@ -XXX,XX +XXX,XX @@
658
#include "cpu.h"
659
#include "disas/disas.h"
660
#include "exec/exec-all.h"
661
-#include "tcg-op.h"
662
+#include "tcg/tcg-op.h"
663
#include "qemu/log.h"
664
#include "exec/cpu_ldst.h"
665
#include "exec/translator.h"
666
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
667
index XXXXXXX..XXXXXXX 100644
668
--- a/target/xtensa/translate.c
669
+++ b/target/xtensa/translate.c
670
@@ -XXX,XX +XXX,XX @@
671
#include "cpu.h"
672
#include "exec/exec-all.h"
673
#include "disas/disas.h"
674
-#include "tcg-op.h"
675
+#include "tcg/tcg-op.h"
676
#include "qemu/log.h"
677
#include "qemu/qemu-print.h"
678
#include "exec/cpu_ldst.h"
679
diff --git a/tcg/optimize.c b/tcg/optimize.c
680
index XXXXXXX..XXXXXXX 100644
681
--- a/tcg/optimize.c
682
+++ b/tcg/optimize.c
683
@@ -XXX,XX +XXX,XX @@
684
*/
685
686
#include "qemu/osdep.h"
687
-#include "tcg-op.h"
688
+#include "tcg/tcg-op.h"
689
690
#define CASE_OP_32_64(x) \
691
glue(glue(case INDEX_op_, x), _i32): \
692
diff --git a/tcg/tcg-common.c b/tcg/tcg-common.c
693
index XXXXXXX..XXXXXXX 100644
694
--- a/tcg/tcg-common.c
695
+++ b/tcg/tcg-common.c
696
@@ -XXX,XX +XXX,XX @@ uintptr_t tci_tb_ptr;
697
TCGOpDef tcg_op_defs[] = {
698
#define DEF(s, oargs, iargs, cargs, flags) \
699
{ #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
700
-#include "tcg-opc.h"
701
+#include "tcg/tcg-opc.h"
702
#undef DEF
703
};
704
const size_t tcg_op_defs_max = ARRAY_SIZE(tcg_op_defs);
705
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
706
index XXXXXXX..XXXXXXX 100644
707
--- a/tcg/tcg-op-gvec.c
708
+++ b/tcg/tcg-op-gvec.c
709
@@ -XXX,XX +XXX,XX @@
710
*/
711
712
#include "qemu/osdep.h"
713
-#include "tcg.h"
714
-#include "tcg-op.h"
715
-#include "tcg-op-gvec.h"
716
+#include "tcg/tcg.h"
717
+#include "tcg/tcg-op.h"
718
+#include "tcg/tcg-op-gvec.h"
719
#include "qemu/main-loop.h"
720
-#include "tcg-gvec-desc.h"
721
+#include "tcg/tcg-gvec-desc.h"
722
723
#define MAX_UNROLL 4
724
725
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
726
index XXXXXXX..XXXXXXX 100644
727
--- a/tcg/tcg-op-vec.c
728
+++ b/tcg/tcg-op-vec.c
729
@@ -XXX,XX +XXX,XX @@
730
731
#include "qemu/osdep.h"
732
#include "cpu.h"
733
-#include "tcg.h"
734
-#include "tcg-op.h"
735
-#include "tcg-mo.h"
736
+#include "tcg/tcg.h"
737
+#include "tcg/tcg-op.h"
738
+#include "tcg/tcg-mo.h"
739
740
/* Reduce the number of ifdefs below. This assumes that all uses of
741
TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
742
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
743
index XXXXXXX..XXXXXXX 100644
744
--- a/tcg/tcg-op.c
745
+++ b/tcg/tcg-op.c
746
@@ -XXX,XX +XXX,XX @@
747
#include "qemu/osdep.h"
748
#include "cpu.h"
749
#include "exec/exec-all.h"
750
-#include "tcg.h"
751
-#include "tcg-op.h"
752
-#include "tcg-mo.h"
753
+#include "tcg/tcg.h"
754
+#include "tcg/tcg-op.h"
755
+#include "tcg/tcg-mo.h"
756
#include "trace-tcg.h"
757
#include "trace/mem.h"
758
#include "exec/plugin-gen.h"
759
diff --git a/tcg/tcg.c b/tcg/tcg.c
760
index XXXXXXX..XXXXXXX 100644
761
--- a/tcg/tcg.c
762
+++ b/tcg/tcg.c
763
@@ -XXX,XX +XXX,XX @@
764
#include "hw/boards.h"
765
#endif
766
767
-#include "tcg-op.h"
768
+#include "tcg/tcg-op.h"
769
770
#if UINTPTR_MAX == UINT32_MAX
771
# define ELF_CLASS ELFCLASS32
772
diff --git a/tcg/tci.c b/tcg/tci.c
773
index XXXXXXX..XXXXXXX 100644
774
--- a/tcg/tci.c
775
+++ b/tcg/tci.c
776
@@ -XXX,XX +XXX,XX @@
777
#include "qemu-common.h"
778
#include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
779
#include "exec/cpu_ldst.h"
780
-#include "tcg-op.h"
781
+#include "tcg/tcg-op.h"
782
783
/* Marker for missing code. */
784
#define TODO() \
785
--
786
2.20.1
787
788
diff view generated by jsdifflib
1
In target/arm we will shortly have "too many" mmu_idx.
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
The current minimum barrier is caused by the way in which
3
tlb_flush_page_by_mmuidx is coded.
4
2
5
We can remove this limitation by allocating memory for
3
All the *.inc.c files included by tcg/$TARGET/tcg-target.inc.c
6
consumption by the worker. Assume that this is
4
are in tcg/, their parent directory. To simplify the preprocessor
7
the unlikely case, as it is for the majority
5
search path, include the relative parent path: '..'.
8
of targets which have so far satisfied the BUILD_BUG_ON,
9
and only allocate memory when necessary.
10
6
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Patch created mechanically by running:
8
9
$ for x in tcg-pool.inc.c tcg-ldst.inc.c; do \
10
sed -i "s,#include \"$x\",#include \"../$x\"," \
11
$(git grep -l "#include \"$x\""); \
12
done
13
14
Acked-by: David Gibson <david@gibson.dropbear.id.au> (ppc parts)
15
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
16
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
17
Reviewed-by: Stefan Weil <sw@weilnetz.de>
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
20
Message-Id: <20200101112303.20724-3-philmd@redhat.com>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
21
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
22
---
14
accel/tcg/cputlb.c | 167 +++++++++++++++++++++++++++++++++++----------
23
tcg/aarch64/tcg-target.inc.c | 4 ++--
15
1 file changed, 132 insertions(+), 35 deletions(-)
24
tcg/arm/tcg-target.inc.c | 4 ++--
25
tcg/i386/tcg-target.inc.c | 4 ++--
26
tcg/mips/tcg-target.inc.c | 2 +-
27
tcg/ppc/tcg-target.inc.c | 4 ++--
28
tcg/riscv/tcg-target.inc.c | 4 ++--
29
tcg/s390/tcg-target.inc.c | 4 ++--
30
tcg/sparc/tcg-target.inc.c | 2 +-
31
8 files changed, 14 insertions(+), 14 deletions(-)
16
32
17
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
33
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
18
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
19
--- a/accel/tcg/cputlb.c
35
--- a/tcg/aarch64/tcg-target.inc.c
20
+++ b/accel/tcg/cputlb.c
36
+++ b/tcg/aarch64/tcg-target.inc.c
21
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
37
@@ -XXX,XX +XXX,XX @@
22
}
38
* See the COPYING file in the top-level directory for details.
39
*/
40
41
-#include "tcg-pool.inc.c"
42
+#include "../tcg-pool.inc.c"
43
#include "qemu/bitops.h"
44
45
/* We're going to re-use TCGType in setting of the SF bit, which controls
46
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
23
}
47
}
24
48
25
-/* As we are going to hijack the bottom bits of the page address for a
49
#ifdef CONFIG_SOFTMMU
26
- * mmuidx bit mask we need to fail to build if we can't do that
50
-#include "tcg-ldst.inc.c"
27
+/**
51
+#include "../tcg-ldst.inc.c"
28
+ * tlb_flush_page_by_mmuidx_async_0:
52
29
+ * @cpu: cpu on which to flush
53
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
30
+ * @addr: page of virtual address to flush
54
* TCGMemOpIdx oi, uintptr_t ra)
31
+ * @idxmap: set of mmu_idx to flush
55
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
32
+ *
56
index XXXXXXX..XXXXXXX 100644
33
+ * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
57
--- a/tcg/arm/tcg-target.inc.c
34
+ * at @addr from the tlbs indicated by @idxmap from @cpu.
58
+++ b/tcg/arm/tcg-target.inc.c
59
@@ -XXX,XX +XXX,XX @@
35
*/
60
*/
36
-QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
61
37
-
62
#include "elf.h"
38
-static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
63
-#include "tcg-pool.inc.c"
39
- run_on_cpu_data data)
64
+#include "../tcg-pool.inc.c"
40
+static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
65
41
+ target_ulong addr,
66
int arm_arch = __ARM_ARCH;
42
+ uint16_t idxmap)
67
43
{
68
@@ -XXX,XX +XXX,XX @@ static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
44
CPUArchState *env = cpu->env_ptr;
45
- target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
46
- target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
47
- unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
48
int mmu_idx;
49
50
assert_cpu_is_self(cpu);
51
52
- tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
53
- addr, mmu_idx_bitmap);
54
+ tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
55
56
qemu_spin_lock(&env_tlb(env)->c.lock);
57
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
58
- if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
59
+ if ((idxmap >> mmu_idx) & 1) {
60
tlb_flush_page_locked(env, mmu_idx, addr);
61
}
62
}
63
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
64
tb_flush_jmp_cache(cpu, addr);
65
}
69
}
66
70
67
+/**
71
#ifdef CONFIG_SOFTMMU
68
+ * tlb_flush_page_by_mmuidx_async_1:
72
-#include "tcg-ldst.inc.c"
69
+ * @cpu: cpu on which to flush
73
+#include "../tcg-ldst.inc.c"
70
+ * @data: encoded addr + idxmap
74
71
+ *
75
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
72
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
76
* int mmu_idx, uintptr_t ra)
73
+ * async_run_on_cpu. The idxmap parameter is encoded in the page
77
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
74
+ * offset of the target_ptr field. This limits the set of mmu_idx
78
index XXXXXXX..XXXXXXX 100644
75
+ * that can be passed via this method.
79
--- a/tcg/i386/tcg-target.inc.c
76
+ */
80
+++ b/tcg/i386/tcg-target.inc.c
77
+static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
81
@@ -XXX,XX +XXX,XX @@
78
+ run_on_cpu_data data)
82
* THE SOFTWARE.
79
+{
83
*/
80
+ target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
84
81
+ target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
85
-#include "tcg-pool.inc.c"
82
+ uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
86
+#include "../tcg-pool.inc.c"
83
+
87
84
+ tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
88
#ifdef CONFIG_DEBUG_TCG
85
+}
89
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
86
+
90
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
87
+typedef struct {
88
+ target_ulong addr;
89
+ uint16_t idxmap;
90
+} TLBFlushPageByMMUIdxData;
91
+
92
+/**
93
+ * tlb_flush_page_by_mmuidx_async_2:
94
+ * @cpu: cpu on which to flush
95
+ * @data: allocated addr + idxmap
96
+ *
97
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
98
+ * async_run_on_cpu. The addr+idxmap parameters are stored in a
99
+ * TLBFlushPageByMMUIdxData structure that has been allocated
100
+ * specifically for this helper. Free the structure when done.
101
+ */
102
+static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
103
+ run_on_cpu_data data)
104
+{
105
+ TLBFlushPageByMMUIdxData *d = data.host_ptr;
106
+
107
+ tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
108
+ g_free(d);
109
+}
110
+
111
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
112
{
113
- target_ulong addr_and_mmu_idx;
114
-
115
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
116
117
/* This should already be page aligned */
118
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
119
- addr_and_mmu_idx |= idxmap;
120
+ addr &= TARGET_PAGE_MASK;
121
122
- if (!qemu_cpu_is_self(cpu)) {
123
- async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
124
- RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
125
+ if (qemu_cpu_is_self(cpu)) {
126
+ tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
127
+ } else if (idxmap < TARGET_PAGE_SIZE) {
128
+ /*
129
+ * Most targets have only a few mmu_idx. In the case where
130
+ * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
131
+ * allocating memory for this operation.
132
+ */
133
+ async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
134
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
135
} else {
136
- tlb_flush_page_by_mmuidx_async_work(
137
- cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
138
+ TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
139
+
140
+ /* Otherwise allocate a structure, freed by the worker. */
141
+ d->addr = addr;
142
+ d->idxmap = idxmap;
143
+ async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
144
+ RUN_ON_CPU_HOST_PTR(d));
145
}
146
}
91
}
147
92
148
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
93
#if defined(CONFIG_SOFTMMU)
149
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
94
-#include "tcg-ldst.inc.c"
150
uint16_t idxmap)
95
+#include "../tcg-ldst.inc.c"
151
{
96
152
- const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
97
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
153
- target_ulong addr_and_mmu_idx;
98
* int mmu_idx, uintptr_t ra)
154
-
99
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
155
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
100
index XXXXXXX..XXXXXXX 100644
156
101
--- a/tcg/mips/tcg-target.inc.c
157
/* This should already be page aligned */
102
+++ b/tcg/mips/tcg-target.inc.c
158
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
103
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
159
- addr_and_mmu_idx |= idxmap;
160
+ addr &= TARGET_PAGE_MASK;
161
162
- flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
163
- fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
164
+ /*
165
+ * Allocate memory to hold addr+idxmap only when needed.
166
+ * See tlb_flush_page_by_mmuidx for details.
167
+ */
168
+ if (idxmap < TARGET_PAGE_SIZE) {
169
+ flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
170
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
171
+ } else {
172
+ CPUState *dst_cpu;
173
+
174
+ /* Allocate a separate data block for each destination cpu. */
175
+ CPU_FOREACH(dst_cpu) {
176
+ if (dst_cpu != src_cpu) {
177
+ TLBFlushPageByMMUIdxData *d
178
+ = g_new(TLBFlushPageByMMUIdxData, 1);
179
+
180
+ d->addr = addr;
181
+ d->idxmap = idxmap;
182
+ async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
183
+ RUN_ON_CPU_HOST_PTR(d));
184
+ }
185
+ }
186
+ }
187
+
188
+ tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
189
}
104
}
190
105
191
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
106
#if defined(CONFIG_SOFTMMU)
192
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
107
-#include "tcg-ldst.inc.c"
193
target_ulong addr,
108
+#include "../tcg-ldst.inc.c"
194
uint16_t idxmap)
109
195
{
110
static void * const qemu_ld_helpers[16] = {
196
- const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
111
[MO_UB] = helper_ret_ldub_mmu,
197
- target_ulong addr_and_mmu_idx;
112
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
198
-
113
index XXXXXXX..XXXXXXX 100644
199
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
114
--- a/tcg/ppc/tcg-target.inc.c
200
115
+++ b/tcg/ppc/tcg-target.inc.c
201
/* This should already be page aligned */
116
@@ -XXX,XX +XXX,XX @@
202
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
117
*/
203
- addr_and_mmu_idx |= idxmap;
118
204
+ addr &= TARGET_PAGE_MASK;
119
#include "elf.h"
205
120
-#include "tcg-pool.inc.c"
206
- flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
121
+#include "../tcg-pool.inc.c"
207
- async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
122
208
+ /*
123
#if defined _CALL_DARWIN || defined __APPLE__
209
+ * Allocate memory to hold addr+idxmap only when needed.
124
#define TCG_TARGET_CALL_DARWIN
210
+ * See tlb_flush_page_by_mmuidx for details.
125
@@ -XXX,XX +XXX,XX @@ static const uint32_t qemu_exts_opc[4] = {
211
+ */
126
};
212
+ if (idxmap < TARGET_PAGE_SIZE) {
127
213
+ flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
128
#if defined (CONFIG_SOFTMMU)
214
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
129
-#include "tcg-ldst.inc.c"
215
+ async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
130
+#include "../tcg-ldst.inc.c"
216
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
131
217
+ } else {
132
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
218
+ CPUState *dst_cpu;
133
* int mmu_idx, uintptr_t ra)
219
+ TLBFlushPageByMMUIdxData *d;
134
diff --git a/tcg/riscv/tcg-target.inc.c b/tcg/riscv/tcg-target.inc.c
220
+
135
index XXXXXXX..XXXXXXX 100644
221
+ /* Allocate a separate data block for each destination cpu. */
136
--- a/tcg/riscv/tcg-target.inc.c
222
+ CPU_FOREACH(dst_cpu) {
137
+++ b/tcg/riscv/tcg-target.inc.c
223
+ if (dst_cpu != src_cpu) {
138
@@ -XXX,XX +XXX,XX @@
224
+ d = g_new(TLBFlushPageByMMUIdxData, 1);
139
* THE SOFTWARE.
225
+ d->addr = addr;
140
*/
226
+ d->idxmap = idxmap;
141
227
+ async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
142
-#include "tcg-pool.inc.c"
228
+ RUN_ON_CPU_HOST_PTR(d));
143
+#include "../tcg-pool.inc.c"
229
+ }
144
230
+ }
145
#ifdef CONFIG_DEBUG_TCG
231
+
146
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
232
+ d = g_new(TLBFlushPageByMMUIdxData, 1);
147
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
233
+ d->addr = addr;
148
*/
234
+ d->idxmap = idxmap;
149
235
+ async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
150
#if defined(CONFIG_SOFTMMU)
236
+ RUN_ON_CPU_HOST_PTR(d));
151
-#include "tcg-ldst.inc.c"
237
+ }
152
+#include "../tcg-ldst.inc.c"
153
154
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
155
* TCGMemOpIdx oi, uintptr_t ra)
156
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
157
index XXXXXXX..XXXXXXX 100644
158
--- a/tcg/s390/tcg-target.inc.c
159
+++ b/tcg/s390/tcg-target.inc.c
160
@@ -XXX,XX +XXX,XX @@
161
#error "unsupported code generation mode"
162
#endif
163
164
-#include "tcg-pool.inc.c"
165
+#include "../tcg-pool.inc.c"
166
#include "elf.h"
167
168
/* ??? The translation blocks produced by TCG are generally small enough to
169
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
238
}
170
}
239
171
240
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
172
#if defined(CONFIG_SOFTMMU)
173
-#include "tcg-ldst.inc.c"
174
+#include "../tcg-ldst.inc.c"
175
176
/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
177
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
178
diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
179
index XXXXXXX..XXXXXXX 100644
180
--- a/tcg/sparc/tcg-target.inc.c
181
+++ b/tcg/sparc/tcg-target.inc.c
182
@@ -XXX,XX +XXX,XX @@
183
* THE SOFTWARE.
184
*/
185
186
-#include "tcg-pool.inc.c"
187
+#include "../tcg-pool.inc.c"
188
189
#ifdef CONFIG_DEBUG_TCG
190
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
241
--
191
--
242
2.20.1
192
2.20.1
243
193
244
194
diff view generated by jsdifflib
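The tlb_flush_page_by_mmuidx rework above relies on the flush address being page aligned, so a small idxmap can ride in the otherwise unused page-offset bits and no allocation is needed. A standalone sketch of that encoding, assuming a hypothetical 4 KiB page size and illustrative TOY_* names rather than QEMU's macros:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical 4 KiB pages: 12 offset bits are free to carry the idxmap. */
#define TOY_PAGE_BITS 12
#define TOY_PAGE_SIZE (UINT64_C(1) << TOY_PAGE_BITS)
#define TOY_PAGE_MASK (~(TOY_PAGE_SIZE - 1))

static uint64_t encode(uint64_t addr, uint16_t idxmap)
{
    assert((addr & ~TOY_PAGE_MASK) == 0);  /* flush address is page aligned */
    assert(idxmap < TOY_PAGE_SIZE);        /* bitmap fits in the offset bits */
    return addr | idxmap;
}

static void decode(uint64_t word, uint64_t *addr, uint16_t *idxmap)
{
    *addr = word & TOY_PAGE_MASK;
    *idxmap = (uint16_t)(word & ~TOY_PAGE_MASK);
}

int main(void)
{
    uint64_t addr;
    uint16_t idxmap;

    decode(encode(UINT64_C(0x7f0000abc000), 0x3), &addr, &idxmap);
    printf("addr=0x%" PRIx64 " idxmap=0x%x\n", addr, (unsigned)idxmap);
    /* A bitmap that does not fit (>= TOY_PAGE_SIZE) would instead travel in
     * a heap-allocated structure, as the patch does with
     * TLBFlushPageByMMUIdxData. */
    return 0;
}

With 4 KiB pages this covers up to 12 mmu_idx bits; anything larger takes the allocated-structure path shown in the hunk above.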
1
From: Carlos Santos <casantos@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
uClibc defines _SC_LEVEL1_ICACHE_LINESIZE and _SC_LEVEL1_DCACHE_LINESIZE
3
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
4
but the corresponding sysconf calls return -1, which is a valid result,
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
meaning that the limit is indeterminate.
5
Reviewed-by: Stefan Weil <sw@weilnetz.de>
6
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Handle this situation using the fallback values instead of crashing due
7
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
to an assertion failure.
8
Message-Id: <20200101112303.20724-4-philmd@redhat.com>
9
10
Signed-off-by: Carlos Santos <casantos@redhat.com>
11
Message-Id: <20191017123713.30192-1-casantos@redhat.com>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
10
---
14
util/cacheinfo.c | 10 ++++++++--
11
{tcg => include/tcg}/tcg-gvec-desc.h | 0
15
1 file changed, 8 insertions(+), 2 deletions(-)
12
{tcg => include/tcg}/tcg-mo.h | 0
13
{tcg => include/tcg}/tcg-op-gvec.h | 0
14
{tcg => include/tcg}/tcg-op.h | 0
15
{tcg => include/tcg}/tcg-opc.h | 0
16
{tcg => include/tcg}/tcg.h | 0
17
MAINTAINERS | 1 +
18
7 files changed, 1 insertion(+)
19
rename {tcg => include/tcg}/tcg-gvec-desc.h (100%)
20
rename {tcg => include/tcg}/tcg-mo.h (100%)
21
rename {tcg => include/tcg}/tcg-op-gvec.h (100%)
22
rename {tcg => include/tcg}/tcg-op.h (100%)
23
rename {tcg => include/tcg}/tcg-opc.h (100%)
24
rename {tcg => include/tcg}/tcg.h (100%)
16
25
17
diff --git a/util/cacheinfo.c b/util/cacheinfo.c
26
diff --git a/tcg/tcg-gvec-desc.h b/include/tcg/tcg-gvec-desc.h
27
similarity index 100%
28
rename from tcg/tcg-gvec-desc.h
29
rename to include/tcg/tcg-gvec-desc.h
30
diff --git a/tcg/tcg-mo.h b/include/tcg/tcg-mo.h
31
similarity index 100%
32
rename from tcg/tcg-mo.h
33
rename to include/tcg/tcg-mo.h
34
diff --git a/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
35
similarity index 100%
36
rename from tcg/tcg-op-gvec.h
37
rename to include/tcg/tcg-op-gvec.h
38
diff --git a/tcg/tcg-op.h b/include/tcg/tcg-op.h
39
similarity index 100%
40
rename from tcg/tcg-op.h
41
rename to include/tcg/tcg-op.h
42
diff --git a/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
43
similarity index 100%
44
rename from tcg/tcg-opc.h
45
rename to include/tcg/tcg-opc.h
46
diff --git a/tcg/tcg.h b/include/tcg/tcg.h
47
similarity index 100%
48
rename from tcg/tcg.h
49
rename to include/tcg/tcg.h
50
diff --git a/MAINTAINERS b/MAINTAINERS
18
index XXXXXXX..XXXXXXX 100644
51
index XXXXXXX..XXXXXXX 100644
19
--- a/util/cacheinfo.c
52
--- a/MAINTAINERS
20
+++ b/util/cacheinfo.c
53
+++ b/MAINTAINERS
21
@@ -XXX,XX +XXX,XX @@ static void sys_cache_info(int *isize, int *dsize)
54
@@ -XXX,XX +XXX,XX @@ Common TCG code
22
static void sys_cache_info(int *isize, int *dsize)
55
M: Richard Henderson <rth@twiddle.net>
23
{
56
S: Maintained
24
# ifdef _SC_LEVEL1_ICACHE_LINESIZE
57
F: tcg/
25
- *isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
58
+F: include/tcg/
26
+ int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
59
27
+ if (tmp_isize > 0) {
60
TCG Plugins
28
+ *isize = tmp_isize;
61
M: Alex Bennée <alex.bennee@linaro.org>
29
+ }
30
# endif
31
# ifdef _SC_LEVEL1_DCACHE_LINESIZE
32
- *dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
33
+ int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
34
+ if (tmp_dsize > 0) {
35
+ *dsize = tmp_dsize;
36
+ }
37
# endif
38
}
39
#endif /* sys_cache_info */
40
--
62
--
41
2.20.1
63
2.20.1
42
64
43
65
diff view generated by jsdifflib
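The cacheinfo change above amounts to treating a non-positive sysconf() result as indeterminate and keeping the built-in fallback. A minimal standalone sketch of that idiom (not the QEMU code itself; the 64-byte fallback and the #ifdef guard mirror the patch, but the program and output here are illustrative):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    long dsize = 64;  /* fallback line size when the limit is indeterminate */

#ifdef _SC_LEVEL1_DCACHE_LINESIZE
    long tmp = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
    if (tmp > 0) {    /* uClibc may return -1 here: keep the fallback */
        dsize = tmp;
    }
#endif

    printf("assumed D-cache line size: %ld bytes\n", dsize);
    return 0;
}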
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
To avoid scrolling each instruction when reviewing tcg
3
All tcg includes are relative to the repository root directory,
4
helpers written for the decodetree script, display the
4
we can safely remove the tcg/ directory from the include search
5
.decode files (similar to header declarations) before
5
path list.
6
the C source (implementation of previous declarations).
7
6
7
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Stefan Weil <sw@weilnetz.de>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
11
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
12
Message-Id: <20200101112303.20724-5-philmd@redhat.com>
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Message-Id: <20191230082856.30556-1-philmd@redhat.com>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
14
---
14
scripts/git.orderfile | 3 +++
15
configure | 1 -
15
1 file changed, 3 insertions(+)
16
1 file changed, 1 deletion(-)
16
17
17
diff --git a/scripts/git.orderfile b/scripts/git.orderfile
18
diff --git a/configure b/configure
18
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100755
19
--- a/scripts/git.orderfile
20
--- a/configure
20
+++ b/scripts/git.orderfile
21
+++ b/configure
21
@@ -XXX,XX +XXX,XX @@ qga/*.json
22
@@ -XXX,XX +XXX,XX @@ elif test "$ARCH" = "riscv32" || test "$ARCH" = "riscv64" ; then
22
# headers
23
else
23
*.h
24
QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES"
24
25
fi
25
+# decoding tree specification
26
-QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg $QEMU_INCLUDES"
26
+*.decode
27
27
+
28
echo "TOOLS=$tools" >> $config_host_mak
28
# code
29
echo "ROMS=$roms" >> $config_host_mak
29
*.c
30
--
30
--
31
2.20.1
31
2.20.1
32
32
33
33
diff view generated by jsdifflib
1
The accel_initialised variable no longer has any setters.
1
Claudio's Huawei address has been defunct for quite a while. In
2
2
3
Fixes: 6f6e1698a68c
3
https://lists.gnu.org/archive/html/qemu-devel/2019-05/msg06872.html
4
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
4
5
he asked for his personal address to be removed as well.
6
7
I will take over officially.
8
9
Cc: Claudio Fontana <claudio.fontana@gmail.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
11
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
13
---
10
vl.c | 3 +--
14
MAINTAINERS | 3 +--
11
1 file changed, 1 insertion(+), 2 deletions(-)
15
1 file changed, 1 insertion(+), 2 deletions(-)
12
16
13
diff --git a/vl.c b/vl.c
17
diff --git a/MAINTAINERS b/MAINTAINERS
14
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
15
--- a/vl.c
19
--- a/MAINTAINERS
16
+++ b/vl.c
20
+++ b/MAINTAINERS
17
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
21
@@ -XXX,XX +XXX,XX @@ F: plugins/
18
{
22
F: tests/plugin
19
const char *accel;
23
20
char **accel_list, **tmp;
24
AArch64 TCG target
21
- bool accel_initialised = false;
25
-M: Claudio Fontana <claudio.fontana@huawei.com>
22
bool init_failed = false;
26
-M: Claudio Fontana <claudio.fontana@gmail.com>
23
27
+M: Richard Henderson <richard.henderson@linaro.org>
24
qemu_opts_foreach(qemu_find_opts("icount"),
28
S: Maintained
25
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
29
L: qemu-arm@nongnu.org
26
30
F: tcg/aarch64/
27
accel_list = g_strsplit(accel, ":", 0);
28
29
- for (tmp = accel_list; !accel_initialised && tmp && *tmp; tmp++) {
30
+ for (tmp = accel_list; tmp && *tmp; tmp++) {
31
/*
32
* Filter invalid accelerators here, to prevent obscenities
33
* such as "-machine accel=tcg,,thread=single".
34
--
31
--
35
2.20.1
32
2.20.1
36
33
37
34
diff view generated by jsdifflib