The following changes since commit 3e08b2b9cb64bff2b73fa9128c0e49bfcde0dd40:

  Merge remote-tracking branch 'remotes/philmd-gitlab/tags/edk2-next-20200121' into staging (2020-01-21 15:29:25 +0000)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20200121

for you to fetch changes up to 75fa376cdab5e5db2c7fdd107358e16f95503ac6:

  scripts/git.orderfile: Display decodetree before C source (2020-01-21 15:26:09 -1000)

----------------------------------------------------------------
Remove another limit to NB_MMU_MODES.
Fix compilation using uclibc.
Fix defaulting of -accel parameters.
Tidy cputlb basic routines.
Adjust git.orderfile for decodetree.

----------------------------------------------------------------
Carlos Santos (1):
      util/cacheinfo: fix crash when compiling with uClibc

Philippe Mathieu-Daudé (1):
      scripts/git.orderfile: Display decodetree before C source

Richard Henderson (14):
      cputlb: Handle NB_MMU_MODES > TARGET_PAGE_BITS_MIN
      vl: Remove unused variable in configure_accelerators
      vl: Reduce scope of variables in configure_accelerators
      vl: Remove useless test in configure_accelerators
      vl: Only choose enabled accelerators in configure_accelerators
      cputlb: Merge tlb_table_flush_by_mmuidx into tlb_flush_one_mmuidx_locked
      cputlb: Make tlb_n_entries private to cputlb.c
      cputlb: Pass CPUTLBDescFast to tlb_n_entries and sizeof_tlb
      cputlb: Hoist tlb portions in tlb_mmu_resize_locked
      cputlb: Hoist tlb portions in tlb_flush_one_mmuidx_locked
      cputlb: Split out tlb_mmu_flush_locked
      cputlb: Partially merge tlb_dyn_init into tlb_init
      cputlb: Initialize tlbs as flushed
      cputlb: Hoist timestamp outside of loops over tlbs

 include/exec/cpu_ldst.h | 5 -
 accel/tcg/cputlb.c | 287 +++++++++++++++++++++++++++++++++---------------
 util/cacheinfo.c | 10 +-
 vl.c | 27 +++--
 scripts/git.orderfile | 3 +
 5 files changed, 223 insertions(+), 109 deletions(-)


v2: Fix incorrectly resolved rebase conflict in patch 16.
v3: Work around clang preprocessor bug in patch 3.

r~

The following changes since commit fd28528ece590dc709d1a893fce2ff2f68ddca70:

  Merge tag 'pull-or1k-20220904' of https://github.com/stffrdhrn/qemu into staging (2022-09-05 18:01:02 -0400)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20220906

for you to fetch changes up to 00c07344fa245b22e895b363320ba4cd0ec1088a:

  target/riscv: Make translator stop before the end of a page (2022-09-06 08:04:26 +0100)

----------------------------------------------------------------
Respect PROT_EXEC in user-only mode.
Fix s390x, i386 and riscv for translations crossing a page.

----------------------------------------------------------------
Ilya Leoshkevich (4):
      linux-user: Clear translations on mprotect()
      accel/tcg: Introduce is_same_page()
      target/s390x: Make translator stop before the end of a page
      target/i386: Make translator stop before the end of a page

Richard Henderson (16):
      linux-user/arm: Mark the commpage executable
      linux-user/hppa: Allocate page zero as a commpage
      linux-user/x86_64: Allocate vsyscall page as a commpage
      linux-user: Honor PT_GNU_STACK
      tests/tcg/i386: Move smc_code2 to an executable section
      accel/tcg: Properly implement get_page_addr_code for user-only
      accel/tcg: Unlock mmap_lock after longjmp
      accel/tcg: Make tb_htable_lookup static
      accel/tcg: Move qemu_ram_addr_from_host_nofail to physmem.c
      accel/tcg: Use probe_access_internal for softmmu get_page_addr_code_hostp
      accel/tcg: Document the faulting lookup in tb_lookup_cmp
      accel/tcg: Remove translator_ldsw
      accel/tcg: Add pc and host_pc params to gen_intermediate_code
      accel/tcg: Add fast path for translator_ld*
      target/riscv: Add MAX_INSN_LEN and insn_len
      target/riscv: Make translator stop before the end of a page

 include/elf.h | 1 +
 include/exec/cpu-common.h | 1 +
 include/exec/exec-all.h | 89 ++++++++----------------
 include/exec/translator.h | 96 ++++++++++++++++---------
 linux-user/arm/target_cpu.h | 4 +-
 linux-user/qemu.h | 1 +
 accel/tcg/cpu-exec.c | 143 ++++++++++++++++++++------------------
 accel/tcg/cputlb.c | 93 +++++++------------------
 accel/tcg/translate-all.c | 29 ++++----
 accel/tcg/translator.c | 135 ++++++++++++++++++++++++++---------
 accel/tcg/user-exec.c | 17 ++++-
 linux-user/elfload.c | 82 ++++++++++++++++++++--
 linux-user/mmap.c | 6 +-
 softmmu/physmem.c | 12 ++++
 target/alpha/translate.c | 5 +-
 target/arm/translate.c | 5 +-
 target/avr/translate.c | 5 +-
 target/cris/translate.c | 5 +-
 target/hexagon/translate.c | 6 +-
 target/hppa/translate.c | 5 +-
 target/i386/tcg/translate.c | 71 +++++++++++--------
 target/loongarch/translate.c | 6 +-
 target/m68k/translate.c | 5 +-
 target/microblaze/translate.c | 5 +-
 target/mips/tcg/translate.c | 5 +-
 target/nios2/translate.c | 5 +-
 target/openrisc/translate.c | 6 +-
 target/ppc/translate.c | 5 +-
 target/riscv/translate.c | 32 +++++++--
 target/rx/translate.c | 5 +-
 target/s390x/tcg/translate.c | 20 ++++--
 target/sh4/translate.c | 5 +-
 target/sparc/translate.c | 5 +-
 target/tricore/translate.c | 6 +-
 target/xtensa/translate.c | 6 +-
 tests/tcg/i386/test-i386.c | 2 +-
 tests/tcg/riscv64/noexec.c | 79 +++++++++++++++++++++
 tests/tcg/s390x/noexec.c | 106 ++++++++++++++++++++++++++++
 tests/tcg/x86_64/noexec.c | 75 ++++++++++++++++++++
 tests/tcg/multiarch/noexec.c.inc | 139 ++++++++++++++++++++++++++++++++++++
 tests/tcg/riscv64/Makefile.target | 1 +
 tests/tcg/s390x/Makefile.target | 1 +
 tests/tcg/x86_64/Makefile.target | 3 +-
 43 files changed, 966 insertions(+), 367 deletions(-)

 create mode 100644 tests/tcg/riscv64/noexec.c
 create mode 100644 tests/tcg/s390x/noexec.c
 create mode 100644 tests/tcg/x86_64/noexec.c
 create mode 100644 tests/tcg/multiarch/noexec.c.inc

New patch
1
We're about to start validating PAGE_EXEC, which means
2
that we've got to mark the commpage executable. We had
3
been placing the commpage outside of reserved_va, which
4
was incorrect and led to an abort.
1
5
6
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
linux-user/arm/target_cpu.h | 4 ++--
11
linux-user/elfload.c | 6 +++++-
12
2 files changed, 7 insertions(+), 3 deletions(-)
13
14
diff --git a/linux-user/arm/target_cpu.h b/linux-user/arm/target_cpu.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/linux-user/arm/target_cpu.h
17
+++ b/linux-user/arm/target_cpu.h
18
@@ -XXX,XX +XXX,XX @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
19
} else {
20
/*
21
* We need to be able to map the commpage.
22
- * See validate_guest_space in linux-user/elfload.c.
23
+ * See init_guest_commpage in linux-user/elfload.c.
24
*/
25
- return 0xffff0000ul;
26
+ return 0xfffffffful;
27
}
28
}
29
#define MAX_RESERVED_VA arm_max_reserved_va
30
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/linux-user/elfload.c
33
+++ b/linux-user/elfload.c
34
@@ -XXX,XX +XXX,XX @@ enum {
35
36
static bool init_guest_commpage(void)
37
{
38
- void *want = g2h_untagged(HI_COMMPAGE & -qemu_host_page_size);
39
+ abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
40
+ void *want = g2h_untagged(commpage);
41
void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
42
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
43
44
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
45
perror("Protecting guest commpage");
46
exit(EXIT_FAILURE);
47
}
48
+
49
+ page_set_flags(commpage, commpage + qemu_host_page_size,
50
+ PAGE_READ | PAGE_EXEC | PAGE_VALID);
51
return true;
52
}
53
54
--
55
2.34.1
1
Do not call get_clock_realtime() in tlb_mmu_resize_locked,
1
We're about to start validating PAGE_EXEC, which means that we've
2
but hoist it outside of any loop over a set of tlbs. There are
2
got to mark page zero executable. We had been special casing this
3
only two (indirect) callers, tlb_flush_by_mmuidx_async_work
3
entirely within translate.
4
and tlb_flush_page_locked, so not onerous.
5
4
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
8
---
11
accel/tcg/cputlb.c | 14 ++++++++------
9
linux-user/elfload.c | 34 +++++++++++++++++++++++++++++++---
12
1 file changed, 8 insertions(+), 6 deletions(-)
10
1 file changed, 31 insertions(+), 3 deletions(-)
13
11
14
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
12
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
15
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
16
--- a/accel/tcg/cputlb.c
14
--- a/linux-user/elfload.c
17
+++ b/accel/tcg/cputlb.c
15
+++ b/linux-user/elfload.c
18
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
16
@@ -XXX,XX +XXX,XX @@ static inline void init_thread(struct target_pt_regs *regs,
19
* high), since otherwise we are likely to have a significant amount of
17
regs->gr[31] = infop->entry;
20
* conflict misses.
21
*/
22
-static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
23
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
24
+ int64_t now)
25
{
26
size_t old_size = tlb_n_entries(fast);
27
size_t rate;
28
size_t new_size = old_size;
29
- int64_t now = get_clock_realtime();
30
int64_t window_len_ms = 100;
31
int64_t window_len_ns = window_len_ms * 1000 * 1000;
32
bool window_expired = now > desc->window_begin_ns + window_len_ns;
33
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
34
memset(desc->vtable, -1, sizeof(desc->vtable));
35
}
18
}
36
19
37
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
20
+#define LO_COMMPAGE 0
38
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
21
+
39
+ int64_t now)
22
+static bool init_guest_commpage(void)
40
{
23
+{
41
CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
24
+ void *want = g2h_untagged(LO_COMMPAGE);
42
CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
25
+ void *addr = mmap(want, qemu_host_page_size, PROT_NONE,
43
26
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
44
- tlb_mmu_resize_locked(desc, fast);
27
+
45
+ tlb_mmu_resize_locked(desc, fast, now);
28
+ if (addr == MAP_FAILED) {
46
tlb_mmu_flush_locked(desc, fast);
29
+ perror("Allocating guest commpage");
30
+ exit(EXIT_FAILURE);
31
+ }
32
+ if (addr != want) {
33
+ return false;
34
+ }
35
+
36
+ /*
37
+ * On Linux, page zero is normally marked execute only + gateway.
38
+ * Normal read or write is supposed to fail (thus PROT_NONE above),
39
+ * but specific offsets have kernel code mapped to raise permissions
40
+ * and implement syscalls. Here, simply mark the page executable.
41
+ * Special case the entry points during translation (see do_page_zero).
42
+ */
43
+ page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
44
+ PAGE_EXEC | PAGE_VALID);
45
+ return true;
46
+}
47
+
48
#endif /* TARGET_HPPA */
49
50
#ifdef TARGET_XTENSA
51
@@ -XXX,XX +XXX,XX @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
47
}
52
}
48
53
49
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
54
#if defined(HI_COMMPAGE)
50
CPUArchState *env = cpu->env_ptr;
55
-#define LO_COMMPAGE 0
51
uint16_t asked = data.host_int;
56
+#define LO_COMMPAGE -1
52
uint16_t all_dirty, work, to_clean;
57
#elif defined(LO_COMMPAGE)
53
+ int64_t now = get_clock_realtime();
58
#define HI_COMMPAGE 0
54
59
#else
55
assert_cpu_is_self(cpu);
60
#define HI_COMMPAGE 0
56
61
-#define LO_COMMPAGE 0
57
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
62
+#define LO_COMMPAGE -1
58
63
#define init_guest_commpage() true
59
for (work = to_clean; work != 0; work &= work - 1) {
64
#endif
60
int mmu_idx = ctz32(work);
65
61
- tlb_flush_one_mmuidx_locked(env, mmu_idx);
66
@@ -XXX,XX +XXX,XX @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
62
+ tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
67
} else {
68
offset = -(HI_COMMPAGE & -align);
69
}
70
- } else if (LO_COMMPAGE != 0) {
71
+ } else if (LO_COMMPAGE != -1) {
72
loaddr = MIN(loaddr, LO_COMMPAGE & -align);
63
}
73
}
64
74
65
qemu_spin_unlock(&env_tlb(env)->c.lock);
66
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
67
tlb_debug("forcing full flush midx %d ("
68
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
69
midx, lp_addr, lp_mask);
70
- tlb_flush_one_mmuidx_locked(env, midx);
71
+ tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
72
} else {
73
if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
74
tlb_n_used_entries_dec(env, midx);
75
--
75
--
76
2.20.1
76
2.34.1
77
78
New patch
1
We're about to start validating PAGE_EXEC, which means that we've
2
got to mark the vsyscall page executable. We had been special
3
casing this entirely within translate.
1
4
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
linux-user/elfload.c | 23 +++++++++++++++++++++++
10
1 file changed, 23 insertions(+)
11
12
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/linux-user/elfload.c
15
+++ b/linux-user/elfload.c
16
@@ -XXX,XX +XXX,XX @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
17
(*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
18
}
19
20
+#if ULONG_MAX > UINT32_MAX
21
+#define INIT_GUEST_COMMPAGE
22
+static bool init_guest_commpage(void)
23
+{
24
+ /*
25
+ * The vsyscall page is at a high negative address aka kernel space,
26
+ * which means that we cannot actually allocate it with target_mmap.
27
+ * We still should be able to use page_set_flags, unless the user
28
+ * has specified -R reserved_va, which would trigger an assert().
29
+ */
30
+ if (reserved_va != 0 &&
31
+ TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
32
+ error_report("Cannot allocate vsyscall page");
33
+ exit(EXIT_FAILURE);
34
+ }
35
+ page_set_flags(TARGET_VSYSCALL_PAGE,
36
+ TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
37
+ PAGE_EXEC | PAGE_VALID);
38
+ return true;
39
+}
40
+#endif
41
#else
42
43
#define ELF_START_MMAP 0x80000000
44
@@ -XXX,XX +XXX,XX @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
45
#else
46
#define HI_COMMPAGE 0
47
#define LO_COMMPAGE -1
48
+#ifndef INIT_GUEST_COMMPAGE
49
#define init_guest_commpage() true
50
#endif
51
+#endif
52
53
static void pgb_fail_in_use(const char *image_name)
54
{
55
--
56
2.34.1
New patch
1
Map the stack executable if required by default or on demand.
1
2
3
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
4
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/elf.h | 1 +
8
linux-user/qemu.h | 1 +
9
linux-user/elfload.c | 19 ++++++++++++++++++-
10
3 files changed, 20 insertions(+), 1 deletion(-)
11
12
diff --git a/include/elf.h b/include/elf.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/elf.h
15
+++ b/include/elf.h
16
@@ -XXX,XX +XXX,XX @@ typedef int64_t Elf64_Sxword;
17
#define PT_LOPROC 0x70000000
18
#define PT_HIPROC 0x7fffffff
19
20
+#define PT_GNU_STACK (PT_LOOS + 0x474e551)
21
#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553)
22
23
#define PT_MIPS_REGINFO 0x70000000
24
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/linux-user/qemu.h
27
+++ b/linux-user/qemu.h
28
@@ -XXX,XX +XXX,XX @@ struct image_info {
29
uint32_t elf_flags;
30
int personality;
31
abi_ulong alignment;
32
+ bool exec_stack;
33
34
/* Generic semihosting knows about these pointers. */
35
abi_ulong arg_strings; /* strings for argv */
36
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/linux-user/elfload.c
39
+++ b/linux-user/elfload.c
40
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
41
#define ELF_ARCH EM_386
42
43
#define ELF_PLATFORM get_elf_platform()
44
+#define EXSTACK_DEFAULT true
45
46
static const char *get_elf_platform(void)
47
{
48
@@ -XXX,XX +XXX,XX @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
49
50
#define ELF_ARCH EM_ARM
51
#define ELF_CLASS ELFCLASS32
52
+#define EXSTACK_DEFAULT true
53
54
static inline void init_thread(struct target_pt_regs *regs,
55
struct image_info *infop)
56
@@ -XXX,XX +XXX,XX @@ static inline void init_thread(struct target_pt_regs *regs,
57
#else
58
59
#define ELF_CLASS ELFCLASS32
60
+#define EXSTACK_DEFAULT true
61
62
#endif
63
64
@@ -XXX,XX +XXX,XX @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en
65
66
#define ELF_CLASS ELFCLASS64
67
#define ELF_ARCH EM_LOONGARCH
68
+#define EXSTACK_DEFAULT true
69
70
#define elf_check_arch(x) ((x) == EM_LOONGARCH)
71
72
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
73
#define ELF_CLASS ELFCLASS32
74
#endif
75
#define ELF_ARCH EM_MIPS
76
+#define EXSTACK_DEFAULT true
77
78
#ifdef TARGET_ABI_MIPSN32
79
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
80
@@ -XXX,XX +XXX,XX @@ static inline void init_thread(struct target_pt_regs *regs,
81
#define bswaptls(ptr) bswap32s(ptr)
82
#endif
83
84
+#ifndef EXSTACK_DEFAULT
85
+#define EXSTACK_DEFAULT false
86
+#endif
87
+
88
#include "elf.h"
89
90
/* We must delay the following stanzas until after "elf.h". */
91
@@ -XXX,XX +XXX,XX @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
92
struct image_info *info)
93
{
94
abi_ulong size, error, guard;
95
+ int prot;
96
97
size = guest_stack_size;
98
if (size < STACK_LOWER_LIMIT) {
99
@@ -XXX,XX +XXX,XX @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
100
guard = qemu_real_host_page_size();
101
}
102
103
- error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
104
+ prot = PROT_READ | PROT_WRITE;
105
+ if (info->exec_stack) {
106
+ prot |= PROT_EXEC;
107
+ }
108
+ error = target_mmap(0, size + guard, prot,
109
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
110
if (error == -1) {
111
perror("mmap stack");
112
@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, int image_fd,
113
*/
114
loaddr = -1, hiaddr = 0;
115
info->alignment = 0;
116
+ info->exec_stack = EXSTACK_DEFAULT;
117
for (i = 0; i < ehdr->e_phnum; ++i) {
118
struct elf_phdr *eppnt = phdr + i;
119
if (eppnt->p_type == PT_LOAD) {
120
@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, int image_fd,
121
if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) {
122
goto exit_errmsg;
123
}
124
+ } else if (eppnt->p_type == PT_GNU_STACK) {
125
+ info->exec_stack = eppnt->p_flags & PF_X;
126
}
127
}
128
129
--
130
2.34.1
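
For readers unfamiliar with PT_GNU_STACK: the loader change above derives stack executability from that program header's PF_X flag, falling back to a per-architecture default (EXSTACK_DEFAULT) when the header is absent. Below is only a rough standalone sketch of that decision, using <elf.h> names; the helper itself is hypothetical and not part of QEMU.

#include <elf.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical helper, for illustration only. */
static bool stack_is_executable(const Elf64_Phdr *phdr, size_t phnum,
                                bool exstack_default)
{
    for (size_t i = 0; i < phnum; i++) {
        if (phdr[i].p_type == PT_GNU_STACK) {
            /* PF_X on PT_GNU_STACK requests an executable stack. */
            return (phdr[i].p_flags & PF_X) != 0;
        }
    }
    /* No PT_GNU_STACK header: fall back to the architecture default. */
    return exstack_default;
}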
New patch
1
From: Ilya Leoshkevich <iii@linux.ibm.com>
1
2
3
Currently it's possible to execute pages that do not have PAGE_EXEC
4
if there is an existing translation block. Fix by invalidating TBs
5
that touch the affected pages.
6
7
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Message-Id: <20220817150506.592862-2-iii@linux.ibm.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
linux-user/mmap.c | 6 ++++--
12
1 file changed, 4 insertions(+), 2 deletions(-)
13
14
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/linux-user/mmap.c
17
+++ b/linux-user/mmap.c
18
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
19
goto error;
20
}
21
}
22
+
23
page_set_flags(start, start + len, page_flags);
24
- mmap_unlock();
25
- return 0;
26
+ tb_invalidate_phys_range(start, start + len);
27
+ ret = 0;
28
+
29
error:
30
mmap_unlock();
31
return ret;
32
--
33
2.34.1
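
The guest-visible effect of the fix can be pictured with a small illustrative program (x86-64 machine code assumed; this is not one of the noexec tests added later in the series): once mprotect() removes PROT_EXEC, jumping back into previously executed code must fault instead of reusing the cached translation.

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    /* A single x86-64 "ret" instruction; error checking omitted. */
    static const unsigned char ret_insn[] = { 0xc3 };
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);

    void *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    memcpy(p, ret_insn, sizeof(ret_insn));
    ((void (*)(void))p)();   /* runs once; under TCG this caches a TB */

    mprotect(p, pagesz, PROT_READ | PROT_WRITE);
    ((void (*)(void))p)();   /* with this fix, now raises SIGSEGV */
    return 0;
}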
1
The result of g_strsplit is never NULL.
1
We're about to start validating PAGE_EXEC, which means
2
that we've got to put this code into a section that is
3
both writable and executable.
2
4
3
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
5
Note that this test did not run on hardware beforehand either.
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Reviewed by: Aleksandar Markovic <amarkovic@wavecomp.com>
8
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
10
---
9
vl.c | 2 +-
11
tests/tcg/i386/test-i386.c | 2 +-
10
1 file changed, 1 insertion(+), 1 deletion(-)
12
1 file changed, 1 insertion(+), 1 deletion(-)
11
13
12
diff --git a/vl.c b/vl.c
14
diff --git a/tests/tcg/i386/test-i386.c b/tests/tcg/i386/test-i386.c
13
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
14
--- a/vl.c
16
--- a/tests/tcg/i386/test-i386.c
15
+++ b/vl.c
17
+++ b/tests/tcg/i386/test-i386.c
16
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
18
@@ -XXX,XX +XXX,XX @@ uint8_t code[] = {
17
19
0xc3, /* ret */
18
accel_list = g_strsplit(accel, ":", 0);
20
};
19
21
20
- for (tmp = accel_list; tmp && *tmp; tmp++) {
22
-asm(".section \".data\"\n"
21
+ for (tmp = accel_list; *tmp; tmp++) {
23
+asm(".section \".data_x\",\"awx\"\n"
22
/*
24
"smc_code2:\n"
23
* Filter invalid accelerators here, to prevent obscenities
25
"movl 4(%esp), %eax\n"
24
* such as "-machine accel=tcg,,thread=single".
26
"movl %eax, smc_patch_addr2 + 1\n"
25
--
27
--
26
2.20.1
28
2.34.1
27
28
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
From: Ilya Leoshkevich <iii@linux.ibm.com>
2
2
3
To avoid scrolling each instruction when reviewing tcg
3
Introduce a function that checks whether a given address is on the same
4
helpers written for the decodetree script, display the
4
page as where disassembly started. Having it improves readability of
5
.decode files (similar to header declarations) before
5
the following patches.
6
the C source (implementation of previous declarations).
7
6
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
8
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Message-Id: <20220811095534.241224-3-iii@linux.ibm.com>
11
Message-Id: <20191230082856.30556-1-philmd@redhat.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
[rth: Make the DisasContextBase parameter const.]
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
13
---
14
scripts/git.orderfile | 3 +++
14
include/exec/translator.h | 10 ++++++++++
15
1 file changed, 3 insertions(+)
15
1 file changed, 10 insertions(+)
16
16
17
diff --git a/scripts/git.orderfile b/scripts/git.orderfile
17
diff --git a/include/exec/translator.h b/include/exec/translator.h
18
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
19
--- a/scripts/git.orderfile
19
--- a/include/exec/translator.h
20
+++ b/scripts/git.orderfile
20
+++ b/include/exec/translator.h
21
@@ -XXX,XX +XXX,XX @@ qga/*.json
21
@@ -XXX,XX +XXX,XX @@ FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
22
# headers
22
23
*.h
23
#undef GEN_TRANSLATOR_LD
24
24
25
+# decoding tree specification
25
+/*
26
+*.decode
26
+ * Return whether addr is on the same page as where disassembly started.
27
+ * Translators can use this to enforce the rule that only single-insn
28
+ * translation blocks are allowed to cross page boundaries.
29
+ */
30
+static inline bool is_same_page(const DisasContextBase *db, target_ulong addr)
31
+{
32
+ return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
33
+}
27
+
34
+
28
# code
35
#endif /* EXEC__TRANSLATOR_H */
29
*.c
30
--
36
--
31
2.20.1
37
2.34.1
32
33
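
To show how is_same_page() is used later in the series, here is a schematic translate_insn hook (not taken from any particular target; sketch_decode_one_insn() is a stand-in for a real decoder) that ends a translation block when the next instruction would start on a different guest page, so that only single-insn TBs may cross a page boundary.

/* Schematic sketch only; not from this patch series. */
static void sketch_translate_insn(DisasContextBase *db, CPUState *cs)
{
    /* Hypothetical decoder: emits TCG ops and returns the insn length. */
    int len = sketch_decode_one_insn(db, cs);

    db->pc_next += len;
    if (db->is_jmp == DISAS_NEXT && !is_same_page(db, db->pc_next)) {
        /* Stop here; the next insn begins a new TB on the next page. */
        db->is_jmp = DISAS_TOO_MANY;
    }
}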
1
Merge into the only caller, but at the same time split
1
The current implementation is a no-op, simply returning addr.
2
out tlb_mmu_init to initialize a single tlb entry.
2
This is incorrect, because we ought to be checking the page
3
permissions for execution.
3
4
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Make get_page_addr_code inline for both implementations.
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
9
Acked-by: Alistair Francis <alistair.francis@wdc.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
11
---
9
accel/tcg/cputlb.c | 33 ++++++++++++++++-----------------
12
include/exec/exec-all.h | 85 ++++++++++++++---------------------------
10
1 file changed, 16 insertions(+), 17 deletions(-)
13
accel/tcg/cputlb.c | 5 ---
14
accel/tcg/user-exec.c | 14 +++++++
15
3 files changed, 42 insertions(+), 62 deletions(-)
11
16
17
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/exec-all.h
20
+++ b/include/exec/exec-all.h
21
@@ -XXX,XX +XXX,XX @@ struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
22
hwaddr index, MemTxAttrs attrs);
23
#endif
24
25
-#if defined(CONFIG_USER_ONLY)
26
-void mmap_lock(void);
27
-void mmap_unlock(void);
28
-bool have_mmap_lock(void);
29
-
30
/**
31
- * get_page_addr_code() - user-mode version
32
+ * get_page_addr_code_hostp()
33
* @env: CPUArchState
34
* @addr: guest virtual address of guest code
35
*
36
- * Returns @addr.
37
+ * See get_page_addr_code() (full-system version) for documentation on the
38
+ * return value.
39
+ *
40
+ * Sets *@hostp (when @hostp is non-NULL) as follows.
41
+ * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
42
+ * to the host address where @addr's content is kept.
43
+ *
44
+ * Note: this function can trigger an exception.
45
+ */
46
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
47
+ void **hostp);
48
+
49
+/**
50
+ * get_page_addr_code()
51
+ * @env: CPUArchState
52
+ * @addr: guest virtual address of guest code
53
+ *
54
+ * If we cannot translate and execute from the entire RAM page, or if
55
+ * the region is not backed by RAM, returns -1. Otherwise, returns the
56
+ * ram_addr_t corresponding to the guest code at @addr.
57
+ *
58
+ * Note: this function can trigger an exception.
59
*/
60
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
61
target_ulong addr)
62
{
63
- return addr;
64
+ return get_page_addr_code_hostp(env, addr, NULL);
65
}
66
67
-/**
68
- * get_page_addr_code_hostp() - user-mode version
69
- * @env: CPUArchState
70
- * @addr: guest virtual address of guest code
71
- *
72
- * Returns @addr.
73
- *
74
- * If @hostp is non-NULL, sets *@hostp to the host address where @addr's content
75
- * is kept.
76
- */
77
-static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
78
- target_ulong addr,
79
- void **hostp)
80
-{
81
- if (hostp) {
82
- *hostp = g2h_untagged(addr);
83
- }
84
- return addr;
85
-}
86
+#if defined(CONFIG_USER_ONLY)
87
+void mmap_lock(void);
88
+void mmap_unlock(void);
89
+bool have_mmap_lock(void);
90
91
/**
92
* adjust_signal_pc:
93
@@ -XXX,XX +XXX,XX @@ G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
94
static inline void mmap_lock(void) {}
95
static inline void mmap_unlock(void) {}
96
97
-/**
98
- * get_page_addr_code() - full-system version
99
- * @env: CPUArchState
100
- * @addr: guest virtual address of guest code
101
- *
102
- * If we cannot translate and execute from the entire RAM page, or if
103
- * the region is not backed by RAM, returns -1. Otherwise, returns the
104
- * ram_addr_t corresponding to the guest code at @addr.
105
- *
106
- * Note: this function can trigger an exception.
107
- */
108
-tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
109
-
110
-/**
111
- * get_page_addr_code_hostp() - full-system version
112
- * @env: CPUArchState
113
- * @addr: guest virtual address of guest code
114
- *
115
- * See get_page_addr_code() (full-system version) for documentation on the
116
- * return value.
117
- *
118
- * Sets *@hostp (when @hostp is non-NULL) as follows.
119
- * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
120
- * to the host address where @addr's content is kept.
121
- *
122
- * Note: this function can trigger an exception.
123
- */
124
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
125
- void **hostp);
126
-
127
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
128
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
129
12
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
130
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
13
index XXXXXXX..XXXXXXX 100644
131
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/cputlb.c
132
--- a/accel/tcg/cputlb.c
15
+++ b/accel/tcg/cputlb.c
133
+++ b/accel/tcg/cputlb.c
16
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
134
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
17
desc->window_max_entries = max_entries;
135
return qemu_ram_addr_from_host_nofail(p);
18
}
136
}
19
137
20
-static void tlb_dyn_init(CPUArchState *env)
138
-tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
21
-{
139
-{
22
- int i;
140
- return get_page_addr_code_hostp(env, addr, NULL);
23
-
24
- for (i = 0; i < NB_MMU_MODES; i++) {
25
- CPUTLBDesc *desc = &env_tlb(env)->d[i];
26
- size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
27
-
28
- tlb_window_reset(desc, get_clock_realtime(), 0);
29
- desc->n_used_entries = 0;
30
- env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
31
- env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
32
- env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
33
- }
34
-}
141
-}
35
-
142
-
36
/**
143
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
37
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
144
CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
38
* @desc: The CPUTLBDesc portion of the TLB
145
{
39
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
146
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
40
tlb_mmu_flush_locked(desc, fast);
147
index XXXXXXX..XXXXXXX 100644
148
--- a/accel/tcg/user-exec.c
149
+++ b/accel/tcg/user-exec.c
150
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
151
return size ? g2h(env_cpu(env), addr) : NULL;
41
}
152
}
42
153
43
+static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
154
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
155
+ void **hostp)
44
+{
156
+{
45
+ size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
157
+ int flags;
46
+
158
+
47
+ tlb_window_reset(desc, now, 0);
159
+ flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
48
+ desc->n_used_entries = 0;
160
+ g_assert(flags == 0);
49
+ fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
161
+
50
+ fast->table = g_new(CPUTLBEntry, n_entries);
162
+ if (hostp) {
51
+ desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
163
+ *hostp = g2h_untagged(addr);
164
+ }
165
+ return addr;
52
+}
166
+}
53
+
167
+
54
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
168
/* The softmmu versions of these helpers are in cputlb.c. */
55
{
169
56
env_tlb(env)->d[mmu_idx].n_used_entries++;
170
/*
57
@@ -XXX,XX +XXX,XX @@ static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
58
void tlb_init(CPUState *cpu)
59
{
60
CPUArchState *env = cpu->env_ptr;
61
+ int64_t now = get_clock_realtime();
62
+ int i;
63
64
qemu_spin_init(&env_tlb(env)->c.lock);
65
66
/* Ensure that cpu_reset performs a full flush. */
67
env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
68
69
- tlb_dyn_init(env);
70
+ for (i = 0; i < NB_MMU_MODES; i++) {
71
+ tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
72
+ }
73
}
74
75
/* flush_all_helper: run fn across all cpus
76
--
171
--
77
2.20.1
172
2.34.1
78
79
1
By choosing "tcg:kvm" when kvm is not enabled, we generate
1
The mmap_lock is held around tb_gen_code. While the comment
2
an incorrect warning: "invalid accelerator kvm".
2
is correct that the lock is dropped when tb_gen_code runs out
3
of memory, the lock is *not* dropped when an exception is
4
raised reading code for translation.
3
5
4
At the same time, use g_str_has_suffix rather than open-coding
6
Acked-by: Alistair Francis <alistair.francis@wdc.com>
5
the same operation.
7
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
8
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Presumably the inverse is also true with --disable-tcg.
8
9
Fixes: 28a0961757fc
10
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Reviewed by: Aleksandar Markovic <amarkovic@wavecomp.com>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
10
---
15
vl.c | 21 +++++++++++++--------
11
accel/tcg/cpu-exec.c | 12 ++++++------
16
1 file changed, 13 insertions(+), 8 deletions(-)
12
accel/tcg/user-exec.c | 3 ---
13
2 files changed, 6 insertions(+), 9 deletions(-)
17
14
18
diff --git a/vl.c b/vl.c
15
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
19
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
20
--- a/vl.c
17
--- a/accel/tcg/cpu-exec.c
21
+++ b/vl.c
18
+++ b/accel/tcg/cpu-exec.c
22
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
19
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
23
20
cpu_tb_exec(cpu, tb, &tb_exit);
24
if (accel == NULL) {
21
cpu_exec_exit(cpu);
25
/* Select the default accelerator */
22
} else {
26
- if (!accel_find("tcg") && !accel_find("kvm")) {
23
- /*
27
- error_report("No accelerator selected and"
24
- * The mmap_lock is dropped by tb_gen_code if it runs out of
28
- " no default accelerator available");
25
- * memory.
29
- exit(1);
26
- */
30
- } else {
27
#ifndef CONFIG_SOFTMMU
31
- int pnlen = strlen(progname);
28
clear_helper_retaddr();
32
- if (pnlen >= 3 && g_str_equal(&progname[pnlen - 3], "kvm")) {
29
- tcg_debug_assert(!have_mmap_lock());
33
+ bool have_tcg = accel_find("tcg");
30
+ if (have_mmap_lock()) {
34
+ bool have_kvm = accel_find("kvm");
31
+ mmap_unlock();
35
+
32
+ }
36
+ if (have_tcg && have_kvm) {
33
#endif
37
+ if (g_str_has_suffix(progname, "kvm")) {
34
if (qemu_mutex_iothread_locked()) {
38
/* If the program name ends with "kvm", we prefer KVM */
35
qemu_mutex_unlock_iothread();
39
accel = "kvm:tcg";
36
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
40
} else {
37
41
accel = "tcg:kvm";
38
#ifndef CONFIG_SOFTMMU
42
}
39
clear_helper_retaddr();
43
+ } else if (have_kvm) {
40
- tcg_debug_assert(!have_mmap_lock());
44
+ accel = "kvm";
41
+ if (have_mmap_lock()) {
45
+ } else if (have_tcg) {
42
+ mmap_unlock();
46
+ accel = "tcg";
43
+ }
47
+ } else {
44
#endif
48
+ error_report("No accelerator selected and"
45
if (qemu_mutex_iothread_locked()) {
49
+ " no default accelerator available");
46
qemu_mutex_unlock_iothread();
50
+ exit(1);
47
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
51
}
48
index XXXXXXX..XXXXXXX 100644
52
}
49
--- a/accel/tcg/user-exec.c
53
-
50
+++ b/accel/tcg/user-exec.c
54
accel_list = g_strsplit(accel, ":", 0);
51
@@ -XXX,XX +XXX,XX @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
55
52
* (and if the translator doesn't handle page boundaries correctly
56
for (tmp = accel_list; *tmp; tmp++) {
53
* there's little we can do about that here). Therefore, do not
54
* trigger the unwinder.
55
- *
56
- * Like tb_gen_code, release the memory lock before cpu_loop_exit.
57
*/
58
- mmap_unlock();
59
*pc = 0;
60
return MMU_INST_FETCH;
61
}
57
--
62
--
58
2.20.1
63
2.34.1
59
60
1
There is only one caller for tlb_table_flush_by_mmuidx. Place
1
The function is not used outside of cpu-exec.c. Move it and
2
the result at the earlier line number, due to an expected user
2
its subroutines up in the file, before the first use.
3
in the near future.
4
3
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
8
---
9
accel/tcg/cputlb.c | 19 +++++++------------
9
include/exec/exec-all.h | 3 -
10
1 file changed, 7 insertions(+), 12 deletions(-)
10
accel/tcg/cpu-exec.c | 122 ++++++++++++++++++++--------------------
11
2 files changed, 61 insertions(+), 64 deletions(-)
11
12
12
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
13
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
13
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/cputlb.c
15
--- a/include/exec/exec-all.h
15
+++ b/accel/tcg/cputlb.c
16
+++ b/include/exec/exec-all.h
16
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
17
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
17
}
18
#endif
19
void tb_flush(CPUState *cpu);
20
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
21
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
22
- target_ulong cs_base, uint32_t flags,
23
- uint32_t cflags);
24
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
25
26
/* GETPC is the true target of the return instruction that we'll execute. */
27
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/accel/tcg/cpu-exec.c
30
+++ b/accel/tcg/cpu-exec.c
31
@@ -XXX,XX +XXX,XX @@ uint32_t curr_cflags(CPUState *cpu)
32
return cflags;
18
}
33
}
19
34
20
-static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
35
+struct tb_desc {
21
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
36
+ target_ulong pc;
22
{
37
+ target_ulong cs_base;
23
tlb_mmu_resize_locked(env, mmu_idx);
38
+ CPUArchState *env;
24
- memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
39
+ tb_page_addr_t phys_page1;
25
env_tlb(env)->d[mmu_idx].n_used_entries = 0;
40
+ uint32_t flags;
26
+ env_tlb(env)->d[mmu_idx].large_page_addr = -1;
41
+ uint32_t cflags;
27
+ env_tlb(env)->d[mmu_idx].large_page_mask = -1;
42
+ uint32_t trace_vcpu_dstate;
28
+ env_tlb(env)->d[mmu_idx].vindex = 0;
43
+};
29
+ memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
44
+
30
+ memset(env_tlb(env)->d[mmu_idx].vtable, -1,
45
+static bool tb_lookup_cmp(const void *p, const void *d)
31
+ sizeof(env_tlb(env)->d[0].vtable));
46
+{
47
+ const TranslationBlock *tb = p;
48
+ const struct tb_desc *desc = d;
49
+
50
+ if (tb->pc == desc->pc &&
51
+ tb->page_addr[0] == desc->phys_page1 &&
52
+ tb->cs_base == desc->cs_base &&
53
+ tb->flags == desc->flags &&
54
+ tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
55
+ tb_cflags(tb) == desc->cflags) {
56
+ /* check next page if needed */
57
+ if (tb->page_addr[1] == -1) {
58
+ return true;
59
+ } else {
60
+ tb_page_addr_t phys_page2;
61
+ target_ulong virt_page2;
62
+
63
+ virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
64
+ phys_page2 = get_page_addr_code(desc->env, virt_page2);
65
+ if (tb->page_addr[1] == phys_page2) {
66
+ return true;
67
+ }
68
+ }
69
+ }
70
+ return false;
71
+}
72
+
73
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
74
+ target_ulong cs_base, uint32_t flags,
75
+ uint32_t cflags)
76
+{
77
+ tb_page_addr_t phys_pc;
78
+ struct tb_desc desc;
79
+ uint32_t h;
80
+
81
+ desc.env = cpu->env_ptr;
82
+ desc.cs_base = cs_base;
83
+ desc.flags = flags;
84
+ desc.cflags = cflags;
85
+ desc.trace_vcpu_dstate = *cpu->trace_dstate;
86
+ desc.pc = pc;
87
+ phys_pc = get_page_addr_code(desc.env, pc);
88
+ if (phys_pc == -1) {
89
+ return NULL;
90
+ }
91
+ desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
92
+ h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
93
+ return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
94
+}
95
+
96
/* Might cause an exception, so have a longjmp destination ready */
97
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
98
target_ulong cs_base,
99
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
100
end_exclusive();
32
}
101
}
33
102
34
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
103
-struct tb_desc {
35
@@ -XXX,XX +XXX,XX @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
104
- target_ulong pc;
36
*pelide = elide;
105
- target_ulong cs_base;
37
}
106
- CPUArchState *env;
38
107
- tb_page_addr_t phys_page1;
39
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
108
- uint32_t flags;
109
- uint32_t cflags;
110
- uint32_t trace_vcpu_dstate;
111
-};
112
-
113
-static bool tb_lookup_cmp(const void *p, const void *d)
40
-{
114
-{
41
- tlb_table_flush_by_mmuidx(env, mmu_idx);
115
- const TranslationBlock *tb = p;
42
- env_tlb(env)->d[mmu_idx].large_page_addr = -1;
116
- const struct tb_desc *desc = d;
43
- env_tlb(env)->d[mmu_idx].large_page_mask = -1;
117
-
44
- env_tlb(env)->d[mmu_idx].vindex = 0;
118
- if (tb->pc == desc->pc &&
45
- memset(env_tlb(env)->d[mmu_idx].vtable, -1,
119
- tb->page_addr[0] == desc->phys_page1 &&
46
- sizeof(env_tlb(env)->d[0].vtable));
120
- tb->cs_base == desc->cs_base &&
121
- tb->flags == desc->flags &&
122
- tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
123
- tb_cflags(tb) == desc->cflags) {
124
- /* check next page if needed */
125
- if (tb->page_addr[1] == -1) {
126
- return true;
127
- } else {
128
- tb_page_addr_t phys_page2;
129
- target_ulong virt_page2;
130
-
131
- virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
132
- phys_page2 = get_page_addr_code(desc->env, virt_page2);
133
- if (tb->page_addr[1] == phys_page2) {
134
- return true;
135
- }
136
- }
137
- }
138
- return false;
47
-}
139
-}
48
-
140
-
49
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
141
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
142
- target_ulong cs_base, uint32_t flags,
143
- uint32_t cflags)
144
-{
145
- tb_page_addr_t phys_pc;
146
- struct tb_desc desc;
147
- uint32_t h;
148
-
149
- desc.env = cpu->env_ptr;
150
- desc.cs_base = cs_base;
151
- desc.flags = flags;
152
- desc.cflags = cflags;
153
- desc.trace_vcpu_dstate = *cpu->trace_dstate;
154
- desc.pc = pc;
155
- phys_pc = get_page_addr_code(desc.env, pc);
156
- if (phys_pc == -1) {
157
- return NULL;
158
- }
159
- desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
160
- h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
161
- return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
162
-}
163
-
164
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
50
{
165
{
51
CPUArchState *env = cpu->env_ptr;
166
if (TCG_TARGET_HAS_direct_jump) {
52
--
167
--
53
2.20.1
168
2.34.1
54
55
1
We will want to be able to flush a tlb without resizing.
1
The base qemu_ram_addr_from_host function is already in
2
softmmu/physmem.c; move the nofail version to be adjacent.
2
3
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
accel/tcg/cputlb.c | 15 ++++++++++-----
9
include/exec/cpu-common.h | 1 +
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
accel/tcg/cputlb.c | 12 ------------
11
softmmu/physmem.c | 12 ++++++++++++
12
3 files changed, 13 insertions(+), 12 deletions(-)
10
13
14
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu-common.h
17
+++ b/include/exec/cpu-common.h
18
@@ -XXX,XX +XXX,XX @@ typedef uintptr_t ram_addr_t;
19
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
20
/* This should not be used by devices. */
21
ram_addr_t qemu_ram_addr_from_host(void *ptr);
22
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
23
RAMBlock *qemu_ram_block_by_name(const char *name);
24
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
25
ram_addr_t *offset);
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
26
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
12
index XXXXXXX..XXXXXXX 100644
27
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
28
--- a/accel/tcg/cputlb.c
14
+++ b/accel/tcg/cputlb.c
29
+++ b/accel/tcg/cputlb.c
15
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
30
@@ -XXX,XX +XXX,XX @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
16
}
31
prot, mmu_idx, size);
17
}
32
}
18
33
19
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
34
-static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
20
+static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
35
-{
21
{
36
- ram_addr_t ram_addr;
22
- CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
23
- CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
24
-
37
-
25
- tlb_mmu_resize_locked(desc, fast);
38
- ram_addr = qemu_ram_addr_from_host(ptr);
26
desc->n_used_entries = 0;
39
- if (ram_addr == RAM_ADDR_INVALID) {
27
desc->large_page_addr = -1;
40
- error_report("Bad ram pointer %p", ptr);
28
desc->large_page_mask = -1;
41
- abort();
29
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
42
- }
30
memset(desc->vtable, -1, sizeof(desc->vtable));
43
- return ram_addr;
44
-}
45
-
46
/*
47
* Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
48
* caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
49
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/softmmu/physmem.c
52
+++ b/softmmu/physmem.c
53
@@ -XXX,XX +XXX,XX @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
54
return block->offset + offset;
31
}
55
}
32
56
33
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
57
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
34
+{
58
+{
35
+ CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
59
+ ram_addr_t ram_addr;
36
+ CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
37
+
60
+
38
+ tlb_mmu_resize_locked(desc, fast);
61
+ ram_addr = qemu_ram_addr_from_host(ptr);
39
+ tlb_mmu_flush_locked(desc, fast);
62
+ if (ram_addr == RAM_ADDR_INVALID) {
63
+ error_report("Bad ram pointer %p", ptr);
64
+ abort();
65
+ }
66
+ return ram_addr;
40
+}
67
+}
41
+
68
+
42
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
69
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
43
{
70
MemTxAttrs attrs, void *buf, hwaddr len);
44
env_tlb(env)->d[mmu_idx].n_used_entries++;
71
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
45
--
72
--
46
2.20.1
73
2.34.1
47
48
1
We do not need the entire CPUArchState to compute these values.
1
Simplify the implementation of get_page_addr_code_hostp
2
by reusing the existing probe_access infrastructure.
2
3
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
accel/tcg/cputlb.c | 15 ++++++++-------
8
accel/tcg/cputlb.c | 76 ++++++++++++++++------------------------------
9
1 file changed, 8 insertions(+), 7 deletions(-)
9
1 file changed, 26 insertions(+), 50 deletions(-)
10
10
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
13
--- a/accel/tcg/cputlb.c
14
+++ b/accel/tcg/cputlb.c
14
+++ b/accel/tcg/cputlb.c
15
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
15
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
16
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
16
victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
17
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
17
(ADDR) & TARGET_PAGE_MASK)
18
18
19
-static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
19
-/*
20
+static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
20
- * Return a ram_addr_t for the virtual address for execution.
21
- *
22
- * Return -1 if we can't translate and execute from an entire page
23
- * of RAM. This will force us to execute by loading and translating
24
- * one insn at a time, without caching.
25
- *
26
- * NOTE: This function will trigger an exception if the page is
27
- * not executable.
28
- */
29
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
30
- void **hostp)
31
-{
32
- uintptr_t mmu_idx = cpu_mmu_index(env, true);
33
- uintptr_t index = tlb_index(env, mmu_idx, addr);
34
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
35
- void *p;
36
-
37
- if (unlikely(!tlb_hit(entry->addr_code, addr))) {
38
- if (!VICTIM_TLB_HIT(addr_code, addr)) {
39
- tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
40
- index = tlb_index(env, mmu_idx, addr);
41
- entry = tlb_entry(env, mmu_idx, addr);
42
-
43
- if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
44
- /*
45
- * The MMU protection covers a smaller range than a target
46
- * page, so we must redo the MMU check for every insn.
47
- */
48
- return -1;
49
- }
50
- }
51
- assert(tlb_hit(entry->addr_code, addr));
52
- }
53
-
54
- if (unlikely(entry->addr_code & TLB_MMIO)) {
55
- /* The region is not backed by RAM. */
56
- if (hostp) {
57
- *hostp = NULL;
58
- }
59
- return -1;
60
- }
61
-
62
- p = (void *)((uintptr_t)addr + entry->addend);
63
- if (hostp) {
64
- *hostp = p;
65
- }
66
- return qemu_ram_addr_from_host_nofail(p);
67
-}
68
-
69
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
70
CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
21
{
71
{
22
- return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
72
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
23
+ return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
73
return flags ? NULL : host;
24
}
74
}
25
75
26
-static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
76
+/*
27
+static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
77
+ * Return a ram_addr_t for the virtual address for execution.
28
{
78
+ *
29
- return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
79
+ * Return -1 if we can't translate and execute from an entire page
30
+ return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
80
+ * of RAM. This will force us to execute by loading and translating
31
}
81
+ * one insn at a time, without caching.
32
82
+ *
33
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
83
+ * NOTE: This function will trigger an exception if the page is
34
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)
84
+ * not executable.
35
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
85
+ */
36
{
86
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
37
CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
87
+ void **hostp)
38
- size_t old_size = tlb_n_entries(env, mmu_idx);
88
+{
39
+ size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
89
+ void *p;
40
size_t rate;
90
+
41
size_t new_size = old_size;
91
+ (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
42
int64_t now = get_clock_realtime();
92
+ cpu_mmu_index(env, true), false, &p, 0);
43
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
93
+ if (p == NULL) {
44
env_tlb(env)->d[mmu_idx].large_page_addr = -1;
94
+ return -1;
45
env_tlb(env)->d[mmu_idx].large_page_mask = -1;
95
+ }
46
env_tlb(env)->d[mmu_idx].vindex = 0;
96
+ if (hostp) {
47
- memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
97
+ *hostp = p;
48
+ memset(env_tlb(env)->f[mmu_idx].table, -1,
98
+ }
49
+ sizeof_tlb(&env_tlb(env)->f[mmu_idx]));
99
+ return qemu_ram_addr_from_host_nofail(p);
50
memset(env_tlb(env)->d[mmu_idx].vtable, -1,
100
+}
51
sizeof(env_tlb(env)->d[0].vtable));
101
+
52
}
102
#ifdef CONFIG_PLUGIN
53
@@ -XXX,XX +XXX,XX @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
103
/*
54
qemu_spin_lock(&env_tlb(env)->c.lock);
104
* Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
55
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
56
unsigned int i;
57
- unsigned int n = tlb_n_entries(env, mmu_idx);
58
+ unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
59
60
for (i = 0; i < n; i++) {
61
tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
62
--
105
--
63
2.20.1
106
2.34.1
64
65
1
The accel_list and tmp variables are only used when manufacturing
1
It was non-obvious to me why we can raise an exception in
2
-machine accel options based on -accel.
2
the middle of a comparison function, but it works.
3
While nearby, use TARGET_PAGE_ALIGN instead of open-coding.
3
4
4
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed by: Aleksandar Markovic <amarkovic@wavecomp.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
vl.c | 3 ++-
8
accel/tcg/cpu-exec.c | 11 ++++++++++-
10
1 file changed, 2 insertions(+), 1 deletion(-)
9
1 file changed, 10 insertions(+), 1 deletion(-)
11
10
12
diff --git a/vl.c b/vl.c
11
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/vl.c
13
--- a/accel/tcg/cpu-exec.c
15
+++ b/vl.c
14
+++ b/accel/tcg/cpu-exec.c
16
@@ -XXX,XX +XXX,XX @@ static int do_configure_accelerator(void *opaque, QemuOpts *opts, Error **errp)
15
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
17
static void configure_accelerators(const char *progname)
16
tb_page_addr_t phys_page2;
18
{
17
target_ulong virt_page2;
19
const char *accel;
18
20
- char **accel_list, **tmp;
19
- virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
21
bool init_failed = false;
20
+ /*
22
21
+ * We know that the first page matched, and an otherwise valid TB
23
qemu_opts_foreach(qemu_find_opts("icount"),
22
+ * encountered an incomplete instruction at the end of that page,
24
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
23
+ * therefore we know that generating a new TB from the current PC
25
24
+ * must also require reading from the next page -- even if the
26
accel = qemu_opt_get(qemu_get_machine_opts(), "accel");
25
+ * second pages do not match, and therefore the resulting insn
27
if (QTAILQ_EMPTY(&qemu_accel_opts.head)) {
26
+ * is different for the new TB. Therefore any exception raised
28
+ char **accel_list, **tmp;
27
+ * here by the faulting lookup is not premature.
29
+
28
+ */
30
if (accel == NULL) {
29
+ virt_page2 = TARGET_PAGE_ALIGN(desc->pc);
31
/* Select the default accelerator */
30
phys_page2 = get_page_addr_code(desc->env, virt_page2);
32
if (!accel_find("tcg") && !accel_find("kvm")) {
31
if (tb->page_addr[1] == phys_page2) {
32
return true;
33
--
33
--
34
2.20.1
34
2.34.1
35
36
No functional change, but the smaller expressions make
the code easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)

The only user can easily use translator_lduw and
adjust the type to signed during the return.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/translator.h | 1 -
target/i386/tcg/translate.c | 2 +-
2 files changed, 1 insertion(+), 2 deletions(-)
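The signed-load point is easy to see in isolation: converting the
unsigned 16-bit result to int16_t on return reproduces exactly what a
dedicated signed loader would give back.  A small standalone sketch,
not the QEMU helpers themselves (the loads here are plain host loads):

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for translator_lduw: an unsigned 16-bit code fetch. */
    static uint16_t lduw(const uint16_t *p)
    {
        return *p;
    }

    /* What translator_ldsw used to provide: the same fetch, viewed as
     * a signed (two's complement) 16-bit value on return. */
    static int16_t ldsw(const uint16_t *p)
    {
        return (int16_t)lduw(p);
    }

    int main(void)
    {
        uint16_t insn = 0x8001;
        assert(lduw(&insn) == 0x8001);
        assert(ldsw(&insn) == -32767);
        return 0;
    }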
11
12
12
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
13
diff --git a/include/exec/translator.h b/include/exec/translator.h
13
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/cputlb.c
15
--- a/include/exec/translator.h
15
+++ b/accel/tcg/cputlb.c
16
+++ b/include/exec/translator.h
16
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
17
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
17
18
18
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
19
#define FOR_EACH_TRANSLATOR_LD(F) \
20
F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
21
- F(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) \
22
F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
23
F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
24
F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
25
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/i386/tcg/translate.c
28
+++ b/target/i386/tcg/translate.c
29
@@ -XXX,XX +XXX,XX @@ static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
30
31
static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
19
{
32
{
20
- tlb_mmu_resize_locked(&env_tlb(env)->d[mmu_idx], &env_tlb(env)->f[mmu_idx]);
33
- return translator_ldsw(env, &s->base, advance_pc(env, s, 2));
21
- env_tlb(env)->d[mmu_idx].n_used_entries = 0;
34
+ return translator_lduw(env, &s->base, advance_pc(env, s, 2));
22
- env_tlb(env)->d[mmu_idx].large_page_addr = -1;
23
- env_tlb(env)->d[mmu_idx].large_page_mask = -1;
24
- env_tlb(env)->d[mmu_idx].vindex = 0;
25
- memset(env_tlb(env)->f[mmu_idx].table, -1,
26
- sizeof_tlb(&env_tlb(env)->f[mmu_idx]));
27
- memset(env_tlb(env)->d[mmu_idx].vtable, -1,
28
- sizeof(env_tlb(env)->d[0].vtable));
29
+ CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
30
+ CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
31
+
32
+ tlb_mmu_resize_locked(desc, fast);
33
+ desc->n_used_entries = 0;
34
+ desc->large_page_addr = -1;
35
+ desc->large_page_mask = -1;
36
+ desc->vindex = 0;
37
+ memset(fast->table, -1, sizeof_tlb(fast));
38
+ memset(desc->vtable, -1, sizeof(desc->vtable));
39
}
35
}
40
36
41
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
37
static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
42
--
38
--
43
2.20.1
39
2.34.1
44
45
The accel_initialised variable no longer has any setters.

Pass these along to translator_loop -- pc may be used instead
of tb->pc, and host_pc is currently unused. Adjust all targets
at one time.
2
4
3
Fixes: 6f6e1698a68c
5
Acked-by: Alistair Francis <alistair.francis@wdc.com>
4
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
6
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed by: Aleksandar Markovic <amarkovic@wavecomp.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
9
---
10
vl.c | 3 +--
10
include/exec/exec-all.h | 1 -
11
1 file changed, 1 insertion(+), 2 deletions(-)
11
include/exec/translator.h | 24 ++++++++++++++++++++----
12
accel/tcg/translate-all.c | 6 ++++--
13
accel/tcg/translator.c | 9 +++++----
14
target/alpha/translate.c | 5 +++--
15
target/arm/translate.c | 5 +++--
16
target/avr/translate.c | 5 +++--
17
target/cris/translate.c | 5 +++--
18
target/hexagon/translate.c | 6 ++++--
19
target/hppa/translate.c | 5 +++--
20
target/i386/tcg/translate.c | 5 +++--
21
target/loongarch/translate.c | 6 ++++--
22
target/m68k/translate.c | 5 +++--
23
target/microblaze/translate.c | 5 +++--
24
target/mips/tcg/translate.c | 5 +++--
25
target/nios2/translate.c | 5 +++--
26
target/openrisc/translate.c | 6 ++++--
27
target/ppc/translate.c | 5 +++--
28
target/riscv/translate.c | 5 +++--
29
target/rx/translate.c | 5 +++--
30
target/s390x/tcg/translate.c | 5 +++--
31
target/sh4/translate.c | 5 +++--
32
target/sparc/translate.c | 5 +++--
33
target/tricore/translate.c | 6 ++++--
34
target/xtensa/translate.c | 6 ++++--
35
25 files changed, 97 insertions(+), 53 deletions(-)
12
36
13
diff --git a/vl.c b/vl.c
37
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
14
index XXXXXXX..XXXXXXX 100644
38
index XXXXXXX..XXXXXXX 100644
15
--- a/vl.c
39
--- a/include/exec/exec-all.h
16
+++ b/vl.c
40
+++ b/include/exec/exec-all.h
17
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
41
@@ -XXX,XX +XXX,XX @@ typedef ram_addr_t tb_page_addr_t;
18
{
42
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
19
const char *accel;
43
#endif
20
char **accel_list, **tmp;
44
21
- bool accel_initialised = false;
45
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
22
bool init_failed = false;
46
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
23
47
target_ulong *data);
24
qemu_opts_foreach(qemu_find_opts("icount"),
48
25
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
49
diff --git a/include/exec/translator.h b/include/exec/translator.h
26
50
index XXXXXXX..XXXXXXX 100644
27
accel_list = g_strsplit(accel, ":", 0);
51
--- a/include/exec/translator.h
28
52
+++ b/include/exec/translator.h
29
- for (tmp = accel_list; !accel_initialised && tmp && *tmp; tmp++) {
53
@@ -XXX,XX +XXX,XX @@
30
+ for (tmp = accel_list; tmp && *tmp; tmp++) {
54
#include "exec/translate-all.h"
31
/*
55
#include "tcg/tcg.h"
32
* Filter invalid accelerators here, to prevent obscenities
56
33
* such as "-machine accel=tcg,,thread=single".
57
+/**
58
+ * gen_intermediate_code
59
+ * @cpu: cpu context
60
+ * @tb: translation block
61
+ * @max_insns: max number of instructions to translate
62
+ * @pc: guest virtual program counter address
63
+ * @host_pc: host physical program counter address
64
+ *
65
+ * This function must be provided by the target, which should create
66
+ * the target-specific DisasContext, and then invoke translator_loop.
67
+ */
68
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
69
+ target_ulong pc, void *host_pc);
70
71
/**
72
* DisasJumpType:
73
@@ -XXX,XX +XXX,XX @@ typedef struct TranslatorOps {
74
75
/**
76
* translator_loop:
77
- * @ops: Target-specific operations.
78
- * @db: Disassembly context.
79
* @cpu: Target vCPU.
80
* @tb: Translation block.
81
* @max_insns: Maximum number of insns to translate.
82
+ * @pc: guest virtual program counter address
83
+ * @host_pc: host physical program counter address
84
+ * @ops: Target-specific operations.
85
+ * @db: Disassembly context.
86
*
87
* Generic translator loop.
88
*
89
@@ -XXX,XX +XXX,XX @@ typedef struct TranslatorOps {
90
* - When single-stepping is enabled (system-wide or on the current vCPU).
91
* - When too many instructions have been translated.
92
*/
93
-void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
94
- CPUState *cpu, TranslationBlock *tb, int max_insns);
95
+void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
96
+ target_ulong pc, void *host_pc,
97
+ const TranslatorOps *ops, DisasContextBase *db);
98
99
void translator_loop_temp_check(DisasContextBase *db);
100
101
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
102
index XXXXXXX..XXXXXXX 100644
103
--- a/accel/tcg/translate-all.c
104
+++ b/accel/tcg/translate-all.c
105
@@ -XXX,XX +XXX,XX @@
106
107
#include "exec/cputlb.h"
108
#include "exec/translate-all.h"
109
+#include "exec/translator.h"
110
#include "qemu/bitmap.h"
111
#include "qemu/qemu-print.h"
112
#include "qemu/timer.h"
113
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
114
TCGProfile *prof = &tcg_ctx->prof;
115
int64_t ti;
116
#endif
117
+ void *host_pc;
118
119
assert_memory_lock();
120
qemu_thread_jit_write();
121
122
- phys_pc = get_page_addr_code(env, pc);
123
+ phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);
124
125
if (phys_pc == -1) {
126
/* Generate a one-shot TB with 1 insn in it */
127
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
128
tcg_func_start(tcg_ctx);
129
130
tcg_ctx->cpu = env_cpu(env);
131
- gen_intermediate_code(cpu, tb, max_insns);
132
+ gen_intermediate_code(cpu, tb, max_insns, pc, host_pc);
133
assert(tb->size != 0);
134
tcg_ctx->cpu = NULL;
135
max_insns = tb->icount;
136
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
137
index XXXXXXX..XXXXXXX 100644
138
--- a/accel/tcg/translator.c
139
+++ b/accel/tcg/translator.c
140
@@ -XXX,XX +XXX,XX @@ static inline void translator_page_protect(DisasContextBase *dcbase,
141
#endif
142
}
143
144
-void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
145
- CPUState *cpu, TranslationBlock *tb, int max_insns)
146
+void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
147
+ target_ulong pc, void *host_pc,
148
+ const TranslatorOps *ops, DisasContextBase *db)
149
{
150
uint32_t cflags = tb_cflags(tb);
151
bool plugin_enabled;
152
153
/* Initialize DisasContext */
154
db->tb = tb;
155
- db->pc_first = tb->pc;
156
- db->pc_next = db->pc_first;
157
+ db->pc_first = pc;
158
+ db->pc_next = pc;
159
db->is_jmp = DISAS_NEXT;
160
db->num_insns = 0;
161
db->max_insns = max_insns;
162
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
163
index XXXXXXX..XXXXXXX 100644
164
--- a/target/alpha/translate.c
165
+++ b/target/alpha/translate.c
166
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
167
.disas_log = alpha_tr_disas_log,
168
};
169
170
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
171
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
172
+ target_ulong pc, void *host_pc)
173
{
174
DisasContext dc;
175
- translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
176
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
177
}
178
179
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
180
diff --git a/target/arm/translate.c b/target/arm/translate.c
181
index XXXXXXX..XXXXXXX 100644
182
--- a/target/arm/translate.c
183
+++ b/target/arm/translate.c
184
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
185
};
186
187
/* generate intermediate code for basic block 'tb'. */
188
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
189
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
190
+ target_ulong pc, void *host_pc)
191
{
192
DisasContext dc = { };
193
const TranslatorOps *ops = &arm_translator_ops;
194
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
195
}
196
#endif
197
198
- translator_loop(ops, &dc.base, cpu, tb, max_insns);
199
+ translator_loop(cpu, tb, max_insns, pc, host_pc, ops, &dc.base);
200
}
201
202
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
203
diff --git a/target/avr/translate.c b/target/avr/translate.c
204
index XXXXXXX..XXXXXXX 100644
205
--- a/target/avr/translate.c
206
+++ b/target/avr/translate.c
207
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
208
.disas_log = avr_tr_disas_log,
209
};
210
211
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
212
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
213
+ target_ulong pc, void *host_pc)
214
{
215
DisasContext dc = { };
216
- translator_loop(&avr_tr_ops, &dc.base, cs, tb, max_insns);
217
+ translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
218
}
219
220
void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb,
221
diff --git a/target/cris/translate.c b/target/cris/translate.c
222
index XXXXXXX..XXXXXXX 100644
223
--- a/target/cris/translate.c
224
+++ b/target/cris/translate.c
225
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps cris_tr_ops = {
226
.disas_log = cris_tr_disas_log,
227
};
228
229
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
230
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
231
+ target_ulong pc, void *host_pc)
232
{
233
DisasContext dc;
234
- translator_loop(&cris_tr_ops, &dc.base, cs, tb, max_insns);
235
+ translator_loop(cs, tb, max_insns, pc, host_pc, &cris_tr_ops, &dc.base);
236
}
237
238
void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)
239
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
240
index XXXXXXX..XXXXXXX 100644
241
--- a/target/hexagon/translate.c
242
+++ b/target/hexagon/translate.c
243
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
244
.disas_log = hexagon_tr_disas_log,
245
};
246
247
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
248
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
249
+ target_ulong pc, void *host_pc)
250
{
251
DisasContext ctx;
252
253
- translator_loop(&hexagon_tr_ops, &ctx.base, cs, tb, max_insns);
254
+ translator_loop(cs, tb, max_insns, pc, host_pc,
255
+ &hexagon_tr_ops, &ctx.base);
256
}
257
258
#define NAME_LEN 64
259
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
260
index XXXXXXX..XXXXXXX 100644
261
--- a/target/hppa/translate.c
262
+++ b/target/hppa/translate.c
263
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
264
.disas_log = hppa_tr_disas_log,
265
};
266
267
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
268
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
269
+ target_ulong pc, void *host_pc)
270
{
271
DisasContext ctx;
272
- translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
273
+ translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
274
}
275
276
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
277
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/i386/tcg/translate.c
280
+++ b/target/i386/tcg/translate.c
281
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
282
};
283
284
/* generate intermediate code for basic block 'tb'. */
285
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
286
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
287
+ target_ulong pc, void *host_pc)
288
{
289
DisasContext dc;
290
291
- translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns);
292
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
293
}
294
295
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
296
diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c
297
index XXXXXXX..XXXXXXX 100644
298
--- a/target/loongarch/translate.c
299
+++ b/target/loongarch/translate.c
300
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
301
.disas_log = loongarch_tr_disas_log,
302
};
303
304
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
305
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
306
+ target_ulong pc, void *host_pc)
307
{
308
DisasContext ctx;
309
310
- translator_loop(&loongarch_tr_ops, &ctx.base, cs, tb, max_insns);
311
+ translator_loop(cs, tb, max_insns, pc, host_pc,
312
+ &loongarch_tr_ops, &ctx.base);
313
}
314
315
void loongarch_translate_init(void)
316
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
317
index XXXXXXX..XXXXXXX 100644
318
--- a/target/m68k/translate.c
319
+++ b/target/m68k/translate.c
320
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
321
.disas_log = m68k_tr_disas_log,
322
};
323
324
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
325
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
326
+ target_ulong pc, void *host_pc)
327
{
328
DisasContext dc;
329
- translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
330
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
331
}
332
333
static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
334
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
335
index XXXXXXX..XXXXXXX 100644
336
--- a/target/microblaze/translate.c
337
+++ b/target/microblaze/translate.c
338
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
339
.disas_log = mb_tr_disas_log,
340
};
341
342
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
343
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
344
+ target_ulong pc, void *host_pc)
345
{
346
DisasContext dc;
347
- translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
348
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
349
}
350
351
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
352
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
353
index XXXXXXX..XXXXXXX 100644
354
--- a/target/mips/tcg/translate.c
355
+++ b/target/mips/tcg/translate.c
356
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
357
.disas_log = mips_tr_disas_log,
358
};
359
360
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
361
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
362
+ target_ulong pc, void *host_pc)
363
{
364
DisasContext ctx;
365
366
- translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns);
367
+ translator_loop(cs, tb, max_insns, pc, host_pc, &mips_tr_ops, &ctx.base);
368
}
369
370
void mips_tcg_init(void)
371
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
372
index XXXXXXX..XXXXXXX 100644
373
--- a/target/nios2/translate.c
374
+++ b/target/nios2/translate.c
375
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps nios2_tr_ops = {
376
.disas_log = nios2_tr_disas_log,
377
};
378
379
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
380
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
381
+ target_ulong pc, void *host_pc)
382
{
383
DisasContext dc;
384
- translator_loop(&nios2_tr_ops, &dc.base, cs, tb, max_insns);
385
+ translator_loop(cs, tb, max_insns, pc, host_pc, &nios2_tr_ops, &dc.base);
386
}
387
388
void nios2_cpu_dump_state(CPUState *cs, FILE *f, int flags)
389
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
390
index XXXXXXX..XXXXXXX 100644
391
--- a/target/openrisc/translate.c
392
+++ b/target/openrisc/translate.c
393
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
394
.disas_log = openrisc_tr_disas_log,
395
};
396
397
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
398
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
399
+ target_ulong pc, void *host_pc)
400
{
401
DisasContext ctx;
402
403
- translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns);
404
+ translator_loop(cs, tb, max_insns, pc, host_pc,
405
+ &openrisc_tr_ops, &ctx.base);
406
}
407
408
void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
409
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
410
index XXXXXXX..XXXXXXX 100644
411
--- a/target/ppc/translate.c
412
+++ b/target/ppc/translate.c
413
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
414
.disas_log = ppc_tr_disas_log,
415
};
416
417
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
418
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
419
+ target_ulong pc, void *host_pc)
420
{
421
DisasContext ctx;
422
423
- translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns);
424
+ translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
425
}
426
427
void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
428
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
429
index XXXXXXX..XXXXXXX 100644
430
--- a/target/riscv/translate.c
431
+++ b/target/riscv/translate.c
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
433
.disas_log = riscv_tr_disas_log,
434
};
435
436
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
437
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
438
+ target_ulong pc, void *host_pc)
439
{
440
DisasContext ctx;
441
442
- translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
443
+ translator_loop(cs, tb, max_insns, pc, host_pc, &riscv_tr_ops, &ctx.base);
444
}
445
446
void riscv_translate_init(void)
447
diff --git a/target/rx/translate.c b/target/rx/translate.c
448
index XXXXXXX..XXXXXXX 100644
449
--- a/target/rx/translate.c
450
+++ b/target/rx/translate.c
451
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
452
.disas_log = rx_tr_disas_log,
453
};
454
455
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
456
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
457
+ target_ulong pc, void *host_pc)
458
{
459
DisasContext dc;
460
461
- translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns);
462
+ translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
463
}
464
465
void restore_state_to_opc(CPURXState *env, TranslationBlock *tb,
466
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
467
index XXXXXXX..XXXXXXX 100644
468
--- a/target/s390x/tcg/translate.c
469
+++ b/target/s390x/tcg/translate.c
470
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
471
.disas_log = s390x_tr_disas_log,
472
};
473
474
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
475
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
476
+ target_ulong pc, void *host_pc)
477
{
478
DisasContext dc;
479
480
- translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
481
+ translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
482
}
483
484
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
485
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
486
index XXXXXXX..XXXXXXX 100644
487
--- a/target/sh4/translate.c
488
+++ b/target/sh4/translate.c
489
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
490
.disas_log = sh4_tr_disas_log,
491
};
492
493
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
494
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
495
+ target_ulong pc, void *host_pc)
496
{
497
DisasContext ctx;
498
499
- translator_loop(&sh4_tr_ops, &ctx.base, cs, tb, max_insns);
500
+ translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
501
}
502
503
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
504
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
505
index XXXXXXX..XXXXXXX 100644
506
--- a/target/sparc/translate.c
507
+++ b/target/sparc/translate.c
508
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
509
.disas_log = sparc_tr_disas_log,
510
};
511
512
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
513
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
514
+ target_ulong pc, void *host_pc)
515
{
516
DisasContext dc = {};
517
518
- translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns);
519
+ translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
520
}
521
522
void sparc_tcg_init(void)
523
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
524
index XXXXXXX..XXXXXXX 100644
525
--- a/target/tricore/translate.c
526
+++ b/target/tricore/translate.c
527
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
528
};
529
530
531
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
532
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
533
+ target_ulong pc, void *host_pc)
534
{
535
DisasContext ctx;
536
- translator_loop(&tricore_tr_ops, &ctx.base, cs, tb, max_insns);
537
+ translator_loop(cs, tb, max_insns, pc, host_pc,
538
+ &tricore_tr_ops, &ctx.base);
539
}
540
541
void
542
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
543
index XXXXXXX..XXXXXXX 100644
544
--- a/target/xtensa/translate.c
545
+++ b/target/xtensa/translate.c
546
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
547
.disas_log = xtensa_tr_disas_log,
548
};
549
550
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
551
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
552
+ target_ulong pc, void *host_pc)
553
{
554
DisasContext dc = {};
555
- translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns);
556
+ translator_loop(cpu, tb, max_insns, pc, host_pc,
557
+ &xtensa_translator_ops, &dc.base);
558
}
559
560
void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
34
--
561
--
35
2.20.1
562
2.34.1
36
37
There are no users of this function outside cputlb.c,
and its interface will change in the next patch.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu_ldst.h | 5 -----
accel/tcg/cputlb.c | 5 +++++
2 files changed, 5 insertions(+), 5 deletions(-)

Cache the translation from guest to host address, so we may
use direct loads when we hit on the primary translation page.

Look up the second translation page only once, during translation.
This obviates another lookup of the second page within tb_gen_code
after translation.

Fixes a bug in that plugin_insn_append should be passed the bytes
in the original memory order, not bswapped by pieces.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/translator.h | 63 +++++++++++--------
accel/tcg/translate-all.c | 23 +++----
accel/tcg/translator.c | 126 +++++++++++++++++++++++++++++---------
3 files changed, 141 insertions(+), 71 deletions(-)
13
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
20
diff --git a/include/exec/translator.h b/include/exec/translator.h
14
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/cpu_ldst.h
22
--- a/include/exec/translator.h
16
+++ b/include/exec/cpu_ldst.h
23
+++ b/include/exec/translator.h
17
@@ -XXX,XX +XXX,XX @@ static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
24
@@ -XXX,XX +XXX,XX @@ typedef enum DisasJumpType {
18
return (addr >> TARGET_PAGE_BITS) & size_mask;
25
* Architecture-agnostic disassembly context.
26
*/
27
typedef struct DisasContextBase {
28
- const TranslationBlock *tb;
29
+ TranslationBlock *tb;
30
target_ulong pc_first;
31
target_ulong pc_next;
32
DisasJumpType is_jmp;
33
int num_insns;
34
int max_insns;
35
bool singlestep_enabled;
36
-#ifdef CONFIG_USER_ONLY
37
- /*
38
- * Guest address of the last byte of the last protected page.
39
- *
40
- * Pages containing the translated instructions are made non-writable in
41
- * order to achieve consistency in case another thread is modifying the
42
- * code while translate_insn() fetches the instruction bytes piecemeal.
43
- * Such writer threads are blocked on mmap_lock() in page_unprotect().
44
- */
45
- target_ulong page_protect_end;
46
-#endif
47
+ void *host_addr[2];
48
} DisasContextBase;
49
50
/**
51
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
52
* the relevant information at translation time.
53
*/
54
55
-#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
56
- type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
57
- abi_ptr pc, bool do_swap); \
58
- static inline type fullname(CPUArchState *env, \
59
- DisasContextBase *dcbase, abi_ptr pc) \
60
- { \
61
- return fullname ## _swap(env, dcbase, pc, false); \
62
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
63
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
64
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
65
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
66
+
67
+static inline uint16_t
68
+translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
69
+ abi_ptr pc, bool do_swap)
70
+{
71
+ uint16_t ret = translator_lduw(env, db, pc);
72
+ if (do_swap) {
73
+ ret = bswap16(ret);
74
}
75
+ return ret;
76
+}
77
78
-#define FOR_EACH_TRANSLATOR_LD(F) \
79
- F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
80
- F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
81
- F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
82
- F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
83
+static inline uint32_t
84
+translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
85
+ abi_ptr pc, bool do_swap)
86
+{
87
+ uint32_t ret = translator_ldl(env, db, pc);
88
+ if (do_swap) {
89
+ ret = bswap32(ret);
90
+ }
91
+ return ret;
92
+}
93
94
-FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
95
-
96
-#undef GEN_TRANSLATOR_LD
97
+static inline uint64_t
98
+translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
99
+ abi_ptr pc, bool do_swap)
100
+{
101
+ uint64_t ret = translator_ldq(env, db, pc);
102
+ if (do_swap) {
103
+ ret = bswap64(ret);
104
+ }
105
+ return ret;
106
+}
107
108
/*
109
* Return whether addr is on the same page as where disassembly started.
110
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
111
index XXXXXXX..XXXXXXX 100644
112
--- a/accel/tcg/translate-all.c
113
+++ b/accel/tcg/translate-all.c
114
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
115
{
116
CPUArchState *env = cpu->env_ptr;
117
TranslationBlock *tb, *existing_tb;
118
- tb_page_addr_t phys_pc, phys_page2;
119
- target_ulong virt_page2;
120
+ tb_page_addr_t phys_pc;
121
tcg_insn_unit *gen_code_buf;
122
int gen_code_size, search_size, max_insns;
123
#ifdef CONFIG_PROFILER
124
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
125
tb->flags = flags;
126
tb->cflags = cflags;
127
tb->trace_vcpu_dstate = *cpu->trace_dstate;
128
+ tb->page_addr[0] = phys_pc;
129
+ tb->page_addr[1] = -1;
130
tcg_ctx->tb_cflags = cflags;
131
tb_overflow:
132
133
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
134
}
135
136
/*
137
- * If the TB is not associated with a physical RAM page then
138
- * it must be a temporary one-insn TB, and we have nothing to do
139
- * except fill in the page_addr[] fields. Return early before
140
- * attempting to link to other TBs or add to the lookup table.
141
+ * If the TB is not associated with a physical RAM page then it must be
142
+ * a temporary one-insn TB, and we have nothing left to do. Return early
143
+ * before attempting to link to other TBs or add to the lookup table.
144
*/
145
- if (phys_pc == -1) {
146
- tb->page_addr[0] = tb->page_addr[1] = -1;
147
+ if (tb->page_addr[0] == -1) {
148
return tb;
149
}
150
151
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
152
*/
153
tcg_tb_insert(tb);
154
155
- /* check next page if needed */
156
- virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
157
- phys_page2 = -1;
158
- if ((pc & TARGET_PAGE_MASK) != virt_page2) {
159
- phys_page2 = get_page_addr_code(env, virt_page2);
160
- }
161
/*
162
* No explicit memory barrier is required -- tb_link_page() makes the
163
* TB visible in a consistent state.
164
*/
165
- existing_tb = tb_link_page(tb, phys_pc, phys_page2);
166
+ existing_tb = tb_link_page(tb, tb->page_addr[0], tb->page_addr[1]);
167
/* if the TB already exists, discard what we just translated */
168
if (unlikely(existing_tb != tb)) {
169
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
170
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/accel/tcg/translator.c
173
+++ b/accel/tcg/translator.c
174
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
175
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
19
}
176
}
20
177
21
-static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
178
-static inline void translator_page_protect(DisasContextBase *dcbase,
179
- target_ulong pc)
22
-{
180
-{
23
- return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
181
-#ifdef CONFIG_USER_ONLY
182
- dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK;
183
- page_protect(pc);
184
-#endif
24
-}
185
-}
25
-
186
-
26
/* Find the TLB entry corresponding to the mmu_idx + address pair. */
187
void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
27
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
188
target_ulong pc, void *host_pc,
28
target_ulong addr)
189
const TranslatorOps *ops, DisasContextBase *db)
29
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
190
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
30
index XXXXXXX..XXXXXXX 100644
191
db->num_insns = 0;
31
--- a/accel/tcg/cputlb.c
192
db->max_insns = max_insns;
32
+++ b/accel/tcg/cputlb.c
193
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
33
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
194
- translator_page_protect(db, db->pc_next);
34
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
195
+ db->host_addr[0] = host_pc;
35
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
196
+ db->host_addr[1] = NULL;
36
197
+
37
+static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
198
+#ifdef CONFIG_USER_ONLY
38
+{
199
+ page_protect(pc);
39
+ return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
200
+#endif
40
+}
201
41
+
202
ops->init_disas_context(db, cpu);
42
static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
203
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
204
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
205
#endif
206
}
207
208
-static inline void translator_maybe_page_protect(DisasContextBase *dcbase,
209
- target_ulong pc, size_t len)
210
+static void *translator_access(CPUArchState *env, DisasContextBase *db,
211
+ target_ulong pc, size_t len)
43
{
212
{
44
return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
213
-#ifdef CONFIG_USER_ONLY
214
- target_ulong end = pc + len - 1;
215
+ void *host;
216
+ target_ulong base, end;
217
+ TranslationBlock *tb;
218
219
- if (end > dcbase->page_protect_end) {
220
- translator_page_protect(dcbase, end);
221
+ tb = db->tb;
222
+
223
+ /* Use slow path if first page is MMIO. */
224
+ if (unlikely(tb->page_addr[0] == -1)) {
225
+ return NULL;
226
}
227
+
228
+ end = pc + len - 1;
229
+ if (likely(is_same_page(db, end))) {
230
+ host = db->host_addr[0];
231
+ base = db->pc_first;
232
+ } else {
233
+ host = db->host_addr[1];
234
+ base = TARGET_PAGE_ALIGN(db->pc_first);
235
+ if (host == NULL) {
236
+ tb->page_addr[1] =
237
+ get_page_addr_code_hostp(env, base, &db->host_addr[1]);
238
+#ifdef CONFIG_USER_ONLY
239
+ page_protect(end);
240
#endif
241
+ /* We cannot handle MMIO as second page. */
242
+ assert(tb->page_addr[1] != -1);
243
+ host = db->host_addr[1];
244
+ }
245
+
246
+ /* Use slow path when crossing pages. */
247
+ if (is_same_page(db, pc)) {
248
+ return NULL;
249
+ }
250
+ }
251
+
252
+ tcg_debug_assert(pc >= base);
253
+ return host + (pc - base);
254
}
255
256
-#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
257
- type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
258
- abi_ptr pc, bool do_swap) \
259
- { \
260
- translator_maybe_page_protect(dcbase, pc, sizeof(type)); \
261
- type ret = load_fn(env, pc); \
262
- if (do_swap) { \
263
- ret = swap_fn(ret); \
264
- } \
265
- plugin_insn_append(pc, &ret, sizeof(ret)); \
266
- return ret; \
267
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
268
+{
269
+ uint8_t ret;
270
+ void *p = translator_access(env, db, pc, sizeof(ret));
271
+
272
+ if (p) {
273
+ plugin_insn_append(pc, p, sizeof(ret));
274
+ return ldub_p(p);
275
}
276
+ ret = cpu_ldub_code(env, pc);
277
+ plugin_insn_append(pc, &ret, sizeof(ret));
278
+ return ret;
279
+}
280
281
-FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
282
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
283
+{
284
+ uint16_t ret, plug;
285
+ void *p = translator_access(env, db, pc, sizeof(ret));
286
287
-#undef GEN_TRANSLATOR_LD
288
+ if (p) {
289
+ plugin_insn_append(pc, p, sizeof(ret));
290
+ return lduw_p(p);
291
+ }
292
+ ret = cpu_lduw_code(env, pc);
293
+ plug = tswap16(ret);
294
+ plugin_insn_append(pc, &plug, sizeof(ret));
295
+ return ret;
296
+}
297
+
298
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
299
+{
300
+ uint32_t ret, plug;
301
+ void *p = translator_access(env, db, pc, sizeof(ret));
302
+
303
+ if (p) {
304
+ plugin_insn_append(pc, p, sizeof(ret));
305
+ return ldl_p(p);
306
+ }
307
+ ret = cpu_ldl_code(env, pc);
308
+ plug = tswap32(ret);
309
+ plugin_insn_append(pc, &plug, sizeof(ret));
310
+ return ret;
311
+}
312
+
313
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
314
+{
315
+ uint64_t ret, plug;
316
+ void *p = translator_access(env, db, pc, sizeof(ret));
317
+
318
+ if (p) {
319
+ plugin_insn_append(pc, p, sizeof(ret));
320
+ return ldq_p(p);
321
+ }
322
+ ret = cpu_ldq_code(env, pc);
323
+ plug = tswap64(ret);
324
+ plugin_insn_append(pc, &plug, sizeof(ret));
325
+ return ret;
326
+}
45
--
327
--
46
2.20.1
328
2.34.1
47
48
There's little point in leaving these data structures half initialized,
and relying on a flush to be done during reset.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)

From: Ilya Leoshkevich <iii@linux.ibm.com>

Right now translator stops right *after* the end of a page, which
breaks reporting of fault locations when the last instruction of a
multi-insn translation block crosses a page boundary.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220817150506.592862-3-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/s390x/tcg/translate.c | 15 +++-
tests/tcg/s390x/noexec.c | 106 +++++++++++++++++++++++
tests/tcg/multiarch/noexec.c.inc | 139 +++++++++++++++++++++++++++++++
tests/tcg/s390x/Makefile.target | 1 +
4 files changed, 257 insertions(+), 4 deletions(-)
create mode 100644 tests/tcg/s390x/noexec.c
create mode 100644 tests/tcg/multiarch/noexec.c.inc
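The page-crossing test the fix relies on is the is_same_page() check
used by the translator: two addresses are on the same page when they
agree in all bits above the page offset.  A minimal standalone
recreation of that check (the page size here is an assumption for
illustration):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))

    /* Mirrors the translator's same-page test: no difference in the
     * bits above the page offset. */
    static bool is_same_page(uint64_t pc_first, uint64_t addr)
    {
        return ((pc_first ^ addr) & TARGET_PAGE_MASK) == 0;
    }

    int main(void)
    {
        uint64_t pc_first = 0x1000;   /* page on which the TB started */

        /* A 2-byte insn at 0x1ffe still ends on the first page. */
        assert(is_same_page(pc_first, 0x1ffe + 2 - 1));

        /* A 2-byte insn at 0x1fff spills onto the next page, so the
         * translator must stop (DISAS_TOO_MANY) before emitting it. */
        assert(!is_same_page(pc_first, 0x1fff + 2 - 1));
        return 0;
    }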
10
19
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
20
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
12
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
22
--- a/target/s390x/tcg/translate.c
14
+++ b/accel/tcg/cputlb.c
23
+++ b/target/s390x/tcg/translate.c
15
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
24
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
16
fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
25
dc->insn_start = tcg_last_op();
17
fast->table = g_new(CPUTLBEntry, n_entries);
18
desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
19
+ tlb_mmu_flush_locked(desc, fast);
20
}
26
}
21
27
22
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
28
+static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
23
@@ -XXX,XX +XXX,XX @@ void tlb_init(CPUState *cpu)
29
+ uint64_t pc)
24
30
+{
25
qemu_spin_init(&env_tlb(env)->c.lock);
31
+ uint64_t insn = ld_code2(env, s, pc);
26
32
+
27
- /* Ensure that cpu_reset performs a full flush. */
33
+ return pc + get_ilen((insn >> 8) & 0xff);
28
- env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
34
+}
29
+ /* All tlbs are initialized flushed. */
35
+
30
+ env_tlb(env)->c.dirty = 0;
36
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
31
37
{
32
for (i = 0; i < NB_MMU_MODES; i++) {
38
CPUS390XState *env = cs->env_ptr;
33
tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
39
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
40
41
dc->base.is_jmp = translate_one(env, dc);
42
if (dc->base.is_jmp == DISAS_NEXT) {
43
- uint64_t page_start;
44
-
45
- page_start = dc->base.pc_first & TARGET_PAGE_MASK;
46
- if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
47
+ if (!is_same_page(dcbase, dc->base.pc_next) ||
48
+ !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next)) ||
49
+ dc->ex_value) {
50
dc->base.is_jmp = DISAS_TOO_MANY;
51
}
52
}
53
diff --git a/tests/tcg/s390x/noexec.c b/tests/tcg/s390x/noexec.c
54
new file mode 100644
55
index XXXXXXX..XXXXXXX
56
--- /dev/null
57
+++ b/tests/tcg/s390x/noexec.c
58
@@ -XXX,XX +XXX,XX @@
59
+#include "../multiarch/noexec.c.inc"
60
+
61
+static void *arch_mcontext_pc(const mcontext_t *ctx)
62
+{
63
+ return (void *)ctx->psw.addr;
64
+}
65
+
66
+static int arch_mcontext_arg(const mcontext_t *ctx)
67
+{
68
+ return ctx->gregs[2];
69
+}
70
+
71
+static void arch_flush(void *p, int len)
72
+{
73
+}
74
+
75
+extern char noexec_1[];
76
+extern char noexec_2[];
77
+extern char noexec_end[];
78
+
79
+asm("noexec_1:\n"
80
+ " lgfi %r2,1\n" /* %r2 is 0 on entry, set 1. */
81
+ "noexec_2:\n"
82
+ " lgfi %r2,2\n" /* %r2 is 0/1; set 2. */
83
+ " br %r14\n" /* return */
84
+ "noexec_end:");
85
+
86
+extern char exrl_1[];
87
+extern char exrl_2[];
88
+extern char exrl_end[];
89
+
90
+asm("exrl_1:\n"
91
+ " exrl %r0, exrl_2\n"
92
+ " br %r14\n"
93
+ "exrl_2:\n"
94
+ " lgfi %r2,2\n"
95
+ "exrl_end:");
96
+
97
+int main(void)
98
+{
99
+ struct noexec_test noexec_tests[] = {
100
+ {
101
+ .name = "fallthrough",
102
+ .test_code = noexec_1,
103
+ .test_len = noexec_end - noexec_1,
104
+ .page_ofs = noexec_1 - noexec_2,
105
+ .entry_ofs = noexec_1 - noexec_2,
106
+ .expected_si_ofs = 0,
107
+ .expected_pc_ofs = 0,
108
+ .expected_arg = 1,
109
+ },
110
+ {
111
+ .name = "jump",
112
+ .test_code = noexec_1,
113
+ .test_len = noexec_end - noexec_1,
114
+ .page_ofs = noexec_1 - noexec_2,
115
+ .entry_ofs = 0,
116
+ .expected_si_ofs = 0,
117
+ .expected_pc_ofs = 0,
118
+ .expected_arg = 0,
119
+ },
120
+ {
121
+ .name = "exrl",
122
+ .test_code = exrl_1,
123
+ .test_len = exrl_end - exrl_1,
124
+ .page_ofs = exrl_1 - exrl_2,
125
+ .entry_ofs = exrl_1 - exrl_2,
126
+ .expected_si_ofs = 0,
127
+ .expected_pc_ofs = exrl_1 - exrl_2,
128
+ .expected_arg = 0,
129
+ },
130
+ {
131
+ .name = "fallthrough [cross]",
132
+ .test_code = noexec_1,
133
+ .test_len = noexec_end - noexec_1,
134
+ .page_ofs = noexec_1 - noexec_2 - 2,
135
+ .entry_ofs = noexec_1 - noexec_2 - 2,
136
+ .expected_si_ofs = 0,
137
+ .expected_pc_ofs = -2,
138
+ .expected_arg = 1,
139
+ },
140
+ {
141
+ .name = "jump [cross]",
142
+ .test_code = noexec_1,
143
+ .test_len = noexec_end - noexec_1,
144
+ .page_ofs = noexec_1 - noexec_2 - 2,
145
+ .entry_ofs = -2,
146
+ .expected_si_ofs = 0,
147
+ .expected_pc_ofs = -2,
148
+ .expected_arg = 0,
149
+ },
150
+ {
151
+ .name = "exrl [cross]",
152
+ .test_code = exrl_1,
153
+ .test_len = exrl_end - exrl_1,
154
+ .page_ofs = exrl_1 - exrl_2 - 2,
155
+ .entry_ofs = exrl_1 - exrl_2 - 2,
156
+ .expected_si_ofs = 0,
157
+ .expected_pc_ofs = exrl_1 - exrl_2 - 2,
158
+ .expected_arg = 0,
159
+ },
160
+ };
161
+
162
+ return test_noexec(noexec_tests,
163
+ sizeof(noexec_tests) / sizeof(noexec_tests[0]));
164
+}
165
diff --git a/tests/tcg/multiarch/noexec.c.inc b/tests/tcg/multiarch/noexec.c.inc
166
new file mode 100644
167
index XXXXXXX..XXXXXXX
168
--- /dev/null
169
+++ b/tests/tcg/multiarch/noexec.c.inc
170
@@ -XXX,XX +XXX,XX @@
171
+/*
172
+ * Common code for arch-specific MMU_INST_FETCH fault testing.
173
+ */
174
+
175
+#define _GNU_SOURCE
176
+
177
+#include <assert.h>
178
+#include <signal.h>
179
+#include <stdio.h>
180
+#include <stdlib.h>
181
+#include <string.h>
182
+#include <errno.h>
183
+#include <unistd.h>
184
+#include <sys/mman.h>
185
+#include <sys/ucontext.h>
186
+
187
+/* Forward declarations. */
188
+
189
+static void *arch_mcontext_pc(const mcontext_t *ctx);
190
+static int arch_mcontext_arg(const mcontext_t *ctx);
191
+static void arch_flush(void *p, int len);
192
+
193
+/* Testing infrastructure. */
194
+
195
+struct noexec_test {
196
+ const char *name;
197
+ const char *test_code;
198
+ int test_len;
199
+ int page_ofs;
200
+ int entry_ofs;
201
+ int expected_si_ofs;
202
+ int expected_pc_ofs;
203
+ int expected_arg;
204
+};
205
+
206
+static void *page_base;
207
+static int page_size;
208
+static const struct noexec_test *current_noexec_test;
209
+
210
+static void handle_err(const char *syscall)
211
+{
212
+ printf("[ FAILED ] %s: %s\n", syscall, strerror(errno));
213
+ exit(EXIT_FAILURE);
214
+}
215
+
216
+static void handle_segv(int sig, siginfo_t *info, void *ucontext)
217
+{
218
+ const struct noexec_test *test = current_noexec_test;
219
+ const mcontext_t *mc = &((ucontext_t *)ucontext)->uc_mcontext;
220
+ void *expected_si;
221
+ void *expected_pc;
222
+ void *pc;
223
+ int arg;
224
+
225
+ if (test == NULL) {
226
+ printf("[ FAILED ] unexpected SEGV\n");
227
+ exit(EXIT_FAILURE);
228
+ }
229
+ current_noexec_test = NULL;
230
+
231
+ expected_si = page_base + test->expected_si_ofs;
232
+ if (info->si_addr != expected_si) {
233
+ printf("[ FAILED ] wrong si_addr (%p != %p)\n",
234
+ info->si_addr, expected_si);
235
+ exit(EXIT_FAILURE);
236
+ }
237
+
238
+ pc = arch_mcontext_pc(mc);
239
+ expected_pc = page_base + test->expected_pc_ofs;
240
+ if (pc != expected_pc) {
241
+ printf("[ FAILED ] wrong pc (%p != %p)\n", pc, expected_pc);
242
+ exit(EXIT_FAILURE);
243
+ }
244
+
245
+ arg = arch_mcontext_arg(mc);
246
+ if (arg != test->expected_arg) {
247
+ printf("[ FAILED ] wrong arg (%d != %d)\n", arg, test->expected_arg);
248
+ exit(EXIT_FAILURE);
249
+ }
250
+
251
+ if (mprotect(page_base, page_size,
252
+ PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
253
+ handle_err("mprotect");
254
+ }
255
+}
256
+
257
+static void test_noexec_1(const struct noexec_test *test)
258
+{
259
+ void *start = page_base + test->page_ofs;
260
+ void (*fn)(int arg) = page_base + test->entry_ofs;
261
+
262
+ memcpy(start, test->test_code, test->test_len);
263
+ arch_flush(start, test->test_len);
264
+
265
+ /* Trigger TB creation in order to test invalidation. */
266
+ fn(0);
267
+
268
+ if (mprotect(page_base, page_size, PROT_NONE) < 0) {
269
+ handle_err("mprotect");
270
+ }
271
+
272
+ /* Trigger SEGV and check that handle_segv() ran. */
273
+ current_noexec_test = test;
274
+ fn(0);
275
+ assert(current_noexec_test == NULL);
276
+}
277
+
278
+static int test_noexec(struct noexec_test *tests, size_t n_tests)
279
+{
280
+ struct sigaction act;
281
+ size_t i;
282
+
283
+ memset(&act, 0, sizeof(act));
284
+ act.sa_sigaction = handle_segv;
285
+ act.sa_flags = SA_SIGINFO;
286
+ if (sigaction(SIGSEGV, &act, NULL) < 0) {
287
+ handle_err("sigaction");
288
+ }
289
+
290
+ page_size = getpagesize();
291
+ page_base = mmap(NULL, 2 * page_size,
292
+ PROT_READ | PROT_WRITE | PROT_EXEC,
293
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
294
+ if (page_base == MAP_FAILED) {
295
+ handle_err("mmap");
296
+ }
297
+ page_base += page_size;
298
+
299
+ for (i = 0; i < n_tests; i++) {
300
+ struct noexec_test *test = &tests[i];
301
+
302
+ printf("[ RUN ] %s\n", test->name);
303
+ test_noexec_1(test);
304
+ printf("[ OK ]\n");
305
+ }
306
+
307
+ printf("[ PASSED ]\n");
308
+ return EXIT_SUCCESS;
309
+}
310
diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target
311
index XXXXXXX..XXXXXXX 100644
312
--- a/tests/tcg/s390x/Makefile.target
313
+++ b/tests/tcg/s390x/Makefile.target
314
@@ -XXX,XX +XXX,XX @@ TESTS+=shift
315
TESTS+=trap
316
TESTS+=signals-s390x
317
TESTS+=branch-relative-long
318
+TESTS+=noexec
319
320
Z14_TESTS=vfminmax
321
vfminmax: LDFLAGS+=-lm
34
--
322
--
35
2.20.1
323
2.34.1
36
37
No functional change, but the smaller expressions make
the code easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>

From: Ilya Leoshkevich <iii@linux.ibm.com>

Right now translator stops right *after* the end of a page, which
breaks reporting of fault locations when the last instruction of a
multi-insn translation block crosses a page boundary.

An implementation, like the one arm and s390x have, would require an
i386 length disassembler, which is burdensome to maintain. Another
alternative would be to single-step at the end of a guest page, but
this may come with a performance impact.

Fix by snapshotting disassembly state and restoring it after we figure
out we crossed a page boundary. This includes rolling back cc_op
updates and emitted ops.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1143
Message-Id: <20220817150506.592862-4-iii@linux.ibm.com>
[rth: Simplify end-of-insn cross-page checks.]
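The snapshot-and-roll-back shape of the fix can be shown in a few lines
of plain C: decode state is saved before an instruction is attempted,
and a siglongjmp with a distinct code abandons the half-decoded
instruction and restores that state.  This is an illustrative sketch
only, not the target/i386 code; the variable names are made up:

    #include <assert.h>
    #include <setjmp.h>
    #include <stdbool.h>

    static sigjmp_buf jmpbuf;
    static int ops_emitted;   /* stand-in for the emitted TCG op stream */
    static int cc_op;         /* stand-in for the lazy flags state */

    static void decode_one(bool crosses_page)
    {
        ops_emitted++;        /* partial work done for the current insn */
        cc_op = 42;
        if (crosses_page) {
            siglongjmp(jmpbuf, 2);   /* code 2: retry in a new TB */
        }
    }

    int main(void)
    {
        int saved_ops = ops_emitted;
        int saved_cc_op = cc_op;

        switch (sigsetjmp(jmpbuf, 0)) {
        case 0:
            decode_one(true);        /* pretend the insn crosses a page */
            break;
        case 2:
            /* Roll back what the aborted insn did, then end the TB here. */
            ops_emitted = saved_ops;
            cc_op = saved_cc_op;
            break;
        default:
            assert(0);
        }

        assert(ops_emitted == 0 && cc_op == 0);
        return 0;
    }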
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
21
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
22
---
9
accel/tcg/cputlb.c | 35 +++++++++++++++++------------------
23
target/i386/tcg/translate.c | 64 ++++++++++++++++-----------
10
1 file changed, 17 insertions(+), 18 deletions(-)
24
tests/tcg/x86_64/noexec.c | 75 ++++++++++++++++++++++++++++++++
11
25
tests/tcg/x86_64/Makefile.target | 3 +-
12
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
26
3 files changed, 116 insertions(+), 26 deletions(-)
27
create mode 100644 tests/tcg/x86_64/noexec.c
28
29
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
13
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/cputlb.c
31
--- a/target/i386/tcg/translate.c
15
+++ b/accel/tcg/cputlb.c
32
+++ b/target/i386/tcg/translate.c
16
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)
33
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
17
34
TCGv_i64 tmp1_i64;
18
/**
35
19
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
36
sigjmp_buf jmpbuf;
20
- * @env: CPU that owns the TLB
37
+ TCGOp *prev_insn_end;
21
- * @mmu_idx: MMU index of the TLB
38
} DisasContext;
22
+ * @desc: The CPUTLBDesc portion of the TLB
39
23
+ * @fast: The CPUTLBDescFast portion of the same TLB
40
/* The environment in which user-only runs is constrained. */
24
*
41
@@ -XXX,XX +XXX,XX @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
25
* Called with tlb_lock_held.
26
*
27
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)
28
* high), since otherwise we are likely to have a significant amount of
29
* conflict misses.
30
*/
31
-static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
32
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
33
{
42
{
34
- CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
43
uint64_t pc = s->pc;
35
- size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
44
36
+ size_t old_size = tlb_n_entries(fast);
45
+ /* This is a subsequent insn that crosses a page boundary. */
37
size_t rate;
46
+ if (s->base.num_insns > 1 &&
38
size_t new_size = old_size;
47
+ !is_same_page(&s->base, s->pc + num_bytes - 1)) {
39
int64_t now = get_clock_realtime();
48
+ siglongjmp(s->jmpbuf, 2);
40
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
49
+ }
41
return;
50
+
51
s->pc += num_bytes;
52
if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
53
/* If the instruction's 16th byte is on a different page than the 1st, a
54
@@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
55
int modrm, reg, rm, mod, op, opreg, val;
56
target_ulong next_eip, tval;
57
target_ulong pc_start = s->base.pc_next;
58
+ bool orig_cc_op_dirty = s->cc_op_dirty;
59
+ CCOp orig_cc_op = s->cc_op;
60
61
s->pc_start = s->pc = pc_start;
62
s->override = -1;
63
@@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
64
s->rip_offset = 0; /* for relative ip address */
65
s->vex_l = 0;
66
s->vex_v = 0;
67
- if (sigsetjmp(s->jmpbuf, 0) != 0) {
68
+ switch (sigsetjmp(s->jmpbuf, 0)) {
69
+ case 0:
70
+ break;
71
+ case 1:
72
gen_exception_gpf(s);
73
return s->pc;
74
+ case 2:
75
+ /* Restore state that may affect the next instruction. */
76
+ s->cc_op_dirty = orig_cc_op_dirty;
77
+ s->cc_op = orig_cc_op;
78
+ s->base.num_insns--;
79
+ tcg_remove_ops_after(s->prev_insn_end);
80
+ s->base.is_jmp = DISAS_TOO_MANY;
81
+ return pc_start;
82
+ default:
83
+ g_assert_not_reached();
42
}
84
}
43
85
44
- g_free(env_tlb(env)->f[mmu_idx].table);
86
prefixes = 0;
45
- g_free(env_tlb(env)->d[mmu_idx].iotlb);
87
@@ -XXX,XX +XXX,XX @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
46
+ g_free(fast->table);
88
{
47
+ g_free(desc->iotlb);
89
DisasContext *dc = container_of(dcbase, DisasContext, base);
48
90
49
tlb_window_reset(desc, now, 0);
91
+ dc->prev_insn_end = tcg_last_op();
50
/* desc->n_used_entries is cleared by the caller */
92
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
51
- env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
52
- env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
53
- env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
54
+ fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
55
+ fast->table = g_try_new(CPUTLBEntry, new_size);
56
+ desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
57
+
58
/*
59
* If the allocations fail, try smaller sizes. We just freed some
60
* memory, so going back to half of new_size has a good chance of working.
61
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
62
* allocations to fail though, so we progressively reduce the allocation
63
* size, aborting if we cannot even allocate the smallest TLB we support.
64
*/
65
- while (env_tlb(env)->f[mmu_idx].table == NULL ||
66
- env_tlb(env)->d[mmu_idx].iotlb == NULL) {
67
+ while (fast->table == NULL || desc->iotlb == NULL) {
68
if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
69
error_report("%s: %s", __func__, strerror(errno));
70
abort();
71
}
72
new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
73
- env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
74
+ fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
75
76
- g_free(env_tlb(env)->f[mmu_idx].table);
77
- g_free(env_tlb(env)->d[mmu_idx].iotlb);
78
- env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
79
- env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
80
+ g_free(fast->table);
81
+ g_free(desc->iotlb);
82
+ fast->table = g_try_new(CPUTLBEntry, new_size);
83
+ desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
84
}
85
}
93
}
86
94
87
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
95
@@ -XXX,XX +XXX,XX @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
88
{
96
#endif
89
- tlb_mmu_resize_locked(env, mmu_idx);
97
90
+ tlb_mmu_resize_locked(&env_tlb(env)->d[mmu_idx], &env_tlb(env)->f[mmu_idx]);
98
pc_next = disas_insn(dc, cpu);
91
env_tlb(env)->d[mmu_idx].n_used_entries = 0;
99
-
92
env_tlb(env)->d[mmu_idx].large_page_addr = -1;
100
- if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
93
env_tlb(env)->d[mmu_idx].large_page_mask = -1;
101
- /* if single step mode, we generate only one instruction and
102
- generate an exception */
103
- /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
104
- the flag and abort the translation to give the irqs a
105
- chance to happen */
106
- dc->base.is_jmp = DISAS_TOO_MANY;
107
- } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
108
- && ((pc_next & TARGET_PAGE_MASK)
109
- != ((pc_next + TARGET_MAX_INSN_SIZE - 1)
110
- & TARGET_PAGE_MASK)
111
- || (pc_next & ~TARGET_PAGE_MASK) == 0)) {
112
- /* Do not cross the boundary of the pages in icount mode,
113
- it can cause an exception. Do it only when boundary is
114
- crossed by the first instruction in the block.
115
- If current instruction already crossed the bound - it's ok,
116
- because an exception hasn't stopped this code.
117
- */
118
- dc->base.is_jmp = DISAS_TOO_MANY;
119
- } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
120
- dc->base.is_jmp = DISAS_TOO_MANY;
121
- }
122
-
123
dc->base.pc_next = pc_next;
124
+
125
+ if (dc->base.is_jmp == DISAS_NEXT) {
126
+ if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
127
+ /*
128
+ * If single step mode, we generate only one instruction and
129
+ * generate an exception.
130
+ * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
131
+ * the flag and abort the translation to give the irqs a
132
+ * chance to happen.
133
+ */
134
+ dc->base.is_jmp = DISAS_TOO_MANY;
135
+ } else if (!is_same_page(&dc->base, pc_next)) {
136
+ dc->base.is_jmp = DISAS_TOO_MANY;
137
+ }
138
+ }
139
}
140
141
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
142
diff --git a/tests/tcg/x86_64/noexec.c b/tests/tcg/x86_64/noexec.c
143
new file mode 100644
144
index XXXXXXX..XXXXXXX
145
--- /dev/null
146
+++ b/tests/tcg/x86_64/noexec.c
147
@@ -XXX,XX +XXX,XX @@
148
+#include "../multiarch/noexec.c.inc"
149
+
150
+static void *arch_mcontext_pc(const mcontext_t *ctx)
151
+{
152
+ return (void *)ctx->gregs[REG_RIP];
153
+}
154
+
155
+int arch_mcontext_arg(const mcontext_t *ctx)
156
+{
157
+ return ctx->gregs[REG_RDI];
158
+}
159
+
160
+static void arch_flush(void *p, int len)
161
+{
162
+}
163
+
164
+extern char noexec_1[];
165
+extern char noexec_2[];
166
+extern char noexec_end[];
167
+
168
+asm("noexec_1:\n"
169
+ " movq $1,%rdi\n" /* %rdi is 0 on entry, set 1. */
170
+ "noexec_2:\n"
171
+ " movq $2,%rdi\n" /* %rdi is 0/1; set 2. */
172
+ " ret\n"
173
+ "noexec_end:");
174
+
175
+int main(void)
176
+{
177
+ struct noexec_test noexec_tests[] = {
178
+ {
179
+ .name = "fallthrough",
180
+ .test_code = noexec_1,
181
+ .test_len = noexec_end - noexec_1,
182
+ .page_ofs = noexec_1 - noexec_2,
183
+ .entry_ofs = noexec_1 - noexec_2,
184
+ .expected_si_ofs = 0,
185
+ .expected_pc_ofs = 0,
186
+ .expected_arg = 1,
187
+ },
188
+ {
189
+ .name = "jump",
190
+ .test_code = noexec_1,
191
+ .test_len = noexec_end - noexec_1,
192
+ .page_ofs = noexec_1 - noexec_2,
193
+ .entry_ofs = 0,
194
+ .expected_si_ofs = 0,
195
+ .expected_pc_ofs = 0,
196
+ .expected_arg = 0,
197
+ },
198
+ {
199
+ .name = "fallthrough [cross]",
200
+ .test_code = noexec_1,
201
+ .test_len = noexec_end - noexec_1,
202
+ .page_ofs = noexec_1 - noexec_2 - 2,
203
+ .entry_ofs = noexec_1 - noexec_2 - 2,
204
+ .expected_si_ofs = 0,
205
+ .expected_pc_ofs = -2,
206
+ .expected_arg = 1,
207
+ },
208
+ {
209
+ .name = "jump [cross]",
210
+ .test_code = noexec_1,
211
+ .test_len = noexec_end - noexec_1,
212
+ .page_ofs = noexec_1 - noexec_2 - 2,
213
+ .entry_ofs = -2,
214
+ .expected_si_ofs = 0,
215
+ .expected_pc_ofs = -2,
216
+ .expected_arg = 0,
217
+ },
218
+ };
219
+
220
+ return test_noexec(noexec_tests,
221
+ sizeof(noexec_tests) / sizeof(noexec_tests[0]));
222
+}
223
diff --git a/tests/tcg/x86_64/Makefile.target b/tests/tcg/x86_64/Makefile.target
224
index XXXXXXX..XXXXXXX 100644
225
--- a/tests/tcg/x86_64/Makefile.target
226
+++ b/tests/tcg/x86_64/Makefile.target
227
@@ -XXX,XX +XXX,XX @@ include $(SRC_PATH)/tests/tcg/i386/Makefile.target
228
229
ifeq ($(filter %-linux-user, $(TARGET)),$(TARGET))
230
X86_64_TESTS += vsyscall
231
+X86_64_TESTS += noexec
232
TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64
233
else
234
TESTS=$(MULTIARCH_TESTS)
235
@@ -XXX,XX +XXX,XX @@ test-x86_64: LDFLAGS+=-lm -lc
236
test-x86_64: test-i386.c test-i386.h test-i386-shift.h test-i386-muldiv.h
237
    $(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
238
239
-vsyscall: $(SRC_PATH)/tests/tcg/x86_64/vsyscall.c
240
+%: $(SRC_PATH)/tests/tcg/x86_64/%.c
241
    $(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
94
--
242
--
95
2.20.1
243
2.34.1
96
97
1
From: Carlos Santos <casantos@redhat.com>
1
These will be useful for properly ending the TB.
2
2
3
uClibc defines _SC_LEVEL1_ICACHE_LINESIZE and _SC_LEVEL1_DCACHE_LINESIZE
3
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
but the corresponding sysconf calls return -1, which is a valid result,
4
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
5
meaning that the limit is indeterminate.
5
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
7
Handle this situation using the fallback values instead of crashing due
8
to an assertion failure.
9
10
Signed-off-by: Carlos Santos <casantos@redhat.com>
11
Message-Id: <20191017123713.30192-1-casantos@redhat.com>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
7
---
14
util/cacheinfo.c | 10 ++++++++--
8
target/riscv/translate.c | 10 +++++++++-
15
1 file changed, 8 insertions(+), 2 deletions(-)
9
1 file changed, 9 insertions(+), 1 deletion(-)
16
10
17
diff --git a/util/cacheinfo.c b/util/cacheinfo.c
11
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
18
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
19
--- a/util/cacheinfo.c
13
--- a/target/riscv/translate.c
20
+++ b/util/cacheinfo.c
14
+++ b/target/riscv/translate.c
21
@@ -XXX,XX +XXX,XX @@ static void sys_cache_info(int *isize, int *dsize)
15
@@ -XXX,XX +XXX,XX @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
22
static void sys_cache_info(int *isize, int *dsize)
16
/* Include decoders for factored-out extensions */
17
#include "decode-XVentanaCondOps.c.inc"
18
19
+/* The specification allows for longer insns, but not supported by qemu. */
20
+#define MAX_INSN_LEN 4
21
+
22
+static inline int insn_len(uint16_t first_word)
23
+{
24
+ return (first_word & 3) == 3 ? 4 : 2;
25
+}
26
+
27
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
23
{
28
{
24
# ifdef _SC_LEVEL1_ICACHE_LINESIZE
29
/*
25
- *isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
30
@@ -XXX,XX +XXX,XX @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
26
+ int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
31
};
27
+ if (tmp_isize > 0) {
32
28
+ *isize = tmp_isize;
33
/* Check for compressed insn */
29
+ }
34
- if (extract16(opcode, 0, 2) != 3) {
30
# endif
35
+ if (insn_len(opcode) == 2) {
31
# ifdef _SC_LEVEL1_DCACHE_LINESIZE
36
if (!has_ext(ctx, RVC)) {
32
- *dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
37
gen_exception_illegal(ctx);
33
+ int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
38
} else {
34
+ if (tmp_dsize > 0) {
35
+ *dsize = tmp_dsize;
36
+ }
37
# endif
38
}
39
#endif /* sys_cache_info */
40
--
39
--
41
2.20.1
40
2.34.1
42
43
1
In target/arm we will shortly have "too many" mmu_idx.
1
Right now the translator stops right *after* the end of a page, which
2
The current minimum barrier is caused by the way in which
2
breaks reporting of fault locations when the last instruction of a
3
tlb_flush_page_by_mmuidx is coded.
3
multi-insn translation block crosses a page boundary.
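
As a rough, self-contained sketch of the check introduced below (simplified;
the real code works on DisasContextBase and reads the next opcode with
cpu_lduw_code, and the 4 KiB page size here is only an assumption for the
demonstration): stop the translation block early whenever the next
instruction would not sit entirely on the TB's first page.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u                    /* assumed for the example */
    #define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

    /* RISC-V: if the two low bits of the first parcel are both set,
     * the insn is 32 bits long, otherwise it is a 16-bit compressed insn. */
    static int insn_len(uint16_t first_word)
    {
        return (first_word & 3) == 3 ? 4 : 2;
    }

    static bool is_same_page(uint64_t tb_start, uint64_t pc)
    {
        return ((tb_start ^ pc) & PAGE_MASK) == 0;
    }

    int main(void)
    {
        uint64_t tb_start = 0x1ffa;            /* TB starts near a page end */
        uint64_t pc = tb_start;
        uint16_t parcels[] = { 0x0001, 0x0003, 0x0001 };  /* 16, 32, 16 bit */

        for (int i = 0; i < 3; i++) {
            int len = insn_len(parcels[i]);
            if (!is_same_page(tb_start, pc + len - 1)) {
                printf("stop: insn at 0x%llx would leave the first page\n",
                       (unsigned long long)pc);
                break;
            }
            pc += len;                         /* "translate" the insn */
        }
        return 0;
    }

Running this stops before the third insn, which mirrors the real translator
setting is_jmp to DISAS_TOO_MANY instead of continuing past the page.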
4
4
5
We can remove this limitation by allocating memory for
5
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1155
6
consumption by the worker. Let us assume that this is
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
the unlikely case, as is true for the majority
7
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
of targets which have so far satisfied the BUILD_BUG_ON,
8
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
9
and only allocate memory when necessary.
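
A minimal sketch of the encoding decision described above (assumptions: a
12-bit page offset and plain host integers; the real code uses target_ulong,
run_on_cpu_data and g_new): when the mmu-index bitmap fits below
TARGET_PAGE_SIZE it rides in the low bits of the page-aligned address,
otherwise a small structure has to be heap-allocated for the worker.

    #include <assert.h>
    #include <inttypes.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12                      /* assumed for the example */
    #define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
    #define TARGET_PAGE_MASK (~(uint64_t)(TARGET_PAGE_SIZE - 1))

    /* Pack a page-aligned address and a small mmu-index bitmap into one word. */
    static uint64_t encode_addr_idxmap(uint64_t addr, uint16_t idxmap)
    {
        assert((addr & ~TARGET_PAGE_MASK) == 0);     /* page aligned */
        assert(idxmap < TARGET_PAGE_SIZE);           /* fits in the offset bits */
        return addr | idxmap;
    }

    static void decode_addr_idxmap(uint64_t word, uint64_t *addr, uint16_t *idxmap)
    {
        *addr = word & TARGET_PAGE_MASK;
        *idxmap = (uint16_t)(word & ~TARGET_PAGE_MASK);
    }

    int main(void)
    {
        uint64_t addr;
        uint16_t idxmap;

        decode_addr_idxmap(encode_addr_idxmap(0x7f000, 0x0005), &addr, &idxmap);
        printf("addr=0x%" PRIx64 " idxmap=0x%x\n", addr, idxmap);
        return 0;
    }

An idxmap of 0x1000 or larger would fail the second assert; that is exactly
the case the patch handles by falling back to an allocated
TLBFlushPageByMMUIdxData that the worker frees when done.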
10
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
10
---
14
accel/tcg/cputlb.c | 167 +++++++++++++++++++++++++++++++++++----------
11
target/riscv/translate.c | 17 +++++--
15
1 file changed, 132 insertions(+), 35 deletions(-)
12
tests/tcg/riscv64/noexec.c | 79 +++++++++++++++++++++++++++++++
13
tests/tcg/riscv64/Makefile.target | 1 +
14
3 files changed, 93 insertions(+), 4 deletions(-)
15
create mode 100644 tests/tcg/riscv64/noexec.c
16
16
17
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
17
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
18
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
19
--- a/accel/tcg/cputlb.c
19
--- a/target/riscv/translate.c
20
+++ b/accel/tcg/cputlb.c
20
+++ b/target/riscv/translate.c
21
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
21
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
22
}
23
ctx->nftemp = 0;
24
25
+ /* Only the first insn within a TB is allowed to cross a page boundary. */
26
if (ctx->base.is_jmp == DISAS_NEXT) {
27
- target_ulong page_start;
28
-
29
- page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
30
- if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
31
+ if (!is_same_page(&ctx->base, ctx->base.pc_next)) {
32
ctx->base.is_jmp = DISAS_TOO_MANY;
33
+ } else {
34
+ unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK;
35
+
36
+ if (page_ofs > TARGET_PAGE_SIZE - MAX_INSN_LEN) {
37
+ uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next);
38
+ int len = insn_len(next_insn);
39
+
40
+ if (!is_same_page(&ctx->base, ctx->base.pc_next + len)) {
41
+ ctx->base.is_jmp = DISAS_TOO_MANY;
42
+ }
43
+ }
44
}
22
}
45
}
23
}
46
}
24
47
diff --git a/tests/tcg/riscv64/noexec.c b/tests/tcg/riscv64/noexec.c
25
-/* As we are going to hijack the bottom bits of the page address for a
48
new file mode 100644
26
- * mmuidx bit mask we need to fail to build if we can't do that
49
index XXXXXXX..XXXXXXX
27
+/**
50
--- /dev/null
28
+ * tlb_flush_page_by_mmuidx_async_0:
51
+++ b/tests/tcg/riscv64/noexec.c
29
+ * @cpu: cpu on which to flush
52
@@ -XXX,XX +XXX,XX @@
30
+ * @addr: page of virtual address to flush
53
+#include "../multiarch/noexec.c.inc"
31
+ * @idxmap: set of mmu_idx to flush
54
+
32
+ *
55
+static void *arch_mcontext_pc(const mcontext_t *ctx)
33
+ * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
34
+ * at @addr from the tlbs indicated by @idxmap from @cpu.
35
*/
36
-QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
37
-
38
-static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
39
- run_on_cpu_data data)
40
+static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
41
+ target_ulong addr,
42
+ uint16_t idxmap)
43
{
44
CPUArchState *env = cpu->env_ptr;
45
- target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
46
- target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
47
- unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
48
int mmu_idx;
49
50
assert_cpu_is_self(cpu);
51
52
- tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
53
- addr, mmu_idx_bitmap);
54
+ tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
55
56
qemu_spin_lock(&env_tlb(env)->c.lock);
57
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
58
- if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
59
+ if ((idxmap >> mmu_idx) & 1) {
60
tlb_flush_page_locked(env, mmu_idx, addr);
61
}
62
}
63
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
64
tb_flush_jmp_cache(cpu, addr);
65
}
66
67
+/**
68
+ * tlb_flush_page_by_mmuidx_async_1:
69
+ * @cpu: cpu on which to flush
70
+ * @data: encoded addr + idxmap
71
+ *
72
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
73
+ * async_run_on_cpu. The idxmap parameter is encoded in the page
74
+ * offset of the target_ptr field. This limits the set of mmu_idx
75
+ * that can be passed via this method.
76
+ */
77
+static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
78
+ run_on_cpu_data data)
79
+{
56
+{
80
+ target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
57
+ return (void *)ctx->__gregs[REG_PC];
81
+ target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
82
+ uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
83
+
84
+ tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
85
+}
58
+}
86
+
59
+
87
+typedef struct {
60
+static int arch_mcontext_arg(const mcontext_t *ctx)
88
+ target_ulong addr;
89
+ uint16_t idxmap;
90
+} TLBFlushPageByMMUIdxData;
91
+
92
+/**
93
+ * tlb_flush_page_by_mmuidx_async_2:
94
+ * @cpu: cpu on which to flush
95
+ * @data: allocated addr + idxmap
96
+ *
97
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
98
+ * async_run_on_cpu. The addr+idxmap parameters are stored in a
99
+ * TLBFlushPageByMMUIdxData structure that has been allocated
100
+ * specifically for this helper. Free the structure when done.
101
+ */
102
+static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
103
+ run_on_cpu_data data)
104
+{
61
+{
105
+ TLBFlushPageByMMUIdxData *d = data.host_ptr;
62
+ return ctx->__gregs[REG_A0];
106
+
107
+ tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
108
+ g_free(d);
109
+}
63
+}
110
+
64
+
111
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
65
+static void arch_flush(void *p, int len)
112
{
66
+{
113
- target_ulong addr_and_mmu_idx;
67
+ __builtin___clear_cache(p, p + len);
114
-
68
+}
115
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
116
117
/* This should already be page aligned */
118
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
119
- addr_and_mmu_idx |= idxmap;
120
+ addr &= TARGET_PAGE_MASK;
121
122
- if (!qemu_cpu_is_self(cpu)) {
123
- async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
124
- RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
125
+ if (qemu_cpu_is_self(cpu)) {
126
+ tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
127
+ } else if (idxmap < TARGET_PAGE_SIZE) {
128
+ /*
129
+ * Most targets have only a few mmu_idx. In the case where
130
+ * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
131
+ * allocating memory for this operation.
132
+ */
133
+ async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
134
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
135
} else {
136
- tlb_flush_page_by_mmuidx_async_work(
137
- cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
138
+ TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
139
+
69
+
140
+ /* Otherwise allocate a structure, freed by the worker. */
70
+extern char noexec_1[];
141
+ d->addr = addr;
71
+extern char noexec_2[];
142
+ d->idxmap = idxmap;
72
+extern char noexec_end[];
143
+ async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
144
+ RUN_ON_CPU_HOST_PTR(d));
145
}
146
}
147
148
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
149
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
150
uint16_t idxmap)
151
{
152
- const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
153
- target_ulong addr_and_mmu_idx;
154
-
155
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
156
157
/* This should already be page aligned */
158
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
159
- addr_and_mmu_idx |= idxmap;
160
+ addr &= TARGET_PAGE_MASK;
161
162
- flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
163
- fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
164
+ /*
165
+ * Allocate memory to hold addr+idxmap only when needed.
166
+ * See tlb_flush_page_by_mmuidx for details.
167
+ */
168
+ if (idxmap < TARGET_PAGE_SIZE) {
169
+ flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
170
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
171
+ } else {
172
+ CPUState *dst_cpu;
173
+
73
+
174
+ /* Allocate a separate data block for each destination cpu. */
74
+asm(".option push\n"
175
+ CPU_FOREACH(dst_cpu) {
75
+ ".option norvc\n"
176
+ if (dst_cpu != src_cpu) {
76
+ "noexec_1:\n"
177
+ TLBFlushPageByMMUIdxData *d
77
+ " li a0,1\n" /* a0 is 0 on entry, set 1. */
178
+ = g_new(TLBFlushPageByMMUIdxData, 1);
78
+ "noexec_2:\n"
79
+ " li a0,2\n" /* a0 is 0/1; set 2. */
80
+ " ret\n"
81
+ "noexec_end:\n"
82
+ ".option pop");
179
+
83
+
180
+ d->addr = addr;
84
+int main(void)
181
+ d->idxmap = idxmap;
85
+{
182
+ async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
86
+ struct noexec_test noexec_tests[] = {
183
+ RUN_ON_CPU_HOST_PTR(d));
87
+ {
184
+ }
88
+ .name = "fallthrough",
185
+ }
89
+ .test_code = noexec_1,
186
+ }
90
+ .test_len = noexec_end - noexec_1,
91
+ .page_ofs = noexec_1 - noexec_2,
92
+ .entry_ofs = noexec_1 - noexec_2,
93
+ .expected_si_ofs = 0,
94
+ .expected_pc_ofs = 0,
95
+ .expected_arg = 1,
96
+ },
97
+ {
98
+ .name = "jump",
99
+ .test_code = noexec_1,
100
+ .test_len = noexec_end - noexec_1,
101
+ .page_ofs = noexec_1 - noexec_2,
102
+ .entry_ofs = 0,
103
+ .expected_si_ofs = 0,
104
+ .expected_pc_ofs = 0,
105
+ .expected_arg = 0,
106
+ },
107
+ {
108
+ .name = "fallthrough [cross]",
109
+ .test_code = noexec_1,
110
+ .test_len = noexec_end - noexec_1,
111
+ .page_ofs = noexec_1 - noexec_2 - 2,
112
+ .entry_ofs = noexec_1 - noexec_2 - 2,
113
+ .expected_si_ofs = 0,
114
+ .expected_pc_ofs = -2,
115
+ .expected_arg = 1,
116
+ },
117
+ {
118
+ .name = "jump [cross]",
119
+ .test_code = noexec_1,
120
+ .test_len = noexec_end - noexec_1,
121
+ .page_ofs = noexec_1 - noexec_2 - 2,
122
+ .entry_ofs = -2,
123
+ .expected_si_ofs = 0,
124
+ .expected_pc_ofs = -2,
125
+ .expected_arg = 0,
126
+ },
127
+ };
187
+
128
+
188
+ tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
129
+ return test_noexec(noexec_tests,
189
}
130
+ sizeof(noexec_tests) / sizeof(noexec_tests[0]));
190
131
+}
191
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
132
diff --git a/tests/tcg/riscv64/Makefile.target b/tests/tcg/riscv64/Makefile.target
192
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
133
index XXXXXXX..XXXXXXX 100644
193
target_ulong addr,
134
--- a/tests/tcg/riscv64/Makefile.target
194
uint16_t idxmap)
135
+++ b/tests/tcg/riscv64/Makefile.target
195
{
136
@@ -XXX,XX +XXX,XX @@
196
- const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
137
197
- target_ulong addr_and_mmu_idx;
138
VPATH += $(SRC_PATH)/tests/tcg/riscv64
198
-
139
TESTS += test-div
199
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
140
+TESTS += noexec
200
201
/* This should already be page aligned */
202
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
203
- addr_and_mmu_idx |= idxmap;
204
+ addr &= TARGET_PAGE_MASK;
205
206
- flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
207
- async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
208
+ /*
209
+ * Allocate memory to hold addr+idxmap only when needed.
210
+ * See tlb_flush_page_by_mmuidx for details.
211
+ */
212
+ if (idxmap < TARGET_PAGE_SIZE) {
213
+ flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
214
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
215
+ async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
216
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
217
+ } else {
218
+ CPUState *dst_cpu;
219
+ TLBFlushPageByMMUIdxData *d;
220
+
221
+ /* Allocate a separate data block for each destination cpu. */
222
+ CPU_FOREACH(dst_cpu) {
223
+ if (dst_cpu != src_cpu) {
224
+ d = g_new(TLBFlushPageByMMUIdxData, 1);
225
+ d->addr = addr;
226
+ d->idxmap = idxmap;
227
+ async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
228
+ RUN_ON_CPU_HOST_PTR(d));
229
+ }
230
+ }
231
+
232
+ d = g_new(TLBFlushPageByMMUIdxData, 1);
233
+ d->addr = addr;
234
+ d->idxmap = idxmap;
235
+ async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
236
+ RUN_ON_CPU_HOST_PTR(d));
237
+ }
238
}
239
240
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
241
--
141
--
242
2.20.1
142
2.34.1
243
244