The following changes since commit 0e32462630687a18039464511bd0447ada5709c3:

  Merge remote-tracking branch 'remotes/vivier2/tags/linux-user-for-6.0-pull-request' into staging (2021-01-22 10:35:55 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20210123

for you to fetch changes up to 2e34067e9959f149a904cf1255985d3b68b52566:

  tcg: Toggle page execution for Apple Silicon (2021-01-22 12:48:01 -1000)

----------------------------------------------------------------
Fix tcg constant segv.
Optimize inline dup_const for MO_64.
Update the cpu running flag in cpu_exec_step_atomic
Some tidy up of tcg vs other accelerators

----------------------------------------------------------------
Douglas Crosher (1):
      tcg: update the cpu running flag in cpu_exec_step_atomic

Philippe Mathieu-Daudé (4):
      accel/tcg: Make cpu_gen_init() static
      accel/tcg: Restrict tb_gen_code() from other accelerators
      accel/tcg: Declare missing cpu_loop_exit*() stubs
      accel/tcg: Restrict cpu_io_recompile() from other accelerators

Richard Henderson (4):
      qemu/compiler: Split out qemu_build_not_reached_always
      tcg: Optimize inline dup_const for MO_64
      tcg: Increase the static number of temporaries
      accel/tcg: Move tb_flush_jmp_cache() to cputlb.c

Roman Bolshakov (1):
      tcg: Toggle page execution for Apple Silicon

 accel/tcg/internal.h      | 20 ++++++++++++++++++++
 include/exec/exec-all.h   | 11 -----------
 include/qemu/compiler.h   |  5 +++--
 include/qemu/osdep.h      | 28 ++++++++++++++++++++++++++++
 include/tcg/tcg.h         |  5 +++--
 accel/stubs/tcg-stub.c    | 10 ++++++++++
 accel/tcg/cpu-exec.c      |  7 +++++++
 accel/tcg/cputlb.c        | 19 +++++++++++++++++++
 accel/tcg/translate-all.c | 23 +++++------------------
 tcg/tcg.c                 |  7 ++++---
 10 files changed, 99 insertions(+), 36 deletions(-)
 create mode 100644 accel/tcg/internal.h

----------------------------------------------------------------

v2: Fix FreeBSD build error in patch 18.

r~

The following changes since commit 0d239e513e0117e66fa739fb71a43b9383a108ff:

  Merge tag 'pull-lu-20231018' of https://gitlab.com/rth7680/qemu into staging (2023-10-19 10:20:57 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20231018-2

for you to fetch changes up to a75f704d972b9408f5e2843784b3add48c724c52:

  target/i386: Use i128 for 128 and 256-bit loads and stores (2023-10-19 21:11:44 -0700)

----------------------------------------------------------------
tcg: Drop unused tcg_temp_free define
tcg: Introduce tcg_use_softmmu
tcg: Optimize past conditional branches
tcg: Use constant zero when expanding with divu2
tcg/ppc: Enable direct branching tcg_out_goto_tb with TCG_REG_TB
tcg/ppc: Use ADDPCIS for power9
tcg/ppc: Use prefixed instructions for power10
tcg/ppc: Disable TCG_REG_TB for Power9/Power10

----------------------------------------------------------------
Jordan Niethe (1):
      tcg/ppc: Enable direct branching tcg_out_goto_tb with TCG_REG_TB

Mike Frysinger (1):
      tcg: drop unused tcg_temp_free define

Richard Henderson (27):
      tcg/ppc: Untabify tcg-target.c.inc
      tcg/ppc: Reinterpret tb-relative to TB+4
      tcg/ppc: Use ADDPCIS in tcg_out_tb_start
      tcg/ppc: Use ADDPCIS in tcg_out_movi_int
      tcg/ppc: Use ADDPCIS for the constant pool
      tcg/ppc: Use ADDPCIS in tcg_out_goto_tb
      tcg/ppc: Use PADDI in tcg_out_movi
      tcg/ppc: Use prefixed instructions in tcg_out_mem_long
      tcg/ppc: Use PLD in tcg_out_movi for constant pool
      tcg/ppc: Use prefixed instructions in tcg_out_dupi_vec
      tcg/ppc: Use PLD in tcg_out_goto_tb
      tcg/ppc: Disable TCG_REG_TB for Power9/Power10
      tcg: Introduce tcg_use_softmmu
      tcg: Provide guest_base fallback for system mode
      tcg/arm: Use tcg_use_softmmu
      tcg/aarch64: Use tcg_use_softmmu
      tcg/i386: Use tcg_use_softmmu
      tcg/loongarch64: Use tcg_use_softmmu
      tcg/mips: Use tcg_use_softmmu
      tcg/ppc: Use tcg_use_softmmu
      tcg/riscv: Do not reserve TCG_GUEST_BASE_REG for guest_base zero
      tcg/riscv: Use tcg_use_softmmu
      tcg/s390x: Use tcg_use_softmmu
      tcg: Use constant zero when expanding with divu2
      tcg: Optimize past conditional branches
      tcg: Add tcg_gen_{ld,st}_i128
      target/i386: Use i128 for 128 and 256-bit loads and stores

 include/tcg/tcg-op-common.h      |   3 +
 include/tcg/tcg-op.h             |   2 -
 include/tcg/tcg.h                |   8 +-
 target/i386/tcg/translate.c      |  63 ++---
 tcg/optimize.c                   |   8 +-
 tcg/tcg-op-ldst.c                |  14 +-
 tcg/tcg-op.c                     |  38 ++-
 tcg/tcg.c                        |  13 +-
 tcg/aarch64/tcg-target.c.inc     | 177 ++++++------
 tcg/arm/tcg-target.c.inc         | 203 +++++++-------
 tcg/i386/tcg-target.c.inc        | 198 +++++++-------
 tcg/loongarch64/tcg-target.c.inc | 126 +++++----
 tcg/mips/tcg-target.c.inc        | 231 ++++++++--------
 tcg/ppc/tcg-target.c.inc         | 561 ++++++++++++++++++++++++++-------------
 tcg/riscv/tcg-target.c.inc       | 189 ++++++-------
 tcg/s390x/tcg-target.c.inc       | 161 ++++++-----
 16 files changed, 1102 insertions(+), 893 deletions(-)
From: Douglas Crosher <dtc-ubuntu@scieneer.com>

The cpu_exec_step_atomic() function is called with the cpu->running
flag clear and proceeds to run target code without setting this flag.
If this target code generates an exception then handle_cpu_signal()
will unnecessarily abort, for example if atomic code generates a
memory protection fault.

This patch at least sets and clears this running flag, and adds some
assertions to help detect other cases.

Signed-off-by: Douglas Crosher <dtc-ubuntu@scieneer.com>
Message-Id: <a272c656-f7c5-019d-1cc0-499b8f80f2fc@scieneer.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)

if (sigsetjmp(cpu->jmp_env, 0) == 0) {
start_exclusive();
+ g_assert(cpu == current_cpu);
+ g_assert(!cpu->running);
+ cpu->running = true;

tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
if (tb == NULL) {
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
*/
g_assert(cpu_in_exclusive_context(cpu));
parallel_cpus = true;
+ cpu->running = false;
end_exclusive();
}

--
2.25.1
Provide a symbol that can always be used to signal an error,
regardless of optimization. Usage of this should be protected
by e.g. __builtin_constant_p, which guards for optimization.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/compiler.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -XXX,XX +XXX,XX @@
* supports QEMU_ERROR, this will be reported at compile time; otherwise
* this will be reported at link time due to the missing symbol.
*/
-#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__)
extern void QEMU_NORETURN QEMU_ERROR("code path is reachable")
- qemu_build_not_reached(void);
+ qemu_build_not_reached_always(void);
+#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__)
+#define qemu_build_not_reached() qemu_build_not_reached_always()
#else
#define qemu_build_not_reached() g_assert_not_reached()
#endif
--
2.25.1
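Editorial aside, not part of the pull request: the pattern above works because a call to the never-defined, error-attributed symbol only trips a diagnostic if it survives optimization, so callers guard it with __builtin_constant_p and let constant folding delete the call on every valid path. A minimal stand-alone sketch of the same idea, using hypothetical my_* names rather than the QEMU macros:

```c
/* Illustrative sketch only; the my_* names are hypothetical, not QEMU API. */
#include <assert.h>
#include <stdint.h>

/* Never defined anywhere: if a call survives optimization, GCC's error
 * attribute (or, failing that, the missing symbol at link time) reports it. */
extern void my_build_not_reached_always(void)
    __attribute__((noreturn, error("code path is reachable")));

#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__)
# define my_build_not_reached()  my_build_not_reached_always()
#else
# define my_build_not_reached()  assert(!"not reached")
#endif

/* Accept only shift counts of 8, 16 or 32: an invalid *constant* argument is
 * diagnosed at build time, a non-constant argument falls through unchecked. */
#define shift_ok(n)  ((n) == 8 || (n) == 16 || (n) == 32)
#define checked_shl(x, n)                          \
    (__builtin_constant_p(n) && !shift_ok(n)       \
     ? (my_build_not_reached(), 0)                 \
     : (uint64_t)(x) << (n))
```

Whether the diagnostic fires at compile time or at link time depends on the toolchain; the QEMU header relies on QEMU_ERROR for the compile-time case, as the hunk above shows.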
Avoid the out-of-line function call for immediate MO_64.
In addition, diagnose all invalid constants at compile-time.

Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ uint64_t dup_const(unsigned vece, uint64_t c);
? ( (VECE) == MO_8 ? 0x0101010101010101ull * (uint8_t)(C) \
: (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C) \
: (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C) \
- : dup_const(VECE, C)) \
+ : (VECE) == MO_64 ? (uint64_t)(C) \
+ : (qemu_build_not_reached_always(), 0)) \
: dup_const(VECE, C))
--
2.25.1
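Editorial aside on what the macro above computes (illustrative values, not from the patch): each multiplier replicates the truncated constant into every lane of a 64-bit value, which is why the MO_64 case can simply return the constant itself.

```c
/* Illustrative check of the lane-replication arithmetic used by dup_const. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    assert(0x0101010101010101ull * (uint8_t)0xab    == 0xababababababababull);
    assert(0x0001000100010001ull * (uint16_t)0x1234 == 0x1234123412341234ull);
    assert(0x0000000100000001ull * (uint32_t)0xdeadbeef == 0xdeadbeefdeadbeefull);
    /* MO_64: the value already fills all 64 bits, so no multiply is needed. */
    return 0;
}
```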
This isn't a total or permanent solution to the problem of running
out of temporaries, but it puts off the issue for a bit.

Make the assert in tcg_temp_alloc unconditional. If we do run out
of temps, this can fail much later as a weird SIGSEGV, due to the
buffer overrun of the temp array.

Remove the inlines from tcg_temp_alloc and tcg_global_alloc.

Buglink: https://bugs.launchpad.net/bugs/1912065
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h | 2 +-
 tcg/tcg.c         | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ typedef struct TCGPool {

#define TCG_POOL_CHUNK_SIZE 32768

-#define TCG_MAX_TEMPS 512
+#define TCG_MAX_TEMPS 1024
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_func_start(TCGContext *s)
QSIMPLEQ_INIT(&s->labels);
}

-static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
+static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
int n = s->nb_temps++;
- tcg_debug_assert(n < TCG_MAX_TEMPS);
+ g_assert(n < TCG_MAX_TEMPS);
return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

-static inline TCGTemp *tcg_global_alloc(TCGContext *s)
+static TCGTemp *tcg_global_alloc(TCGContext *s)
{
TCGTemp *ts;

--
2.25.1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

cpu_gen_init() is TCG specific, only used in tcg/translate-all.c.
No need to export it to other accelerators, declare it statically.

Reviewed-by: Claudio Fontana <cfontana@suse.de>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210117164813.4101761-2-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h   | 2 --
 accel/tcg/translate-all.c | 2 +-
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
target_ulong *data);

-void cpu_gen_init(void);
-
/**
* cpu_restore_state:
* @cpu: the vCPU state is to be restore to
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static void page_table_config_init(void)
assert(v_l2_levels >= 0);
}

-void cpu_gen_init(void)
+static void cpu_gen_init(void)
{
tcg_context_init(&tcg_init_ctx);
}
--
2.25.1
Move and make the function static, as the only users
are here in cputlb.c.

Suggested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h   |  3 ---
 accel/tcg/cputlb.c        | 18 ++++++++++++++++++
 accel/tcg/translate-all.c | 17 -----------------
 3 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

-/* exec.c */
-void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
-
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
+#include "exec/tb-hash.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
desc->window_max_entries = max_entries;
}

+static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
+{
+ unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
+
+ for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
+ qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
+ }
+}
+
+static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
+{
+ /* Discard jump cache entries for any tb which might potentially
+ overlap the flushed page. */
+ tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
+ tb_jmp_cache_clear_page(cpu, addr);
+}
+
/**
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
* @desc: The CPUTLBDesc portion of the TLB
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
cpu_loop_exit_noexc(cpu);
}

-static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
-{
- unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
-
- for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
- qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
- }
-}
-
-void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
-{
- /* Discard jump cache entries for any tb which might potentially
- overlap the flushed page. */
- tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
- tb_jmp_cache_clear_page(cpu, addr);
-}
-
static void print_qht_statistics(struct qht_stats hst)
{
uint32_t hgram_opts;
--
2.25.1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

tb_gen_code() is only called within TCG accelerator, declare it locally.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210117164813.4101761-4-f4bug@amsat.org>
[rth: Adjust vs changed tb_flush_jmp_cache patch.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/internal.h      | 18 ++++++++++++++++++
 include/exec/exec-all.h   |  5 -----
 accel/tcg/cpu-exec.c      |  1 +
 accel/tcg/translate-all.c |  1 +
 4 files changed, 20 insertions(+), 5 deletions(-)
 create mode 100644 accel/tcg/internal.h

diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/accel/tcg/internal.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * Internal execution defines for qemu
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_INTERNAL_H
+#define ACCEL_TCG_INTERNAL_H
+
+#include "exec/exec-all.h"
+
+TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
+ target_ulong cs_base, uint32_t flags,
+ int cflags);
+
+#endif /* ACCEL_TCG_INTERNAL_H */
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
-TranslationBlock *tb_gen_code(CPUState *cpu,
- target_ulong pc, target_ulong cs_base,
- uint32_t flags,
- int cflags);
-
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
+#include "internal.h"

/* -icount align implementation. */

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
+#include "internal.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
--
2.25.1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

cpu_loop_exit*() functions are declared in accel/tcg/cpu-exec-common.c,
and are not available when TCG accelerator is not built. Add stubs so
linking without TCG succeeds.

Problematic files:

- hw/semihosting/console.c in qemu_semihosting_console_inc()
- hw/ppc/spapr_hcall.c in h_confer()
- hw/s390x/ipl.c in s390_ipl_reset_request()
- hw/misc/mips_itu.c

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210117164813.4101761-5-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/stubs/tcg-stub.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/stubs/tcg-stub.c
+++ b/accel/stubs/tcg-stub.c
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
/* Handled by hardware accelerator. */
g_assert_not_reached();
}
+
+void QEMU_NORETURN cpu_loop_exit(CPUState *cpu)
+{
+ g_assert_not_reached();
+}
+
+void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
+{
+ g_assert_not_reached();
+}
--
2.25.1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

As cpu_io_recompile() is only called within TCG accelerator
in cputlb.c, declare it locally.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210117164813.4101761-6-f4bug@amsat.org>
[rth: Adjust vs changed tb_flush_jmp_cache patch.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/internal.h    | 2 ++
 include/exec/exec-all.h | 1 -
 accel/tcg/cputlb.c      | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
int cflags);

+void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
+
#endif /* ACCEL_TCG_INTERNAL_H */
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
-void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "trace/mem.h"
+#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
--
2.25.1
From: Roman Bolshakov <r.bolshakov@yadro.com>

Pages can't be both write and executable at the same time on Apple
Silicon. macOS provides public API to switch write protection [1] for
JIT applications, like TCG.

1. https://developer.apple.com/documentation/apple_silicon/porting_just-in-time_compilers_to_apple_silicon

Tested-by: Alexander Graf <agraf@csgraf.de>
Signed-off-by: Roman Bolshakov <r.bolshakov@yadro.com>
Message-Id: <20210113032806.18220-1-r.bolshakov@yadro.com>
[rth: Inline the qemu_thread_jit_* functions;
drop the MAP_JIT change for a follow-on patch.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/osdep.h      | 28 ++++++++++++++++++++++++++++
 accel/tcg/cpu-exec.c      |  2 ++
 accel/tcg/translate-all.c |  3 +++
 tcg/tcg.c                 |  1 +
 4 files changed, 34 insertions(+)

diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -XXX,XX +XXX,XX @@ extern int daemon(int, int);
#include "sysemu/os-posix.h"
#endif

+#ifdef __APPLE__
+#include <AvailabilityMacros.h>
+#endif
+
#include "glib-compat.h"
#include "qemu/typedefs.h"

@@ -XXX,XX +XXX,XX @@ char *qemu_get_host_name(Error **errp);
*/
size_t qemu_get_host_physmem(void);

+/*
+ * Toggle write/execute on the pages marked MAP_JIT
+ * for the current thread.
+ */
+#if defined(MAC_OS_VERSION_11_0) && \
+ MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_11_0
+static inline void qemu_thread_jit_execute(void)
+{
+ if (__builtin_available(macOS 11.0, *)) {
+ pthread_jit_write_protect_np(true);
+ }
+}
+
+static inline void qemu_thread_jit_write(void)
+{
+ if (__builtin_available(macOS 11.0, *)) {
+ pthread_jit_write_protect_np(false);
+ }
+}
+#else
+static inline void qemu_thread_jit_write(void) {}
+static inline void qemu_thread_jit_execute(void) {}
+#endif
+
#endif
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
}
#endif /* DEBUG_DISAS */

+ qemu_thread_jit_execute();
ret = tcg_qemu_tb_exec(env, tb_ptr);
cpu->can_do_io = 1;
/*
@@ -XXX,XX +XXX,XX @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
{
uintptr_t old;

+ qemu_thread_jit_write();
assert(n < ARRAY_SIZE(tb->jmp_list_next));
qemu_spin_lock(&tb_next->jmp_lock);

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
+ qemu_thread_jit_write();
do_tb_phys_invalidate(tb, true);
+ qemu_thread_jit_execute();
}

/* invalidate one TB
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
#endif

assert_memory_lock();
+ qemu_thread_jit_write();

phys_pc = get_page_addr_code(env, pc);

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
s->pool_labels = NULL;
#endif

+ qemu_thread_jit_write();
/* Generate the prologue. */
tcg_target_qemu_prologue(s);

--
2.25.1

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 198 +++++++++++++++++++-------------------
 1 file changed, 98 insertions(+), 100 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
# define ALL_VECTOR_REGS 0x00ff0000u
# define ALL_BYTEL_REGS 0x0000000fu
#endif
-#ifdef CONFIG_SOFTMMU
-# define SOFTMMU_RESERVE_REGS ((1 << TCG_REG_L0) | (1 << TCG_REG_L1))
-#else
-# define SOFTMMU_RESERVE_REGS 0
-#endif
+#define SOFTMMU_RESERVE_REGS \
+ (tcg_use_softmmu ? (1 << TCG_REG_L0) | (1 << TCG_REG_L1) : 0)

/* For 64-bit, we always know that CMOV is available. */
#if TCG_TARGET_REG_BITS == 64
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
return true;
}

-#ifndef CONFIG_SOFTMMU
+#ifdef CONFIG_USER_ONLY
static HostAddress x86_guest_base = {
.index = -1
};
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
}
return 0;
}
+#define setup_guest_base_seg setup_guest_base_seg
#elif defined(__x86_64__) && \
(defined (__FreeBSD__) || defined (__FreeBSD_kernel__))
# include <machine/sysarch.h>
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
}
return 0;
}
+#define setup_guest_base_seg setup_guest_base_seg
+#endif
#else
-static inline int setup_guest_base_seg(void)
-{
- return 0;
-}
-#endif /* setup_guest_base_seg */
-#endif /* !SOFTMMU */
+# define x86_guest_base (*(HostAddress *)({ qemu_build_not_reached(); NULL; }))
+#endif /* CONFIG_USER_ONLY */
+#ifndef setup_guest_base_seg
+# define setup_guest_base_seg() 0
+#endif

#define MIN_TLB_MASK_TABLE_OFS INT_MIN

@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
MemOp s_bits = opc & MO_SIZE;
unsigned a_mask;

-#ifdef CONFIG_SOFTMMU
- h->index = TCG_REG_L0;
- h->ofs = 0;
- h->seg = 0;
-#else
- *h = x86_guest_base;
-#endif
+ if (tcg_use_softmmu) {
+ h->index = TCG_REG_L0;
+ h->ofs = 0;
+ h->seg = 0;
+ } else {
+ *h = x86_guest_base;
+ }
h->base = addrlo;
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
a_mask = (1 << h->aa.align) - 1;

-#ifdef CONFIG_SOFTMMU
- int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write);
- TCGType ttype = TCG_TYPE_I32;
- TCGType tlbtype = TCG_TYPE_I32;
- int trexw = 0, hrexw = 0, tlbrexw = 0;
- unsigned mem_index = get_mmuidx(oi);
- unsigned s_mask = (1 << s_bits) - 1;
- int fast_ofs = tlb_mask_table_ofs(s, mem_index);
- int tlb_mask;
+ if (tcg_use_softmmu) {
+ int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write);
+ TCGType ttype = TCG_TYPE_I32;
+ TCGType tlbtype = TCG_TYPE_I32;
+ int trexw = 0, hrexw = 0, tlbrexw = 0;
+ unsigned mem_index = get_mmuidx(oi);
+ unsigned s_mask = (1 << s_bits) - 1;
+ int fast_ofs = tlb_mask_table_ofs(s, mem_index);
+ int tlb_mask;

- ldst = new_ldst_label(s);
- ldst->is_ld = is_ld;
- ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addrlo;
+ ldst->addrhi_reg = addrhi;

- if (TCG_TARGET_REG_BITS == 64) {
- ttype = s->addr_type;
- trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
- if (TCG_TYPE_PTR == TCG_TYPE_I64) {
- hrexw = P_REXW;
- if (s->page_bits + s->tlb_dyn_max_bits > 32) {
- tlbtype = TCG_TYPE_I64;
- tlbrexw = P_REXW;
+ if (TCG_TARGET_REG_BITS == 64) {
+ ttype = s->addr_type;
+ trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW);
+ if (TCG_TYPE_PTR == TCG_TYPE_I64) {
+ hrexw = P_REXW;
+ if (s->page_bits + s->tlb_dyn_max_bits > 32) {
+ tlbtype = TCG_TYPE_I64;
+ tlbrexw = P_REXW;
+ }
}
}
- }

- tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
- tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
+ tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
+ s->page_bits - CPU_TLB_ENTRY_BITS);

- tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
- fast_ofs + offsetof(CPUTLBDescFast, mask));
+ tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
+ fast_ofs + offsetof(CPUTLBDescFast, mask));

- tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
- fast_ofs + offsetof(CPUTLBDescFast, table));
+ tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
+ fast_ofs + offsetof(CPUTLBDescFast, table));

- /*
- * If the required alignment is at least as large as the access, simply
- * copy the address and mask. For lesser alignments, check that we don't
- * cross pages for the complete access.
- */
- if (a_mask >= s_mask) {
- tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
- } else {
- tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
- addrlo, s_mask - a_mask);
- }
- tlb_mask = s->page_mask | a_mask;
- tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
+ /*
+ * If the required alignment is at least as large as the access,
+ * simply copy the address and mask. For lesser alignments,
+ * check that we don't cross pages for the complete access.
+ */
+ if (a_mask >= s_mask) {
+ tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
+ } else {
+ tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
+ addrlo, s_mask - a_mask);
+ }
+ tlb_mask = s->page_mask | a_mask;
+ tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);

- /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw,
- TCG_REG_L1, TCG_REG_L0, cmp_ofs);
-
- /* jne slow_path */
- tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
- ldst->label_ptr[0] = s->code_ptr;
- s->code_ptr += 4;
-
- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) {
- /* cmp 4(TCG_REG_L0), addrhi */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, cmp_ofs + 4);
+ /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw,
+ TCG_REG_L1, TCG_REG_L0, cmp_ofs);

/* jne slow_path */
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
- ldst->label_ptr[1] = s->code_ptr;
+ ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
- }

- /* TLB Hit. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
- offsetof(CPUTLBEntry, addend));
-#else
- if (a_mask) {
+ if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) {
+ /* cmp 4(TCG_REG_L0), addrhi */
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi,
+ TCG_REG_L0, cmp_ofs + 4);
+
+ /* jne slow_path */
+ tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
+ ldst->label_ptr[1] = s->code_ptr;
+ s->code_ptr += 4;
+ }
+
+ /* TLB Hit. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
+ offsetof(CPUTLBEntry, addend));
+ } else if (a_mask) {
ldst = new_ldst_label(s);
-
ldst->is_ld = is_ld;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
}
-#endif

return ldst;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_push(s, tcg_target_callee_save_regs[i]);
}

-#if TCG_TARGET_REG_BITS == 32
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
- (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
- tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
- /* jmp *tb. */
- tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
- (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
- + stack_addend);
-#else
-# if !defined(CONFIG_SOFTMMU)
- if (guest_base) {
+ if (!tcg_use_softmmu && guest_base) {
int seg = setup_guest_base_seg();
if (seg != 0) {
x86_guest_base.seg = seg;
} else if (guest_base == (int32_t)guest_base) {
x86_guest_base.ofs = guest_base;
} else {
+ assert(TCG_TARGET_REG_BITS == 64);
/* Choose R12 because, as a base, it requires a SIB byte. */
x86_guest_base.index = TCG_REG_R12;
tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base.index, guest_base);
tcg_regset_set_reg(s->reserved_regs, x86_guest_base.index);
}
}
-# endif
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
- tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
- /* jmp *tb. */
- tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
-#endif
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
+ (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
+ tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
+ /* jmp *tb. */
+ tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
+ (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
+ + stack_addend);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
+ tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
+ /* jmp *tb. */
+ tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
+ }

/*
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
--
2.34.1
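Closing editorial note on the "tcg: Toggle page execution for Apple Silicon" patch above (not part of either series): on Apple Silicon a MAP_JIT mapping is either writable or executable for the calling thread, never both, and pthread_jit_write_protect_np() flips between the two states around code generation. A minimal stand-alone sketch of that pattern, assuming macOS 11 or later on arm64, with error handling omitted:

```c
/* Illustrative JIT write/execute toggling on Apple Silicon; not QEMU code. */
#include <pthread.h>                  /* pthread_jit_write_protect_np() */
#include <sys/mman.h>                 /* mmap(), MAP_JIT */
#include <libkern/OSCacheControl.h>   /* sys_icache_invalidate() */
#include <string.h>

typedef int (*fn_t)(void);

int main(void)
{
    /* aarch64 machine code for: mov w0, #42; ret */
    static const unsigned char code[] = {
        0x40, 0x05, 0x80, 0x52,   /* mov w0, #42 */
        0xc0, 0x03, 0x5f, 0xd6,   /* ret */
    };

    void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);

    pthread_jit_write_protect_np(0);          /* make the region writable */
    memcpy(buf, code, sizeof(code));
    pthread_jit_write_protect_np(1);          /* switch back to executable */
    sys_icache_invalidate(buf, sizeof(code)); /* flush stale icache lines */

    return ((fn_t)buf)() == 42 ? 0 : 1;
}
```

The QEMU helpers inlined by the patch wrap exactly this toggle: qemu_thread_jit_write() before the translation buffer is modified, qemu_thread_jit_execute() before generated code is run.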