With a couple of linux-user and target/sparc patches thrown in for good measure.

r~

The following changes since commit 495de0fd82d8bb2d7035f82d9869cfeb48de2f9e:

  Merge tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu into staging (2025-02-14 08:19:05 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20250215

for you to fetch changes up to 2132751069134114814c7e1609e9cf644f077aad:

  target/sparc: fake UltraSPARC T1 PCR and PIC registers (2025-02-15 12:04:13 -0800)

----------------------------------------------------------------
tcg: Remove last traces of TCG_TARGET_NEED_POOL_LABELS
tcg: Cleanups after disallowing 64-on-32
tcg: Introduce constraint for zero register
linux-user: Move TARGET_SA_RESTORER out of generic/signal.h
linux-user: Fix alignment when unmapping excess reservation
target/sparc: Fix register selection for all F*TOx and FxTO* instructions
target/sparc: Fix gdbstub incorrectly handling registers f32-f62
target/sparc: fake UltraSPARC T1 PCR and PIC registers

----------------------------------------------------------------
Andreas Schwab (1):
      linux-user: Move TARGET_SA_RESTORER out of generic/signal.h

Artyom Tarasenko (1):
      target/sparc: fake UltraSPARC T1 PCR and PIC registers

Fabiano Rosas (1):
      elfload: Fix alignment when unmapping excess reservation

Mikael Szreder (2):
      target/sparc: Fix register selection for all F*TOx and FxTO* instructions
      target/sparc: Fix gdbstub incorrectly handling registers f32-f62

Richard Henderson (19):
      tcg: Remove last traces of TCG_TARGET_NEED_POOL_LABELS
      tcg: Remove TCG_OVERSIZED_GUEST
      tcg: Drop support for two address registers in gen_ldst
      tcg: Merge INDEX_op_qemu_*_{a32,a64}_*
      tcg/arm: Drop addrhi from prepare_host_addr
      tcg/i386: Drop addrhi from prepare_host_addr
      tcg/mips: Drop addrhi from prepare_host_addr
      tcg/ppc: Drop addrhi from prepare_host_addr
      tcg: Replace addr{lo,hi}_reg with addr_reg in TCGLabelQemuLdst
      plugins: Fix qemu_plugin_read_memory_vaddr parameters
      accel/tcg: Fix tlb_set_page_with_attrs, tlb_set_page
      include/exec: Change vaddr to uintptr_t
      include/exec: Use uintptr_t in CPUTLBEntry
      tcg: Introduce the 'z' constraint for a hardware zero register
      tcg/aarch64: Use 'z' constraint
      tcg/loongarch64: Use 'z' constraint
      tcg/mips: Use 'z' constraint
      tcg/riscv: Use 'z' constraint
      tcg/sparc64: Use 'z' constraint

 include/exec/tlb-common.h             |  10 +-
 include/exec/vaddr.h                  |  16 ++--
 include/qemu/atomic.h                 |  18 +---
 include/tcg/oversized-guest.h         |  23 -----
 include/tcg/tcg-opc.h                 |  28 ++----
 include/tcg/tcg.h                     |   3 +-
 linux-user/aarch64/target_signal.h    |   2 +
 linux-user/arm/target_signal.h        |   2 +
 linux-user/generic/signal.h           |   1 -
 linux-user/i386/target_signal.h       |   2 +
 linux-user/m68k/target_signal.h       |   1 +
 linux-user/microblaze/target_signal.h |   2 +
 linux-user/ppc/target_signal.h        |   2 +
 linux-user/s390x/target_signal.h      |   2 +
 linux-user/sh4/target_signal.h        |   2 +
 linux-user/x86_64/target_signal.h     |   2 +
 linux-user/xtensa/target_signal.h     |   2 +
 tcg/aarch64/tcg-target-con-set.h      |  12 +--
 tcg/aarch64/tcg-target.h              |   2 +
 tcg/loongarch64/tcg-target-con-set.h  |  15 ++-
 tcg/loongarch64/tcg-target-con-str.h  |   1 -
 tcg/loongarch64/tcg-target.h          |   2 +
 tcg/mips/tcg-target-con-set.h         |  26 +++---
 tcg/mips/tcg-target-con-str.h         |   1 -
 tcg/mips/tcg-target.h                 |   2 +
 tcg/riscv/tcg-target-con-set.h        |  10 +-
 tcg/riscv/tcg-target-con-str.h        |   1 -
 tcg/riscv/tcg-target.h                |   2 +
 tcg/sparc64/tcg-target-con-set.h      |  12 +--
 tcg/sparc64/tcg-target-con-str.h      |   1 -
 tcg/sparc64/tcg-target.h              |   3 +-
 tcg/tci/tcg-target.h                  |   1 -
 accel/tcg/cputlb.c                    |  32 ++-----
 accel/tcg/tcg-all.c                   |   9 +-
 linux-user/elfload.c                  |   4 +-
 plugins/api.c                         |   2 +-
 target/arm/ptw.c                      |  34 -------
 target/riscv/cpu_helper.c             |  13 +--
 target/sparc/gdbstub.c                |  18 +++-
 target/sparc/translate.c              |  19 ++++
 tcg/optimize.c                        |  21 ++---
 tcg/tcg-op-ldst.c                     | 103 +++++----------------
 tcg/tcg.c                             |  97 +++++++++----------
 tcg/tci.c                             | 119 +++++-------------------
 docs/devel/multi-thread-tcg.rst       |   1 -
 docs/devel/tcg-ops.rst                |   4 +-
 target/sparc/insns.decode             |  19 ++--
 tcg/aarch64/tcg-target.c.inc          |  86 +++++++----------
 tcg/arm/tcg-target.c.inc              | 104 ++++++---------------
 tcg/i386/tcg-target.c.inc             | 125 +++++++------------------
 tcg/loongarch64/tcg-target.c.inc      |  72 ++++++---------
 tcg/mips/tcg-target.c.inc             | 169 +++++++++++-----------------------
 tcg/ppc/tcg-target.c.inc              | 164 ++++++++-------------------------
 tcg/riscv/tcg-target.c.inc            |  56 +++++------
 tcg/s390x/tcg-target.c.inc            |  40 +++-----
 tcg/sparc64/tcg-target.c.inc          |  45 ++++-----
 tcg/tci/tcg-target.c.inc              |  60 +++---------
 57 files changed, 536 insertions(+), 1089 deletions(-)
 delete mode 100644 include/tcg/oversized-guest.h
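For readers skimming the shortlog, a brief illustration of what the zero-register constraint series buys. This is a hedged sketch of ours, not code from the series; the helper name is invented and the mnemonics are RISC-V:

```
#include <stdbool.h>
#include <stdint.h>

/*
 * Illustration only: hosts with a hardwired zero register (RISC-V x0,
 * MIPS $zero, SPARC %g0) can source a constant-0 operand directly:
 *
 *   without the constraint:   li  t0, 0   ;  sw  t0, 0(a0)
 *   with the constraint:      sw  zero, 0(a0)
 */
static inline bool matches_zero_constraint(int64_t value)
{
    /* The allocator may then bind the operand to the zero register. */
    return value == 0;
}
```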
These should have been removed with the rest.  There are
a couple of hosts which can emit guest_base into the
constant pool: aarch64, mips64, ppc64, riscv64.

Fixes: a417ef835058 ("tcg: Remove TCG_TARGET_NEED_LDST_LABELS and TCG_TARGET_NEED_POOL_LABELS")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci/tcg-target.h | 1 -
 tcg/tcg.c            | 4 ----
 2 files changed, 5 deletions(-)

diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
 } TCGReg;
 
 #define HAVE_TCG_QEMU_TB_EXEC
-#define TCG_TARGET_NEED_POOL_LABELS
 
 #endif /* TCG_TARGET_H */
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(void)
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
 #endif
 
-#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
-#endif
 
    qemu_thread_jit_write();
    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);
 
-#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
-#endif
 
    prologue_size = tcg_current_code_size(s);
    perf_report_prologue(s->code_gen_ptr, prologue_size);
-- 
2.43.0
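As a companion to the fix above, here is a hedged, self-contained sketch of the constant-pool pattern the commit message refers to, i.e. why tcg_out_pool_finalize() must still run after the prologue is generated. All identifiers below are invented for illustration; this is not QEMU's backend code:

```
#include <stdint.h>
#include <stdio.h>

/* A 64-bit constant such as guest_base may not fit any immediate
 * encoding, so a generator appends it to a literal pool next to the
 * emitted code and issues a pc-relative load; a finalize step later
 * patches the recorded offsets. */
static uint64_t pool[16];
static int pool_count;

static int pool_add_u64(uint64_t value)
{
    pool[pool_count] = value;   /* reserve a literal slot */
    return pool_count++;        /* caller emits a pc-relative load of it */
}

int main(void)
{
    int slot = pool_add_u64(0x7f0000000000ull);  /* example guest_base */
    printf("slot %d holds 0x%llx\n", slot, (unsigned long long)pool[slot]);
    return 0;
}
```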
This is now prohibited in configuration.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/atomic.h           | 18 +++--------------
 include/tcg/oversized-guest.h   | 23 ----------------------
 accel/tcg/cputlb.c              |  7 -------
 accel/tcg/tcg-all.c             |  9 ++++-----
 target/arm/ptw.c                | 34 ---------------------------------
 target/riscv/cpu_helper.c       | 13 +------------
 docs/devel/multi-thread-tcg.rst |  1 -
 7 files changed, 8 insertions(+), 97 deletions(-)
 delete mode 100644 include/tcg/oversized-guest.h

diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -XXX,XX +XXX,XX @@
  */
 #define signal_barrier()    __atomic_signal_fence(__ATOMIC_SEQ_CST)
 
-/* Sanity check that the size of an atomic operation isn't "overly large".
+/*
+ * Sanity check that the size of an atomic operation isn't "overly large".
  * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
  * want to use them because we ought not need them, and this lets us do a
  * bit of sanity checking that other 32-bit hosts might build.
- *
- * That said, we have a problem on 64-bit ILP32 hosts in that in order to
- * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
- * We'd prefer not want to pull in everything else TCG related, so handle
- * those few cases by hand.
- *
- * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
- * Sparc we always force the use of sparcv9 in configure.  MIPS n32 (ILP32) &
- * n64 (LP64) ABIs are both detected using __mips64.
  */
-#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64)
-# define ATOMIC_REG_SIZE  8
-#else
-# define ATOMIC_REG_SIZE  sizeof(void *)
-#endif
+#define ATOMIC_REG_SIZE  sizeof(void *)
 
 /* Weak atomic operations prevent the compiler moving other
  * loads/stores past the atomic operation load/store. However there is
diff --git a/include/tcg/oversized-guest.h b/include/tcg/oversized-guest.h
deleted file mode 100644
index XXXXXXX..XXXXXXX
--- a/include/tcg/oversized-guest.h
+++ /dev/null
@@ -XXX,XX +XXX,XX @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Define TCG_OVERSIZED_GUEST
- * Copyright (c) 2008 Fabrice Bellard
- */
-
-#ifndef EXEC_TCG_OVERSIZED_GUEST_H
-#define EXEC_TCG_OVERSIZED_GUEST_H
-
-#include "tcg-target-reg-bits.h"
-#include "cpu-param.h"
-
-/*
- * Oversized TCG guests make things like MTTCG hard
- * as we can't use atomics for cputlb updates.
- */
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
-#define TCG_OVERSIZED_GUEST 1
-#else
-#define TCG_OVERSIZED_GUEST 0
-#endif
-
-#endif
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/plugin-memory.h"
 #endif
 #include "tcg/tcg-ldst.h"
-#include "tcg/oversized-guest.h"
 
 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
@@ -XXX,XX +XXX,XX @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
    return qatomic_read(ptr);
 #else
    const uint64_t *ptr = &entry->addr_idx[access_type];
-# if TCG_OVERSIZED_GUEST
-    return *ptr;
-# else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read(ptr);
-# endif
 #endif
 }
 
@@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
        uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
        ptr_write += HOST_BIG_ENDIAN;
        qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
-#elif TCG_OVERSIZED_GUEST
-        tlb_entry->addr_write |= TLB_NOTDIRTY;
 #else
        qatomic_set(&tlb_entry->addr_write,
                    tlb_entry->addr_write | TLB_NOTDIRTY);
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/replay-core.h"
 #include "system/cpu-timers.h"
 #include "tcg/startup.h"
-#include "tcg/oversized-guest.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
 #include "qemu/accel.h"
@@ -XXX,XX +XXX,XX @@
 #include "hw/boards.h"
 #endif
 #include "internal-common.h"
+#include "cpu-param.h"
+
 
 struct TCGState {
    AccelState parent_obj;
@@ -XXX,XX +XXX,XX @@ DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
 
 static bool default_mttcg_enabled(void)
 {
-    if (icount_enabled() || TCG_OVERSIZED_GUEST) {
+    if (icount_enabled()) {
        return false;
    }
 #ifdef TARGET_SUPPORTS_MTTCG
@@ -XXX,XX +XXX,XX @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
    TCGState *s = TCG_STATE(obj);
 
    if (strcmp(value, "multi") == 0) {
-        if (TCG_OVERSIZED_GUEST) {
-            error_setg(errp, "No MTTCG when guest word size > hosts");
-        } else if (icount_enabled()) {
+        if (icount_enabled()) {
            error_setg(errp, "No MTTCG when icount is enabled");
        } else {
 #ifndef TARGET_SUPPORTS_MTTCG
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@
 #include "internals.h"
 #include "cpu-features.h"
 #include "idau.h"
-#ifdef CONFIG_TCG
-# include "tcg/oversized-guest.h"
-#endif
 
 typedef struct S1Translate {
    /*
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
        ptw->out_rw = true;
    }
 
-#ifdef CONFIG_ATOMIC64
    if (ptw->out_be) {
        old_val = cpu_to_be64(old_val);
        new_val = cpu_to_be64(new_val);
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = le64_to_cpu(cur_val);
    }
-#else
-    /*
-     * We can't support the full 64-bit atomic cmpxchg on the host.
-     * Because this is only used for FEAT_HAFDBS, which is only for AA64,
-     * we know that TCG_OVERSIZED_GUEST is set, which means that we are
-     * running in round-robin mode and could only race with dma i/o.
-     */
-#if !TCG_OVERSIZED_GUEST
-# error "Unexpected configuration"
-#endif
-    bool locked = bql_locked();
-    if (!locked) {
-        bql_lock();
-    }
-    if (ptw->out_be) {
-        cur_val = ldq_be_p(host);
-        if (cur_val == old_val) {
-            stq_be_p(host, new_val);
-        }
-    } else {
-        cur_val = ldq_le_p(host);
-        if (cur_val == old_val) {
-            stq_le_p(host, new_val);
-        }
-    }
-    if (!locked) {
-        bql_unlock();
-    }
-#endif
-
    return cur_val;
 #else
    /* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "system/cpu-timers.h"
 #include "cpu_bits.h"
 #include "debug.h"
-#include "tcg/oversized-guest.h"
 #include "pmp.h"
 
 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
    hwaddr pte_addr;
    int i;
 
-#if !TCG_OVERSIZED_GUEST
-restart:
-#endif
+ restart:
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
@@ -XXX,XX +XXX,XX @@ restart:
                                 false, MEMTXATTRS_UNSPECIFIED);
        if (memory_region_is_ram(mr)) {
            target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
-#if TCG_OVERSIZED_GUEST
-            /*
-             * MTTCG is not enabled on oversized TCG guests so
-             * page table updates do not need to be atomic
-             */
-            *pte_pa = pte = updated_pte;
-#else
            target_ulong old_pte;
            if (riscv_cpu_sxl(env) == MXL_RV32) {
                old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
@@ -XXX,XX +XXX,XX @@ restart:
                goto restart;
            }
            pte = updated_pte;
-#endif
        } else {
            /*
             * Misconfigured PTE in ROM (AD bits are not preset) or
diff --git a/docs/devel/multi-thread-tcg.rst b/docs/devel/multi-thread-tcg.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/multi-thread-tcg.rst
+++ b/docs/devel/multi-thread-tcg.rst
@@ -XXX,XX +XXX,XX @@ if:
 
 * forced by --accel tcg,thread=single
 * enabling --icount mode
-* 64 bit guests on 32 bit hosts (TCG_OVERSIZED_GUEST)
 
 In the general case of running translated code there should be no
 inter-vCPU dependencies and all vCPUs should be able to run at full
-- 
2.43.0
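To make the effect of the removal above concrete, here is a stand-alone C11 sketch (assumptions and names are ours, not QEMU code) of the invariant the code now relies on: once 64-bit-guest-on-32-bit-host builds are rejected at configure time, TLB-style fields are one host word wide and plain native atomics always suffice:

```
#include <stdatomic.h>
#include <stdint.h>

typedef uintptr_t tlb_word;   /* one host register wide by construction */

static inline tlb_word tlb_word_read(const _Atomic tlb_word *p)
{
    /* Native-word atomic load; no oversized non-atomic fallback remains. */
    return atomic_load_explicit(p, memory_order_relaxed);
}

static inline void tlb_word_set_flag(_Atomic tlb_word *p, tlb_word flag)
{
    atomic_fetch_or_explicit(p, flag, memory_order_relaxed);
}
```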
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op-ldst.c | 21 +++------------------
 tcg/tcg.c         |  4 +---
 2 files changed, 4 insertions(+), 21 deletions(-)

diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -XXX,XX +XXX,XX @@ static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
 static void gen_ldst(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
                      TCGTemp *addr, MemOpIdx oi)
 {
-    if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) {
-        if (vh) {
-            tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh),
-                        temp_arg(addr), oi);
-        } else {
-            tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
-        }
+    if (vh) {
+        tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
    } else {
-        /* See TCGV_LOW/HIGH.  */
-        TCGTemp *al = addr + HOST_BIG_ENDIAN;
-        TCGTemp *ah = addr + !HOST_BIG_ENDIAN;
-
-        if (vh) {
-            tcg_gen_op5(opc, type, temp_arg(vl), temp_arg(vh),
-                        temp_arg(al), temp_arg(ah), oi);
-        } else {
-            tcg_gen_op4(opc, type, temp_arg(vl),
-                        temp_arg(al), temp_arg(ah), oi);
-        }
-    }
+        tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
    }
 }
 
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_func_start(TCGContext *s)
    s->emit_before_op = NULL;
    QSIMPLEQ_INIT(&s->labels);
 
-    tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
-                     s->addr_type == TCG_TYPE_I64);
-
+    tcg_debug_assert(s->addr_type <= TCG_TYPE_REG);
    tcg_debug_assert(s->insn_start_words > 0);
 }
 
-- 
2.43.0

Since 64-on-32 is now unsupported, guest addresses always
fit in one host register.  Drop the replication of opcodes.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h            |  28 ++------
 tcg/optimize.c                   |  21 ++----
 tcg/tcg-op-ldst.c                |  82 +++++----------------
 tcg/tcg.c                        |  42 ++++-------
 tcg/tci.c                        | 119 ++++++-------------------------
 tcg/aarch64/tcg-target.c.inc     |  36 ++++------
 tcg/arm/tcg-target.c.inc         |  40 +++--------
 tcg/i386/tcg-target.c.inc        |  69 ++++--------------
 tcg/loongarch64/tcg-target.c.inc |  36 ++++------
 tcg/mips/tcg-target.c.inc        |  51 +++----------
 tcg/ppc/tcg-target.c.inc         |  68 ++++--------------
 tcg/riscv/tcg-target.c.inc       |  24 +++----
 tcg/s390x/tcg-target.c.inc       |  36 ++++------
 tcg/sparc64/tcg-target.c.inc     |  24 +++----
 tcg/tci/tcg-target.c.inc         |  60 ++++------------
 15 files changed, 177 insertions(+), 559 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
 DEF(plugin_cb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
 DEF(plugin_mem_cb, 0, 1, 1, TCG_OPF_NOT_PRESENT)
 
-/* Replicate ld/st ops for 32 and 64-bit guest addresses. */
-DEF(qemu_ld_a32_i32, 1, 1, 1,
+DEF(qemu_ld_i32, 1, 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a32_i32, 0, 1 + 1, 1,
+DEF(qemu_st_i32, 0, 1 + 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1,
+DEF(qemu_ld_i64, DATA64_ARGS, 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1,
-    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-
-DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1,
-    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1,
-    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1,
-    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1,
+DEF(qemu_st_i64, 0, DATA64_ARGS + 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
 
 /* Only used by i386 to cope with stupid register constraints. */
-DEF(qemu_st8_a32_i32, 0, 1 + 1, 1,
-    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1,
+DEF(qemu_st8_i32, 0, 1 + 1, 1,
    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
 
 /* Only for 64-bit hosts at the moment. */
-DEF(qemu_ld_a32_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_a64_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a32_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a64_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
 
 /* Host vector support. */
 
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
-        case INDEX_op_qemu_ld_a32_i32:
-        case INDEX_op_qemu_ld_a64_i32:
+        case INDEX_op_qemu_ld_i32:
            done = fold_qemu_ld_1reg(&ctx, op);
            break;
-        case INDEX_op_qemu_ld_a32_i64:
-        case INDEX_op_qemu_ld_a64_i64:
+        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                done = fold_qemu_ld_1reg(&ctx, op);
                break;
            }
            QEMU_FALLTHROUGH;
-        case INDEX_op_qemu_ld_a32_i128:
-        case INDEX_op_qemu_ld_a64_i128:
+        case INDEX_op_qemu_ld_i128:
            done = fold_qemu_ld_2reg(&ctx, op);
            break;
-        case INDEX_op_qemu_st8_a32_i32:
-        case INDEX_op_qemu_st8_a64_i32:
-        case INDEX_op_qemu_st_a32_i32:
-        case INDEX_op_qemu_st_a64_i32:
-        case INDEX_op_qemu_st_a32_i64:
-        case INDEX_op_qemu_st_a64_i64:
-        case INDEX_op_qemu_st_a32_i128:
-        case INDEX_op_qemu_st_a64_i128:
+        case INDEX_op_qemu_st8_i32:
+        case INDEX_op_qemu_st_i32:
+        case INDEX_op_qemu_st_i64:
+        case INDEX_op_qemu_st_i128:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
-    TCGOpcode opc;
 
    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
    }
 
    copy_addr = plugin_maybe_preserve_addr(addr);
-    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
-        opc = INDEX_op_qemu_ld_a32_i32;
-    } else {
-        opc = INDEX_op_qemu_ld_a64_i32;
-    }
-    gen_ldst(opc, TCG_TYPE_I32, tcgv_i32_temp(val), NULL, addr, oi);
+    gen_ldst(INDEX_op_qemu_ld_i32, TCG_TYPE_I32,
+             tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
                                 QEMU_PLUGIN_MEM_R);
 
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
    }
 
    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
-        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
-            opc = INDEX_op_qemu_st8_a32_i32;
-        } else {
-            opc = INDEX_op_qemu_st8_a64_i32;
-        }
+        opc = INDEX_op_qemu_st8_i32;
    } else {
-        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
-            opc = INDEX_op_qemu_st_a32_i32;
-        } else {
-            opc = INDEX_op_qemu_st_a64_i32;
-        }
+        opc = INDEX_op_qemu_st_i32;
    }
    gen_ldst(opc, TCG_TYPE_I32, tcgv_i32_temp(val), NULL, addr, oi);
    plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
    MemOp orig_memop;
    MemOpIdx orig_oi, oi;
    TCGv_i64 copy_addr;
-    TCGOpcode opc;
 
    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
    }
 
    copy_addr = plugin_maybe_preserve_addr(addr);
-    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
-        opc = INDEX_op_qemu_ld_a32_i64;
-    } else {
-        opc = INDEX_op_qemu_ld_a64_i64;
-    }
-    gen_ldst_i64(opc, val, addr, oi);
+    gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, oi);
    plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi,
                                 QEMU_PLUGIN_MEM_R);
 
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
 {
    TCGv_i64 swap = NULL;
    MemOpIdx orig_oi, oi;
-    TCGOpcode opc;
 
    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
        oi = make_memop_idx(memop, idx);
    }
 
-    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
-        opc = INDEX_op_qemu_st_a32_i64;
-    } else {
-        opc = INDEX_op_qemu_st_a64_i64;
-    }
-    gen_ldst_i64(opc, val, addr, oi);
+    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, oi);
    plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
 
    if (swap) {
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
 {
    MemOpIdx orig_oi;
    TCGv_i64 ext_addr = NULL;
-    TCGOpcode opc;
 
    check_max_alignment(memop_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
            hi = TCGV128_HIGH(val);
        }
 
-        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
-            opc = INDEX_op_qemu_ld_a32_i128;
-        } else {
-            opc = INDEX_op_qemu_ld_a64_i128;
-        }
-        gen_ldst(opc, TCG_TYPE_I128, tcgv_i64_temp(lo),
+        gen_ldst(INDEX_op_qemu_ld_i128, TCG_TYPE_I128, tcgv_i64_temp(lo),
                 tcgv_i64_temp(hi), addr, oi);
 
        if (need_bswap) {
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
        canonicalize_memop_i128_as_i64(mop, memop);
        need_bswap = (mop[0] ^ memop) & MO_BSWAP;
 
-        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
-            opc = INDEX_op_qemu_ld_a32_i64;
-        } else {
-            opc = INDEX_op_qemu_ld_a64_i64;
-        }
-
        /*
         * Since there are no global TCGv_i128, there is no visible state
         * changed if the second load faults.  Load directly into the two
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
            y = TCGV128_LOW(val);
        }
 
-        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));
+        gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr,
+                     make_memop_idx(mop[0], idx));
 
        if (need_bswap) {
            tcg_gen_bswap64_i64(x, x);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
            addr_p8 = tcgv_i64_temp(t);
        }
 
-        gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
+        gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8,
+                     make_memop_idx(mop[1], idx));
        tcg_temp_free_internal(addr_p8);
 
        if (need_bswap) {
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
 {
    MemOpIdx orig_oi;
    TCGv_i64 ext_addr = NULL;
-    TCGOpcode opc;
 
    check_max_alignment(memop_alignment_bits(memop));
    tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
            hi = TCGV128_HIGH(val);
        }
 
-        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
-            opc = INDEX_op_qemu_st_a32_i128;
-        } else {
-            opc = INDEX_op_qemu_st_a64_i128;
-        }
-        gen_ldst(opc, TCG_TYPE_I128, tcgv_i64_temp(lo),
-                 tcgv_i64_temp(hi), addr, oi);
+        gen_ldst(INDEX_op_qemu_st_i128, TCG_TYPE_I128,
+                 tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
 
        if (need_bswap) {
            tcg_temp_free_i64(lo);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
 
        canonicalize_memop_i128_as_i64(mop, memop);
 
-        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
-            opc = INDEX_op_qemu_st_a32_i64;
-        } else {
-            opc = INDEX_op_qemu_st_a64_i64;
-        }
-
        if ((memop & MO_BSWAP) == MO_LE) {
            x = TCGV128_LOW(val);
            y = TCGV128_HIGH(val);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
            x = b;
        }
 
-        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));
+        gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr,
+                     make_memop_idx(mop[0], idx));
 
        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
            TCGv_i32 t = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
 
        if (b) {
            tcg_gen_bswap64_i64(b, y);
-            gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx));
+            gen_ldst_i64(INDEX_op_qemu_st_i64, b, addr_p8,
+                         make_memop_idx(mop[1], idx));
            tcg_temp_free_i64(b);
        } else {
-            gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
+            gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8,
+                         make_memop_idx(mop[1], idx));
        }
        tcg_temp_free_internal(addr_p8);
    } else {
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_goto_ptr:
-    case INDEX_op_qemu_ld_a32_i32:
-    case INDEX_op_qemu_ld_a64_i32:
-    case INDEX_op_qemu_st_a32_i32:
-    case INDEX_op_qemu_st_a64_i32:
-    case INDEX_op_qemu_ld_a32_i64:
-    case INDEX_op_qemu_ld_a64_i64:
-    case INDEX_op_qemu_st_a32_i64:
-    case INDEX_op_qemu_st_a64_i64:
+    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_st_i64:
        return true;
 
-    case INDEX_op_qemu_st8_a32_i32:
-    case INDEX_op_qemu_st8_a64_i32:
+    case INDEX_op_qemu_st8_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;
 
-    case INDEX_op_qemu_ld_a32_i128:
-    case INDEX_op_qemu_ld_a64_i128:
-    case INDEX_op_qemu_st_a32_i128:
-    case INDEX_op_qemu_st_a64_i128:
+    case INDEX_op_qemu_ld_i128:
+    case INDEX_op_qemu_st_i128:
        return TCG_TARGET_HAS_qemu_ldst_i128;
 
    case INDEX_op_mov_i32:
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
            }
            i = 1;
            break;
-        case INDEX_op_qemu_ld_a32_i32:
-        case INDEX_op_qemu_ld_a64_i32:
-        case INDEX_op_qemu_st_a32_i32:
-        case INDEX_op_qemu_st_a64_i32:
-        case INDEX_op_qemu_st8_a32_i32:
-        case INDEX_op_qemu_st8_a64_i32:
-        case INDEX_op_qemu_ld_a32_i64:
-        case INDEX_op_qemu_ld_a64_i64:
-        case INDEX_op_qemu_st_a32_i64:
-        case INDEX_op_qemu_st_a64_i64:
-        case INDEX_op_qemu_ld_a32_i128:
-        case INDEX_op_qemu_ld_a64_i128:
-        case INDEX_op_qemu_st_a32_i128:
-        case INDEX_op_qemu_st_a64_i128:
+        case INDEX_op_qemu_ld_i32:
+        case INDEX_op_qemu_st_i32:
+        case INDEX_op_qemu_st8_i32:
+        case INDEX_op_qemu_ld_i64:
+        case INDEX_op_qemu_st_i64:
+        case INDEX_op_qemu_ld_i128:
+        case INDEX_op_qemu_st_i128:
            {
                const char *s_al, *s_op, *s_at;
                MemOpIdx oi = op->args[k++];
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
    *i4 = extract32(insn, 26, 6);
 }
 
-static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
-                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
-{
-    *r0 = extract32(insn, 8, 4);
-    *r1 = extract32(insn, 12, 4);
-    *r2 = extract32(insn, 16, 4);
-    *r3 = extract32(insn, 20, 4);
-    *r4 = extract32(insn, 24, 4);
-}
-
 static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
 {
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
            tb_ptr = ptr;
            break;
 
-        case INDEX_op_qemu_ld_a32_i32:
+        case INDEX_op_qemu_ld_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
-            taddr = (uint32_t)regs[r1];
-            goto do_ld_i32;
-        case INDEX_op_qemu_ld_a64_i32:
-            if (TCG_TARGET_REG_BITS == 64) {
-                tci_args_rrm(insn, &r0, &r1, &oi);
-                taddr = regs[r1];
-            } else {
-                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-                taddr = tci_uint64(regs[r2], regs[r1]);
-                oi = regs[r3];
-            }
-        do_ld_i32:
+            taddr = regs[r1];
            regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
            break;
 
-        case INDEX_op_qemu_ld_a32_i64:
-            if (TCG_TARGET_REG_BITS == 64) {
-                tci_args_rrm(insn, &r0, &r1, &oi);
-                taddr = (uint32_t)regs[r1];
-            } else {
-                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-                taddr = (uint32_t)regs[r2];
-                oi = regs[r3];
-            }
-            goto do_ld_i64;
-        case INDEX_op_qemu_ld_a64_i64:
+        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
-                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
-                taddr = tci_uint64(regs[r3], regs[r2]);
-                oi = regs[r4];
+                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+                taddr = regs[r2];
+                oi = regs[r3];
            }
-        do_ld_i64:
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
            }
            break;
 
-        case INDEX_op_qemu_st_a32_i32:
+        case INDEX_op_qemu_st_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
-            taddr = (uint32_t)regs[r1];
-            goto do_st_i32;
-        case INDEX_op_qemu_st_a64_i32:
-            if (TCG_TARGET_REG_BITS == 64) {
-                tci_args_rrm(insn, &r0, &r1, &oi);
-                taddr = regs[r1];
-            } else {
-                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-                taddr = tci_uint64(regs[r2], regs[r1]);
-                oi = regs[r3];
-            }
-        do_st_i32:
+            taddr = regs[r1];
            tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
            break;
 
-        case INDEX_op_qemu_st_a32_i64:
-            if (TCG_TARGET_REG_BITS == 64) {
-                tci_args_rrm(insn, &r0, &r1, &oi);
-                tmp64 = regs[r0];
-                taddr = (uint32_t)regs[r1];
-            } else {
-                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
-                tmp64 = tci_uint64(regs[r1], regs[r0]);
-                taddr = (uint32_t)regs[r2];
-                oi = regs[r3];
-            }
-            goto do_st_i64;
-        case INDEX_op_qemu_st_a64_i64:
+        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = regs[r1];
            } else {
-                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
+                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
-                taddr = tci_uint64(regs[r3], regs[r2]);
-                oi = regs[r4];
+                taddr = regs[r2];
+                oi = regs[r3];
            }
-        do_st_i64:
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;
 
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
                           str_r(r3), str_r(r4), str_r(r5));
        break;
 
-    case INDEX_op_qemu_ld_a32_i32:
-    case INDEX_op_qemu_st_a32_i32:
-        len = 1 + 1;
-        goto do_qemu_ldst;
-    case INDEX_op_qemu_ld_a32_i64:
-    case INDEX_op_qemu_st_a32_i64:
-    case INDEX_op_qemu_ld_a64_i32:
-    case INDEX_op_qemu_st_a64_i32:
-        len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
-        goto do_qemu_ldst;
-    case INDEX_op_qemu_ld_a64_i64:
-    case INDEX_op_qemu_st_a64_i64:
-        len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
-        goto do_qemu_ldst;
-    do_qemu_ldst:
-        switch (len) {
-        case 2:
-            tci_args_rrm(insn, &r0, &r1, &oi);
-            info->fprintf_func(info->stream, "%-12s  %s, %s, %x",
-                               op_name, str_r(r0), str_r(r1), oi);
-            break;
-        case 3:
+    case INDEX_op_qemu_ld_i64:
+    case INDEX_op_qemu_st_i64:
+        if (TCG_TARGET_REG_BITS == 32) {
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3));
            break;
-        case 4:
-            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
-            info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s, %s",
-                               op_name, str_r(r0), str_r(r1),
-                               str_r(r2), str_r(r3), str_r(r4));
-            break;
-        default:
-            g_assert_not_reached();
        }
+        /* fall through */
+    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_st_i32:
+        tci_args_rrm(insn, &r0, &r1, &oi);
+        info->fprintf_func(info->stream, "%-12s  %s, %s, %x",
+                           op_name, str_r(r0), str_r(r1), oi);
        break;
 
    case 0:
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
        tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
        break;
 
-    case INDEX_op_qemu_ld_a32_i32:
-    case INDEX_op_qemu_ld_a64_i32:
-    case INDEX_op_qemu_ld_a32_i64:
-    case INDEX_op_qemu_ld_a64_i64:
+    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, ext);
        break;
-    case INDEX_op_qemu_st_a32_i32:
-    case INDEX_op_qemu_st_a64_i32:
-    case INDEX_op_qemu_st_a32_i64:
-    case INDEX_op_qemu_st_a64_i64:
+    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, REG0(0), a1, a2, ext);
        break;
-    case INDEX_op_qemu_ld_a32_i128:
-    case INDEX_op_qemu_ld_a64_i128:
+    case INDEX_op_qemu_ld_i128:
        tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
        break;
-    case INDEX_op_qemu_st_a32_i128:
-    case INDEX_op_qemu_st_a64_i128:
+    case INDEX_op_qemu_st_i128:
        tcg_out_qemu_ldst_i128(s, REG0(0), REG0(1), a2, args[3], false);
        break;
 
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, r, rC, rZ, rZ);
 
-    case INDEX_op_qemu_ld_a32_i32:
-    case INDEX_op_qemu_ld_a64_i32:
-    case INDEX_op_qemu_ld_a32_i64:
-    case INDEX_op_qemu_ld_a64_i64:
+    case INDEX_op_qemu_ld_i32:
+    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, r);
-    case INDEX_op_qemu_ld_a32_i128:
-    case INDEX_op_qemu_ld_a64_i128:
+    case INDEX_op_qemu_ld_i128:
        return C_O2_I1(r, r, r);
-    case INDEX_op_qemu_st_a32_i32:
-    case INDEX_op_qemu_st_a64_i32:
-    case INDEX_op_qemu_st_a32_i64:
-    case INDEX_op_qemu_st_a64_i64:
+    case INDEX_op_qemu_st_i32:
+    case INDEX_op_qemu_st_i64:
        return C_O0_I2(rZ, r);
-    case INDEX_op_qemu_st_a32_i128:
-    case INDEX_op_qemu_st_a64_i128:
+    case INDEX_op_qemu_st_i128:
        return C_O0_I3(rZ, rZ, r);
 
    case INDEX_op_deposit_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        ARITH_MOV, args[0], 0, 0);
        break;
 
-    case INDEX_op_qemu_ld_a32_i32:
+    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
        break;
-    case INDEX_op_qemu_ld_a64_i32:
-        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
-                        args[3], TCG_TYPE_I32);
-        break;
-    case INDEX_op_qemu_ld_a32_i64:
+    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
                        args[3], TCG_TYPE_I64);
        break;
-    case INDEX_op_qemu_ld_a64_i64:
-        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
-                        args[4], TCG_TYPE_I64);
-        break;
 
-    case INDEX_op_qemu_st_a32_i32:
+    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
        break;
-    case INDEX_op_qemu_st_a64_i32:
-        tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
-                        args[3], TCG_TYPE_I32);
-        break;
-    case INDEX_op_qemu_st_a32_i64:
+    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
                        args[3], TCG_TYPE_I64);
        break;
-    case INDEX_op_qemu_st_a64_i64:
-        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
-                        args[4], TCG_TYPE_I64);
-        break;
 
    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, rI, rI);
 
-    case INDEX_op_qemu_ld_a32_i32:
+    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, q);
-    case INDEX_op_qemu_ld_a64_i32:
-        return C_O1_I2(r, q, q);
-    case INDEX_op_qemu_ld_a32_i64:
+    case INDEX_op_qemu_ld_i64:
        return C_O2_I1(e, p, q);
-    case INDEX_op_qemu_ld_a64_i64:
-        return C_O2_I2(e, p, q, q);
-    case INDEX_op_qemu_st_a32_i32:
+    case INDEX_op_qemu_st_i32:
        return C_O0_I2(q, q);
-    case INDEX_op_qemu_st_a64_i32:
-        return C_O0_I3(q, q, q);
-    case INDEX_op_qemu_st_a32_i64:
+    case INDEX_op_qemu_st_i64:
        return C_O0_I3(Q, p, q);
-    case INDEX_op_qemu_st_a64_i64:
-        return C_O0_I4(Q, p, q, q);
 
    case INDEX_op_st_vec:
        return C_O0_I2(w, r);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
        break;
 
-    case INDEX_op_qemu_ld_a64_i32:
-        if (TCG_TARGET_REG_BITS == 32) {
-            tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
-            break;
-        }
-        /* fall through */
-    case INDEX_op_qemu_ld_a32_i32:
+    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
break;
559
560
case 0:
561
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
562
index XXXXXXX..XXXXXXX 100644
563
--- a/tcg/aarch64/tcg-target.c.inc
564
+++ b/tcg/aarch64/tcg-target.c.inc
565
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
566
tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
567
break;
568
569
- case INDEX_op_qemu_ld_a32_i32:
570
- case INDEX_op_qemu_ld_a64_i32:
571
- case INDEX_op_qemu_ld_a32_i64:
572
- case INDEX_op_qemu_ld_a64_i64:
573
+ case INDEX_op_qemu_ld_i32:
574
+ case INDEX_op_qemu_ld_i64:
575
tcg_out_qemu_ld(s, a0, a1, a2, ext);
576
break;
577
- case INDEX_op_qemu_st_a32_i32:
578
- case INDEX_op_qemu_st_a64_i32:
579
- case INDEX_op_qemu_st_a32_i64:
580
- case INDEX_op_qemu_st_a64_i64:
581
+ case INDEX_op_qemu_st_i32:
582
+ case INDEX_op_qemu_st_i64:
583
tcg_out_qemu_st(s, REG0(0), a1, a2, ext);
584
break;
585
- case INDEX_op_qemu_ld_a32_i128:
586
- case INDEX_op_qemu_ld_a64_i128:
587
+ case INDEX_op_qemu_ld_i128:
588
tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
589
break;
590
- case INDEX_op_qemu_st_a32_i128:
591
- case INDEX_op_qemu_st_a64_i128:
592
+ case INDEX_op_qemu_st_i128:
593
tcg_out_qemu_ldst_i128(s, REG0(0), REG0(1), a2, args[3], false);
594
break;
595
596
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
597
case INDEX_op_movcond_i64:
598
return C_O1_I4(r, r, rC, rZ, rZ);
599
600
- case INDEX_op_qemu_ld_a32_i32:
601
- case INDEX_op_qemu_ld_a64_i32:
602
- case INDEX_op_qemu_ld_a32_i64:
603
- case INDEX_op_qemu_ld_a64_i64:
604
+ case INDEX_op_qemu_ld_i32:
605
+ case INDEX_op_qemu_ld_i64:
606
return C_O1_I1(r, r);
607
- case INDEX_op_qemu_ld_a32_i128:
608
- case INDEX_op_qemu_ld_a64_i128:
609
+ case INDEX_op_qemu_ld_i128:
610
return C_O2_I1(r, r, r);
611
- case INDEX_op_qemu_st_a32_i32:
612
- case INDEX_op_qemu_st_a64_i32:
613
- case INDEX_op_qemu_st_a32_i64:
614
- case INDEX_op_qemu_st_a64_i64:
615
+ case INDEX_op_qemu_st_i32:
616
+ case INDEX_op_qemu_st_i64:
617
return C_O0_I2(rZ, r);
618
- case INDEX_op_qemu_st_a32_i128:
619
- case INDEX_op_qemu_st_a64_i128:
620
+ case INDEX_op_qemu_st_i128:
621
return C_O0_I3(rZ, rZ, r);
622
623
case INDEX_op_deposit_i32:
624
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
625
index XXXXXXX..XXXXXXX 100644
626
--- a/tcg/arm/tcg-target.c.inc
627
+++ b/tcg/arm/tcg-target.c.inc
628
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
629
ARITH_MOV, args[0], 0, 0);
630
break;
631
632
- case INDEX_op_qemu_ld_a32_i32:
633
+ case INDEX_op_qemu_ld_i32:
634
tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
635
break;
636
- case INDEX_op_qemu_ld_a64_i32:
637
- tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
638
- args[3], TCG_TYPE_I32);
639
- break;
640
- case INDEX_op_qemu_ld_a32_i64:
641
+ case INDEX_op_qemu_ld_i64:
642
tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
643
args[3], TCG_TYPE_I64);
644
break;
645
- case INDEX_op_qemu_ld_a64_i64:
646
- tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
647
- args[4], TCG_TYPE_I64);
648
- break;
649
650
- case INDEX_op_qemu_st_a32_i32:
651
+ case INDEX_op_qemu_st_i32:
652
tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
653
break;
654
- case INDEX_op_qemu_st_a64_i32:
655
- tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
656
- args[3], TCG_TYPE_I32);
657
- break;
658
- case INDEX_op_qemu_st_a32_i64:
659
+ case INDEX_op_qemu_st_i64:
660
tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
661
args[3], TCG_TYPE_I64);
662
break;
663
- case INDEX_op_qemu_st_a64_i64:
664
- tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
665
- args[4], TCG_TYPE_I64);
666
- break;
667
668
case INDEX_op_bswap16_i32:
669
tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
670
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
671
case INDEX_op_setcond2_i32:
672
return C_O1_I4(r, r, r, rI, rI);
673
674
- case INDEX_op_qemu_ld_a32_i32:
675
+ case INDEX_op_qemu_ld_i32:
676
return C_O1_I1(r, q);
677
- case INDEX_op_qemu_ld_a64_i32:
678
- return C_O1_I2(r, q, q);
679
- case INDEX_op_qemu_ld_a32_i64:
680
+ case INDEX_op_qemu_ld_i64:
681
return C_O2_I1(e, p, q);
682
- case INDEX_op_qemu_ld_a64_i64:
683
- return C_O2_I2(e, p, q, q);
684
- case INDEX_op_qemu_st_a32_i32:
685
+ case INDEX_op_qemu_st_i32:
686
return C_O0_I2(q, q);
687
- case INDEX_op_qemu_st_a64_i32:
688
- return C_O0_I3(q, q, q);
689
- case INDEX_op_qemu_st_a32_i64:
690
+ case INDEX_op_qemu_st_i64:
691
return C_O0_I3(Q, p, q);
692
- case INDEX_op_qemu_st_a64_i64:
693
- return C_O0_I4(Q, p, q, q);
694
695
case INDEX_op_st_vec:
696
return C_O0_I2(w, r);
697
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
698
index XXXXXXX..XXXXXXX 100644
699
--- a/tcg/i386/tcg-target.c.inc
700
+++ b/tcg/i386/tcg-target.c.inc
701
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
702
tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
703
break;
704
705
- case INDEX_op_qemu_ld_a64_i32:
706
- if (TCG_TARGET_REG_BITS == 32) {
707
- tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
708
- break;
709
- }
710
- /* fall through */
711
- case INDEX_op_qemu_ld_a32_i32:
712
+ case INDEX_op_qemu_ld_i32:
713
tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
714
break;
715
- case INDEX_op_qemu_ld_a32_i64:
716
+ case INDEX_op_qemu_ld_i64:
717
if (TCG_TARGET_REG_BITS == 64) {
718
tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
719
} else {
720
tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
721
}
722
break;
723
- case INDEX_op_qemu_ld_a64_i64:
724
- if (TCG_TARGET_REG_BITS == 64) {
725
- tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
726
- } else {
727
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
728
- }
729
- break;
730
- case INDEX_op_qemu_ld_a32_i128:
731
- case INDEX_op_qemu_ld_a64_i128:
732
+ case INDEX_op_qemu_ld_i128:
733
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
734
tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
735
break;
736
737
- case INDEX_op_qemu_st_a64_i32:
738
- case INDEX_op_qemu_st8_a64_i32:
739
- if (TCG_TARGET_REG_BITS == 32) {
740
- tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
741
- break;
742
- }
743
- /* fall through */
744
- case INDEX_op_qemu_st_a32_i32:
745
- case INDEX_op_qemu_st8_a32_i32:
746
+ case INDEX_op_qemu_st_i32:
747
+ case INDEX_op_qemu_st8_i32:
748
tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
749
break;
750
- case INDEX_op_qemu_st_a32_i64:
751
+ case INDEX_op_qemu_st_i64:
752
if (TCG_TARGET_REG_BITS == 64) {
753
tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
754
} else {
755
tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
756
}
757
break;
758
- case INDEX_op_qemu_st_a64_i64:
759
- if (TCG_TARGET_REG_BITS == 64) {
760
- tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
761
- } else {
762
- tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
763
- }
764
- break;
765
- case INDEX_op_qemu_st_a32_i128:
766
- case INDEX_op_qemu_st_a64_i128:
767
+ case INDEX_op_qemu_st_i128:
768
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
769
tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
770
break;
771
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
772
case INDEX_op_clz_i64:
773
return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
774
775
- case INDEX_op_qemu_ld_a32_i32:
776
+ case INDEX_op_qemu_ld_i32:
777
return C_O1_I1(r, L);
778
- case INDEX_op_qemu_ld_a64_i32:
779
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O1_I2(r, L, L);
780
781
- case INDEX_op_qemu_st_a32_i32:
782
+ case INDEX_op_qemu_st_i32:
783
return C_O0_I2(L, L);
784
- case INDEX_op_qemu_st_a64_i32:
785
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L);
786
- case INDEX_op_qemu_st8_a32_i32:
787
+ case INDEX_op_qemu_st8_i32:
788
return C_O0_I2(s, L);
789
- case INDEX_op_qemu_st8_a64_i32:
790
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(s, L) : C_O0_I3(s, L, L);
791
792
- case INDEX_op_qemu_ld_a32_i64:
793
+ case INDEX_op_qemu_ld_i64:
794
return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I1(r, r, L);
795
- case INDEX_op_qemu_ld_a64_i64:
796
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I2(r, r, L, L);
797
798
- case INDEX_op_qemu_st_a32_i64:
799
+ case INDEX_op_qemu_st_i64:
800
return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L);
801
- case INDEX_op_qemu_st_a64_i64:
802
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I4(L, L, L, L);
803
804
- case INDEX_op_qemu_ld_a32_i128:
805
- case INDEX_op_qemu_ld_a64_i128:
806
+ case INDEX_op_qemu_ld_i128:
807
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
808
return C_O2_I1(r, r, L);
809
- case INDEX_op_qemu_st_a32_i128:
810
- case INDEX_op_qemu_st_a64_i128:
811
+ case INDEX_op_qemu_st_i128:
812
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
813
return C_O0_I3(L, L, L);
814
815
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
816
index XXXXXXX..XXXXXXX 100644
817
--- a/tcg/loongarch64/tcg-target.c.inc
818
+++ b/tcg/loongarch64/tcg-target.c.inc
819
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
820
tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
821
break;
822
823
- case INDEX_op_qemu_ld_a32_i32:
824
- case INDEX_op_qemu_ld_a64_i32:
825
+ case INDEX_op_qemu_ld_i32:
826
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
827
break;
828
- case INDEX_op_qemu_ld_a32_i64:
829
- case INDEX_op_qemu_ld_a64_i64:
830
+ case INDEX_op_qemu_ld_i64:
831
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
832
break;
833
- case INDEX_op_qemu_ld_a32_i128:
834
- case INDEX_op_qemu_ld_a64_i128:
835
+ case INDEX_op_qemu_ld_i128:
836
tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
837
break;
838
- case INDEX_op_qemu_st_a32_i32:
839
- case INDEX_op_qemu_st_a64_i32:
840
+ case INDEX_op_qemu_st_i32:
841
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
842
break;
843
- case INDEX_op_qemu_st_a32_i64:
844
- case INDEX_op_qemu_st_a64_i64:
845
+ case INDEX_op_qemu_st_i64:
846
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
847
break;
848
- case INDEX_op_qemu_st_a32_i128:
849
- case INDEX_op_qemu_st_a64_i128:
850
+ case INDEX_op_qemu_st_i128:
851
tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
852
break;
853
854
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
855
case INDEX_op_st32_i64:
856
case INDEX_op_st_i32:
857
case INDEX_op_st_i64:
858
- case INDEX_op_qemu_st_a32_i32:
859
- case INDEX_op_qemu_st_a64_i32:
860
- case INDEX_op_qemu_st_a32_i64:
861
- case INDEX_op_qemu_st_a64_i64:
862
+ case INDEX_op_qemu_st_i32:
863
+ case INDEX_op_qemu_st_i64:
864
return C_O0_I2(rZ, r);
865
866
- case INDEX_op_qemu_ld_a32_i128:
867
- case INDEX_op_qemu_ld_a64_i128:
868
+ case INDEX_op_qemu_ld_i128:
869
return C_N2_I1(r, r, r);
870
871
- case INDEX_op_qemu_st_a32_i128:
872
- case INDEX_op_qemu_st_a64_i128:
873
+ case INDEX_op_qemu_st_i128:
874
return C_O0_I3(r, r, r);
875
876
case INDEX_op_brcond_i32:
877
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
878
case INDEX_op_ld32u_i64:
879
case INDEX_op_ld_i32:
880
case INDEX_op_ld_i64:
881
- case INDEX_op_qemu_ld_a32_i32:
882
- case INDEX_op_qemu_ld_a64_i32:
883
- case INDEX_op_qemu_ld_a32_i64:
884
- case INDEX_op_qemu_ld_a64_i64:
885
+ case INDEX_op_qemu_ld_i32:
886
+ case INDEX_op_qemu_ld_i64:
887
return C_O1_I1(r, r);
888
889
case INDEX_op_andc_i32:
890
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
891
index XXXXXXX..XXXXXXX 100644
892
--- a/tcg/mips/tcg-target.c.inc
893
+++ b/tcg/mips/tcg-target.c.inc
894
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
895
tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
896
break;
897
898
- case INDEX_op_qemu_ld_a64_i32:
899
- if (TCG_TARGET_REG_BITS == 32) {
900
- tcg_out_qemu_ld(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
901
- break;
902
- }
903
- /* fall through */
904
- case INDEX_op_qemu_ld_a32_i32:
905
+ case INDEX_op_qemu_ld_i32:
906
tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
907
break;
908
- case INDEX_op_qemu_ld_a32_i64:
909
+ case INDEX_op_qemu_ld_i64:
910
if (TCG_TARGET_REG_BITS == 64) {
911
tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
912
} else {
913
tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
914
}
915
break;
916
- case INDEX_op_qemu_ld_a64_i64:
917
- if (TCG_TARGET_REG_BITS == 64) {
918
- tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
919
- } else {
920
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
921
- }
922
- break;
923
924
- case INDEX_op_qemu_st_a64_i32:
925
- if (TCG_TARGET_REG_BITS == 32) {
926
- tcg_out_qemu_st(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
927
- break;
928
- }
929
- /* fall through */
930
- case INDEX_op_qemu_st_a32_i32:
931
+ case INDEX_op_qemu_st_i32:
932
tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
933
break;
934
- case INDEX_op_qemu_st_a32_i64:
935
+ case INDEX_op_qemu_st_i64:
936
if (TCG_TARGET_REG_BITS == 64) {
937
tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
938
} else {
939
tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
940
}
941
break;
942
- case INDEX_op_qemu_st_a64_i64:
943
- if (TCG_TARGET_REG_BITS == 64) {
944
- tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
945
- } else {
946
- tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
947
- }
948
- break;
949
950
case INDEX_op_add2_i32:
951
tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
952
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
953
case INDEX_op_brcond2_i32:
954
return C_O0_I4(rZ, rZ, rZ, rZ);
955
956
- case INDEX_op_qemu_ld_a32_i32:
957
+ case INDEX_op_qemu_ld_i32:
958
return C_O1_I1(r, r);
959
- case INDEX_op_qemu_ld_a64_i32:
960
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
961
- case INDEX_op_qemu_st_a32_i32:
962
+ case INDEX_op_qemu_st_i32:
963
return C_O0_I2(rZ, r);
964
- case INDEX_op_qemu_st_a64_i32:
965
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, r, r);
966
- case INDEX_op_qemu_ld_a32_i64:
967
+ case INDEX_op_qemu_ld_i64:
968
return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
969
- case INDEX_op_qemu_ld_a64_i64:
970
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
971
- case INDEX_op_qemu_st_a32_i64:
972
+ case INDEX_op_qemu_st_i64:
973
return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, rZ, r);
974
- case INDEX_op_qemu_st_a64_i64:
975
- return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r)
976
- : C_O0_I4(rZ, rZ, r, r));
977
978
default:
979
return C_NotImplemented;
980
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
981
index XXXXXXX..XXXXXXX 100644
982
--- a/tcg/ppc/tcg-target.c.inc
983
+++ b/tcg/ppc/tcg-target.c.inc
984
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
985
tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
986
break;
987
988
- case INDEX_op_qemu_ld_a64_i32:
989
- if (TCG_TARGET_REG_BITS == 32) {
990
- tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
991
- args[3], TCG_TYPE_I32);
992
- break;
993
- }
994
- /* fall through */
995
- case INDEX_op_qemu_ld_a32_i32:
996
+ case INDEX_op_qemu_ld_i32:
997
tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
998
break;
999
- case INDEX_op_qemu_ld_a32_i64:
1000
+ case INDEX_op_qemu_ld_i64:
1001
if (TCG_TARGET_REG_BITS == 64) {
1002
tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
1003
args[2], TCG_TYPE_I64);
1004
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1005
args[3], TCG_TYPE_I64);
1006
}
1007
break;
1008
- case INDEX_op_qemu_ld_a64_i64:
1009
- if (TCG_TARGET_REG_BITS == 64) {
1010
- tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
1011
- args[2], TCG_TYPE_I64);
1012
- } else {
1013
- tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
1014
- args[4], TCG_TYPE_I64);
1015
- }
1016
- break;
1017
- case INDEX_op_qemu_ld_a32_i128:
1018
- case INDEX_op_qemu_ld_a64_i128:
1019
+ case INDEX_op_qemu_ld_i128:
1020
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1021
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
1022
break;
1023
1024
- case INDEX_op_qemu_st_a64_i32:
1025
- if (TCG_TARGET_REG_BITS == 32) {
1026
- tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
1027
- args[3], TCG_TYPE_I32);
1028
- break;
1029
- }
1030
- /* fall through */
1031
- case INDEX_op_qemu_st_a32_i32:
1032
+ case INDEX_op_qemu_st_i32:
1033
tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
1034
break;
1035
- case INDEX_op_qemu_st_a32_i64:
1036
+ case INDEX_op_qemu_st_i64:
1037
if (TCG_TARGET_REG_BITS == 64) {
1038
tcg_out_qemu_st(s, args[0], -1, args[1], -1,
1039
args[2], TCG_TYPE_I64);
1040
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1041
args[3], TCG_TYPE_I64);
1042
}
1043
break;
1044
- case INDEX_op_qemu_st_a64_i64:
1045
- if (TCG_TARGET_REG_BITS == 64) {
1046
- tcg_out_qemu_st(s, args[0], -1, args[1], -1,
1047
- args[2], TCG_TYPE_I64);
1048
- } else {
1049
- tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
1050
- args[4], TCG_TYPE_I64);
1051
- }
1052
- break;
1053
- case INDEX_op_qemu_st_a32_i128:
1054
- case INDEX_op_qemu_st_a64_i128:
1055
+ case INDEX_op_qemu_st_i128:
1056
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1057
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
1058
break;
1059
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1060
case INDEX_op_sub2_i32:
1061
return C_O2_I4(r, r, rI, rZM, r, r);
1062
1063
- case INDEX_op_qemu_ld_a32_i32:
1064
+ case INDEX_op_qemu_ld_i32:
1065
return C_O1_I1(r, r);
1066
- case INDEX_op_qemu_ld_a64_i32:
1067
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
1068
- case INDEX_op_qemu_ld_a32_i64:
1069
+ case INDEX_op_qemu_ld_i64:
1070
return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
1071
- case INDEX_op_qemu_ld_a64_i64:
1072
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
1073
1074
- case INDEX_op_qemu_st_a32_i32:
1075
+ case INDEX_op_qemu_st_i32:
1076
return C_O0_I2(r, r);
1077
- case INDEX_op_qemu_st_a64_i32:
1078
+ case INDEX_op_qemu_st_i64:
1079
return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
1080
- case INDEX_op_qemu_st_a32_i64:
1081
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
1082
- case INDEX_op_qemu_st_a64_i64:
1083
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);
1084
1085
- case INDEX_op_qemu_ld_a32_i128:
1086
- case INDEX_op_qemu_ld_a64_i128:
1087
+ case INDEX_op_qemu_ld_i128:
1088
return C_N1O1_I1(o, m, r);
1089
- case INDEX_op_qemu_st_a32_i128:
1090
- case INDEX_op_qemu_st_a64_i128:
1091
+ case INDEX_op_qemu_st_i128:
1092
return C_O0_I3(o, m, r);
1093
1094
case INDEX_op_add_vec:
1095
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
1096
index XXXXXXX..XXXXXXX 100644
1097
--- a/tcg/riscv/tcg-target.c.inc
1098
+++ b/tcg/riscv/tcg-target.c.inc
1099
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1100
args[3], const_args[3], args[4], const_args[4]);
1101
break;
1102
1103
- case INDEX_op_qemu_ld_a32_i32:
1104
- case INDEX_op_qemu_ld_a64_i32:
1105
+ case INDEX_op_qemu_ld_i32:
1106
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
1107
break;
1108
- case INDEX_op_qemu_ld_a32_i64:
1109
- case INDEX_op_qemu_ld_a64_i64:
1110
+ case INDEX_op_qemu_ld_i64:
1111
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
1112
break;
1113
- case INDEX_op_qemu_st_a32_i32:
1114
- case INDEX_op_qemu_st_a64_i32:
1115
+ case INDEX_op_qemu_st_i32:
1116
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1117
break;
1118
- case INDEX_op_qemu_st_a32_i64:
1119
- case INDEX_op_qemu_st_a64_i64:
1120
+ case INDEX_op_qemu_st_i64:
1121
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1122
break;
1123
1124
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1125
case INDEX_op_sub2_i64:
1126
return C_O2_I4(r, r, rZ, rZ, rM, rM);
1127
1128
- case INDEX_op_qemu_ld_a32_i32:
1129
- case INDEX_op_qemu_ld_a64_i32:
1130
- case INDEX_op_qemu_ld_a32_i64:
1131
- case INDEX_op_qemu_ld_a64_i64:
1132
+ case INDEX_op_qemu_ld_i32:
1133
+ case INDEX_op_qemu_ld_i64:
1134
return C_O1_I1(r, r);
1135
- case INDEX_op_qemu_st_a32_i32:
1136
- case INDEX_op_qemu_st_a64_i32:
1137
- case INDEX_op_qemu_st_a32_i64:
1138
- case INDEX_op_qemu_st_a64_i64:
1139
+ case INDEX_op_qemu_st_i32:
1140
+ case INDEX_op_qemu_st_i64:
1141
return C_O0_I2(rZ, r);
1142
1143
case INDEX_op_st_vec:
1144
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
1145
index XXXXXXX..XXXXXXX 100644
1146
--- a/tcg/s390x/tcg-target.c.inc
1147
+++ b/tcg/s390x/tcg-target.c.inc
1148
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1149
args[2], const_args[2], args[3], const_args[3], args[4]);
1150
break;
1151
1152
- case INDEX_op_qemu_ld_a32_i32:
1153
- case INDEX_op_qemu_ld_a64_i32:
1154
+ case INDEX_op_qemu_ld_i32:
1155
tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
1156
break;
1157
- case INDEX_op_qemu_ld_a32_i64:
1158
- case INDEX_op_qemu_ld_a64_i64:
1159
+ case INDEX_op_qemu_ld_i64:
1160
tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
1161
break;
1162
- case INDEX_op_qemu_st_a32_i32:
1163
- case INDEX_op_qemu_st_a64_i32:
1164
+ case INDEX_op_qemu_st_i32:
1165
tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
1166
break;
1167
- case INDEX_op_qemu_st_a32_i64:
1168
- case INDEX_op_qemu_st_a64_i64:
1169
+ case INDEX_op_qemu_st_i64:
1170
tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
1171
break;
1172
- case INDEX_op_qemu_ld_a32_i128:
1173
- case INDEX_op_qemu_ld_a64_i128:
1174
+ case INDEX_op_qemu_ld_i128:
1175
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
1176
break;
1177
- case INDEX_op_qemu_st_a32_i128:
1178
- case INDEX_op_qemu_st_a64_i128:
1179
+ case INDEX_op_qemu_st_i128:
1180
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
1181
break;
1182
1183
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1184
case INDEX_op_ctpop_i64:
1185
return C_O1_I1(r, r);
1186
1187
- case INDEX_op_qemu_ld_a32_i32:
1188
- case INDEX_op_qemu_ld_a64_i32:
1189
- case INDEX_op_qemu_ld_a32_i64:
1190
- case INDEX_op_qemu_ld_a64_i64:
1191
+ case INDEX_op_qemu_ld_i32:
1192
+ case INDEX_op_qemu_ld_i64:
1193
return C_O1_I1(r, r);
1194
- case INDEX_op_qemu_st_a32_i64:
1195
- case INDEX_op_qemu_st_a64_i64:
1196
- case INDEX_op_qemu_st_a32_i32:
1197
- case INDEX_op_qemu_st_a64_i32:
1198
+ case INDEX_op_qemu_st_i64:
1199
+ case INDEX_op_qemu_st_i32:
1200
return C_O0_I2(r, r);
1201
- case INDEX_op_qemu_ld_a32_i128:
1202
- case INDEX_op_qemu_ld_a64_i128:
1203
+ case INDEX_op_qemu_ld_i128:
1204
return C_O2_I1(o, m, r);
1205
- case INDEX_op_qemu_st_a32_i128:
1206
- case INDEX_op_qemu_st_a64_i128:
1207
+ case INDEX_op_qemu_st_i128:
1208
return C_O0_I3(o, m, r);
1209
1210
case INDEX_op_deposit_i32:
1211
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
1212
index XXXXXXX..XXXXXXX 100644
1213
--- a/tcg/sparc64/tcg-target.c.inc
1214
+++ b/tcg/sparc64/tcg-target.c.inc
1215
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1216
tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
1217
break;
1218
1219
- case INDEX_op_qemu_ld_a32_i32:
1220
- case INDEX_op_qemu_ld_a64_i32:
1221
+ case INDEX_op_qemu_ld_i32:
1222
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
1223
break;
1224
- case INDEX_op_qemu_ld_a32_i64:
1225
- case INDEX_op_qemu_ld_a64_i64:
1226
+ case INDEX_op_qemu_ld_i64:
1227
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
1228
break;
1229
- case INDEX_op_qemu_st_a32_i32:
1230
- case INDEX_op_qemu_st_a64_i32:
1231
+ case INDEX_op_qemu_st_i32:
1232
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1233
break;
1234
- case INDEX_op_qemu_st_a32_i64:
1235
- case INDEX_op_qemu_st_a64_i64:
1236
+ case INDEX_op_qemu_st_i64:
1237
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1238
break;
1239
1240
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1241
case INDEX_op_extu_i32_i64:
1242
case INDEX_op_extract_i64:
1243
case INDEX_op_sextract_i64:
1244
- case INDEX_op_qemu_ld_a32_i32:
1245
- case INDEX_op_qemu_ld_a64_i32:
1246
- case INDEX_op_qemu_ld_a32_i64:
1247
- case INDEX_op_qemu_ld_a64_i64:
1248
+ case INDEX_op_qemu_ld_i32:
1249
+ case INDEX_op_qemu_ld_i64:
1250
return C_O1_I1(r, r);
1251
1252
case INDEX_op_st8_i32:
1253
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1254
case INDEX_op_st_i32:
1255
case INDEX_op_st32_i64:
1256
case INDEX_op_st_i64:
1257
- case INDEX_op_qemu_st_a32_i32:
1258
- case INDEX_op_qemu_st_a64_i32:
1259
- case INDEX_op_qemu_st_a32_i64:
1260
- case INDEX_op_qemu_st_a64_i64:
1261
+ case INDEX_op_qemu_st_i32:
1262
+ case INDEX_op_qemu_st_i64:
1263
return C_O0_I2(rZ, r);
1264
1265
case INDEX_op_add_i32:
1266
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
1267
index XXXXXXX..XXXXXXX 100644
1268
--- a/tcg/tci/tcg-target.c.inc
1269
+++ b/tcg/tci/tcg-target.c.inc
1270
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1271
case INDEX_op_setcond2_i32:
1272
return C_O1_I4(r, r, r, r, r);
1273
1274
- case INDEX_op_qemu_ld_a32_i32:
1275
+ case INDEX_op_qemu_ld_i32:
1276
return C_O1_I1(r, r);
1277
- case INDEX_op_qemu_ld_a64_i32:
1278
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
1279
- case INDEX_op_qemu_ld_a32_i64:
1280
+ case INDEX_op_qemu_ld_i64:
1281
return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
1282
- case INDEX_op_qemu_ld_a64_i64:
1283
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
1284
- case INDEX_op_qemu_st_a32_i32:
1285
+ case INDEX_op_qemu_st_i32:
1286
return C_O0_I2(r, r);
1287
- case INDEX_op_qemu_st_a64_i32:
1288
+ case INDEX_op_qemu_st_i64:
1289
return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
1290
- case INDEX_op_qemu_st_a32_i64:
1291
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
1292
- case INDEX_op_qemu_st_a64_i64:
1293
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);
1294
1295
default:
1296
return C_NotImplemented;
1297
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
1298
tcg_out32(s, insn);
51
}
1299
}
52
1300
53
+#ifdef CONFIG_TCG
1301
-static void tcg_out_op_rrrrr(TCGContext *s, TCGOpcode op, TCGReg r0,
54
+
1302
- TCGReg r1, TCGReg r2, TCGReg r3, TCGReg r4)
55
/*
1303
-{
56
* Return true if this watchpoint address matches the specified
1304
- tcg_insn_unit insn = 0;
57
* access (ie the address range covered by the watchpoint overlaps
1305
-
58
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
1306
- insn = deposit32(insn, 0, 8, op);
59
}
1307
- insn = deposit32(insn, 8, 4, r0);
60
}
1308
- insn = deposit32(insn, 12, 4, r1);
61
}
1309
- insn = deposit32(insn, 16, 4, r2);
62
+
1310
- insn = deposit32(insn, 20, 4, r3);
63
+#endif /* CONFIG_TCG */
1311
- insn = deposit32(insn, 24, 4, r4);
64
diff --git a/softmmu/meson.build b/softmmu/meson.build
1312
- tcg_out32(s, insn);
65
index XXXXXXX..XXXXXXX 100644
1313
-}
66
--- a/softmmu/meson.build
1314
-
67
+++ b/softmmu/meson.build
1315
static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
68
@@ -XXX,XX +XXX,XX @@ specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files(
1316
TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
69
'physmem.c',
1317
{
70
'qtest.c',
1318
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
71
'dirtylimit.c',
1319
tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
72
+ 'watchpoint.c',
1320
break;
73
)])
1321
74
1322
- case INDEX_op_qemu_ld_a32_i32:
75
specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: [files(
1323
- case INDEX_op_qemu_st_a32_i32:
76
'icount.c',
1324
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
77
- 'watchpoint.c',
1325
- break;
78
)])
1326
- case INDEX_op_qemu_ld_a64_i32:
79
1327
- case INDEX_op_qemu_st_a64_i32:
80
softmmu_ss.add(files(
1328
- case INDEX_op_qemu_ld_a32_i64:
1329
- case INDEX_op_qemu_st_a32_i64:
1330
- if (TCG_TARGET_REG_BITS == 64) {
1331
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
1332
- } else {
1333
+ case INDEX_op_qemu_ld_i64:
1334
+ case INDEX_op_qemu_st_i64:
1335
+ if (TCG_TARGET_REG_BITS == 32) {
1336
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
1337
tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
1338
+ break;
1339
}
1340
- break;
1341
- case INDEX_op_qemu_ld_a64_i64:
1342
- case INDEX_op_qemu_st_a64_i64:
1343
- if (TCG_TARGET_REG_BITS == 64) {
1344
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
1345
+ /* fall through */
1346
+ case INDEX_op_qemu_ld_i32:
1347
+ case INDEX_op_qemu_st_i32:
1348
+ if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
1349
+ tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
1350
+ tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
1351
} else {
1352
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]);
1353
- tcg_out_op_rrrrr(s, opc, args[0], args[1],
1354
- args[2], args[3], TCG_REG_TMP);
1355
+ tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
1356
}
1357
break;
1358
81
--
1359
--
82
2.34.1
1360
2.43.0
83
84
diff view generated by jsdifflib
New patch
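(Illustrative sketch, not part of the series: with the _a32/_a64 opcode split
gone, the guest address width is implied by s->addr_type instead of the
opcode, so a 32-bit guest address held in a 64-bit host register is simply
zero-extended, as in the tci backend hunk above.  The demo below is a
standalone C model of that zero-extension, with made-up variable names.)

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A 64-bit host register whose high half holds stale bits. */
        uint64_t reg = 0xdeadbeef00001000ull;

        /* What tcg_out_ext32u() emits: keep only the low 32 address bits. */
        uint64_t addr = (uint32_t)reg;

        printf("guest address = 0x%" PRIx64 "\n", addr);  /* 0x1000 */
        return 0;
    }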
The guest address will now always be TCG_TYPE_I32.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.c.inc | 63 ++++++++++++++--------------------------
 1 file changed, 21 insertions(+), 42 deletions(-)

diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 #define MIN_TLB_MASK_TABLE_OFS  -256

 static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
-                                           TCGReg addrlo, TCGReg addrhi,
-                                           MemOpIdx oi, bool is_ld)
+                                           TCGReg addr, MemOpIdx oi, bool is_ld)
 {
     TCGLabelQemuLdst *ldst = NULL;
     MemOp opc = get_memop(oi);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     if (tcg_use_softmmu) {
         *h = (HostAddress){
             .cond = COND_AL,
-            .base = addrlo,
+            .base = addr,
             .index = TCG_REG_R1,
             .index_scratch = true,
         };
     } else {
         *h = (HostAddress){
             .cond = COND_AL,
-            .base = addrlo,
+            .base = addr,
             .index = guest_base ? TCG_REG_GUEST_BASE : -1,
             .index_scratch = false,
         };
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         ldst = new_ldst_label(s);
         ldst->is_ld = is_ld;
         ldst->oi = oi;
-        ldst->addrlo_reg = addrlo;
-        ldst->addrhi_reg = addrhi;
+        ldst->addrlo_reg = addr;

         /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
         QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

         /* Extract the tlb index from the address into R0. */
-        tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
+        tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addr,
                         SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));

         /*
          * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
-         * Load the tlb comparator into R2/R3 and the fast path addend into R1.
+         * Load the tlb comparator into R2 and the fast path addend into R1.
          */
         QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
         if (cmp_off == 0) {
-            if (s->addr_type == TCG_TYPE_I32) {
-                tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2,
-                                 TCG_REG_R1, TCG_REG_R0);
-            } else {
-                tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2,
-                                 TCG_REG_R1, TCG_REG_R0);
-            }
+            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
         } else {
             tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                             TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
-            if (s->addr_type == TCG_TYPE_I32) {
-                tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
-            } else {
-                tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
-            }
+            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
         }

         /* Load the tlb addend. */
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
          * This leaves the least significant alignment bits unchanged, and of
          * course must be zero.
          */
-        t_addr = addrlo;
+        t_addr = addr;
         if (a_mask < s_mask) {
             t_addr = TCG_REG_R0;
             tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
-                            addrlo, s_mask - a_mask);
+                            addr, s_mask - a_mask);
         }
         if (use_armv7_instructions && s->page_bits <= 16) {
             tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         } else {
             if (a_mask) {
                 tcg_debug_assert(a_mask <= 0xff);
-                tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
+                tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
             }
             tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                             SHIFT_IMM_LSR(s->page_bits));
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                             0, TCG_REG_R2, TCG_REG_TMP,
                             SHIFT_IMM_LSL(s->page_bits));
         }
-
-        if (s->addr_type != TCG_TYPE_I32) {
-            tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
-        }
     } else if (a_mask) {
         ldst = new_ldst_label(s);
         ldst->is_ld = is_ld;
         ldst->oi = oi;
-        ldst->addrlo_reg = addrlo;
-        ldst->addrhi_reg = addrhi;
+        ldst->addrlo_reg = addr;

         /* We are expecting alignment to max out at 7 */
         tcg_debug_assert(a_mask <= 0xff);
         /* tst addr, #mask */
-        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
+        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
     }

     return ldst;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
 }

 static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
-                            TCGReg addrlo, TCGReg addrhi,
-                            MemOpIdx oi, TCGType data_type)
+                            TCGReg addr, MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
     TCGLabelQemuLdst *ldst;
     HostAddress h;

-    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
+    ldst = prepare_host_addr(s, &h, addr, oi, true);
     if (ldst) {
         ldst->type = data_type;
         ldst->datalo_reg = datalo;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
 }

 static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
-                            TCGReg addrlo, TCGReg addrhi,
-                            MemOpIdx oi, TCGType data_type)
+                            TCGReg addr, MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
     TCGLabelQemuLdst *ldst;
     HostAddress h;

-    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
+    ldst = prepare_host_addr(s, &h, addr, oi, false);
     if (ldst) {
         ldst->type = data_type;
         ldst->datalo_reg = datalo;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         break;

     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
+        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
-                        args[3], TCG_TYPE_I64);
+        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
         break;

     case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
+        tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
-                        args[3], TCG_TYPE_I64);
+        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
         break;

     case INDEX_op_bswap16_i32:
--
2.43.0
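(Illustrative sketch, not part of the series: the commpage bounds check and
the "last byte, not end" patches below both work around the same pitfall:
an exclusive end address wraps to zero when a range touches the top of the
address space, while an inclusive last byte stays representable.  Standalone
C demo of that wraparound:)

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* The last page of a 32-bit guest address space. */
        uint32_t start = 0xfffff000u;
        uint32_t len   = 0x1000u;

        uint32_t end  = start + len;      /* wraps to 0: [start, end) looks empty */
        uint32_t last = start + len - 1;  /* 0xffffffff: still representable */

        printf("end  = 0x%x\n", end);
        printf("last = 0x%x\n", last);
        return 0;
    }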
1
User setting of -R reserved_va can lead to an assertion
1
The guest address will now always fit in one register.
2
failure in page_set_flags. Sanity check the value of
3
reserved_va and print an error message instead. Do not
4
allocate a commpage at all for m-profile cpus.
5
2
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
linux-user/elfload.c | 37 +++++++++++++++++++++++++++----------
6
tcg/i386/tcg-target.c.inc | 56 ++++++++++++++-------------------------
10
1 file changed, 27 insertions(+), 10 deletions(-)
7
1 file changed, 20 insertions(+), 36 deletions(-)
11
8
12
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
9
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/linux-user/elfload.c
11
--- a/tcg/i386/tcg-target.c.inc
15
+++ b/linux-user/elfload.c
12
+++ b/tcg/i386/tcg-target.c.inc
16
@@ -XXX,XX +XXX,XX @@ enum {
13
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
17
14
* is required and fill in @h with the host address for the fast path.
18
static bool init_guest_commpage(void)
15
*/
16
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
17
- TCGReg addrlo, TCGReg addrhi,
18
- MemOpIdx oi, bool is_ld)
19
+ TCGReg addr, MemOpIdx oi, bool is_ld)
19
{
20
{
20
- abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
21
TCGLabelQemuLdst *ldst = NULL;
21
- void *want = g2h_untagged(commpage);
22
MemOp opc = get_memop(oi);
22
- void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
23
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
23
- MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
24
} else {
24
+ ARMCPU *cpu = ARM_CPU(thread_cpu);
25
*h = x86_guest_base;
25
+ abi_ptr want = HI_COMMPAGE & TARGET_PAGE_MASK;
26
+ abi_ptr addr;
27
28
- if (addr == MAP_FAILED) {
29
+ /*
30
+ * M-profile allocates maximum of 2GB address space, so can never
31
+ * allocate the commpage. Skip it.
32
+ */
33
+ if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
34
+ return true;
35
+ }
36
+
37
+ /*
38
+ * If reserved_va does not cover the commpage, we get an assert
39
+ * in page_set_flags. Produce an intelligent error instead.
40
+ */
41
+ if (reserved_va != 0 && want + TARGET_PAGE_SIZE - 1 > reserved_va) {
42
+ error_report("Allocating guest commpage: -R 0x%" PRIx64 " too small",
43
+ (uint64_t)reserved_va + 1);
44
+ exit(EXIT_FAILURE);
45
+ }
46
+
47
+ addr = target_mmap(want, TARGET_PAGE_SIZE, PROT_READ | PROT_WRITE,
48
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
49
+
50
+ if (addr == -1) {
51
perror("Allocating guest commpage");
52
exit(EXIT_FAILURE);
53
}
26
}
54
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
27
- h->base = addrlo;
55
}
28
+ h->base = addr;
56
29
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
57
/* Set kernel helper versions; rest of page is 0. */
30
a_mask = (1 << h->aa.align) - 1;
58
- __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
31
59
+ put_user_u32(5, 0xffff0ffcu);
32
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
60
33
ldst = new_ldst_label(s);
61
- if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
34
ldst->is_ld = is_ld;
62
+ if (target_mprotect(addr, qemu_host_page_size, PROT_READ | PROT_EXEC)) {
35
ldst->oi = oi;
63
perror("Protecting guest commpage");
36
- ldst->addrlo_reg = addrlo;
64
exit(EXIT_FAILURE);
37
- ldst->addrhi_reg = addrhi;
65
}
38
+ ldst->addrlo_reg = addr;
39
40
if (TCG_TARGET_REG_BITS == 64) {
41
ttype = s->addr_type;
42
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
43
}
44
}
45
46
- tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
47
+ tcg_out_mov(s, tlbtype, TCG_REG_L0, addr);
48
tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
49
s->page_bits - CPU_TLB_ENTRY_BITS);
50
51
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
52
* check that we don't cross pages for the complete access.
53
*/
54
if (a_mask >= s_mask) {
55
- tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
56
+ tcg_out_mov(s, ttype, TCG_REG_L1, addr);
57
} else {
58
tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
59
- addrlo, s_mask - a_mask);
60
+ addr, s_mask - a_mask);
61
}
62
tlb_mask = s->page_mask | a_mask;
63
tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
64
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
65
ldst->label_ptr[0] = s->code_ptr;
66
s->code_ptr += 4;
67
68
- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) {
69
- /* cmp 4(TCG_REG_L0), addrhi */
70
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi,
71
- TCG_REG_L0, cmp_ofs + 4);
66
-
72
-
67
- page_set_flags(commpage, commpage | ~qemu_host_page_mask,
73
- /* jne slow_path */
68
- PAGE_READ | PAGE_EXEC | PAGE_VALID);
74
- tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
69
return true;
75
- ldst->label_ptr[1] = s->code_ptr;
76
- s->code_ptr += 4;
77
- }
78
-
79
/* TLB Hit. */
80
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
81
offsetof(CPUTLBEntry, addend));
82
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
83
ldst = new_ldst_label(s);
84
ldst->is_ld = is_ld;
85
ldst->oi = oi;
86
- ldst->addrlo_reg = addrlo;
87
- ldst->addrhi_reg = addrhi;
88
+ ldst->addrlo_reg = addr;
89
90
/* jne slow_path */
91
- jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addrlo, a_mask, true, false);
92
+ jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addr, a_mask, true, false);
93
tcg_out_opc(s, OPC_JCC_long + jcc, 0, 0, 0);
94
ldst->label_ptr[0] = s->code_ptr;
95
s->code_ptr += 4;
96
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
70
}
97
}
71
98
99
static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
100
- TCGReg addrlo, TCGReg addrhi,
101
- MemOpIdx oi, TCGType data_type)
102
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
103
{
104
TCGLabelQemuLdst *ldst;
105
HostAddress h;
106
107
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
108
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
109
tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, get_memop(oi));
110
111
if (ldst) {
112
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
113
}
114
115
static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
116
- TCGReg addrlo, TCGReg addrhi,
117
- MemOpIdx oi, TCGType data_type)
118
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
119
{
120
TCGLabelQemuLdst *ldst;
121
HostAddress h;
122
123
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
124
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
125
tcg_out_qemu_st_direct(s, datalo, datahi, h, get_memop(oi));
126
127
if (ldst) {
128
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
129
break;
130
131
case INDEX_op_qemu_ld_i32:
132
- tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
133
+ tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I32);
134
break;
135
case INDEX_op_qemu_ld_i64:
136
if (TCG_TARGET_REG_BITS == 64) {
137
- tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
138
+ tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I64);
139
} else {
140
- tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
141
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I64);
142
}
143
break;
144
case INDEX_op_qemu_ld_i128:
145
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
146
- tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
147
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I128);
148
break;
149
150
case INDEX_op_qemu_st_i32:
151
case INDEX_op_qemu_st8_i32:
152
- tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
153
+ tcg_out_qemu_st(s, a0, -1, a1, a2, TCG_TYPE_I32);
154
break;
155
case INDEX_op_qemu_st_i64:
156
if (TCG_TARGET_REG_BITS == 64) {
157
- tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
158
+ tcg_out_qemu_st(s, a0, -1, a1, a2, TCG_TYPE_I64);
159
} else {
160
- tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
161
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I64);
162
}
163
break;
164
case INDEX_op_qemu_st_i128:
165
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
166
- tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
167
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I128);
168
break;
169
170
OP_32_64(mulu2):
72
--
171
--
73
2.34.1
172
2.43.0
74
173
75
174
diff view generated by jsdifflib
1
Pass the address of the last byte to be changed, rather than
1
The guest address will now always fit in one register.
2
the first address past the last byte. This avoids overflow
3
when the last page of the address space is involved.
4
2
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
accel/tcg/tb-maint.c | 28 ++++++++++++++++------------
6
tcg/mips/tcg-target.c.inc | 62 ++++++++++++++-------------------------
9
1 file changed, 16 insertions(+), 12 deletions(-)
7
1 file changed, 22 insertions(+), 40 deletions(-)
10
8
11
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
9
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/tb-maint.c
11
--- a/tcg/mips/tcg-target.c.inc
14
+++ b/accel/tcg/tb-maint.c
12
+++ b/tcg/mips/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ static void tb_remove(TranslationBlock *tb)
13
@@ -XXX,XX +XXX,XX @@ bool tcg_target_has_memory_bswap(MemOp memop)
14
* is required and fill in @h with the host address for the fast path.
15
*/
16
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
17
- TCGReg addrlo, TCGReg addrhi,
18
- MemOpIdx oi, bool is_ld)
19
+ TCGReg addr, MemOpIdx oi, bool is_ld)
20
{
21
TCGType addr_type = s->addr_type;
22
TCGLabelQemuLdst *ldst = NULL;
23
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
24
ldst = new_ldst_label(s);
25
ldst->is_ld = is_ld;
26
ldst->oi = oi;
27
- ldst->addrlo_reg = addrlo;
28
- ldst->addrhi_reg = addrhi;
29
+ ldst->addrlo_reg = addr;
30
31
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
32
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
33
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
34
35
/* Extract the TLB index from the address into TMP3. */
36
if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
37
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo,
38
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addr,
39
s->page_bits - CPU_TLB_ENTRY_BITS);
40
} else {
41
- tcg_out_dsrl(s, TCG_TMP3, addrlo,
42
- s->page_bits - CPU_TLB_ENTRY_BITS);
43
+ tcg_out_dsrl(s, TCG_TMP3, addr, s->page_bits - CPU_TLB_ENTRY_BITS);
44
}
45
tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
46
47
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
48
tcg_out_opc_imm(s, (TCG_TARGET_REG_BITS == 32
49
|| addr_type == TCG_TYPE_I32
50
? OPC_ADDIU : OPC_DADDIU),
51
- TCG_TMP2, addrlo, s_mask - a_mask);
52
+ TCG_TMP2, addr, s_mask - a_mask);
53
tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
54
} else {
55
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo);
56
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addr);
57
}
58
59
/* Zero extend a 32-bit guest address for a 64-bit host. */
60
if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
61
- tcg_out_ext32u(s, TCG_TMP2, addrlo);
62
- addrlo = TCG_TMP2;
63
+ tcg_out_ext32u(s, TCG_TMP2, addr);
64
+ addr = TCG_TMP2;
65
}
66
67
ldst->label_ptr[0] = s->code_ptr;
68
tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
69
70
- /* Load and test the high half tlb comparator. */
71
- if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
72
- /* delay slot */
73
- tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
74
-
75
- /* Load the tlb addend for the fast path. */
76
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
77
-
78
- ldst->label_ptr[1] = s->code_ptr;
79
- tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0);
80
- }
81
-
82
/* delay slot */
83
base = TCG_TMP3;
84
- tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo);
85
+ tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addr);
86
} else {
87
if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) {
88
ldst = new_ldst_label(s);
89
90
ldst->is_ld = is_ld;
91
ldst->oi = oi;
92
- ldst->addrlo_reg = addrlo;
93
- ldst->addrhi_reg = addrhi;
94
+ ldst->addrlo_reg = addr;
95
96
/* We are expecting a_bits to max out at 7, much lower than ANDI. */
97
tcg_debug_assert(a_bits < 16);
98
- tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
99
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addr, a_mask);
100
101
ldst->label_ptr[0] = s->code_ptr;
102
if (use_mips32r6_instructions) {
103
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
104
}
105
}
106
107
- base = addrlo;
108
+ base = addr;
109
if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
110
tcg_out_ext32u(s, TCG_REG_A0, base);
111
base = TCG_REG_A0;
112
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
16
}
113
}
17
114
18
/* TODO: For now, still shared with translate-all.c for system mode. */
115
static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
19
-#define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N) \
116
- TCGReg addrlo, TCGReg addrhi,
20
- for (T = foreach_tb_first(start, end), \
117
- MemOpIdx oi, TCGType data_type)
21
- N = foreach_tb_next(T, start, end); \
118
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
22
+#define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N) \
23
+ for (T = foreach_tb_first(start, last), \
24
+ N = foreach_tb_next(T, start, last); \
25
T != NULL; \
26
- T = N, N = foreach_tb_next(N, start, end))
27
+ T = N, N = foreach_tb_next(N, start, last))
28
29
typedef TranslationBlock *PageForEachNext;
30
31
static PageForEachNext foreach_tb_first(tb_page_addr_t start,
32
- tb_page_addr_t end)
33
+ tb_page_addr_t last)
34
{
119
{
35
- IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1);
120
MemOp opc = get_memop(oi);
36
+ IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last);
121
TCGLabelQemuLdst *ldst;
37
return n ? container_of(n, TranslationBlock, itree) : NULL;
122
HostAddress h;
123
124
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
125
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
126
127
if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
128
tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, data_type);
129
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
38
}
130
}
39
131
40
static PageForEachNext foreach_tb_next(PageForEachNext tb,
132
static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
41
tb_page_addr_t start,
133
- TCGReg addrlo, TCGReg addrhi,
42
- tb_page_addr_t end)
134
- MemOpIdx oi, TCGType data_type)
43
+ tb_page_addr_t last)
135
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
44
{
136
{
45
IntervalTreeNode *n;
137
MemOp opc = get_memop(oi);
46
138
TCGLabelQemuLdst *ldst;
47
if (tb) {
139
HostAddress h;
48
- n = interval_tree_iter_next(&tb->itree, start, end - 1);
140
49
+ n = interval_tree_iter_next(&tb->itree, start, last);
141
- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
50
if (n) {
142
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
51
return container_of(n, TranslationBlock, itree);
143
144
if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
145
tcg_out_qemu_st_direct(s, datalo, datahi, h.base, opc);
146
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
147
break;
148
149
case INDEX_op_qemu_ld_i32:
150
- tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
151
+ tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I32);
152
break;
153
case INDEX_op_qemu_ld_i64:
154
if (TCG_TARGET_REG_BITS == 64) {
155
- tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
156
+ tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I64);
157
} else {
158
- tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
159
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I64);
52
}
160
}
53
@@ -XXX,XX +XXX,XX @@ struct page_collection {
161
break;
54
};
162
55
163
case INDEX_op_qemu_st_i32:
56
typedef int PageForEachNext;
164
- tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
57
-#define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \
165
+ tcg_out_qemu_st(s, a0, 0, a1, a2, TCG_TYPE_I32);
58
+#define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \
166
break;
59
TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
167
case INDEX_op_qemu_st_i64:
60
168
if (TCG_TARGET_REG_BITS == 64) {
61
#ifdef CONFIG_DEBUG_TCG
169
- tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
62
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
170
+ tcg_out_qemu_st(s, a0, 0, a1, a2, TCG_TYPE_I64);
63
{
171
} else {
64
TranslationBlock *tb;
172
- tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
65
PageForEachNext n;
173
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I64);
66
+ tb_page_addr_t last = end - 1;
174
}
67
175
break;
68
assert_memory_lock();
176
69
70
- PAGE_FOR_EACH_TB(start, end, unused, tb, n) {
71
+ PAGE_FOR_EACH_TB(start, last, unused, tb, n) {
72
tb_phys_invalidate__locked(tb);
73
}
74
}
75
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
76
bool current_tb_modified;
77
TranslationBlock *tb;
78
PageForEachNext n;
79
+ tb_page_addr_t last;
80
81
/*
82
* Without precise smc semantics, or when outside of a TB,
83
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
84
assert_memory_lock();
85
current_tb = tcg_tb_lookup(pc);
86
87
+ last = addr | ~TARGET_PAGE_MASK;
88
addr &= TARGET_PAGE_MASK;
89
current_tb_modified = false;
90
91
- PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) {
92
+ PAGE_FOR_EACH_TB(addr, last, unused, tb, n) {
93
if (current_tb == tb &&
94
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
95
/*
96
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
97
bool current_tb_modified = false;
98
TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
99
#endif /* TARGET_HAS_PRECISE_SMC */
100
+ tb_page_addr_t last G_GNUC_UNUSED = end - 1;
101
102
/*
103
* We remove all the TBs in the range [start, end[.
104
* XXX: see if in some cases it could be faster to invalidate all the code
105
*/
106
- PAGE_FOR_EACH_TB(start, end, p, tb, n) {
107
+ PAGE_FOR_EACH_TB(start, last, p, tb, n) {
108
/* NOTE: this is subtle as a TB may span two physical pages */
109
if (n == 0) {
110
/* NOTE: tb_end may be after the end of the page, but
111
--
177
--
112
2.34.1
178
2.43.0
113
179
114
180
diff view generated by jsdifflib
Change the semantics to be the last byte of the guest va, rather
than the following byte.  This avoids some overflow conditions.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h      | 11 ++++++++++-
 linux-user/arm/target_cpu.h |  2 +-
 bsd-user/main.c             | 10 +++-------
 bsd-user/mmap.c             |  4 ++--
 linux-user/elfload.c        | 14 +++++++-------
 linux-user/main.c           | 27 +++++++++++++--------------
 linux-user/mmap.c           |  4 ++--
 7 files changed, 38 insertions(+), 34 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ static inline void tswap64s(uint64_t *s)
  */
 extern uintptr_t guest_base;
 extern bool have_guest_base;
+
+/*
+ * If non-zero, the guest virtual address space is a contiguous subset
+ * of the host virtual address space, i.e. '-R reserved_va' is in effect
+ * either from the command-line or by default.  The value is the last
+ * byte of the guest address space e.g. UINT32_MAX.
+ *
+ * If zero, the host and guest virtual address spaces are intermingled.
+ */
 extern unsigned long reserved_va;

 /*
@@ -XXX,XX +XXX,XX @@ extern unsigned long reserved_va;
 #define GUEST_ADDR_MAX_ \
     ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
      UINT32_MAX : ~0ul)
-#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
+#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)

 #else

diff --git a/linux-user/arm/target_cpu.h b/linux-user/arm/target_cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/arm/target_cpu.h
+++ b/linux-user/arm/target_cpu.h
@@ -XXX,XX +XXX,XX @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
          * the high addresses.  Restrict linux-user to the
          * cached write-back RAM in the system map.
          */
-        return 0x80000000ul;
+        return 0x7ffffffful;
     } else {
         /*
          * We need to be able to map the commpage.
diff --git a/bsd-user/main.c b/bsd-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -XXX,XX +XXX,XX @@ bool have_guest_base;
 # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
 #  if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
       (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/*
- * There are a number of places where we assign reserved_va to a variable
- * of type abi_ulong and expect it to fit. Avoid the last page.
- */
-#   define MAX_RESERVED_VA  (0xfffffffful & TARGET_PAGE_MASK)
+#   define MAX_RESERVED_VA  0xfffffffful
 #  else
-#   define MAX_RESERVED_VA  (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+#   define MAX_RESERVED_VA  ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
 #  endif
 # else
 #  define MAX_RESERVED_VA  0
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
     envlist_free(envlist);

     if (reserved_va) {
-        mmap_next_start = reserved_va;
+        mmap_next_start = reserved_va + 1;
     }

     {
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -XXX,XX +XXX,XX @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
     size = HOST_PAGE_ALIGN(size) + alignment;
     end_addr = start + size;
     if (end_addr > reserved_va) {
-        end_addr = reserved_va;
+        end_addr = reserved_va + 1;
     }
     addr = end_addr - qemu_host_page_size;

@@ -XXX,XX +XXX,XX @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
         if (looped) {
             return (abi_ulong)-1;
         }
-        end_addr = reserved_va;
+        end_addr = reserved_va + 1;
         addr = end_addr - qemu_host_page_size;
         looped = 1;
         continue;
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
      * has specified -R reserved_va, which would trigger an assert().
      */
     if (reserved_va != 0 &&
-        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
+        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
         error_report("Cannot allocate vsyscall page");
         exit(EXIT_FAILURE);
     }
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
         if (guest_hiaddr > reserved_va) {
             error_report("%s: requires more than reserved virtual "
                          "address space (0x%" PRIx64 " > 0x%lx)",
-                         image_name, (uint64_t)guest_hiaddr + 1, reserved_va);
+                         image_name, (uint64_t)guest_hiaddr, reserved_va);
             exit(EXIT_FAILURE);
         }
     } else {
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
     if (reserved_va) {
         guest_loaddr = (guest_base >= mmap_min_addr ? 0
                         : mmap_min_addr - guest_base);
-        guest_hiaddr = reserved_va - 1;
+        guest_hiaddr = reserved_va;
     }

     /* Reserve the address space for the binary, or reserved_va. */
@@ -XXX,XX +XXX,XX @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
     if (guest_hiaddr > reserved_va) {
         error_report("%s: requires more than reserved virtual "
                      "address space (0x%" PRIx64 " > 0x%lx)",
-                     image_name, (uint64_t)guest_hiaddr + 1, reserved_va);
+                     image_name, (uint64_t)guest_hiaddr, reserved_va);
         exit(EXIT_FAILURE);
     }

@@ -XXX,XX +XXX,XX @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
     /* Reserve the memory on the host. */
     assert(guest_base != 0);
     test = g2h_untagged(0);
-    addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
+    addr = mmap(test, reserved_va + 1, PROT_NONE, flags, -1, 0);
     if (addr == MAP_FAILED || addr != test) {
         error_report("Unable to reserve 0x%lx bytes of virtual address "
                      "space at %p (%s) for use as guest address space (check your "
                      "virtual memory ulimit setting, min_mmap_addr or reserve less "
-                     "using -R option)", reserved_va, test, strerror(errno));
+                     "using -R option)", reserved_va + 1, test, strerror(errno));
         exit(EXIT_FAILURE);
     }

     qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
-                  __func__, addr, reserved_va);
+                  __func__, addr, reserved_va + 1);
 }

 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
diff --git a/linux-user/main.c b/linux-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -XXX,XX +XXX,XX @@ static const char *last_log_filename;
 # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
 #  if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
       (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/* There are a number of places where we assign reserved_va to a variable
-   of type abi_ulong and expect it to fit. Avoid the last page. */
-#   define MAX_RESERVED_VA(CPU)  (0xfffffffful & TARGET_PAGE_MASK)
+#   define MAX_RESERVED_VA(CPU)  0xfffffffful
 #  else
-#   define MAX_RESERVED_VA(CPU)  (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+#   define MAX_RESERVED_VA(CPU)  ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
 #  endif
 # else
 #  define MAX_RESERVED_VA(CPU)  0
@@ -XXX,XX +XXX,XX @@ static void handle_arg_reserved_va(const char *arg)
 {
     char *p;
     int shift = 0;
-    reserved_va = strtoul(arg, &p, 0);
+    unsigned long val;
+
+    val = strtoul(arg, &p, 0);
     switch (*p) {
     case 'k':
     case 'K':
@@ -XXX,XX +XXX,XX @@ static void handle_arg_reserved_va(const char *arg)
         break;
     }
     if (shift) {
-        unsigned long unshifted = reserved_va;
+        unsigned long unshifted = val;
         p++;
-        reserved_va <<= shift;
-        if (reserved_va >> shift != unshifted) {
+        val <<= shift;
+        if (val >> shift != unshifted) {
             fprintf(stderr, "Reserved virtual address too big\n");
             exit(EXIT_FAILURE);
         }
@@ -XXX,XX +XXX,XX @@ static void handle_arg_reserved_va(const char *arg)
         fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
         exit(EXIT_FAILURE);
     }
+    /* The representation is size - 1, with 0 remaining "default". */
+    reserved_va = val ? val - 1 : 0;
 }

 static void handle_arg_singlestep(const char *arg)
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
      */
     max_reserved_va = MAX_RESERVED_VA(cpu);
     if (reserved_va != 0) {
-        if (reserved_va % qemu_host_page_size) {
+        if ((reserved_va + 1) % qemu_host_page_size) {
             char *s = size_to_str(qemu_host_page_size);
             fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
             g_free(s);
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
             exit(EXIT_FAILURE);
         }
     } else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) {
-        /*
-         * reserved_va must be aligned with the host page size
-         * as it is used with mmap()
-         */
-        reserved_va = max_reserved_va & qemu_host_page_mask;
+        /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
+        reserved_va = max_reserved_va;
     }

 {
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -XXX,XX +XXX,XX @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
     end_addr = start + size;
     if (start > reserved_va - size) {
         /* Start at the top of the address space. */
-        end_addr = ((reserved_va - size) & -align) + size;
+        end_addr = ((reserved_va + 1 - size) & -align) + size;
         looped = true;
     }

@@ -XXX,XX +XXX,XX @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
             return (abi_ulong)-1;
         }
         /* Re-start at the top of the address space. */
-        addr = end_addr = ((reserved_va - size) & -align) + size;
+        addr = end_addr = ((reserved_va + 1 - size) & -align) + size;
         looped = true;
     } else {
         prot = page_get_flags(addr);
--
2.34.1


The guest address will now always fit in one register.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 75 ++++++++++++----------------------------
 1 file changed, 23 insertions(+), 52 deletions(-)

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ bool tcg_target_has_memory_bswap(MemOp memop)
  * is required and fill in @h with the host address for the fast path.
  */
 static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
-                                           TCGReg addrlo, TCGReg addrhi,
-                                           MemOpIdx oi, bool is_ld)
+                                           TCGReg addr, MemOpIdx oi, bool is_ld)
 {
     TCGType addr_type = s->addr_type;
     TCGLabelQemuLdst *ldst = NULL;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         ldst = new_ldst_label(s);
         ldst->is_ld = is_ld;
         ldst->oi = oi;
-        ldst->addrlo_reg = addrlo;
-        ldst->addrhi_reg = addrhi;
+        ldst->addrlo_reg = addr;

         /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
         tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

         /* Extract the page index, shifted into place for tlb index. */
         if (TCG_TARGET_REG_BITS == 32) {
-            tcg_out_shri32(s, TCG_REG_R0, addrlo,
+            tcg_out_shri32(s, TCG_REG_R0, addr,
                            s->page_bits - CPU_TLB_ENTRY_BITS);
         } else {
-            tcg_out_shri64(s, TCG_REG_R0, addrlo,
+            tcg_out_shri64(s, TCG_REG_R0, addr,
                            s->page_bits - CPU_TLB_ENTRY_BITS);
         }
         tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         if (a_bits < s_bits) {
             a_bits = s_bits;
         }
-        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
+        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addr, 0,
                     (32 - a_bits) & 31, 31 - s->page_bits);
     } else {
-        TCGReg t = addrlo;
+        TCGReg t = addr;

         /*
          * If the access is unaligned, we need to make sure we fail if we
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         }
     }

-    if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
-        /* Low part comparison into cr7. */
-        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
-                    0, 7, TCG_TYPE_I32);
-
-        /* Load the high part TLB comparator into TMP2.  */
-        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
-                   cmp_off + 4 * !HOST_BIG_ENDIAN);
-
-        /* Load addend, deferred for this case. */
-        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
-                   offsetof(CPUTLBEntry, addend));
-
-        /* High part comparison into cr6. */
-        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2,
-                    0, 6, TCG_TYPE_I32);
-
-        /* Combine comparisons into cr0. */
-        tcg_out32(s, CRAND | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
-    } else {
-        /* Full comparison into cr0. */
-        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
-                    0, 0, addr_type);
-    }
+    /* Full comparison into cr0. */
+    tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, 0, 0, addr_type);

     /* Load a pointer into the current opcode w/conditional branch-link. */
     ldst->label_ptr[0] = s->code_ptr;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         ldst = new_ldst_label(s);
         ldst->is_ld = is_ld;
         ldst->oi = oi;
-        ldst->addrlo_reg = addrlo;
-        ldst->addrhi_reg = addrhi;
+        ldst->addrlo_reg = addr;

         /* We are expecting a_bits to max out at 7, much lower than ANDI. */
         tcg_debug_assert(a_bits < 16);
-        tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
+        tcg_out32(s, ANDI | SAI(addr, TCG_REG_R0, (1 << a_bits) - 1));

         ldst->label_ptr[0] = s->code_ptr;
         tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

     if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
         /* Zero-extend the guest address for use in the host address. */
-        tcg_out_ext32u(s, TCG_REG_TMP2, addrlo);
+        tcg_out_ext32u(s, TCG_REG_TMP2, addr);
         h->index = TCG_REG_TMP2;
     } else {
-        h->index = addrlo;
+        h->index = addr;
     }

     return ldst;
 }

 static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
-                            TCGReg addrlo, TCGReg addrhi,
-                            MemOpIdx oi, TCGType data_type)
+                            TCGReg addr, MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
     TCGLabelQemuLdst *ldst;
     HostAddress h;

-    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
+    ldst = prepare_host_addr(s, &h, addr, oi, true);

     if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
         if (opc & MO_BSWAP) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
 }

 static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
-                            TCGReg addrlo, TCGReg addrhi,
-                            MemOpIdx oi, TCGType data_type)
+                            TCGReg addr, MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
     TCGLabelQemuLdst *ldst;
     HostAddress h;

-    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
+    ldst = prepare_host_addr(s, &h, addr, oi, false);

     if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
         if (opc & MO_BSWAP) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
     uint32_t insn;
     TCGReg index;

-    ldst = prepare_host_addr(s, &h, addr_reg, -1, oi, is_ld);
+    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);

     /* Compose the final address, as LQ/STQ have no indexing. */
     index = h.index;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         break;

     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
+        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_ld_i64:
         if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
-                            args[2], TCG_TYPE_I64);
+            tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I64);
         } else {
-            tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
+            tcg_out_qemu_ld(s, args[0], args[1], args[2],
                             args[3], TCG_TYPE_I64);
         }
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         break;

     case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
+        tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_st_i64:
         if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_qemu_st(s, args[0], -1, args[1], -1,
-                            args[2], TCG_TYPE_I64);
+            tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I64);
         } else {
-            tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
+            tcg_out_qemu_st(s, args[0], args[1], args[2],
                             args[3], TCG_TYPE_I64);
         }
         break;
--
2.43.0
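Aside, not part of the series: a standalone sketch (not QEMU code) of the "size - 1" representation adopted above. A full 4 GiB reservation is not representable as a size in a 32-bit unsigned long, but its last byte is, which is exactly what the new reserved_va semantics exploit. Values are illustrative only.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* A 4 GiB guest address space, as with '-R 4G' on a 32-bit target. */
        uint64_t size = UINT64_C(4) << 30;            /* 0x1'0000'0000: 33 bits */
        uint32_t reserved_va = (uint32_t)(size - 1);  /* 0xffffffff: fits */

        assert(reserved_va == UINT32_MAX);
        /* Callers recover the size by adding 1 back in wider arithmetic. */
        assert((uint64_t)reserved_va + 1 == size);
        return 0;
    }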
There is now always only one guest address register.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        | 18 +++++++++---------
 tcg/aarch64/tcg-target.c.inc     |  4 ++--
 tcg/arm/tcg-target.c.inc         |  4 ++--
 tcg/i386/tcg-target.c.inc        |  4 ++--
 tcg/loongarch64/tcg-target.c.inc |  4 ++--
 tcg/mips/tcg-target.c.inc        |  4 ++--
 tcg/ppc/tcg-target.c.inc         |  4 ++--
 tcg/riscv/tcg-target.c.inc       |  4 ++--
 tcg/s390x/tcg-target.c.inc       |  4 ++--
 tcg/sparc64/tcg-target.c.inc     |  4 ++--
 10 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ struct TCGLabelQemuLdst {
     bool is_ld;             /* qemu_ld: true, qemu_st: false */
     MemOpIdx oi;
     TCGType type;           /* result type of a load */
-    TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
-    TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
+    TCGReg addr_reg;        /* reg index for guest virtual addr */
     TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
     TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
     const tcg_insn_unit *raddr;   /* addr of the next IR of qemu_ld/st IR */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
          */
         tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
                                TCG_TYPE_I32, TCG_TYPE_I32,
-                               ldst->addrlo_reg, -1);
+                               ldst->addr_reg, -1);
         tcg_out_helper_load_slots(s, 1, mov, parm);

         tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
         next_arg += 2;
     } else {
         nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
-                                      ldst->addrlo_reg, ldst->addrhi_reg);
+                                      ldst->addr_reg, -1);
         tcg_out_helper_load_slots(s, nmov, mov, parm);
         next_arg += nmov;
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,

     /* Handle addr argument. */
     loc = &info->in[next_arg];
-    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
+    tcg_debug_assert(s->addr_type <= TCG_TYPE_REG);
+    if (TCG_TARGET_REG_BITS == 32) {
         /*
-         * 32-bit host with 32-bit guest: zero-extend the guest address
+         * 32-bit host (and thus 32-bit guest): zero-extend the guest address
          * to 64-bits for the helper by storing the low part.  Later,
          * after we have processed the register inputs, we will load a
          * zero for the high part.
          */
         tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
                                TCG_TYPE_I32, TCG_TYPE_I32,
-                               ldst->addrlo_reg, -1);
+                               ldst->addr_reg, -1);
         next_arg += 2;
         nmov += 1;
     } else {
         n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
-                                   ldst->addrlo_reg, ldst->addrhi_reg);
+                                   ldst->addr_reg, -1);
         next_arg += n;
         nmov += n;
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
         g_assert_not_reached();
     }

-    if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
+    if (TCG_TARGET_REG_BITS == 32) {
         /* Zero extend the address by loading a zero for the high part. */
         loc = &info->in[1 + !HOST_BIG_ENDIAN];
         tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
+    ldst->addr_reg = addr_reg;

     mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32
                  ? TCG_TYPE_I64 : TCG_TYPE_I32);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
+    ldst->addr_reg = addr_reg;

     /* tst addr, #mask */
     tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr;
+    ldst->addr_reg = addr;

     /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr;
+    ldst->addr_reg = addr;

     /* We are expecting alignment to max out at 7 */
     tcg_debug_assert(a_mask <= 0xff);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr;
+    ldst->addr_reg = addr;

     if (TCG_TARGET_REG_BITS == 64) {
         ttype = s->addr_type;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr;
+    ldst->addr_reg = addr;

     /* jne slow_path */
     jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addr, a_mask, true, false);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
+    ldst->addr_reg = addr_reg;

     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
+    ldst->addr_reg = addr_reg;

     /*
      * Without micro-architecture details, we don't know which of
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr;
+    ldst->addr_reg = addr;

     /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr;
+    ldst->addr_reg = addr;

     /* We are expecting a_bits to max out at 7, much lower than ANDI. */
     tcg_debug_assert(a_bits < 16);
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr;
+    ldst->addr_reg = addr;

     /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr;
+    ldst->addr_reg = addr;

     /* We are expecting a_bits to max out at 7, much lower than ANDI. */
     tcg_debug_assert(a_bits < 16);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
+    ldst->addr_reg = addr_reg;

     init_setting_vtype(s);

@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
+    ldst->addr_reg = addr_reg;

     init_setting_vtype(s);

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
+    ldst->addr_reg = addr_reg;

     tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
                  s->page_bits - CPU_TLB_ENTRY_BITS);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
+    ldst->addr_reg = addr_reg;

     tcg_debug_assert(a_mask <= 0xffff);
     tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
+    ldst->addr_reg = addr_reg;
     ldst->label_ptr[0] = s->code_ptr;

     /* bne,pn %[xi]cc, label0 */
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
     ldst->oi = oi;
-    ldst->addrlo_reg = addr_reg;
+    ldst->addr_reg = addr_reg;
     ldst->label_ptr[0] = s->code_ptr;

     /* bne,pn %icc, label0 */
--
2.43.0
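Aside, not part of the series: the storage effect of collapsing the two address fields can be seen with a standalone sketch; the struct and field names below are illustrative, not QEMU's actual definitions.

    #include <stdio.h>

    typedef int TCGRegSketch;

    /* Before: two fields to describe a possibly-split guest address. */
    struct before { TCGRegSketch addrlo_reg, addrhi_reg; };
    /* After: one field, since the guest address now always fits in
       a single host register. */
    struct after { TCGRegSketch addr_reg; };

    int main(void)
    {
        printf("before: %zu bytes, after: %zu bytes\n",
               sizeof(struct before), sizeof(struct after));
        return 0;
    }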
Pass the address of the last byte of the image, rather than
the first address past the last byte.  This avoids overflow
when the last page of the address space is involved.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/user-internals.h | 12 ++++++------
 linux-user/elfload.c        | 24 ++++++++++++------------
 linux-user/flatload.c       |  2 +-
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/user-internals.h
+++ b/linux-user/user-internals.h
@@ -XXX,XX +XXX,XX @@ void fork_end(int child);
 /**
  * probe_guest_base:
  * @image_name: the executable being loaded
- * @loaddr: the lowest fixed address in the executable
- * @hiaddr: the highest fixed address in the executable
+ * @loaddr: the lowest fixed address within the executable
+ * @hiaddr: the highest fixed address within the executable
  *
  * Creates the initial guest address space in the host memory space.
  *
- * If @loaddr == 0, then no address in the executable is fixed,
- * i.e. it is fully relocatable.  In that case @hiaddr is the size
- * of the executable.
+ * If @loaddr == 0, then no address in the executable is fixed, i.e.
+ * it is fully relocatable.  In that case @hiaddr is the size of the
+ * executable minus one.
  *
  * This function will not return if a valid value for guest_base
  * cannot be chosen.  On return, the executable loader can expect
  *
- *    target_mmap(loaddr, hiaddr - loaddr, ...)
+ *    target_mmap(loaddr, hiaddr - loaddr + 1, ...)
  *
  * to succeed.
  */
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
     if (guest_hiaddr > reserved_va) {
         error_report("%s: requires more than reserved virtual "
                      "address space (0x%" PRIx64 " > 0x%lx)",
-                     image_name, (uint64_t)guest_hiaddr, reserved_va);
+                     image_name, (uint64_t)guest_hiaddr + 1, reserved_va);
         exit(EXIT_FAILURE);
     }
 } else {
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
     if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) {
         error_report("%s: requires more virtual address space "
                      "than the host can provide (0x%" PRIx64 ")",
-                     image_name, (uint64_t)guest_hiaddr - guest_base);
+                     image_name, (uint64_t)guest_hiaddr + 1 - guest_base);
         exit(EXIT_FAILURE);
     }
 #endif
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
     if (reserved_va) {
         guest_loaddr = (guest_base >= mmap_min_addr ? 0
                         : mmap_min_addr - guest_base);
-        guest_hiaddr = reserved_va;
+        guest_hiaddr = reserved_va - 1;
     }

     /* Reserve the address space for the binary, or reserved_va. */
     test = g2h_untagged(guest_loaddr);
-    addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
+    addr = mmap(test, guest_hiaddr - guest_loaddr + 1, PROT_NONE, flags, -1, 0);
     if (test != addr) {
         pgb_fail_in_use(image_name);
     }
     qemu_log_mask(CPU_LOG_PAGE,
-                  "%s: base @ %p for " TARGET_ABI_FMT_ld " bytes\n",
-                  __func__, addr, guest_hiaddr - guest_loaddr);
+                  "%s: base @ %p for %" PRIu64 " bytes\n",
+                  __func__, addr, (uint64_t)guest_hiaddr - guest_loaddr + 1);
 }

@@ -XXX,XX +XXX,XX @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
     if (hiaddr != orig_hiaddr) {
         error_report("%s: requires virtual address space that the "
                      "host cannot provide (0x%" PRIx64 ")",
-                     image_name, (uint64_t)orig_hiaddr);
+                     image_name, (uint64_t)orig_hiaddr + 1);
         exit(EXIT_FAILURE);
     }

@@ -XXX,XX +XXX,XX @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
      * arithmetic wraps around.
      */
     if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
-        hiaddr = (uintptr_t) 4 << 30;
+        hiaddr = UINT32_MAX;
     } else {
         offset = -(HI_COMMPAGE & -align);
     }
@@ -XXX,XX +XXX,XX @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
         loaddr = MIN(loaddr, LO_COMMPAGE & -align);
     }

-    addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
+    addr = pgb_find_hole(loaddr, hiaddr - loaddr + 1, align, offset);
     if (addr == -1) {
         /*
          * If HI_COMMPAGE, there *might* be a non-consecutive allocation
@@ -XXX,XX +XXX,XX @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
     if (guest_hiaddr > reserved_va) {
         error_report("%s: requires more than reserved virtual "
                      "address space (0x%" PRIx64 " > 0x%lx)",
-                     image_name, (uint64_t)guest_hiaddr, reserved_va);
+                     image_name, (uint64_t)guest_hiaddr + 1, reserved_va);
         exit(EXIT_FAILURE);
     }

@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, int image_fd,
             if (a < loaddr) {
                 loaddr = a;
             }
-            a = eppnt->p_vaddr + eppnt->p_memsz;
+            a = eppnt->p_vaddr + eppnt->p_memsz - 1;
             if (a > hiaddr) {
                 hiaddr = a;
             }
@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, int image_fd,
      * In both cases, we will overwrite pages in this range with mappings
      * from the executable.
      */
-    load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
+    load_addr = target_mmap(loaddr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
                             MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
                             (ehdr->e_type == ET_EXEC ? MAP_FIXED : 0),
                             -1, 0);
diff --git a/linux-user/flatload.c b/linux-user/flatload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/flatload.c
+++ b/linux-user/flatload.c
@@ -XXX,XX +XXX,XX @@ static int load_flat_file(struct linux_binprm * bprm,
      * Allocate the address space.
      */
     probe_guest_base(bprm->filename, 0,
-                     text_len + data_len + extra + indx_len);
+                     text_len + data_len + extra + indx_len - 1);

     /*
      * there are a couple of cases here, the separate code/data
--
2.34.1


The declaration uses uint64_t for addr.

Fixes: 595cd9ce2ec ("plugins: add plugin API to read guest memory")
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 plugins/api.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/api.c b/plugins/api.c
index XXXXXXX..XXXXXXX 100644
--- a/plugins/api.c
+++ b/plugins/api.c
@@ -XXX,XX +XXX,XX @@ GArray *qemu_plugin_get_registers(void)
     return create_register_handles(regs);
 }

-bool qemu_plugin_read_memory_vaddr(vaddr addr, GByteArray *data, size_t len)
+bool qemu_plugin_read_memory_vaddr(uint64_t addr, GByteArray *data, size_t len)
 {
     g_assert(current_cpu);

--
2.43.0
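Aside, not part of the series: a standalone sketch (not QEMU code) of the inclusive-bound arithmetic used by probe_guest_base above. With an inclusive hiaddr, the mapping length is hiaddr - loaddr + 1, and an image ending at the very top of a 32-bit address space remains representable. Values are illustrative only.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* An image occupying [loaddr, hiaddr], both bounds inclusive. */
        uint32_t loaddr = 0x10000;
        uint32_t hiaddr = 0x1ffff;
        assert(hiaddr - loaddr + 1 == 0x10000);   /* length of the mapping */

        /* With an exclusive bound, an image ending at the top of a 32-bit
           address space would need the unrepresentable value 0x100000000;
           the inclusive bound stays within uint32_t. */
        hiaddr = UINT32_MAX;
        assert(hiaddr - loaddr + 1 == 0xffff0000u);
        return 0;
    }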
The declarations use vaddr for size.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,

 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                              hwaddr paddr, MemTxAttrs attrs, int prot,
-                             int mmu_idx, uint64_t size)
+                             int mmu_idx, vaddr size)
 {
     CPUTLBEntryFull full = {
         .phys_addr = paddr,
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,

 void tlb_set_page(CPUState *cpu, vaddr addr,
                   hwaddr paddr, int prot,
-                  int mmu_idx, uint64_t size)
+                  int mmu_idx, vaddr size)
 {
     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
                             prot, mmu_idx, size);
--
2.43.0
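Aside, not part of the series: a standalone sketch of why the definition should spell its parameter types with the same typedef as the declaration. While vaddr is uint64_t everywhere the mismatch is invisible; the pattern below breaks as soon as vaddr narrows on 32-bit hosts, as the following patch does. All names here are illustrative, not QEMU's.

    #include <stdint.h>

    typedef uintptr_t sketch_vaddr;   /* 32 bits on a 32-bit host */

    /* Prototype, as a header would declare it. */
    void set_page_sketch(sketch_vaddr addr, sketch_vaddr size);

    /* Definition: writing the parameters as uint64_t instead of
       sketch_vaddr would conflict with the prototype on a 32-bit
       host and fail to compile. */
    void set_page_sketch(sketch_vaddr addr, sketch_vaddr size)
    {
        (void)addr;
        (void)size;
    }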
Pass the address of the last byte to be changed, rather than
the first address past the last byte.  This avoids overflow
when the last page of the address space is involved.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1528
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h |  2 +-
 accel/tcg/user-exec.c  | 16 +++++++---------
 bsd-user/mmap.c        |  6 +++---
 linux-user/elfload.c   | 11 ++++++-----
 linux-user/mmap.c      | 16 ++++++++--------
 linux-user/syscall.c   |  4 ++--
 6 files changed, 27 insertions(+), 28 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ typedef int (*walk_memory_regions_fn)(void *, target_ulong,
 int walk_memory_regions(void *, walk_memory_regions_fn);

 int page_get_flags(target_ulong address);
-void page_set_flags(target_ulong start, target_ulong end, int flags);
+void page_set_flags(target_ulong start, target_ulong last, int flags);
 void page_reset_target_data(target_ulong start, target_ulong end);
 int page_check_range(target_ulong start, target_ulong len, int flags);

diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
  * The flag PAGE_WRITE_ORG is positioned automatically depending
  * on PAGE_WRITE.  The mmap_lock should already be held.
  */
-void page_set_flags(target_ulong start, target_ulong end, int flags)
+void page_set_flags(target_ulong start, target_ulong last, int flags)
 {
-    target_ulong last;
     bool reset = false;
     bool inval_tb = false;

     /* This function should never be called with addresses outside the
        guest address space.  If this assert fires, it probably indicates
        a missing call to h2g_valid.  */
-    assert(start < end);
-    assert(end - 1 <= GUEST_ADDR_MAX);
+    assert(start <= last);
+    assert(last <= GUEST_ADDR_MAX);
     /* Only set PAGE_ANON with new mappings. */
     assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
     assert_memory_lock();

-    start = start & TARGET_PAGE_MASK;
-    end = TARGET_PAGE_ALIGN(end);
-    last = end - 1;
+    start &= TARGET_PAGE_MASK;
+    last |= ~TARGET_PAGE_MASK;

     if (!(flags & PAGE_VALID)) {
         flags = 0;
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
     }

     if (!flags || reset) {
-        page_reset_target_data(start, end);
+        page_reset_target_data(start, last + 1);
         inval_tb |= pageflags_unset(start, last);
     }
     if (flags) {
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
                                  ~(reset ? 0 : PAGE_STICKY));
     }
     if (inval_tb) {
-        tb_invalidate_phys_range(start, end);
+        tb_invalidate_phys_range(start, last + 1);
     }
 }

diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
         if (ret != 0)
             goto error;
     }
-    page_set_flags(start, start + len, prot | PAGE_VALID);
+    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
     mmap_unlock();
     return 0;
 error:
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
         }
     }
 the_end1:
-    page_set_flags(start, start + len, prot | PAGE_VALID);
+    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
 the_end:
 #ifdef DEBUG_MMAP
     printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
@@ -XXX,XX +XXX,XX @@ int target_munmap(abi_ulong start, abi_ulong len)
     }

     if (ret == 0) {
-        page_set_flags(start, start + len, 0);
+        page_set_flags(start, start + len - 1, 0);
     }
     mmap_unlock();
     return ret;
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
         exit(EXIT_FAILURE);
     }
     page_set_flags(TARGET_VSYSCALL_PAGE,
-                   TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
+                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                    PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
         exit(EXIT_FAILURE);
     }

-    page_set_flags(commpage, commpage + qemu_host_page_size,
+    page_set_flags(commpage, commpage | ~qemu_host_page_mask,
                    PAGE_READ | PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
         exit(EXIT_FAILURE);
     }

-    page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                    PAGE_READ | PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
      * and implement syscalls.  Here, simply mark the page executable.
      * Special case the entry points during translation (see do_page_zero).
      */
-    page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                    PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)

     /* Ensure that the bss page(s) are valid */
     if ((page_get_flags(last_bss-1) & prot) != prot) {
-        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
+        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1,
+                       prot | PAGE_VALID);
     }

     if (host_start < host_map_start) {
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
         }
     }

-    page_set_flags(start, start + len, page_flags);
+    page_set_flags(start, start + len - 1, page_flags);
     ret = 0;

 error:
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
     }
     page_flags |= PAGE_RESET;
     if (passthrough_start == passthrough_end) {
-        page_set_flags(start, start + len, page_flags);
+        page_set_flags(start, start + len - 1, page_flags);
     } else {
         if (start < passthrough_start) {
-            page_set_flags(start, passthrough_start, page_flags);
+            page_set_flags(start, passthrough_start - 1, page_flags);
         }
-        page_set_flags(passthrough_start, passthrough_end,
+        page_set_flags(passthrough_start, passthrough_end - 1,
                        page_flags | PAGE_PASSTHROUGH);
         if (passthrough_end < start + len) {
-            page_set_flags(passthrough_end, start + len, page_flags);
+            page_set_flags(passthrough_end, start + len - 1, page_flags);
         }
     }
 the_end:
@@ -XXX,XX +XXX,XX @@ int target_munmap(abi_ulong start, abi_ulong len)
     }

     if (ret == 0) {
-        page_set_flags(start, start + len, 0);
+        page_set_flags(start, start + len - 1, 0);
     }
     mmap_unlock();
     return ret;
@@ -XXX,XX +XXX,XX @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
     } else {
         new_addr = h2g(host_addr);
         prot = page_get_flags(old_addr);
-        page_set_flags(old_addr, old_addr + old_size, 0);
-        page_set_flags(new_addr, new_addr + new_size,
+        page_set_flags(old_addr, old_addr + old_size - 1, 0);
+        page_set_flags(new_addr, new_addr + new_size - 1,
                        prot | PAGE_VALID | PAGE_RESET);
     }
     mmap_unlock();
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
     }
     raddr=h2g((unsigned long)host_raddr);

-    page_set_flags(raddr, raddr + shm_info.shm_segsz,
+    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                    PAGE_VALID | PAGE_RESET | PAGE_READ |
                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

@@ -XXX,XX +XXX,XX @@ static inline abi_long do_shmdt(abi_ulong shmaddr)
     for (i = 0; i < N_SHM_REGIONS; ++i) {
         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
             shm_regions[i].in_use = false;
-            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
+            page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
             break;
         }
     }
--
2.34.1


Since we no longer support 64-bit guests on 32-bit hosts,
we can use a 32-bit type on a 32-bit host.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/vaddr.h | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/include/exec/vaddr.h b/include/exec/vaddr.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/vaddr.h
+++ b/include/exec/vaddr.h
@@ -XXX,XX +XXX,XX @@
 /**
  * vaddr:
  * Type wide enough to contain any #target_ulong virtual address.
+ * We do not support 64-bit guest on 32-host and detect at configure time.
+ * Therefore, a host pointer width will always fit a guest pointer.
  */
-typedef uint64_t vaddr;
-#define VADDR_PRId PRId64
-#define VADDR_PRIu PRIu64
-#define VADDR_PRIo PRIo64
-#define VADDR_PRIx PRIx64
-#define VADDR_PRIX PRIX64
-#define VADDR_MAX UINT64_MAX
+typedef uintptr_t vaddr;
+#define VADDR_PRId PRIdPTR
+#define VADDR_PRIu PRIuPTR
+#define VADDR_PRIo PRIoPTR
+#define VADDR_PRIx PRIxPTR
+#define VADDR_PRIX PRIXPTR
+#define VADDR_MAX UINTPTR_MAX

 #endif
--
2.43.0
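Aside, not part of the series: a standalone sketch (not QEMU code) of the masking idiom used in page_set_flags above. Rounding start down and last up to page boundaries with AND/OR never computes a one-past-the-end value, so the top page of the address space cannot wrap. Values are illustrative only.

    #include <assert.h>
    #include <stdint.h>

    #define SK_PAGE_BITS 12
    #define SK_PAGE_MASK ((uint32_t)-1 << SK_PAGE_BITS)

    int main(void)
    {
        uint32_t start = 0x12345678;
        uint32_t last  = 0xfffffeff;

        start &= SK_PAGE_MASK;   /* round down: 0x12345000 */
        last  |= ~SK_PAGE_MASK;  /* round up, inclusively: 0xffffffff */

        assert(start == 0x12345000);
        assert(last == UINT32_MAX);   /* no overflow, even for the top page */
        return 0;
    }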
Pass the address of the last byte to be changed, rather than
the first address past the last byte.  This avoids overflow
when the last page of the address space is involved.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h   |  2 +-
 accel/tcg/tb-maint.c      | 31 ++++++++++++++++---------------
 accel/tcg/translate-all.c |  2 +-
 accel/tcg/user-exec.c     |  2 +-
 softmmu/physmem.c         |  2 +-
 5 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(target_ulong addr);
 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
 #endif
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

 /* GETPC is the true target of the return instruction that we'll execute.  */
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
 * Called with mmap_lock held for user-mode emulation.
 * NOTE: this function must not be called while a TB is running.
 */
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
 {
     TranslationBlock *tb;
     PageForEachNext n;
-    tb_page_addr_t last = end - 1;

     assert_memory_lock();

@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
 */
 void tb_invalidate_phys_page(tb_page_addr_t addr)
 {
-    tb_page_addr_t start, end;
+    tb_page_addr_t start, last;

     start = addr & TARGET_PAGE_MASK;
-    end = start + TARGET_PAGE_SIZE;
-    tb_invalidate_phys_range(start, end);
+    last = addr | ~TARGET_PAGE_MASK;
+    tb_invalidate_phys_range(start, last);
 }

 /*
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page(tb_page_addr_t addr)

 /*
 * Invalidate all TBs which intersect with the target physical address range
- * [start;end[. NOTE: start and end may refer to *different* physical pages.
+ * [start;last]. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
 {
     struct page_collection *pages;
-    tb_page_addr_t next;
+    tb_page_addr_t index, index_last;

-    pages = page_collection_lock(start, end - 1);
-    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-         start < end;
-         start = next, next += TARGET_PAGE_SIZE) {
-        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
-        tb_page_addr_t bound = MIN(next, end);
+    pages = page_collection_lock(start, last);
+
+    index_last = last >> TARGET_PAGE_BITS;
+    for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
+        PageDesc *pd = page_find(index);
+        tb_page_addr_t bound;

         if (pd == NULL) {
             continue;
         }
         assert_page_locked(pd);
-        tb_invalidate_phys_page_range__locked(pages, pd, start, bound - 1, 0);
+        bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
+        bound = MIN(bound, last);
+        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
     }
     page_collection_unlock(pages);
 }
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
         addr = get_page_addr_code(env, pc);
         if (addr != -1) {
-            tb_invalidate_phys_range(addr, addr + 1);
+            tb_invalidate_phys_range(addr, addr);
         }
     }
 }
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
                                  ~(reset ? 0 : PAGE_STICKY));
     }
     if (inval_tb) {
-        tb_invalidate_phys_range(start, last + 1);
+        tb_invalidate_phys_range(start, last);
     }
 }

diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -XXX,XX +XXX,XX @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
     }
     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
         assert(tcg_enabled());
-        tb_invalidate_phys_range(addr, addr + length);
+        tb_invalidate_phys_range(addr, addr + length - 1);
         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
     }
     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
--
2.34.1


Since we no longer support 64-bit guests on 32-bit hosts,
we can use a 32-bit type on a 32-bit host.  This shrinks
the size of the structure to 16 bytes on a 32-bit host.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/tlb-common.h | 10 +++++-----
 accel/tcg/cputlb.c        | 21 ++++-----------------
 tcg/arm/tcg-target.c.inc  |  1 -
 tcg/mips/tcg-target.c.inc | 12 +++++-------
 tcg/ppc/tcg-target.c.inc  | 21 +++++----------------
 5 files changed, 19 insertions(+), 46 deletions(-)

diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/tlb-common.h
+++ b/include/exec/tlb-common.h
@@ -XXX,XX +XXX,XX @@
 #ifndef EXEC_TLB_COMMON_H
 #define EXEC_TLB_COMMON_H 1

-#define CPU_TLB_ENTRY_BITS 5
+#define CPU_TLB_ENTRY_BITS (HOST_LONG_BITS == 32 ? 4 : 5)

 /* Minimalized TLB entry for use by TCG fast path. */
 typedef union CPUTLBEntry {
     struct {
-        uint64_t addr_read;
-        uint64_t addr_write;
-        uint64_t addr_code;
+        uintptr_t addr_read;
+        uintptr_t addr_write;
+        uintptr_t addr_code;
         /*
          * Addend to virtual address to get host address.  IO accesses
          * use the corresponding iotlb value.
@@ -XXX,XX +XXX,XX @@ typedef union CPUTLBEntry {
      * Padding to get a power of two size, as well as index
      * access to addr_{read,write,code}.
      */
-    uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
+    uintptr_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uintptr_t)];
 } CPUTLBEntry;

 QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
 {
     /* Do not rearrange the CPUTLBEntry structure members. */
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
-                      MMU_DATA_LOAD * sizeof(uint64_t));
+                      MMU_DATA_LOAD * sizeof(uintptr_t));
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
-                      MMU_DATA_STORE * sizeof(uint64_t));
+                      MMU_DATA_STORE * sizeof(uintptr_t));
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
-                      MMU_INST_FETCH * sizeof(uint64_t));
+                      MMU_INST_FETCH * sizeof(uintptr_t));

-#if TARGET_LONG_BITS == 32
-    /* Use qatomic_read, in case of addr_write; only care about low bits. */
-    const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
-    ptr += HOST_BIG_ENDIAN;
-    return qatomic_read(ptr);
-#else
-    const uint64_t *ptr = &entry->addr_idx[access_type];
+    const uintptr_t *ptr = &entry->addr_idx[access_type];
     /* ofs might correspond to .addr_write, so use qatomic_read */
     return qatomic_read(ptr);
-#endif
 }

 static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
@@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
         addr &= TARGET_PAGE_MASK;
         addr += tlb_entry->addend;
         if ((addr - start) < length) {
-#if TARGET_LONG_BITS == 32
-            uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
-            ptr_write += HOST_BIG_ENDIAN;
-            qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
-#else
             qatomic_set(&tlb_entry->addr_write,
                         tlb_entry->addr_write | TLB_NOTDIRTY);
-#endif
         }
     }
 }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
      * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
      * Load the tlb comparator into R2 and the fast path addend into R1.
      */
-    QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
     if (cmp_off == 0) {
         tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
     } else {
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     /* Add the tlb_table pointer, creating the CPUTLBEntry address. */
     tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);

-    if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
-        /* Load the (low half) tlb comparator. */
+    /* Load the tlb comparator. */
+    if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
         tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3,
                    cmp_off + HOST_BIG_ENDIAN * 4);
     } else {
-        tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off);
+        tcg_out_ld(s, TCG_TYPE_REG, TCG_TMP0, TCG_TMP3, cmp_off);
     }

-    if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
-        /* Load the tlb addend for the fast path. */
-        tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
-    }
+    /* Load the tlb addend for the fast path. */
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);

     /*
      * Mask the page bits, keeping the alignment bits to compare against.
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));

     /*
-     * Load the (low part) TLB comparator into TMP2.
+     * Load the TLB comparator into TMP2.
      * For 64-bit host, always load the entire 64-bit slot for simplicity.
      * We will ignore the high bits with tcg_out_cmp(..., addr_type).
      */
-    if (TCG_TARGET_REG_BITS == 64) {
-        if (cmp_off == 0) {
-            tcg_out32(s, LDUX | TAB(TCG_REG_TMP2,
-                                    TCG_REG_TMP1, TCG_REG_TMP2));
-        } else {
-            tcg_out32(s, ADD | TAB(TCG_REG_TMP1,
-                                   TCG_REG_TMP1, TCG_REG_TMP2));
-            tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2,
-                       TCG_REG_TMP1, cmp_off);
-        }
-    } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
-        tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2,
-                                 TCG_REG_TMP1, TCG_REG_TMP2));
+    if (cmp_off == 0) {
+        tcg_out32(s, (TCG_TARGET_REG_BITS == 64 ? LDUX : LWZUX)
+                     | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
     } else {
         tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
-        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
-                   cmp_off + 4 * HOST_BIG_ENDIAN);
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
     }

     /*
--
2.43.0
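Aside, not part of the series: the size arithmetic behind "16 bytes on a 32-bit host" above, as a standalone sketch (illustrative names, not QEMU's layout). Four pointer-sized words give 4 * 4 = 16 bytes (1 << 4) when uintptr_t is 32 bits, and 4 * 8 = 32 bytes (1 << 5) on a 64-bit host, matching the new CPU_TLB_ENTRY_BITS definition.

    #include <assert.h>
    #include <stdint.h>

    /* Sketch of the union layout, sized for the host pointer width. */
    typedef union SketchTLBEntry {
        struct {
            uintptr_t addr_read;
            uintptr_t addr_write;
            uintptr_t addr_code;
            uintptr_t addend;
        };
        uintptr_t addr_idx[4];   /* index access to the three addr_* slots */
    } SketchTLBEntry;

    int main(void)
    {
        assert(sizeof(SketchTLBEntry) == 4 * sizeof(uintptr_t));
        return 0;
    }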
For loongarch, mips, riscv and sparc, a zero register is
available all the time. For aarch64, register index 31
depends on context: sometimes it is the stack pointer,
and sometimes it is the zero register.

Introduce a new general-purpose constraint which maps 0
to TCG_REG_ZERO, if defined. This differs from existing
constant constraints in that const_arg[*] is recorded as
false, indicating that the value is in a register.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg.h | 3 ++-
tcg/aarch64/tcg-target.h | 2 ++
tcg/loongarch64/tcg-target.h | 2 ++
tcg/mips/tcg-target.h | 2 ++
tcg/riscv/tcg-target.h | 2 ++
tcg/sparc64/tcg-target.h | 3 ++-
tcg/tcg.c | 29 ++++++++++++++++++++++-------
docs/devel/tcg-ops.rst | 4 +++-
8 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *, int,

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

-#define TCG_CT_CONST 1 /* any constant of register size */
+#define TCG_CT_CONST 1 /* any constant of register size */
+#define TCG_CT_REG_ZERO 2 /* zero, in TCG_REG_ZERO */

typedef struct TCGArgConstraint {
unsigned ct : 16;
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
TCG_AREG0 = TCG_REG_X19,
} TCGReg;

+#define TCG_REG_ZERO TCG_REG_XZR
+
#define TCG_TARGET_NB_REGS 64

#endif /* AARCH64_TCG_TARGET_H */
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
TCG_VEC_TMP0 = TCG_REG_V23,
} TCGReg;

+#define TCG_REG_ZERO TCG_REG_ZERO
+
#endif /* LOONGARCH_TCG_TARGET_H */
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
TCG_AREG0 = TCG_REG_S8,
} TCGReg;

+#define TCG_REG_ZERO TCG_REG_ZERO
+
#endif
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
TCG_REG_TMP2 = TCG_REG_T4,
} TCGReg;

+#define TCG_REG_ZERO TCG_REG_ZERO
+
#endif
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
TCG_REG_I7,
} TCGReg;

-#define TCG_AREG0 TCG_REG_I0
+#define TCG_AREG0 TCG_REG_I0
+#define TCG_REG_ZERO TCG_REG_G0

#endif
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void process_constraint_sets(void)
case 'i':
args_ct[i].ct |= TCG_CT_CONST;
break;
+#ifdef TCG_REG_ZERO
+ case 'z':
+ args_ct[i].ct |= TCG_CT_REG_ZERO;
+ break;
+#endif

/* Include all of the target-specific constraints. */

@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
arg_ct = &args_ct[i];
ts = arg_temp(arg);

- if (ts->val_type == TEMP_VAL_CONST
- && tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
- op_cond, TCGOP_VECE(op))) {
- /* constant is OK for instruction */
- const_args[i] = 1;
- new_args[i] = ts->val;
- continue;
+ if (ts->val_type == TEMP_VAL_CONST) {
+#ifdef TCG_REG_ZERO
+ if (ts->val == 0 && (arg_ct->ct & TCG_CT_REG_ZERO)) {
+ /* Hardware zero register: indicate register via non-const. */
+ const_args[i] = 0;
+ new_args[i] = TCG_REG_ZERO;
+ continue;
+ }
+#endif
+
+ if (tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
+ op_cond, TCGOP_VECE(op))) {
+ /* constant is OK for instruction */
+ const_args[i] = 1;
+ new_args[i] = ts->val;
+ continue;
+ }
}

reg = ts->reg;
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
---
13
---
60
configure | 15 +
14
include/tcg/tcg.h | 3 ++-
61
meson.build | 4 +
15
tcg/aarch64/tcg-target.h | 2 ++
62
include/qemu/qtree.h | 201 ++++++
16
tcg/loongarch64/tcg-target.h | 2 ++
63
tests/bench/qtree-bench.c | 286 ++++++++
17
tcg/mips/tcg-target.h | 2 ++
64
tests/unit/test-qtree.c | 333 +++++++++
18
tcg/riscv/tcg-target.h | 2 ++
65
util/qtree.c | 1390 +++++++++++++++++++++++++++++++++++++
19
tcg/sparc64/tcg-target.h | 3 ++-
66
tests/bench/meson.build | 4 +
20
tcg/tcg.c | 29 ++++++++++++++++++++++-------
67
tests/unit/meson.build | 1 +
21
docs/devel/tcg-ops.rst | 4 +++-
68
util/meson.build | 1 +
22
8 files changed, 37 insertions(+), 10 deletions(-)
69
9 files changed, 2235 insertions(+)
70
create mode 100644 include/qemu/qtree.h
71
create mode 100644 tests/bench/qtree-bench.c
72
create mode 100644 tests/unit/test-qtree.c
73
create mode 100644 util/qtree.c
74
23
75
diff --git a/configure b/configure
24
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
76
index XXXXXXX..XXXXXXX 100755
25
index XXXXXXX..XXXXXXX 100644
77
--- a/configure
26
--- a/include/tcg/tcg.h
78
+++ b/configure
27
+++ b/include/tcg/tcg.h
79
@@ -XXX,XX +XXX,XX @@ safe_stack=""
28
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *, int,
80
use_containers="yes"
29
81
gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb")
30
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
82
gdb_arches=""
31
83
+glib_has_gslice="no"
32
-#define TCG_CT_CONST 1 /* any constant of register size */
84
33
+#define TCG_CT_CONST 1 /* any constant of register size */
85
if test -e "$source_path/.git"
34
+#define TCG_CT_REG_ZERO 2 /* zero, in TCG_REG_ZERO */
86
then
35
87
@@ -XXX,XX +XXX,XX @@ for i in $glib_modules; do
36
typedef struct TCGArgConstraint {
88
fi
37
unsigned ct : 16;
89
done
38
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
90
39
index XXXXXXX..XXXXXXX 100644
91
+# Check whether glib has gslice, which we have to avoid for correctness.
40
--- a/tcg/aarch64/tcg-target.h
92
+# TODO: remove this check and the corresponding workaround (qtree) when
41
+++ b/tcg/aarch64/tcg-target.h
93
+# the minimum supported glib is >= $glib_dropped_gslice_version.
42
@@ -XXX,XX +XXX,XX @@ typedef enum {
94
+glib_dropped_gslice_version=2.75.3
43
TCG_AREG0 = TCG_REG_X19,
95
+for i in $glib_modules; do
44
} TCGReg;
96
+ if ! $pkg_config --atleast-version=$glib_dropped_gslice_version $i; then
45
97
+ glib_has_gslice="yes"
46
+#define TCG_REG_ZERO TCG_REG_XZR
98
+    break
99
+ fi
100
+done
101
+
47
+
102
glib_bindir="$($pkg_config --variable=bindir glib-2.0)"
48
#define TCG_TARGET_NB_REGS 64
103
if test -z "$glib_bindir" ; then
49
104
    glib_bindir="$($pkg_config --variable=prefix glib-2.0)"/bin
50
#endif /* AARCH64_TCG_TARGET_H */
105
@@ -XXX,XX +XXX,XX @@ echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak
51
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
106
echo "GLIB_LIBS=$glib_libs" >> $config_host_mak
107
echo "GLIB_BINDIR=$glib_bindir" >> $config_host_mak
108
echo "GLIB_VERSION=$($pkg_config --modversion glib-2.0)" >> $config_host_mak
109
+if test "$glib_has_gslice" = "yes" ; then
110
+ echo "HAVE_GLIB_WITH_SLICE_ALLOCATOR=y" >> $config_host_mak
111
+fi
112
echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak
113
echo "EXESUF=$EXESUF" >> $config_host_mak
114
115
diff --git a/meson.build b/meson.build
116
index XXXXXXX..XXXXXXX 100644
52
index XXXXXXX..XXXXXXX 100644
117
--- a/meson.build
53
--- a/tcg/loongarch64/tcg-target.h
118
+++ b/meson.build
54
+++ b/tcg/loongarch64/tcg-target.h
119
@@ -XXX,XX +XXX,XX @@ glib = declare_dependency(compile_args: config_host['GLIB_CFLAGS'].split(),
55
@@ -XXX,XX +XXX,XX @@ typedef enum {
120
})
56
TCG_VEC_TMP0 = TCG_REG_V23,
121
# override glib dep with the configure results (for subprojects)
57
} TCGReg;
122
meson.override_dependency('glib-2.0', glib)
58
123
+# pass down whether Glib has the slice allocator
59
+#define TCG_REG_ZERO TCG_REG_ZERO
124
+if config_host.has_key('HAVE_GLIB_WITH_SLICE_ALLOCATOR')
125
+ config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', true)
126
+endif
127
128
gio = not_found
129
gdbus_codegen = not_found
130
diff --git a/include/qemu/qtree.h b/include/qemu/qtree.h
131
new file mode 100644
132
index XXXXXXX..XXXXXXX
133
--- /dev/null
134
+++ b/include/qemu/qtree.h
135
@@ -XXX,XX +XXX,XX @@
136
+/*
137
+ * GLIB - Library of useful routines for C programming
138
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
139
+ *
140
+ * SPDX-License-Identifier: LGPL-2.1-or-later
141
+ *
142
+ * This library is free software; you can redistribute it and/or
143
+ * modify it under the terms of the GNU Lesser General Public
144
+ * License as published by the Free Software Foundation; either
145
+ * version 2.1 of the License, or (at your option) any later version.
146
+ *
147
+ * This library is distributed in the hope that it will be useful,
148
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
149
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
150
+ * Lesser General Public License for more details.
151
+ *
152
+ * You should have received a copy of the GNU Lesser General Public
153
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
154
+ */
155
+
60
+
156
+/*
61
#endif /* LOONGARCH_TCG_TARGET_H */
157
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
62
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
158
+ * file for a list of people on the GLib Team. See the ChangeLog
63
index XXXXXXX..XXXXXXX 100644
159
+ * files for a list of changes. These files are distributed with
64
--- a/tcg/mips/tcg-target.h
160
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
65
+++ b/tcg/mips/tcg-target.h
161
+ */
66
@@ -XXX,XX +XXX,XX @@ typedef enum {
67
TCG_AREG0 = TCG_REG_S8,
68
} TCGReg;
69
70
+#define TCG_REG_ZERO TCG_REG_ZERO
162
+
71
+
163
+/*
72
#endif
164
+ * QTree is a partial import of Glib's GTree. The parts excluded correspond
73
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
165
+ * to API calls either deprecated (e.g. g_tree_traverse) or recently added
74
index XXXXXXX..XXXXXXX 100644
166
+ * (e.g. g_tree_search_node, added in 2.68); neither have callers in QEMU.
75
--- a/tcg/riscv/tcg-target.h
167
+ *
76
+++ b/tcg/riscv/tcg-target.h
168
+ * The reason for this import is to allow us to control the memory allocator
77
@@ -XXX,XX +XXX,XX @@ typedef enum {
169
+ * used by the tree implementation. Until Glib 2.75.3, GTree uses Glib's
78
TCG_REG_TMP2 = TCG_REG_T4,
170
+ * slice allocator, which causes problems when forking in user-mode;
79
} TCGReg;
171
+ * see https://gitlab.com/qemu-project/qemu/-/issues/285 and glib's
80
172
+ * "45b5a6c1e gslice: Remove slice allocator and use malloc() instead".
81
+#define TCG_REG_ZERO TCG_REG_ZERO
173
+ *
174
+ * TODO: remove QTree when QEMU's minimum Glib version is >= 2.75.3.
175
+ */
176
+
82
+
177
+#ifndef QEMU_QTREE_H
83
#endif
178
+#define QEMU_QTREE_H
84
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
179
+
85
index XXXXXXX..XXXXXXX 100644
180
+#include "qemu/osdep.h"
86
--- a/tcg/sparc64/tcg-target.h
181
+
87
+++ b/tcg/sparc64/tcg-target.h
182
+#ifdef HAVE_GLIB_WITH_SLICE_ALLOCATOR
88
@@ -XXX,XX +XXX,XX @@ typedef enum {
183
+
89
TCG_REG_I7,
184
+typedef struct _QTree QTree;
90
} TCGReg;
185
+
91
186
+typedef struct _QTreeNode QTreeNode;
92
-#define TCG_AREG0 TCG_REG_I0
187
+
93
+#define TCG_AREG0 TCG_REG_I0
188
+typedef gboolean (*QTraverseNodeFunc)(QTreeNode *node,
94
+#define TCG_REG_ZERO TCG_REG_G0
189
+ gpointer user_data);
95
190
+
96
#endif
191
+/*
97
diff --git a/tcg/tcg.c b/tcg/tcg.c
192
+ * Balanced binary trees
98
index XXXXXXX..XXXXXXX 100644
193
+ */
99
--- a/tcg/tcg.c
194
+QTree *q_tree_new(GCompareFunc key_compare_func);
100
+++ b/tcg/tcg.c
195
+QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
101
@@ -XXX,XX +XXX,XX @@ static void process_constraint_sets(void)
196
+ gpointer key_compare_data);
102
case 'i':
197
+QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
103
args_ct[i].ct |= TCG_CT_CONST;
198
+ gpointer key_compare_data,
104
break;
199
+ GDestroyNotify key_destroy_func,
105
+#ifdef TCG_REG_ZERO
200
+ GDestroyNotify value_destroy_func);
106
+ case 'z':
201
+QTree *q_tree_ref(QTree *tree);
107
+ args_ct[i].ct |= TCG_CT_REG_ZERO;
202
+void q_tree_unref(QTree *tree);
108
+ break;
203
+void q_tree_destroy(QTree *tree);
109
+#endif
204
+void q_tree_insert(QTree *tree,
110
205
+ gpointer key,
111
/* Include all of the target-specific constraints. */
206
+ gpointer value);
112
207
+void q_tree_replace(QTree *tree,
113
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
208
+ gpointer key,
114
arg_ct = &args_ct[i];
209
+ gpointer value);
115
ts = arg_temp(arg);
210
+gboolean q_tree_remove(QTree *tree,
116
211
+ gconstpointer key);
117
- if (ts->val_type == TEMP_VAL_CONST
212
+gboolean q_tree_steal(QTree *tree,
118
- && tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
213
+ gconstpointer key);
119
- op_cond, TCGOP_VECE(op))) {
214
+gpointer q_tree_lookup(QTree *tree,
120
- /* constant is OK for instruction */
215
+ gconstpointer key);
121
- const_args[i] = 1;
216
+gboolean q_tree_lookup_extended(QTree *tree,
122
- new_args[i] = ts->val;
217
+ gconstpointer lookup_key,
123
- continue;
218
+ gpointer *orig_key,
124
+ if (ts->val_type == TEMP_VAL_CONST) {
219
+ gpointer *value);
125
+#ifdef TCG_REG_ZERO
220
+void q_tree_foreach(QTree *tree,
126
+ if (ts->val == 0 && (arg_ct->ct & TCG_CT_REG_ZERO)) {
221
+ GTraverseFunc func,
127
+ /* Hardware zero register: indicate register via non-const. */
222
+ gpointer user_data);
128
+ const_args[i] = 0;
223
+gpointer q_tree_search(QTree *tree,
129
+ new_args[i] = TCG_REG_ZERO;
224
+ GCompareFunc search_func,
130
+ continue;
225
+ gconstpointer user_data);
226
+gint q_tree_height(QTree *tree);
227
+gint q_tree_nnodes(QTree *tree);
228
+
229
+#else /* !HAVE_GLIB_WITH_SLICE_ALLOCATOR */
230
+
231
+typedef GTree QTree;
232
+typedef GTreeNode QTreeNode;
233
+typedef GTraverseNodeFunc QTraverseNodeFunc;
234
+
235
+static inline QTree *q_tree_new(GCompareFunc key_compare_func)
236
+{
237
+ return g_tree_new(key_compare_func);
238
+}
239
+
240
+static inline QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
241
+ gpointer key_compare_data)
242
+{
243
+ return g_tree_new_with_data(key_compare_func, key_compare_data);
244
+}
245
+
246
+static inline QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
247
+ gpointer key_compare_data,
248
+ GDestroyNotify key_destroy_func,
249
+ GDestroyNotify value_destroy_func)
250
+{
251
+ return g_tree_new_full(key_compare_func, key_compare_data,
252
+ key_destroy_func, value_destroy_func);
253
+}
254
+
255
+static inline QTree *q_tree_ref(QTree *tree)
256
+{
257
+ return g_tree_ref(tree);
258
+}
259
+
260
+static inline void q_tree_unref(QTree *tree)
261
+{
262
+ g_tree_unref(tree);
263
+}
264
+
265
+static inline void q_tree_destroy(QTree *tree)
266
+{
267
+ g_tree_destroy(tree);
268
+}
269
+
270
+static inline void q_tree_insert(QTree *tree,
271
+ gpointer key,
272
+ gpointer value)
273
+{
274
+ g_tree_insert(tree, key, value);
275
+}
276
+
277
+static inline void q_tree_replace(QTree *tree,
278
+ gpointer key,
279
+ gpointer value)
280
+{
281
+ g_tree_replace(tree, key, value);
282
+}
283
+
284
+static inline gboolean q_tree_remove(QTree *tree,
285
+ gconstpointer key)
286
+{
287
+ return g_tree_remove(tree, key);
288
+}
289
+
290
+static inline gboolean q_tree_steal(QTree *tree,
291
+ gconstpointer key)
292
+{
293
+ return g_tree_steal(tree, key);
294
+}
295
+
296
+static inline gpointer q_tree_lookup(QTree *tree,
297
+ gconstpointer key)
298
+{
299
+ return g_tree_lookup(tree, key);
300
+}
301
+
302
+static inline gboolean q_tree_lookup_extended(QTree *tree,
303
+ gconstpointer lookup_key,
304
+ gpointer *orig_key,
305
+ gpointer *value)
306
+{
307
+ return g_tree_lookup_extended(tree, lookup_key, orig_key, value);
308
+}
309
+
310
+static inline void q_tree_foreach(QTree *tree,
311
+ GTraverseFunc func,
312
+ gpointer user_data)
313
+{
314
+ return g_tree_foreach(tree, func, user_data);
315
+}
316
+
317
+static inline gpointer q_tree_search(QTree *tree,
318
+ GCompareFunc search_func,
319
+ gconstpointer user_data)
320
+{
321
+ return g_tree_search(tree, search_func, user_data);
322
+}
323
+
324
+static inline gint q_tree_height(QTree *tree)
325
+{
326
+ return g_tree_height(tree);
327
+}
328
+
329
+static inline gint q_tree_nnodes(QTree *tree)
330
+{
331
+ return g_tree_nnodes(tree);
332
+}
333
+
334
+#endif /* HAVE_GLIB_WITH_SLICE_ALLOCATOR */
335
+
336
+#endif /* QEMU_QTREE_H */
337
diff --git a/tests/bench/qtree-bench.c b/tests/bench/qtree-bench.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/bench/qtree-bench.c
@@ -XXX,XX +XXX,XX @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include "qemu/osdep.h"
+#include "qemu/qtree.h"
+#include "qemu/timer.h"
+
+enum tree_op {
+ OP_LOOKUP,
+ OP_INSERT,
+ OP_REMOVE,
+ OP_REMOVE_ALL,
+ OP_TRAVERSE,
+};
+
+struct benchmark {
+ const char * const name;
+ enum tree_op op;
+ bool fill_on_init;
+};
+
+enum impl_type {
+ IMPL_GTREE,
+ IMPL_QTREE,
+};
+
+struct tree_implementation {
+ const char * const name;
+ enum impl_type type;
+};
+
+static const struct benchmark benchmarks[] = {
+ {
+ .name = "Lookup",
+ .op = OP_LOOKUP,
+ .fill_on_init = true,
+ },
+ {
+ .name = "Insert",
+ .op = OP_INSERT,
+ .fill_on_init = false,
+ },
+ {
+ .name = "Remove",
+ .op = OP_REMOVE,
+ .fill_on_init = true,
+ },
+ {
+ .name = "RemoveAll",
+ .op = OP_REMOVE_ALL,
+ .fill_on_init = true,
+ },
+ {
+ .name = "Traverse",
+ .op = OP_TRAVERSE,
+ .fill_on_init = true,
+ },
+};
+
+static const struct tree_implementation impls[] = {
+ {
+ .name = "GTree",
+ .type = IMPL_GTREE,
+ },
+ {
+ .name = "QTree",
+ .type = IMPL_QTREE,
+ },
+};
+
+static int compare_func(const void *ap, const void *bp)
+{
+ const size_t *a = ap;
+ const size_t *b = bp;
+
+ return *a - *b;
+}
+
+static void init_empty_tree_and_keys(enum impl_type impl,
+ void **ret_tree, size_t **ret_keys,
+ size_t n_elems)
+{
+ size_t *keys = g_malloc_n(n_elems, sizeof(*keys));
+ for (size_t i = 0; i < n_elems; i++) {
+ keys[i] = i;
+ }
+
+ void *tree;
+ switch (impl) {
+ case IMPL_GTREE:
+ tree = g_tree_new(compare_func);
+ break;
+ case IMPL_QTREE:
+ tree = q_tree_new(compare_func);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ *ret_tree = tree;
+ *ret_keys = keys;
+}
+
+static gboolean traverse_func(gpointer key, gpointer value, gpointer data)
+{
+ return FALSE;
+}
+
+static inline void remove_all(void *tree, enum impl_type impl)
+{
+ switch (impl) {
+ case IMPL_GTREE:
+ g_tree_destroy(tree);
+ break;
+ case IMPL_QTREE:
+ q_tree_destroy(tree);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static int64_t run_benchmark(const struct benchmark *bench,
+ enum impl_type impl,
+ size_t n_elems)
+{
+ void *tree;
+ size_t *keys;
+
+ init_empty_tree_and_keys(impl, &tree, &keys, n_elems);
+ if (bench->fill_on_init) {
+ for (size_t i = 0; i < n_elems; i++) {
+ switch (impl) {
+ case IMPL_GTREE:
+ g_tree_insert(tree, &keys[i], &keys[i]);
+ break;
+ case IMPL_QTREE:
+ q_tree_insert(tree, &keys[i], &keys[i]);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ }
+
+ int64_t start_ns = get_clock();
+ switch (bench->op) {
+ case OP_LOOKUP:
+ for (size_t i = 0; i < n_elems; i++) {
+ void *value;
+ switch (impl) {
+ case IMPL_GTREE:
+ value = g_tree_lookup(tree, &keys[i]);
+ break;
+ case IMPL_QTREE:
+ value = q_tree_lookup(tree, &keys[i]);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ (void)value;
+ }
+ break;
+ case OP_INSERT:
+ for (size_t i = 0; i < n_elems; i++) {
+ switch (impl) {
+ case IMPL_GTREE:
+ g_tree_insert(tree, &keys[i], &keys[i]);
+ break;
+ case IMPL_QTREE:
+ q_tree_insert(tree, &keys[i], &keys[i]);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ break;
+ case OP_REMOVE:
+ for (size_t i = 0; i < n_elems; i++) {
+ switch (impl) {
+ case IMPL_GTREE:
+ g_tree_remove(tree, &keys[i]);
+ break;
+ case IMPL_QTREE:
+ q_tree_remove(tree, &keys[i]);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ break;
+ case OP_REMOVE_ALL:
+ remove_all(tree, impl);
+ break;
+ case OP_TRAVERSE:
+ switch (impl) {
+ case IMPL_GTREE:
+ g_tree_foreach(tree, traverse_func, NULL);
+ break;
+ case IMPL_QTREE:
+ q_tree_foreach(tree, traverse_func, NULL);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ int64_t ns = get_clock() - start_ns;
+
+ if (bench->op != OP_REMOVE_ALL) {
+ remove_all(tree, impl);
+ }
+ g_free(keys);
+
+ return ns;
+}
+
+int main(int argc, char *argv[])
+{
+ size_t sizes[] = {
+ 32,
+ 1024,
+ 1024 * 4,
+ 1024 * 128,
+ 1024 * 1024,
+ };
+
+ double res[ARRAY_SIZE(benchmarks)][ARRAY_SIZE(impls)][ARRAY_SIZE(sizes)];
+ for (int i = 0; i < ARRAY_SIZE(sizes); i++) {
+ size_t size = sizes[i];
+ for (int j = 0; j < ARRAY_SIZE(impls); j++) {
+ const struct tree_implementation *impl = &impls[j];
+ for (int k = 0; k < ARRAY_SIZE(benchmarks); k++) {
+ const struct benchmark *bench = &benchmarks[k];
+
+ /* warm-up run */
+ run_benchmark(bench, impl->type, size);
+
+ int64_t total_ns = 0;
+ int64_t n_runs = 0;
+ while (total_ns < 2e8 || n_runs < 5) {
+ total_ns += run_benchmark(bench, impl->type, size);
+ n_runs++;
+ }
+ double ns_per_run = (double)total_ns / n_runs;
+
+ /* Throughput, in Mops/s */
+ res[k][j][i] = size / ns_per_run * 1e3;
+ }
+ }
+ }
+
+ printf("# Results' breakdown: Tree, Op and #Elements. Units: Mops/s\n");
+ printf("%5s %10s ", "Tree", "Op");
+ for (int i = 0; i < ARRAY_SIZE(sizes); i++) {
+ printf("%7zu ", sizes[i]);
+ }
+ printf("\n");
+ char separator[97];
+ for (int i = 0; i < ARRAY_SIZE(separator) - 1; i++) {
+ separator[i] = '-';
+ }
+ separator[ARRAY_SIZE(separator) - 1] = '\0';
+ printf("%s\n", separator);
+ for (int i = 0; i < ARRAY_SIZE(benchmarks); i++) {
+ for (int j = 0; j < ARRAY_SIZE(impls); j++) {
+ printf("%5s %10s ", impls[j].name, benchmarks[i].name);
+ for (int k = 0; k < ARRAY_SIZE(sizes); k++) {
+ printf("%7.2f ", res[i][j][k]);
+ if (j == 0) {
+ printf(" ");
+ } else {
+ if (res[i][0][k] != 0) {
+ double speedup = res[i][j][k] / res[i][0][k];
+ printf("(%4.2fx) ", speedup);
+ } else {
+ printf("( ) ");
+ }
+ }
+ }
+ printf("\n");
+ }
+ }
+ printf("%s\n", separator);
+ return 0;
+}
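The Mops/s figure computed in main() above is just ops-per-nanosecond scaled by 1e3; a worked example with assumed numbers (illustrative, not part of the patch):

#include <stdio.h>

int main(void)
{
    /* Assumed numbers: one run of 4096 ops took 204.8 us (204800 ns). */
    double ns_per_run = 204800.0;
    double mops = 4096 / ns_per_run * 1e3; /* ops/ns scaled to Mops/s */
    printf("%7.2f Mops/s\n", mops);        /* prints   20.00 */
    return 0;
}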
diff --git a/tests/unit/test-qtree.c b/tests/unit/test-qtree.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/unit/test-qtree.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Tests for QTree.
+ * Original source: glib
+ * https://gitlab.gnome.org/GNOME/glib/-/blob/main/glib/tests/tree.c
+ * LGPL license.
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qtree.h"
+
+static gint my_compare(gconstpointer a, gconstpointer b)
+{
+ const char *cha = a;
+ const char *chb = b;
+
+ return *cha - *chb;
+}
+
+static gint my_compare_with_data(gconstpointer a,
+ gconstpointer b,
+ gpointer user_data)
+{
+ const char *cha = a;
+ const char *chb = b;
+
+ /* just check that we got the right data */
+ g_assert(GPOINTER_TO_INT(user_data) == 123);
+
+ return *cha - *chb;
+}
+
+static gint my_search(gconstpointer a, gconstpointer b)
+{
+ return my_compare(b, a);
+}
+
+static gpointer destroyed_key;
+static gpointer destroyed_value;
+static guint destroyed_key_count;
+static guint destroyed_value_count;
+
+static void my_key_destroy(gpointer key)
+{
+ destroyed_key = key;
+ destroyed_key_count++;
+}
+
+static void my_value_destroy(gpointer value)
+{
+ destroyed_value = value;
+ destroyed_value_count++;
+}
+
+static gint my_traverse(gpointer key, gpointer value, gpointer data)
+{
+ char *ch = key;
+
+ g_assert((*ch) > 0);
+
+ if (*ch == 'd') {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+char chars[] =
+ "0123456789"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz";
+
+char chars2[] =
+ "0123456789"
+ "abcdefghijklmnopqrstuvwxyz";
+
+static gint check_order(gpointer key, gpointer value, gpointer data)
+{
+ char **p = data;
+ char *ch = key;
+
+ g_assert(**p == *ch);
+
+ (*p)++;
+
+ return FALSE;
+}
+
+static void test_tree_search(void)
+{
+ gint i;
+ QTree *tree;
+ gboolean removed;
+ gchar c;
+ gchar *p, *d;
+
+ tree = q_tree_new_with_data(my_compare_with_data, GINT_TO_POINTER(123));
+
+ for (i = 0; chars[i]; i++) {
+ q_tree_insert(tree, &chars[i], &chars[i]);
+ }
+
+ q_tree_foreach(tree, my_traverse, NULL);
+
+ g_assert(q_tree_nnodes(tree) == strlen(chars));
+ g_assert(q_tree_height(tree) == 6);
+
+ p = chars;
+ q_tree_foreach(tree, check_order, &p);
+
+ for (i = 0; i < 26; i++) {
+ removed = q_tree_remove(tree, &chars[i + 10]);
+ g_assert(removed);
+ }
+
+ c = '\0';
+ removed = q_tree_remove(tree, &c);
+ g_assert(!removed);
+
+ q_tree_foreach(tree, my_traverse, NULL);
+
+ g_assert(q_tree_nnodes(tree) == strlen(chars2));
+ g_assert(q_tree_height(tree) == 6);
+
+ p = chars2;
+ q_tree_foreach(tree, check_order, &p);
+
+ for (i = 25; i >= 0; i--) {
+ q_tree_insert(tree, &chars[i + 10], &chars[i + 10]);
+ }
+
+ p = chars;
+ q_tree_foreach(tree, check_order, &p);
+
+ c = '0';
+ p = q_tree_lookup(tree, &c);
+ g_assert(p && *p == c);
+ g_assert(q_tree_lookup_extended(tree, &c, (gpointer *)&d, (gpointer *)&p));
+ g_assert(c == *d && c == *p);
+
+ c = 'A';
+ p = q_tree_lookup(tree, &c);
+ g_assert(p && *p == c);
+
+ c = 'a';
+ p = q_tree_lookup(tree, &c);
+ g_assert(p && *p == c);
+
+ c = 'z';
+ p = q_tree_lookup(tree, &c);
+ g_assert(p && *p == c);
+
+ c = '!';
+ p = q_tree_lookup(tree, &c);
+ g_assert(p == NULL);
+
+ c = '=';
+ p = q_tree_lookup(tree, &c);
+ g_assert(p == NULL);
+
+ c = '|';
+ p = q_tree_lookup(tree, &c);
+ g_assert(p == NULL);
+
+ c = '0';
+ p = q_tree_search(tree, my_search, &c);
+ g_assert(p && *p == c);
+
+ c = 'A';
+ p = q_tree_search(tree, my_search, &c);
+ g_assert(p && *p == c);
+
+ c = 'a';
+ p = q_tree_search(tree, my_search, &c);
+ g_assert(p && *p == c);
+
+ c = 'z';
+ p = q_tree_search(tree, my_search, &c);
+ g_assert(p && *p == c);
+
+ c = '!';
+ p = q_tree_search(tree, my_search, &c);
+ g_assert(p == NULL);
+
+ c = '=';
+ p = q_tree_search(tree, my_search, &c);
+ g_assert(p == NULL);
+
+ c = '|';
+ p = q_tree_search(tree, my_search, &c);
+ g_assert(p == NULL);
+
+ q_tree_destroy(tree);
+}
+
+static void test_tree_remove(void)
+{
+ QTree *tree;
+ char c, d;
+ gint i;
+ gboolean removed;
+
+ tree = q_tree_new_full((GCompareDataFunc)my_compare, NULL,
+ my_key_destroy,
+ my_value_destroy);
+
+ for (i = 0; chars[i]; i++) {
+ q_tree_insert(tree, &chars[i], &chars[i]);
+ }
+
+ c = '0';
+ q_tree_insert(tree, &c, &c);
+ g_assert(destroyed_key == &c);
+ g_assert(destroyed_value == &chars[0]);
+ destroyed_key = NULL;
+ destroyed_value = NULL;
+
+ d = '1';
+ q_tree_replace(tree, &d, &d);
+ g_assert(destroyed_key == &chars[1]);
+ g_assert(destroyed_value == &chars[1]);
+ destroyed_key = NULL;
+ destroyed_value = NULL;
+
+ c = '2';
+ removed = q_tree_remove(tree, &c);
+ g_assert(removed);
+ g_assert(destroyed_key == &chars[2]);
+ g_assert(destroyed_value == &chars[2]);
+ destroyed_key = NULL;
+ destroyed_value = NULL;
+
+ c = '3';
+ removed = q_tree_steal(tree, &c);
+ g_assert(removed);
+ g_assert(destroyed_key == NULL);
+ g_assert(destroyed_value == NULL);
+
+ const gchar *remove = "omkjigfedba";
+ for (i = 0; remove[i]; i++) {
+ removed = q_tree_remove(tree, &remove[i]);
+ g_assert(removed);
+ }
+
+ q_tree_destroy(tree);
+}
+
+static void test_tree_destroy(void)
+{
+ QTree *tree;
+ gint i;
+
+ tree = q_tree_new(my_compare);
+
+ for (i = 0; chars[i]; i++) {
+ q_tree_insert(tree, &chars[i], &chars[i]);
+ }
+
+ g_assert(q_tree_nnodes(tree) == strlen(chars));
+
+ g_test_message("nnodes: %d", q_tree_nnodes(tree));
+ q_tree_ref(tree);
+ q_tree_destroy(tree);
+
+ g_test_message("nnodes: %d", q_tree_nnodes(tree));
+ g_assert(q_tree_nnodes(tree) == 0);
+
+ q_tree_unref(tree);
+}
+
+static void test_tree_insert(void)
+{
+ QTree *tree;
+ gchar *p;
+ gint i;
+ gchar *scrambled;
+
+ tree = q_tree_new(my_compare);
+
+ for (i = 0; chars[i]; i++) {
+ q_tree_insert(tree, &chars[i], &chars[i]);
+ }
+ p = chars;
+ q_tree_foreach(tree, check_order, &p);
+
+ q_tree_unref(tree);
+ tree = q_tree_new(my_compare);
+
+ for (i = strlen(chars) - 1; i >= 0; i--) {
+ q_tree_insert(tree, &chars[i], &chars[i]);
+ }
+ p = chars;
+ q_tree_foreach(tree, check_order, &p);
+
+ q_tree_unref(tree);
+ tree = q_tree_new(my_compare);
+
+ scrambled = g_strdup(chars);
+
+ for (i = 0; i < 30; i++) {
+ gchar tmp;
+ gint a, b;
+
+ a = g_random_int_range(0, strlen(scrambled));
+ b = g_random_int_range(0, strlen(scrambled));
+ tmp = scrambled[a];
+ scrambled[a] = scrambled[b];
+ scrambled[b] = tmp;
+ }
+
+ for (i = 0; scrambled[i]; i++) {
+ q_tree_insert(tree, &scrambled[i], &scrambled[i]);
+ }
+ p = chars;
+ q_tree_foreach(tree, check_order, &p);
+
+ g_free(scrambled);
+ q_tree_unref(tree);
+}
+
+int main(int argc, char *argv[])
+{
+ g_test_init(&argc, &argv, NULL);
+
+ g_test_add_func("/qtree/search", test_tree_search);
+ g_test_add_func("/qtree/remove", test_tree_remove);
+ g_test_add_func("/qtree/destroy", test_tree_destroy);
+ g_test_add_func("/qtree/insert", test_tree_insert);
+
+ return g_test_run();
+}
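One subtlety worth flagging before the implementation below: when a node's left_child/right_child flags are false, its left/right pointers are threads to the in-order predecessor/successor rather than real children, so forward traversal needs neither a parent pointer nor a stack. A minimal sketch of that trick, under that assumption (illustrative C only, mirroring q_tree_node_next below):

typedef struct Node {
    struct Node *left, *right;
    unsigned left_child : 1, right_child : 1;
} Node;

static Node *next_in_order(Node *node)
{
    Node *tmp = node->right;
    if (node->right_child) {      /* real child: descend to leftmost */
        while (tmp->left_child) {
            tmp = tmp->left;
        }
    }
    return tmp;                   /* otherwise 'right' is the thread */
}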
diff --git a/util/qtree.c b/util/qtree.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/util/qtree.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/*
+ * MT safe
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qtree.h"
+
+/**
+ * SECTION:trees-binary
+ * @title: Balanced Binary Trees
+ * @short_description: a sorted collection of key/value pairs optimized
+ * for searching and traversing in order
+ *
+ * The #QTree structure and its associated functions provide a sorted
+ * collection of key/value pairs optimized for searching and traversing
+ * in order. This means that most of the operations (access, search,
+ * insertion, deletion, ...) on #QTree are O(log(n)) in average and O(n)
+ * in worst case for time complexity. But, note that maintaining a
+ * balanced sorted #QTree of n elements is done in time O(n log(n)).
+ *
+ * To create a new #QTree use q_tree_new().
+ *
+ * To insert a key/value pair into a #QTree use q_tree_insert()
+ * (O(n log(n))).
+ *
+ * To remove a key/value pair use q_tree_remove() (O(n log(n))).
+ *
+ * To look up the value corresponding to a given key, use
+ * q_tree_lookup() and q_tree_lookup_extended().
+ *
+ * To find out the number of nodes in a #QTree, use q_tree_nnodes(). To
+ * get the height of a #QTree, use q_tree_height().
+ *
+ * To traverse a #QTree, calling a function for each node visited in
+ * the traversal, use q_tree_foreach().
+ *
+ * To destroy a #QTree, use q_tree_destroy().
+ **/
+
+#define MAX_GTREE_HEIGHT 40
+
+/**
+ * QTree:
+ *
+ * The QTree struct is an opaque data structure representing a
+ * [balanced binary tree][glib-Balanced-Binary-Trees]. It should be
+ * accessed only by using the following functions.
+ */
+struct _QTree {
+ QTreeNode *root;
+ GCompareDataFunc key_compare;
+ GDestroyNotify key_destroy_func;
+ GDestroyNotify value_destroy_func;
+ gpointer key_compare_data;
+ guint nnodes;
+ gint ref_count;
+};
+
+struct _QTreeNode {
+ gpointer key; /* key for this node */
+ gpointer value; /* value stored at this node */
+ QTreeNode *left; /* left subtree */
+ QTreeNode *right; /* right subtree */
+ gint8 balance; /* height (right) - height (left) */
+ guint8 left_child;
+ guint8 right_child;
+};
+
+
+static QTreeNode *q_tree_node_new(gpointer key,
+ gpointer value);
+static QTreeNode *q_tree_insert_internal(QTree *tree,
+ gpointer key,
+ gpointer value,
+ gboolean replace);
+static gboolean q_tree_remove_internal(QTree *tree,
+ gconstpointer key,
+ gboolean steal);
+static QTreeNode *q_tree_node_balance(QTreeNode *node);
+static QTreeNode *q_tree_find_node(QTree *tree,
+ gconstpointer key);
+static QTreeNode *q_tree_node_search(QTreeNode *node,
+ GCompareFunc search_func,
+ gconstpointer data);
+static QTreeNode *q_tree_node_rotate_left(QTreeNode *node);
+static QTreeNode *q_tree_node_rotate_right(QTreeNode *node);
+#ifdef Q_TREE_DEBUG
+static void q_tree_node_check(QTreeNode *node);
+#endif
+
+static QTreeNode*
+q_tree_node_new(gpointer key,
+ gpointer value)
+{
+ QTreeNode *node = g_new(QTreeNode, 1);
+
+ node->balance = 0;
+ node->left = NULL;
+ node->right = NULL;
+ node->left_child = FALSE;
+ node->right_child = FALSE;
+ node->key = key;
+ node->value = value;
+
+ return node;
+}
+
+/**
+ * q_tree_new:
+ * @key_compare_func: the function used to order the nodes in the #QTree.
+ * It should return values similar to the standard strcmp() function -
+ * 0 if the two arguments are equal, a negative value if the first argument
+ * comes before the second, or a positive value if the first argument comes
+ * after the second.
+ *
+ * Creates a new #QTree.
+ *
+ * Returns: a newly allocated #QTree
+ */
+QTree *
+q_tree_new(GCompareFunc key_compare_func)
+{
+ g_return_val_if_fail(key_compare_func != NULL, NULL);
+
+ return q_tree_new_full((GCompareDataFunc) key_compare_func, NULL,
+ NULL, NULL);
+}
+
+/**
+ * q_tree_new_with_data:
+ * @key_compare_func: qsort()-style comparison function
+ * @key_compare_data: data to pass to comparison function
+ *
+ * Creates a new #QTree with a comparison function that accepts user data.
+ * See q_tree_new() for more details.
+ *
+ * Returns: a newly allocated #QTree
+ */
+QTree *
+q_tree_new_with_data(GCompareDataFunc key_compare_func,
+ gpointer key_compare_data)
+{
+ g_return_val_if_fail(key_compare_func != NULL, NULL);
+
+ return q_tree_new_full(key_compare_func, key_compare_data,
+ NULL, NULL);
+}
+
+/**
+ * q_tree_new_full:
+ * @key_compare_func: qsort()-style comparison function
+ * @key_compare_data: data to pass to comparison function
+ * @key_destroy_func: a function to free the memory allocated for the key
+ * used when removing the entry from the #QTree or %NULL if you don't
+ * want to supply such a function
+ * @value_destroy_func: a function to free the memory allocated for the
+ * value used when removing the entry from the #QTree or %NULL if you
+ * don't want to supply such a function
+ *
+ * Creates a new #QTree like q_tree_new() and allows to specify functions
+ * to free the memory allocated for the key and value that get called when
+ * removing the entry from the #QTree.
+ *
+ * Returns: a newly allocated #QTree
+ */
+QTree *
+q_tree_new_full(GCompareDataFunc key_compare_func,
+ gpointer key_compare_data,
+ GDestroyNotify key_destroy_func,
+ GDestroyNotify value_destroy_func)
+{
+ QTree *tree;
+
+ g_return_val_if_fail(key_compare_func != NULL, NULL);
+
+ tree = g_new(QTree, 1);
+ tree->root = NULL;
+ tree->key_compare = key_compare_func;
+ tree->key_destroy_func = key_destroy_func;
+ tree->value_destroy_func = value_destroy_func;
+ tree->key_compare_data = key_compare_data;
+ tree->nnodes = 0;
+ tree->ref_count = 1;
+
+ return tree;
+}
+
+/**
+ * q_tree_node_first:
+ * @tree: a #QTree
+ *
+ * Returns the first in-order node of the tree, or %NULL
+ * for an empty tree.
+ *
+ * Returns: (nullable) (transfer none): the first node in the tree
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_node_first(QTree *tree)
+{
+ QTreeNode *tmp;
+
+ g_return_val_if_fail(tree != NULL, NULL);
+
+ if (!tree->root) {
+ return NULL;
+ }
+
+ tmp = tree->root;
+
+ while (tmp->left_child) {
+ tmp = tmp->left;
+ }
+
+ return tmp;
+}
+
+/**
+ * q_tree_node_previous
+ * @node: a #QTree node
+ *
+ * Returns the previous in-order node of the tree, or %NULL
+ * if the passed node was already the first one.
+ *
+ * Returns: (nullable) (transfer none): the previous node in the tree
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_node_previous(QTreeNode *node)
+{
+ QTreeNode *tmp;
+
+ g_return_val_if_fail(node != NULL, NULL);
+
+ tmp = node->left;
+
+ if (node->left_child) {
+ while (tmp->right_child) {
+ tmp = tmp->right;
+ }
+ }
+
+ return tmp;
+}
+
+/**
+ * q_tree_node_next
+ * @node: a #QTree node
+ *
+ * Returns the next in-order node of the tree, or %NULL
+ * if the passed node was already the last one.
+ *
+ * Returns: (nullable) (transfer none): the next node in the tree
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_node_next(QTreeNode *node)
+{
+ QTreeNode *tmp;
+
+ g_return_val_if_fail(node != NULL, NULL);
+
+ tmp = node->right;
+
+ if (node->right_child) {
+ while (tmp->left_child) {
+ tmp = tmp->left;
+ }
+ }
+
+ return tmp;
+}
+
+/**
+ * q_tree_remove_all:
+ * @tree: a #QTree
+ *
+ * Removes all nodes from a #QTree and destroys their keys and values,
+ * then resets the #QTree’s root to %NULL.
+ *
+ * Since: 2.70 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static void
+q_tree_remove_all(QTree *tree)
+{
+ QTreeNode *node;
+ QTreeNode *next;
+
+ g_return_if_fail(tree != NULL);
+
+ node = q_tree_node_first(tree);
+
+ while (node) {
+ next = q_tree_node_next(node);
+
+ if (tree->key_destroy_func) {
+ tree->key_destroy_func(node->key);
+ }
+ if (tree->value_destroy_func) {
+ tree->value_destroy_func(node->value);
+ }
+ g_free(node);
+
+#ifdef Q_TREE_DEBUG
+ g_assert(tree->nnodes > 0);
+ tree->nnodes--;
+#endif
+
+ node = next;
+ }
+
+#ifdef Q_TREE_DEBUG
+ g_assert(tree->nnodes == 0);
+#endif
+
+ tree->root = NULL;
+#ifndef Q_TREE_DEBUG
+ tree->nnodes = 0;
+#endif
+}
+
+/**
+ * q_tree_ref:
+ * @tree: a #QTree
+ *
+ * Increments the reference count of @tree by one.
+ *
+ * It is safe to call this function from any thread.
+ *
+ * Returns: the passed in #QTree
+ *
+ * Since: 2.22
+ */
+QTree *
+q_tree_ref(QTree *tree)
+{
+ g_return_val_if_fail(tree != NULL, NULL);
+
+ g_atomic_int_inc(&tree->ref_count);
+
+ return tree;
+}
+
+/**
+ * q_tree_unref:
+ * @tree: a #QTree
+ *
+ * Decrements the reference count of @tree by one.
+ * If the reference count drops to 0, all keys and values will
+ * be destroyed (if destroy functions were specified) and all
+ * memory allocated by @tree will be released.
+ *
+ * It is safe to call this function from any thread.
+ *
+ * Since: 2.22
+ */
+void
+q_tree_unref(QTree *tree)
+{
+ g_return_if_fail(tree != NULL);
+
+ if (g_atomic_int_dec_and_test(&tree->ref_count)) {
+ q_tree_remove_all(tree);
+ g_free(tree);
+ }
+}
+
+/**
+ * q_tree_destroy:
+ * @tree: a #QTree
+ *
+ * Removes all keys and values from the #QTree and decreases its
+ * reference count by one. If keys and/or values are dynamically
+ * allocated, you should either free them first or create the #QTree
+ * using q_tree_new_full(). In the latter case the destroy functions
+ * you supplied will be called on all keys and values before destroying
+ * the #QTree.
+ */
+void
+q_tree_destroy(QTree *tree)
+{
+ g_return_if_fail(tree != NULL);
+
+ q_tree_remove_all(tree);
+ q_tree_unref(tree);
+}
+
+/**
+ * q_tree_insert_node:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a key/value pair into a #QTree.
+ *
+ * If the given key already exists in the #QTree its corresponding value
+ * is set to the new value. If you supplied a @value_destroy_func when
+ * creating the #QTree, the old value is freed using that function. If
+ * you supplied a @key_destroy_func when creating the #QTree, the passed
+ * key is freed using that function.
+ *
+ * The tree is automatically 'balanced' as new key/value pairs are added,
+ * so that the distance from the root to every leaf is as small as possible.
+ * The cost of maintaining a balanced tree while inserting new key/value
+ * result in a O(n log(n)) operation where most of the other operations
+ * are O(log(n)).
+ *
+ * Returns: (transfer none): the inserted (or set) node.
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_insert_node(QTree *tree,
+ gpointer key,
+ gpointer value)
+{
+ QTreeNode *node;
+
+ g_return_val_if_fail(tree != NULL, NULL);
+
+ node = q_tree_insert_internal(tree, key, value, FALSE);
+
+#ifdef Q_TREE_DEBUG
+ q_tree_node_check(tree->root);
+#endif
+
+ return node;
+}
+
+/**
+ * q_tree_insert:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a key/value pair into a #QTree.
+ *
+ * Inserts a new key and value into a #QTree as q_tree_insert_node() does,
+ * only this function does not return the inserted or set node.
+ */
+void
+q_tree_insert(QTree *tree,
+ gpointer key,
+ gpointer value)
+{
+ q_tree_insert_node(tree, key, value);
+}
+
+/**
+ * q_tree_replace_node:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a new key and value into a #QTree similar to q_tree_insert_node().
+ * The difference is that if the key already exists in the #QTree, it gets
+ * replaced by the new key. If you supplied a @value_destroy_func when
+ * creating the #QTree, the old value is freed using that function. If you
+ * supplied a @key_destroy_func when creating the #QTree, the old key is
+ * freed using that function.
+ *
+ * The tree is automatically 'balanced' as new key/value pairs are added,
+ * so that the distance from the root to every leaf is as small as possible.
+ *
+ * Returns: (transfer none): the inserted (or set) node.
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_replace_node(QTree *tree,
+ gpointer key,
+ gpointer value)
+{
+ QTreeNode *node;
+
+ g_return_val_if_fail(tree != NULL, NULL);
+
+ node = q_tree_insert_internal(tree, key, value, TRUE);
+
+#ifdef Q_TREE_DEBUG
+ q_tree_node_check(tree->root);
+#endif
+
+ return node;
+}
+
+/**
+ * q_tree_replace:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a new key and value into a #QTree as q_tree_replace_node() does,
+ * only this function does not return the inserted or set node.
+ */
+void
+q_tree_replace(QTree *tree,
+ gpointer key,
+ gpointer value)
+{
+ q_tree_replace_node(tree, key, value);
+}
+
+/* internal insert routine */
+static QTreeNode *
+q_tree_insert_internal(QTree *tree,
+ gpointer key,
+ gpointer value,
+ gboolean replace)
+{
+ QTreeNode *node, *retnode;
+ QTreeNode *path[MAX_GTREE_HEIGHT];
+ int idx;
+
+ g_return_val_if_fail(tree != NULL, NULL);
+
+ if (!tree->root) {
+ tree->root = q_tree_node_new(key, value);
+ tree->nnodes++;
+ return tree->root;
+ }
+
+ idx = 0;
+ path[idx++] = NULL;
+ node = tree->root;
+
+ while (1) {
+ int cmp = tree->key_compare(key, node->key, tree->key_compare_data);
+
+ if (cmp == 0) {
+ if (tree->value_destroy_func) {
+ tree->value_destroy_func(node->value);
+ }
+
+ node->value = value;
+
+ if (replace) {
+ if (tree->key_destroy_func) {
+ tree->key_destroy_func(node->key);
+ }
+
+ node->key = key;
+ } else {
+ /* free the passed key */
+ if (tree->key_destroy_func) {
+ tree->key_destroy_func(key);
+ }
+ }
+
+ return node;
+ } else if (cmp < 0) {
+ if (node->left_child) {
+ path[idx++] = node;
+ node = node->left;
+ } else {
+ QTreeNode *child = q_tree_node_new(key, value);
+
+ child->left = node->left;
+ child->right = node;
+ node->left = child;
+ node->left_child = TRUE;
+ node->balance -= 1;
+
+ tree->nnodes++;
+
+ retnode = child;
+ break;
+ }
+ } else {
+ if (node->right_child) {
+ path[idx++] = node;
+ node = node->right;
+ } else {
+ QTreeNode *child = q_tree_node_new(key, value);
+
+ child->right = node->right;
+ child->left = node;
+ node->right = child;
+ node->right_child = TRUE;
+ node->balance += 1;
+
+ tree->nnodes++;
+
+ retnode = child;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Restore balance. This is the goodness of a non-recursive
+ * implementation, when we are done with balancing we 'break'
+ * the loop and we are done.
+ */
+ while (1) {
+ QTreeNode *bparent = path[--idx];
+ gboolean left_node = (bparent && node == bparent->left);
+ g_assert(!bparent || bparent->left == node || bparent->right == node);
+
+ if (node->balance < -1 || node->balance > 1) {
+ node = q_tree_node_balance(node);
+ if (bparent == NULL) {
+ tree->root = node;
+ } else if (left_node) {
+ bparent->left = node;
+ } else {
+ bparent->right = node;
+ }
+ }
+
+ if (node->balance == 0 || bparent == NULL) {
+ break;
+ }
+
+ if (left_node) {
+ bparent->balance -= 1;
+ } else {
+ bparent->balance += 1;
+ }
+
+ node = bparent;
+ }
+
+ return retnode;
+}
+
+/**
+ * q_tree_remove:
+ * @tree: a #QTree
+ * @key: the key to remove
+ *
+ * Removes a key/value pair from a #QTree.
+ *
+ * If the #QTree was created using q_tree_new_full(), the key and value
+ * are freed using the supplied destroy functions, otherwise you have to
+ * make sure that any dynamically allocated values are freed yourself.
+ * If the key does not exist in the #QTree, the function does nothing.
+ *
+ * The cost of maintaining a balanced tree while removing a key/value
+ * result in a O(n log(n)) operation where most of the other operations
+ * are O(log(n)).
+ *
+ * Returns: %TRUE if the key was found (prior to 2.8, this function
+ * returned nothing)
+ */
+gboolean
+q_tree_remove(QTree *tree,
+ gconstpointer key)
+{
+ gboolean removed;
+
+ g_return_val_if_fail(tree != NULL, FALSE);
+
+ removed = q_tree_remove_internal(tree, key, FALSE);
+
+#ifdef Q_TREE_DEBUG
+ q_tree_node_check(tree->root);
+#endif
+
+ return removed;
+}
+
+/**
+ * q_tree_steal:
+ * @tree: a #QTree
+ * @key: the key to remove
+ *
+ * Removes a key and its associated value from a #QTree without calling
+ * the key and value destroy functions.
+ *
+ * If the key does not exist in the #QTree, the function does nothing.
+ *
+ * Returns: %TRUE if the key was found (prior to 2.8, this function
+ * returned nothing)
+ */
+gboolean
+q_tree_steal(QTree *tree,
+ gconstpointer key)
+{
+ gboolean removed;
+
+ g_return_val_if_fail(tree != NULL, FALSE);
+
+ removed = q_tree_remove_internal(tree, key, TRUE);
+
+#ifdef Q_TREE_DEBUG
+ q_tree_node_check(tree->root);
+#endif
+
+ return removed;
+}
+
+/* internal remove routine */
+static gboolean
+q_tree_remove_internal(QTree *tree,
+ gconstpointer key,
+ gboolean steal)
+{
+ QTreeNode *node, *parent, *balance;
+ QTreeNode *path[MAX_GTREE_HEIGHT];
+ int idx;
+ gboolean left_node;
+
+ g_return_val_if_fail(tree != NULL, FALSE);
+
+ if (!tree->root) {
+ return FALSE;
+ }
+
+ idx = 0;
+ path[idx++] = NULL;
+ node = tree->root;
+
+ while (1) {
+ int cmp = tree->key_compare(key, node->key, tree->key_compare_data);
+
+ if (cmp == 0) {
+ break;
+ } else if (cmp < 0) {
+ if (!node->left_child) {
+ return FALSE;
+ }
+
+ path[idx++] = node;
+ node = node->left;
+ } else {
+ if (!node->right_child) {
+ return FALSE;
+ }
+
+ path[idx++] = node;
+ node = node->right;
+ }
+ }
+
+ /*
+ * The following code is almost equal to q_tree_remove_node,
+ * except that we do not have to call q_tree_node_parent.
+ */
+ balance = parent = path[--idx];
+ g_assert(!parent || parent->left == node || parent->right == node);
+ left_node = (parent && node == parent->left);
+
+ if (!node->left_child) {
+ if (!node->right_child) {
+ if (!parent) {
+ tree->root = NULL;
+ } else if (left_node) {
+ parent->left_child = FALSE;
+ parent->left = node->left;
+ parent->balance += 1;
+ } else {
+ parent->right_child = FALSE;
+ parent->right = node->right;
+ parent->balance -= 1;
+ }
+ } else {
+ /* node has a right child */
+ QTreeNode *tmp = q_tree_node_next(node);
+ tmp->left = node->left;
+
+ if (!parent) {
+ tree->root = node->right;
+ } else if (left_node) {
+ parent->left = node->right;
+ parent->balance += 1;
+ } else {
+ parent->right = node->right;
+ parent->balance -= 1;
+ }
+ }
+ } else {
+ /* node has a left child */
+ if (!node->right_child) {
+ QTreeNode *tmp = q_tree_node_previous(node);
+ tmp->right = node->right;
+
+ if (parent == NULL) {
+ tree->root = node->left;
+ } else if (left_node) {
+ parent->left = node->left;
+ parent->balance += 1;
+ } else {
+ parent->right = node->left;
+ parent->balance -= 1;
+ }
+ } else {
+ /* node has a both children (pant, pant!) */
+ QTreeNode *prev = node->left;
+ QTreeNode *next = node->right;
+ QTreeNode *nextp = node;
+ int old_idx = idx + 1;
+ idx++;
+
+ /* path[idx] == parent */
+ /* find the immediately next node (and its parent) */
+ while (next->left_child) {
+ path[++idx] = nextp = next;
+ next = next->left;
+ }
+
+ path[old_idx] = next;
+ balance = path[idx];
+
+ /* remove 'next' from the tree */
+ if (nextp != node) {
+ if (next->right_child) {
+ nextp->left = next->right;
+ } else {
+ nextp->left_child = FALSE;
+ }
+ nextp->balance += 1;
+
+ next->right_child = TRUE;
+ next->right = node->right;
+ } else {
+ node->balance -= 1;
+ }
+
+ /* set the prev to point to the right place */
+ while (prev->right_child) {
+ prev = prev->right;
+ }
+ prev->right = next;
+
+ /* prepare 'next' to replace 'node' */
+ next->left_child = TRUE;
+ next->left = node->left;
+ next->balance = node->balance;
+
+ if (!parent) {
+ tree->root = next;
+ } else if (left_node) {
+ parent->left = next;
+ } else {
+ parent->right = next;
+ }
+ }
+ }
+
+ /* restore balance */
+ if (balance) {
+ while (1) {
+ QTreeNode *bparent = path[--idx];
+ g_assert(!bparent ||
+ bparent->left == balance ||
+ bparent->right == balance);
+ left_node = (bparent && balance == bparent->left);
+
+ if (balance->balance < -1 || balance->balance > 1) {
+ balance = q_tree_node_balance(balance);
+ if (!bparent) {
+ tree->root = balance;
+ } else if (left_node) {
+ bparent->left = balance;
+ } else {
+ bparent->right = balance;
+ }
+ }
+
+ if (balance->balance != 0 || !bparent) {
+ break;
+ }
+
+ if (left_node) {
+ bparent->balance += 1;
+ } else {
+ bparent->balance -= 1;
+ }
+
+ balance = bparent;
+ }
+ }
+
+ if (!steal) {
+ if (tree->key_destroy_func) {
+ tree->key_destroy_func(node->key);
+ }
+ if (tree->value_destroy_func) {
+ tree->value_destroy_func(node->value);
+ }
+ }
+
+ g_free(node);
+
+ tree->nnodes--;
+
+ return TRUE;
+}
+
+/**
+ * q_tree_lookup_node:
+ * @tree: a #QTree
+ * @key: the key to look up
+ *
+ * Gets the tree node corresponding to the given key. Since a #QTree is
+ * automatically balanced as key/value pairs are added, key lookup
+ * is O(log n) (where n is the number of key/value pairs in the tree).
+ *
+ * Returns: (nullable) (transfer none): the tree node corresponding to
+ * the key, or %NULL if the key was not found
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_lookup_node(QTree *tree,
+ gconstpointer key)
+{
+ g_return_val_if_fail(tree != NULL, NULL);
+
+ return q_tree_find_node(tree, key);
+}
+
+/**
+ * q_tree_lookup:
+ * @tree: a #QTree
+ * @key: the key to look up
+ *
+ * Gets the value corresponding to the given key. Since a #QTree is
+ * automatically balanced as key/value pairs are added, key lookup
+ * is O(log n) (where n is the number of key/value pairs in the tree).
+ *
+ * Returns: the value corresponding to the key, or %NULL
+ * if the key was not found
+ */
+gpointer
+q_tree_lookup(QTree *tree,
+ gconstpointer key)
+{
+ QTreeNode *node;
+
+ node = q_tree_lookup_node(tree, key);
+
+ return node ? node->value : NULL;
+}
+
+/**
+ * q_tree_lookup_extended:
+ * @tree: a #QTree
+ * @lookup_key: the key to look up
+ * @orig_key: (out) (optional) (nullable): returns the original key
+ * @value: (out) (optional) (nullable): returns the value associated with
+ * the key
+ *
+ * Looks up a key in the #QTree, returning the original key and the
+ * associated value. This is useful if you need to free the memory
+ * allocated for the original key, for example before calling
+ * q_tree_remove().
+ *
+ * Returns: %TRUE if the key was found in the #QTree
+ */
+gboolean
+q_tree_lookup_extended(QTree *tree,
+ gconstpointer lookup_key,
+ gpointer *orig_key,
1959
+ gpointer *value)
1960
+{
1961
+ QTreeNode *node;
1962
+
1963
+ g_return_val_if_fail(tree != NULL, FALSE);
1964
+
1965
+ node = q_tree_find_node(tree, lookup_key);
1966
+
1967
+ if (node) {
1968
+ if (orig_key) {
1969
+ *orig_key = node->key;
1970
+ }
1971
+ if (value) {
1972
+ *value = node->value;
1973
+ }
1974
+ return TRUE;
1975
+ } else {
1976
+ return FALSE;
1977
+ }
1978
+}
1979
+
1980
+/**
1981
+ * q_tree_foreach:
1982
+ * @tree: a #QTree
1983
+ * @func: the function to call for each node visited.
1984
+ * If this function returns %TRUE, the traversal is stopped.
1985
+ * @user_data: user data to pass to the function
1986
+ *
1987
+ * Calls the given function for each of the key/value pairs in the #QTree.
1988
+ * The function is passed the key and value of each pair, and the given
1989
+ * @data parameter. The tree is traversed in sorted order.
1990
+ *
1991
+ * The tree may not be modified while iterating over it (you can't
1992
+ * add/remove items). To remove all items matching a predicate, you need
1993
+ * to add each item to a list in your #GTraverseFunc as you walk over
1994
+ * the tree, then walk the list and remove each item.
1995
+ */
1996
+void
1997
+q_tree_foreach(QTree *tree,
1998
+ GTraverseFunc func,
1999
+ gpointer user_data)
2000
+{
2001
+ QTreeNode *node;
2002
+
2003
+ g_return_if_fail(tree != NULL);
2004
+
2005
+ if (!tree->root) {
2006
+ return;
2007
+ }
2008
+
2009
+ node = q_tree_node_first(tree);
2010
+
2011
+ while (node) {
2012
+ if ((*func)(node->key, node->value, user_data)) {
2013
+ break;
2014
+ }
2015
+
2016
+ node = q_tree_node_next(node);
2017
+ }
2018
+}
2019
+
2020
+/**
2021
+ * q_tree_search_node:
2022
+ * @tree: a #QTree
2023
+ * @search_func: a function used to search the #QTree
2024
+ * @user_data: the data passed as the second argument to @search_func
2025
+ *
2026
+ * Searches a #QTree using @search_func.
2027
+ *
2028
+ * The @search_func is called with a pointer to the key of a key/value
2029
+ * pair in the tree, and the passed in @user_data. If @search_func returns
2030
+ * 0 for a key/value pair, then the corresponding node is returned as
2031
+ * the result of q_tree_search(). If @search_func returns -1, searching
2032
+ * will proceed among the key/value pairs that have a smaller key; if
2033
+ * @search_func returns 1, searching will proceed among the key/value
2034
+ * pairs that have a larger key.
2035
+ *
2036
+ * Returns: (nullable) (transfer none): the node corresponding to the
2037
+ * found key, or %NULL if the key was not found
2038
+ *
2039
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
2040
+ */
2041
+static QTreeNode *
2042
+q_tree_search_node(QTree *tree,
2043
+ GCompareFunc search_func,
2044
+ gconstpointer user_data)
2045
+{
2046
+ g_return_val_if_fail(tree != NULL, NULL);
2047
+
2048
+ if (!tree->root) {
2049
+ return NULL;
2050
+ }
2051
+
2052
+ return q_tree_node_search(tree->root, search_func, user_data);
2053
+}
2054
+
2055
+/**
2056
+ * q_tree_search:
2057
+ * @tree: a #QTree
2058
+ * @search_func: a function used to search the #QTree
2059
+ * @user_data: the data passed as the second argument to @search_func
2060
+ *
2061
+ * Searches a #QTree using @search_func.
2062
+ *
2063
+ * The @search_func is called with a pointer to the key of a key/value
2064
+ * pair in the tree, and the passed in @user_data. If @search_func returns
2065
+ * 0 for a key/value pair, then the corresponding value is returned as
2066
+ * the result of q_tree_search(). If @search_func returns -1, searching
2067
+ * will proceed among the key/value pairs that have a smaller key; if
2068
+ * @search_func returns 1, searching will proceed among the key/value
2069
+ * pairs that have a larger key.
2070
+ *
2071
+ * Returns: the value corresponding to the found key, or %NULL
2072
+ * if the key was not found
2073
+ */
2074
+gpointer
2075
+q_tree_search(QTree *tree,
2076
+ GCompareFunc search_func,
2077
+ gconstpointer user_data)
2078
+{
2079
+ QTreeNode *node;
2080
+
2081
+ node = q_tree_search_node(tree, search_func, user_data);
2082
+
2083
+ return node ? node->value : NULL;
2084
+}
2085
+
2086
+/**
2087
+ * q_tree_height:
2088
+ * @tree: a #QTree
2089
+ *
2090
+ * Gets the height of a #QTree.
2091
+ *
2092
+ * If the #QTree contains no nodes, the height is 0.
2093
+ * If the #QTree contains only one root node the height is 1.
2094
+ * If the root node has children the height is 2, etc.
2095
+ *
2096
+ * Returns: the height of @tree
2097
+ */
2098
+gint
2099
+q_tree_height(QTree *tree)
2100
+{
2101
+ QTreeNode *node;
2102
+ gint height;
2103
+
2104
+ g_return_val_if_fail(tree != NULL, 0);
2105
+
2106
+ if (!tree->root) {
2107
+ return 0;
2108
+ }
2109
+
2110
+ height = 0;
2111
+ node = tree->root;
2112
+
2113
+ while (1) {
2114
+ height += 1 + MAX(node->balance, 0);
2115
+
2116
+ if (!node->left_child) {
2117
+ return height;
2118
+ }
2119
+
2120
+ node = node->left;
2121
+ }
2122
+}
2123
+
2124
+/**
2125
+ * q_tree_nnodes:
2126
+ * @tree: a #QTree
2127
+ *
2128
+ * Gets the number of nodes in a #QTree.
2129
+ *
2130
+ * Returns: the number of nodes in @tree
2131
+ */
2132
+gint
2133
+q_tree_nnodes(QTree *tree)
2134
+{
2135
+ g_return_val_if_fail(tree != NULL, 0);
2136
+
2137
+ return tree->nnodes;
2138
+}
2139
+
2140
+static QTreeNode *
2141
+q_tree_node_balance(QTreeNode *node)
2142
+{
2143
+ if (node->balance < -1) {
2144
+ if (node->left->balance > 0) {
2145
+ node->left = q_tree_node_rotate_left(node->left);
2146
+ }
2147
+ node = q_tree_node_rotate_right(node);
2148
+ } else if (node->balance > 1) {
2149
+ if (node->right->balance < 0) {
2150
+ node->right = q_tree_node_rotate_right(node->right);
2151
+ }
2152
+ node = q_tree_node_rotate_left(node);
2153
+ }
2154
+
2155
+ return node;
2156
+}
2157
+
2158
+static QTreeNode *
2159
+q_tree_find_node(QTree *tree,
2160
+ gconstpointer key)
2161
+{
2162
+ QTreeNode *node;
2163
+ gint cmp;
2164
+
2165
+ node = tree->root;
2166
+ if (!node) {
2167
+ return NULL;
2168
+ }
2169
+
2170
+ while (1) {
2171
+ cmp = tree->key_compare(key, node->key, tree->key_compare_data);
2172
+ if (cmp == 0) {
2173
+ return node;
2174
+ } else if (cmp < 0) {
2175
+ if (!node->left_child) {
2176
+ return NULL;
2177
+ }
2178
+
2179
+ node = node->left;
2180
+ } else {
2181
+ if (!node->right_child) {
2182
+ return NULL;
2183
+ }
2184
+
2185
+ node = node->right;
2186
+ }
2187
+ }
2188
+}
2189
+
2190
+static QTreeNode *
2191
+q_tree_node_search(QTreeNode *node,
2192
+ GCompareFunc search_func,
2193
+ gconstpointer data)
2194
+{
2195
+ gint dir;
2196
+
2197
+ if (!node) {
2198
+ return NULL;
2199
+ }
2200
+
2201
+ while (1) {
2202
+ dir = (*search_func)(node->key, data);
2203
+ if (dir == 0) {
2204
+ return node;
2205
+ } else if (dir < 0) {
2206
+ if (!node->left_child) {
2207
+ return NULL;
2208
+ }
2209
+
2210
+ node = node->left;
2211
+ } else {
2212
+ if (!node->right_child) {
2213
+ return NULL;
2214
+ }
2215
+
2216
+ node = node->right;
2217
+ }
2218
+ }
2219
+}
2220
+
2221
+static QTreeNode *
2222
+q_tree_node_rotate_left(QTreeNode *node)
2223
+{
2224
+ QTreeNode *right;
2225
+ gint a_bal;
2226
+ gint b_bal;
2227
+
2228
+ right = node->right;
2229
+
2230
+ if (right->left_child) {
2231
+ node->right = right->left;
2232
+ } else {
2233
+ node->right_child = FALSE;
2234
+ right->left_child = TRUE;
2235
+ }
2236
+ right->left = node;
2237
+
2238
+ a_bal = node->balance;
2239
+ b_bal = right->balance;
2240
+
2241
+ if (b_bal <= 0) {
2242
+ if (a_bal >= 1) {
2243
+ right->balance = b_bal - 1;
2244
+ } else {
2245
+ right->balance = a_bal + b_bal - 2;
2246
+ }
2247
+ node->balance = a_bal - 1;
2248
+ } else {
2249
+ if (a_bal <= b_bal) {
2250
+ right->balance = a_bal - 2;
2251
+ } else {
2252
+ right->balance = b_bal - 1;
2253
+ }
2254
+ node->balance = a_bal - b_bal - 1;
2255
+ }
2256
+
2257
+ return right;
2258
+}
2259
+
2260
+static QTreeNode *
2261
+q_tree_node_rotate_right(QTreeNode *node)
2262
+{
2263
+ QTreeNode *left;
2264
+ gint a_bal;
2265
+ gint b_bal;
2266
+
2267
+ left = node->left;
2268
+
2269
+ if (left->right_child) {
2270
+ node->left = left->right;
2271
+ } else {
2272
+ node->left_child = FALSE;
2273
+ left->right_child = TRUE;
2274
+ }
2275
+ left->right = node;
2276
+
2277
+ a_bal = node->balance;
2278
+ b_bal = left->balance;
2279
+
2280
+ if (b_bal <= 0) {
2281
+ if (b_bal > a_bal) {
2282
+ left->balance = b_bal + 1;
2283
+ } else {
2284
+ left->balance = a_bal + 2;
2285
+ }
2286
+ node->balance = a_bal - b_bal + 1;
2287
+ } else {
2288
+ if (a_bal <= -1) {
2289
+ left->balance = b_bal + 1;
2290
+ } else {
2291
+ left->balance = a_bal + b_bal + 2;
2292
+ }
2293
+ node->balance = a_bal + 1;
2294
+ }
2295
+
2296
+ return left;
2297
+}
2298
+
2299
+#ifdef Q_TREE_DEBUG
2300
+static gint
2301
+q_tree_node_height(QTreeNode *node)
2302
+{
2303
+ gint left_height;
2304
+ gint right_height;
2305
+
2306
+ if (node) {
2307
+ left_height = 0;
2308
+ right_height = 0;
2309
+
2310
+ if (node->left_child) {
2311
+ left_height = q_tree_node_height(node->left);
2312
+ }
2313
+
2314
+ if (node->right_child) {
2315
+ right_height = q_tree_node_height(node->right);
2316
+ }
2317
+
2318
+ return MAX(left_height, right_height) + 1;
2319
+ }
2320
+
2321
+ return 0;
2322
+}
2323
+
2324
+static void q_tree_node_check(QTreeNode *node)
2325
+{
2326
+ gint left_height;
2327
+ gint right_height;
2328
+ gint balance;
2329
+ QTreeNode *tmp;
2330
+
2331
+ if (node) {
2332
+ if (node->left_child) {
2333
+ tmp = q_tree_node_previous(node);
2334
+ g_assert(tmp->right == node);
2335
+ }
2336
+
2337
+ if (node->right_child) {
2338
+ tmp = q_tree_node_next(node);
2339
+ g_assert(tmp->left == node);
2340
+ }
2341
+
2342
+ left_height = 0;
2343
+ right_height = 0;
2344
+
2345
+ if (node->left_child) {
2346
+ left_height = q_tree_node_height(node->left);
2347
+ }
2348
+ if (node->right_child) {
2349
+ right_height = q_tree_node_height(node->right);
2350
+ }
2351
+
2352
+ balance = right_height - left_height;
2353
+ g_assert(balance == node->balance);
2354
+
2355
+ if (node->left_child) {
2356
+ q_tree_node_check(node->left);
2357
+ }
2358
+ if (node->right_child) {
2359
+ q_tree_node_check(node->right);
2360
+ }
2361
+ }
2362
+}
2363
+#endif
2364
diff --git a/tests/bench/meson.build b/tests/bench/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/bench/meson.build
+++ b/tests/bench/meson.build
@@ -XXX,XX +XXX,XX @@ xbzrle_bench = executable('xbzrle-bench',
                           dependencies: [qemuutil,migration])
 endif
 
+qtree_bench = executable('qtree-bench',
+                         sources: 'qtree-bench.c',
+                         dependencies: [qemuutil])
+
 executable('atomic_add-bench',
            sources: files('atomic_add-bench.c'),
            dependencies: [qemuutil],
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -XXX,XX +XXX,XX @@ tests = {
   'test-rcu-slist': [],
   'test-qdist': [],
   'test-qht': [],
+  'test-qtree': [],
   'test-bitops': [],
   'test-bitcnt': [],
   'test-qgraph': ['../qtest/libqos/qgraph.c'],
diff --git a/util/meson.build b/util/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -XXX,XX +XXX,XX @@ util_ss.add(when: 'CONFIG_WIN32', if_true: files('oslib-win32.c'))
 util_ss.add(when: 'CONFIG_WIN32', if_true: files('qemu-thread-win32.c'))
 util_ss.add(when: 'CONFIG_WIN32', if_true: winmm)
 util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch)
+util_ss.add(when: 'HAVE_GLIB_WITH_SLICE_ALLOCATOR', if_true: files('qtree.c'))
 util_ss.add(files('envlist.c', 'path.c', 'module.c'))
 util_ss.add(files('host-utils.c'))
 util_ss.add(files('bitmap.c', 'bitops.c'))
-- 
2.34.1

diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ operation uses a constant input constraint which does not allow all
 constants, it must also accept registers in order to have a fallback.
 The constraint '``i``' is defined generically to accept any constant.
 The constraint '``r``' is not defined generically, but is consistently
-used by each backend to indicate all registers.
+used by each backend to indicate all registers. If ``TCG_REG_ZERO``
+is defined by the backend, the constraint '``z``' is defined generically
+to map constant 0 to the hardware zero register.
 
 The movi_i32 and movi_i64 operations must accept any constants.
 
-- 
2.43.0
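As a quick illustration of the API imported above, here is a minimal usage
sketch. It is not part of the series; it assumes the q_tree_* entry points
declared in "qemu/qtree.h" and ordinary GLib types:

/* Sketch: exercise the QTree API (insert, lookup, remove, destroy). */
static gint key_cmp(gconstpointer a, gconstpointer b, gpointer data)
{
    return GPOINTER_TO_INT(a) - GPOINTER_TO_INT(b);
}

static void qtree_demo(void)
{
    QTree *t = q_tree_new_full(key_cmp, NULL, NULL, NULL);

    q_tree_insert(t, GINT_TO_POINTER(42), (gpointer)"answer");
    g_assert(q_tree_nnodes(t) == 1);
    g_assert(q_tree_lookup(t, GINT_TO_POINTER(42)) != NULL);
    g_assert(q_tree_remove(t, GINT_TO_POINTER(42)));
    q_tree_destroy(t);
}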
Note that 'Z' is still used for addsub2.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target-con-set.h | 12 ++++-----
 tcg/aarch64/tcg-target.c.inc     | 46 ++++++++++++++------------------
 2 files changed, 26 insertions(+), 32 deletions(-)

diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-con-set.h
+++ b/tcg/aarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
  */
 C_O0_I1(r)
 C_O0_I2(r, rC)
-C_O0_I2(rZ, r)
+C_O0_I2(rz, r)
 C_O0_I2(w, r)
-C_O0_I3(rZ, rZ, r)
+C_O0_I3(rz, rz, r)
 C_O1_I1(r, r)
 C_O1_I1(w, r)
 C_O1_I1(w, w)
 C_O1_I1(w, wr)
-C_O1_I2(r, 0, rZ)
+C_O1_I2(r, 0, rz)
 C_O1_I2(r, r, r)
 C_O1_I2(r, r, rA)
 C_O1_I2(r, r, rAL)
 C_O1_I2(r, r, rC)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, r, rL)
-C_O1_I2(r, rZ, rZ)
+C_O1_I2(r, rz, rz)
 C_O1_I2(w, 0, w)
 C_O1_I2(w, w, w)
 C_O1_I2(w, w, wN)
 C_O1_I2(w, w, wO)
 C_O1_I2(w, w, wZ)
 C_O1_I3(w, w, w, w)
-C_O1_I4(r, r, rC, rZ, rZ)
+C_O1_I4(r, r, rC, rz, rz)
 C_O2_I1(r, r, r)
-C_O2_I4(r, r, rZ, rZ, rA, rMZ)
+C_O2_I4(r, r, rz, rz, rA, rMZ)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
     TCGArg a2 = args[2];
     int c2 = const_args[2];
 
-    /* Some operands are defined with "rZ" constraint, a register or
-       the zero register. These need not actually test args[I] == 0. */
-#define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])
-
     switch (opc) {
     case INDEX_op_goto_ptr:
         tcg_out_insn(s, 3207, BR, a0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
 
     case INDEX_op_st8_i32:
     case INDEX_op_st8_i64:
-        tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2, 0);
+        tcg_out_ldst(s, I3312_STRB, a0, a1, a2, 0);
         break;
     case INDEX_op_st16_i32:
     case INDEX_op_st16_i64:
-        tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2, 1);
+        tcg_out_ldst(s, I3312_STRH, a0, a1, a2, 1);
         break;
     case INDEX_op_st_i32:
     case INDEX_op_st32_i64:
-        tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2, 2);
+        tcg_out_ldst(s, I3312_STRW, a0, a1, a2, 2);
        break;
     case INDEX_op_st_i64:
-        tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2, 3);
+        tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
         break;
 
     case INDEX_op_add_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         /* FALLTHRU */
     case INDEX_op_movcond_i64:
         tcg_out_cmp(s, ext, args[5], a1, a2, c2);
-        tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
+        tcg_out_insn(s, 3506, CSEL, ext, a0, args[3], args[4], args[5]);
         break;
 
     case INDEX_op_qemu_ld_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         break;
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, REG0(0), a1, a2, ext);
+        tcg_out_qemu_st(s, a0, a1, a2, ext);
         break;
     case INDEX_op_qemu_ld_i128:
         tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
         break;
     case INDEX_op_qemu_st_i128:
-        tcg_out_qemu_ldst_i128(s, REG0(0), REG0(1), a2, args[3], false);
+        tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
         break;
 
     case INDEX_op_bswap64_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
 
     case INDEX_op_deposit_i64:
     case INDEX_op_deposit_i32:
-        tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]);
+        tcg_out_dep(s, ext, a0, a2, args[3], args[4]);
         break;
 
     case INDEX_op_extract_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
 
     case INDEX_op_extract2_i64:
     case INDEX_op_extract2_i32:
-        tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]);
+        tcg_out_extr(s, ext, a0, a2, a1, args[3]);
         break;
 
     case INDEX_op_add2_i32:
-        tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
+        tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, a2, args[3],
                         (int32_t)args[4], args[5], const_args[4],
                         const_args[5], false);
         break;
     case INDEX_op_add2_i64:
-        tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
+        tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, a2, args[3], args[4],
                         args[5], const_args[4], const_args[5], false);
         break;
     case INDEX_op_sub2_i32:
-        tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
+        tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, a2, args[3],
                         (int32_t)args[4], args[5], const_args[4],
                         const_args[5], true);
         break;
     case INDEX_op_sub2_i64:
-        tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
+        tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, a2, args[3], args[4],
                         args[5], const_args[4], const_args[5], true);
         break;
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
     default:
         g_assert_not_reached();
     }
-
-#undef REG0
 }
 
 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);
 
     case INDEX_op_add_i32:
     case INDEX_op_add_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
-        return C_O1_I4(r, r, rC, rZ, rZ);
+        return C_O1_I4(r, r, rC, rz, rz);
 
     case INDEX_op_qemu_ld_i32:
     case INDEX_op_qemu_ld_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
         return C_O2_I1(r, r, r);
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);
     case INDEX_op_qemu_st_i128:
-        return C_O0_I3(rZ, rZ, r);
+        return C_O0_I3(rz, rz, r);
 
     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64:
-        return C_O1_I2(r, 0, rZ);
+        return C_O1_I2(r, 0, rz);
 
     case INDEX_op_extract2_i32:
     case INDEX_op_extract2_i64:
-        return C_O1_I2(r, rZ, rZ);
+        return C_O1_I2(r, rz, rz);
 
     case INDEX_op_add2_i32:
     case INDEX_op_add2_i64:
     case INDEX_op_sub2_i32:
     case INDEX_op_sub2_i64:
-        return C_O2_I4(r, r, rZ, rZ, rA, rMZ);
+        return C_O2_I4(r, r, rz, rz, rA, rMZ);
 
     case INDEX_op_add_vec:
     case INDEX_op_sub_vec:
-- 
2.43.0
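For context, a hedged sketch of what the generic 'z' constraint buys: the
constraint layer can map a constant 0 input onto the hardware zero register,
which is what the per-backend REG0() macro removed above did by hand. The
names below are illustrative only, not the exact tcg/tcg.c implementation:

/* Illustrative only: a constant 0 operand folded to the zero register. */
#define TCG_REG_ZERO TCG_REG_XZR          /* aarch64's hardwired zero */

static TCGReg op_input_reg(const TCGArg *args, const int *const_args, int i)
{
    /* With 'z', the allocator hands the backend this register directly. */
    return const_args[i] ? TCG_REG_ZERO : (TCGReg)args[i];
}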
We have been enforcing host page alignment for the non-R
fallback of MAX_RESERVED_VA, but failing to enforce for -R.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/main.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/linux-user/main.c b/linux-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
      */
     max_reserved_va = MAX_RESERVED_VA(cpu);
     if (reserved_va != 0) {
+        if (reserved_va % qemu_host_page_size) {
+            char *s = size_to_str(qemu_host_page_size);
+            fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
+            g_free(s);
+            exit(EXIT_FAILURE);
+        }
         if (max_reserved_va && reserved_va > max_reserved_va) {
             fprintf(stderr, "Reserved virtual address too big\n");
             exit(EXIT_FAILURE);
-- 
2.34.1

Replace target-specific 'Z' with generic 'z'.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-con-set.h | 15 ++++++-------
 tcg/loongarch64/tcg-target-con-str.h |  1 -
 tcg/loongarch64/tcg-target.c.inc     | 32 ++++++++++++----------------
 3 files changed, 21 insertions(+), 27 deletions(-)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
  * tcg-target-con-str.h; the constraint combination is inclusive or.
  */
 C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rZ)
+C_O0_I2(rz, r)
+C_O0_I2(rz, rz)
 C_O0_I2(w, r)
 C_O0_I3(r, r, r)
 C_O1_I1(r, r)
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rI)
 C_O1_I2(r, r, rJ)
 C_O1_I2(r, r, rU)
 C_O1_I2(r, r, rW)
-C_O1_I2(r, r, rZ)
-C_O1_I2(r, 0, rZ)
-C_O1_I2(r, rZ, ri)
-C_O1_I2(r, rZ, rJ)
-C_O1_I2(r, rZ, rZ)
+C_O1_I2(r, 0, rz)
+C_O1_I2(r, rz, ri)
+C_O1_I2(r, rz, rJ)
+C_O1_I2(r, rz, rz)
 C_O1_I2(w, w, w)
 C_O1_I2(w, w, wM)
 C_O1_I2(w, w, wA)
 C_O1_I3(w, w, w, w)
-C_O1_I4(r, rZ, rJ, rZ, rZ)
+C_O1_I4(r, rz, rJ, rz, rz)
 C_N2_I1(r, r, r)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-str.h
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('w', ALL_VECTOR_REGS)
 CONST('I', TCG_CT_CONST_S12)
 CONST('J', TCG_CT_CONST_S32)
 CONST('U', TCG_CT_CONST_U12)
-CONST('Z', TCG_CT_CONST_ZERO)
 CONST('C', TCG_CT_CONST_C12)
 CONST('W', TCG_CT_CONST_WSZ)
 CONST('M', TCG_CT_CONST_VCMP)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 
 #define TCG_GUEST_BASE_REG TCG_REG_S1
 
-#define TCG_CT_CONST_ZERO  0x100
-#define TCG_CT_CONST_S12   0x200
-#define TCG_CT_CONST_S32   0x400
-#define TCG_CT_CONST_U12   0x800
-#define TCG_CT_CONST_C12   0x1000
-#define TCG_CT_CONST_WSZ   0x2000
-#define TCG_CT_CONST_VCMP  0x4000
-#define TCG_CT_CONST_VADD  0x8000
+#define TCG_CT_CONST_S12   0x100
+#define TCG_CT_CONST_S32   0x200
+#define TCG_CT_CONST_U12   0x400
+#define TCG_CT_CONST_C12   0x800
+#define TCG_CT_CONST_WSZ   0x1000
+#define TCG_CT_CONST_VCMP  0x2000
+#define TCG_CT_CONST_VADD  0x4000
 
 #define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
 #define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
     if (ct & TCG_CT_CONST) {
         return true;
     }
-    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
-        return true;
-    }
     if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);
 
     case INDEX_op_qemu_ld_i128:
         return C_N2_I1(r, r, r);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 
     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
-        return C_O0_I2(rZ, rZ);
+        return C_O0_I2(rz, rz);
 
     case INDEX_op_ext8s_i32:
     case INDEX_op_ext8s_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64:
         /* Must deposit into the same register as input */
-        return C_O1_I2(r, 0, rZ);
+        return C_O1_I2(r, 0, rz);
 
     case INDEX_op_sub_i32:
     case INDEX_op_setcond_i32:
-        return C_O1_I2(r, rZ, ri);
+        return C_O1_I2(r, rz, ri);
     case INDEX_op_sub_i64:
     case INDEX_op_setcond_i64:
-        return C_O1_I2(r, rZ, rJ);
+        return C_O1_I2(r, rz, rJ);
 
     case INDEX_op_mul_i32:
     case INDEX_op_mul_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_rem_i64:
     case INDEX_op_remu_i32:
     case INDEX_op_remu_i64:
-        return C_O1_I2(r, rZ, rZ);
+        return C_O1_I2(r, rz, rz);
 
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
-        return C_O1_I4(r, rZ, rJ, rZ, rZ);
+        return C_O1_I4(r, rz, rJ, rz, rz);
 
     case INDEX_op_ld_vec:
     case INDEX_op_dupm_vec:
-- 
2.43.0
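A worked example of the new -R diagnostic above, with hypothetical values
(a 4 KiB host page is assumed purely for illustration):

/* Sketch: 0x12345 is rejected, 0x12000 is accepted. */
static int reserved_va_aligned(unsigned long reserved_va,
                               unsigned long page_size)
{
    /* e.g. 0x12345 % 0x1000 == 0x345 -> misaligned, refuse to start */
    return (reserved_va % page_size) == 0;
}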
Pass the address of the last byte to be changed, rather than
the first address past the last byte. This avoids overflow
when the last page of the address space is involved.

Fixes a bug in the loop comparison where "<= end" would lock
one more page than required.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tb-maint.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -XXX,XX +XXX,XX @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
 }
 
 /*
- * Lock a range of pages ([@start,@end[) as well as the pages of all
+ * Lock a range of pages ([@start,@last]) as well as the pages of all
  * intersecting TBs.
  * Locking order: acquire locks in ascending order of page index.
  */
 static struct page_collection *page_collection_lock(tb_page_addr_t start,
-                                                    tb_page_addr_t end)
+                                                    tb_page_addr_t last)
 {
     struct page_collection *set = g_malloc(sizeof(*set));
     tb_page_addr_t index;
     PageDesc *pd;
 
     start >>= TARGET_PAGE_BITS;
-    end >>= TARGET_PAGE_BITS;
-    g_assert(start <= end);
+    last >>= TARGET_PAGE_BITS;
+    g_assert(start <= last);
 
     set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                 page_entry_destroy);
@@ -XXX,XX +XXX,XX @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
 retry:
     q_tree_foreach(set->tree, page_entry_lock, NULL);
 
-    for (index = start; index <= end; index++) {
+    for (index = start; index <= last; index++) {
         TranslationBlock *tb;
         PageForEachNext n;
 
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 void tb_invalidate_phys_page(tb_page_addr_t addr)
 {
     struct page_collection *pages;
-    tb_page_addr_t start, end;
+    tb_page_addr_t start, last;
     PageDesc *p;
 
     p = page_find(addr >> TARGET_PAGE_BITS);
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
     }
 
     start = addr & TARGET_PAGE_MASK;
-    end = start + TARGET_PAGE_SIZE;
-    pages = page_collection_lock(start, end);
-    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
+    last = addr | ~TARGET_PAGE_MASK;
+    pages = page_collection_lock(start, last);
+    tb_invalidate_phys_page_range__locked(pages, p, start, last + 1, 0);
     page_collection_unlock(pages);
 }
 
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
     struct page_collection *pages;
     tb_page_addr_t next;
 
-    pages = page_collection_lock(start, end);
+    pages = page_collection_lock(start, end - 1);
     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
          start < end;
          start = next, next += TARGET_PAGE_SIZE) {
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
 {
     struct page_collection *pages;
 
-    pages = page_collection_lock(ram_addr, ram_addr + size);
+    pages = page_collection_lock(ram_addr, ram_addr + size - 1);
     tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
     page_collection_unlock(pages);
 }
-- 
2.34.1

Replace target-specific 'Z' with generic 'z'.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target-con-set.h | 26 ++++++++++-----------
 tcg/mips/tcg-target-con-str.h |  1 -
 tcg/mips/tcg-target.c.inc     | 44 ++++++++++++++---------------------
 3 files changed, 31 insertions(+), 40 deletions(-)

diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-con-set.h
+++ b/tcg/mips/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
  * tcg-target-con-str.h; the constraint combination is inclusive or.
  */
 C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rZ)
-C_O0_I3(rZ, r, r)
-C_O0_I3(rZ, rZ, r)
-C_O0_I4(rZ, rZ, rZ, rZ)
-C_O0_I4(rZ, rZ, r, r)
+C_O0_I2(rz, r)
+C_O0_I2(rz, rz)
+C_O0_I3(rz, r, r)
+C_O0_I3(rz, rz, r)
+C_O0_I4(rz, rz, rz, rz)
+C_O0_I4(rz, rz, r, r)
 C_O1_I1(r, r)
-C_O1_I2(r, 0, rZ)
+C_O1_I2(r, 0, rz)
 C_O1_I2(r, r, r)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, r, rI)
 C_O1_I2(r, r, rIK)
 C_O1_I2(r, r, rJ)
-C_O1_I2(r, r, rWZ)
-C_O1_I2(r, rZ, rN)
-C_O1_I2(r, rZ, rZ)
-C_O1_I4(r, rZ, rZ, rZ, 0)
-C_O1_I4(r, rZ, rZ, rZ, rZ)
+C_O1_I2(r, r, rzW)
+C_O1_I2(r, rz, rN)
+C_O1_I2(r, rz, rz)
+C_O1_I4(r, rz, rz, rz, 0)
+C_O1_I4(r, rz, rz, rz, rz)
 C_O2_I1(r, r, r)
 C_O2_I2(r, r, r, r)
-C_O2_I4(r, r, rZ, rZ, rN, rN)
+C_O2_I4(r, r, rz, rz, rN, rN)
diff --git a/tcg/mips/tcg-target-con-str.h b/tcg/mips/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-con-str.h
+++ b/tcg/mips/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ CONST('J', TCG_CT_CONST_S16)
 CONST('K', TCG_CT_CONST_P2M1)
 CONST('N', TCG_CT_CONST_N16)
 CONST('W', TCG_CT_CONST_WSZ)
-CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
         g_assert_not_reached();
     }
 }
 
-#define TCG_CT_CONST_ZERO 0x100
-#define TCG_CT_CONST_U16  0x200    /* Unsigned 16-bit: 0 - 0xffff. */
-#define TCG_CT_CONST_S16  0x400    /* Signed 16-bit: -32768 - 32767 */
-#define TCG_CT_CONST_P2M1 0x800    /* Power of 2 minus 1. */
-#define TCG_CT_CONST_N16  0x1000   /* "Negatable" 16-bit: -32767 - 32767 */
-#define TCG_CT_CONST_WSZ  0x2000   /* word size */
+#define TCG_CT_CONST_U16  0x100    /* Unsigned 16-bit: 0 - 0xffff. */
+#define TCG_CT_CONST_S16  0x200    /* Signed 16-bit: -32768 - 32767 */
+#define TCG_CT_CONST_P2M1 0x400    /* Power of 2 minus 1. */
+#define TCG_CT_CONST_N16  0x800    /* "Negatable" 16-bit: -32767 - 32767 */
+#define TCG_CT_CONST_WSZ  0x1000   /* word size */
 
 #define ALL_GENERAL_REGS  0xffffffffu
 
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
 {
     if (ct & TCG_CT_CONST) {
         return 1;
-    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
-        return 1;
     } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
         return 1;
     } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     TCGArg a0, a1, a2;
     int c2;
 
-    /*
-     * Note that many operands use the constraint set "rZ".
-     * We make use of the fact that 0 is the ZERO register,
-     * and hence such cases need not check for const_args.
-     */
     a0 = args[0];
     a1 = args[1];
     a2 = args[2];
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);
 
     case INDEX_op_add_i32:
     case INDEX_op_add_i64:
         return C_O1_I2(r, r, rJ);
     case INDEX_op_sub_i32:
     case INDEX_op_sub_i64:
-        return C_O1_I2(r, rZ, rN);
+        return C_O1_I2(r, rz, rN);
     case INDEX_op_mul_i32:
     case INDEX_op_mulsh_i32:
     case INDEX_op_muluh_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_remu_i64:
     case INDEX_op_nor_i64:
     case INDEX_op_setcond_i64:
-        return C_O1_I2(r, rZ, rZ);
+        return C_O1_I2(r, rz, rz);
     case INDEX_op_muls2_i32:
     case INDEX_op_mulu2_i32:
     case INDEX_op_muls2_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
         return C_O1_I2(r, r, ri);
     case INDEX_op_clz_i32:
     case INDEX_op_clz_i64:
-        return C_O1_I2(r, r, rWZ);
+        return C_O1_I2(r, r, rzW);
 
     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64:
-        return C_O1_I2(r, 0, rZ);
+        return C_O1_I2(r, 0, rz);
     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
-        return C_O0_I2(rZ, rZ);
+        return C_O0_I2(rz, rz);
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
         return (use_mips32r6_instructions
-                ? C_O1_I4(r, rZ, rZ, rZ, rZ)
-                : C_O1_I4(r, rZ, rZ, rZ, 0));
+                ? C_O1_I4(r, rz, rz, rz, rz)
+                : C_O1_I4(r, rz, rz, rz, 0));
     case INDEX_op_add2_i32:
     case INDEX_op_sub2_i32:
-        return C_O2_I4(r, r, rZ, rZ, rN, rN);
+        return C_O2_I4(r, r, rz, rz, rN, rN);
     case INDEX_op_setcond2_i32:
-        return C_O1_I4(r, rZ, rZ, rZ, rZ);
+        return C_O1_I4(r, rz, rz, rz, rz);
     case INDEX_op_brcond2_i32:
-        return C_O0_I4(rZ, rZ, rZ, rZ);
+        return C_O0_I4(rz, rz, rz, rz);
 
     case INDEX_op_qemu_ld_i32:
         return C_O1_I1(r, r);
     case INDEX_op_qemu_st_i32:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);
     case INDEX_op_qemu_ld_i64:
         return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
     case INDEX_op_qemu_st_i64:
-        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, rZ, r);
+        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rz, r) : C_O0_I3(rz, rz, r);
 
     default:
         return C_NotImplemented;
-- 
2.43.0
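The overflow that the "last, not end" convention avoids, as a self-contained
sketch (a 32-bit guest address space is assumed for illustration):

#include <stdint.h>

/* Sketch: a range touching the top page of a 32-bit address space. */
static void last_vs_end(void)
{
    uint32_t start = 0xfffff000u;
    uint32_t end   = start + 0x1000u;   /* wraps to 0: "start <= end" fails  */
    uint32_t last  = start + 0x0fffu;   /* 0xffffffff: "start <= last" holds */
    (void)end; (void)last;
}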
Pass the address of the last byte to be changed, rather than
the first address past the last byte. This avoids overflow
when the last page of the address space is involved.

Properly truncate tb_last to the end of the page; the old comment
claiming that a tb_end past the end of the page is not a problem
does not hold once overflow is considered.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tb-maint.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
 static void
 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                       PageDesc *p, tb_page_addr_t start,
-                                      tb_page_addr_t end,
+                                      tb_page_addr_t last,
                                       uintptr_t retaddr)
 {
     TranslationBlock *tb;
-    tb_page_addr_t tb_start, tb_end;
     PageForEachNext n;
 #ifdef TARGET_HAS_PRECISE_SMC
     bool current_tb_modified = false;
     TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
 #endif /* TARGET_HAS_PRECISE_SMC */
-    tb_page_addr_t last G_GNUC_UNUSED = end - 1;
 
     /*
-     * We remove all the TBs in the range [start, end[.
+     * We remove all the TBs in the range [start, last].
      * XXX: see if in some cases it could be faster to invalidate all the code
      */
     PAGE_FOR_EACH_TB(start, last, p, tb, n) {
+        tb_page_addr_t tb_start, tb_last;
+
         /* NOTE: this is subtle as a TB may span two physical pages */
+        tb_start = tb_page_addr0(tb);
+        tb_last = tb_start + tb->size - 1;
         if (n == 0) {
-            /* NOTE: tb_end may be after the end of the page, but
-               it is not a problem */
-            tb_start = tb_page_addr0(tb);
-            tb_end = tb_start + tb->size;
+            tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK);
         } else {
             tb_start = tb_page_addr1(tb);
-            tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
-                                 & ~TARGET_PAGE_MASK);
+            tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
         }
-        if (!(tb_end <= start || tb_start >= end)) {
+        if (!(tb_last < start || tb_start > last)) {
 #ifdef TARGET_HAS_PRECISE_SMC
             if (current_tb == tb &&
                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
     start = addr & TARGET_PAGE_MASK;
     last = addr | ~TARGET_PAGE_MASK;
     pages = page_collection_lock(start, last);
-    tb_invalidate_phys_page_range__locked(pages, p, start, last + 1, 0);
+    tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
     page_collection_unlock(pages);
 }
 
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
             continue;
         }
         assert_page_locked(pd);
-        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
+        tb_invalidate_phys_page_range__locked(pages, pd, start, bound - 1, 0);
     }
     page_collection_unlock(pages);
 }
@@ -XXX,XX +XXX,XX @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
     }
 
     assert_page_locked(p);
-    tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
+    tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
 }
 
 /*
-- 
2.34.1

Replace target-specific 'Z' with generic 'z'.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target-con-set.h | 10 +++++-----
 tcg/riscv/tcg-target-con-str.h |  1 -
 tcg/riscv/tcg-target.c.inc     | 28 ++++++++++++----------------
 3 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
  * tcg-target-con-str.h; the constraint combination is inclusive or.
  */
 C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rZ)
+C_O0_I2(rz, r)
+C_O0_I2(rz, rz)
 C_O1_I1(r, r)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, r, rI)
 C_O1_I2(r, r, rJ)
-C_O1_I2(r, rZ, rN)
-C_O1_I2(r, rZ, rZ)
+C_O1_I2(r, rz, rN)
+C_O1_I2(r, rz, rz)
 C_N1_I2(r, r, rM)
 C_O1_I4(r, r, rI, rM, rM)
-C_O2_I4(r, r, rZ, rZ, rM, rM)
+C_O2_I4(r, r, rz, rz, rM, rM)
 C_O0_I2(v, r)
 C_O1_I1(v, r)
 C_O1_I1(v, v)
diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-str.h
+++ b/tcg/riscv/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ CONST('K', TCG_CT_CONST_S5)
 CONST('L', TCG_CT_CONST_CMP_VI)
 CONST('N', TCG_CT_CONST_N12)
 CONST('M', TCG_CT_CONST_M12)
-CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
     return TCG_REG_A0 + slot;
 }
 
-#define TCG_CT_CONST_ZERO   0x100
-#define TCG_CT_CONST_S12    0x200
-#define TCG_CT_CONST_N12    0x400
-#define TCG_CT_CONST_M12    0x800
-#define TCG_CT_CONST_J12    0x1000
-#define TCG_CT_CONST_S5     0x2000
-#define TCG_CT_CONST_CMP_VI 0x4000
+#define TCG_CT_CONST_S12    0x100
+#define TCG_CT_CONST_N12    0x200
+#define TCG_CT_CONST_M12    0x400
+#define TCG_CT_CONST_J12    0x800
+#define TCG_CT_CONST_S5     0x1000
+#define TCG_CT_CONST_CMP_VI 0x2000
 
 #define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
 #define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
     if (ct & TCG_CT_CONST) {
         return 1;
     }
-    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
-        return 1;
-    }
     if (type >= TCG_TYPE_V64) {
         /* Val is replicated by VECE; extract the highest element. */
         val >>= (-8 << vece) & 63;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);
 
     case INDEX_op_add_i32:
     case INDEX_op_and_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 
     case INDEX_op_sub_i32:
     case INDEX_op_sub_i64:
-        return C_O1_I2(r, rZ, rN);
+        return C_O1_I2(r, rz, rN);
 
     case INDEX_op_mul_i32:
     case INDEX_op_mulsh_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_divu_i64:
     case INDEX_op_rem_i64:
     case INDEX_op_remu_i64:
-        return C_O1_I2(r, rZ, rZ);
+        return C_O1_I2(r, rz, rz);
 
     case INDEX_op_shl_i32:
     case INDEX_op_shr_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 
     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
-        return C_O0_I2(rZ, rZ);
+        return C_O0_I2(rz, rz);
 
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_add2_i64:
     case INDEX_op_sub2_i32:
     case INDEX_op_sub2_i64:
-        return C_O2_I4(r, r, rZ, rZ, rM, rM);
+        return C_O2_I4(r, r, rz, rz, rM, rM);
 
     case INDEX_op_qemu_ld_i32:
     case INDEX_op_qemu_ld_i64:
         return C_O1_I1(r, r);
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);
 
     case INDEX_op_st_vec:
         return C_O0_I2(v, r);
-- 
2.43.0
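A sketch of the clamp added above, with illustrative 4 KiB pages: on its
first-page pass, a TB that crosses a page boundary must only be matched
within that first page (the mask value is an assumption for the demo):

#include <stdint.h>

#define DEMO_PAGE_MASK 0xfffff000u          /* illustrative 4 KiB pages */

static uint32_t clamp_tb_last(uint32_t tb_start, uint32_t tb_size)
{
    uint32_t tb_last = tb_start + tb_size - 1;      /* may cross a page  */
    uint32_t page_last = tb_start | ~DEMO_PAGE_MASK; /* last byte of page */
    return tb_last < page_last ? tb_last : page_last;
}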
Pass the address of the last byte to be changed, rather than
the first address past the last byte. This avoids overflow
when the last page of the address space is involved.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h |  2 +-
 accel/tcg/user-exec.c  | 11 +++++------
 linux-user/mmap.c      |  2 +-
 3 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ int walk_memory_regions(void *, walk_memory_regions_fn);
 
 int page_get_flags(target_ulong address);
 void page_set_flags(target_ulong start, target_ulong last, int flags);
-void page_reset_target_data(target_ulong start, target_ulong end);
+void page_reset_target_data(target_ulong start, target_ulong last);
 int page_check_range(target_ulong start, target_ulong len, int flags);
 
 /**
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
     }
 
     if (!flags || reset) {
-        page_reset_target_data(start, last + 1);
+        page_reset_target_data(start, last);
         inval_tb |= pageflags_unset(start, last);
     }
     if (flags) {
@@ -XXX,XX +XXX,XX @@ typedef struct TargetPageDataNode {
 
 static IntervalTreeRoot targetdata_root;
 
-void page_reset_target_data(target_ulong start, target_ulong end)
+void page_reset_target_data(target_ulong start, target_ulong last)
 {
     IntervalTreeNode *n, *next;
-    target_ulong last;
 
     assert_memory_lock();
 
-    start = start & TARGET_PAGE_MASK;
-    last = TARGET_PAGE_ALIGN(end) - 1;
+    start &= TARGET_PAGE_MASK;
+    last |= ~TARGET_PAGE_MASK;
 
     for (n = interval_tree_iter_first(&targetdata_root, start, last),
          next = n ? interval_tree_iter_next(n, start, last) : NULL;
@@ -XXX,XX +XXX,XX @@ void *page_get_target_data(target_ulong address)
     return t->data[(page - region) >> TARGET_PAGE_BITS];
 }
 #else
-void page_reset_target_data(target_ulong start, target_ulong end) { }
+void page_reset_target_data(target_ulong start, target_ulong last) { }
 #endif /* TARGET_PAGE_DATA_SIZE */
 
 /* The softmmu versions of these helpers are in cputlb.c. */
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -XXX,XX +XXX,XX @@ abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
         if (can_passthrough_madvise(start, end)) {
             ret = get_errno(madvise(g2h_untagged(start), len, advice));
             if ((advice == MADV_DONTNEED) && (ret == 0)) {
-                page_reset_target_data(start, start + len);
+                page_reset_target_data(start, start + len - 1);
             }
         }
     }
-- 
2.34.1

Replace target-specific 'Z' with generic 'z'.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target-con-set.h | 12 ++++++------
 tcg/sparc64/tcg-target-con-str.h |  1 -
 tcg/sparc64/tcg-target.c.inc     | 17 +++++++----------
 3 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-set.h
+++ b/tcg/sparc64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
  * tcg-target-con-str.h; the constraint combination is inclusive or.
  */
 C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rJ)
+C_O0_I2(rz, r)
+C_O0_I2(rz, rJ)
 C_O1_I1(r, r)
 C_O1_I2(r, r, r)
-C_O1_I2(r, rZ, rJ)
-C_O1_I4(r, rZ, rJ, rI, 0)
-C_O2_I2(r, r, rZ, rJ)
-C_O2_I4(r, r, rZ, rZ, rJ, rJ)
+C_O1_I2(r, rz, rJ)
+C_O1_I4(r, rz, rJ, rI, 0)
+C_O2_I2(r, r, rz, rJ)
+C_O2_I4(r, r, rz, rz, rJ, rJ)
diff --git a/tcg/sparc64/tcg-target-con-str.h b/tcg/sparc64/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-str.h
+++ b/tcg/sparc64/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('r', ALL_GENERAL_REGS)
  */
 CONST('I', TCG_CT_CONST_S11)
 CONST('J', TCG_CT_CONST_S13)
-CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 
 #define TCG_CT_CONST_S11  0x100
 #define TCG_CT_CONST_S13  0x200
-#define TCG_CT_CONST_ZERO 0x400
 
 #define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
 
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
         val = (int32_t)val;
     }
 
-    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
-        return 1;
-    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
+    if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
         return 1;
     } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
         return 1;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);
 
     case INDEX_op_add_i32:
     case INDEX_op_add_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_setcond_i64:
     case INDEX_op_negsetcond_i32:
     case INDEX_op_negsetcond_i64:
-        return C_O1_I2(r, rZ, rJ);
+        return C_O1_I2(r, rz, rJ);
 
     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
-        return C_O0_I2(rZ, rJ);
+        return C_O0_I2(rz, rJ);
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
-        return C_O1_I4(r, rZ, rJ, rI, 0);
+        return C_O1_I4(r, rz, rJ, rI, 0);
     case INDEX_op_add2_i32:
     case INDEX_op_add2_i64:
     case INDEX_op_sub2_i32:
     case INDEX_op_sub2_i64:
-        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
+        return C_O2_I4(r, r, rz, rz, rJ, rJ);
     case INDEX_op_mulu2_i32:
     case INDEX_op_muls2_i32:
-        return C_O2_I2(r, r, rZ, rJ);
+        return C_O2_I2(r, r, rz, rJ);
     case INDEX_op_muluh_i64:
         return C_O1_I2(r, r, r);
-- 
2.43.0
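Rounding an inclusive [start, last] range out to page boundaries, per the
convention in the patch above: no "+1" is needed, so even a range ending on
the final byte of the address space cannot overflow (sketch with an
illustrative mask, not the real TARGET_PAGE_MASK):

#include <stdint.h>

#define DEMO_PAGE_MASK 0xfffff000u          /* illustrative 4 KiB pages */

static void round_to_pages(uint32_t *start, uint32_t *last)
{
    *start &= DEMO_PAGE_MASK;    /* down to the start of the first page */
    *last  |= ~DEMO_PAGE_MASK;   /* up to the last byte of the last page */
}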
From: Fabiano Rosas <farosas@suse.de>

When complying with the alignment requested in the ELF and unmapping
the excess reservation, having align_end not aligned to the guest page
causes the unmap to be rejected by the alignment check at
target_munmap and later brk adjustments hit an EEXIST.

Fix by aligning the start of the region to be unmapped.

Fixes: c81d1fafa6 ("linux-user: Honor elf alignment when placing images")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1913
Signed-off-by: Fabiano Rosas <farosas@suse.de>
[rth: Align load_end as well.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20250213143558.10504-1-farosas@suse.de>
---
 linux-user/elfload.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, const ImageSource *src,
 
     if (align_size != reserve_size) {
         abi_ulong align_addr = ROUND_UP(load_addr, align);
-        abi_ulong align_end = align_addr + reserve_size;
-        abi_ulong load_end = load_addr + align_size;
+        abi_ulong align_end = TARGET_PAGE_ALIGN(align_addr + reserve_size);
+        abi_ulong load_end = TARGET_PAGE_ALIGN(load_addr + align_size);
 
         if (align_addr != load_addr) {
             target_munmap(load_addr, align_addr - load_addr);
-- 
2.43.0
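A worked sketch of the elfload fix above, with hypothetical numbers (4 KiB
guest pages and a 64 KiB ELF alignment request are assumptions for the
demo): the excess reservation is unmapped between align_end and load_end,
so both bounds must land on page boundaries or target_munmap() rejects them.

#include <stdint.h>

#define DEMO_PAGE_SIZE 0x1000u
#define DEMO_PAGE_ALIGN(x) \
    (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

static void elf_excess_unmap_bounds(void)
{
    uint32_t load_addr = 0x10000800u;             /* hypothetical mapping */
    uint32_t reserve_size = 0x20000u, align_size = 0x30000u;
    uint32_t align_addr = (load_addr + 0xffffu) & ~0xffffu;  /* 64 KiB up */
    uint32_t align_end = DEMO_PAGE_ALIGN(align_addr + reserve_size);
    uint32_t load_end = DEMO_PAGE_ALIGN(load_addr + align_size);
    (void)align_end; (void)load_end;    /* unmap [align_end, load_end) */
}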
From: Andreas Schwab <schwab@suse.de>

SA_RESTORER and the associated sa_restorer field of struct sigaction are
an obsolete feature, not expected to be used by future architectures.
They are also absent on RISC-V, LoongArch, Hexagon and OpenRISC, but
defined due to their use of generic/signal.h. This leads to corrupted
data and out-of-bounds accesses.

Move the definition of TARGET_SA_RESTORER out of generic/signal.h into the
target_signal.h files that need it. Note that m68k has the sa_restorer
field, but does not use it and does not define SA_RESTORER.

Reported-by: Thomas Weißschuh <thomas@t-8ch.de>
Signed-off-by: Andreas Schwab <schwab@suse.de>
Reviewed-by: Thomas Weißschuh <thomas@t-8ch.de>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <mvmed060xc9.fsf@suse.de>
---
 linux-user/aarch64/target_signal.h    | 2 ++
 linux-user/arm/target_signal.h        | 2 ++
 linux-user/generic/signal.h           | 1 -
 linux-user/i386/target_signal.h       | 2 ++
 linux-user/m68k/target_signal.h       | 1 +
 linux-user/microblaze/target_signal.h | 2 ++
 linux-user/ppc/target_signal.h        | 2 ++
 linux-user/s390x/target_signal.h      | 2 ++
 linux-user/sh4/target_signal.h        | 2 ++
 linux-user/x86_64/target_signal.h     | 2 ++
 linux-user/xtensa/target_signal.h     | 2 ++
 11 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/target_signal.h
+++ b/linux-user/aarch64/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
 #define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */

diff --git a/linux-user/arm/target_signal.h b/linux-user/arm/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/arm/target_signal.h
+++ b/linux-user/arm/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

diff --git a/linux-user/generic/signal.h b/linux-user/generic/signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/generic/signal.h
+++ b/linux-user/generic/signal.h
@@ -XXX,XX +XXX,XX @@
 #define TARGET_SA_RESTART 0x10000000
 #define TARGET_SA_NODEFER 0x40000000
 #define TARGET_SA_RESETHAND 0x80000000
-#define TARGET_SA_RESTORER 0x04000000

 #define TARGET_SIGHUP 1
 #define TARGET_SIGINT 2
diff --git a/linux-user/i386/target_signal.h b/linux-user/i386/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/i386/target_signal.h
+++ b/linux-user/i386/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

diff --git a/linux-user/m68k/target_signal.h b/linux-user/m68k/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/m68k/target_signal.h
+++ b/linux-user/m68k/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_ARCH_HAS_SA_RESTORER 1
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

diff --git a/linux-user/microblaze/target_signal.h b/linux-user/microblaze/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/microblaze/target_signal.h
+++ b/linux-user/microblaze/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

 #endif /* MICROBLAZE_TARGET_SIGNAL_H */
diff --git a/linux-user/ppc/target_signal.h b/linux-user/ppc/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/ppc/target_signal.h
+++ b/linux-user/ppc/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #if !defined(TARGET_PPC64)
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #endif
diff --git a/linux-user/s390x/target_signal.h b/linux-user/s390x/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/s390x/target_signal.h
+++ b/linux-user/s390x/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

diff --git a/linux-user/sh4/target_signal.h b/linux-user/sh4/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/sh4/target_signal.h
+++ b/linux-user/sh4/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

diff --git a/linux-user/x86_64/target_signal.h b/linux-user/x86_64/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/x86_64/target_signal.h
+++ b/linux-user/x86_64/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 /* For x86_64, use of SA_RESTORER is mandatory. */
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0

diff --git a/linux-user/xtensa/target_signal.h b/linux-user/xtensa/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/xtensa/target_signal.h
+++ b/linux-user/xtensa/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

 #endif
--
2.43.0
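The corruption the commit message describes can be reproduced with a toy model of the target sigaction layout. This sketch is illustrative only and does not reproduce QEMU's actual structures; the point is that the field exists only when the flag is defined:

#include <stdio.h>

/* Model a target whose kernel ABI has no sa_restorer, e.g. RISC-V:
 * TARGET_SA_RESTORER must stay undefined so the field is compiled out. */
#undef TARGET_SA_RESTORER

struct target_sigaction {
    unsigned long sa_handler;
    unsigned long sa_flags;
#ifdef TARGET_SA_RESTORER
    unsigned long sa_restorer;  /* present only when the ABI has it */
#endif
    unsigned long sa_mask;
};

int main(void)
{
    /* Had generic/signal.h defined TARGET_SA_RESTORER anyway, the struct
     * would grow a field the guest never supplies, shifting sa_mask and
     * corrupting whatever is copied through this layout. */
    printf("sizeof(struct target_sigaction) = %zu\n",
           sizeof(struct target_sigaction));
    return 0;
}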
From: Mikael Szreder <git@miszr.win>

A bug was introduced in commit 0bba7572d40d which causes the fdtox
and fqtox instructions to incorrectly select the destination registers.
More information and a test program can be found in issue #2802.

Fixes: 0bba7572d40d ("target/sparc: Perform DFPREG/QFPREG in decodetree")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2802
Signed-off-by: Mikael Szreder <git@miszr.win>
Acked-by: Artyom Tarasenko <atar4qemu@gmail.com>
[rth: Squash patches together, since the second fixes a typo in the first.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20250205090333.19626-3-git@miszr.win>
---
 target/sparc/insns.decode | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/target/sparc/insns.decode b/target/sparc/insns.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/insns.decode
+++ b/target/sparc/insns.decode
@@ -XXX,XX +XXX,XX @@ FdMULq 10 ..... 110100 ..... 0 0110 1110 ..... @q_d_d
 FNHADDs 10 ..... 110100 ..... 0 0111 0001 ..... @r_r_r
 FNHADDd 10 ..... 110100 ..... 0 0111 0010 ..... @d_d_d
 FNsMULd 10 ..... 110100 ..... 0 0111 1001 ..... @d_r_r
-FsTOx 10 ..... 110100 00000 0 1000 0001 ..... @r_r2
-FdTOx 10 ..... 110100 00000 0 1000 0010 ..... @r_d2
-FqTOx 10 ..... 110100 00000 0 1000 0011 ..... @r_q2
-FxTOs 10 ..... 110100 00000 0 1000 0100 ..... @r_r2
-FxTOd 10 ..... 110100 00000 0 1000 1000 ..... @d_r2
-FxTOq 10 ..... 110100 00000 0 1000 1100 ..... @q_r2
+FsTOx 10 ..... 110100 00000 0 1000 0001 ..... @d_r2
+FdTOx 10 ..... 110100 00000 0 1000 0010 ..... @d_d2
+FqTOx 10 ..... 110100 00000 0 1000 0011 ..... @d_q2
+FxTOs 10 ..... 110100 00000 0 1000 0100 ..... @r_d2
+FxTOd 10 ..... 110100 00000 0 1000 1000 ..... @d_d2
+FxTOq 10 ..... 110100 00000 0 1000 1100 ..... @q_d2
 FiTOs 10 ..... 110100 00000 0 1100 0100 ..... @r_r2
 FdTOs 10 ..... 110100 00000 0 1100 0110 ..... @r_d2
 FqTOs 10 ..... 110100 00000 0 1100 0111 ..... @r_q2
--
2.43.0
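The difference between the @r_* and @d_*/@q_* formats swapped above is in how the 5-bit register field is decoded: a plain single-precision register number versus SPARC's double-width encoding, where bit 0 of the field is folded back in as bit 5 of the register number. A standalone sketch of that folding (the helper name is illustrative; QEMU expresses this through its decodetree formats):

#include <stdio.h>

/* Map a 5-bit instruction field to a double-width register number,
 * per the SPARC V9 encoding for f32 and up. */
static int dfpreg(int field)
{
    return ((field & 1) << 5) | (field & 0x1e);
}

int main(void)
{
    printf("field 0x01 -> f%d\n", dfpreg(0x01));  /* f32 */
    printf("field 0x03 -> f%d\n", dfpreg(0x03));  /* f34 */
    printf("field 0x1f -> f%d\n", dfpreg(0x1f));  /* f62 */
    return 0;
}

Using an @r_* format where a double-width operand is expected skips this folding, which is exactly how the wrong registers got selected.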
From: Mikael Szreder <git@miszr.win>

The gdbstub implementation for the Sparc architecture would
incorrectly calculate the floating point register offset. This
resulted in, for example, registers f32 and f34 pointing to the
same value.

The issue was caused by confusion between even register numbers
and consecutive register indexes: the register index of f32 is 64,
and that of f34 is 65.

Fixes: 30038fd81808 ("target-sparc: Change fpr representation to doubles.")
Signed-off-by: Mikael Szreder <git@miszr.win>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20250214070343.11501-1-git@miszr.win>
---
 target/sparc/gdbstub.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/target/sparc/gdbstub.c b/target/sparc/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/gdbstub.c
+++ b/target/sparc/gdbstub.c
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
         }
     }
     if (n < 80) {
-        /* f32-f62 (double width, even numbers only) */
-        return gdb_get_reg64(mem_buf, env->fpr[(n - 32) / 2].ll);
+        /* f32-f62 (16 double width registers, even register numbers only)
+         * n == 64: f32 : env->fpr[16]
+         * n == 65: f34 : env->fpr[17]
+         * etc...
+         * n == 79: f62 : env->fpr[31]
+         */
+        return gdb_get_reg64(mem_buf, env->fpr[(n - 64) + 16].ll);
     }
     switch (n) {
     case 80:
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
         }
         return 4;
     } else if (n < 80) {
-        /* f32-f62 (double width, even numbers only) */
-        env->fpr[(n - 32) / 2].ll = tmp;
+        /* f32-f62 (16 double width registers, even register numbers only)
+         * n == 64: f32 : env->fpr[16]
+         * n == 65: f34 : env->fpr[17]
+         * etc...
+         * n == 79: f62 : env->fpr[31]
+         */
+        env->fpr[(n - 64) + 16].ll = tmp;
     } else {
         switch (n) {
         case 80:
--
2.43.0
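The corrected mapping is easy to sanity-check in isolation. This throwaway program, not part of the patch, compares the old and new index formulas over the affected gdb register numbers and shows the old one collapsing n=64 and n=65 onto fpr[16]:

#include <stdio.h>

int main(void)
{
    for (int n = 64; n < 80; n++) {
        int old_idx = (n - 32) / 2;   /* buggy: 64 and 65 both -> 16 */
        int new_idx = (n - 64) + 16;  /* fixed: 64 -> 16, 65 -> 17, ... */
        printf("n=%2d (f%d): old fpr[%d], new fpr[%d]\n",
               n, 32 + 2 * (n - 64), old_idx, new_idx);
    }
    return 0;
}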
From: Artyom Tarasenko <atar4qemu@gmail.com>

Fake access to the PCR (Performance Control Register) and the
PIC (Performance Instrumentation Counter) registers.

Ignore writes in privileged mode, and return 0 on reads.

This allows booting Tribblix, MilaX and v9os under the Niagara target.

Signed-off-by: Artyom Tarasenko <atar4qemu@gmail.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20250209211248.50383-1-atar4qemu@gmail.com>
---
 target/sparc/translate.c  | 19 +++++++++++++++++++
 target/sparc/insns.decode |  7 ++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)

 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

+static TCGv do_rdpic(DisasContext *dc, TCGv dst)
+{
+    return tcg_constant_tl(0);
+}
+
+TRANS(RDPIC, HYPV, do_rd_special, supervisor(dc), a->rd, do_rdpic)
+
+
 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
 {
     gen_helper_rdccr(dst, tcg_env);
@@ -XXX,XX +XXX,XX @@ static void do_wrfprs(DisasContext *dc, TCGv src)

 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

+static bool do_priv_nop(DisasContext *dc, bool priv)
+{
+    if (!priv) {
+        return raise_priv(dc);
+    }
+    return advance_pc(dc);
+}
+
+TRANS(WRPCR, HYPV, do_priv_nop, supervisor(dc))
+TRANS(WRPIC, HYPV, do_priv_nop, supervisor(dc))
+
 static void do_wrgsr(DisasContext *dc, TCGv src)
 {
     gen_trap_ifnofpu(dc);
diff --git a/target/sparc/insns.decode b/target/sparc/insns.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/insns.decode
+++ b/target/sparc/insns.decode
@@ -XXX,XX +XXX,XX @@ CALL 01 i:s30
 RDTICK 10 rd:5 101000 00100 0 0000000000000
 RDPC 10 rd:5 101000 00101 0 0000000000000
 RDFPRS 10 rd:5 101000 00110 0 0000000000000
-RDASR17 10 rd:5 101000 10001 0 0000000000000
+{
+  RDASR17 10 rd:5 101000 10001 0 0000000000000
+  RDPIC 10 rd:5 101000 10001 0 0000000000000
+}
 RDGSR 10 rd:5 101000 10011 0 0000000000000
 RDSOFTINT 10 rd:5 101000 10110 0 0000000000000
 RDTICK_CMPR 10 rd:5 101000 10111 0 0000000000000
@@ -XXX,XX +XXX,XX @@ CALL 01 i:s30
 WRCCR 10 00010 110000 ..... . ............. @n_r_ri
 WRASI 10 00011 110000 ..... . ............. @n_r_ri
 WRFPRS 10 00110 110000 ..... . ............. @n_r_ri
+WRPCR 10 10000 110000 01000 0 0000000000000
+WRPIC 10 10001 110000 01000 0 0000000000000
 {
     WRGSR 10 10011 110000 ..... . ............. @n_r_ri
     WRPOWERDOWN 10 10011 110000 ..... . ............. @n_r_ri
--
2.43.0
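The pattern this patch uses, privileged writes becoming nops and reads yielding a constant zero while unprivileged access traps, can be sketched outside of TCG. The names below are illustrative stand-ins, not QEMU code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool privileged = true;  /* stand-in for supervisor(dc) */

/* Reads succeed in privileged mode and always yield zero. */
static bool read_fake_counter(uint64_t *val)
{
    if (!privileged) {
        return false;  /* real code would raise a privilege trap */
    }
    *val = 0;
    return true;
}

/* Writes in privileged mode are accepted and discarded. */
static bool write_fake_counter(uint64_t val)
{
    (void)val;
    return privileged;
}

int main(void)
{
    uint64_t v;
    if (read_fake_counter(&v)) {
        printf("PCR/PIC read as %llu\n", (unsigned long long)v);
    }
    printf("write %s\n", write_fake_counter(42) ? "ignored" : "trapped");
    return 0;
}

This is enough for guests like Tribblix that only probe the performance counters during boot: they see a valid, always-zero counter instead of an illegal-instruction trap.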