The following changes since commit b300c134465465385045ab705b68a42699688332:

  Merge tag 'pull-vfio-20230524' of https://github.com/legoater/qemu into staging (2023-05-24 14:23:41 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230525

for you to fetch changes up to a30498fcea5a8b9c544324ccfb0186090104b229:

  tcg/riscv: Support CTZ, CLZ from Zbb (2023-05-25 15:29:36 +0000)

----------------------------------------------------------------
tcg/mips:
  - Constant formation improvements
  - Replace MIPS_BE with HOST_BIG_ENDIAN
  - General cleanups
tcg/riscv:
  - Improve setcond
  - Support movcond
  - Support Zbb, Zba

----------------------------------------------------------------
Richard Henderson (23):
      tcg/mips: Move TCG_AREG0 to S8
      tcg/mips: Move TCG_GUEST_BASE_REG to S7
      tcg/mips: Unify TCG_GUEST_BASE_REG tests
      tcg/mips: Create and use TCG_REG_TB
      tcg/mips: Split out tcg_out_movi_one
      tcg/mips: Split out tcg_out_movi_two
      tcg/mips: Use the constant pool for 64-bit constants
      tcg/mips: Aggressively use the constant pool for n64 calls
      tcg/mips: Try tb-relative addresses in tcg_out_movi
      tcg/mips: Try three insns with shift and add in tcg_out_movi
      tcg/mips: Use qemu_build_not_reached for LO/HI_OFF
      tcg/mips: Replace MIPS_BE with HOST_BIG_ENDIAN
      disas/riscv: Decode czero.{eqz,nez}
      tcg/riscv: Probe for Zba, Zbb, Zicond extensions
      tcg/riscv: Support ANDN, ORN, XNOR from Zbb
      tcg/riscv: Support ADD.UW, SEXT.B, SEXT.H, ZEXT.H from Zba+Zbb
      tcg/riscv: Use ADD.UW for guest address generation
      tcg/riscv: Support rotates from Zbb
      tcg/riscv: Support REV8 from Zbb
      tcg/riscv: Support CPOP from Zbb
      tcg/riscv: Improve setcond expansion
      tcg/riscv: Implement movcond
      tcg/riscv: Support CTZ, CLZ from Zbb

 tcg/mips/tcg-target.h          |   3 +-
 tcg/riscv/tcg-target-con-set.h |   3 +
 tcg/riscv/tcg-target-con-str.h |   1 +
 tcg/riscv/tcg-target.h         |  48 ++--
 disas/riscv.c                  |   6 +
 tcg/mips/tcg-target.c.inc      | 308 ++++++++++++++++-----
 tcg/riscv/tcg-target.c.inc     | 612 ++++++++++++++++++++++++++++++++++++-----
 7 files changed, 825 insertions(+), 156 deletions(-)

No functional change; just moving the saved reserved regs to the end.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.h     | 2 +-
 tcg/mips/tcg-target.c.inc | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
     TCG_REG_RA,
 
     TCG_REG_CALL_STACK = TCG_REG_SP,
-    TCG_AREG0 = TCG_REG_S0,
+    TCG_AREG0 = TCG_REG_S8,
 } TCGReg;
 
 /* used for function call generation */
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
 }
 
 static const int tcg_target_callee_save_regs[] = {
-    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
+    TCG_REG_S0,
     TCG_REG_S1,
     TCG_REG_S2,
     TCG_REG_S3,
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_callee_save_regs[] = {
     TCG_REG_S5,
     TCG_REG_S6,
     TCG_REG_S7,
-    TCG_REG_S8,
+    TCG_REG_S8,       /* used for the global env (TCG_AREG0) */
     TCG_REG_RA,       /* should be last for ABI compliance */
 };
-- 
2.34.1

No functional change; just moving the saved reserved regs to the end.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.c.inc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 #define TCG_TMP3  TCG_REG_T7
 
 #ifndef CONFIG_SOFTMMU
-#define TCG_GUEST_BASE_REG TCG_REG_S1
+#define TCG_GUEST_BASE_REG TCG_REG_S7
 #endif
 
 /* check if we really need so many registers :P */
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_callee_save_regs[] = {
     TCG_REG_S4,
     TCG_REG_S5,
     TCG_REG_S6,
-    TCG_REG_S7,
+    TCG_REG_S7,       /* used for guest_base */
     TCG_REG_S8,       /* used for the global env (TCG_AREG0) */
     TCG_REG_RA,       /* should be last for ABI compliance */
 };
-- 
2.34.1

In tcg_out_qemu_ld/st, we already check for guest_base matching int16_t.
Mirror that when setting up TCG_GUEST_BASE_REG in the prologue.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.c.inc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     }
 
 #ifndef CONFIG_SOFTMMU
-    if (guest_base) {
+    if (guest_base != (int16_t)guest_base) {
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
-- 
2.34.1

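As background (not part of the patch): the int16_t round-trip above is the
usual "fits in MIPS's sign-extended 16-bit immediate" test, so a small
guest_base can be folded into the offset field of each load/store and
needs no register at all. A minimal sketch of the idiom, with values
chosen here for illustration:

    #include <stdint.h>
    #include <stdbool.h>

    /* True iff v survives truncation to int16_t, i.e. -32768 <= v <= 32767. */
    static bool fits_simm16(int64_t v)
    {
        return v == (int16_t)v;
    }
    /* fits_simm16(0x7fff) -> true; fits_simm16(0x8000) -> false */
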
This vastly reduces the size of code generated for 64-bit addresses.
The code for exit_tb, for instance, where we load a (tagged) pointer
to the current TB, goes from

  0x400aa9725c:  li       v0,64
  0x400aa97260:  dsll     v0,v0,0x10
  0x400aa97264:  ori      v0,v0,0xaa9
  0x400aa97268:  dsll     v0,v0,0x10
  0x400aa9726c:  j        0x400aa9703c
  0x400aa97270:  ori      v0,v0,0x7083

to

  0x400aa97240:  j        0x400aa97040
  0x400aa97244:  daddiu   v0,s6,-189

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.c.inc | 69 +++++++++++++++++++++++++++++++++------
 1 file changed, 59 insertions(+), 10 deletions(-)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 #ifndef CONFIG_SOFTMMU
 #define TCG_GUEST_BASE_REG TCG_REG_S7
 #endif
+#if TCG_TARGET_REG_BITS == 64
+#define TCG_REG_TB TCG_REG_S6
+#else
+#define TCG_REG_TB (qemu_build_not_reached(), TCG_REG_ZERO)
+#endif
 
 /* check if we really need so many registers :P */
 static const int tcg_target_reg_alloc_order[] = {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6,
 
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
 {
-    TCGReg b0 = TCG_REG_ZERO;
+    TCGReg base = TCG_REG_ZERO;
+    int16_t lo = 0;
 
-    if (a0 & ~0xffff) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
-        b0 = TCG_REG_V0;
+    if (a0) {
+        intptr_t ofs;
+        if (TCG_TARGET_REG_BITS == 64) {
+            ofs = tcg_tbrel_diff(s, (void *)a0);
+            lo = ofs;
+            if (ofs == lo) {
+                base = TCG_REG_TB;
+            } else {
+                base = TCG_REG_V0;
+                tcg_out_movi(s, TCG_TYPE_PTR, base, ofs - lo);
+                tcg_out_opc_reg(s, ALIAS_PADD, base, base, TCG_REG_TB);
+            }
+        } else {
+            ofs = a0;
+            lo = ofs;
+            base = TCG_REG_V0;
+            tcg_out_movi(s, TCG_TYPE_PTR, base, ofs - lo);
+        }
     }
     if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)tb_ret_addr);
         tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
     }
-    tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
+    /* delay slot */
+    tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_V0, base, lo);
 }
 
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
+    intptr_t ofs = get_jmp_target_addr(s, which);
+    TCGReg base, dest;
+
     /* indirect jump method */
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
-               get_jmp_target_addr(s, which));
-    tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
+    if (TCG_TARGET_REG_BITS == 64) {
+        dest = TCG_REG_TB;
+        base = TCG_REG_TB;
+        ofs = tcg_tbrel_diff(s, (void *)ofs);
+    } else {
+        dest = TCG_TMP0;
+        base = TCG_REG_ZERO;
+    }
+    tcg_out_ld(s, TCG_TYPE_PTR, dest, base, ofs);
+    tcg_out_opc_reg(s, OPC_JR, 0, dest, 0);
+    /* delay slot */
     tcg_out_nop(s);
+
     set_jmp_reset_offset(s, which);
+    if (TCG_TARGET_REG_BITS == 64) {
+        /* For the unlinked case, need to reset TCG_REG_TB. */
+        tcg_out_ldst(s, ALIAS_PADDI, TCG_REG_TB, TCG_REG_TB,
+                     -tcg_current_code_size(s));
+    }
 }
 
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_ptr:
         /* jmp to the given host address (could be epilogue) */
         tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
-        tcg_out_nop(s);
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
+        } else {
+            tcg_out_nop(s);
+        }
         break;
     case INDEX_op_br:
         tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_callee_save_regs[] = {
     TCG_REG_S3,
     TCG_REG_S4,
     TCG_REG_S5,
-    TCG_REG_S6,
+    TCG_REG_S6,       /* used for the tb base (TCG_REG_TB) */
     TCG_REG_S7,       /* used for guest_base */
     TCG_REG_S8,       /* used for the global env (TCG_AREG0) */
     TCG_REG_RA,       /* should be last for ABI compliance */
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
 #endif
+    if (TCG_TARGET_REG_BITS == 64) {
+        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
+    }
 
     /* Call generated code */
     tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA);   /* return address */
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);   /* stack pointer */
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);   /* global pointer */
+    if (TCG_TARGET_REG_BITS == 64) {
+        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);  /* tc->tc_ptr */
+    }
 }
 
 typedef struct {
-- 
2.34.1

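Worth spelling out for the "after" sequence in the message above: MIPS
executes the instruction in a jump's delay slot before control arrives at
the target, so "daddiu v0,s6,-189" runs as part of "j 0x400aa97040". With
s6 holding TCG_REG_TB, the tagged TB pointer is formed by a single
instruction riding along with the jump, instead of the five-insn
li/dsll/ori chain shown before it.
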
Emit all constants that can be loaded in exactly one insn.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.c.inc | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
     return true;
 }
 
+static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
+{
+    if (arg == (int16_t)arg) {
+        tcg_out_opc_imm(s, OPC_ADDIU, ret, TCG_REG_ZERO, arg);
+        return true;
+    }
+    if (arg == (uint16_t)arg) {
+        tcg_out_opc_imm(s, OPC_ORI, ret, TCG_REG_ZERO, arg);
+        return true;
+    }
+    if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
+        tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16);
+        return true;
+    }
+    return false;
+}
+
 static void tcg_out_movi(TCGContext *s, TCGType type,
                          TCGReg ret, tcg_target_long arg)
 {
     if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
         arg = (int32_t)arg;
     }
 
-    if (arg == (int16_t)arg) {
-        tcg_out_opc_imm(s, OPC_ADDIU, ret, TCG_REG_ZERO, arg);
-        return;
-    }
-    if (arg == (uint16_t)arg) {
-        tcg_out_opc_imm(s, OPC_ORI, ret, TCG_REG_ZERO, arg);
+    if (tcg_out_movi_one(s, ret, arg)) {
         return;
     }
+
     if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
         tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16);
     } else {
-- 
2.34.1

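For experimenting with which constants the three tests above accept, here
is a stand-alone mirror of the classification logic (sample values chosen
here, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    static const char *movi_one_class(int64_t arg)
    {
        if (arg == (int16_t)arg) {
            return "ADDIU (sign-extended 16-bit)";
        }
        if (arg == (uint16_t)arg) {
            return "ORI (zero-extended 16-bit)";
        }
        if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
            return "LUI (high 16 bits, sign-extended)";
        }
        return "needs more than one insn";
    }

    int main(void)
    {
        int64_t samples[] = { -1, 0xffff, 0x7fff0000, 0x12345678 };
        for (int i = 0; i < 4; i++) {
            printf("%#llx: %s\n", (long long)samples[i],
                   movi_one_class(samples[i]));
        }
        return 0;
    }
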
Emit all 32-bit signed constants, which can be loaded in two insns.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.c.inc | 35 ++++++++++++++++++++++-----------
 1 file changed, 24 insertions(+), 11 deletions(-)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
     return false;
 }
 
+static bool tcg_out_movi_two(TCGContext *s, TCGReg ret, tcg_target_long arg)
+{
+    /*
+     * All signed 32-bit constants are loadable with two immediates,
+     * and everything else requires more work.
+     */
+    if (arg == (int32_t)arg) {
+        if (!tcg_out_movi_one(s, ret, arg)) {
+            tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16);
+            tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff);
+        }
+        return true;
+    }
+    return false;
+}
+
 static void tcg_out_movi(TCGContext *s, TCGType type,
                          TCGReg ret, tcg_target_long arg)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
         arg = (int32_t)arg;
     }
 
-    if (tcg_out_movi_one(s, ret, arg)) {
+    /* Load all 32-bit constants. */
+    if (tcg_out_movi_two(s, ret, arg)) {
         return;
     }
 
-    if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
-        tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16);
+    tcg_out_movi(s, TCG_TYPE_I32, ret, arg >> 31 >> 1);
+    if (arg & 0xffff0000ull) {
+        tcg_out_dsll(s, ret, ret, 16);
+        tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg >> 16);
+        tcg_out_dsll(s, ret, ret, 16);
     } else {
-        tcg_out_movi(s, TCG_TYPE_I32, ret, arg >> 31 >> 1);
-        if (arg & 0xffff0000ull) {
-            tcg_out_dsll(s, ret, ret, 16);
-            tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg >> 16);
-            tcg_out_dsll(s, ret, ret, 16);
-        } else {
-            tcg_out_dsll(s, ret, ret, 32);
-        }
+        tcg_out_dsll(s, ret, ret, 32);
     }
     if (arg & 0xffff) {
         tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff);
-- 
2.34.1

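A worked example of the new two-insn path (value chosen here for
illustration): 0x12345678 fails all three single-insn tests, so it is
emitted as LUI ret,0x1234 followed by ORI ret,ret,0x5678; any constant
that sign-extends cleanly from 32 bits is covered the same way.
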
During normal processing, the constant pool is accessible via
TCG_REG_TB. During the prologue, it is accessible via TCG_REG_T9.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.h     |  1 +
 tcg/mips/tcg-target.c.inc | 65 +++++++++++++++++++++++++++++----------
 2 files changed, 49 insertions(+), 17 deletions(-)

diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 
 #define TCG_TARGET_DEFAULT_MO 0
 #define TCG_TARGET_NEED_LDST_LABELS
+#define TCG_TARGET_NEED_POOL_LABELS
 
 #endif
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
  */
 
 #include "../tcg-ldst.c.inc"
+#include "../tcg-pool.c.inc"
 
 #if HOST_BIG_ENDIAN
 # define MIPS_BE  1
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                         intptr_t value, intptr_t addend)
 {
-    tcg_debug_assert(type == R_MIPS_PC16);
-    tcg_debug_assert(addend == 0);
-    return reloc_pc16(code_ptr, (const tcg_insn_unit *)value);
+    value += addend;
+    switch (type) {
+    case R_MIPS_PC16:
+        return reloc_pc16(code_ptr, (const tcg_insn_unit *)value);
+    case R_MIPS_16:
+        if (value != (int16_t)value) {
+            return false;
+        }
+        *code_ptr = deposit32(*code_ptr, 0, 16, value);
+        return true;
+    }
+    g_assert_not_reached();
 }
 
 #define TCG_CT_CONST_ZERO 0x100
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nop(TCGContext *s)
     tcg_out32(s, 0);
 }
 
+static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
+{
+    memset(p, 0, count * sizeof(tcg_insn_unit));
+}
+
 static void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
 {
     tcg_out_opc_sa64(s, OPC_DSLL, OPC_DSLL32, rd, rt, sa);
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_movi_two(TCGContext *s, TCGReg ret, tcg_target_long arg)
     return false;
 }
 
-static void tcg_out_movi(TCGContext *s, TCGType type,
-                         TCGReg ret, tcg_target_long arg)
+static void tcg_out_movi_pool(TCGContext *s, TCGReg ret,
+                              tcg_target_long arg, TCGReg tbreg)
+{
+    new_pool_label(s, arg, R_MIPS_16, s->code_ptr, tcg_tbrel_diff(s, NULL));
+    tcg_out_opc_imm(s, OPC_LD, ret, tbreg, 0);
+}
+
+static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
+                             tcg_target_long arg, TCGReg tbreg)
 {
     if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
         arg = (int32_t)arg;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
     if (tcg_out_movi_two(s, ret, arg)) {
         return;
     }
+    assert(TCG_TARGET_REG_BITS == 64);
 
-    tcg_out_movi(s, TCG_TYPE_I32, ret, arg >> 31 >> 1);
-    if (arg & 0xffff0000ull) {
-        tcg_out_dsll(s, ret, ret, 16);
-        tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg >> 16);
-        tcg_out_dsll(s, ret, ret, 16);
-    } else {
-        tcg_out_dsll(s, ret, ret, 32);
-    }
-    if (arg & 0xffff) {
-        tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff);
-    }
+    /* Otherwise, put 64-bit constants into the constant pool. */
+    tcg_out_movi_pool(s, ret, arg, tbreg);
+}
+
+static void tcg_out_movi(TCGContext *s, TCGType type,
+                         TCGReg ret, tcg_target_long arg)
+{
+    TCGReg tbreg = TCG_TARGET_REG_BITS == 64 ? TCG_REG_TB : 0;
+    tcg_out_movi_int(s, type, ret, arg, tbreg);
 }
 
 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
 
 #ifndef CONFIG_SOFTMMU
     if (guest_base != (int16_t)guest_base) {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
+        /*
+         * The function call abi for n32 and n64 will have loaded $25 (t9)
+         * with the address of the prologue, so we can use that instead
+         * of TCG_REG_TB.
+         */
+#if TCG_TARGET_REG_BITS == 64 && !defined(__mips_abicalls)
+# error "Unknown mips abi"
+#endif
+        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base,
+                         TCG_TARGET_REG_BITS == 64 ? TCG_REG_T9 : 0);
         tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
     }
 #endif
+
     if (TCG_TARGET_REG_BITS == 64) {
         tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
     }
-- 
2.34.1

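A note on the R_MIPS_16 relocation added above (sketch, with an
illustrative helper name): the pool entry's displacement from TCG_REG_TB
is spliced into the 16-bit immediate field of the already-emitted LD,
which is exactly what the deposit32() call in patch_reloc does:

    #include <stdint.h>
    #include "qemu/bitops.h"   /* deposit32() */

    /* Overwrite bits [15:0] of an I-type insn with the pool displacement. */
    static uint32_t patch_imm16(uint32_t insn, int16_t disp)
    {
        return deposit32(insn, 0, 16, (uint16_t)disp);
    }
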
Repeated calls to a single helper are common -- especially
the ones for softmmu memory access. Prefer the constant pool
to longer sequences to increase sharing.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.c.inc | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
 
 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
 {
-    /* Note that the ABI requires the called function's address to be
-       loaded into T9, even if a direct branch is in range.  */
-    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);
+    /*
+     * Note that __mips_abicalls requires the called function's address
+     * to be loaded into $25 (t9), even if a direct branch is in range.
+     *
+     * For n64, always drop the pointer into the constant pool.
+     * We can re-use helper addresses often and do not want any
+     * of the longer sequences tcg_out_movi may try.
+     */
+    if (sizeof(uintptr_t) == 8) {
+        tcg_out_movi_pool(s, TCG_REG_T9, (uintptr_t)arg, TCG_REG_TB);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);
+    }
 
     /* But do try a direct branch, allowing the cpu better insn prefetch. */
     if (tail) {
-- 
2.34.1

These addresses are often loaded by the qemu_ld/st slow path,
for loading the retaddr value.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.c.inc | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_pool(TCGContext *s, TCGReg ret,
 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                              tcg_target_long arg, TCGReg tbreg)
 {
+    tcg_target_long tmp;
+
     if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
         arg = (int32_t)arg;
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
     }
     assert(TCG_TARGET_REG_BITS == 64);
 
+    /* Load addresses within 2GB of TB with 1 or 3 insns. */
+    tmp = tcg_tbrel_diff(s, (void *)arg);
+    if (tmp == (int16_t)tmp) {
+        tcg_out_opc_imm(s, OPC_DADDIU, ret, tbreg, tmp);
+        return;
+    }
+    if (tcg_out_movi_two(s, ret, tmp)) {
+        tcg_out_opc_reg(s, OPC_DADDU, ret, ret, tbreg);
+        return;
+    }
+
     /* Otherwise, put 64-bit constants into the constant pool. */
     tcg_out_movi_pool(s, ret, arg, tbreg);
 }
-- 
2.34.1

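For scale (numbers chosen here for illustration): an address 0x1f0 bytes
past the TB base loads in a single DADDIU ret,tbreg,0x1f0, and anything
whose displacement sign-extends from 32 bits -- i.e. within roughly +/-2GB
of the TB -- still fits the LUI/ORI pair plus one DADDU. The constant-pool
fallback below is therefore rarely taken for code addresses.
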
These sequences are inexpensive to test. Maxing out at three insns
results in the same space as a load plus the constant pool entry.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.c.inc | 44 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                              tcg_target_long arg, TCGReg tbreg)
 {
     tcg_target_long tmp;
+    int sh, lo;
 
     if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
         arg = (int32_t)arg;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
         return;
     }
 
+    /*
+     * Load bitmasks with a right-shift.  This is good for things
+     * like 0x0fff_ffff_ffff_fff0: ADDUI r,0,0xff00 + DSRL r,r,4.
+     * or similarly using LUI.  For this to work, bit 31 must be set.
+     */
+    if (arg > 0 && (int32_t)arg < 0) {
+        sh = clz64(arg);
+        if (tcg_out_movi_one(s, ret, arg << sh)) {
+            tcg_out_dsrl(s, ret, ret, sh);
+            return;
+        }
+    }
+
+    /*
+     * Load slightly larger constants using left-shift.
+     * Limit this sequence to 3 insns to avoid too much expansion.
+     */
+    sh = ctz64(arg);
+    if (sh && tcg_out_movi_two(s, ret, arg >> sh)) {
+        tcg_out_dsll(s, ret, ret, sh);
+        return;
+    }
+
+    /*
+     * Load slightly larger constants using left-shift and add/or.
+     * Prefer addi with a negative immediate when that would produce
+     * a larger shift.  For this to work, bits 15 and 16 must be set.
+     */
+    lo = arg & 0xffff;
+    if (lo) {
+        if ((arg & 0x18000) == 0x18000) {
+            lo = (int16_t)arg;
+        }
+        tmp = arg - lo;
+        sh = ctz64(tmp);
+        tmp >>= sh;
+        if (tcg_out_movi_one(s, ret, tmp)) {
+            tcg_out_dsll(s, ret, ret, sh);
+            tcg_out_opc_imm(s, lo < 0 ? OPC_DADDIU : OPC_ORI, ret, ret, lo);
+            return;
+        }
+    }
+
     /* Otherwise, put 64-bit constants into the constant pool. */
     tcg_out_movi_pool(s, ret, arg, tbreg);
 }
-- 
2.34.1

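Checking the bitmask case from the comment above: arg = 0x0fff_ffff_ffff_fff0
has four leading zeros, and arg << 4 = 0xffff_ffff_ffff_ff00, which
round-trips through int16_t as -256. ADDIU therefore forms the shifted
value in one insn, and DSRL r,r,4 shifts zeros back in from the top to
recover the original constant. The three-insn cap keeps the worst case at
12 bytes -- the same as the 4-byte LD plus its 8-byte pool entry.
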
The new(ish) macro produces a compile-time error instead
of a link-time error.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.c.inc | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
 # define LO_OFF  (MIPS_BE * 4)
 # define HI_OFF  (4 - LO_OFF)
 #else
-/* To assert at compile-time that these values are never used
-   for TCG_TARGET_REG_BITS == 64.  */
-int link_error(void);
-# define LO_OFF  link_error()
-# define HI_OFF  link_error()
+/* Assert at compile-time that these values are never used for 64-bit. */
+# define LO_OFF  ({ qemu_build_not_reached(); 0; })
+# define HI_OFF  ({ qemu_build_not_reached(); 0; })
 #endif
 
 #ifdef CONFIG_DEBUG_TCG
-- 
2.34.1

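For context, a sketch of the mechanism (simplified, not the exact QEMU
definition): the macro expands to a call to a function carrying GCC's
error attribute, so the build fails unless the optimizer can prove the
call dead -- e.g. because TCG_TARGET_REG_BITS == 64 makes every use of
LO_OFF/HI_OFF unreachable:

    /* Any surviving call to this fails at compile time, not link time. */
    extern void compile_time_error(void)
        __attribute__((__error__("code path is reachable")));

    #define build_not_reached()  compile_time_error()
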
1
Pass the address of the last byte to be changed, rather than
1
Since e03b56863d2b, which replaced HOST_WORDS_BIGENDIAN
2
the first address past the last byte. This avoids overflow
2
with HOST_BIG_ENDIAN, there is no need to define a second
3
when the last page of the address space is involved.
3
symbol which is [0,1].
4
5
Properly truncate tb_last to the end of the page; the comment about
6
tb_end being past the end of the page being ok is not correct,
7
considering overflow.
8
4
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
7
---
12
accel/tcg/tb-maint.c | 26 ++++++++++++--------------
8
tcg/mips/tcg-target.c.inc | 46 +++++++++++++++++----------------------
13
1 file changed, 12 insertions(+), 14 deletions(-)
9
1 file changed, 20 insertions(+), 26 deletions(-)
14
10
15
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
11
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/tb-maint.c
13
--- a/tcg/mips/tcg-target.c.inc
18
+++ b/accel/tcg/tb-maint.c
14
+++ b/tcg/mips/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
15
@@ -XXX,XX +XXX,XX @@
20
static void
16
#include "../tcg-ldst.c.inc"
21
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
17
#include "../tcg-pool.c.inc"
22
PageDesc *p, tb_page_addr_t start,
18
23
- tb_page_addr_t end,
19
-#if HOST_BIG_ENDIAN
24
+ tb_page_addr_t last,
20
-# define MIPS_BE 1
25
uintptr_t retaddr)
21
-#else
22
-# define MIPS_BE 0
23
-#endif
24
-
25
#if TCG_TARGET_REG_BITS == 32
26
-# define LO_OFF (MIPS_BE * 4)
27
+# define LO_OFF (HOST_BIG_ENDIAN * 4)
28
# define HI_OFF (4 - LO_OFF)
29
#else
30
/* Assert at compile-time that these values are never used for 64-bit. */
31
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
32
/* Prefer to load from offset 0 first, but allow for overlap. */
33
if (TCG_TARGET_REG_BITS == 64) {
34
tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
35
- } else if (MIPS_BE ? hi != base : lo == base) {
36
+ } else if (HOST_BIG_ENDIAN ? hi != base : lo == base) {
37
tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF);
38
tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF);
39
} else {
40
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
41
static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
42
TCGReg base, MemOp opc, TCGType type)
26
{
43
{
27
TranslationBlock *tb;
44
- const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
28
- tb_page_addr_t tb_start, tb_end;
45
- const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL;
29
PageForEachNext n;
46
- const MIPSInsn ld1 = MIPS_BE ? OPC_LDL : OPC_LDR;
30
#ifdef TARGET_HAS_PRECISE_SMC
47
- const MIPSInsn ld2 = MIPS_BE ? OPC_LDR : OPC_LDL;
31
bool current_tb_modified = false;
48
+ const MIPSInsn lw1 = HOST_BIG_ENDIAN ? OPC_LWL : OPC_LWR;
32
TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
49
+ const MIPSInsn lw2 = HOST_BIG_ENDIAN ? OPC_LWR : OPC_LWL;
33
#endif /* TARGET_HAS_PRECISE_SMC */
50
+ const MIPSInsn ld1 = HOST_BIG_ENDIAN ? OPC_LDL : OPC_LDR;
34
- tb_page_addr_t last G_GNUC_UNUSED = end - 1;
51
+ const MIPSInsn ld2 = HOST_BIG_ENDIAN ? OPC_LDR : OPC_LDL;
35
52
bool sgn = opc & MO_SIGN;
36
/*
53
37
- * We remove all the TBs in the range [start, end[.
54
switch (opc & MO_SIZE) {
38
+ * We remove all the TBs in the range [start, last].
55
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
         tcg_out_opc_imm(s, ld1, lo, base, 0);
         tcg_out_opc_imm(s, ld2, lo, base, 7);
     } else {
-        tcg_out_opc_imm(s, lw1, MIPS_BE ? hi : lo, base, 0 + 0);
-        tcg_out_opc_imm(s, lw2, MIPS_BE ? hi : lo, base, 0 + 3);
-        tcg_out_opc_imm(s, lw1, MIPS_BE ? lo : hi, base, 4 + 0);
-        tcg_out_opc_imm(s, lw2, MIPS_BE ? lo : hi, base, 4 + 3);
+        tcg_out_opc_imm(s, lw1, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 0);
+        tcg_out_opc_imm(s, lw2, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 3);
+        tcg_out_opc_imm(s, lw1, HOST_BIG_ENDIAN ? lo : hi, base, 4 + 0);
+        tcg_out_opc_imm(s, lw2, HOST_BIG_ENDIAN ? lo : hi, base, 4 + 3);
     }
     break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
         if (TCG_TARGET_REG_BITS == 64) {
             tcg_out_opc_imm(s, OPC_SD, lo, base, 0);
         } else {
-            tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? hi : lo, base, 0);
-            tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? lo : hi, base, 4);
+            tcg_out_opc_imm(s, OPC_SW, HOST_BIG_ENDIAN ? hi : lo, base, 0);
+            tcg_out_opc_imm(s, OPC_SW, HOST_BIG_ENDIAN ? lo : hi, base, 4);
         }
         break;
     default:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
 static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
                                     TCGReg base, MemOp opc)
 {
-    const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR;
-    const MIPSInsn sw2 = MIPS_BE ? OPC_SWR : OPC_SWL;
-    const MIPSInsn sd1 = MIPS_BE ? OPC_SDL : OPC_SDR;
-    const MIPSInsn sd2 = MIPS_BE ? OPC_SDR : OPC_SDL;
+    const MIPSInsn sw1 = HOST_BIG_ENDIAN ? OPC_SWL : OPC_SWR;
+    const MIPSInsn sw2 = HOST_BIG_ENDIAN ? OPC_SWR : OPC_SWL;
+    const MIPSInsn sd1 = HOST_BIG_ENDIAN ? OPC_SDL : OPC_SDR;
+    const MIPSInsn sd2 = HOST_BIG_ENDIAN ? OPC_SDR : OPC_SDL;
 
     switch (opc & MO_SIZE) {
     case MO_16:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
         tcg_out_opc_imm(s, sd1, lo, base, 0);
         tcg_out_opc_imm(s, sd2, lo, base, 7);
     } else {
-        tcg_out_opc_imm(s, sw1, MIPS_BE ? hi : lo, base, 0 + 0);
-        tcg_out_opc_imm(s, sw2, MIPS_BE ? hi : lo, base, 0 + 3);
-        tcg_out_opc_imm(s, sw1, MIPS_BE ? lo : hi, base, 4 + 0);
-        tcg_out_opc_imm(s, sw2, MIPS_BE ? lo : hi, base, 4 + 3);
+        tcg_out_opc_imm(s, sw1, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 0);
+        tcg_out_opc_imm(s, sw2, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 3);
+        tcg_out_opc_imm(s, sw1, HOST_BIG_ENDIAN ? lo : hi, base, 4 + 0);
+        tcg_out_opc_imm(s, sw2, HOST_BIG_ENDIAN ? lo : hi, base, 4 + 3);
     }
     break;
--
2.34.1
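A note on the hi/lo selection above: a 64-bit access done as two 32-bit words has its most-significant word at offset 0 only on a big-endian host, which is what the HOST_BIG_ENDIAN ? hi : lo tests encode. A minimal sketch of the same selection in plain C (the HOST_BIG_ENDIAN fallback here is derived from compiler macros; QEMU computes it in its build system):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    #define HOST_BIG_ENDIAN 1
    #else
    #define HOST_BIG_ENDIAN 0
    #endif

    int main(void)
    {
        uint64_t v = 0x1122334455667788ull;
        uint32_t w[2];

        memcpy(w, &v, sizeof(v));               /* split into two words */
        uint32_t hi = HOST_BIG_ENDIAN ? w[0] : w[1];
        uint32_t lo = HOST_BIG_ENDIAN ? w[1] : w[0];
        printf("hi=%08x lo=%08x\n", hi, lo);    /* hi=11223344 lo=55667788 */
        return 0;
    }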
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 disas/riscv.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/disas/riscv.c b/disas/riscv.c
index XXXXXXX..XXXXXXX 100644
--- a/disas/riscv.c
+++ b/disas/riscv.c
@@ -XXX,XX +XXX,XX @@ typedef enum {
     rv_op_cm_mvsa01 = 786,
     rv_op_cm_jt = 787,
     rv_op_cm_jalt = 788,
+    rv_op_czero_eqz = 789,
+    rv_op_czero_nez = 790,
 } rv_op;
 
 /* structures */
@@ -XXX,XX +XXX,XX @@ const rv_opcode_data opcode_data[] = {
     { "cm.mvsa01", rv_codec_zcmp_cm_mv, rv_fmt_rd_rs2, NULL, 0, 0, 0 },
     { "cm.jt", rv_codec_zcmt_jt, rv_fmt_zcmt_index, NULL, 0 },
     { "cm.jalt", rv_codec_zcmt_jt, rv_fmt_zcmt_index, NULL, 0 },
+    { "czero.eqz", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
+    { "czero.nez", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
 };
 
 /* CSR names */
@@ -XXX,XX +XXX,XX @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
     case 45: op = rv_op_minu; break;
     case 46: op = rv_op_max; break;
     case 47: op = rv_op_maxu; break;
+    case 075: op = rv_op_czero_eqz; break;
+    case 077: op = rv_op_czero_nez; break;
     case 130: op = rv_op_sh1add; break;
     case 132: op = rv_op_sh2add; break;
     case 134: op = rv_op_sh3add; break;
--
2.34.1
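For reference, the new case values are octal. The decoder appears to key OP-class instructions on (funct7 << 3) | funct3, so 075 and 077 read off directly as funct7=7 with funct3=5 and 7, alongside the existing min/max cases. A sketch checking that against the czero encodings used later in this series (op_key is illustrative, not the disassembler's actual helper):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned op_key(uint32_t insn)
    {
        unsigned funct3 = (insn >> 12) & 0x7;
        unsigned funct7 = (insn >> 25) & 0x7f;
        return (funct7 << 3) | funct3;
    }

    int main(void)
    {
        printf("%#o %#o\n", op_key(0x0e005033),   /* czero.eqz -> 075 */
                            op_key(0x0e007033));  /* czero.nez -> 077 */
        return 0;
    }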
Define a useful subset of the extensions. Probe for them
via compiler pre-processor feature macros and SIGILL.

Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.h | 6 +++
 tcg/riscv/tcg-target.c.inc | 96 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 102 insertions(+)

diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
 #define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
 #define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
 
+#if defined(__riscv_arch_test) && defined(__riscv_zbb)
+# define have_zbb true
+#else
+extern bool have_zbb;
+#endif
+
 /* optional instructions */
 #define TCG_TARGET_HAS_movcond_i32 0
 #define TCG_TARGET_HAS_div_i32 1
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_iarg_regs[] = {
     TCG_REG_A7,
 };
 
+#ifndef have_zbb
+bool have_zbb;
+#endif
+#if defined(__riscv_arch_test) && defined(__riscv_zba)
+# define have_zba true
+#else
+static bool have_zba;
+#endif
+#if defined(__riscv_arch_test) && defined(__riscv_zicond)
+# define have_zicond true
+#else
+static bool have_zicond;
+#endif
+
 static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 {
     tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
@@ -XXX,XX +XXX,XX @@ typedef enum {
 
     OPC_FENCE = 0x0000000f,
     OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */
+
+    /* Zba: Bit manipulation extension, address generation */
+    OPC_ADD_UW = 0x0800003b,
+
+    /* Zbb: Bit manipulation extension, basic bit manipulation */
+    OPC_ANDN = 0x40007033,
+    OPC_CLZ = 0x60001013,
+    OPC_CLZW = 0x6000101b,
+    OPC_CPOP = 0x60201013,
+    OPC_CPOPW = 0x6020101b,
+    OPC_CTZ = 0x60101013,
+    OPC_CTZW = 0x6010101b,
+    OPC_ORN = 0x40006033,
+    OPC_REV8 = 0x6b805013,
+    OPC_ROL = 0x60001033,
+    OPC_ROLW = 0x6000103b,
+    OPC_ROR = 0x60005033,
+    OPC_RORW = 0x6000503b,
+    OPC_RORI = 0x60005013,
+    OPC_RORIW = 0x6000501b,
+    OPC_SEXT_B = 0x60401013,
+    OPC_SEXT_H = 0x60501013,
+    OPC_XNOR = 0x40004033,
+    OPC_ZEXT_H = 0x0800403b,
+
+    /* Zicond: integer conditional operations */
+    OPC_CZERO_EQZ = 0x0e005033,
+    OPC_CZERO_NEZ = 0x0e007033,
 } RISCVInsn;
 
 /*
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
 }
 
+static volatile sig_atomic_t got_sigill;
+
+static void sigill_handler(int signo, siginfo_t *si, void *data)
+{
+    /* Skip the faulty instruction */
+    ucontext_t *uc = (ucontext_t *)data;
+    uc->uc_mcontext.__gregs[REG_PC] += 4;
+
+    got_sigill = 1;
+}
+
+static void tcg_target_detect_isa(void)
+{
+#if !defined(have_zba) || !defined(have_zbb) || !defined(have_zicond)
+    /*
+     * TODO: It is expected that this will be determinable via
+     * linux riscv_hwprobe syscall, not yet merged.
+     * In the meantime, test via sigill.
+     */
+
+    struct sigaction sa_old, sa_new;
+
+    memset(&sa_new, 0, sizeof(sa_new));
+    sa_new.sa_flags = SA_SIGINFO;
+    sa_new.sa_sigaction = sigill_handler;
+    sigaction(SIGILL, &sa_new, &sa_old);
+
+#ifndef have_zba
+    /* Probe for Zba: add.uw zero,zero,zero. */
+    got_sigill = 0;
+    asm volatile(".insn r 0x3b, 0, 0x04, zero, zero, zero" : : : "memory");
+    have_zba = !got_sigill;
+#endif
+
+#ifndef have_zbb
+    /* Probe for Zbb: andn zero,zero,zero. */
+    got_sigill = 0;
+    asm volatile(".insn r 0x33, 7, 0x20, zero, zero, zero" : : : "memory");
+    have_zbb = !got_sigill;
+#endif
+
+#ifndef have_zicond
+    /* Probe for Zicond: czero.eqz zero,zero,zero. */
+    got_sigill = 0;
+    asm volatile(".insn r 0x33, 5, 0x07, zero, zero, zero" : : : "memory");
+    have_zicond = !got_sigill;
+#endif
+
+    sigaction(SIGILL, &sa_old, NULL);
+#endif
+}
+
 static void tcg_target_init(TCGContext *s)
 {
+    tcg_target_detect_isa();
+
     tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
     tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
 
--
2.34.1
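The probe pattern above (install a SIGILL handler, execute the candidate instruction, step the PC past it in the handler) also works as a standalone program. A sketch, assuming a riscv64 Linux host, glibc's ucontext layout, and an assembler new enough for .insn:

    #include <signal.h>
    #include <stdio.h>
    #include <ucontext.h>

    static volatile sig_atomic_t got_sigill;

    static void handler(int sig, siginfo_t *si, void *ctx)
    {
        ucontext_t *uc = ctx;
        uc->uc_mcontext.__gregs[REG_PC] += 4;   /* skip the faulting insn */
        got_sigill = 1;
    }

    int main(void)
    {
        struct sigaction sa = { .sa_sigaction = handler,
                                .sa_flags = SA_SIGINFO };
        sigaction(SIGILL, &sa, NULL);

        got_sigill = 0;
        /* andn zero,zero,zero: a no-op if Zbb exists, SIGILL if not */
        asm volatile(".insn r 0x33, 7, 0x20, zero, zero, zero" ::: "memory");
        printf("Zbb: %s\n", got_sigill ? "absent" : "present");
        return 0;
    }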
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target-con-set.h | 1 +
 tcg/riscv/tcg-target-con-str.h | 1 +
 tcg/riscv/tcg-target.h | 12 +++++-----
 tcg/riscv/tcg-target.c.inc | 41 ++++++++++++++++++++++++++++++++++
 4 files changed, 49 insertions(+), 6 deletions(-)

diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O0_I2(rZ, rZ)
 C_O1_I1(r, r)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, r, rI)
+C_O1_I2(r, r, rJ)
 C_O1_I2(r, rZ, rN)
 C_O1_I2(r, rZ, rZ)
 C_O2_I4(r, r, rZ, rZ, rM, rM)
diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-str.h
+++ b/tcg/riscv/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('r', ALL_GENERAL_REGS)
  * CONST(letter, TCG_CT_CONST_* bit set)
  */
 CONST('I', TCG_CT_CONST_S12)
+CONST('J', TCG_CT_CONST_J12)
 CONST('N', TCG_CT_CONST_N12)
 CONST('M', TCG_CT_CONST_M12)
 CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_bswap32_i32 0
 #define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_neg_i32 1
-#define TCG_TARGET_HAS_andc_i32 0
-#define TCG_TARGET_HAS_orc_i32 0
-#define TCG_TARGET_HAS_eqv_i32 0
+#define TCG_TARGET_HAS_andc_i32 have_zbb
+#define TCG_TARGET_HAS_orc_i32 have_zbb
+#define TCG_TARGET_HAS_eqv_i32 have_zbb
 #define TCG_TARGET_HAS_nand_i32 0
 #define TCG_TARGET_HAS_nor_i32 0
 #define TCG_TARGET_HAS_clz_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_bswap64_i64 0
 #define TCG_TARGET_HAS_not_i64 1
 #define TCG_TARGET_HAS_neg_i64 1
-#define TCG_TARGET_HAS_andc_i64 0
-#define TCG_TARGET_HAS_orc_i64 0
-#define TCG_TARGET_HAS_eqv_i64 0
+#define TCG_TARGET_HAS_andc_i64 have_zbb
+#define TCG_TARGET_HAS_orc_i64 have_zbb
+#define TCG_TARGET_HAS_eqv_i64 have_zbb
 #define TCG_TARGET_HAS_nand_i64 0
 #define TCG_TARGET_HAS_nor_i64 0
 #define TCG_TARGET_HAS_clz_i64 0
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define TCG_CT_CONST_S12 0x200
 #define TCG_CT_CONST_N12 0x400
 #define TCG_CT_CONST_M12 0x800
+#define TCG_CT_CONST_J12 0x1000
 
 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
 
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
     if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
         return 1;
     }
+    /*
+     * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff].
+     * Used to map ANDN back to ANDI, etc.
+     */
+    if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
+        return 1;
+    }
     return 0;
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
+    case INDEX_op_andc_i32:
+    case INDEX_op_andc_i64:
+        if (c2) {
+            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, ~a2);
+        } else {
+            tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_orc_i32:
+    case INDEX_op_orc_i64:
+        if (c2) {
+            tcg_out_opc_imm(s, OPC_ORI, a0, a1, ~a2);
+        } else {
+            tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_eqv_i32:
+    case INDEX_op_eqv_i64:
+        if (c2) {
+            tcg_out_opc_imm(s, OPC_XORI, a0, a1, ~a2);
+        } else {
+            tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
+        }
+        break;
+
     case INDEX_op_not_i32:
     case INDEX_op_not_i64:
         tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_xor_i64:
         return C_O1_I2(r, r, rI);
 
+    case INDEX_op_andc_i32:
+    case INDEX_op_andc_i64:
+    case INDEX_op_orc_i32:
+    case INDEX_op_orc_i64:
+    case INDEX_op_eqv_i32:
+    case INDEX_op_eqv_i64:
+        return C_O1_I2(r, r, rJ);
+
     case INDEX_op_sub_i32:
     case INDEX_op_sub_i64:
         return C_O1_I2(r, rZ, rN);
--
2.34.1
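The new J12 constraint accepts a constant whose complement fits the 12-bit signed immediate, because each of the three Zbb ops has an immediate equivalent once the constant is inverted: andn folds to andi with ~c, orn to ori with ~c, and eqv (~(x ^ c)) to xori with ~c. A self-contained check (match_j12 mirrors the tcg_target_const_match clause; the test values are arbitrary):

    #include <stdint.h>
    #include <assert.h>

    static int match_j12(int64_t val)
    {
        return ~val >= -0x800 && ~val <= 0x7ff;
    }

    int main(void)
    {
        int64_t x = 0x123456789abcdef0ll, c = 0xf0;

        assert(match_j12(c));           /* ~0xf0 == -241 fits in s12   */
        assert(!match_j12(0x1234));     /* ~0x1234 overflows the range */
        assert(~(x ^ c) == (x ^ ~c));   /* eqv -> xori with ~c         */
        return 0;
    }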
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 32 ++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
 
 static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
 {
-    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
-    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
+    if (have_zbb) {
+        tcg_out_opc_reg(s, OPC_ZEXT_H, ret, arg, TCG_REG_ZERO);
+    } else {
+        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
+        tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
+    }
 }
 
 static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
 {
-    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
-    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
+    if (have_zba) {
+        tcg_out_opc_reg(s, OPC_ADD_UW, ret, arg, TCG_REG_ZERO);
+    } else {
+        tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
+        tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
+    }
 }
 
 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
-    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
-    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
+    if (have_zbb) {
+        tcg_out_opc_imm(s, OPC_SEXT_B, ret, arg, 0);
+    } else {
+        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
+        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
+    }
 }
 
 static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
-    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
-    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
+    if (have_zbb) {
+        tcg_out_opc_imm(s, OPC_SEXT_H, ret, arg, 0);
+    } else {
+        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
+        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
+    }
 }
 
 static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
--
2.34.1
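The replaced fallbacks compute each extension with a shift pair; Zbb/Zba do it in one instruction. The underlying equivalences, as a self-contained check (shifting through unsigned types avoids C's signed-shift pitfalls, which the hardware shifts do not have):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t w = 0xabcd;
        uint64_t x = 0xffffffff9abcdef0ull;

        /* sext.b  == slliw 24; sraiw 24 */
        assert((int32_t)(w << 24) >> 24 == (int8_t)w);
        /* sext.h  == slliw 16; sraiw 16 */
        assert((int32_t)(w << 16) >> 16 == (int16_t)w);
        /* zext.h  == slliw 16; srliw 16 */
        assert((w << 16) >> 16 == (uint16_t)w);
        /* add.uw rd,rs,zero == slli 32; srli 32 */
        assert(x << 32 >> 32 == (uint32_t)x);
        return 0;
    }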
The instruction is a combined zero-extend and add.
Use it for exactly that.

Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
     tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
 
     /* TLB Hit - translate address using addend. */
-    addr_adj = addr_reg;
-    if (TARGET_LONG_BITS == 32) {
-        addr_adj = TCG_REG_TMP0;
-        tcg_out_ext32u(s, addr_adj, addr_reg);
+    if (TARGET_LONG_BITS == 64) {
+        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
+    } else if (have_zba) {
+        tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
+    } else {
+        tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
+        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, TCG_REG_TMP2);
     }
-    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_adj);
     *pbase = TCG_REG_TMP0;
 #else
+    TCGReg base;
+
     if (a_mask) {
         ldst = new_ldst_label(s);
         ldst->is_ld = is_ld;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
         tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
     }
 
-    TCGReg base = addr_reg;
-    if (TARGET_LONG_BITS == 32) {
-        tcg_out_ext32u(s, TCG_REG_TMP0, base);
-        base = TCG_REG_TMP0;
-    }
     if (guest_base != 0) {
-        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
         base = TCG_REG_TMP0;
+        if (TARGET_LONG_BITS == 64) {
+            tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, TCG_GUEST_BASE_REG);
+        } else if (have_zba) {
+            tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, TCG_GUEST_BASE_REG);
+        } else {
+            tcg_out_ext32u(s, base, addr_reg);
+            tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
+        }
+    } else if (TARGET_LONG_BITS == 64) {
+        base = addr_reg;
+    } else {
+        base = TCG_REG_TMP0;
+        tcg_out_ext32u(s, base, addr_reg);
     }
     *pbase = base;
 #endif
--
2.34.1
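Stated as C, add.uw computes rd = (uint64_t)(uint32_t)rs1 + rs2, which is exactly the 32-bit-guest address computation: zero-extend the guest address, add the base. A sketch (the sample values are arbitrary):

    #include <stdint.h>
    #include <assert.h>

    static uint64_t add_uw(uint64_t rs1, uint64_t rs2)
    {
        return (uint64_t)(uint32_t)rs1 + rs2;   /* zext32, then add */
    }

    int main(void)
    {
        uint64_t base = 0x7f0000000000ull;      /* e.g. guest_base  */
        uint64_t addr = 0xffffffff80001234ull;  /* dirty upper half */

        assert(add_uw(addr, base) == base + 0x80001234ull);
        return 0;
    }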
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.h | 4 ++--
 tcg/riscv/tcg-target.c.inc | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_div_i32 1
 #define TCG_TARGET_HAS_rem_i32 1
 #define TCG_TARGET_HAS_div2_i32 0
-#define TCG_TARGET_HAS_rot_i32 0
+#define TCG_TARGET_HAS_rot_i32 have_zbb
 #define TCG_TARGET_HAS_deposit_i32 0
 #define TCG_TARGET_HAS_extract_i32 0
 #define TCG_TARGET_HAS_sextract_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 1
 #define TCG_TARGET_HAS_div2_i64 0
-#define TCG_TARGET_HAS_rot_i64 0
+#define TCG_TARGET_HAS_rot_i64 have_zbb
 #define TCG_TARGET_HAS_deposit_i64 0
 #define TCG_TARGET_HAS_extract_i64 0
 #define TCG_TARGET_HAS_sextract_i64 0
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
+    case INDEX_op_rotl_i32:
+        if (c2) {
+            tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f);
+        } else {
+            tcg_out_opc_reg(s, OPC_ROLW, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_rotl_i64:
+        if (c2) {
+            tcg_out_opc_imm(s, OPC_RORI, a0, a1, -a2 & 0x3f);
+        } else {
+            tcg_out_opc_reg(s, OPC_ROL, a0, a1, a2);
+        }
+        break;
+
+    case INDEX_op_rotr_i32:
+        if (c2) {
+            tcg_out_opc_imm(s, OPC_RORIW, a0, a1, a2 & 0x1f);
+        } else {
+            tcg_out_opc_reg(s, OPC_RORW, a0, a1, a2);
+        }
+        break;
+    case INDEX_op_rotr_i64:
+        if (c2) {
+            tcg_out_opc_imm(s, OPC_RORI, a0, a1, a2 & 0x3f);
+        } else {
+            tcg_out_opc_reg(s, OPC_ROR, a0, a1, a2);
+        }
+        break;
+
     case INDEX_op_add2_i32:
         tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                         const_args[4], const_args[5], false, true);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_shl_i32:
     case INDEX_op_shr_i32:
     case INDEX_op_sar_i32:
+    case INDEX_op_rotl_i32:
+    case INDEX_op_rotr_i32:
     case INDEX_op_shl_i64:
     case INDEX_op_shr_i64:
     case INDEX_op_sar_i64:
+    case INDEX_op_rotl_i64:
+    case INDEX_op_rotr_i64:
         return C_O1_I2(r, r, ri);
 
     case INDEX_op_brcond_i32:
--
2.34.1
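Zbb has no rotate-left immediate, so a constant rotl is emitted as rori by the negated amount: rotl by c equals rotr by (-c & 31), or & 63 for the 64-bit forms. Checked exhaustively for the 32-bit case:

    #include <stdint.h>
    #include <assert.h>

    static uint32_t rotr32(uint32_t x, unsigned r)
    {
        r &= 31;
        return r ? (x >> r) | (x << (32 - r)) : x;
    }

    static uint32_t rotl32(uint32_t x, unsigned r)
    {
        r &= 31;
        return r ? (x << r) | (x >> (32 - r)) : x;
    }

    int main(void)
    {
        uint32_t x = 0x80000001u;
        for (unsigned c = 0; c < 32; c++) {
            assert(rotl32(x, c) == rotr32(x, -c & 31));
        }
        return 0;
    }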
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.h | 10 +++++-----
 tcg/riscv/tcg-target.c.inc | 29 +++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 5 deletions(-)

diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_ext16s_i32 1
 #define TCG_TARGET_HAS_ext8u_i32 1
 #define TCG_TARGET_HAS_ext16u_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 0
-#define TCG_TARGET_HAS_bswap32_i32 0
+#define TCG_TARGET_HAS_bswap16_i32 have_zbb
+#define TCG_TARGET_HAS_bswap32_i32 have_zbb
 #define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_neg_i32 1
 #define TCG_TARGET_HAS_andc_i32 have_zbb
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_ext8u_i64 1
 #define TCG_TARGET_HAS_ext16u_i64 1
 #define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_bswap16_i64 0
-#define TCG_TARGET_HAS_bswap32_i64 0
-#define TCG_TARGET_HAS_bswap64_i64 0
+#define TCG_TARGET_HAS_bswap16_i64 have_zbb
+#define TCG_TARGET_HAS_bswap32_i64 have_zbb
+#define TCG_TARGET_HAS_bswap64_i64 have_zbb
 #define TCG_TARGET_HAS_not_i64 1
 #define TCG_TARGET_HAS_neg_i64 1
 #define TCG_TARGET_HAS_andc_i64 have_zbb
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
+    case INDEX_op_bswap64_i64:
+        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
+        break;
+    case INDEX_op_bswap32_i32:
+        a2 = 0;
+        /* fall through */
+    case INDEX_op_bswap32_i64:
+        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
+        if (a2 & TCG_BSWAP_OZ) {
+            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
+        } else {
+            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
+        }
+        break;
+    case INDEX_op_bswap16_i64:
+    case INDEX_op_bswap16_i32:
+        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
+        if (a2 & TCG_BSWAP_OZ) {
+            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
+        } else {
+            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
+        }
+        break;
+
     case INDEX_op_add2_i32:
         tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                         const_args[4], const_args[5], false, true);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_extrl_i64_i32:
     case INDEX_op_extrh_i64_i32:
     case INDEX_op_ext_i32_i64:
+    case INDEX_op_bswap16_i32:
+    case INDEX_op_bswap32_i32:
+    case INDEX_op_bswap16_i64:
+    case INDEX_op_bswap32_i64:
+    case INDEX_op_bswap64_i64:
         return C_O1_I1(r, r);
 
     case INDEX_op_st8_i32:
--
2.34.1
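rev8 always swaps all eight bytes, so the narrower swaps are rev8 plus a right shift by 32 or 48: srli for a zero-extended result (TCG_BSWAP_OZ), srai when the caller wants sign extension. The zero-extending cases, checked against the compiler builtins:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t x = 0x0123456789abcdefull;
        uint64_t r = __builtin_bswap64(x);      /* stands in for rev8 */

        assert(r >> 32 == __builtin_bswap32((uint32_t)x)); /* bswap32 */
        assert(r >> 48 == __builtin_bswap16((uint16_t)x)); /* bswap16 */
        return 0;
    }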
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.h | 4 ++--
 tcg/riscv/tcg-target.c.inc | 9 +++++++++
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_nor_i32 0
 #define TCG_TARGET_HAS_clz_i32 0
 #define TCG_TARGET_HAS_ctz_i32 0
-#define TCG_TARGET_HAS_ctpop_i32 0
+#define TCG_TARGET_HAS_ctpop_i32 have_zbb
 #define TCG_TARGET_HAS_brcond2 1
 #define TCG_TARGET_HAS_setcond2 1
 #define TCG_TARGET_HAS_qemu_st8_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_nor_i64 0
 #define TCG_TARGET_HAS_clz_i64 0
 #define TCG_TARGET_HAS_ctz_i64 0
-#define TCG_TARGET_HAS_ctpop_i64 0
+#define TCG_TARGET_HAS_ctpop_i64 have_zbb
 #define TCG_TARGET_HAS_add2_i64 1
 #define TCG_TARGET_HAS_sub2_i64 1
 #define TCG_TARGET_HAS_mulu2_i64 0
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
+    case INDEX_op_ctpop_i32:
+        tcg_out_opc_imm(s, OPC_CPOPW, a0, a1, 0);
+        break;
+    case INDEX_op_ctpop_i64:
+        tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0);
+        break;
+
     case INDEX_op_add2_i32:
         tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                         const_args[4], const_args[5], false, true);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_bswap16_i64:
     case INDEX_op_bswap32_i64:
     case INDEX_op_bswap64_i64:
+    case INDEX_op_ctpop_i32:
+    case INDEX_op_ctpop_i64:
         return C_O1_I1(r, r);
 
     case INDEX_op_st8_i32:
--
2.34.1
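cpop and cpopw are plain population counts over 64 and 32 bits, so ctpop_i64/ctpop_i32 each become a single instruction. As a reference model using the compiler builtins:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t x = 0xf0f0f0f0f0f0f0f0ull;

        assert(__builtin_popcountll(x) == 32);          /* cpop  */
        assert(__builtin_popcount((uint32_t)x) == 16);  /* cpopw */
        return 0;
    }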
Split out a helper function, tcg_out_setcond_int, which does not
always produce the complete boolean result, but returns a set of
flags to do so.

Based on 21af16198425, the same improvement for loongarch64.

Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 164 +++++++++++++++++++++++++++----------
 1 file changed, 121 insertions(+), 43 deletions(-)

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
     tcg_out_opc_branch(s, op, arg1, arg2, 0);
 }
 
-static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
-                            TCGReg arg1, TCGReg arg2)
+#define SETCOND_INV    TCG_TARGET_NB_REGS
+#define SETCOND_NEZ    (SETCOND_INV << 1)
+#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)
+
+static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
+                               TCGReg arg1, tcg_target_long arg2, bool c2)
 {
+    int flags = 0;
+
     switch (cond) {
-    case TCG_COND_EQ:
-        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
-        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
-        break;
-    case TCG_COND_NE:
-        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
-        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
-        break;
-    case TCG_COND_LT:
-        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
-        break;
-    case TCG_COND_GE:
-        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
-        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
-        break;
-    case TCG_COND_LE:
-        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
-        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
-        break;
-    case TCG_COND_GT:
-        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
-        break;
-    case TCG_COND_LTU:
-        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
-        break;
-    case TCG_COND_GEU:
-        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
-        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
13
diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ safe_stack=""
 use_containers="yes"
 gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb")
 gdb_arches=""
+glib_has_gslice="no"
 
 if test -e "$source_path/.git"
 then
@@ -XXX,XX +XXX,XX @@ for i in $glib_modules; do
     fi
 done
 
+# Check whether glib has gslice, which we have to avoid for correctness.
+# TODO: remove this check and the corresponding workaround (qtree) when
+# the minimum supported glib is >= $glib_dropped_gslice_version.
+glib_dropped_gslice_version=2.75.3
+for i in $glib_modules; do
+    if ! $pkg_config --atleast-version=$glib_dropped_gslice_version $i; then
+        glib_has_gslice="yes"
+        break
+    fi
+done
+
 glib_bindir="$($pkg_config --variable=bindir glib-2.0)"
 if test -z "$glib_bindir" ; then
     glib_bindir="$($pkg_config --variable=prefix glib-2.0)"/bin
@@ -XXX,XX +XXX,XX @@ echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak
 echo "GLIB_LIBS=$glib_libs" >> $config_host_mak
 echo "GLIB_BINDIR=$glib_bindir" >> $config_host_mak
 echo "GLIB_VERSION=$($pkg_config --modversion glib-2.0)" >> $config_host_mak
+if test "$glib_has_gslice" = "yes" ; then
+    echo "HAVE_GLIB_WITH_SLICE_ALLOCATOR=y" >> $config_host_mak
+fi
 echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak
 echo "EXESUF=$EXESUF" >> $config_host_mak

diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ glib = declare_dependency(compile_args: config_host['GLIB_CFLAGS'].split(),
 })
 # override glib dep with the configure results (for subprojects)
 meson.override_dependency('glib-2.0', glib)
+# pass down whether Glib has the slice allocator
+if config_host.has_key('HAVE_GLIB_WITH_SLICE_ALLOCATOR')
+  config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', true)
+endif
 
 gio = not_found
 gdbus_codegen = not_found
diff --git a/include/qemu/qtree.h b/include/qemu/qtree.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/qemu/qtree.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/*
+ * QTree is a partial import of Glib's GTree. The parts excluded correspond
+ * to API calls either deprecated (e.g. g_tree_traverse) or recently added
+ * (e.g. g_tree_search_node, added in 2.68); neither have callers in QEMU.
+ *
+ * The reason for this import is to allow us to control the memory allocator
+ * used by the tree implementation. Until Glib 2.75.3, GTree uses Glib's
+ * slice allocator, which causes problems when forking in user-mode;
+ * see https://gitlab.com/qemu-project/qemu/-/issues/285 and glib's
+ * "45b5a6c1e gslice: Remove slice allocator and use malloc() instead".
+ *
+ * TODO: remove QTree when QEMU's minimum Glib version is >= 2.75.3.
+ */
+
+#ifndef QEMU_QTREE_H
+#define QEMU_QTREE_H
+
+#include "qemu/osdep.h"
+
+#ifdef HAVE_GLIB_WITH_SLICE_ALLOCATOR
+
+typedef struct _QTree QTree;
+
+typedef struct _QTreeNode QTreeNode;
+
+typedef gboolean (*QTraverseNodeFunc)(QTreeNode *node,
+                                      gpointer user_data);
+
+/*
+ * Balanced binary trees
+ */
+QTree *q_tree_new(GCompareFunc key_compare_func);
+QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
+                            gpointer key_compare_data);
+QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
+                       gpointer key_compare_data,
+                       GDestroyNotify key_destroy_func,
+                       GDestroyNotify value_destroy_func);
+QTree *q_tree_ref(QTree *tree);
+void q_tree_unref(QTree *tree);
+void q_tree_destroy(QTree *tree);
+void q_tree_insert(QTree *tree,
+                   gpointer key,
+                   gpointer value);
+void q_tree_replace(QTree *tree,
+                    gpointer key,
+                    gpointer value);
+gboolean q_tree_remove(QTree *tree,
+                       gconstpointer key);
+gboolean q_tree_steal(QTree *tree,
+                      gconstpointer key);
+gpointer q_tree_lookup(QTree *tree,
+                       gconstpointer key);
+gboolean q_tree_lookup_extended(QTree *tree,
+                                gconstpointer lookup_key,
+                                gpointer *orig_key,
+                                gpointer *value);
+void q_tree_foreach(QTree *tree,
+                    GTraverseFunc func,
+                    gpointer user_data);
+gpointer q_tree_search(QTree *tree,
+                       GCompareFunc search_func,
+                       gconstpointer user_data);
+gint q_tree_height(QTree *tree);
+gint q_tree_nnodes(QTree *tree);
+
+#else /* !HAVE_GLIB_WITH_SLICE_ALLOCATOR */
+
+typedef GTree QTree;
+typedef GTreeNode QTreeNode;
+typedef GTraverseNodeFunc QTraverseNodeFunc;
+
+static inline QTree *q_tree_new(GCompareFunc key_compare_func)
+{
+    return g_tree_new(key_compare_func);
+}
+
+static inline QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
+                                          gpointer key_compare_data)
+{
+    return g_tree_new_with_data(key_compare_func, key_compare_data);
+}
+
+static inline QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
+                                     gpointer key_compare_data,
+                                     GDestroyNotify key_destroy_func,
+                                     GDestroyNotify value_destroy_func)
+{
+    return g_tree_new_full(key_compare_func, key_compare_data,
+                           key_destroy_func, value_destroy_func);
+}
+
+static inline QTree *q_tree_ref(QTree *tree)
+{
+    return g_tree_ref(tree);
+}
+
+static inline void q_tree_unref(QTree *tree)
+{
+    g_tree_unref(tree);
+}
+
+static inline void q_tree_destroy(QTree *tree)
+{
+    g_tree_destroy(tree);
+}
+
+static inline void q_tree_insert(QTree *tree,
+                                 gpointer key,
+                                 gpointer value)
+{
+    g_tree_insert(tree, key, value);
+}
+
+static inline void q_tree_replace(QTree *tree,
+                                  gpointer key,
+                                  gpointer value)
+{
+    g_tree_replace(tree, key, value);
+}
+
+static inline gboolean q_tree_remove(QTree *tree,
+                                     gconstpointer key)
+{
+    return g_tree_remove(tree, key);
+}
+
+static inline gboolean q_tree_steal(QTree *tree,
+                                    gconstpointer key)
+{
+    return g_tree_steal(tree, key);
+}
+
+static inline gpointer q_tree_lookup(QTree *tree,
+                                     gconstpointer key)
+{
+    return g_tree_lookup(tree, key);
+}
+
+static inline gboolean q_tree_lookup_extended(QTree *tree,
+                                              gconstpointer lookup_key,
+                                              gpointer *orig_key,
+                                              gpointer *value)
+{
+    return g_tree_lookup_extended(tree, lookup_key, orig_key, value);
+}
+
+static inline void q_tree_foreach(QTree *tree,
+                                  GTraverseFunc func,
+                                  gpointer user_data)
+{
+    g_tree_foreach(tree, func, user_data);
+}
+
+static inline gpointer q_tree_search(QTree *tree,
+                                     GCompareFunc search_func,
+                                     gconstpointer user_data)
+{
+    return g_tree_search(tree, search_func, user_data);
+}
+
+static inline gint q_tree_height(QTree *tree)
+{
+    return g_tree_height(tree);
+}
+
+static inline gint q_tree_nnodes(QTree *tree)
+{
+    return g_tree_nnodes(tree);
+}
+
+#endif /* HAVE_GLIB_WITH_SLICE_ALLOCATOR */
+
+#endif /* QEMU_QTREE_H */
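
The fallback half of the header keeps QTree as pure syntax over GTree,
so behaviour is identical either way; only the allocator changes. As a
rough sketch of the ownership variant, q_tree_new_full() hands the tree
destroy notifiers for keys and values (the helper below is hypothetical
and not part of the patch):

  /* 3-argument comparison, matching the GCompareDataFunc signature */
  static gint compare_str(gconstpointer a, gconstpointer b, gpointer d)
  {
      return strcmp(a, b);
  }

  static QTree *make_string_tree(void)    /* illustrative only */
  {
      /* g_free matches GDestroyNotify exactly */
      QTree *tree = q_tree_new_full(compare_str, NULL, g_free, g_free);

      /* the tree now owns both allocations */
      q_tree_insert(tree, g_strdup("key"), g_strdup("value"));
      return tree;    /* q_tree_destroy() will g_free() keys and values */
  }
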
diff --git a/tests/bench/qtree-bench.c b/tests/bench/qtree-bench.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/bench/qtree-bench.c
@@ -XXX,XX +XXX,XX @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include "qemu/osdep.h"
+#include "qemu/qtree.h"
+#include "qemu/timer.h"
+
+enum tree_op {
+    OP_LOOKUP,
+    OP_INSERT,
+    OP_REMOVE,
+    OP_REMOVE_ALL,
+    OP_TRAVERSE,
+};
+
+struct benchmark {
+    const char * const name;
+    enum tree_op op;
+    bool fill_on_init;
+};
+
+enum impl_type {
+    IMPL_GTREE,
+    IMPL_QTREE,
+};
+
+struct tree_implementation {
+    const char * const name;
+    enum impl_type type;
+};
+
+static const struct benchmark benchmarks[] = {
+    {
+        .name = "Lookup",
+        .op = OP_LOOKUP,
+        .fill_on_init = true,
+    },
+    {
+        .name = "Insert",
+        .op = OP_INSERT,
+        .fill_on_init = false,
+    },
+    {
+        .name = "Remove",
+        .op = OP_REMOVE,
+        .fill_on_init = true,
+    },
+    {
+        .name = "RemoveAll",
+        .op = OP_REMOVE_ALL,
+        .fill_on_init = true,
+    },
+    {
+        .name = "Traverse",
+        .op = OP_TRAVERSE,
+        .fill_on_init = true,
+    },
+};
+
+static const struct tree_implementation impls[] = {
+    {
+        .name = "GTree",
+        .type = IMPL_GTREE,
+    },
+    {
+        .name = "QTree",
+        .type = IMPL_QTREE,
+    },
+};
+
+static int compare_func(const void *ap, const void *bp)
+{
+    const size_t *a = ap;
+    const size_t *b = bp;
+
+    return *a - *b;
+}
+
+static void init_empty_tree_and_keys(enum impl_type impl,
+                                     void **ret_tree, size_t **ret_keys,
+                                     size_t n_elems)
+{
+    size_t *keys = g_malloc_n(n_elems, sizeof(*keys));
+    for (size_t i = 0; i < n_elems; i++) {
+        keys[i] = i;
+    }
+
+    void *tree;
+    switch (impl) {
+    case IMPL_GTREE:
+        tree = g_tree_new(compare_func);
+        break;
+    case IMPL_QTREE:
+        tree = q_tree_new(compare_func);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    *ret_tree = tree;
+    *ret_keys = keys;
+}
+
+static gboolean traverse_func(gpointer key, gpointer value, gpointer data)
+{
+    return FALSE;
+}
+
+static inline void remove_all(void *tree, enum impl_type impl)
+{
+    switch (impl) {
+    case IMPL_GTREE:
+        g_tree_destroy(tree);
+        break;
+    case IMPL_QTREE:
+        q_tree_destroy(tree);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static int64_t run_benchmark(const struct benchmark *bench,
+                             enum impl_type impl,
+                             size_t n_elems)
+{
+    void *tree;
+    size_t *keys;
+
+    init_empty_tree_and_keys(impl, &tree, &keys, n_elems);
+    if (bench->fill_on_init) {
+        for (size_t i = 0; i < n_elems; i++) {
+            switch (impl) {
+            case IMPL_GTREE:
+                g_tree_insert(tree, &keys[i], &keys[i]);
+                break;
+            case IMPL_QTREE:
+                q_tree_insert(tree, &keys[i], &keys[i]);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+        }
+    }
+
+    int64_t start_ns = get_clock();
+    switch (bench->op) {
+    case OP_LOOKUP:
+        for (size_t i = 0; i < n_elems; i++) {
+            void *value;
+            switch (impl) {
+            case IMPL_GTREE:
+                value = g_tree_lookup(tree, &keys[i]);
+                break;
+            case IMPL_QTREE:
+                value = q_tree_lookup(tree, &keys[i]);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+            (void)value;
+        }
+        break;
+    case OP_INSERT:
+        for (size_t i = 0; i < n_elems; i++) {
+            switch (impl) {
+            case IMPL_GTREE:
+                g_tree_insert(tree, &keys[i], &keys[i]);
+                break;
+            case IMPL_QTREE:
+                q_tree_insert(tree, &keys[i], &keys[i]);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+        }
+        break;
+    case OP_REMOVE:
+        for (size_t i = 0; i < n_elems; i++) {
+            switch (impl) {
+            case IMPL_GTREE:
+                g_tree_remove(tree, &keys[i]);
+                break;
+            case IMPL_QTREE:
+                q_tree_remove(tree, &keys[i]);
+                break;
+            default:
+                g_assert_not_reached();
+            }
+        }
+        break;
+    case OP_REMOVE_ALL:
+        remove_all(tree, impl);
+        break;
+    case OP_TRAVERSE:
+        switch (impl) {
+        case IMPL_GTREE:
+            g_tree_foreach(tree, traverse_func, NULL);
+            break;
+        case IMPL_QTREE:
+            q_tree_foreach(tree, traverse_func, NULL);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    int64_t ns = get_clock() - start_ns;
+
+    if (bench->op != OP_REMOVE_ALL) {
+        remove_all(tree, impl);
+    }
+    g_free(keys);
+
+    return ns;
+}
+
+int main(int argc, char *argv[])
+{
+    size_t sizes[] = {
+        32,
+        1024,
+        1024 * 4,
+        1024 * 128,
+        1024 * 1024,
+    };
+
+    double res[ARRAY_SIZE(benchmarks)][ARRAY_SIZE(impls)][ARRAY_SIZE(sizes)];
+    for (int i = 0; i < ARRAY_SIZE(sizes); i++) {
+        size_t size = sizes[i];
+        for (int j = 0; j < ARRAY_SIZE(impls); j++) {
+            const struct tree_implementation *impl = &impls[j];
+            for (int k = 0; k < ARRAY_SIZE(benchmarks); k++) {
+                const struct benchmark *bench = &benchmarks[k];
+
+                /* warm-up run */
+                run_benchmark(bench, impl->type, size);
+
+                int64_t total_ns = 0;
+                int64_t n_runs = 0;
+                while (total_ns < 2e8 || n_runs < 5) {
+                    total_ns += run_benchmark(bench, impl->type, size);
+                    n_runs++;
+                }
+                double ns_per_run = (double)total_ns / n_runs;
+
+                /* Throughput, in Mops/s */
+                res[k][j][i] = size / ns_per_run * 1e3;
+            }
+        }
+    }
+
+    printf("# Results' breakdown: Tree, Op and #Elements. Units: Mops/s\n");
+    printf("%5s %10s ", "Tree", "Op");
+    for (int i = 0; i < ARRAY_SIZE(sizes); i++) {
+        printf("%7zu ", sizes[i]);
+    }
+    printf("\n");
+    char separator[97];
+    for (int i = 0; i < ARRAY_SIZE(separator) - 1; i++) {
+        separator[i] = '-';
+    }
+    separator[ARRAY_SIZE(separator) - 1] = '\0';
+    printf("%s\n", separator);
+    for (int i = 0; i < ARRAY_SIZE(benchmarks); i++) {
+        for (int j = 0; j < ARRAY_SIZE(impls); j++) {
+            printf("%5s %10s ", impls[j].name, benchmarks[i].name);
+            for (int k = 0; k < ARRAY_SIZE(sizes); k++) {
+                printf("%7.2f ", res[i][j][k]);
+                if (j == 0) {
+                    printf("        ");
+                } else {
+                    if (res[i][0][k] != 0) {
+                        double speedup = res[i][j][k] / res[i][0][k];
+                        printf("(%4.2fx) ", speedup);
+                    } else {
+                        printf("(     ) ");
+                    }
+                }
+            }
+            printf("\n");
+        }
+    }
+    printf("%s\n", separator);
+    return 0;
+}
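
A note on the arithmetic above: run_benchmark() returns the elapsed
nanoseconds for n_elems operations, so throughput in Mops/s is
size / ns_per_run * 1e3 (ops per ns, times 1e9 for ops/s, divided by
1e6 for mega-ops). With made-up numbers: 1024 lookups in an average of
12800 ns give 1024 / 12800 * 1e3 = 80 Mops/s, the same scale as the
table in the commit message.
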
diff --git a/tests/unit/test-qtree.c b/tests/unit/test-qtree.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/unit/test-qtree.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Tests for QTree.
+ * Original source: glib
+ * https://gitlab.gnome.org/GNOME/glib/-/blob/main/glib/tests/tree.c
+ * LGPL license.
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qtree.h"
+
+static gint my_compare(gconstpointer a, gconstpointer b)
+{
+    const char *cha = a;
+    const char *chb = b;
+
+    return *cha - *chb;
+}
+
+static gint my_compare_with_data(gconstpointer a,
+                                 gconstpointer b,
+                                 gpointer user_data)
+{
+    const char *cha = a;
+    const char *chb = b;
+
+    /* just check that we got the right data */
+    g_assert(GPOINTER_TO_INT(user_data) == 123);
+
+    return *cha - *chb;
+}
+
+static gint my_search(gconstpointer a, gconstpointer b)
+{
+    return my_compare(b, a);
+}
+
+static gpointer destroyed_key;
+static gpointer destroyed_value;
+static guint destroyed_key_count;
+static guint destroyed_value_count;
+
+static void my_key_destroy(gpointer key)
+{
+    destroyed_key = key;
+    destroyed_key_count++;
+}
+
+static void my_value_destroy(gpointer value)
+{
+    destroyed_value = value;
+    destroyed_value_count++;
+}
+
+static gint my_traverse(gpointer key, gpointer value, gpointer data)
+{
+    char *ch = key;
+
+    g_assert((*ch) > 0);
+
+    if (*ch == 'd') {
+        return TRUE;
+    }
+
+    return FALSE;
+}
+
+char chars[] =
+    "0123456789"
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+    "abcdefghijklmnopqrstuvwxyz";
+
+char chars2[] =
+    "0123456789"
+    "abcdefghijklmnopqrstuvwxyz";
+
+static gint check_order(gpointer key, gpointer value, gpointer data)
+{
+    char **p = data;
+    char *ch = key;
+
+    g_assert(**p == *ch);
+
+    (*p)++;
+
+    return FALSE;
+}
+
+static void test_tree_search(void)
+{
+    gint i;
+    QTree *tree;
+    gboolean removed;
+    gchar c;
+    gchar *p, *d;
+
+    tree = q_tree_new_with_data(my_compare_with_data, GINT_TO_POINTER(123));
+
+    for (i = 0; chars[i]; i++) {
+        q_tree_insert(tree, &chars[i], &chars[i]);
+    }
+
+    q_tree_foreach(tree, my_traverse, NULL);
+
+    g_assert(q_tree_nnodes(tree) == strlen(chars));
+    g_assert(q_tree_height(tree) == 6);
+
+    p = chars;
+    q_tree_foreach(tree, check_order, &p);
+
+    for (i = 0; i < 26; i++) {
+        removed = q_tree_remove(tree, &chars[i + 10]);
+        g_assert(removed);
+    }
+
+    c = '\0';
+    removed = q_tree_remove(tree, &c);
+    g_assert(!removed);
+
+    q_tree_foreach(tree, my_traverse, NULL);
+
+    g_assert(q_tree_nnodes(tree) == strlen(chars2));
+    g_assert(q_tree_height(tree) == 6);
+
+    p = chars2;
+    q_tree_foreach(tree, check_order, &p);
+
+    for (i = 25; i >= 0; i--) {
+        q_tree_insert(tree, &chars[i + 10], &chars[i + 10]);
+    }
+
+    p = chars;
+    q_tree_foreach(tree, check_order, &p);
+
+    c = '0';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p && *p == c);
+    g_assert(q_tree_lookup_extended(tree, &c, (gpointer *)&d, (gpointer *)&p));
+    g_assert(c == *d && c == *p);
+
+    c = 'A';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p && *p == c);
+
+    c = 'a';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p && *p == c);
+
+    c = 'z';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p && *p == c);
+
+    c = '!';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p == NULL);
+
+    c = '=';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p == NULL);
+
+    c = '|';
+    p = q_tree_lookup(tree, &c);
+    g_assert(p == NULL);
+
+    c = '0';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p && *p == c);
+
+    c = 'A';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p && *p == c);
+
+    c = 'a';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p && *p == c);
+
+    c = 'z';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p && *p == c);
+
+    c = '!';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p == NULL);
+
+    c = '=';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p == NULL);
+
+    c = '|';
+    p = q_tree_search(tree, my_search, &c);
+    g_assert(p == NULL);
+
+    q_tree_destroy(tree);
+}
+
+static void test_tree_remove(void)
+{
+    QTree *tree;
+    char c, d;
+    gint i;
+    gboolean removed;
+
+    tree = q_tree_new_full((GCompareDataFunc)my_compare, NULL,
+                           my_key_destroy,
+                           my_value_destroy);
+
+    for (i = 0; chars[i]; i++) {
+        q_tree_insert(tree, &chars[i], &chars[i]);
+    }
+
+    c = '0';
+    q_tree_insert(tree, &c, &c);
+    g_assert(destroyed_key == &c);
+    g_assert(destroyed_value == &chars[0]);
+    destroyed_key = NULL;
+    destroyed_value = NULL;
+
+    d = '1';
+    q_tree_replace(tree, &d, &d);
+    g_assert(destroyed_key == &chars[1]);
+    g_assert(destroyed_value == &chars[1]);
+    destroyed_key = NULL;
+    destroyed_value = NULL;
+
+    c = '2';
+    removed = q_tree_remove(tree, &c);
+    g_assert(removed);
+    g_assert(destroyed_key == &chars[2]);
+    g_assert(destroyed_value == &chars[2]);
+    destroyed_key = NULL;
+    destroyed_value = NULL;
+
+    c = '3';
+    removed = q_tree_steal(tree, &c);
+    g_assert(removed);
+    g_assert(destroyed_key == NULL);
+    g_assert(destroyed_value == NULL);
+
+    const gchar *remove = "omkjigfedba";
+    for (i = 0; remove[i]; i++) {
+        removed = q_tree_remove(tree, &remove[i]);
+        g_assert(removed);
+    }
+
+    q_tree_destroy(tree);
+}
+
+static void test_tree_destroy(void)
+{
+    QTree *tree;
+    gint i;
+
+    tree = q_tree_new(my_compare);
+
+    for (i = 0; chars[i]; i++) {
+        q_tree_insert(tree, &chars[i], &chars[i]);
+    }
+
+    g_assert(q_tree_nnodes(tree) == strlen(chars));
+
+    g_test_message("nnodes: %d", q_tree_nnodes(tree));
+    q_tree_ref(tree);
+    q_tree_destroy(tree);
+
+    g_test_message("nnodes: %d", q_tree_nnodes(tree));
+    g_assert(q_tree_nnodes(tree) == 0);
+
+    q_tree_unref(tree);
+}
+
+static void test_tree_insert(void)
+{
+    QTree *tree;
+    gchar *p;
+    gint i;
+    gchar *scrambled;
+
+    tree = q_tree_new(my_compare);
+
+    for (i = 0; chars[i]; i++) {
+        q_tree_insert(tree, &chars[i], &chars[i]);
+    }
+    p = chars;
+    q_tree_foreach(tree, check_order, &p);
+
+    q_tree_unref(tree);
+    tree = q_tree_new(my_compare);
+
+    for (i = strlen(chars) - 1; i >= 0; i--) {
+        q_tree_insert(tree, &chars[i], &chars[i]);
+    }
+    p = chars;
+    q_tree_foreach(tree, check_order, &p);
+
+    q_tree_unref(tree);
+    tree = q_tree_new(my_compare);
+
+    scrambled = g_strdup(chars);
+
+    for (i = 0; i < 30; i++) {
+        gchar tmp;
+        gint a, b;
+
+        a = g_random_int_range(0, strlen(scrambled));
+        b = g_random_int_range(0, strlen(scrambled));
+        tmp = scrambled[a];
+        scrambled[a] = scrambled[b];
+        scrambled[b] = tmp;
+    }
+
+    for (i = 0; scrambled[i]; i++) {
+        q_tree_insert(tree, &scrambled[i], &scrambled[i]);
+    }
+    p = chars;
+    q_tree_foreach(tree, check_order, &p);
+
+    g_free(scrambled);
+    q_tree_unref(tree);
+}
+
+int main(int argc, char *argv[])
+{
+    g_test_init(&argc, &argv, NULL);
+
+    g_test_add_func("/qtree/search", test_tree_search);
+    g_test_add_func("/qtree/remove", test_tree_remove);
+    g_test_add_func("/qtree/destroy", test_tree_destroy);
+    g_test_add_func("/qtree/insert", test_tree_insert);
+
+    return g_test_run();
+}
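
check_order() above relies on q_tree_foreach() visiting entries in
sorted key order and stopping as soon as the callback returns TRUE. A
minimal sketch of that early-stop pattern, with a hypothetical callback
that is not part of the test suite:

  static gboolean count_until_limit(gpointer key, gpointer value,
                                    gpointer data)
  {
      guint *count = data;

      (*count)++;
      return *count == 10;    /* TRUE stops the traversal early */
  }

  /* visits at most 10 entries, smallest keys first */
  guint count = 0;
  q_tree_foreach(tree, count_until_limit, &count);
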
diff --git a/util/qtree.c b/util/qtree.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/util/qtree.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/*
+ * MT safe
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qtree.h"
+
+/**
+ * SECTION:trees-binary
+ * @title: Balanced Binary Trees
+ * @short_description: a sorted collection of key/value pairs optimized
+ *                     for searching and traversing in order
+ *
+ * The #QTree structure and its associated functions provide a sorted
+ * collection of key/value pairs optimized for searching and traversing
+ * in order. This means that most of the operations (access, search,
+ * insertion, deletion, ...) on #QTree are O(log(n)) on average and O(n)
+ * in the worst case for time complexity. But note that maintaining a
+ * balanced sorted #QTree of n elements is done in time O(n log(n)).
+ *
+ * To create a new #QTree use q_tree_new().
+ *
+ * To insert a key/value pair into a #QTree use q_tree_insert()
+ * (O(n log(n))).
+ *
+ * To remove a key/value pair use q_tree_remove() (O(n log(n))).
+ *
+ * To look up the value corresponding to a given key, use
+ * q_tree_lookup() and q_tree_lookup_extended().
+ *
+ * To find out the number of nodes in a #QTree, use q_tree_nnodes(). To
+ * get the height of a #QTree, use q_tree_height().
+ *
+ * To traverse a #QTree, calling a function for each node visited in
+ * the traversal, use q_tree_foreach().
+ *
+ * To destroy a #QTree, use q_tree_destroy().
+ **/
+
+#define MAX_GTREE_HEIGHT 40
+
+/**
+ * QTree:
+ *
+ * The QTree struct is an opaque data structure representing a
+ * [balanced binary tree][glib-Balanced-Binary-Trees]. It should be
+ * accessed only by using the following functions.
+ */
+struct _QTree {
+    QTreeNode *root;
+    GCompareDataFunc key_compare;
+    GDestroyNotify key_destroy_func;
+    GDestroyNotify value_destroy_func;
+    gpointer key_compare_data;
+    guint nnodes;
+    gint ref_count;
+};
+
+struct _QTreeNode {
+    gpointer key;       /* key for this node */
+    gpointer value;     /* value stored at this node */
+    QTreeNode *left;    /* left subtree */
+    QTreeNode *right;   /* right subtree */
+    gint8 balance;      /* height (right) - height (left) */
+    guint8 left_child;
+    guint8 right_child;
+};
+
+
+static QTreeNode *q_tree_node_new(gpointer key,
+                                  gpointer value);
+static QTreeNode *q_tree_insert_internal(QTree *tree,
+                                         gpointer key,
+                                         gpointer value,
+                                         gboolean replace);
+static gboolean q_tree_remove_internal(QTree *tree,
+                                       gconstpointer key,
+                                       gboolean steal);
+static QTreeNode *q_tree_node_balance(QTreeNode *node);
+static QTreeNode *q_tree_find_node(QTree *tree,
+                                   gconstpointer key);
+static QTreeNode *q_tree_node_search(QTreeNode *node,
+                                     GCompareFunc search_func,
+                                     gconstpointer data);
+static QTreeNode *q_tree_node_rotate_left(QTreeNode *node);
+static QTreeNode *q_tree_node_rotate_right(QTreeNode *node);
+#ifdef Q_TREE_DEBUG
+static void q_tree_node_check(QTreeNode *node);
+#endif
+
+static QTreeNode*
+q_tree_node_new(gpointer key,
+                gpointer value)
+{
+    QTreeNode *node = g_new(QTreeNode, 1);
+
+    node->balance = 0;
+    node->left = NULL;
+    node->right = NULL;
+    node->left_child = FALSE;
+    node->right_child = FALSE;
+    node->key = key;
+    node->value = value;
+
+    return node;
+}
+
+/**
+ * q_tree_new:
+ * @key_compare_func: the function used to order the nodes in the #QTree.
+ *   It should return values similar to the standard strcmp() function -
+ *   0 if the two arguments are equal, a negative value if the first argument
+ *   comes before the second, or a positive value if the first argument comes
+ *   after the second.
+ *
+ * Creates a new #QTree.
+ *
+ * Returns: a newly allocated #QTree
+ */
+QTree *
+q_tree_new(GCompareFunc key_compare_func)
+{
+    g_return_val_if_fail(key_compare_func != NULL, NULL);
+
+    return q_tree_new_full((GCompareDataFunc) key_compare_func, NULL,
+                           NULL, NULL);
+}
+
+/**
+ * q_tree_new_with_data:
+ * @key_compare_func: qsort()-style comparison function
+ * @key_compare_data: data to pass to comparison function
+ *
+ * Creates a new #QTree with a comparison function that accepts user data.
+ * See q_tree_new() for more details.
+ *
+ * Returns: a newly allocated #QTree
+ */
+QTree *
+q_tree_new_with_data(GCompareDataFunc key_compare_func,
+                     gpointer key_compare_data)
+{
+    g_return_val_if_fail(key_compare_func != NULL, NULL);
+
+    return q_tree_new_full(key_compare_func, key_compare_data,
+                           NULL, NULL);
+}
+
+/**
+ * q_tree_new_full:
+ * @key_compare_func: qsort()-style comparison function
+ * @key_compare_data: data to pass to comparison function
+ * @key_destroy_func: a function to free the memory allocated for the key
+ *   used when removing the entry from the #QTree or %NULL if you don't
+ *   want to supply such a function
+ * @value_destroy_func: a function to free the memory allocated for the
+ *   value used when removing the entry from the #QTree or %NULL if you
+ *   don't want to supply such a function
+ *
+ * Creates a new #QTree like q_tree_new() and allows you to specify functions
+ * to free the memory allocated for the key and value that get called when
+ * removing the entry from the #QTree.
+ *
+ * Returns: a newly allocated #QTree
+ */
+QTree *
+q_tree_new_full(GCompareDataFunc key_compare_func,
+                gpointer key_compare_data,
+                GDestroyNotify key_destroy_func,
+                GDestroyNotify value_destroy_func)
+{
+    QTree *tree;
+
+    g_return_val_if_fail(key_compare_func != NULL, NULL);
+
+    tree = g_new(QTree, 1);
+    tree->root = NULL;
+    tree->key_compare = key_compare_func;
+    tree->key_destroy_func = key_destroy_func;
+    tree->value_destroy_func = value_destroy_func;
+    tree->key_compare_data = key_compare_data;
+    tree->nnodes = 0;
+    tree->ref_count = 1;
+
+    return tree;
+}
+
+/**
+ * q_tree_node_first:
+ * @tree: a #QTree
+ *
+ * Returns the first in-order node of the tree, or %NULL
+ * for an empty tree.
+ *
+ * Returns: (nullable) (transfer none): the first node in the tree
+ *
+ * Since: 2.68 in GLib. Internal in QTree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_node_first(QTree *tree)
+{
+    QTreeNode *tmp;
+
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    if (!tree->root) {
+        return NULL;
+    }
+
+    tmp = tree->root;
+
+    while (tmp->left_child) {
+        tmp = tmp->left;
+    }
+
+    return tmp;
+}
+
+/**
+ * q_tree_node_previous:
+ * @node: a #QTree node
+ *
+ * Returns the previous in-order node of the tree, or %NULL
+ * if the passed node was already the first one.
+ *
+ * Returns: (nullable) (transfer none): the previous node in the tree
+ *
+ * Since: 2.68 in GLib. Internal in QTree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_node_previous(QTreeNode *node)
+{
+    QTreeNode *tmp;
+
+    g_return_val_if_fail(node != NULL, NULL);
+
+    tmp = node->left;
+
+    if (node->left_child) {
+        while (tmp->right_child) {
+            tmp = tmp->right;
+        }
+    }
+
+    return tmp;
+}
+
+/**
+ * q_tree_node_next:
+ * @node: a #QTree node
+ *
+ * Returns the next in-order node of the tree, or %NULL
+ * if the passed node was already the last one.
+ *
+ * Returns: (nullable) (transfer none): the next node in the tree
+ *
+ * Since: 2.68 in GLib. Internal in QTree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_node_next(QTreeNode *node)
+{
+    QTreeNode *tmp;
+
+    g_return_val_if_fail(node != NULL, NULL);
+
+    tmp = node->right;
+
+    if (node->right_child) {
+        while (tmp->left_child) {
+            tmp = tmp->left;
+        }
+    }
+
+    return tmp;
+}
+
+/**
+ * q_tree_remove_all:
+ * @tree: a #QTree
+ *
+ * Removes all nodes from a #QTree and destroys their keys and values,
+ * then resets the #QTree’s root to %NULL.
+ *
+ * Since: 2.70 in GLib. Internal in QTree, i.e. not in the public API.
+ */
+static void
+q_tree_remove_all(QTree *tree)
+{
+    QTreeNode *node;
+    QTreeNode *next;
+
+    g_return_if_fail(tree != NULL);
+
+    node = q_tree_node_first(tree);
+
+    while (node) {
+        next = q_tree_node_next(node);
+
+        if (tree->key_destroy_func) {
+            tree->key_destroy_func(node->key);
+        }
+        if (tree->value_destroy_func) {
+            tree->value_destroy_func(node->value);
+        }
+        g_free(node);
+
+#ifdef Q_TREE_DEBUG
+        g_assert(tree->nnodes > 0);
+        tree->nnodes--;
+#endif
+
+        node = next;
+    }
+
+#ifdef Q_TREE_DEBUG
+    g_assert(tree->nnodes == 0);
+#endif
+
+    tree->root = NULL;
+#ifndef Q_TREE_DEBUG
+    tree->nnodes = 0;
+#endif
+}
+
+/**
+ * q_tree_ref:
+ * @tree: a #QTree
+ *
+ * Increments the reference count of @tree by one.
+ *
+ * It is safe to call this function from any thread.
+ *
+ * Returns: the passed in #QTree
+ *
+ * Since: 2.22
+ */
+QTree *
+q_tree_ref(QTree *tree)
+{
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    g_atomic_int_inc(&tree->ref_count);
+
+    return tree;
+}
+
+/**
+ * q_tree_unref:
+ * @tree: a #QTree
+ *
+ * Decrements the reference count of @tree by one.
+ * If the reference count drops to 0, all keys and values will
+ * be destroyed (if destroy functions were specified) and all
+ * memory allocated by @tree will be released.
+ *
+ * It is safe to call this function from any thread.
+ *
+ * Since: 2.22
+ */
+void
+q_tree_unref(QTree *tree)
+{
+    g_return_if_fail(tree != NULL);
+
+    if (g_atomic_int_dec_and_test(&tree->ref_count)) {
+        q_tree_remove_all(tree);
+        g_free(tree);
+    }
+}
+
+/**
+ * q_tree_destroy:
+ * @tree: a #QTree
+ *
+ * Removes all keys and values from the #QTree and decreases its
+ * reference count by one. If keys and/or values are dynamically
+ * allocated, you should either free them first or create the #QTree
+ * using q_tree_new_full(). In the latter case the destroy functions
+ * you supplied will be called on all keys and values before destroying
+ * the #QTree.
+ */
+void
+q_tree_destroy(QTree *tree)
+{
+    g_return_if_fail(tree != NULL);
+
+    q_tree_remove_all(tree);
+    q_tree_unref(tree);
+}
+
+/**
+ * q_tree_insert_node:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a key/value pair into a #QTree.
+ *
+ * If the given key already exists in the #QTree its corresponding value
+ * is set to the new value. If you supplied a @value_destroy_func when
+ * creating the #QTree, the old value is freed using that function. If
+ * you supplied a @key_destroy_func when creating the #QTree, the passed
+ * key is freed using that function.
+ *
+ * The tree is automatically 'balanced' as new key/value pairs are added,
+ * so that the distance from the root to every leaf is as small as possible.
+ * The cost of maintaining a balanced tree while inserting new key/value
+ * pairs results in an O(n log(n)) operation, where most of the other
+ * operations are O(log(n)).
+ *
+ * Returns: (transfer none): the inserted (or set) node.
+ *
+ * Since: 2.68 in GLib. Internal in QTree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_insert_node(QTree *tree,
+                   gpointer key,
+                   gpointer value)
+{
+    QTreeNode *node;
+
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    node = q_tree_insert_internal(tree, key, value, FALSE);
+
+#ifdef Q_TREE_DEBUG
+    q_tree_node_check(tree->root);
+#endif
+
+    return node;
+}
+
+/**
+ * q_tree_insert:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a key/value pair into a #QTree.
+ *
+ * Inserts a new key and value into a #QTree as q_tree_insert_node() does,
+ * only this function does not return the inserted or set node.
+ */
+void
+q_tree_insert(QTree *tree,
+              gpointer key,
+              gpointer value)
+{
+    q_tree_insert_node(tree, key, value);
+}
+
+/**
+ * q_tree_replace_node:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a new key and value into a #QTree similar to q_tree_insert_node().
+ * The difference is that if the key already exists in the #QTree, it gets
+ * replaced by the new key. If you supplied a @value_destroy_func when
+ * creating the #QTree, the old value is freed using that function. If you
+ * supplied a @key_destroy_func when creating the #QTree, the old key is
+ * freed using that function.
+ *
+ * The tree is automatically 'balanced' as new key/value pairs are added,
+ * so that the distance from the root to every leaf is as small as possible.
+ *
+ * Returns: (transfer none): the inserted (or set) node.
+ *
+ * Since: 2.68 in GLib. Internal in QTree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_replace_node(QTree *tree,
+                    gpointer key,
+                    gpointer value)
+{
+    QTreeNode *node;
+
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    node = q_tree_insert_internal(tree, key, value, TRUE);
+
+#ifdef Q_TREE_DEBUG
+    q_tree_node_check(tree->root);
+#endif
+
+    return node;
+}
+
+/**
+ * q_tree_replace:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a new key and value into a #QTree as q_tree_replace_node() does,
+ * only this function does not return the inserted or set node.
+ */
+void
+q_tree_replace(QTree *tree,
+               gpointer key,
+               gpointer value)
+{
+    q_tree_replace_node(tree, key, value);
+}
+
+/* internal insert routine */
+static QTreeNode *
+q_tree_insert_internal(QTree *tree,
+                       gpointer key,
+                       gpointer value,
+                       gboolean replace)
+{
+    QTreeNode *node, *retnode;
+    QTreeNode *path[MAX_GTREE_HEIGHT];
+    int idx;
+
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    if (!tree->root) {
+        tree->root = q_tree_node_new(key, value);
+        tree->nnodes++;
+        return tree->root;
+    }
+
+    idx = 0;
+    path[idx++] = NULL;
+    node = tree->root;
+
+    while (1) {
+        int cmp = tree->key_compare(key, node->key, tree->key_compare_data);
+
+        if (cmp == 0) {
+            if (tree->value_destroy_func) {
+                tree->value_destroy_func(node->value);
+            }
+
+            node->value = value;
+
+            if (replace) {
+                if (tree->key_destroy_func) {
+                    tree->key_destroy_func(node->key);
+                }
+
+                node->key = key;
+            } else {
+                /* free the passed key */
+                if (tree->key_destroy_func) {
+                    tree->key_destroy_func(key);
+                }
+            }
+
+            return node;
+        } else if (cmp < 0) {
+            if (node->left_child) {
+                path[idx++] = node;
+                node = node->left;
+            } else {
+                QTreeNode *child = q_tree_node_new(key, value);
+
+                child->left = node->left;
+                child->right = node;
+                node->left = child;
+                node->left_child = TRUE;
+                node->balance -= 1;
+
+                tree->nnodes++;
+
+                retnode = child;
+                break;
+            }
+        } else {
+            if (node->right_child) {
+                path[idx++] = node;
+                node = node->right;
+            } else {
+                QTreeNode *child = q_tree_node_new(key, value);
+
+                child->right = node->right;
+                child->left = node;
+                node->right = child;
+                node->right_child = TRUE;
+                node->balance += 1;
+
+                tree->nnodes++;
+
+                retnode = child;
+                break;
+            }
+        }
+    }
+
+    /*
+     * Restore balance. This is the advantage of a non-recursive
+     * implementation: once we are done with balancing, we 'break'
+     * out of the loop and we are done.
+     */
+    while (1) {
+        QTreeNode *bparent = path[--idx];
+        gboolean left_node = (bparent && node == bparent->left);
+        g_assert(!bparent || bparent->left == node || bparent->right == node);
+
+        if (node->balance < -1 || node->balance > 1) {
+            node = q_tree_node_balance(node);
+            if (bparent == NULL) {
+                tree->root = node;
+            } else if (left_node) {
+                bparent->left = node;
+            } else {
+                bparent->right = node;
+            }
+        }
+
+        if (node->balance == 0 || bparent == NULL) {
+            break;
+        }
+
+        if (left_node) {
+            bparent->balance -= 1;
+        } else {
+            bparent->balance += 1;
+        }
+
+        node = bparent;
+    }
+
+    return retnode;
+}
+
+/**
+ * q_tree_remove:
+ * @tree: a #QTree
+ * @key: the key to remove
+ *
+ * Removes a key/value pair from a #QTree.
+ *
+ * If the #QTree was created using q_tree_new_full(), the key and value
+ * are freed using the supplied destroy functions, otherwise you have to
+ * make sure that any dynamically allocated values are freed yourself.
+ * If the key does not exist in the #QTree, the function does nothing.
+ *
+ * The cost of maintaining a balanced tree while removing a key/value
+ * pair results in an O(n log(n)) operation, where most of the other
+ * operations are O(log(n)).
+ *
+ * Returns: %TRUE if the key was found (prior to 2.8, this function
+ *     returned nothing)
+ */
+gboolean
+q_tree_remove(QTree *tree,
+              gconstpointer key)
+{
+    gboolean removed;
+
+    g_return_val_if_fail(tree != NULL, FALSE);
+
+    removed = q_tree_remove_internal(tree, key, FALSE);
+
+#ifdef Q_TREE_DEBUG
+    q_tree_node_check(tree->root);
+#endif
+
+    return removed;
+}
+
+/**
+ * q_tree_steal:
+ * @tree: a #QTree
+ * @key: the key to remove
+ *
+ * Removes a key and its associated value from a #QTree without calling
+ * the key and value destroy functions.
+ *
+ * If the key does not exist in the #QTree, the function does nothing.
+ *
+ * Returns: %TRUE if the key was found (prior to 2.8, this function
+ *     returned nothing)
+ */
+gboolean
+q_tree_steal(QTree *tree,
+             gconstpointer key)
+{
+    gboolean removed;
+
+    g_return_val_if_fail(tree != NULL, FALSE);
+
+    removed = q_tree_remove_internal(tree, key, TRUE);
+
+#ifdef Q_TREE_DEBUG
+    q_tree_node_check(tree->root);
+#endif
+
+    return removed;
+}
+
+/* internal remove routine */
+static gboolean
+q_tree_remove_internal(QTree *tree,
+                       gconstpointer key,
+                       gboolean steal)
+{
+    QTreeNode *node, *parent, *balance;
+    QTreeNode *path[MAX_GTREE_HEIGHT];
+    int idx;
+    gboolean left_node;
+
+    g_return_val_if_fail(tree != NULL, FALSE);
+
+    if (!tree->root) {
+        return FALSE;
+    }
+
+    idx = 0;
+    path[idx++] = NULL;
+    node = tree->root;
+
+    while (1) {
+        int cmp = tree->key_compare(key, node->key, tree->key_compare_data);
+
+        if (cmp == 0) {
+            break;
+        } else if (cmp < 0) {
+            if (!node->left_child) {
+                return FALSE;
+            }
+
+            path[idx++] = node;
+            node = node->left;
+        } else {
+            if (!node->right_child) {
+                return FALSE;
+            }
+
+            path[idx++] = node;
+            node = node->right;
+        }
+    }
+
+    /*
+     * The following code is almost equal to q_tree_remove_node,
+     * except that we do not have to call q_tree_node_parent.
+     */
+    balance = parent = path[--idx];
+    g_assert(!parent || parent->left == node || parent->right == node);
+    left_node = (parent && node == parent->left);
+
+    if (!node->left_child) {
+        if (!node->right_child) {
+            if (!parent) {
+                tree->root = NULL;
+            } else if (left_node) {
+                parent->left_child = FALSE;
+                parent->left = node->left;
+                parent->balance += 1;
+            } else {
+                parent->right_child = FALSE;
+                parent->right = node->right;
+                parent->balance -= 1;
+            }
+        } else {
+            /* node has a right child */
+            QTreeNode *tmp = q_tree_node_next(node);
+            tmp->left = node->left;
+
+            if (!parent) {
+                tree->root = node->right;
+            } else if (left_node) {
+                parent->left = node->right;
+                parent->balance += 1;
+            } else {
+                parent->right = node->right;
+                parent->balance -= 1;
+            }
+        }
+    } else {
+        /* node has a left child */
+        if (!node->right_child) {
+            QTreeNode *tmp = q_tree_node_previous(node);
+            tmp->right = node->right;
+
+            if (parent == NULL) {
+                tree->root = node->left;
+            } else if (left_node) {
+                parent->left = node->left;
+                parent->balance += 1;
+            } else {
+                parent->right = node->left;
+                parent->balance -= 1;
+            }
+        } else {
+            /* node has both children (pant, pant!) */
+            QTreeNode *prev = node->left;
+            QTreeNode *next = node->right;
+            QTreeNode *nextp = node;
+            int old_idx = idx + 1;
+            idx++;
+
+            /* path[idx] == parent */
+            /* find the immediately next node (and its parent) */
+            while (next->left_child) {
+                path[++idx] = nextp = next;
+                next = next->left;
+            }
+
+            path[old_idx] = next;
+            balance = path[idx];
+
+            /* remove 'next' from the tree */
+            if (nextp != node) {
+                if (next->right_child) {
+                    nextp->left = next->right;
+                } else {
+                    nextp->left_child = FALSE;
+                }
+                nextp->balance += 1;
+
+                next->right_child = TRUE;
+                next->right = node->right;
+            } else {
+                node->balance -= 1;
+            }
+
+            /* set the prev to point to the right place */
+            while (prev->right_child) {
+                prev = prev->right;
+            }
+            prev->right = next;
+
+            /* prepare 'next' to replace 'node' */
+            next->left_child = TRUE;
+            next->left = node->left;
+            next->balance = node->balance;
+
+            if (!parent) {
+                tree->root = next;
+            } else if (left_node) {
+                parent->left = next;
+            } else {
+                parent->right = next;
+            }
+        }
+    }
+
+    /* restore balance */
+    if (balance) {
+        while (1) {
+            QTreeNode *bparent = path[--idx];
+            g_assert(!bparent ||
+                     bparent->left == balance ||
+                     bparent->right == balance);
+            left_node = (bparent && balance == bparent->left);
+
+            if (balance->balance < -1 || balance->balance > 1) {
+                balance = q_tree_node_balance(balance);
+                if (!bparent) {
+                    tree->root = balance;
+                } else if (left_node) {
+                    bparent->left = balance;
+                } else {
+                    bparent->right = balance;
+                }
+            }
+
+            if (balance->balance != 0 || !bparent) {
+                break;
+            }
+
+            if (left_node) {
+                bparent->balance += 1;
+            } else {
+                bparent->balance -= 1;
+            }
+
+            balance = bparent;
+        }
+    }
+
+    if (!steal) {
+        if (tree->key_destroy_func) {
+            tree->key_destroy_func(node->key);
+        }
+        if (tree->value_destroy_func) {
+            tree->value_destroy_func(node->value);
+        }
+    }
+
+    g_free(node);
+
+    tree->nnodes--;
+
+    return TRUE;
+}
+
+/**
+ * q_tree_lookup_node:
+ * @tree: a #QTree
+ * @key: the key to look up
+ *
+ * Gets the tree node corresponding to the given key. Since a #QTree is
+ * automatically balanced as key/value pairs are added, key lookup
+ * is O(log n) (where n is the number of key/value pairs in the tree).
+ *
+ * Returns: (nullable) (transfer none): the tree node corresponding to
+ *          the key, or %NULL if the key was not found
+ *
+ * Since: 2.68 in GLib. Internal in QTree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_lookup_node(QTree *tree,
+                   gconstpointer key)
+{
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    return q_tree_find_node(tree, key);
+}
+
+/**
+ * q_tree_lookup:
+ * @tree: a #QTree
+ * @key: the key to look up
+ *
+ * Gets the value corresponding to the given key. Since a #QTree is
+ * automatically balanced as key/value pairs are added, key lookup
+ * is O(log n) (where n is the number of key/value pairs in the tree).
+ *
+ * Returns: the value corresponding to the key, or %NULL
+ *     if the key was not found
+ */
+gpointer
+q_tree_lookup(QTree *tree,
+              gconstpointer key)
+{
+    QTreeNode *node;
+
+    node = q_tree_lookup_node(tree, key);
+
+    return node ? node->value : NULL;
+}
+
+/**
+ * q_tree_lookup_extended:
+ * @tree: a #QTree
+ * @lookup_key: the key to look up
+ * @orig_key: (out) (optional) (nullable): returns the original key
+ * @value: (out) (optional) (nullable): returns the value associated with
+ *     the key
+ *
+ * Looks up a key in the #QTree, returning the original key and the
+ * associated value. This is useful if you need to free the memory
+ * allocated for the original key, for example before calling
+ * q_tree_remove().
+ *
+ * Returns: %TRUE if the key was found in the #QTree
+ */
+gboolean
+q_tree_lookup_extended(QTree *tree,
+                       gconstpointer lookup_key,
+                       gpointer *orig_key,
+                       gpointer *value)
+{
+    QTreeNode *node;
+
+    g_return_val_if_fail(tree != NULL, FALSE);
+
+    node = q_tree_find_node(tree, lookup_key);
+
+    if (node) {
+        if (orig_key) {
+            *orig_key = node->key;
+        }
+        if (value) {
+            *value = node->value;
+        }
+        return TRUE;
+    } else {
+        return FALSE;
+    }
+}
+
+/**
+ * q_tree_foreach:
+ * @tree: a #QTree
+ * @func: the function to call for each node visited.
+ *     If this function returns %TRUE, the traversal is stopped.
+ * @user_data: user data to pass to the function
+ *
+ * Calls the given function for each of the key/value pairs in the #QTree.
+ * The function is passed the key and value of each pair, and the given
+ * @user_data parameter. The tree is traversed in sorted order.
+ *
+ * The tree may not be modified while iterating over it (you can't
+ * add/remove items). To remove all items matching a predicate, you need
+ * to add each item to a list in your #GTraverseFunc as you walk over
+ * the tree, then walk the list and remove each item.
+ */
+void
+q_tree_foreach(QTree *tree,
+               GTraverseFunc func,
+               gpointer user_data)
+{
+    QTreeNode *node;
+
+    g_return_if_fail(tree != NULL);
+
+    if (!tree->root) {
+        return;
+    }
+
+    node = q_tree_node_first(tree);
+
+    while (node) {
+        if ((*func)(node->key, node->value, user_data)) {
+            break;
+        }
+
+        node = q_tree_node_next(node);
+    }
+}
+
+/**
+ * q_tree_search_node:
+ * @tree: a #QTree
+ * @search_func: a function used to search the #QTree
+ * @user_data: the data passed as the second argument to @search_func
+ *
+ * Searches a #QTree using @search_func.
+ *
+ * The @search_func is called with a pointer to the key of a key/value
+ * pair in the tree, and the passed in @user_data. If @search_func returns
+ * 0 for a key/value pair, then the corresponding node is returned as
+ * the result of q_tree_search(). If @search_func returns -1, searching
+ * will proceed among the key/value pairs that have a smaller key; if
+ * @search_func returns 1, searching will proceed among the key/value
+ * pairs that have a larger key.
+ *
+ * Returns: (nullable) (transfer none): the node corresponding to the
+ *          found key, or %NULL if the key was not found
+ *
+ * Since: 2.68 in GLib. Internal in QTree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_search_node(QTree *tree,
+                   GCompareFunc search_func,
+                   gconstpointer user_data)
+{
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    if (!tree->root) {
+        return NULL;
+    }
+
+    return q_tree_node_search(tree->root, search_func, user_data);
+}
+
+/**
+ * q_tree_search:
+ * @tree: a #QTree
+ * @search_func: a function used to search the #QTree
+ * @user_data: the data passed as the second argument to @search_func
+ *
+ * Searches a #QTree using @search_func.
+ *
+ * The @search_func is called with a pointer to the key of a key/value
+ * pair in the tree, and the passed in @user_data. If @search_func returns
+ * 0 for a key/value pair, then the corresponding value is returned as
+ * the result of q_tree_search(). If @search_func returns -1, searching
2067
+ * will proceed among the key/value pairs that have a smaller key; if
2068
+ * @search_func returns 1, searching will proceed among the key/value
2069
+ * pairs that have a larger key.
2070
+ *
2071
+ * Returns: the value corresponding to the found key, or %NULL
2072
+ * if the key was not found
2073
+ */
2074
+gpointer
2075
+q_tree_search(QTree *tree,
2076
+ GCompareFunc search_func,
2077
+ gconstpointer user_data)
2078
+{
2079
+ QTreeNode *node;
2080
+
2081
+ node = q_tree_search_node(tree, search_func, user_data);
2082
+
2083
+ return node ? node->value : NULL;
2084
+}
2085
+
2086
+/**
2087
+ * q_tree_height:
2088
+ * @tree: a #QTree
2089
+ *
2090
+ * Gets the height of a #QTree.
2091
+ *
2092
+ * If the #QTree contains no nodes, the height is 0.
2093
+ * If the #QTree contains only one root node the height is 1.
2094
+ * If the root node has children the height is 2, etc.
2095
+ *
2096
+ * Returns: the height of @tree
2097
+ */
2098
+gint
2099
+q_tree_height(QTree *tree)
2100
+{
2101
+ QTreeNode *node;
2102
+ gint height;
2103
+
2104
+ g_return_val_if_fail(tree != NULL, 0);
2105
+
2106
+ if (!tree->root) {
2107
+ return 0;
2108
+ }
2109
+
2110
+ height = 0;
2111
+ node = tree->root;
2112
+
2113
+ while (1) {
2114
+ height += 1 + MAX(node->balance, 0);
2115
+
2116
+ if (!node->left_child) {
2117
+ return height;
2118
+ }
2119
+
2120
+ node = node->left;
2121
+ }
2122
+}
2123
+
2124
+/**
2125
+ * q_tree_nnodes:
2126
+ * @tree: a #QTree
2127
+ *
2128
+ * Gets the number of nodes in a #QTree.
2129
+ *
2130
+ * Returns: the number of nodes in @tree
2131
+ */
2132
+gint
2133
+q_tree_nnodes(QTree *tree)
2134
+{
2135
+ g_return_val_if_fail(tree != NULL, 0);
2136
+
2137
+ return tree->nnodes;
2138
+}
2139
+
2140
+static QTreeNode *
2141
+q_tree_node_balance(QTreeNode *node)
2142
+{
2143
+ if (node->balance < -1) {
2144
+ if (node->left->balance > 0) {
2145
+ node->left = q_tree_node_rotate_left(node->left);
2146
+ }
2147
+ node = q_tree_node_rotate_right(node);
2148
+ } else if (node->balance > 1) {
2149
+ if (node->right->balance < 0) {
2150
+ node->right = q_tree_node_rotate_right(node->right);
2151
+ }
2152
+ node = q_tree_node_rotate_left(node);
2153
+ }
2154
+
2155
+ return node;
2156
+}
2157
+
2158
+static QTreeNode *
2159
+q_tree_find_node(QTree *tree,
2160
+ gconstpointer key)
2161
+{
2162
+ QTreeNode *node;
2163
+ gint cmp;
2164
+
2165
+ node = tree->root;
2166
+ if (!node) {
2167
+ return NULL;
2168
+ }
2169
+
2170
+ while (1) {
2171
+ cmp = tree->key_compare(key, node->key, tree->key_compare_data);
2172
+ if (cmp == 0) {
2173
+ return node;
2174
+ } else if (cmp < 0) {
2175
+ if (!node->left_child) {
2176
+ return NULL;
2177
+ }
2178
+
2179
+ node = node->left;
2180
+ } else {
2181
+ if (!node->right_child) {
2182
+ return NULL;
2183
+ }
2184
+
2185
+ node = node->right;
2186
+ }
2187
+ }
2188
+}
2189
+
2190
+static QTreeNode *
2191
+q_tree_node_search(QTreeNode *node,
2192
+ GCompareFunc search_func,
2193
+ gconstpointer data)
2194
+{
2195
+ gint dir;
2196
+
2197
+ if (!node) {
2198
+ return NULL;
2199
+ }
2200
+
2201
+ while (1) {
2202
+ dir = (*search_func)(node->key, data);
2203
+ if (dir == 0) {
2204
+ return node;
2205
+ } else if (dir < 0) {
2206
+ if (!node->left_child) {
2207
+ return NULL;
2208
+ }
2209
+
2210
+ node = node->left;
2211
+ } else {
2212
+ if (!node->right_child) {
2213
+ return NULL;
2214
+ }
2215
+
2216
+ node = node->right;
2217
+ }
2218
+ }
2219
+}
2220
+
2221
+static QTreeNode *
2222
+q_tree_node_rotate_left(QTreeNode *node)
2223
+{
2224
+ QTreeNode *right;
2225
+ gint a_bal;
2226
+ gint b_bal;
2227
+
2228
+ right = node->right;
2229
+
2230
+ if (right->left_child) {
2231
+ node->right = right->left;
2232
+ } else {
2233
+ node->right_child = FALSE;
2234
+ right->left_child = TRUE;
2235
+ }
2236
+ right->left = node;
2237
+
2238
+ a_bal = node->balance;
2239
+ b_bal = right->balance;
2240
+
2241
+ if (b_bal <= 0) {
2242
+ if (a_bal >= 1) {
2243
+ right->balance = b_bal - 1;
2244
+ } else {
2245
+ right->balance = a_bal + b_bal - 2;
2246
+ }
2247
+ node->balance = a_bal - 1;
2248
+ } else {
2249
+ if (a_bal <= b_bal) {
2250
+ right->balance = a_bal - 2;
2251
+ } else {
2252
+ right->balance = b_bal - 1;
2253
+ }
2254
+ node->balance = a_bal - b_bal - 1;
2255
+ }
2256
+
2257
+ return right;
2258
+}
2259
+
2260
+static QTreeNode *
2261
+q_tree_node_rotate_right(QTreeNode *node)
2262
+{
2263
+ QTreeNode *left;
2264
+ gint a_bal;
2265
+ gint b_bal;
2266
+
2267
+ left = node->left;
2268
+
2269
+ if (left->right_child) {
2270
+ node->left = left->right;
2271
+ } else {
2272
+ node->left_child = FALSE;
2273
+ left->right_child = TRUE;
2274
+ }
2275
+ left->right = node;
2276
+
2277
+ a_bal = node->balance;
2278
+ b_bal = left->balance;
2279
+
2280
+ if (b_bal <= 0) {
2281
+ if (b_bal > a_bal) {
2282
+ left->balance = b_bal + 1;
2283
+ } else {
2284
+ left->balance = a_bal + 2;
2285
+ }
2286
+ node->balance = a_bal - b_bal + 1;
2287
+ } else {
2288
+ if (a_bal <= -1) {
2289
+ left->balance = b_bal + 1;
2290
+ } else {
2291
+ left->balance = a_bal + b_bal + 2;
2292
+ }
2293
+ node->balance = a_bal + 1;
2294
+ }
2295
+
2296
+ return left;
2297
+}
2298
+
2299
+#ifdef Q_TREE_DEBUG
2300
+static gint
2301
+q_tree_node_height(QTreeNode *node)
2302
+{
2303
+ gint left_height;
2304
+ gint right_height;
2305
+
2306
+ if (node) {
2307
+ left_height = 0;
2308
+ right_height = 0;
2309
+
2310
+ if (node->left_child) {
2311
+ left_height = q_tree_node_height(node->left);
2312
+ }
2313
+
2314
+ if (node->right_child) {
2315
+ right_height = q_tree_node_height(node->right);
2316
+ }
2317
+
2318
+ return MAX(left_height, right_height) + 1;
2319
+ }
2320
+
2321
+ return 0;
2322
+}
2323
+
2324
+static void q_tree_node_check(QTreeNode *node)
2325
+{
2326
+ gint left_height;
2327
+ gint right_height;
2328
+ gint balance;
2329
+ QTreeNode *tmp;
2330
+
2331
+ if (node) {
2332
+ if (node->left_child) {
2333
+ tmp = q_tree_node_previous(node);
2334
+ g_assert(tmp->right == node);
2335
+ }
2336
+
2337
+ if (node->right_child) {
2338
+ tmp = q_tree_node_next(node);
2339
+ g_assert(tmp->left == node);
2340
+ }
2341
+
2342
+ left_height = 0;
2343
+ right_height = 0;
2344
+
2345
+ if (node->left_child) {
2346
+ left_height = q_tree_node_height(node->left);
2347
+ }
2348
+ if (node->right_child) {
2349
+ right_height = q_tree_node_height(node->right);
2350
+ }
2351
+
2352
+ balance = right_height - left_height;
2353
+ g_assert(balance == node->balance);
2354
+
2355
+ if (node->left_child) {
2356
+ q_tree_node_check(node->left);
2357
+ }
2358
+ if (node->right_child) {
2359
+ q_tree_node_check(node->right);
2360
+ }
2361
+ }
2362
+}
2363
+#endif
2364
diff --git a/tests/bench/meson.build b/tests/bench/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/bench/meson.build
+++ b/tests/bench/meson.build
@@ -XXX,XX +XXX,XX @@ xbzrle_bench = executable('xbzrle-bench',
                           dependencies: [qemuutil, migration])
 endif

+qtree_bench = executable('qtree-bench',
+                         sources: 'qtree-bench.c',
+                         dependencies: [qemuutil])
+
 executable('atomic_add-bench',
            sources: files('atomic_add-bench.c'),
            dependencies: [qemuutil],
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -XXX,XX +XXX,XX @@ tests = {
   'test-rcu-slist': [],
   'test-qdist': [],
   'test-qht': [],
+  'test-qtree': [],
   'test-bitops': [],
   'test-bitcnt': [],
   'test-qgraph': ['../qtest/libqos/qgraph.c'],
diff --git a/util/meson.build b/util/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -XXX,XX +XXX,XX @@ util_ss.add(when: 'CONFIG_WIN32', if_true: files('oslib-win32.c'))
 util_ss.add(when: 'CONFIG_WIN32', if_true: files('qemu-thread-win32.c'))
 util_ss.add(when: 'CONFIG_WIN32', if_true: winmm)
 util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch)
+util_ss.add(when: 'HAVE_GLIB_WITH_SLICE_ALLOCATOR', if_true: files('qtree.c'))
 util_ss.add(files('envlist.c', 'path.c', 'module.c'))
 util_ss.add(files('host-utils.c'))
 util_ss.add(files('bitmap.c', 'bitops.c'))
--
2.34.1
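The lookup, traversal, and search entry points in the patch above deliberately mirror GLib's GTree API. As a reader's aid (not part of the patch), here is a minimal usage sketch written against the q_tree_* declarations that the series adds in include/qemu/qtree.h; the helper names are invented for the example. Note in particular the collect-then-remove pattern that the q_tree_foreach() documentation requires, since the tree may not be modified during traversal.

#include "qemu/osdep.h"
#include "qemu/qtree.h"

static gint compare_ints(gconstpointer a, gconstpointer b)
{
    return GPOINTER_TO_INT(a) - GPOINTER_TO_INT(b);
}

/*
 * GTraverseFunc for q_tree_foreach(): collect matching keys on a list
 * and return FALSE so traversal continues.  Removal happens after the
 * walk, because the tree must not be modified while iterating.
 */
static gboolean collect_odd_keys(gpointer key, gpointer value, gpointer data)
{
    GSList **to_remove = data;

    if (GPOINTER_TO_INT(key) & 1) {
        *to_remove = g_slist_prepend(*to_remove, key);
    }
    return FALSE;
}

static void qtree_example(void)
{
    QTree *tree = q_tree_new(compare_ints);
    GSList *to_remove = NULL, *l;
    gpointer orig_key, value;

    for (int i = 0; i < 10; i++) {
        q_tree_insert(tree, GINT_TO_POINTER(i), GINT_TO_POINTER(i * i));
    }

    /* O(log n) lookup, per the q_tree_lookup() documentation. */
    g_assert(GPOINTER_TO_INT(q_tree_lookup(tree, GINT_TO_POINTER(3))) == 9);

    /* q_tree_lookup_extended() additionally returns the stored key. */
    g_assert(q_tree_lookup_extended(tree, GINT_TO_POINTER(4),
                                    &orig_key, &value));

    /* Collect first, then remove, as described for q_tree_foreach(). */
    q_tree_foreach(tree, collect_odd_keys, &to_remove);
    for (l = to_remove; l; l = l->next) {
        q_tree_remove(tree, l->data);
    }
    g_slist_free(to_remove);

    g_assert(q_tree_nnodes(tree) == 5);
    q_tree_destroy(tree);
}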
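q_tree_search() differs from plain lookup in that the comparator's return value steers the descent, which makes it suitable for "which entry contains this address" queries rather than exact-key matches. A sketch of that idiom, continuing the includes above (the struct and function names are illustrative, not from the patch):

struct interval {
    uint64_t start;
    uint64_t last;    /* inclusive upper bound */
};

/*
 * GCompareFunc for q_tree_search(): @key is a tree key, @data points
 * at the probe address.  Return 0 on containment, -1 to continue among
 * smaller keys, +1 to continue among larger keys.
 */
static gint interval_cmp(gconstpointer key, gconstpointer data)
{
    const struct interval *i = key;
    uint64_t addr = *(const uint64_t *)data;

    if (addr < i->start) {
        return -1;
    } else if (addr > i->last) {
        return 1;
    }
    return 0;
}

/* Usage: value = q_tree_search(tree, interval_cmp, &addr); */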
Pass the address of the last byte of the image, rather than
the first address past the last byte. This avoids overflow
when the last page of the address space is involved.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/user-internals.h | 12 ++++++------
 linux-user/elfload.c        | 24 ++++++++++++------------
 linux-user/flatload.c       |  2 +-
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/user-internals.h
+++ b/linux-user/user-internals.h
@@ -XXX,XX +XXX,XX @@ void fork_end(int child);
 /**
  * probe_guest_base:
  * @image_name: the executable being loaded
- * @loaddr: the lowest fixed address in the executable
- * @hiaddr: the highest fixed address in the executable
+ * @loaddr: the lowest fixed address within the executable
+ * @hiaddr: the highest fixed address within the executable
  *
  * Creates the initial guest address space in the host memory space.
  *
- * If @loaddr == 0, then no address in the executable is fixed,
- * i.e. it is fully relocatable. In that case @hiaddr is the size
- * of the executable.
+ * If @loaddr == 0, then no address in the executable is fixed, i.e.
+ * it is fully relocatable. In that case @hiaddr is the size of the
+ * executable minus one.
  *
  * This function will not return if a valid value for guest_base
  * cannot be chosen. On return, the executable loader can expect
  *
- *     target_mmap(loaddr, hiaddr - loaddr, ...)
+ *     target_mmap(loaddr, hiaddr - loaddr + 1, ...)
  *
  * to succeed.
  */
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
         if (guest_hiaddr > reserved_va) {
             error_report("%s: requires more than reserved virtual "
                          "address space (0x%" PRIx64 " > 0x%lx)",
-                         image_name, (uint64_t)guest_hiaddr, reserved_va);
+                         image_name, (uint64_t)guest_hiaddr + 1, reserved_va);
             exit(EXIT_FAILURE);
         }
     } else {
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
         if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) {
             error_report("%s: requires more virtual address space "
                          "than the host can provide (0x%" PRIx64 ")",
-                         image_name, (uint64_t)guest_hiaddr - guest_base);
+                         image_name, (uint64_t)guest_hiaddr + 1 - guest_base);
             exit(EXIT_FAILURE);
         }
 #endif
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
     if (reserved_va) {
         guest_loaddr = (guest_base >= mmap_min_addr ? 0
                         : mmap_min_addr - guest_base);
-        guest_hiaddr = reserved_va;
+        guest_hiaddr = reserved_va - 1;
     }

     /* Reserve the address space for the binary, or reserved_va. */
     test = g2h_untagged(guest_loaddr);
-    addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
+    addr = mmap(test, guest_hiaddr - guest_loaddr + 1, PROT_NONE, flags, -1, 0);
     if (test != addr) {
         pgb_fail_in_use(image_name);
     }
     qemu_log_mask(CPU_LOG_PAGE,
-                  "%s: base @ %p for " TARGET_ABI_FMT_ld " bytes\n",
-                  __func__, addr, guest_hiaddr - guest_loaddr);
+                  "%s: base @ %p for %" PRIu64 " bytes\n",
+                  __func__, addr, (uint64_t)guest_hiaddr - guest_loaddr + 1);
 }

 /**
@@ -XXX,XX +XXX,XX @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
     if (hiaddr != orig_hiaddr) {
         error_report("%s: requires virtual address space that the "
                      "host cannot provide (0x%" PRIx64 ")",
-                     image_name, (uint64_t)orig_hiaddr);
+                     image_name, (uint64_t)orig_hiaddr + 1);
         exit(EXIT_FAILURE);
     }

@@ -XXX,XX +XXX,XX @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
      * arithmetic wraps around.
      */
     if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
-        hiaddr = (uintptr_t) 4 << 30;
+        hiaddr = UINT32_MAX;
     } else {
         offset = -(HI_COMMPAGE & -align);
     }
@@ -XXX,XX +XXX,XX @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
         loaddr = MIN(loaddr, LO_COMMPAGE & -align);
     }

-    addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
+    addr = pgb_find_hole(loaddr, hiaddr - loaddr + 1, align, offset);
     if (addr == -1) {
         /*
          * If HI_COMMPAGE, there *might* be a non-consecutive allocation
@@ -XXX,XX +XXX,XX @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
     if (guest_hiaddr > reserved_va) {
         error_report("%s: requires more than reserved virtual "
                      "address space (0x%" PRIx64 " > 0x%lx)",
-                     image_name, (uint64_t)guest_hiaddr, reserved_va);
+                     image_name, (uint64_t)guest_hiaddr + 1, reserved_va);
         exit(EXIT_FAILURE);
     }

@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, int image_fd,
         if (a < loaddr) {
             loaddr = a;
         }
-        a = eppnt->p_vaddr + eppnt->p_memsz;
+        a = eppnt->p_vaddr + eppnt->p_memsz - 1;
         if (a > hiaddr) {
             hiaddr = a;
         }
@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, int image_fd,
      * In both cases, we will overwrite pages in this range with mappings
      * from the executable.
      */
-    load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
+    load_addr = target_mmap(loaddr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
                             MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
                             (ehdr->e_type == ET_EXEC ? MAP_FIXED : 0),
                             -1, 0);
diff --git a/linux-user/flatload.c b/linux-user/flatload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/flatload.c
+++ b/linux-user/flatload.c
@@ -XXX,XX +XXX,XX @@ static int load_flat_file(struct linux_binprm * bprm,
      * Allocate the address space.
      */
     probe_guest_base(bprm->filename, 0,
-                     text_len + data_len + extra + indx_len);
+                     text_len + data_len + extra + indx_len - 1);

     /*
      * there are a couple of cases here, the separate code/data
--
2.34.1

Implement with and without Zicond. Without Zicond, we were letting
the middle-end expand to a 5 insn sequence; better to use a branch
over a single insn.

Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target-con-set.h |   1 +
 tcg/riscv/tcg-target.h         |   4 +-
 tcg/riscv/tcg-target.c.inc     | 139 ++++++++++++++++++++++++++++++++-
 3 files changed, 141 insertions(+), 3 deletions(-)

diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rI)
 C_O1_I2(r, r, rJ)
 C_O1_I2(r, rZ, rN)
 C_O1_I2(r, rZ, rZ)
+C_O1_I4(r, r, rI, rM, rM)
 C_O2_I4(r, r, rZ, rZ, rM, rM)
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #endif

 /* optional instructions */
-#define TCG_TARGET_HAS_movcond_i32 0
+#define TCG_TARGET_HAS_movcond_i32 1
 #define TCG_TARGET_HAS_div_i32 1
 #define TCG_TARGET_HAS_rem_i32 1
 #define TCG_TARGET_HAS_div2_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_setcond2 1
 #define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_movcond_i64 0
+#define TCG_TARGET_HAS_movcond_i64 1
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 1
 #define TCG_TARGET_HAS_div2_i64 0
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
     }

     /*
      * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
-     * Used by addsub2, which may need the negative operation,
+     * Used by addsub2 and movcond, which may need the negative value,
      * and requires the modified constant to be representable.
      */
     if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
     }
 }

+static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
+                                   int val1, bool c_val1,
+                                   int val2, bool c_val2)
+{
+    if (val1 == 0) {
+        if (c_val2) {
+            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val2);
+            val2 = TCG_REG_TMP1;
+        }
+        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, val2, test_ne);
+        return;
+    }
+
+    if (val2 == 0) {
+        if (c_val1) {
+            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1);
+            val1 = TCG_REG_TMP1;
+        }
+        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, val1, test_ne);
+        return;
+    }
+
+    if (c_val2) {
+        if (c_val1) {
+            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1 - val2);
+        } else {
+            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val1, -val2);
+        }
+        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, TCG_REG_TMP1, test_ne);
+        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val2);
+        return;
+    }
+
+    if (c_val1) {
+        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val2, -val1);
+        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, TCG_REG_TMP1, test_ne);
+        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val1);
+        return;
+    }
+
+    tcg_out_opc_reg(s, OPC_CZERO_NEZ, TCG_REG_TMP1, val2, test_ne);
+    tcg_out_opc_reg(s, OPC_CZERO_EQZ, TCG_REG_TMP0, val1, test_ne);
+    tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_TMP0, TCG_REG_TMP1);
+}
+
+static void tcg_out_movcond_br1(TCGContext *s, TCGCond cond, TCGReg ret,
+                                TCGReg cmp1, TCGReg cmp2,
+                                int val, bool c_val)
+{
+    RISCVInsn op;
+    int disp = 8;
+
+    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_brcond_to_riscv));
+    op = tcg_brcond_to_riscv[cond].op;
+    tcg_debug_assert(op != 0);
+
+    if (tcg_brcond_to_riscv[cond].swap) {
+        tcg_out_opc_branch(s, op, cmp2, cmp1, disp);
+    } else {
+        tcg_out_opc_branch(s, op, cmp1, cmp2, disp);
+    }
+    if (c_val) {
+        tcg_out_opc_imm(s, OPC_ADDI, ret, TCG_REG_ZERO, val);
+    } else {
+        tcg_out_opc_imm(s, OPC_ADDI, ret, val, 0);
+    }
+}
+
+static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
+                                TCGReg cmp1, TCGReg cmp2,
+                                int val1, bool c_val1,
+                                int val2, bool c_val2)
+{
+    TCGReg tmp;
+
+    /* TCG optimizer reorders to prefer ret matching val2. */
+    if (!c_val2 && ret == val2) {
+        cond = tcg_invert_cond(cond);
+        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val1, c_val1);
+        return;
+    }
+
+    if (!c_val1 && ret == val1) {
+        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val2, c_val2);
+        return;
+    }
+
+    tmp = (ret == cmp1 || ret == cmp2 ? TCG_REG_TMP1 : ret);
+    if (c_val1) {
+        tcg_out_movi(s, TCG_TYPE_REG, tmp, val1);
+    } else {
+        tcg_out_mov(s, TCG_TYPE_REG, tmp, val1);
+    }
+    tcg_out_movcond_br1(s, cond, tmp, cmp1, cmp2, val2, c_val2);
+    tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
+}
+
+static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
+                            TCGReg cmp1, int cmp2, bool c_cmp2,
+                            TCGReg val1, bool c_val1,
+                            TCGReg val2, bool c_val2)
+{
+    int tmpflags;
+    TCGReg t;
+
+    if (!have_zicond && (!c_cmp2 || cmp2 == 0)) {
+        tcg_out_movcond_br2(s, cond, ret, cmp1, cmp2,
+                            val1, c_val1, val2, c_val2);
+        return;
+    }
+
+    tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, cmp1, cmp2, c_cmp2);
+    t = tmpflags & ~SETCOND_FLAGS;
+
+    if (have_zicond) {
+        if (tmpflags & SETCOND_INV) {
+            tcg_out_movcond_zicond(s, ret, t, val2, c_val2, val1, c_val1);
+        } else {
+            tcg_out_movcond_zicond(s, ret, t, val1, c_val1, val2, c_val2);
+        }
+    } else {
+        cond = tmpflags & SETCOND_INV ? TCG_COND_EQ : TCG_COND_NE;
+        tcg_out_movcond_br2(s, cond, ret, t, TCG_REG_ZERO,
+                            val1, c_val1, val2, c_val2);
+    }
+}
+
 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
 {
     TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_setcond(s, args[3], a0, a1, a2, c2);
         break;

+    case INDEX_op_movcond_i32:
+    case INDEX_op_movcond_i64:
+        tcg_out_movcond(s, args[5], a0, a1, a2, c2,
+                        args[3], const_args[3], args[4], const_args[4]);
+        break;
+
     case INDEX_op_qemu_ld_a32_i32:
     case INDEX_op_qemu_ld_a64_i32:
         tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_brcond_i64:
         return C_O0_I2(rZ, rZ);

+    case INDEX_op_movcond_i32:
+    case INDEX_op_movcond_i64:
+        return C_O1_I4(r, r, rI, rM, rM);
+
     case INDEX_op_add2_i32:
     case INDEX_op_add2_i64:
     case INDEX_op_sub2_i32:
--
2.34.1
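Zicond's czero.eqz/czero.nez are zeroing selects rather than full conditional moves, so the sequences emitted by tcg_out_movcond_zicond() above are worth checking against a model. The C sketch below is an illustration of the lowering, not emitted code; the czero semantics it assumes (czero.eqz rd, rs1, rs2 yields 0 when rs2 == 0, else rs1; czero.nez inverts the test) come from the Zicond specification.

#include <stdint.h>

/* czero.eqz: zero the result when the condition register is zero. */
static uint64_t czero_eqz(uint64_t rs1, uint64_t rs2)
{
    return rs2 == 0 ? 0 : rs1;
}

/* czero.nez: zero the result when the condition register is nonzero. */
static uint64_t czero_nez(uint64_t rs1, uint64_t rs2)
{
    return rs2 != 0 ? 0 : rs1;
}

/* The register/register tail of tcg_out_movcond_zicond(): three insns. */
static uint64_t movcond_rr(uint64_t test_ne, uint64_t val1, uint64_t val2)
{
    uint64_t tmp1 = czero_nez(val2, test_ne);  /* val2 if test == 0, else 0 */
    uint64_t tmp0 = czero_eqz(val1, test_ne);  /* val1 if test != 0, else 0 */
    return tmp0 | tmp1;                        /* OR of the disjoint halves */
}

/* The two-constant case: one czero plus an ADDI folding the difference. */
static uint64_t movcond_cc(uint64_t test_ne, uint64_t val1, uint64_t val2)
{
    uint64_t tmp = val1 - val2;                /* constant difference */
    return czero_eqz(tmp, test_ne) + val2;     /* val1 if test != 0, else val2 */
}

Either way the result is val1 when the computed condition holds (test_ne != 0) and val2 otherwise, matching TCG's movcond semantics; without Zicond, tcg_out_movcond_br1() instead branches over a single ADDI, which is the "branch over a single insn" mentioned in the commit message.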
Pass the address of the last byte to be changed, rather than
the first address past the last byte. This avoids overflow
when the last page of the address space is involved.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1528
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h |  2 +-
 accel/tcg/user-exec.c  | 16 +++++++---------
 bsd-user/mmap.c        |  6 +++---
 linux-user/elfload.c   | 11 ++++++-----
 linux-user/mmap.c      | 16 ++++++++--------
 linux-user/syscall.c   |  4 ++--
 6 files changed, 27 insertions(+), 28 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ typedef int (*walk_memory_regions_fn)(void *, target_ulong,
 int walk_memory_regions(void *, walk_memory_regions_fn);

 int page_get_flags(target_ulong address);
-void page_set_flags(target_ulong start, target_ulong end, int flags);
+void page_set_flags(target_ulong start, target_ulong last, int flags);
 void page_reset_target_data(target_ulong start, target_ulong end);
 int page_check_range(target_ulong start, target_ulong len, int flags);
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
  * The flag PAGE_WRITE_ORG is positioned automatically depending
  * on PAGE_WRITE. The mmap_lock should already be held.
  */
-void page_set_flags(target_ulong start, target_ulong end, int flags)
+void page_set_flags(target_ulong start, target_ulong last, int flags)
 {
-    target_ulong last;
     bool reset = false;
     bool inval_tb = false;

     /* This function should never be called with addresses outside the
        guest address space. If this assert fires, it probably indicates
        a missing call to h2g_valid. */
-    assert(start < end);
-    assert(end - 1 <= GUEST_ADDR_MAX);
+    assert(start <= last);
+    assert(last <= GUEST_ADDR_MAX);
     /* Only set PAGE_ANON with new mappings. */
     assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
     assert_memory_lock();

-    start = start & TARGET_PAGE_MASK;
-    end = TARGET_PAGE_ALIGN(end);
-    last = end - 1;
+    start &= TARGET_PAGE_MASK;
+    last |= ~TARGET_PAGE_MASK;

     if (!(flags & PAGE_VALID)) {
         flags = 0;
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
     }

     if (!flags || reset) {
-        page_reset_target_data(start, end);
+        page_reset_target_data(start, last + 1);
         inval_tb |= pageflags_unset(start, last);
     }
     if (flags) {
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
                                         ~(reset ? 0 : PAGE_STICKY));
     }
     if (inval_tb) {
-        tb_invalidate_phys_range(start, end);
+        tb_invalidate_phys_range(start, last + 1);
     }
 }

diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
         if (ret != 0)
             goto error;
     }
-    page_set_flags(start, start + len, prot | PAGE_VALID);
+    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
     mmap_unlock();
     return 0;
 error:
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
         }
     }
 the_end1:
-    page_set_flags(start, start + len, prot | PAGE_VALID);
+    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
 the_end:
 #ifdef DEBUG_MMAP
     printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
@@ -XXX,XX +XXX,XX @@ int target_munmap(abi_ulong start, abi_ulong len)
     }

     if (ret == 0) {
-        page_set_flags(start, start + len, 0);
+        page_set_flags(start, start + len - 1, 0);
     }
     mmap_unlock();
     return ret;
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
         exit(EXIT_FAILURE);
     }
     page_set_flags(TARGET_VSYSCALL_PAGE,
-                   TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
+                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                    PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
         exit(EXIT_FAILURE);
     }

-    page_set_flags(commpage, commpage + qemu_host_page_size,
+    page_set_flags(commpage, commpage | ~qemu_host_page_mask,
                    PAGE_READ | PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
         exit(EXIT_FAILURE);
     }

-    page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                    PAGE_READ | PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
      * and implement syscalls. Here, simply mark the page executable.
      * Special case the entry points during translation (see do_page_zero).
      */
-    page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                    PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)

     /* Ensure that the bss page(s) are valid */
     if ((page_get_flags(last_bss-1) & prot) != prot) {
-        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
+        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1,
+                       prot | PAGE_VALID);
     }

     if (host_start < host_map_start) {
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
         }
     }

-    page_set_flags(start, start + len, page_flags);
+    page_set_flags(start, start + len - 1, page_flags);
     ret = 0;

 error:
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
     }
     page_flags |= PAGE_RESET;
     if (passthrough_start == passthrough_end) {
-        page_set_flags(start, start + len, page_flags);
+        page_set_flags(start, start + len - 1, page_flags);
     } else {
         if (start < passthrough_start) {
-            page_set_flags(start, passthrough_start, page_flags);
+            page_set_flags(start, passthrough_start - 1, page_flags);
         }
-        page_set_flags(passthrough_start, passthrough_end,
+        page_set_flags(passthrough_start, passthrough_end - 1,
                        page_flags | PAGE_PASSTHROUGH);
         if (passthrough_end < start + len) {
-            page_set_flags(passthrough_end, start + len, page_flags);
+            page_set_flags(passthrough_end, start + len - 1, page_flags);
         }
     }
 the_end:
@@ -XXX,XX +XXX,XX @@ int target_munmap(abi_ulong start, abi_ulong len)
     }

     if (ret == 0) {
-        page_set_flags(start, start + len, 0);
+        page_set_flags(start, start + len - 1, 0);
     }
     mmap_unlock();
     return ret;
@@ -XXX,XX +XXX,XX @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
     } else {
         new_addr = h2g(host_addr);
         prot = page_get_flags(old_addr);
-        page_set_flags(old_addr, old_addr + old_size, 0);
-        page_set_flags(new_addr, new_addr + new_size,
+        page_set_flags(old_addr, old_addr + old_size - 1, 0);
+        page_set_flags(new_addr, new_addr + new_size - 1,
                        prot | PAGE_VALID | PAGE_RESET);
     }
     mmap_unlock();
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
     }
     raddr=h2g((unsigned long)host_raddr);

-    page_set_flags(raddr, raddr + shm_info.shm_segsz,
+    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                    PAGE_VALID | PAGE_RESET | PAGE_READ |
                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

@@ -XXX,XX +XXX,XX @@ static inline abi_long do_shmdt(abi_ulong shmaddr)
     for (i = 0; i < N_SHM_REGIONS; ++i) {
         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
             shm_regions[i].in_use = false;
-            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
+            page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
             break;
         }
     }
--
2.34.1

Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target-con-set.h |  1 +
 tcg/riscv/tcg-target.h         |  8 ++++----
 tcg/riscv/tcg-target.c.inc     | 35 ++++++++++++++++++++++++++++++++++
 3 files changed, 40 insertions(+), 4 deletions(-)

diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rI)
 C_O1_I2(r, r, rJ)
 C_O1_I2(r, rZ, rN)
 C_O1_I2(r, rZ, rZ)
+C_N1_I2(r, r, rM)
 C_O1_I4(r, r, rI, rM, rM)
 C_O2_I4(r, r, rZ, rZ, rM, rM)
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_eqv_i32 have_zbb
 #define TCG_TARGET_HAS_nand_i32 0
 #define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_clz_i32 0
-#define TCG_TARGET_HAS_ctz_i32 0
+#define TCG_TARGET_HAS_clz_i32 have_zbb
+#define TCG_TARGET_HAS_ctz_i32 have_zbb
 #define TCG_TARGET_HAS_ctpop_i32 have_zbb
 #define TCG_TARGET_HAS_brcond2 1
 #define TCG_TARGET_HAS_setcond2 1
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
 #define TCG_TARGET_HAS_eqv_i64 have_zbb
 #define TCG_TARGET_HAS_nand_i64 0
 #define TCG_TARGET_HAS_nor_i64 0
-#define TCG_TARGET_HAS_clz_i64 0
-#define TCG_TARGET_HAS_ctz_i64 0
+#define TCG_TARGET_HAS_clz_i64 have_zbb
+#define TCG_TARGET_HAS_ctz_i64 have_zbb
 #define TCG_TARGET_HAS_ctpop_i64 have_zbb
 #define TCG_TARGET_HAS_add2_i64 1
 #define TCG_TARGET_HAS_sub2_i64 1
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
     }
 }

+static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
+                         TCGReg ret, TCGReg src1, int src2, bool c_src2)
+{
+    tcg_out_opc_imm(s, insn, ret, src1, 0);
+
+    if (!c_src2 || src2 != (type == TCG_TYPE_I32 ? 32 : 64)) {
+        /*
+         * The requested zero result does not match the insn, so adjust.
+         * Note that constraints put 'ret' in a new register, so the
+         * computation above did not clobber either 'src1' or 'src2'.
+         */
+        tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true,
+                        src2, c_src2, ret, false);
+    }
+}
+
 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
 {
     TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0);
         break;

+    case INDEX_op_clz_i32:
+        tcg_out_cltz(s, TCG_TYPE_I32, OPC_CLZW, a0, a1, a2, c2);
+        break;
+    case INDEX_op_clz_i64:
+        tcg_out_cltz(s, TCG_TYPE_I64, OPC_CLZ, a0, a1, a2, c2);
+        break;
+    case INDEX_op_ctz_i32:
+        tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2);
+        break;
+    case INDEX_op_ctz_i64:
+        tcg_out_cltz(s, TCG_TYPE_I64, OPC_CTZ, a0, a1, a2, c2);
+        break;
+
     case INDEX_op_add2_i32:
         tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                         const_args[4], const_args[5], false, true);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_rotr_i64:
         return C_O1_I2(r, r, ri);

+    case INDEX_op_clz_i32:
+    case INDEX_op_clz_i64:
+    case INDEX_op_ctz_i32:
+    case INDEX_op_ctz_i64:
+        return C_N1_I2(r, r, rM);
+
     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
         return C_O0_I2(rZ, rZ);
--
2.34.1
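The comment inside tcg_out_cltz() above is the crux of this patch: Zbb's clz/ctz instructions return the operation width (32 or 64) for a zero input, while TCG's clz/ctz opcodes take an arbitrary fallback operand. A C model of the 64-bit case, assuming GCC/Clang's __builtin_clzll purely for illustration (this is not the emitted code):

#include <stdint.h>

static uint64_t model_clz64(uint64_t src1, uint64_t src2, int c_src2)
{
    /* What the CLZ instruction itself produces: 64 for a zero input. */
    uint64_t ret = src1 ? (uint64_t)__builtin_clzll(src1) : 64;

    /* tcg_out_cltz(): patch only when the fallback might differ. */
    if (!c_src2 || src2 != 64) {
        ret = (src1 == 0) ? src2 : ret;   /* the movcond fixup */
    }
    return ret;
}

When src2 is the constant 64 (or 32 for the i32 forms), the instruction's natural zero-input result already matches and the movcond is skipped entirely; the new C_N1_I2 constraint guarantees 'ret' lands in a fresh register, so src1 and src2 are still intact for that fixup.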
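Both "pass last not end" patches in this series exist for the same arithmetic reason: with an N-bit target_ulong, a range that touches the final page makes the exclusive 'end' wrap to zero, while the inclusive last byte remains representable. A self-contained illustration with 32-bit addresses and a 4 KiB page (constants chosen for the example, not taken from QEMU):

#include <stdint.h>
#include <assert.h>

#define PAGE_MASK ((uint32_t)~0xfffu)

static void page_bounds(uint32_t start, uint32_t len,
                        uint32_t *first, uint32_t *last)
{
    *last = start + len - 1;    /* inclusive: no wrap even at the top */

    *first = start & PAGE_MASK; /* round down to the page start */
    *last |= ~PAGE_MASK;        /* round up to the last byte of the page,
                                   the new page_set_flags() form */
}

int main(void)
{
    uint32_t first, last;

    page_bounds(0xfffff000u, 0x1000u, &first, &last);
    assert(first == 0xfffff000u && last == 0xffffffffu);
    /* The old exclusive form would compute end = start + len == 0 here,
     * so TARGET_PAGE_ALIGN(end) described an empty range. */
    return 0;
}

This is why page_set_flags() now rounds with "start &= TARGET_PAGE_MASK" and "last |= ~TARGET_PAGE_MASK" instead of TARGET_PAGE_ALIGN(end), and why every caller in bsd-user, linux-user, and elfload passes "start + len - 1".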