Posting pre-PR because I had to adjust Emilio's QTree patch [1],
and added a new patch to avoid an assert that can be generated
with incorrect -R reserved_va values vs the ARM commpage.

r~

[1] https://gitlab.com/rth7680/qemu/-/jobs/3975817279#L92

Emilio Cota (2):
  util: import GTree as QTree
  tcg: use QTree instead of GTree

Richard Henderson (9):
  linux-user: Diagnose misaligned -R size
  include/exec: Change reserved_va semantics to last byte
  accel/tcg: Pass last not end to page_set_flags
  accel/tcg: Pass last not end to page_reset_target_data
  accel/tcg: Pass last not end to PAGE_FOR_EACH_TB
  accel/tcg: Pass last not end to page_collection_lock
  accel/tcg: Pass last not end to tb_invalidate_phys_page_range__locked
  accel/tcg: Pass last not end to tb_invalidate_phys_range
  linux-user/arm: Take more care allocating commpage

 configure                   |   15 +
 meson.build                 |    4 +
 include/exec/cpu-all.h      |   15 +-
 include/exec/exec-all.h     |    2 +-
 include/qemu/qtree.h        |  201 +++++
 linux-user/arm/target_cpu.h |    2 +-
 accel/tcg/tb-maint.c        |  112 +--
 accel/tcg/translate-all.c   |    2 +-
 accel/tcg/user-exec.c       |   25 +-
 bsd-user/main.c             |   10 +-
 bsd-user/mmap.c             |   10 +-
 linux-user/elfload.c        |   67 +-
 linux-user/main.c           |   31 +-
 linux-user/mmap.c           |   22 +-
 linux-user/syscall.c        |    4 +-
 softmmu/physmem.c           |    2 +-
 tcg/region.c                |   19 +-
 tests/bench/qtree-bench.c   |  286 +++++++
 tests/unit/test-qtree.c     |  333 +++++++++
 util/qtree.c                | 1390 +++++++++++++++++++++++++++++++++++
 tests/bench/meson.build     |    4 +
 tests/unit/meson.build      |    1 +
 util/meson.build            |    1 +
 23 files changed, 2412 insertions(+), 146 deletions(-)
 create mode 100644 include/qemu/qtree.h
 create mode 100644 tests/bench/qtree-bench.c
 create mode 100644 tests/unit/test-qtree.c
 create mode 100644 util/qtree.c

--
2.34.1


Merge the first set of reviewed patches from my queue.

r~

The following changes since commit 6dd06214892d71cbbdd25daed7693e58afcb1093:

  Merge tag 'pull-hex-20230421' of https://github.com/quic/qemu into staging (2023-04-22 08:31:38 +0100)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230423

for you to fetch changes up to 3ea9be33400f14305565a9a094cb6031c07183d5:

  tcg/riscv: Conditionalize tcg_out_exts_i32_i64 (2023-04-23 08:46:45 +0100)

----------------------------------------------------------------
tcg cleanups:
  - Remove tcg_abort()
  - Split out extensions as known backend interfaces
  - Put the separate extensions together as tcg_out_movext
  - Introduce tcg_out_xchg as a backend interface
  - Clear TCGLabelQemuLdst on allocation
  - Avoid redundant extensions for riscv

----------------------------------------------------------------
Richard Henderson (15):
      tcg: Replace if + tcg_abort with tcg_debug_assert
      tcg: Replace tcg_abort with g_assert_not_reached
      tcg: Split out tcg_out_ext8s
      tcg: Split out tcg_out_ext8u
      tcg: Split out tcg_out_ext16s
      tcg: Split out tcg_out_ext16u
      tcg: Split out tcg_out_ext32s
      tcg: Split out tcg_out_ext32u
      tcg: Split out tcg_out_exts_i32_i64
      tcg: Split out tcg_out_extu_i32_i64
      tcg: Split out tcg_out_extrl_i64_i32
      tcg: Introduce tcg_out_movext
      tcg: Introduce tcg_out_xchg
      tcg: Clear TCGLabelQemuLdst on allocation
      tcg/riscv: Conditionalize tcg_out_exts_i32_i64

 include/tcg/tcg.h                |   6 --
 target/i386/tcg/translate.c      |  20 +++---
 target/s390x/tcg/translate.c     |   4 +-
 tcg/optimize.c                   |  10 ++-
 tcg/tcg.c                        | 135 +++++++++++++++++++++++++++++++++++----
 tcg/aarch64/tcg-target.c.inc     | 106 +++++++++++++++++++-----------
 tcg/arm/tcg-target.c.inc         |  93 +++++++++++++++++----------
 tcg/i386/tcg-target.c.inc        | 129 ++++++++++++++++++-------------------
 tcg/loongarch64/tcg-target.c.inc | 123 +++++++++++++----------------------
 tcg/mips/tcg-target.c.inc        |  94 +++++++++++++++++++--------
 tcg/ppc/tcg-target.c.inc         | 119 ++++++++++++++++++----------------
 tcg/riscv/tcg-target.c.inc       |  83 +++++++++++-------------
 tcg/s390x/tcg-target.c.inc       | 128 +++++++++++++++++--------------------
 tcg/sparc64/tcg-target.c.inc     | 117 +++++++++++++++++++++------------
 tcg/tcg-ldst.c.inc               |   1 +
 tcg/tci/tcg-target.c.inc         | 116 ++++++++++++++++++++++++++++---
 16 files changed, 786 insertions(+), 498 deletions(-)

Pass the address of the last byte to be changed, rather than
the first address past the last byte.  This avoids overflow
when the last page of the address space is involved.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1528
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h |  2 +-
 accel/tcg/user-exec.c  | 16 +++++++---------
 bsd-user/mmap.c        |  6 +++---
 linux-user/elfload.c   | 11 ++++++-----
 linux-user/mmap.c      | 16 ++++++++--------
 linux-user/syscall.c   |  4 ++--
 6 files changed, 27 insertions(+), 28 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ typedef int (*walk_memory_regions_fn)(void *, target_ulong,
 int walk_memory_regions(void *, walk_memory_regions_fn);
 
 int page_get_flags(target_ulong address);
-void page_set_flags(target_ulong start, target_ulong end, int flags);
+void page_set_flags(target_ulong start, target_ulong last, int flags);
 void page_reset_target_data(target_ulong start, target_ulong end);
 int page_check_range(target_ulong start, target_ulong len, int flags);
 
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
  * The flag PAGE_WRITE_ORG is positioned automatically depending
  * on PAGE_WRITE. The mmap_lock should already be held.
  */
-void page_set_flags(target_ulong start, target_ulong end, int flags)
+void page_set_flags(target_ulong start, target_ulong last, int flags)
 {
-    target_ulong last;
     bool reset = false;
     bool inval_tb = false;
 
     /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
-    assert(start < end);
-    assert(end - 1 <= GUEST_ADDR_MAX);
+    assert(start <= last);
+    assert(last <= GUEST_ADDR_MAX);
     /* Only set PAGE_ANON with new mappings. */
     assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
     assert_memory_lock();
 
-    start = start & TARGET_PAGE_MASK;
-    end = TARGET_PAGE_ALIGN(end);
-    last = end - 1;
+    start &= TARGET_PAGE_MASK;
+    last |= ~TARGET_PAGE_MASK;
 
     if (!(flags & PAGE_VALID)) {
         flags = 0;
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
     }
 
     if (!flags || reset) {
-        page_reset_target_data(start, end);
+        page_reset_target_data(start, last + 1);
         inval_tb |= pageflags_unset(start, last);
     }
     if (flags) {
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
                                         ~(reset ? 0 : PAGE_STICKY));
     }
     if (inval_tb) {
-        tb_invalidate_phys_range(start, end);
+        tb_invalidate_phys_range(start, last + 1);
     }
 }
 
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
             if (ret != 0)
                 goto error;
     }
-    page_set_flags(start, start + len, prot | PAGE_VALID);
+    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
     mmap_unlock();
     return 0;
 error:
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
         }
     }
 the_end1:
-    page_set_flags(start, start + len, prot | PAGE_VALID);
+    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
 the_end:
 #ifdef DEBUG_MMAP
     printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
@@ -XXX,XX +XXX,XX @@ int target_munmap(abi_ulong start, abi_ulong len)
     }
 
     if (ret == 0) {
-        page_set_flags(start, start + len, 0);
+        page_set_flags(start, start + len - 1, 0);
     }
     mmap_unlock();
     return ret;
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
         exit(EXIT_FAILURE);
     }
     page_set_flags(TARGET_VSYSCALL_PAGE,
-                   TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
+                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                    PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
         exit(EXIT_FAILURE);
     }
 
-    page_set_flags(commpage, commpage + qemu_host_page_size,
+    page_set_flags(commpage, commpage | ~qemu_host_page_mask,
                    PAGE_READ | PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
         exit(EXIT_FAILURE);
     }
 
-    page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                    PAGE_READ | PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
      * and implement syscalls. Here, simply mark the page executable.
      * Special case the entry points during translation (see do_page_zero).
      */
-    page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+    page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
                    PAGE_EXEC | PAGE_VALID);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
 
     /* Ensure that the bss page(s) are valid */
     if ((page_get_flags(last_bss-1) & prot) != prot) {
-        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
+        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1,
+                       prot | PAGE_VALID);
     }
 
     if (host_start < host_map_start) {
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
         }
     }
 
-    page_set_flags(start, start + len, page_flags);
+    page_set_flags(start, start + len - 1, page_flags);
     ret = 0;
 
 error:
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
     }
     page_flags |= PAGE_RESET;
     if (passthrough_start == passthrough_end) {
-        page_set_flags(start, start + len, page_flags);
+        page_set_flags(start, start + len - 1, page_flags);
     } else {
         if (start < passthrough_start) {
-            page_set_flags(start, passthrough_start, page_flags);
+            page_set_flags(start, passthrough_start - 1, page_flags);
         }
-        page_set_flags(passthrough_start, passthrough_end,
+        page_set_flags(passthrough_start, passthrough_end - 1,
                        page_flags | PAGE_PASSTHROUGH);
         if (passthrough_end < start + len) {
-            page_set_flags(passthrough_end, start + len, page_flags);
+            page_set_flags(passthrough_end, start + len - 1, page_flags);
         }
     }
 the_end:
@@ -XXX,XX +XXX,XX @@ int target_munmap(abi_ulong start, abi_ulong len)
     }
 
     if (ret == 0) {
-        page_set_flags(start, start + len, 0);
+        page_set_flags(start, start + len - 1, 0);
     }
     mmap_unlock();
     return ret;
@@ -XXX,XX +XXX,XX @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
     } else {
         new_addr = h2g(host_addr);
         prot = page_get_flags(old_addr);
-        page_set_flags(old_addr, old_addr + old_size, 0);
-        page_set_flags(new_addr, new_addr + new_size,
+        page_set_flags(old_addr, old_addr + old_size - 1, 0);
+        page_set_flags(new_addr, new_addr + new_size - 1,
                        prot | PAGE_VALID | PAGE_RESET);
     }
     mmap_unlock();
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
     }
     raddr=h2g((unsigned long)host_raddr);
 
-    page_set_flags(raddr, raddr + shm_info.shm_segsz,
+    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                    PAGE_VALID | PAGE_RESET | PAGE_READ |
                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
 
@@ -XXX,XX +XXX,XX @@ static inline abi_long do_shmdt(abi_ulong shmaddr)
     for (i = 0; i < N_SHM_REGIONS; ++i) {
         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
             shm_regions[i].in_use = false;
-            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
+            page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
             break;
         }
     }
--
2.34.1


Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                 | 4 +---
 tcg/i386/tcg-target.c.inc | 8 +++-----
 2 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
 {
     TCGTemp *ts;
 
-    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
-        tcg_abort();
-    }
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
 
     ts = tcg_global_alloc(s);
     ts->base_type = type;
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
     }
 }
 
-/* Use SMALL != 0 to force a short forward branch. */
-static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small)
+/* Set SMALL to force a short forward branch. */
+static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small)
 {
     int32_t val, val1;
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, int small)
         }
         tcg_out8(s, val1);
     } else {
-        if (small) {
-            tcg_abort();
-        }
+        tcg_debug_assert(!small);
        if (opc == -1) {
             tcg_out8(s, OPC_JMP_long);
             tcg_out32(s, val - 5);
--
2.34.1

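The overflow described in the page_set_flags commit message above is easy to
demonstrate in isolation.  A minimal standalone sketch, assuming a
hypothetical 32-bit guest with 4 KiB pages (illustrative constants, not
QEMU's actual types):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK ((uint32_t)-1 << TARGET_PAGE_BITS)

int main(void)
{
    uint32_t start = 0xfffff000;  /* last page of a 32-bit address space */
    uint32_t len = 0x1000;

    /* Exclusive "end" wraps to 0, so checks like start < end misfire. */
    uint32_t end = start + len;
    printf("end   = 0x%08" PRIx32 "  (wrapped)\n", end);

    /* Inclusive "last" cannot wrap, and the range check stays simple. */
    uint32_t last = start + len - 1;
    assert(start <= last);

    /* The rounding idiom from the patch: round start down to its page,
     * round last up to the end of its page, with no +1/-1 adjustment. */
    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;
    printf("start = 0x%08" PRIx32 ", last = 0x%08" PRIx32 "\n", start, last);
    return 0;
}

With last = 0xffffffff the whole-address-space case becomes representable,
which is what the reserved_va and ARM commpage changes in the pre-PR rely on.
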
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h            |  6 ------
 target/i386/tcg/translate.c  | 20 ++++++++++----------
 target/s390x/tcg/translate.c |  4 ++--
 tcg/optimize.c               | 10 ++++------
 tcg/tcg.c                    |  8 ++++----
 tcg/aarch64/tcg-target.c.inc |  4 ++--
 tcg/arm/tcg-target.c.inc     |  2 +-
 tcg/i386/tcg-target.c.inc    | 14 +++++++-------
 tcg/mips/tcg-target.c.inc    | 14 +++++++-------
 tcg/ppc/tcg-target.c.inc     |  8 ++++----
 tcg/s390x/tcg-target.c.inc   |  8 ++++----
 tcg/sparc64/tcg-target.c.inc |  2 +-
 tcg/tci/tcg-target.c.inc     |  2 +-
 13 files changed, 47 insertions(+), 55 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ typedef struct TCGTargetOpDef {
     const char *args_ct_str[TCG_MAX_OP_ARGS];
 } TCGTargetOpDef;
 
-#define tcg_abort() \
-do {\
-    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
-    abort();\
-} while (0)
-
 bool tcg_op_supported(TCGOpcode op);
 
 void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest,
         break;
 #endif
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
     return cpu_regs[reg];
 }
@@ -XXX,XX +XXX,XX @@ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
         }
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 
     if (ovr_seg >= 0) {
@@ -XXX,XX +XXX,XX @@ static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
         gen_helper_inl(v, cpu_env, n);
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
         gen_helper_outl(cpu_env, v, n);
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
             break;
 #endif
         default:
-            tcg_abort();
+            g_assert_not_reached();
         }
     } else {
         switch (ot) {
@@ -XXX,XX +XXX,XX @@ static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
             break;
 #endif
         default:
-            tcg_abort();
+            g_assert_not_reached();
         }
     }
     /* store */
@@ -XXX,XX +XXX,XX @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
         break;
 
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 
  done:
@@ -XXX,XX +XXX,XX @@ static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
         ret = x86_ldl_code(env, s);
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
     return ret;
 }
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
             gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
             break;
         default:
-            tcg_abort();
+            g_assert_not_reached();
         }
         break;
     case 0x99: /* CDQ/CWD */
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
             gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
             break;
         default:
-            tcg_abort();
+            g_assert_not_reached();
         }
         break;
     case 0x1af: /* imul Gv, Ev */
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static int get_mem_index(DisasContext *s)
     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
         return MMU_HOME_IDX;
     default:
-        tcg_abort();
+        g_assert_not_reached();
         break;
     }
 #endif
@@ -XXX,XX +XXX,XX @@ static void gen_op_calc_cc(DisasContext *s)
         gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 
     /* We now have cc in cc_op as constant */
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
         return (uint64_t)x % ((uint64_t)y ? : 1);
 
     default:
-        fprintf(stderr,
-                "Unrecognized operation %d in do_constant_folding.\n", op);
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
     case TCG_COND_GTU:
         return x > y;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
     case TCG_COND_GTU:
         return x > y;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
     case TCG_COND_EQ:
         return 1;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
 
     case TEMP_VAL_DEAD:
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
     ts->mem_coherent = 1;
 }
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
         }
     }
 
-    tcg_abort();
+    g_assert_not_reached();
 }
 
 static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
             }
         }
     }
-    tcg_abort();
+    g_assert_not_reached();
 }
 
 /* Make sure the temporary is in a register. If needed, allocate the register
@@ -XXX,XX +XXX,XX @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
         break;
     case TEMP_VAL_DEAD:
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
     set_temp_val_reg(s, ts, reg);
 }
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
         tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
         tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
         tcg_patch8(code_ptr, value);
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
         tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
         tcg_out32(s, val);
     } else {
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tgen_arithi(TCGContext *s, int c, int r0,
         return;
     }
 
-    tcg_abort();
+    g_assert_not_reached();
 }
 
 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                         label_this, small);
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
     tcg_out_label(s, label_next);
 }
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
         }
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 
     /* Jump to the code corresponding to next IR of qemu_st */
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             /* load bits 0..15 */
             tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
         } else {
-            tcg_abort();
+            g_assert_not_reached();
         }
         break;
 
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 
 #undef OP_32_64
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
         break;
 
     default:
-        tcg_abort();
+        g_assert_not_reached();
         break;
     }
 }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
         break;
 
     default:
-        tcg_abort();
+        g_assert_not_reached();
         break;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
         }
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
     i = tcg_out_call_iarg_imm(s, i, oi);
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
         }
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
         break;
 
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
         break;
 
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
         break;
 
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
     op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
         break;
 
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
         break;
 
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
         break;
 
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
         break;
 
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
         tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
         break;
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
     tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     default:
-        tcg_abort();
+        g_assert_not_reached();
     }
 }
 
--
2.34.1

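The conversion is mechanical, but the two styles are not quite equivalent:
tcg_abort() always printed file/line and called abort(), while
g_assert_not_reached() reports the location via glib and, in builds with
assertions disabled, can compile down to an unreachability hint.  A sketch of
the pattern at a typical switch default, using an illustrative enum and
function rather than real TCG definitions:

#include <glib.h>
#include <stdio.h>

typedef enum { COND_EQ, COND_NE } ExampleCond;  /* illustrative only */

static gboolean fold_cond(ExampleCond c, int x, int y)
{
    switch (c) {
    case COND_EQ:
        return x == y;
    case COND_NE:
        return x != y;
    default:
        /* Before this patch: tcg_abort(), i.e. fprintf(stderr, ...) plus
         * abort().  After: glib logs the location and aborts; with
         * G_DISABLE_ASSERT, newer glib lowers this to
         * __builtin_unreachable() on GCC/clang. */
        g_assert_not_reached();
    }
}

int main(void)
{
    printf("%d\n", fold_cond(COND_EQ, 2, 2));
    return 0;
}
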
We will need a backend interface for performing 8-bit sign-extend.
Use it in tcg_reg_alloc_op in the meantime.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        | 21 ++++++++++++++++-----
 tcg/aarch64/tcg-target.c.inc     | 11 +++++++----
 tcg/arm/tcg-target.c.inc         | 10 ++++------
 tcg/i386/tcg-target.c.inc        | 10 +++++-----
 tcg/loongarch64/tcg-target.c.inc | 11 ++++-------
 tcg/mips/tcg-target.c.inc        | 12 ++++++++----
 tcg/ppc/tcg-target.c.inc         | 10 ++++------
 tcg/riscv/tcg-target.c.inc       |  9 +++------
 tcg/s390x/tcg-target.c.inc       | 10 +++-------
 tcg/sparc64/tcg-target.c.inc     |  7 +++++++
 tcg/tci/tcg-target.c.inc         | 21 ++++++++++++++++++++-
 11 files changed, 81 insertions(+), 51 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_movi(TCGContext *s, TCGType type,
                          TCGReg ret, tcg_target_long arg);
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     }
 
     /* emit instruction */
-    if (def->flags & TCG_OPF_VECTOR) {
-        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
-                       new_args, const_args);
-    } else {
-        tcg_out_op(s, op->opc, new_args, const_args);
+    switch (op->opc) {
+    case INDEX_op_ext8s_i32:
+        tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
+        break;
+    case INDEX_op_ext8s_i64:
+        tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
+        break;
+    default:
+        if (def->flags & TCG_OPF_VECTOR) {
+            tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
+                           new_args, const_args);
+        } else {
+            tcg_out_op(s, op->opc, new_args, const_args);
+        }
+        break;
     }
 
     /* move the outputs in the correct register if needed */
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits,
     tcg_out_sbfm(s, ext, rd, rn, 0, bits);
 }
 
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn)
+{
+    tcg_out_sxt(s, type, MO_8, rd, rn);
+}
+
 static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits,
                                TCGReg rd, TCGReg rn)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
-    case INDEX_op_ext8s_i64:
-    case INDEX_op_ext8s_i32:
-        tcg_out_sxt(s, ext, MO_8, a0, a1);
-        break;
     case INDEX_op_ext16s_i64:
     case INDEX_op_ext16s_i32:
         tcg_out_sxt(s, ext, MO_16, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_udiv(TCGContext *s, ARMCond cond,
     tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
 }
 
-static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
+static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
 {
     /* sxtb */
-    tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
+    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
 }
 
 static void __attribute__((unused))
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     datahi = lb->datahi_reg;
     switch (opc & MO_SSIZE) {
     case MO_SB:
-        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
+        tcg_out_ext8s(s, TCG_TYPE_I32, datalo, TCG_REG_R0);
         break;
     case MO_SW:
         tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_bswap32(s, COND_AL, args[0], args[1]);
         break;
 
-    case INDEX_op_ext8s_i32:
-        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
-        break;
     case INDEX_op_ext16s_i32:
         tcg_out_ext16s(s, COND_AL, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
     tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
 }
 
-static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
 {
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
     /* movsbl */
     tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
     tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     data_reg = l->datalo_reg;
     switch (opc & MO_SSIZE) {
     case MO_SB:
-        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, rexw);
+        tcg_out_ext8s(s, l->type, data_reg, TCG_REG_EAX);
         break;
     case MO_SW:
         tcg_out_ext16s(s, data_reg, TCG_REG_EAX, rexw);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
         break;
 
-    OP_32_64(ext8s):
-        tcg_out_ext8s(s, a0, a1, rexw);
-        break;
     OP_32_64(ext16s):
         tcg_out_ext16s(s, a0, a1, rexw);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
     tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
 }
 
-static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
     tcg_out_opc_sext_b(s, ret, arg);
 }
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
     switch (opc & MO_SSIZE) {
     case MO_SB:
-        tcg_out_ext8s(s, l->datalo_reg, TCG_REG_A0);
+        tcg_out_ext8s(s, type, l->datalo_reg, TCG_REG_A0);
         break;
     case MO_SW:
         tcg_out_ext16s(s, l->datalo_reg, TCG_REG_A0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
         break;
 
-    case INDEX_op_ext8s_i32:
-    case INDEX_op_ext8s_i64:
-        tcg_out_ext8s(s, a0, a1);
-        break;
-
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext8u_i64:
         tcg_out_ext8u(s, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
     }
 }
 
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
+{
+    tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32);
+    tcg_out_opc_reg(s, OPC_SEB, rd, TCG_REG_ZERO, rs);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_not_i64:
         i1 = OPC_NOR;
         goto do_unary;
-    case INDEX_op_ext8s_i32:
-    case INDEX_op_ext8s_i64:
-        i1 = OPC_SEB;
-        goto do_unary;
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
         i1 = OPC_SEH;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
     tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
 }
 
-static inline void tcg_out_ext8s(TCGContext *s, TCGReg dst, TCGReg src)
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
 {
     tcg_out32(s, EXTSB | RA(dst) | RS(src));
 }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld8s_i64:
         tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
-        tcg_out_ext8s(s, args[0], args[0]);
+        tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]);
         break;
     case INDEX_op_ld16u_i32:
     case INDEX_op_ld16u_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_qemu_st(s, args, true);
         break;
 
-    case INDEX_op_ext8s_i32:
-    case INDEX_op_ext8s_i64:
-        tcg_out_ext8s(s, args[0], args[1]);
-        break;
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
         tcg_out_ext16s(s, args[0], args[1]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
     tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
 }
 
-static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
     tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
     tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_ext32u(s, a0, a1);
         break;
 
-    case INDEX_op_ext8s_i32:
-    case INDEX_op_ext8s_i64:
-        tcg_out_ext8s(s, a0, a1);
-        break;
-
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
         tcg_out_ext16s(s, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
     tcg_out16(s, (ofs << 8) | (RIEf_RISBG & 0xff));
 }
 
-static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
 {
     tcg_out_insn(s, RRE, LGBR, dest, src);
 }
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
-    case INDEX_op_ext8s_i32:
-        tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
-        break;
     case INDEX_op_ext16s_i32:
         tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
-    case INDEX_op_ext8s_i64:
-        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
-        break;
     case INDEX_op_ext16s_i64:
         tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
     tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
 }
 
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
+{
+    g_assert_not_reached();
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
     }
 }
 
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
+{
+    switch (type) {
+    case TCG_TYPE_I32:
+        tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32);
+        tcg_out_op_rr(s, INDEX_op_ext8s_i32, rd, rs);
+        break;
+#if TCG_TARGET_REG_BITS == 64
+    case TCG_TYPE_I64:
+        tcg_debug_assert(TCG_TARGET_HAS_ext8s_i64);
+        tcg_out_op_rr(s, INDEX_op_ext8s_i64, rd, rs);
+        break;
+#endif
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     CASE_32_64(neg)      /* Optional (TCG_TARGET_HAS_neg_*). */
     CASE_32_64(not)      /* Optional (TCG_TARGET_HAS_not_*). */
-    CASE_32_64(ext8s)    /* Optional (TCG_TARGET_HAS_ext8s_*). */
     CASE_32_64(ext8u)    /* Optional (TCG_TARGET_HAS_ext8u_*). */
     CASE_32_64(ext16s)   /* Optional (TCG_TARGET_HAS_ext16s_*). */
     CASE_32_64(ext16u)   /* Optional (TCG_TARGET_HAS_ext16u_*). */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
+    case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8s_i64:
     default:
         g_assert_not_reached();
     }
--
2.34.1

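The semantics every backend's tcg_out_ext8s must implement -- and, in the
next patch, the zero-extending counterpart tcg_out_ext8u -- are easy to state
in portable C.  A minimal host-side sketch with illustrative helper names;
these model what the emitted code computes, they are not the emitters
themselves:

#include <assert.h>
#include <stdint.h>

/* Sign-extend: replicate bit 7 of the source through the upper bits. */
static uint64_t ext8s(uint64_t src)
{
    return (uint64_t)(int64_t)(int8_t)src;
}

/* Zero-extend: keep the low 8 bits, clear the rest. */
static uint64_t ext8u(uint64_t src)
{
    return src & 0xff;
}

int main(void)
{
    assert(ext8s(0xabcd80) == 0xffffffffffffff80ull);
    assert(ext8s(0x7f) == 0x7f);
    assert(ext8u(0xabcd80) == 0x80);

    /* The riscv fallback above uses the classic shift pair: slliw/sraiw
     * by 24 computes (int32_t)((uint32_t)x << 24) >> 24 on the low word. */
    assert(((int32_t)((uint32_t)0x80 << 24) >> 24) == -128);
    return 0;
}

Most hosts have a single instruction for each direction (sxtb, movsbl/movzbl,
extsb/andi, seb/andi), which is why promoting these operations to named
backend hooks pays off once tcg_out_movext later combines them.
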
We will need a backend interface for performing 8-bit zero-extend.
Use it in tcg_reg_alloc_op in the meantime.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        |  5 +++++
 tcg/aarch64/tcg-target.c.inc     | 11 +++++++----
 tcg/arm/tcg-target.c.inc         | 12 +++++++++---
 tcg/i386/tcg-target.c.inc        |  7 +++----
 tcg/loongarch64/tcg-target.c.inc |  7 ++-----
 tcg/mips/tcg-target.c.inc        |  9 ++++++++-
 tcg/ppc/tcg-target.c.inc         |  7 +++++++
 tcg/riscv/tcg-target.c.inc       |  7 ++-----
 tcg/s390x/tcg-target.c.inc       | 14 +++++---------
 tcg/sparc64/tcg-target.c.inc     |  9 ++++++++-
 tcg/tci/tcg-target.c.inc         | 14 +++++++++++++-
 11 files changed, 69 insertions(+), 33 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_movi(TCGContext *s, TCGType type,
                          TCGReg ret, tcg_target_long arg);
 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
+static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_ext8s_i64:
         tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
         break;
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
+        tcg_out_ext8u(s, new_args[0], new_args[1]);
+        break;
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits,
     tcg_out_ubfm(s, 0, rd, rn, 0, bits);
 }
 
+static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    tcg_out_uxt(s, MO_8, rd, rn);
+}
+
 static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
                             TCGReg rn, int64_t aimm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext32s_i64:
         tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
         break;
-    case INDEX_op_ext8u_i64:
-    case INDEX_op_ext8u_i32:
-        tcg_out_uxt(s, MO_8, a0, a1);
-        break;
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext16u_i32:
         tcg_out_uxt(s, MO_16, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
     case INDEX_op_ext8s_i64:
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
     tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
 }
 
+static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
+}
+
 static void __attribute__((unused))
-tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
+tcg_out_ext8u_cond(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
 {
     tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
 }
@@ -XXX,XX +XXX,XX @@ static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg) \
 
 DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
     (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
-DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
-    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
+DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u_cond,
+    (tcg_out_ext8u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
 DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
     (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
 DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
+    case INDEX_op_ext8u_i32:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_rolw_8(TCGContext *s, int reg)
     tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
 }
 
-static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
+static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
 {
     /* movzbl */
     tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     OP_32_64(ext16s):
         tcg_out_ext16s(s, a0, a1, rexw);
         break;
-    OP_32_64(ext8u):
-        tcg_out_ext8u(s, a0, a1);
-        break;
     OP_32_64(ext16u):
         tcg_out_ext16u(s, a0, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
     case INDEX_op_ext8s_i64:
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
         break;
 
-    case INDEX_op_ext8u_i32:
-    case INDEX_op_ext8u_i64:
-        tcg_out_ext8u(s, a0, a1);
-        break;
-
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
         tcg_out_ext16s(s, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
     case INDEX_op_ext8s_i64:
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
     tcg_out_opc_reg(s, OPC_SEB, rd, TCG_REG_ZERO, rs);
 }
 
+static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_out_opc_imm(s, OPC_ANDI, rd, rs, 0xff);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
     if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
         tmp = tcg_target_call_iarg_regs[i];
     }
-    tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
+    tcg_out_ext8u(s, tmp, arg);
     return tcg_out_call_iarg_reg(s, i, tmp);
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
     case INDEX_op_ext8s_i64:
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
     tcg_out32(s, EXTSB | RA(dst) | RS(src));
 }
 
+static void tcg_out_ext8u(TCGContext *s, TCGReg dst, TCGReg src)
+{
+    tcg_out32(s, ANDI | SAI(src, dst, 0xff));
+}
+
 static inline void tcg_out_ext16s(TCGContext *s, TCGReg dst, TCGReg src)
 {
     tcg_out32(s, EXTSH | RA(dst) | RS(src));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
     case INDEX_op_ext8s_i64:
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_qemu_st(s, args, true);
         break;
 
-    case INDEX_op_ext8u_i32:
-    case INDEX_op_ext8u_i64:
-        tcg_out_ext8u(s, a0, a1);
-        break;
-
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
         tcg_out_ext16u(s, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
     case INDEX_op_ext8s_i64:
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
     tcg_out_insn(s, RRE, LGBR, dest, src);
 }
 
-static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
+static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
 {
     tcg_out_insn(s, RRE, LLGCR, dest, src);
 }
@@ -XXX,XX +XXX,XX @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
         return;
     }
     if ((val & valid) == 0xff) {
-        tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
+        tcg_out_ext8u(s, dest, dest);
         return;
     }
     if ((val & valid) == 0xffff) {
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     }
     switch (opc & MO_SIZE) {
     case MO_UB:
-        tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
+        tcg_out_ext8u(s, TCG_REG_R4, data_reg);
         break;
     case MO_UW:
         tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16s_i32:
         tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
         break;
-    case INDEX_op_ext8u_i32:
-        tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
-        break;
     case INDEX_op_ext16u_i32:
         tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext32s_i64:
         tgen_ext32s(s, args[0], args[1]);
         break;
-    case INDEX_op_ext8u_i64:
-        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
-        break;
     case INDEX_op_ext16u_i64:
         tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
     case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
     case INDEX_op_ext8s_i64:
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext8u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
     g_assert_not_reached();
 }
 
+static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void emit_extend(TCGContext *s, TCGReg r, int op)
      */
     switch (op & MO_SIZE) {
     case MO_8:
-        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
+        tcg_out_ext8u(s, r, r);
         break;
     case MO_16:
         tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
344
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
345
case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
346
case INDEX_op_ext8s_i64:
347
+ case INDEX_op_ext8u_i32:
348
+ case INDEX_op_ext8u_i64:
349
default:
350
g_assert_not_reached();
351
}
352
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
353
index XXXXXXX..XXXXXXX 100644
354
--- a/tcg/tci/tcg-target.c.inc
355
+++ b/tcg/tci/tcg-target.c.inc
356
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
357
}
358
}
359
360
+static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
361
+{
362
+ if (TCG_TARGET_REG_BITS == 64) {
363
+ tcg_debug_assert(TCG_TARGET_HAS_ext8u_i64);
364
+ tcg_out_op_rr(s, INDEX_op_ext8u_i64, rd, rs);
365
+ } else {
366
+ tcg_debug_assert(TCG_TARGET_HAS_ext8u_i32);
367
+ tcg_out_op_rr(s, INDEX_op_ext8u_i32, rd, rs);
368
+ }
369
+}
370
+
371
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
372
tcg_target_long imm)
373
{
374
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
375
376
CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */
377
CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */
378
- CASE_32_64(ext8u) /* Optional (TCG_TARGET_HAS_ext8u_*). */
379
CASE_32_64(ext16s) /* Optional (TCG_TARGET_HAS_ext16s_*). */
380
CASE_32_64(ext16u) /* Optional (TCG_TARGET_HAS_ext16u_*). */
381
CASE_64(ext32s) /* Optional (TCG_TARGET_HAS_ext32s_i64). */
382
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
383
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
384
case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
385
case INDEX_op_ext8s_i64:
386
+ case INDEX_op_ext8u_i32:
387
+ case INDEX_op_ext8u_i64:
388
default:
389
g_assert_not_reached();
390
}
391
--
392
2.34.1
393
394
We will need a backend interface for performing 16-bit sign-extend.
Use it in tcg_reg_alloc_op in the meantime.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        |  7 +++++++
 tcg/aarch64/tcg-target.c.inc     | 13 ++++++++-----
 tcg/arm/tcg-target.c.inc         | 10 ++++------
 tcg/i386/tcg-target.c.inc        | 16 ++++++++--------
 tcg/loongarch64/tcg-target.c.inc | 13 +++++--------
 tcg/mips/tcg-target.c.inc        | 11 ++++++++---
 tcg/ppc/tcg-target.c.inc         | 12 +++++-------
 tcg/riscv/tcg-target.c.inc       |  9 +++------
 tcg/s390x/tcg-target.c.inc       | 12 ++++--------
 tcg/sparc64/tcg-target.c.inc     |  7 +++++++
 tcg/tci/tcg-target.c.inc         | 21 ++++++++++++++++++++-
 11 files changed, 79 insertions(+), 52 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_movi(TCGContext *s, TCGType type,
                          TCGReg ret, tcg_target_long arg);
 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_ext8u_i64:
         tcg_out_ext8u(s, new_args[0], new_args[1]);
         break;
+    case INDEX_op_ext16s_i32:
+        tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
+        break;
+    case INDEX_op_ext16s_i64:
+        tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
+        break;
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn)
     tcg_out_sxt(s, type, MO_8, rd, rn);
 }
 
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn)
+{
+    tcg_out_sxt(s, type, MO_16, rd, rn);
+}
+
 static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits,
                                TCGReg rd, TCGReg rn)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1);
         if (a2 & TCG_BSWAP_OS) {
             /* Output must be sign-extended. */
-            tcg_out_sxt(s, ext, MO_16, a0, a0);
+            tcg_out_ext16s(s, ext, a0, a0);
         } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
             /* Output must be zero-extended, but input isn't. */
             tcg_out_uxt(s, MO_16, a0, a0);
         }
         break;
 
-    case INDEX_op_ext16s_i64:
-    case INDEX_op_ext16s_i32:
-        tcg_out_sxt(s, ext, MO_16, a0, a1);
-        break;
     case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8s_i64:
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16s_i64:
+    case INDEX_op_ext16s_i32:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_out_ext8u_cond(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
     tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
 }
 
-static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
+static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
 {
     /* sxth */
-    tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
+    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
 }
 
 static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
         tcg_out_ext8s(s, TCG_TYPE_I32, datalo, TCG_REG_R0);
         break;
     case MO_SW:
-        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
+        tcg_out_ext16s(s, TCG_TYPE_I32, datalo, TCG_REG_R0);
         break;
     default:
         tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_bswap32(s, COND_AL, args[0], args[1]);
         break;
 
-    case INDEX_op_ext16s_i32:
-        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
-        break;
     case INDEX_op_ext16u_i32:
         tcg_out_ext16u(s, COND_AL, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_tb:    /* Always emitted via tcg_out_goto_tb. */
     case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
     case INDEX_op_ext8u_i32:
+    case INDEX_op_ext16s_i32:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
     tcg_out_modrm(s, OPC_MOVZWL, dest, src);
 }
 
-static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
 {
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
     /* movsw[lq] */
     tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
 }
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     MemOp opc = get_memop(oi);
     TCGReg data_reg;
     tcg_insn_unit **label_ptr = &l->label_ptr[0];
-    int rexw = (l->type == TCG_TYPE_I64 ? P_REXW : 0);
 
     /* resolve label address */
     tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
         tcg_out_ext8s(s, l->type, data_reg, TCG_REG_EAX);
         break;
     case MO_SW:
-        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, rexw);
+        tcg_out_ext16s(s, l->type, data_reg, TCG_REG_EAX);
         break;
 #if TCG_TARGET_REG_BITS == 64
     case MO_SL:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                    TCGReg base, int index, intptr_t ofs,
                                    int seg, bool is64, MemOp memop)
 {
+    TCGType type = is64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
     bool use_movbe = false;
     int rexw = is64 * P_REXW;
     int movop = OPC_MOVL_GvEv;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
         if (use_movbe) {
             tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
                                      datalo, base, index, 0, ofs);
-            tcg_out_ext16s(s, datalo, datalo, rexw);
+            tcg_out_ext16s(s, type, datalo, datalo);
         } else {
             tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg,
                                      datalo, base, index, 0, ofs);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
         break;
 
-    OP_32_64(ext16s):
-        tcg_out_ext16s(s, a0, a1, rexw);
-        break;
     OP_32_64(ext16u):
         tcg_out_ext16u(s, a0, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (a1 < 4 && a0 < 8) {
             tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
         } else {
-            tcg_out_ext16s(s, a0, a1, 0);
+            tcg_out_ext16s(s, TCG_TYPE_I32, a0, a1);
             tcg_out_shifti(s, SHIFT_SAR, a0, 8);
         }
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8s_i64:
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
     tcg_out_opc_sext_b(s, ret, arg);
 }
 
-static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
     tcg_out_opc_sext_h(s, ret, arg);
 }
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
         tcg_out_ext8s(s, type, l->datalo_reg, TCG_REG_A0);
         break;
     case MO_SW:
-        tcg_out_ext16s(s, l->datalo_reg, TCG_REG_A0);
+        tcg_out_ext16s(s, type, l->datalo_reg, TCG_REG_A0);
         break;
     case MO_SL:
         tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
         break;
 
-    case INDEX_op_ext16s_i32:
-    case INDEX_op_ext16s_i64:
-        tcg_out_ext16s(s, a0, a1);
-        break;
-
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
         tcg_out_ext16u(s, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_bswap16_i64:
         tcg_out_opc_revb_2h(s, a0, a1);
         if (a2 & TCG_BSWAP_OS) {
-            tcg_out_ext16s(s, a0, a0);
+            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
         } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
             tcg_out_ext16u(s, a0, a0);
         }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8s_i64:
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_opc_imm(s, OPC_ANDI, rd, rs, 0xff);
 }
 
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
+{
+    tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32);
+    tcg_out_opc_reg(s, OPC_SEH, rd, TCG_REG_ZERO, rs);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_not_i64:
         i1 = OPC_NOR;
         goto do_unary;
-    case INDEX_op_ext16s_i32:
-    case INDEX_op_ext16s_i64:
-        i1 = OPC_SEH;
     do_unary:
         tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8s_i64:
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8u(TCGContext *s, TCGReg dst, TCGReg src)
     tcg_out32(s, ANDI | SAI(src, dst, 0xff));
 }
 
-static inline void tcg_out_ext16s(TCGContext *s, TCGReg dst, TCGReg src)
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
 {
     tcg_out32(s, EXTSH | RA(dst) | RS(src));
 }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
     if (have_isa_3_10) {
         tcg_out32(s, BRH | RA(dst) | RS(src));
         if (flags & TCG_BSWAP_OS) {
-            tcg_out_ext16s(s, dst, dst);
+            tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
         } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
             tcg_out_ext16u(s, dst, dst);
         }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
     tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
 
     if (flags & TCG_BSWAP_OS) {
-        tcg_out_ext16s(s, dst, tmp);
+        tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
     } else {
         tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_qemu_st(s, args, true);
         break;
 
-    case INDEX_op_ext16s_i32:
-    case INDEX_op_ext16s_i64:
-        tcg_out_ext16s(s, args[0], args[1]);
-        break;
     case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         tcg_out_ext32s(s, args[0], args[1]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8s_i64:
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
     tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
 }
 
-static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
     tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
     tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_ext32u(s, a0, a1);
         break;
 
-    case INDEX_op_ext16s_i32:
-    case INDEX_op_ext16s_i64:
-        tcg_out_ext16s(s, a0, a1);
-        break;
-
     case INDEX_op_ext32s_i64:
     case INDEX_op_extrl_i64_i32:
     case INDEX_op_ext_i32_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8s_i64:
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
     tcg_out_insn(s, RRE, LLGCR, dest, src);
 }
 
-static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
 {
     tcg_out_insn(s, RRE, LGHR, dest, src);
 }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
     case MO_SW | MO_BSWAP:
         /* swapped sign-extended halfword load */
         tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
-        tgen_ext16s(s, TCG_TYPE_I64, data, data);
+        tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
         break;
     case MO_SW:
         tcg_out_insn(s, RXY, LGH, data, base, index, disp);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
-    case INDEX_op_ext16s_i32:
-        tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
-        break;
     case INDEX_op_ext16u_i32:
         tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
-    case INDEX_op_ext16s_i64:
-        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
-        break;
     case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
         tgen_ext32s(s, args[0], args[1]);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8s_i64:
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
     g_assert_not_reached();
 }
 
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
+{
+    g_assert_not_reached();
+}
+
 static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
 {
     tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8s_i64:
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
     }
 }
 
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
+{
+    switch (type) {
+    case TCG_TYPE_I32:
+        tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32);
+        tcg_out_op_rr(s, INDEX_op_ext16s_i32, rd, rs);
+        break;
+#if TCG_TARGET_REG_BITS == 64
+    case TCG_TYPE_I64:
+        tcg_debug_assert(TCG_TARGET_HAS_ext16s_i64);
+        tcg_out_op_rr(s, INDEX_op_ext16s_i64, rd, rs);
+        break;
+#endif
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     CASE_32_64(neg)    /* Optional (TCG_TARGET_HAS_neg_*). */
     CASE_32_64(not)    /* Optional (TCG_TARGET_HAS_not_*). */
-    CASE_32_64(ext16s) /* Optional (TCG_TARGET_HAS_ext16s_*). */
     CASE_32_64(ext16u) /* Optional (TCG_TARGET_HAS_ext16u_*). */
     CASE_64(ext32s)    /* Optional (TCG_TARGET_HAS_ext32s_i64). */
    CASE_64(ext32u)    /* Optional (TCG_TARGET_HAS_ext32u_i64). */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8s_i64:
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16s_i64:
     default:
         g_assert_not_reached();
     }
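
The new hook always has the same contract: sign-extend the low 16 bits of
'src' into 'dst' for the given type. As a minimal sketch, not part of the
patch, a target with no dedicated sign-extension instruction can satisfy the
interface with a shift pair, which is exactly what the riscv implementation
above does with SLLIW/SRAIW; emit_shli() and emit_srai() here are
hypothetical stand-ins for a backend's real instruction emitters:

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    /*
     * Left-justify the halfword, then arithmetic-shift it back down,
     * replicating bit 15 across the high bits of the register.
     */
    int width = (type == TCG_TYPE_I32 ? 32 : 64);

    emit_shli(s, dst, src, width - 16);   /* hypothetical emitter */
    emit_srai(s, dst, dst, width - 16);   /* hypothetical emitter */
}
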
From: Emilio Cota <cota@braap.org>

The only reason to add this implementation is to control the memory allocator
used. Some users (e.g. TCG) cannot work reliably in multi-threaded
environments (e.g. forking in user-mode) with GTree's allocator, GSlice.
See https://gitlab.com/qemu-project/qemu/-/issues/285 for details.

Importing GTree is a temporary workaround until GTree migrates away
from GSlice.

This implementation is identical to that in glib v2.75.0, except that
we don't import recent additions to the API nor deprecated API calls,
none of which are used in QEMU.

I've imported tests from glib and added a benchmark just to
make sure that performance is similar. Note: it cannot be identical
because (1) we are not using GSlice, (2) we use different compilation flags
(e.g. -fPIC) and (3) we're linking statically.

$ cat /proc/cpuinfo| grep 'model name' | head -1
model name      : AMD Ryzen 7 PRO 5850U with Radeon Graphics
$ echo '0' | sudo tee /sys/devices/system/cpu/cpufreq/boost
$ tests/bench/qtree-bench

 Tree         Op      32            1024           4096          131072         1048576
------------------------------------------------------------------------------------------------
GTree     Lookup    83.23          43.08          25.31          19.40          16.22
QTree     Lookup   113.42 (1.36x)  53.83 (1.25x)  28.38 (1.12x)  17.64 (0.91x)  13.04 (0.80x)
GTree     Insert    44.23          29.37          25.83          19.49          17.03
QTree     Insert    46.87 (1.06x)  25.62 (0.87x)  24.29 (0.94x)  16.83 (0.86x)  12.97 (0.76x)
GTree     Remove    53.27          35.15          31.43          24.64          16.70
QTree     Remove    57.32 (1.08x)  41.76 (1.19x)  38.37 (1.22x)  29.30 (1.19x)  15.07 (0.90x)
GTree  RemoveAll   135.44         127.52         126.72         120.11          64.34
QTree  RemoveAll   127.15 (0.94x) 110.37 (0.87x) 107.97 (0.85x)  97.13 (0.81x)  55.10 (0.86x)
GTree   Traverse   277.71         276.09         272.78         246.72          98.47
QTree   Traverse   370.33 (1.33x) 411.97 (1.49x) 400.23 (1.47x) 262.82 (1.07x)  78.52 (0.80x)
------------------------------------------------------------------------------------------------

As a sanity check, the same benchmark when Glib's version
is >= $glib_dropped_gslice_version (i.e. QTree == GTree):

 Tree         Op      32            1024           4096          131072         1048576
------------------------------------------------------------------------------------------------
GTree     Lookup    82.72          43.09          24.18          19.73          16.09
QTree     Lookup    81.82 (0.99x)  43.10 (1.00x)  24.20 (1.00x)  19.76 (1.00x)  16.26 (1.01x)
GTree     Insert    45.07          29.62          26.34          19.90          17.18
QTree     Insert    45.72 (1.01x)  29.60 (1.00x)  26.38 (1.00x)  19.71 (0.99x)  17.20 (1.00x)
GTree     Remove    54.48          35.36          31.77          24.97          16.95
QTree     Remove    54.46 (1.00x)  35.32 (1.00x)  31.77 (1.00x)  24.91 (1.00x)  17.15 (1.01x)
GTree  RemoveAll   140.68         127.36         125.43         121.45          68.20
QTree  RemoveAll   140.65 (1.00x) 127.64 (1.00x) 125.01 (1.00x) 121.73 (1.00x)  67.06 (0.98x)
GTree   Traverse   278.68         276.05         266.75         251.65         104.93
QTree   Traverse   278.31 (1.00x) 275.78 (1.00x) 266.42 (1.00x) 247.89 (0.99x) 104.58 (1.00x)
------------------------------------------------------------------------------------------------

Signed-off-by: Emilio Cota <cota@braap.org>
Message-Id: <20230205163758.416992-2-cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 configure                 |   15 +
 meson.build               |    4 +
 include/qemu/qtree.h      |  201 +++++
 tests/bench/qtree-bench.c |  286 ++++++++
 tests/unit/test-qtree.c   |  333 +++++++++
 util/qtree.c              | 1390 +++++++++++++++++++++++++++++++++++++
 tests/bench/meson.build   |    4 +
 tests/unit/meson.build    |    1 +
 util/meson.build          |    1 +
 9 files changed, 2235 insertions(+)
 create mode 100644 include/qemu/qtree.h
 create mode 100644 tests/bench/qtree-bench.c
 create mode 100644 tests/unit/test-qtree.c
 create mode 100644 util/qtree.c
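
To make the failure mode concrete: the hazard is not GTree itself but
GSlice's internal locks across fork(). A hand-wavy repro sketch, not part of
the patch (whether it actually hangs depends on timing):

#include <glib.h>
#include <pthread.h>
#include <unistd.h>

static void *slice_churn(void *arg)
{
    for (;;) {
        gpointer p = g_slice_alloc(64);   /* may hold GSlice-internal locks */
        g_slice_free1(64, p);
    }
    return NULL;
}

int main(void)
{
    pthread_t thr;
    pthread_create(&thr, NULL, slice_churn, NULL);

    if (fork() == 0) {
        /*
         * If the fork landed while slice_churn held a GSlice lock, the
         * child inherits it locked and can hang right here. A plain
         * malloc()-backed tree, as in this import, avoids the problem.
         */
        g_slice_free1(64, g_slice_alloc(64));
        _exit(0);
    }
    return 0;
}
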
diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ safe_stack=""
 use_containers="yes"
 gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb")
 gdb_arches=""
+glib_has_gslice="no"
 
 if test -e "$source_path/.git"
 then
@@ -XXX,XX +XXX,XX @@ for i in $glib_modules; do
     fi
 done
 
+# Check whether glib has gslice, which we have to avoid for correctness.
+# TODO: remove this check and the corresponding workaround (qtree) when
+# the minimum supported glib is >= $glib_dropped_gslice_version.
+glib_dropped_gslice_version=2.75.3
+for i in $glib_modules; do
+    if ! $pkg_config --atleast-version=$glib_dropped_gslice_version $i; then
+        glib_has_gslice="yes"
+        break
+    fi
+done
+
 glib_bindir="$($pkg_config --variable=bindir glib-2.0)"
 if test -z "$glib_bindir" ; then
     glib_bindir="$($pkg_config --variable=prefix glib-2.0)"/bin
@@ -XXX,XX +XXX,XX @@ echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak
 echo "GLIB_LIBS=$glib_libs" >> $config_host_mak
 echo "GLIB_BINDIR=$glib_bindir" >> $config_host_mak
 echo "GLIB_VERSION=$($pkg_config --modversion glib-2.0)" >> $config_host_mak
+if test "$glib_has_gslice" = "yes" ; then
+    echo "HAVE_GLIB_WITH_SLICE_ALLOCATOR=y" >> $config_host_mak
+fi
 echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak
 echo "EXESUF=$EXESUF" >> $config_host_mak
 
diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ glib = declare_dependency(compile_args: config_host['GLIB_CFLAGS'].split(),
                           })
 # override glib dep with the configure results (for subprojects)
 meson.override_dependency('glib-2.0', glib)
+# pass down whether Glib has the slice allocator
+if config_host.has_key('HAVE_GLIB_WITH_SLICE_ALLOCATOR')
+  config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', true)
+endif
 
 gio = not_found
 gdbus_codegen = not_found
diff --git a/include/qemu/qtree.h b/include/qemu/qtree.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/qemu/qtree.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * GLIB - Library of useful routines for C programming
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
+ * file for a list of people on the GLib Team. See the ChangeLog
+ * files for a list of changes. These files are distributed with
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
+ */
+
+/*
+ * QTree is a partial import of Glib's GTree. The parts excluded correspond
+ * to API calls either deprecated (e.g. g_tree_traverse) or recently added
+ * (e.g. g_tree_search_node, added in 2.68); neither have callers in QEMU.
+ *
+ * The reason for this import is to allow us to control the memory allocator
+ * used by the tree implementation. Until Glib 2.75.3, GTree uses Glib's
+ * slice allocator, which causes problems when forking in user-mode;
+ * see https://gitlab.com/qemu-project/qemu/-/issues/285 and glib's
+ * "45b5a6c1e gslice: Remove slice allocator and use malloc() instead".
+ *
+ * TODO: remove QTree when QEMU's minimum Glib version is >= 2.75.3.
+ */
+
+#ifndef QEMU_QTREE_H
+#define QEMU_QTREE_H
+
+#include "qemu/osdep.h"
+
+#ifdef HAVE_GLIB_WITH_SLICE_ALLOCATOR
+
+typedef struct _QTree QTree;
+
+typedef struct _QTreeNode QTreeNode;
+
+typedef gboolean (*QTraverseNodeFunc)(QTreeNode *node,
+                                      gpointer user_data);
+
+/*
+ * Balanced binary trees
+ */
+QTree *q_tree_new(GCompareFunc key_compare_func);
+QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
+                            gpointer key_compare_data);
+QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
+                       gpointer key_compare_data,
+                       GDestroyNotify key_destroy_func,
+                       GDestroyNotify value_destroy_func);
+QTree *q_tree_ref(QTree *tree);
+void q_tree_unref(QTree *tree);
+void q_tree_destroy(QTree *tree);
+void q_tree_insert(QTree *tree,
+                   gpointer key,
+                   gpointer value);
+void q_tree_replace(QTree *tree,
+                    gpointer key,
+                    gpointer value);
+gboolean q_tree_remove(QTree *tree,
+                       gconstpointer key);
+gboolean q_tree_steal(QTree *tree,
+                      gconstpointer key);
+gpointer q_tree_lookup(QTree *tree,
+                       gconstpointer key);
+gboolean q_tree_lookup_extended(QTree *tree,
+                                gconstpointer lookup_key,
+                                gpointer *orig_key,
+                                gpointer *value);
+void q_tree_foreach(QTree *tree,
+                    GTraverseFunc func,
+                    gpointer user_data);
+gpointer q_tree_search(QTree *tree,
+                       GCompareFunc search_func,
+                       gconstpointer user_data);
+gint q_tree_height(QTree *tree);
+gint q_tree_nnodes(QTree *tree);
+
+#else /* !HAVE_GLIB_WITH_SLICE_ALLOCATOR */
+
+typedef GTree QTree;
+typedef GTreeNode QTreeNode;
+typedef GTraverseNodeFunc QTraverseNodeFunc;
+
+static inline QTree *q_tree_new(GCompareFunc key_compare_func)
+{
+    return g_tree_new(key_compare_func);
+}
+
+static inline QTree *q_tree_new_with_data(GCompareDataFunc key_compare_func,
+                                          gpointer key_compare_data)
+{
+    return g_tree_new_with_data(key_compare_func, key_compare_data);
+}
+
+static inline QTree *q_tree_new_full(GCompareDataFunc key_compare_func,
+                                     gpointer key_compare_data,
+                                     GDestroyNotify key_destroy_func,
+                                     GDestroyNotify value_destroy_func)
+{
+    return g_tree_new_full(key_compare_func, key_compare_data,
+                           key_destroy_func, value_destroy_func);
+}
+
+static inline QTree *q_tree_ref(QTree *tree)
+{
+    return g_tree_ref(tree);
+}
+
+static inline void q_tree_unref(QTree *tree)
+{
+    g_tree_unref(tree);
+}
+
+static inline void q_tree_destroy(QTree *tree)
+{
+    g_tree_destroy(tree);
+}
+
+static inline void q_tree_insert(QTree *tree,
+                                 gpointer key,
+                                 gpointer value)
+{
+    g_tree_insert(tree, key, value);
+}
+
+static inline void q_tree_replace(QTree *tree,
+                                  gpointer key,
+                                  gpointer value)
+{
+    g_tree_replace(tree, key, value);
+}
+
+static inline gboolean q_tree_remove(QTree *tree,
+                                     gconstpointer key)
+{
+    return g_tree_remove(tree, key);
+}
+
+static inline gboolean q_tree_steal(QTree *tree,
+                                    gconstpointer key)
+{
+    return g_tree_steal(tree, key);
+}
+
+static inline gpointer q_tree_lookup(QTree *tree,
+                                     gconstpointer key)
+{
+    return g_tree_lookup(tree, key);
+}
+
+static inline gboolean q_tree_lookup_extended(QTree *tree,
+                                              gconstpointer lookup_key,
+                                              gpointer *orig_key,
+                                              gpointer *value)
+{
+    return g_tree_lookup_extended(tree, lookup_key, orig_key, value);
+}
+
+static inline void q_tree_foreach(QTree *tree,
+                                  GTraverseFunc func,
+                                  gpointer user_data)
+{
+    return g_tree_foreach(tree, func, user_data);
+}
+
+static inline gpointer q_tree_search(QTree *tree,
+                                     GCompareFunc search_func,
+                                     gconstpointer user_data)
+{
+    return g_tree_search(tree, search_func, user_data);
+}
+
+static inline gint q_tree_height(QTree *tree)
+{
+    return g_tree_height(tree);
+}
+
+static inline gint q_tree_nnodes(QTree *tree)
+{
+    return g_tree_nnodes(tree);
+}
+
+#endif /* HAVE_GLIB_WITH_SLICE_ALLOCATOR */
+
+#endif /* QEMU_QTREE_H */
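
Since QTree mirrors the GTree API one for one, a caller only swaps the
g_tree_ prefix for q_tree_. A minimal usage sketch, not part of the patch,
using only functions declared in the header above:

static gint compare_ints(gconstpointer a, gconstpointer b)
{
    return *(const int *)a - *(const int *)b;
}

static void qtree_example(void)
{
    static int key = 42, value = 7;
    QTree *tree = q_tree_new(compare_ints);

    q_tree_insert(tree, &key, &value);
    g_assert(q_tree_nnodes(tree) == 1);
    g_assert(q_tree_lookup(tree, &key) == &value);
    q_tree_destroy(tree);   /* no destroy notifiers were registered */
}
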
diff --git a/tests/unit/test-qtree.c b/tests/unit/test-qtree.c
630
new file mode 100644
631
index XXXXXXX..XXXXXXX
632
--- /dev/null
633
+++ b/tests/unit/test-qtree.c
634
@@ -XXX,XX +XXX,XX @@
635
+/*
636
+ * SPDX-License-Identifier: LGPL-2.1-or-later
637
+ *
638
+ * Tests for QTree.
639
+ * Original source: glib
640
+ * https://gitlab.gnome.org/GNOME/glib/-/blob/main/glib/tests/tree.c
641
+ * LGPL license.
642
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
643
+ */
644
+
645
+#include "qemu/osdep.h"
646
+#include "qemu/qtree.h"
647
+
648
+static gint my_compare(gconstpointer a, gconstpointer b)
649
+{
650
+ const char *cha = a;
651
+ const char *chb = b;
652
+
653
+ return *cha - *chb;
654
+}
655
+
656
+static gint my_compare_with_data(gconstpointer a,
657
+ gconstpointer b,
658
+ gpointer user_data)
659
+{
660
+ const char *cha = a;
661
+ const char *chb = b;
662
+
663
+ /* just check that we got the right data */
664
+ g_assert(GPOINTER_TO_INT(user_data) == 123);
665
+
666
+ return *cha - *chb;
667
+}
668
+
669
+static gint my_search(gconstpointer a, gconstpointer b)
670
+{
671
+ return my_compare(b, a);
672
+}
673
+
674
+static gpointer destroyed_key;
675
+static gpointer destroyed_value;
676
+static guint destroyed_key_count;
677
+static guint destroyed_value_count;
678
+
679
+static void my_key_destroy(gpointer key)
680
+{
681
+ destroyed_key = key;
682
+ destroyed_key_count++;
683
+}
684
+
685
+static void my_value_destroy(gpointer value)
686
+{
687
+ destroyed_value = value;
688
+ destroyed_value_count++;
689
+}
690
+
691
+static gint my_traverse(gpointer key, gpointer value, gpointer data)
692
+{
693
+ char *ch = key;
694
+
695
+ g_assert((*ch) > 0);
696
+
697
+ if (*ch == 'd') {
698
+ return TRUE;
699
+ }
700
+
701
+ return FALSE;
702
+}
703
+
704
+char chars[] =
705
+ "0123456789"
706
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
707
+ "abcdefghijklmnopqrstuvwxyz";
708
+
709
+char chars2[] =
710
+ "0123456789"
711
+ "abcdefghijklmnopqrstuvwxyz";
712
+
713
+static gint check_order(gpointer key, gpointer value, gpointer data)
714
+{
715
+ char **p = data;
716
+ char *ch = key;
717
+
718
+ g_assert(**p == *ch);
719
+
720
+ (*p)++;
721
+
722
+ return FALSE;
723
+}
724
+
725
+static void test_tree_search(void)
726
+{
727
+ gint i;
728
+ QTree *tree;
729
+ gboolean removed;
730
+ gchar c;
731
+ gchar *p, *d;
732
+
733
+ tree = q_tree_new_with_data(my_compare_with_data, GINT_TO_POINTER(123));
734
+
735
+ for (i = 0; chars[i]; i++) {
736
+ q_tree_insert(tree, &chars[i], &chars[i]);
737
+ }
738
+
739
+ q_tree_foreach(tree, my_traverse, NULL);
740
+
741
+ g_assert(q_tree_nnodes(tree) == strlen(chars));
742
+ g_assert(q_tree_height(tree) == 6);
743
+
744
+ p = chars;
745
+ q_tree_foreach(tree, check_order, &p);
746
+
747
+ for (i = 0; i < 26; i++) {
748
+ removed = q_tree_remove(tree, &chars[i + 10]);
749
+ g_assert(removed);
750
+ }
751
+
752
+ c = '\0';
753
+ removed = q_tree_remove(tree, &c);
754
+ g_assert(!removed);
755
+
756
+ q_tree_foreach(tree, my_traverse, NULL);
757
+
758
+ g_assert(q_tree_nnodes(tree) == strlen(chars2));
759
+ g_assert(q_tree_height(tree) == 6);
760
+
761
+ p = chars2;
762
+ q_tree_foreach(tree, check_order, &p);
763
+
764
+ for (i = 25; i >= 0; i--) {
765
+ q_tree_insert(tree, &chars[i + 10], &chars[i + 10]);
766
+ }
767
+
768
+ p = chars;
769
+ q_tree_foreach(tree, check_order, &p);
770
+
771
+ c = '0';
772
+ p = q_tree_lookup(tree, &c);
773
+ g_assert(p && *p == c);
774
+ g_assert(q_tree_lookup_extended(tree, &c, (gpointer *)&d, (gpointer *)&p));
775
+ g_assert(c == *d && c == *p);
776
+
777
+ c = 'A';
778
+ p = q_tree_lookup(tree, &c);
779
+ g_assert(p && *p == c);
780
+
781
+ c = 'a';
782
+ p = q_tree_lookup(tree, &c);
783
+ g_assert(p && *p == c);
784
+
785
+ c = 'z';
786
+ p = q_tree_lookup(tree, &c);
787
+ g_assert(p && *p == c);
788
+
789
+ c = '!';
790
+ p = q_tree_lookup(tree, &c);
791
+ g_assert(p == NULL);
792
+
793
+ c = '=';
794
+ p = q_tree_lookup(tree, &c);
795
+ g_assert(p == NULL);
796
+
797
+ c = '|';
798
+ p = q_tree_lookup(tree, &c);
799
+ g_assert(p == NULL);
800
+
801
+ c = '0';
802
+ p = q_tree_search(tree, my_search, &c);
803
+ g_assert(p && *p == c);
804
+
805
+ c = 'A';
806
+ p = q_tree_search(tree, my_search, &c);
807
+ g_assert(p && *p == c);
808
+
809
+ c = 'a';
810
+ p = q_tree_search(tree, my_search, &c);
811
+ g_assert(p && *p == c);
812
+
813
+ c = 'z';
814
+ p = q_tree_search(tree, my_search, &c);
815
+ g_assert(p && *p == c);
816
+
817
+ c = '!';
818
+ p = q_tree_search(tree, my_search, &c);
819
+ g_assert(p == NULL);
820
+
821
+ c = '=';
822
+ p = q_tree_search(tree, my_search, &c);
823
+ g_assert(p == NULL);
824
+
825
+ c = '|';
826
+ p = q_tree_search(tree, my_search, &c);
827
+ g_assert(p == NULL);
828
+
829
+ q_tree_destroy(tree);
830
+}
831
+
832
+static void test_tree_remove(void)
833
+{
834
+ QTree *tree;
835
+ char c, d;
836
+ gint i;
837
+ gboolean removed;
838
+
839
+ tree = q_tree_new_full((GCompareDataFunc)my_compare, NULL,
840
+ my_key_destroy,
841
+ my_value_destroy);
842
+
843
+ for (i = 0; chars[i]; i++) {
844
+ q_tree_insert(tree, &chars[i], &chars[i]);
845
+ }
846
+
847
+ c = '0';
848
+ q_tree_insert(tree, &c, &c);
849
+ g_assert(destroyed_key == &c);
850
+ g_assert(destroyed_value == &chars[0]);
851
+ destroyed_key = NULL;
852
+ destroyed_value = NULL;
853
+
854
+ d = '1';
855
+ q_tree_replace(tree, &d, &d);
856
+ g_assert(destroyed_key == &chars[1]);
857
+ g_assert(destroyed_value == &chars[1]);
858
+ destroyed_key = NULL;
859
+ destroyed_value = NULL;
860
+
861
+ c = '2';
862
+ removed = q_tree_remove(tree, &c);
863
+ g_assert(removed);
864
+ g_assert(destroyed_key == &chars[2]);
865
+ g_assert(destroyed_value == &chars[2]);
866
+ destroyed_key = NULL;
867
+ destroyed_value = NULL;
868
+
869
+ c = '3';
870
+ removed = q_tree_steal(tree, &c);
871
+ g_assert(removed);
872
+ g_assert(destroyed_key == NULL);
873
+ g_assert(destroyed_value == NULL);
874
+
875
+ const gchar *remove = "omkjigfedba";
876
+ for (i = 0; remove[i]; i++) {
877
+ removed = q_tree_remove(tree, &remove[i]);
878
+ g_assert(removed);
879
+ }
880
+
881
+ q_tree_destroy(tree);
882
+}
883
+
884
+static void test_tree_destroy(void)
885
+{
886
+ QTree *tree;
887
+ gint i;
888
+
889
+ tree = q_tree_new(my_compare);
890
+
891
+ for (i = 0; chars[i]; i++) {
892
+ q_tree_insert(tree, &chars[i], &chars[i]);
893
+ }
894
+
895
+ g_assert(q_tree_nnodes(tree) == strlen(chars));
896
+
897
+ g_test_message("nnodes: %d", q_tree_nnodes(tree));
898
+ q_tree_ref(tree);
899
+ q_tree_destroy(tree);
900
+
901
+ g_test_message("nnodes: %d", q_tree_nnodes(tree));
902
+ g_assert(q_tree_nnodes(tree) == 0);
903
+
904
+ q_tree_unref(tree);
905
+}
906
+
907
+static void test_tree_insert(void)
908
+{
909
+ QTree *tree;
910
+ gchar *p;
911
+ gint i;
912
+ gchar *scrambled;
913
+
914
+ tree = q_tree_new(my_compare);
915
+
916
+ for (i = 0; chars[i]; i++) {
917
+ q_tree_insert(tree, &chars[i], &chars[i]);
918
+ }
919
+ p = chars;
920
+ q_tree_foreach(tree, check_order, &p);
921
+
922
+ q_tree_unref(tree);
923
+ tree = q_tree_new(my_compare);
924
+
925
+ for (i = strlen(chars) - 1; i >= 0; i--) {
926
+ q_tree_insert(tree, &chars[i], &chars[i]);
927
+ }
928
+ p = chars;
929
+ q_tree_foreach(tree, check_order, &p);
930
+
931
+ q_tree_unref(tree);
932
+ tree = q_tree_new(my_compare);
933
+
934
+ scrambled = g_strdup(chars);
935
+
936
+ for (i = 0; i < 30; i++) {
937
+ gchar tmp;
938
+ gint a, b;
939
+
940
+ a = g_random_int_range(0, strlen(scrambled));
941
+ b = g_random_int_range(0, strlen(scrambled));
942
+ tmp = scrambled[a];
943
+ scrambled[a] = scrambled[b];
944
+ scrambled[b] = tmp;
945
+ }
946
+
947
+ for (i = 0; scrambled[i]; i++) {
948
+ q_tree_insert(tree, &scrambled[i], &scrambled[i]);
949
+ }
950
+ p = chars;
951
+ q_tree_foreach(tree, check_order, &p);
952
+
953
+ g_free(scrambled);
954
+ q_tree_unref(tree);
955
+}
956
+
957
+int main(int argc, char *argv[])
958
+{
959
+ g_test_init(&argc, &argv, NULL);
960
+
961
+ g_test_add_func("/qtree/search", test_tree_search);
962
+ g_test_add_func("/qtree/remove", test_tree_remove);
963
+ g_test_add_func("/qtree/destroy", test_tree_destroy);
964
+ g_test_add_func("/qtree/insert", test_tree_insert);
965
+
966
+ return g_test_run();
967
+}
968
diff --git a/util/qtree.c b/util/qtree.c
969
new file mode 100644
970
index XXXXXXX..XXXXXXX
971
--- /dev/null
972
+++ b/util/qtree.c
973
@@ -XXX,XX +XXX,XX @@
974
+/*
975
+ * GLIB - Library of useful routines for C programming
976
+ * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
977
+ *
978
+ * SPDX-License-Identifier: LGPL-2.1-or-later
979
+ *
980
+ * This library is free software; you can redistribute it and/or
981
+ * modify it under the terms of the GNU Lesser General Public
982
+ * License as published by the Free Software Foundation; either
983
+ * version 2.1 of the License, or (at your option) any later version.
984
+ *
985
+ * This library is distributed in the hope that it will be useful,
986
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
987
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
988
+ * Lesser General Public License for more details.
989
+ *
990
+ * You should have received a copy of the GNU Lesser General Public
991
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
992
+ */
993
+
994
+/*
995
+ * Modified by the GLib Team and others 1997-2000. See the AUTHORS
996
+ * file for a list of people on the GLib Team. See the ChangeLog
997
+ * files for a list of changes. These files are distributed with
998
+ * GLib at ftp://ftp.gtk.org/pub/gtk/.
999
+ */
1000
+
1001
+/*
1002
+ * MT safe
1003
+ */
1004
+
1005
+#include "qemu/osdep.h"
1006
+#include "qemu/qtree.h"
1007
+
1008
+/**
1009
+ * SECTION:trees-binary
1010
+ * @title: Balanced Binary Trees
1011
+ * @short_description: a sorted collection of key/value pairs optimized
1012
+ * for searching and traversing in order
1013
+ *
1014
+ * The #QTree structure and its associated functions provide a sorted
1015
+ * collection of key/value pairs optimized for searching and traversing
1016
+ * in order. This means that most of the operations (access, search,
1017
+ * insertion, deletion, ...) on #QTree are O(log(n)) in average and O(n)
1018
+ * in worst case for time complexity. But, note that maintaining a
1019
+ * balanced sorted #QTree of n elements is done in time O(n log(n)).
1020
+ *
1021
+ * To create a new #QTree use q_tree_new().
1022
+ *
1023
+ * To insert a key/value pair into a #QTree use q_tree_insert()
1024
+ * (O(n log(n))).
1025
+ *
1026
+ * To remove a key/value pair use q_tree_remove() (O(n log(n))).
1027
+ *
1028
+ * To look up the value corresponding to a given key, use
1029
+ * q_tree_lookup() and q_tree_lookup_extended().
1030
+ *
1031
+ * To find out the number of nodes in a #QTree, use q_tree_nnodes(). To
1032
+ * get the height of a #QTree, use q_tree_height().
1033
+ *
1034
+ * To traverse a #QTree, calling a function for each node visited in
1035
+ * the traversal, use q_tree_foreach().
1036
+ *
1037
+ * To destroy a #QTree, use q_tree_destroy().
1038
+ **/
+
+#define MAX_GTREE_HEIGHT 40
+
+/**
+ * QTree:
+ *
+ * The QTree struct is an opaque data structure representing a
+ * [balanced binary tree][glib-Balanced-Binary-Trees]. It should be
+ * accessed only by using the following functions.
+ */
+struct _QTree {
+    QTreeNode *root;
+    GCompareDataFunc key_compare;
+    GDestroyNotify key_destroy_func;
+    GDestroyNotify value_destroy_func;
+    gpointer key_compare_data;
+    guint nnodes;
+    gint ref_count;
+};
+
+struct _QTreeNode {
+    gpointer key;         /* key for this node */
+    gpointer value;       /* value stored at this node */
+    QTreeNode *left;      /* left subtree */
+    QTreeNode *right;     /* right subtree */
+    gint8 balance;        /* height (right) - height (left) */
+    guint8 left_child;
+    guint8 right_child;
+};
+
+
+static QTreeNode *q_tree_node_new(gpointer key,
+                                  gpointer value);
+static QTreeNode *q_tree_insert_internal(QTree *tree,
+                                         gpointer key,
+                                         gpointer value,
+                                         gboolean replace);
+static gboolean q_tree_remove_internal(QTree *tree,
+                                       gconstpointer key,
+                                       gboolean steal);
+static QTreeNode *q_tree_node_balance(QTreeNode *node);
+static QTreeNode *q_tree_find_node(QTree *tree,
+                                   gconstpointer key);
+static QTreeNode *q_tree_node_search(QTreeNode *node,
+                                     GCompareFunc search_func,
+                                     gconstpointer data);
+static QTreeNode *q_tree_node_rotate_left(QTreeNode *node);
+static QTreeNode *q_tree_node_rotate_right(QTreeNode *node);
+#ifdef Q_TREE_DEBUG
+static void q_tree_node_check(QTreeNode *node);
+#endif
+
+static QTreeNode*
+q_tree_node_new(gpointer key,
+                gpointer value)
+{
+    QTreeNode *node = g_new(QTreeNode, 1);
+
+    node->balance = 0;
+    node->left = NULL;
+    node->right = NULL;
+    node->left_child = FALSE;
+    node->right_child = FALSE;
+    node->key = key;
+    node->value = value;
+
+    return node;
+}
+
+/**
+ * q_tree_new:
+ * @key_compare_func: the function used to order the nodes in the #QTree.
+ *   It should return values similar to the standard strcmp() function -
+ *   0 if the two arguments are equal, a negative value if the first argument
+ *   comes before the second, or a positive value if the first argument comes
+ *   after the second.
+ *
+ * Creates a new #QTree.
+ *
+ * Returns: a newly allocated #QTree
+ */
+QTree *
+q_tree_new(GCompareFunc key_compare_func)
+{
+    g_return_val_if_fail(key_compare_func != NULL, NULL);
+
+    return q_tree_new_full((GCompareDataFunc) key_compare_func, NULL,
+                           NULL, NULL);
+}
+
+/**
+ * q_tree_new_with_data:
+ * @key_compare_func: qsort()-style comparison function
+ * @key_compare_data: data to pass to comparison function
+ *
+ * Creates a new #QTree with a comparison function that accepts user data.
+ * See q_tree_new() for more details.
+ *
+ * Returns: a newly allocated #QTree
+ */
+QTree *
+q_tree_new_with_data(GCompareDataFunc key_compare_func,
+                     gpointer key_compare_data)
+{
+    g_return_val_if_fail(key_compare_func != NULL, NULL);
+
+    return q_tree_new_full(key_compare_func, key_compare_data,
+                           NULL, NULL);
+}
+
+/**
+ * q_tree_new_full:
+ * @key_compare_func: qsort()-style comparison function
+ * @key_compare_data: data to pass to comparison function
+ * @key_destroy_func: a function to free the memory allocated for the key
+ *   used when removing the entry from the #QTree or %NULL if you don't
+ *   want to supply such a function
+ * @value_destroy_func: a function to free the memory allocated for the
+ *   value used when removing the entry from the #QTree or %NULL if you
+ *   don't want to supply such a function
+ *
+ * Creates a new #QTree like q_tree_new() and allows to specify functions
+ * to free the memory allocated for the key and value that get called when
+ * removing the entry from the #QTree.
+ *
+ * Returns: a newly allocated #QTree
+ */
+QTree *
+q_tree_new_full(GCompareDataFunc key_compare_func,
+                gpointer key_compare_data,
+                GDestroyNotify key_destroy_func,
+                GDestroyNotify value_destroy_func)
+{
+    QTree *tree;
+
+    g_return_val_if_fail(key_compare_func != NULL, NULL);
+
+    tree = g_new(QTree, 1);
+    tree->root = NULL;
+    tree->key_compare = key_compare_func;
+    tree->key_destroy_func = key_destroy_func;
+    tree->value_destroy_func = value_destroy_func;
+    tree->key_compare_data = key_compare_data;
+    tree->nnodes = 0;
+    tree->ref_count = 1;
+
+    return tree;
+}
+
+/**
+ * q_tree_node_first:
+ * @tree: a #QTree
+ *
+ * Returns the first in-order node of the tree, or %NULL
+ * for an empty tree.
+ *
+ * Returns: (nullable) (transfer none): the first node in the tree
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_node_first(QTree *tree)
+{
+    QTreeNode *tmp;
+
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    if (!tree->root) {
+        return NULL;
+    }
+
+    tmp = tree->root;
+
+    while (tmp->left_child) {
+        tmp = tmp->left;
+    }
+
+    return tmp;
+}
+
+/**
+ * q_tree_node_previous
+ * @node: a #QTree node
+ *
+ * Returns the previous in-order node of the tree, or %NULL
+ * if the passed node was already the first one.
+ *
+ * Returns: (nullable) (transfer none): the previous node in the tree
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_node_previous(QTreeNode *node)
+{
+    QTreeNode *tmp;
+
+    g_return_val_if_fail(node != NULL, NULL);
+
+    tmp = node->left;
+
+    if (node->left_child) {
+        while (tmp->right_child) {
+            tmp = tmp->right;
+        }
+    }
+
+    return tmp;
+}
+
+/**
+ * q_tree_node_next
+ * @node: a #QTree node
+ *
+ * Returns the next in-order node of the tree, or %NULL
+ * if the passed node was already the last one.
+ *
+ * Returns: (nullable) (transfer none): the next node in the tree
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_node_next(QTreeNode *node)
+{
+    QTreeNode *tmp;
+
+    g_return_val_if_fail(node != NULL, NULL);
+
+    tmp = node->right;
+
+    if (node->right_child) {
+        while (tmp->left_child) {
+            tmp = tmp->left;
+        }
+    }
+
+    return tmp;
+}
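/*
 * [Illustration only -- not part of the patch.]  The left/right pointers
 * above double as threads: when left_child/right_child are FALSE they
 * point at the in-order predecessor/successor rather than a subtree.
 * Assuming the static helpers above were visible, a full in-order walk
 * therefore needs no stack and no recursion:
 *
 *     for (QTreeNode *n = q_tree_node_first(tree); n != NULL;
 *          n = q_tree_node_next(n)) {
 *         visit(n->key, n->value);   // visit() is a hypothetical callback
 *     }
 */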
+
+/**
+ * q_tree_remove_all:
+ * @tree: a #QTree
+ *
+ * Removes all nodes from a #QTree and destroys their keys and values,
+ * then resets the #QTree’s root to %NULL.
+ *
+ * Since: 2.70 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static void
+q_tree_remove_all(QTree *tree)
+{
+    QTreeNode *node;
+    QTreeNode *next;
+
+    g_return_if_fail(tree != NULL);
+
+    node = q_tree_node_first(tree);
+
+    while (node) {
+        next = q_tree_node_next(node);
+
+        if (tree->key_destroy_func) {
+            tree->key_destroy_func(node->key);
+        }
+        if (tree->value_destroy_func) {
+            tree->value_destroy_func(node->value);
+        }
+        g_free(node);
+
+#ifdef Q_TREE_DEBUG
+        g_assert(tree->nnodes > 0);
+        tree->nnodes--;
+#endif
+
+        node = next;
+    }
+
+#ifdef Q_TREE_DEBUG
+    g_assert(tree->nnodes == 0);
+#endif
+
+    tree->root = NULL;
+#ifndef Q_TREE_DEBUG
+    tree->nnodes = 0;
+#endif
+}
+
+/**
+ * q_tree_ref:
+ * @tree: a #QTree
+ *
+ * Increments the reference count of @tree by one.
+ *
+ * It is safe to call this function from any thread.
+ *
+ * Returns: the passed in #QTree
+ *
+ * Since: 2.22
+ */
+QTree *
+q_tree_ref(QTree *tree)
+{
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    g_atomic_int_inc(&tree->ref_count);
+
+    return tree;
+}
+
+/**
+ * q_tree_unref:
+ * @tree: a #QTree
+ *
+ * Decrements the reference count of @tree by one.
+ * If the reference count drops to 0, all keys and values will
+ * be destroyed (if destroy functions were specified) and all
+ * memory allocated by @tree will be released.
+ *
+ * It is safe to call this function from any thread.
+ *
+ * Since: 2.22
+ */
+void
+q_tree_unref(QTree *tree)
+{
+    g_return_if_fail(tree != NULL);
+
+    if (g_atomic_int_dec_and_test(&tree->ref_count)) {
+        q_tree_remove_all(tree);
+        g_free(tree);
+    }
+}
+
+/**
+ * q_tree_destroy:
+ * @tree: a #QTree
+ *
+ * Removes all keys and values from the #QTree and decreases its
+ * reference count by one. If keys and/or values are dynamically
+ * allocated, you should either free them first or create the #QTree
+ * using q_tree_new_full(). In the latter case the destroy functions
+ * you supplied will be called on all keys and values before destroying
+ * the #QTree.
+ */
+void
+q_tree_destroy(QTree *tree)
+{
+    g_return_if_fail(tree != NULL);
+
+    q_tree_remove_all(tree);
+    q_tree_unref(tree);
+}
+
+/**
+ * q_tree_insert_node:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a key/value pair into a #QTree.
+ *
+ * If the given key already exists in the #QTree its corresponding value
+ * is set to the new value. If you supplied a @value_destroy_func when
+ * creating the #QTree, the old value is freed using that function. If
+ * you supplied a @key_destroy_func when creating the #QTree, the passed
+ * key is freed using that function.
+ *
+ * The tree is automatically 'balanced' as new key/value pairs are added,
+ * so that the distance from the root to every leaf is as small as possible.
+ * The cost of maintaining a balanced tree while inserting new key/value
+ * result in a O(n log(n)) operation where most of the other operations
+ * are O(log(n)).
+ *
+ * Returns: (transfer none): the inserted (or set) node.
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_insert_node(QTree *tree,
+                   gpointer key,
+                   gpointer value)
+{
+    QTreeNode *node;
+
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    node = q_tree_insert_internal(tree, key, value, FALSE);
+
+#ifdef Q_TREE_DEBUG
+    q_tree_node_check(tree->root);
+#endif
+
+    return node;
+}
+
+/**
+ * q_tree_insert:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a key/value pair into a #QTree.
+ *
+ * Inserts a new key and value into a #QTree as q_tree_insert_node() does,
+ * only this function does not return the inserted or set node.
+ */
+void
+q_tree_insert(QTree *tree,
+              gpointer key,
+              gpointer value)
+{
+    q_tree_insert_node(tree, key, value);
+}
+
+/**
+ * q_tree_replace_node:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a new key and value into a #QTree similar to q_tree_insert_node().
+ * The difference is that if the key already exists in the #QTree, it gets
+ * replaced by the new key. If you supplied a @value_destroy_func when
+ * creating the #QTree, the old value is freed using that function. If you
+ * supplied a @key_destroy_func when creating the #QTree, the old key is
+ * freed using that function.
+ *
+ * The tree is automatically 'balanced' as new key/value pairs are added,
+ * so that the distance from the root to every leaf is as small as possible.
+ *
+ * Returns: (transfer none): the inserted (or set) node.
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_replace_node(QTree *tree,
+                    gpointer key,
+                    gpointer value)
+{
+    QTreeNode *node;
+
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    node = q_tree_insert_internal(tree, key, value, TRUE);
+
+#ifdef Q_TREE_DEBUG
+    q_tree_node_check(tree->root);
+#endif
+
+    return node;
+}
+
+/**
+ * q_tree_replace:
+ * @tree: a #QTree
+ * @key: the key to insert
+ * @value: the value corresponding to the key
+ *
+ * Inserts a new key and value into a #QTree as q_tree_replace_node() does,
+ * only this function does not return the inserted or set node.
+ */
+void
+q_tree_replace(QTree *tree,
+               gpointer key,
+               gpointer value)
+{
+    q_tree_replace_node(tree, key, value);
+}
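/*
 * [Illustration only -- not part of the patch.]  The insert/replace pair
 * differ only in which key is freed on a duplicate.  A minimal sketch,
 * assuming string keys and g_free destroy functions:
 *
 *     static gint cmp_str(gconstpointer a, gconstpointer b, gpointer d)
 *     {
 *         return strcmp(a, b);
 *     }
 *
 *     QTree *t = q_tree_new_full(cmp_str, NULL, g_free, g_free);
 *     q_tree_insert(t, g_strdup("k"), g_strdup("v1"));
 *     // Duplicate key: q_tree_insert() frees the *passed* key and keeps
 *     // the old one; q_tree_replace() would free the *old* key instead.
 *     q_tree_insert(t, g_strdup("k"), g_strdup("v2"));
 *     q_tree_destroy(t);
 */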
+
+/* internal insert routine */
+static QTreeNode *
+q_tree_insert_internal(QTree *tree,
+                       gpointer key,
+                       gpointer value,
+                       gboolean replace)
+{
+    QTreeNode *node, *retnode;
+    QTreeNode *path[MAX_GTREE_HEIGHT];
+    int idx;
+
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    if (!tree->root) {
+        tree->root = q_tree_node_new(key, value);
+        tree->nnodes++;
+        return tree->root;
+    }
+
+    idx = 0;
+    path[idx++] = NULL;
+    node = tree->root;
+
+    while (1) {
+        int cmp = tree->key_compare(key, node->key, tree->key_compare_data);
+
+        if (cmp == 0) {
+            if (tree->value_destroy_func) {
+                tree->value_destroy_func(node->value);
+            }
+
+            node->value = value;
+
+            if (replace) {
+                if (tree->key_destroy_func) {
+                    tree->key_destroy_func(node->key);
+                }
+
+                node->key = key;
+            } else {
+                /* free the passed key */
+                if (tree->key_destroy_func) {
+                    tree->key_destroy_func(key);
+                }
+            }
+
+            return node;
+        } else if (cmp < 0) {
+            if (node->left_child) {
+                path[idx++] = node;
+                node = node->left;
+            } else {
+                QTreeNode *child = q_tree_node_new(key, value);
+
+                child->left = node->left;
+                child->right = node;
+                node->left = child;
+                node->left_child = TRUE;
+                node->balance -= 1;
+
+                tree->nnodes++;
+
+                retnode = child;
+                break;
+            }
+        } else {
+            if (node->right_child) {
+                path[idx++] = node;
+                node = node->right;
+            } else {
+                QTreeNode *child = q_tree_node_new(key, value);
+
+                child->right = node->right;
+                child->left = node;
+                node->right = child;
+                node->right_child = TRUE;
+                node->balance += 1;
+
+                tree->nnodes++;
+
+                retnode = child;
+                break;
+            }
+        }
+    }
+
+    /*
+     * Restore balance. This is the goodness of a non-recursive
+     * implementation, when we are done with balancing we 'break'
+     * the loop and we are done.
+     */
+    while (1) {
+        QTreeNode *bparent = path[--idx];
+        gboolean left_node = (bparent && node == bparent->left);
+        g_assert(!bparent || bparent->left == node || bparent->right == node);
+
+        if (node->balance < -1 || node->balance > 1) {
+            node = q_tree_node_balance(node);
+            if (bparent == NULL) {
+                tree->root = node;
+            } else if (left_node) {
+                bparent->left = node;
+            } else {
+                bparent->right = node;
+            }
+        }
+
+        if (node->balance == 0 || bparent == NULL) {
+            break;
+        }
+
+        if (left_node) {
+            bparent->balance -= 1;
+        } else {
+            bparent->balance += 1;
+        }
+
+        node = bparent;
+    }
+
+    return retnode;
+}
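/*
 * [Illustration only -- not part of the patch.]  The rebalancing loop
 * above is what keeps worst-case insertion order harmless.  A sketch,
 * reusing the hypothetical compare_ints from the earlier example:
 *
 *     QTree *t = q_tree_new(compare_ints);
 *     for (int i = 0; i < 1024; i++) {
 *         q_tree_insert(t, GINT_TO_POINTER(i), GINT_TO_POINTER(i));
 *     }
 *     // Without the AVL rotations, this ascending insert would produce
 *     // a height-1024 list; the AVL bound is ~1.44 * log2(n), so:
 *     g_assert(q_tree_height(t) <= 15);
 *     q_tree_destroy(t);
 */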
+
+/**
+ * q_tree_remove:
+ * @tree: a #QTree
+ * @key: the key to remove
+ *
+ * Removes a key/value pair from a #QTree.
+ *
+ * If the #QTree was created using q_tree_new_full(), the key and value
+ * are freed using the supplied destroy functions, otherwise you have to
+ * make sure that any dynamically allocated values are freed yourself.
+ * If the key does not exist in the #QTree, the function does nothing.
+ *
+ * The cost of maintaining a balanced tree while removing a key/value
+ * result in a O(n log(n)) operation where most of the other operations
+ * are O(log(n)).
+ *
+ * Returns: %TRUE if the key was found (prior to 2.8, this function
+ *     returned nothing)
+ */
+gboolean
+q_tree_remove(QTree *tree,
+              gconstpointer key)
+{
+    gboolean removed;
+
+    g_return_val_if_fail(tree != NULL, FALSE);
+
+    removed = q_tree_remove_internal(tree, key, FALSE);
+
+#ifdef Q_TREE_DEBUG
+    q_tree_node_check(tree->root);
+#endif
+
+    return removed;
+}
+
+/**
+ * q_tree_steal:
+ * @tree: a #QTree
+ * @key: the key to remove
+ *
+ * Removes a key and its associated value from a #QTree without calling
+ * the key and value destroy functions.
+ *
+ * If the key does not exist in the #QTree, the function does nothing.
+ *
+ * Returns: %TRUE if the key was found (prior to 2.8, this function
+ *     returned nothing)
+ */
+gboolean
+q_tree_steal(QTree *tree,
+             gconstpointer key)
+{
+    gboolean removed;
+
+    g_return_val_if_fail(tree != NULL, FALSE);
+
+    removed = q_tree_remove_internal(tree, key, TRUE);
+
+#ifdef Q_TREE_DEBUG
+    q_tree_node_check(tree->root);
+#endif
+
+    return removed;
+}
+
+/* internal remove routine */
+static gboolean
+q_tree_remove_internal(QTree *tree,
+                       gconstpointer key,
+                       gboolean steal)
+{
+    QTreeNode *node, *parent, *balance;
+    QTreeNode *path[MAX_GTREE_HEIGHT];
+    int idx;
+    gboolean left_node;
+
+    g_return_val_if_fail(tree != NULL, FALSE);
+
+    if (!tree->root) {
+        return FALSE;
+    }
+
+    idx = 0;
+    path[idx++] = NULL;
+    node = tree->root;
+
+    while (1) {
+        int cmp = tree->key_compare(key, node->key, tree->key_compare_data);
+
+        if (cmp == 0) {
+            break;
+        } else if (cmp < 0) {
+            if (!node->left_child) {
+                return FALSE;
+            }
+
+            path[idx++] = node;
+            node = node->left;
+        } else {
+            if (!node->right_child) {
+                return FALSE;
+            }
+
+            path[idx++] = node;
+            node = node->right;
+        }
+    }
+
+    /*
+     * The following code is almost equal to q_tree_remove_node,
+     * except that we do not have to call q_tree_node_parent.
+     */
+    balance = parent = path[--idx];
+    g_assert(!parent || parent->left == node || parent->right == node);
+    left_node = (parent && node == parent->left);
+
+    if (!node->left_child) {
+        if (!node->right_child) {
+            if (!parent) {
+                tree->root = NULL;
+            } else if (left_node) {
+                parent->left_child = FALSE;
+                parent->left = node->left;
+                parent->balance += 1;
+            } else {
+                parent->right_child = FALSE;
+                parent->right = node->right;
+                parent->balance -= 1;
+            }
+        } else {
+            /* node has a right child */
+            QTreeNode *tmp = q_tree_node_next(node);
+            tmp->left = node->left;
+
+            if (!parent) {
+                tree->root = node->right;
+            } else if (left_node) {
+                parent->left = node->right;
+                parent->balance += 1;
+            } else {
+                parent->right = node->right;
+                parent->balance -= 1;
+            }
+        }
+    } else {
+        /* node has a left child */
+        if (!node->right_child) {
+            QTreeNode *tmp = q_tree_node_previous(node);
+            tmp->right = node->right;
+
+            if (parent == NULL) {
+                tree->root = node->left;
+            } else if (left_node) {
+                parent->left = node->left;
+                parent->balance += 1;
+            } else {
+                parent->right = node->left;
+                parent->balance -= 1;
+            }
+        } else {
+            /* node has a both children (pant, pant!) */
+            QTreeNode *prev = node->left;
+            QTreeNode *next = node->right;
+            QTreeNode *nextp = node;
+            int old_idx = idx + 1;
+            idx++;
+
+            /* path[idx] == parent */
+            /* find the immediately next node (and its parent) */
+            while (next->left_child) {
+                path[++idx] = nextp = next;
+                next = next->left;
+            }
+
+            path[old_idx] = next;
+            balance = path[idx];
+
+            /* remove 'next' from the tree */
+            if (nextp != node) {
+                if (next->right_child) {
+                    nextp->left = next->right;
+                } else {
+                    nextp->left_child = FALSE;
+                }
+                nextp->balance += 1;
+
+                next->right_child = TRUE;
+                next->right = node->right;
+            } else {
+                node->balance -= 1;
+            }
+
+            /* set the prev to point to the right place */
+            while (prev->right_child) {
+                prev = prev->right;
+            }
+            prev->right = next;
+
+            /* prepare 'next' to replace 'node' */
+            next->left_child = TRUE;
+            next->left = node->left;
+            next->balance = node->balance;
+
+            if (!parent) {
+                tree->root = next;
+            } else if (left_node) {
+                parent->left = next;
+            } else {
+                parent->right = next;
+            }
+        }
+    }
+
+    /* restore balance */
+    if (balance) {
+        while (1) {
+            QTreeNode *bparent = path[--idx];
+            g_assert(!bparent ||
+                     bparent->left == balance ||
+                     bparent->right == balance);
+            left_node = (bparent && balance == bparent->left);
+
+            if (balance->balance < -1 || balance->balance > 1) {
+                balance = q_tree_node_balance(balance);
+                if (!bparent) {
+                    tree->root = balance;
+                } else if (left_node) {
+                    bparent->left = balance;
+                } else {
+                    bparent->right = balance;
+                }
+            }
+
+            if (balance->balance != 0 || !bparent) {
+                break;
+            }
+
+            if (left_node) {
+                bparent->balance += 1;
+            } else {
+                bparent->balance -= 1;
+            }
+
+            balance = bparent;
+        }
+    }
+
+    if (!steal) {
+        if (tree->key_destroy_func) {
+            tree->key_destroy_func(node->key);
+        }
+        if (tree->value_destroy_func) {
+            tree->value_destroy_func(node->value);
+        }
+    }
+
+    g_free(node);
+
+    tree->nnodes--;
+
+    return TRUE;
+}
+
+/**
+ * q_tree_lookup_node:
+ * @tree: a #QTree
+ * @key: the key to look up
+ *
+ * Gets the tree node corresponding to the given key. Since a #QTree is
+ * automatically balanced as key/value pairs are added, key lookup
+ * is O(log n) (where n is the number of key/value pairs in the tree).
+ *
+ * Returns: (nullable) (transfer none): the tree node corresponding to
+ *          the key, or %NULL if the key was not found
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_lookup_node(QTree *tree,
+                   gconstpointer key)
+{
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    return q_tree_find_node(tree, key);
+}
+
+/**
+ * q_tree_lookup:
+ * @tree: a #QTree
+ * @key: the key to look up
+ *
+ * Gets the value corresponding to the given key. Since a #QTree is
+ * automatically balanced as key/value pairs are added, key lookup
+ * is O(log n) (where n is the number of key/value pairs in the tree).
+ *
+ * Returns: the value corresponding to the key, or %NULL
+ *     if the key was not found
+ */
+gpointer
+q_tree_lookup(QTree *tree,
+              gconstpointer key)
+{
+    QTreeNode *node;
+
+    node = q_tree_lookup_node(tree, key);
+
+    return node ? node->value : NULL;
+}
+
+/**
+ * q_tree_lookup_extended:
+ * @tree: a #QTree
+ * @lookup_key: the key to look up
+ * @orig_key: (out) (optional) (nullable): returns the original key
+ * @value: (out) (optional) (nullable): returns the value associated with
+ *     the key
+ *
+ * Looks up a key in the #QTree, returning the original key and the
+ * associated value. This is useful if you need to free the memory
+ * allocated for the original key, for example before calling
+ * q_tree_remove().
+ *
+ * Returns: %TRUE if the key was found in the #QTree
+ */
+gboolean
+q_tree_lookup_extended(QTree *tree,
+                       gconstpointer lookup_key,
+                       gpointer *orig_key,
+                       gpointer *value)
+{
+    QTreeNode *node;
+
+    g_return_val_if_fail(tree != NULL, FALSE);
+
+    node = q_tree_find_node(tree, lookup_key);
+
+    if (node) {
+        if (orig_key) {
+            *orig_key = node->key;
+        }
+        if (value) {
+            *value = node->value;
+        }
+        return TRUE;
+    } else {
+        return FALSE;
+    }
+}
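/*
 * [Illustration only -- not part of the patch.]  Typical use of
 * q_tree_lookup_extended() when the tree was created without destroy
 * functions, so the caller owns the key and value:
 *
 *     gpointer orig_key, value;
 *     if (q_tree_lookup_extended(tree, "some-key", &orig_key, &value)) {
 *         q_tree_remove(tree, "some-key");   // just unlinks the node
 *         g_free(orig_key);                  // caller frees key and value
 *         g_free(value);
 *     }
 */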
+
+/**
+ * q_tree_foreach:
+ * @tree: a #QTree
+ * @func: the function to call for each node visited.
+ *     If this function returns %TRUE, the traversal is stopped.
+ * @user_data: user data to pass to the function
+ *
+ * Calls the given function for each of the key/value pairs in the #QTree.
+ * The function is passed the key and value of each pair, and the given
+ * @data parameter. The tree is traversed in sorted order.
+ *
+ * The tree may not be modified while iterating over it (you can't
+ * add/remove items). To remove all items matching a predicate, you need
+ * to add each item to a list in your #GTraverseFunc as you walk over
+ * the tree, then walk the list and remove each item.
+ */
+void
+q_tree_foreach(QTree *tree,
+               GTraverseFunc func,
+               gpointer user_data)
+{
+    QTreeNode *node;
+
+    g_return_if_fail(tree != NULL);
+
+    if (!tree->root) {
+        return;
+    }
+
+    node = q_tree_node_first(tree);
+
+    while (node) {
+        if ((*func)(node->key, node->value, user_data)) {
+            break;
+        }
+
+        node = q_tree_node_next(node);
+    }
+}
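/*
 * [Illustration only -- not part of the patch.]  A traversal that stops
 * early by returning TRUE from the callback:
 *
 *     static gboolean visit_first_n(gpointer key, gpointer value,
 *                                   gpointer data)
 *     {
 *         int *remaining = data;
 *         return --*remaining == 0;    // TRUE stops the traversal
 *     }
 *
 *     int limit = 10;
 *     q_tree_foreach(tree, visit_first_n, &limit);
 */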
+
+/**
+ * q_tree_search_node:
+ * @tree: a #QTree
+ * @search_func: a function used to search the #QTree
+ * @user_data: the data passed as the second argument to @search_func
+ *
+ * Searches a #QTree using @search_func.
+ *
+ * The @search_func is called with a pointer to the key of a key/value
+ * pair in the tree, and the passed in @user_data. If @search_func returns
+ * 0 for a key/value pair, then the corresponding node is returned as
+ * the result of q_tree_search(). If @search_func returns -1, searching
+ * will proceed among the key/value pairs that have a smaller key; if
+ * @search_func returns 1, searching will proceed among the key/value
+ * pairs that have a larger key.
+ *
+ * Returns: (nullable) (transfer none): the node corresponding to the
+ *          found key, or %NULL if the key was not found
+ *
+ * Since: 2.68 in GLib. Internal in Qtree, i.e. not in the public API.
+ */
+static QTreeNode *
+q_tree_search_node(QTree *tree,
+                   GCompareFunc search_func,
+                   gconstpointer user_data)
+{
+    g_return_val_if_fail(tree != NULL, NULL);
+
+    if (!tree->root) {
+        return NULL;
+    }
+
+    return q_tree_node_search(tree->root, search_func, user_data);
+}
+
+/**
+ * q_tree_search:
+ * @tree: a #QTree
+ * @search_func: a function used to search the #QTree
+ * @user_data: the data passed as the second argument to @search_func
+ *
+ * Searches a #QTree using @search_func.
+ *
+ * The @search_func is called with a pointer to the key of a key/value
+ * pair in the tree, and the passed in @user_data. If @search_func returns
+ * 0 for a key/value pair, then the corresponding value is returned as
+ * the result of q_tree_search(). If @search_func returns -1, searching
+ * will proceed among the key/value pairs that have a smaller key; if
+ * @search_func returns 1, searching will proceed among the key/value
+ * pairs that have a larger key.
+ *
+ * Returns: the value corresponding to the found key, or %NULL
+ *     if the key was not found
+ */
+gpointer
+q_tree_search(QTree *tree,
+              GCompareFunc search_func,
+              gconstpointer user_data)
+{
+    QTreeNode *node;
+
+    node = q_tree_search_node(tree, search_func, user_data);
+
+    return node ? node->value : NULL;
+}
+
+/**
+ * q_tree_height:
+ * @tree: a #QTree
+ *
+ * Gets the height of a #QTree.
+ *
+ * If the #QTree contains no nodes, the height is 0.
+ * If the #QTree contains only one root node the height is 1.
+ * If the root node has children the height is 2, etc.
+ *
+ * Returns: the height of @tree
+ */
+gint
+q_tree_height(QTree *tree)
+{
+    QTreeNode *node;
+    gint height;
+
+    g_return_val_if_fail(tree != NULL, 0);
+
+    if (!tree->root) {
+        return 0;
+    }
+
+    height = 0;
+    node = tree->root;
+
+    while (1) {
+        height += 1 + MAX(node->balance, 0);
+
+        if (!node->left_child) {
+            return height;
+        }
+
+        node = node->left;
+    }
+}
+
+/**
+ * q_tree_nnodes:
+ * @tree: a #QTree
+ *
+ * Gets the number of nodes in a #QTree.
+ *
+ * Returns: the number of nodes in @tree
+ */
+gint
+q_tree_nnodes(QTree *tree)
+{
+    g_return_val_if_fail(tree != NULL, 0);
+
+    return tree->nnodes;
+}
+
+static QTreeNode *
+q_tree_node_balance(QTreeNode *node)
+{
+    if (node->balance < -1) {
+        if (node->left->balance > 0) {
+            node->left = q_tree_node_rotate_left(node->left);
+        }
+        node = q_tree_node_rotate_right(node);
+    } else if (node->balance > 1) {
+        if (node->right->balance < 0) {
+            node->right = q_tree_node_rotate_right(node->right);
+        }
+        node = q_tree_node_rotate_left(node);
+    }
+
+    return node;
+}
+
+static QTreeNode *
+q_tree_find_node(QTree *tree,
+                 gconstpointer key)
+{
+    QTreeNode *node;
+    gint cmp;
+
+    node = tree->root;
+    if (!node) {
+        return NULL;
+    }
+
+    while (1) {
+        cmp = tree->key_compare(key, node->key, tree->key_compare_data);
+        if (cmp == 0) {
+            return node;
+        } else if (cmp < 0) {
+            if (!node->left_child) {
+                return NULL;
+            }
+
+            node = node->left;
+        } else {
+            if (!node->right_child) {
+                return NULL;
+            }
+
+            node = node->right;
+        }
+    }
+}
+
+static QTreeNode *
+q_tree_node_search(QTreeNode *node,
+                   GCompareFunc search_func,
+                   gconstpointer data)
+{
+    gint dir;
+
+    if (!node) {
+        return NULL;
+    }
+
+    while (1) {
+        dir = (*search_func)(node->key, data);
+        if (dir == 0) {
+            return node;
+        } else if (dir < 0) {
+            if (!node->left_child) {
+                return NULL;
+            }
+
+            node = node->left;
+        } else {
+            if (!node->right_child) {
+                return NULL;
+            }
+
+            node = node->right;
+        }
+    }
+}
+
+static QTreeNode *
+q_tree_node_rotate_left(QTreeNode *node)
+{
+    QTreeNode *right;
+    gint a_bal;
+    gint b_bal;
+
+    right = node->right;
+
+    if (right->left_child) {
+        node->right = right->left;
+    } else {
+        node->right_child = FALSE;
+        right->left_child = TRUE;
+    }
+    right->left = node;
+
+    a_bal = node->balance;
+    b_bal = right->balance;
+
+    if (b_bal <= 0) {
+        if (a_bal >= 1) {
+            right->balance = b_bal - 1;
+        } else {
+            right->balance = a_bal + b_bal - 2;
+        }
+        node->balance = a_bal - 1;
+    } else {
+        if (a_bal <= b_bal) {
+            right->balance = a_bal - 2;
+        } else {
+            right->balance = b_bal - 1;
+        }
+        node->balance = a_bal - b_bal - 1;
+    }
+
+    return right;
+}
+
+static QTreeNode *
+q_tree_node_rotate_right(QTreeNode *node)
+{
+    QTreeNode *left;
+    gint a_bal;
+    gint b_bal;
+
+    left = node->left;
+
+    if (left->right_child) {
+        node->left = left->right;
+    } else {
+        node->left_child = FALSE;
+        left->right_child = TRUE;
+    }
+    left->right = node;
+
+    a_bal = node->balance;
+    b_bal = left->balance;
+
+    if (b_bal <= 0) {
+        if (b_bal > a_bal) {
+            left->balance = b_bal + 1;
+        } else {
+            left->balance = a_bal + 2;
+        }
+        node->balance = a_bal - b_bal + 1;
+    } else {
+        if (a_bal <= -1) {
+            left->balance = b_bal + 1;
+        } else {
+            left->balance = a_bal + b_bal + 2;
+        }
+        node->balance = a_bal + 1;
+    }
+
+    return left;
+}
+
+#ifdef Q_TREE_DEBUG
+static gint
+q_tree_node_height(QTreeNode *node)
+{
+    gint left_height;
+    gint right_height;
+
+    if (node) {
+        left_height = 0;
+        right_height = 0;
+
+        if (node->left_child) {
+            left_height = q_tree_node_height(node->left);
+        }
+
+        if (node->right_child) {
+            right_height = q_tree_node_height(node->right);
+        }
+
+        return MAX(left_height, right_height) + 1;
+    }
+
+    return 0;
+}
+
+static void q_tree_node_check(QTreeNode *node)
+{
+    gint left_height;
+    gint right_height;
+    gint balance;
+    QTreeNode *tmp;
+
+    if (node) {
+        if (node->left_child) {
+            tmp = q_tree_node_previous(node);
+            g_assert(tmp->right == node);
+        }
+
+        if (node->right_child) {
+            tmp = q_tree_node_next(node);
+            g_assert(tmp->left == node);
+        }
+
+        left_height = 0;
+        right_height = 0;
+
+        if (node->left_child) {
+            left_height = q_tree_node_height(node->left);
+        }
+        if (node->right_child) {
+            right_height = q_tree_node_height(node->right);
+        }
+
+        balance = right_height - left_height;
+        g_assert(balance == node->balance);
+
+        if (node->left_child) {
+            q_tree_node_check(node->left);
+        }
+        if (node->right_child) {
+            q_tree_node_check(node->right);
+        }
+    }
+}
+#endif
diff --git a/tests/bench/meson.build b/tests/bench/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/bench/meson.build
+++ b/tests/bench/meson.build
@@ -XXX,XX +XXX,XX @@ xbzrle_bench = executable('xbzrle-bench',
                           dependencies: [qemuutil,migration])
 endif
 
+qtree_bench = executable('qtree-bench',
+                         sources: 'qtree-bench.c',
+                         dependencies: [qemuutil])
+
 executable('atomic_add-bench',
            sources: files('atomic_add-bench.c'),
            dependencies: [qemuutil],
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -XXX,XX +XXX,XX @@ tests = {
   'test-rcu-slist': [],
   'test-qdist': [],
   'test-qht': [],
+  'test-qtree': [],
   'test-bitops': [],
   'test-bitcnt': [],
   'test-qgraph': ['../qtest/libqos/qgraph.c'],
diff --git a/util/meson.build b/util/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -XXX,XX +XXX,XX @@ util_ss.add(when: 'CONFIG_WIN32', if_true: files('oslib-win32.c'))
 util_ss.add(when: 'CONFIG_WIN32', if_true: files('qemu-thread-win32.c'))
 util_ss.add(when: 'CONFIG_WIN32', if_true: winmm)
 util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch)
+util_ss.add(when: 'HAVE_GLIB_WITH_SLICE_ALLOCATOR', if_true: files('qtree.c'))
 util_ss.add(files('envlist.c', 'path.c', 'module.c'))
 util_ss.add(files('host-utils.c'))
 util_ss.add(files('bitmap.c', 'bitops.c'))
-- 
2.34.1
Pass the address of the last byte to be changed, rather than
the first address past the last byte. This avoids overflow
when the last page of the address space is involved.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h   |  2 +-
 accel/tcg/tb-maint.c      | 31 ++++++++++++++++---------------
 accel/tcg/translate-all.c |  2 +-
 accel/tcg/user-exec.c     |  2 +-
 softmmu/physmem.c         |  2 +-
 5 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(target_ulong addr);
 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
 #endif
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
 
 /* GETPC is the true target of the return instruction that we'll execute. */
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
  * Called with mmap_lock held for user-mode emulation.
  * NOTE: this function must not be called while a TB is running.
  */
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
 {
     TranslationBlock *tb;
     PageForEachNext n;
-    tb_page_addr_t last = end - 1;
 
     assert_memory_lock();
 
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
  */
 void tb_invalidate_phys_page(tb_page_addr_t addr)
 {
-    tb_page_addr_t start, end;
+    tb_page_addr_t start, last;
 
     start = addr & TARGET_PAGE_MASK;
-    end = start + TARGET_PAGE_SIZE;
-    tb_invalidate_phys_range(start, end);
+    last = addr | ~TARGET_PAGE_MASK;
+    tb_invalidate_phys_range(start, last);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
 
 /*
  * Invalidate all TBs which intersect with the target physical address range
- * [start;end[. NOTE: start and end may refer to *different* physical pages.
+ * [start;last]. NOTE: start and end may refer to *different* physical pages.
  * 'is_cpu_write_access' should be true if called from a real cpu write
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
  */
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
 {
     struct page_collection *pages;
-    tb_page_addr_t next;
+    tb_page_addr_t index, index_last;
 
-    pages = page_collection_lock(start, end - 1);
-    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-         start < end;
-         start = next, next += TARGET_PAGE_SIZE) {
-        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
-        tb_page_addr_t bound = MIN(next, end);
+    pages = page_collection_lock(start, last);
+
+    index_last = last >> TARGET_PAGE_BITS;
+    for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
+        PageDesc *pd = page_find(index);
+        tb_page_addr_t bound;
 
         if (pd == NULL) {
             continue;
         }
         assert_page_locked(pd);
-        tb_invalidate_phys_page_range__locked(pages, pd, start, bound - 1, 0);
+        bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
+        bound = MIN(bound, last);
+        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
     }
     page_collection_unlock(pages);
 }
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
         addr = get_page_addr_code(env, pc);
         if (addr != -1) {
-            tb_invalidate_phys_range(addr, addr + 1);
+            tb_invalidate_phys_range(addr, addr);
         }
     }
 }
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
                  ~(reset ? 0 : PAGE_STICKY));
     }
     if (inval_tb) {
-        tb_invalidate_phys_range(start, last + 1);
+        tb_invalidate_phys_range(start, last);
     }
 }
 
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -XXX,XX +XXX,XX @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
     }
     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
         assert(tcg_enabled());
-        tb_invalidate_phys_range(addr, addr + length);
+        tb_invalidate_phys_range(addr, addr + length - 1);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
     }
     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
-- 
2.34.1
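To make the off-by-one concrete: a minimal sketch (not from the series;
a hypothetical 32-bit target with made-up names) of why an inclusive
'last' cannot wrap where an exclusive 'end' can:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t page_size = 0x1000;
        const uint32_t page_mask = ~(page_size - 1);

        /* An address on the last page of a 32-bit address space. */
        uint32_t addr  = 0xfffff234u;
        uint32_t start = addr & page_mask;     /* 0xfffff000 */
        uint32_t end   = start + page_size;    /* wraps around to 0 */
        uint32_t last  = addr | ~page_mask;    /* 0xffffffff, no wrap */

        assert(end == 0);        /* "start < end" would never be true */
        assert(start <= last);   /* the inclusive bound still works */
        return 0;
    }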
We will need a backend interface for performing 16-bit zero-extend.
Use it in tcg_reg_alloc_op in the meantime.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        |  5 +++++
 tcg/aarch64/tcg-target.c.inc     | 13 ++++++++-----
 tcg/arm/tcg-target.c.inc         | 17 ++++++++++-------
 tcg/i386/tcg-target.c.inc        |  8 +++-----
 tcg/loongarch64/tcg-target.c.inc |  7 ++-----
 tcg/mips/tcg-target.c.inc        |  5 +++++
 tcg/ppc/tcg-target.c.inc         |  4 +++-
 tcg/riscv/tcg-target.c.inc       |  7 ++-----
 tcg/s390x/tcg-target.c.inc       | 17 ++++++-----------
 tcg/sparc64/tcg-target.c.inc     | 11 +++++++++--
 tcg/tci/tcg-target.c.inc         | 14 +++++++++++++-
 11 files changed, 66 insertions(+), 42 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_ext16s_i64:
         tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
         break;
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
+        tcg_out_ext16u(s, new_args[0], new_args[1]);
+        break;
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
     tcg_out_uxt(s, MO_8, rd, rn);
 }
 
+static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    tcg_out_uxt(s, MO_16, rd, rn);
+}
+
 static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
                             TCGReg rn, int64_t aimm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
             tcg_out_ext16s(s, ext, a0, a0);
         } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
             /* Output must be zero-extended, but input isn't. */
-            tcg_out_uxt(s, MO_16, a0, a0);
+            tcg_out_ext16u(s, a0, a0);
         }
         break;
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext32s_i64:
         tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
         break;
-    case INDEX_op_ext16u_i64:
-    case INDEX_op_ext16u_i32:
-        tcg_out_uxt(s, MO_16, a0, a1);
-        break;
     case INDEX_op_extu_i32_i64:
     case INDEX_op_ext32u_i64:
         tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8u_i64:
     case INDEX_op_ext16s_i64:
     case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16u_i64:
+    case INDEX_op_ext16u_i32:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
     tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
 }
 
-static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
+static void tcg_out_ext16u_cond(TCGContext *s, ARMCond cond,
+                                TCGReg rd, TCGReg rn)
 {
     /* uxth */
     tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
 }
 
+static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    tcg_out_ext16u_cond(s, COND_AL, rd, rn);
+}
+
 static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                             TCGReg rd, TCGReg rn, int flags)
 {
@@ -XXX,XX +XXX,XX @@ DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
     (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
 DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u_cond,
     (tcg_out_ext8u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
-DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
-    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
+DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u_cond,
+    (tcg_out_ext16u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
 DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
 
 static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_bswap32(s, COND_AL, args[0], args[1]);
         break;
 
-    case INDEX_op_ext16u_i32:
-        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
-        break;
-
     case INDEX_op_deposit_i32:
         tcg_out_deposit(s, COND_AL, args[0], args[2],
                         args[3], args[4], const_args[2]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
     case INDEX_op_ext8u_i32:
     case INDEX_op_ext16s_i32:
+    case INDEX_op_ext16u_i32:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
     tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
 }
 
-static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
+static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
 {
     /* movzwl */
     tcg_out_modrm(s, OPC_MOVZWL, dest, src);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
         break;
 
-    OP_32_64(ext16u):
-        tcg_out_ext16u(s, a0, a1);
-        break;
-
     case INDEX_op_qemu_ld_i32:
         tcg_out_qemu_ld(s, args, 0);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8u_i64:
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
         break;
 
-    case INDEX_op_ext16u_i32:
-    case INDEX_op_ext16u_i64:
-        tcg_out_ext16u(s, a0, a1);
-        break;
-
     case INDEX_op_ext32u_i64:
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8u_i64:
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
     tcg_out_opc_reg(s, OPC_SEH, rd, TCG_REG_ZERO, rs);
 }
 
+static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_out_opc_imm(s, OPC_ANDI, rd, rs, 0xffff);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
     tcg_out32(s, EXTSH | RA(dst) | RS(src));
 }
 
-static inline void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
+static void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
 {
     tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
 }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8u_i64:
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_qemu_st(s, args, true);
         break;
 
-    case INDEX_op_ext16u_i32:
-    case INDEX_op_ext16u_i64:
-        tcg_out_ext16u(s, a0, a1);
-        break;
-
     case INDEX_op_ext32u_i64:
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8u_i64:
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
     tcg_out_insn(s, RRE, LGHR, dest, src);
 }
 
-static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
+static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
 {
     tcg_out_insn(s, RRE, LLGHR, dest, src);
 }
@@ -XXX,XX +XXX,XX @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
         return;
     }
     if ((val & valid) == 0xffff) {
-        tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
+        tcg_out_ext16u(s, dest, dest);
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
     case MO_UW | MO_BSWAP:
         /* swapped unsigned halfword load with upper bits zeroed */
         tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
-        tgen_ext16u(s, TCG_TYPE_I64, data, data);
+        tcg_out_ext16u(s, data, data);
         break;
     case MO_UW:
         tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
         tcg_out_ext8u(s, TCG_REG_R4, data_reg);
         break;
     case MO_UW:
-        tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
+        tcg_out_ext16u(s, TCG_REG_R4, data_reg);
         break;
     case MO_UL:
         tgen_ext32u(s, TCG_REG_R4, data_reg);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             }
         }
         break;
 
-    case INDEX_op_ext16u_i32:
-        tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
-        break;
-
     case INDEX_op_bswap16_i32:
         a0 = args[0], a1 = args[1], a2 = args[2];
         tcg_out_insn(s, RRE, LRVR, a0, a1);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext32s_i64:
         tgen_ext32s(s, args[0], args[1]);
         break;
-    case INDEX_op_ext16u_i64:
-        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
-        break;
     case INDEX_op_extu_i32_i64:
     case INDEX_op_ext32u_i64:
         tgen_ext32u(s, args[0], args[1]);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8u_i64:
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
 }
 
+static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
+    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void emit_extend(TCGContext *s, TCGReg r, int op)
         tcg_out_ext8u(s, r, r);
         break;
     case MO_16:
-        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
-        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
+        tcg_out_ext16u(s, r, r);
         break;
     case MO_32:
         tcg_out_arith(s, r, r, 0, SHIFT_SRL);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8u_i64:
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
     }
 }
 
+static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    if (TCG_TARGET_REG_BITS == 64) {
+        tcg_debug_assert(TCG_TARGET_HAS_ext16u_i64);
+        tcg_out_op_rr(s, INDEX_op_ext16u_i64, rd, rs);
+    } else {
+        tcg_debug_assert(TCG_TARGET_HAS_ext16u_i32);
+        tcg_out_op_rr(s, INDEX_op_ext16u_i32, rd, rs);
+    }
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     CASE_32_64(neg)      /* Optional (TCG_TARGET_HAS_neg_*). */
     CASE_32_64(not)      /* Optional (TCG_TARGET_HAS_not_*). */
-    CASE_32_64(ext16u)   /* Optional (TCG_TARGET_HAS_ext16u_*). */
     CASE_64(ext32s)      /* Optional (TCG_TARGET_HAS_ext32s_i64). */
     CASE_64(ext32u)      /* Optional (TCG_TARGET_HAS_ext32u_i64). */
     CASE_64(ext_i32)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext8u_i64:
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
+    case INDEX_op_ext16u_i32:
+    case INDEX_op_ext16u_i64:
     default:
         g_assert_not_reached();
     }
-- 
2.34.1
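For reference, the contract that every backend's tcg_out_ext16u must
honor is simply a 16-bit zero-extend; a minimal C sketch (not qemu
code, purely illustrative):

    #include <stdint.h>

    /* rd = rs zero-extended from 16 bits.  Backends without a dedicated
     * instruction synthesize it, e.g. via an and-immediate (mips, ppc)
     * or a shift-left/shift-right pair by 16 (sparc64 above). */
    static inline uint64_t ext16u(uint64_t rs)
    {
        return (uint16_t)rs;
    }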
Pass the address of the last byte to be changed, rather than
the first address past the last byte. This avoids overflow
when the last page of the address space is involved.

Properly truncate tb_last to the end of the page; the comment about
tb_end being past the end of the page being ok is not correct,
considering overflow.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tb-maint.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
 static void
 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                       PageDesc *p, tb_page_addr_t start,
-                                      tb_page_addr_t end,
+                                      tb_page_addr_t last,
                                       uintptr_t retaddr)
 {
     TranslationBlock *tb;
-    tb_page_addr_t tb_start, tb_end;
     PageForEachNext n;
 #ifdef TARGET_HAS_PRECISE_SMC
     bool current_tb_modified = false;
     TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
 #endif /* TARGET_HAS_PRECISE_SMC */
-    tb_page_addr_t last G_GNUC_UNUSED = end - 1;
 
     /*
-     * We remove all the TBs in the range [start, end[.
+     * We remove all the TBs in the range [start, last].
      * XXX: see if in some cases it could be faster to invalidate all the code
      */
     PAGE_FOR_EACH_TB(start, last, p, tb, n) {
+        tb_page_addr_t tb_start, tb_last;
+
         /* NOTE: this is subtle as a TB may span two physical pages */
+        tb_start = tb_page_addr0(tb);
+        tb_last = tb_start + tb->size - 1;
         if (n == 0) {
-            /* NOTE: tb_end may be after the end of the page, but
-               it is not a problem */
-            tb_start = tb_page_addr0(tb);
-            tb_end = tb_start + tb->size;
+            tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK);

[The dump breaks off here, mid-hunk; the remainder of this patch was not
captured.  The next patch in the series is likewise truncated:]

We will need a backend interface for performing 32-bit sign-extend.
Use it in tcg_reg_alloc_op in the meantime.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        |  4 ++++
 tcg/aarch64/tcg-target.c.inc     |  9 +++++++--
 tcg/arm/tcg-target.c.inc         |  5 +++++
 tcg/i386/tcg-target.c.inc        |  5 +++--
 tcg/loongarch64/tcg-target.c.inc |  2 +-
 tcg/mips/tcg-target.c.inc        | 12 +++++++++---
 tcg/ppc/tcg-target.c.inc         |  5 +++--
 tcg/riscv/tcg-target.c.inc       |  2 +-
 tcg/s390x/tcg-target.c.inc       | 10 +++++-----
 tcg/sparc64/tcg-target.c.inc     | 11 ++++++++---
 tcg/tci/tcg-target.c.inc         |  9 ++++++++-
 11 files changed, 54 insertions(+), 20 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_ext16u_i64:
         tcg_out_ext16u(s, new_args[0], new_args[1]);
         break;
+    case INDEX_op_ext32s_i64:
+        tcg_out_ext32s(s, new_args[0], new_args[1]);
+        break;
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn)
     tcg_out_sxt(s, type, MO_16, rd, rn);
 }
 
+static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    tcg_out_sxt(s, TCG_TYPE_I64, MO_32, rd, rn);
+}
+
 static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits,
                                TCGReg rd, TCGReg rn)
52
+ tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK);
57
{
53
} else {
58
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
54
tb_start = tb_page_addr1(tb);
59
case INDEX_op_bswap32_i64:
55
- tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
60
tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
56
- & ~TARGET_PAGE_MASK);
61
if (a2 & TCG_BSWAP_OS) {
57
+ tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
62
- tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a0);
63
+ tcg_out_ext32s(s, a0, a0);
58
}
64
}
59
- if (!(tb_end <= start || tb_start >= end)) {
65
break;
60
+ if (!(tb_last < start || tb_start > last)) {
66
case INDEX_op_bswap32_i32:
61
#ifdef TARGET_HAS_PRECISE_SMC
67
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
62
if (current_tb == tb &&
68
break;
63
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
69
64
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
70
case INDEX_op_ext_i32_i64:
65
start = addr & TARGET_PAGE_MASK;
71
- case INDEX_op_ext32s_i64:
66
last = addr | ~TARGET_PAGE_MASK;
72
tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
67
pages = page_collection_lock(start, last);
73
break;
68
- tb_invalidate_phys_page_range__locked(pages, p, start, last + 1, 0);
74
case INDEX_op_extu_i32_i64:
69
+ tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
75
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
70
page_collection_unlock(pages);
76
case INDEX_op_ext16s_i32:
71
}
77
case INDEX_op_ext16u_i64:
72
78
case INDEX_op_ext16u_i32:
73
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
79
+ case INDEX_op_ext32s_i64:
74
continue;
80
default:
81
g_assert_not_reached();
82
}
83
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
84
index XXXXXXX..XXXXXXX 100644
85
--- a/tcg/arm/tcg-target.c.inc
86
+++ b/tcg/arm/tcg-target.c.inc
87
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
88
tcg_out_ext16u_cond(s, COND_AL, rd, rn);
89
}
90
91
+static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
92
+{
93
+ g_assert_not_reached();
94
+}
95
+
96
static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
97
TCGReg rd, TCGReg rn, int flags)
98
{
99
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
100
index XXXXXXX..XXXXXXX 100644
101
--- a/tcg/i386/tcg-target.c.inc
102
+++ b/tcg/i386/tcg-target.c.inc
103
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
104
tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
105
}
106
107
-static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
108
+static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
109
{
110
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
111
tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
112
}
113
114
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
115
tcg_out_ext32u(s, a0, a1);
116
break;
117
case INDEX_op_ext_i32_i64:
118
- case INDEX_op_ext32s_i64:
119
tcg_out_ext32s(s, a0, a1);
120
break;
121
case INDEX_op_extrh_i64_i32:
122
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
123
case INDEX_op_ext16s_i64:
124
case INDEX_op_ext16u_i32:
125
case INDEX_op_ext16u_i64:
126
+ case INDEX_op_ext32s_i64:
127
default:
128
g_assert_not_reached();
129
}
130
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
131
index XXXXXXX..XXXXXXX 100644
132
--- a/tcg/loongarch64/tcg-target.c.inc
133
+++ b/tcg/loongarch64/tcg-target.c.inc
134
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
135
tcg_out_ext32u(s, a0, a1);
136
break;
137
138
- case INDEX_op_ext32s_i64:
139
case INDEX_op_extrl_i64_i32:
140
case INDEX_op_ext_i32_i64:
141
tcg_out_ext32s(s, a0, a1);
142
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
143
case INDEX_op_ext16s_i64:
144
case INDEX_op_ext16u_i32:
145
case INDEX_op_ext16u_i64:
146
+ case INDEX_op_ext32s_i64:
147
default:
148
g_assert_not_reached();
149
}
150
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
151
index XXXXXXX..XXXXXXX 100644
152
--- a/tcg/mips/tcg-target.c.inc
153
+++ b/tcg/mips/tcg-target.c.inc
154
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
155
tcg_out_opc_imm(s, OPC_ANDI, rd, rs, 0xffff);
156
}
157
158
+static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
159
+{
160
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
161
+ tcg_out_opc_sa(s, OPC_SLL, rd, rs, 0);
162
+}
163
+
164
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
165
tcg_target_long imm)
166
{
167
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
168
/* delay slot */
169
if (TCG_TARGET_REG_BITS == 64 && l->type == TCG_TYPE_I32) {
170
/* we always sign-extend 32-bit loads */
171
- tcg_out_opc_sa(s, OPC_SLL, v0, TCG_REG_V0, 0);
172
+ tcg_out_ext32s(s, v0, TCG_REG_V0);
173
} else {
174
tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO);
175
}
176
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
177
case INDEX_op_extrh_i64_i32:
178
tcg_out_dsra(s, a0, a1, 32);
179
break;
180
- case INDEX_op_ext32s_i64:
181
case INDEX_op_ext_i32_i64:
182
case INDEX_op_extrl_i64_i32:
183
- tcg_out_opc_sa(s, OPC_SLL, a0, a1, 0);
184
+ tcg_out_ext32s(s, a0, a1);
185
break;
186
case INDEX_op_ext32u_i64:
187
case INDEX_op_extu_i32_i64:
188
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
189
case INDEX_op_ext8u_i64:
190
case INDEX_op_ext16s_i32:
191
case INDEX_op_ext16s_i64:
192
+ case INDEX_op_ext32s_i64:
193
default:
194
g_assert_not_reached();
195
}
196
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
197
index XXXXXXX..XXXXXXX 100644
198
--- a/tcg/ppc/tcg-target.c.inc
199
+++ b/tcg/ppc/tcg-target.c.inc
200
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
201
tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
202
}
203
204
-static inline void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
205
+static void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
206
{
207
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
208
tcg_out32(s, EXTSW | RA(dst) | RS(src));
209
}
210
211
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
212
break;
213
214
case INDEX_op_ext_i32_i64:
215
- case INDEX_op_ext32s_i64:
216
tcg_out_ext32s(s, args[0], args[1]);
217
break;
218
case INDEX_op_extu_i32_i64:
219
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
220
case INDEX_op_ext16s_i64:
221
case INDEX_op_ext16u_i32:
222
case INDEX_op_ext16u_i64:
223
+ case INDEX_op_ext32s_i64:
224
default:
225
g_assert_not_reached();
226
}
227
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
228
index XXXXXXX..XXXXXXX 100644
229
--- a/tcg/riscv/tcg-target.c.inc
230
+++ b/tcg/riscv/tcg-target.c.inc
231
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
232
tcg_out_ext32u(s, a0, a1);
233
break;
234
235
- case INDEX_op_ext32s_i64:
236
case INDEX_op_extrl_i64_i32:
237
case INDEX_op_ext_i32_i64:
238
tcg_out_ext32s(s, a0, a1);
239
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
240
case INDEX_op_ext16s_i64:
241
case INDEX_op_ext16u_i32:
242
case INDEX_op_ext16u_i64:
243
+ case INDEX_op_ext32s_i64:
244
default:
245
g_assert_not_reached();
246
}
247
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
248
index XXXXXXX..XXXXXXX 100644
249
--- a/tcg/s390x/tcg-target.c.inc
250
+++ b/tcg/s390x/tcg-target.c.inc
251
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
252
tcg_out_insn(s, RRE, LLGHR, dest, src);
253
}
254
255
-static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
256
+static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
257
{
258
tcg_out_insn(s, RRE, LGFR, dest, src);
259
}
260
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
261
case MO_SL | MO_BSWAP:
262
/* swapped sign-extended int load */
263
tcg_out_insn(s, RXY, LRV, data, base, index, disp);
264
- tgen_ext32s(s, data, data);
265
+ tcg_out_ext32s(s, data, data);
266
break;
267
case MO_SL:
268
tcg_out_insn(s, RXY, LGF, data, base, index, disp);
269
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
270
a0 = args[0], a1 = args[1], a2 = args[2];
271
tcg_out_insn(s, RRE, LRVR, a0, a1);
272
if (a2 & TCG_BSWAP_OS) {
273
- tgen_ext32s(s, a0, a0);
274
+ tcg_out_ext32s(s, a0, a0);
275
} else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
276
tgen_ext32u(s, a0, a0);
75
}
277
}
76
assert_page_locked(pd);
278
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
77
- tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
279
break;
78
+ tb_invalidate_phys_page_range__locked(pages, pd, start, bound - 1, 0);
280
79
}
281
case INDEX_op_ext_i32_i64:
80
page_collection_unlock(pages);
282
- case INDEX_op_ext32s_i64:
81
}
283
- tgen_ext32s(s, args[0], args[1]);
82
@@ -XXX,XX +XXX,XX @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
284
+ tcg_out_ext32s(s, args[0], args[1]);
83
}
285
break;
84
286
case INDEX_op_extu_i32_i64:
85
assert_page_locked(p);
287
case INDEX_op_ext32u_i64:
86
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
288
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
87
+ tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
289
case INDEX_op_ext16s_i64:
88
}
290
case INDEX_op_ext16u_i32:
89
291
case INDEX_op_ext16u_i64:
90
/*
292
+ case INDEX_op_ext32s_i64:
293
default:
294
g_assert_not_reached();
295
}
296
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
297
index XXXXXXX..XXXXXXX 100644
298
--- a/tcg/sparc64/tcg-target.c.inc
299
+++ b/tcg/sparc64/tcg-target.c.inc
300
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
301
tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
302
}
303
304
+static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
305
+{
306
+ tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
307
+}
308
+
309
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
310
tcg_target_long imm)
311
{
312
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
313
314
/* We let the helper sign-extend SB and SW, but leave SL for here. */
315
if (is_64 && (memop & MO_SSIZE) == MO_SL) {
316
- tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
317
+ tcg_out_ext32s(s, data, TCG_REG_O0);
318
} else {
319
tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
320
}
321
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
322
c = ARITH_UDIVX;
323
goto gen_arith;
324
case INDEX_op_ext_i32_i64:
325
- case INDEX_op_ext32s_i64:
326
- tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
327
+ tcg_out_ext32s(s, a0, a1);
328
break;
329
case INDEX_op_extu_i32_i64:
330
case INDEX_op_ext32u_i64:
331
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
332
case INDEX_op_ext16s_i64:
333
case INDEX_op_ext16u_i32:
334
case INDEX_op_ext16u_i64:
335
+ case INDEX_op_ext32s_i64:
336
default:
337
g_assert_not_reached();
338
}
339
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
340
index XXXXXXX..XXXXXXX 100644
341
--- a/tcg/tci/tcg-target.c.inc
342
+++ b/tcg/tci/tcg-target.c.inc
343
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
344
}
345
}
346
347
+static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
348
+{
349
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
350
+ tcg_debug_assert(TCG_TARGET_HAS_ext32s_i64);
351
+ tcg_out_op_rr(s, INDEX_op_ext32s_i64, rd, rs);
352
+}
353
+
354
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
355
tcg_target_long imm)
356
{
357
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
358
359
CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */
360
CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */
361
- CASE_64(ext32s) /* Optional (TCG_TARGET_HAS_ext32s_i64). */
362
CASE_64(ext32u) /* Optional (TCG_TARGET_HAS_ext32u_i64). */
363
CASE_64(ext_i32)
364
CASE_64(extu_i32)
365
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
366
case INDEX_op_ext16s_i64:
367
case INDEX_op_ext16u_i32:
368
case INDEX_op_ext16u_i64:
369
+ case INDEX_op_ext32s_i64:
370
default:
371
g_assert_not_reached();
372
}
91
--
373
--
92
2.34.1
374
2.34.1
93
375
94
376
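
A note on the contract being centralized here: every tcg_out_ext32s
implementation above must compute the same thing, namely copy bits 0-31
and replicate bit 31 into bits 32-63. A standalone model in portable C
(illustrative only; assumes the usual two's-complement conversion):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t ext32s(uint64_t rs)
    {
        return (uint64_t)(int64_t)(int32_t)rs;  /* sign-extend bit 31 upward */
    }

    int main(void)
    {
        assert(ext32s(0x000000007fffffffull) == 0x000000007fffffffull);
        assert(ext32s(0x0000000080000000ull) == 0xffffffff80000000ull);
        assert(ext32s(0x1234567880000000ull) == 0xffffffff80000000ull);
        return 0;
    }

The arm backend can simply g_assert_not_reached(), since a 32-bit
backend never sees a 64-bit extension opcode.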
We have been enforcing host page alignment for the non-R
fallback of MAX_RESERVED_VA, but failing to enforce for -R.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
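[Not for commit: for power-of-two page sizes the modulo below is just a
mask test. A tiny standalone illustration of the new check, with
qemu_host_page_size mocked as a 4 KiB constant:]

    #include <stdio.h>

    int main(void)
    {
        unsigned long qemu_host_page_size = 0x1000;  /* mocked host page size */
        unsigned long reserved_va = 0xfffff800ul;    /* half a page: rejected */

        if (reserved_va % qemu_host_page_size) {     /* == (va & (size - 1)) here */
            fprintf(stderr, "Reserved virtual address not aligned mod 4 KiB\n");
            return 1;
        }
        return 0;
    }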
 linux-user/main.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/linux-user/main.c b/linux-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
      */
     max_reserved_va = MAX_RESERVED_VA(cpu);
     if (reserved_va != 0) {
+        if (reserved_va % qemu_host_page_size) {
+            char *s = size_to_str(qemu_host_page_size);
+            fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
+            g_free(s);
+            exit(EXIT_FAILURE);
+        }
         if (max_reserved_va && reserved_va > max_reserved_va) {
             fprintf(stderr, "Reserved virtual address too big\n");
             exit(EXIT_FAILURE);
--
2.34.1


We will need a backend interface for performing 32-bit zero-extend.
Use it in tcg_reg_alloc_op in the meantime.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        |  4 ++++
 tcg/aarch64/tcg-target.c.inc     |  9 +++++++--
 tcg/arm/tcg-target.c.inc         |  5 +++++
 tcg/i386/tcg-target.c.inc        |  4 ++--
 tcg/loongarch64/tcg-target.c.inc |  2 +-
 tcg/mips/tcg-target.c.inc        |  3 ++-
 tcg/ppc/tcg-target.c.inc         |  4 +++-
 tcg/riscv/tcg-target.c.inc       |  2 +-
 tcg/s390x/tcg-target.c.inc       | 20 ++++++++++----------
 tcg/sparc64/tcg-target.c.inc     | 17 +++++++++++------
 tcg/tci/tcg-target.c.inc         |  9 ++++++++-
 11 files changed, 54 insertions(+), 25 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_ext32s_i64:
         tcg_out_ext32s(s, new_args[0], new_args[1]);
         break;
+    case INDEX_op_ext32u_i64:
+        tcg_out_ext32u(s, new_args[0], new_args[1]);
+        break;
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
     tcg_out_uxt(s, MO_16, rd, rn);
 }
 
+static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    tcg_out_movr(s, TCG_TYPE_I32, rd, rn);
+}
+
 static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
                             TCGReg rn, int64_t aimm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
         break;
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_ext32u_i64:
-        tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
+        tcg_out_ext32u(s, a0, a1);
         break;
 
     case INDEX_op_deposit_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
     g_assert_not_reached();
 }
 
+static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    g_assert_not_reached();
+}
+
 static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                             TCGReg rd, TCGReg rn, int flags)
 {
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
     tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
 }
 
-static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
+static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
 {
     /* 32-bit mov zero extends. */
     tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_bswap64(s, a0);
         break;
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_ext32u_i64:
     case INDEX_op_extrl_i64_i32:
         tcg_out_ext32u(s, a0, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
         break;
 
-    case INDEX_op_ext32u_i64:
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, a0, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg)
 
 static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
 {
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
     if (use_mips32r2_instructions) {
         tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0);
     } else {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_extrl_i64_i32:
         tcg_out_ext32s(s, a0, a1);
         break;
-    case INDEX_op_ext32u_i64:
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, a0, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16s_i32:
     case INDEX_op_ext16s_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
     tcg_out32(s, EXTSW | RA(dst) | RS(src));
 }
 
-static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
+static void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
 {
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
     tcg_out_rld(s, RLDICL, dst, src, 0, 32);
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_qemu_st(s, args, true);
         break;
 
-    case INDEX_op_ext32u_i64:
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, a0, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
     tcg_out_insn(s, RRE, LGFR, dest, src);
 }
 
-static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
+static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
 {
     tcg_out_insn(s, RRE, LLGFR, dest, src);
 }
@@ -XXX,XX +XXX,XX @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
 
     /* Look for the zero-extensions. */
     if ((val & valid) == 0xffffffff) {
-        tgen_ext32u(s, dest, dest);
+        tcg_out_ext32u(s, dest, dest);
         return;
     }
     if ((val & valid) == 0xff) {
@@ -XXX,XX +XXX,XX @@ static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
     /* With MIE3, and bit 0 of m4 set, we get the complete result. */
     if (HAVE_FACILITY(MISC_INSN_EXT3)) {
         if (type == TCG_TYPE_I32) {
-            tgen_ext32u(s, dest, src);
+            tcg_out_ext32u(s, dest, src);
             src = dest;
         }
         tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
     case MO_UL | MO_BSWAP:
         /* swapped unsigned int load with upper bits zeroed */
         tcg_out_insn(s, RXY, LRV, data, base, index, disp);
-        tgen_ext32u(s, data, data);
+        tcg_out_ext32u(s, data, data);
         break;
     case MO_UL:
         tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
                  offsetof(CPUTLBEntry, addend));
 
     if (TARGET_LONG_BITS == 32) {
-        tgen_ext32u(s, TCG_REG_R3, addr_reg);
+        tcg_out_ext32u(s, TCG_REG_R3, addr_reg);
         return TCG_REG_R3;
     }
     return addr_reg;
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
         tcg_out_ext16u(s, TCG_REG_R4, data_reg);
         break;
     case MO_UL:
-        tgen_ext32u(s, TCG_REG_R4, data_reg);
+        tcg_out_ext32u(s, TCG_REG_R4, data_reg);
         break;
     case MO_UQ:
         tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
@@ -XXX,XX +XXX,XX @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                   TCGReg *index_reg, tcg_target_long *disp)
 {
     if (TARGET_LONG_BITS == 32) {
-        tgen_ext32u(s, TCG_TMP0, *addr_reg);
+        tcg_out_ext32u(s, TCG_TMP0, *addr_reg);
         *addr_reg = TCG_TMP0;
     }
     if (guest_base < 0x80000) {
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (a2 & TCG_BSWAP_OS) {
             tcg_out_ext32s(s, a0, a0);
         } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
-            tgen_ext32u(s, a0, a0);
+            tcg_out_ext32u(s, a0, a0);
         }
         break;
 
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_ext32s(s, args[0], args[1]);
         break;
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_ext32u_i64:
-        tgen_ext32u(s, args[0], args[1]);
+        tcg_out_ext32u(s, args[0], args[1]);
         break;
 
     case INDEX_op_add2_i64:
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
 }
 
+static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void emit_extend(TCGContext *s, TCGReg r, int op)
         tcg_out_ext16u(s, r, r);
         break;
     case MO_32:
-        tcg_out_arith(s, r, r, 0, SHIFT_SRL);
+        tcg_out_ext32u(s, r, r);
         break;
     case MO_64:
         break;
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
 
     /* If the guest address must be zero-extended, do so now. */
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
+        tcg_out_ext32u(s, r0, addr);
         return r0;
     }
     return addr;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
     unsigned t_bits;
 
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
+        tcg_out_ext32u(s, TCG_REG_T1, addr);
         addr = TCG_REG_T1;
     }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
     unsigned t_bits;
 
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
+        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
     }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_ext32s(s, a0, a1);
         break;
     case INDEX_op_extu_i32_i64:
-    case INDEX_op_ext32u_i64:
-        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
+        tcg_out_ext32u(s, a0, a1);
         break;
     case INDEX_op_extrl_i64_i32:
         tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_op_rr(s, INDEX_op_ext32s_i64, rd, rs);
 }
 
+static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+    tcg_debug_assert(TCG_TARGET_HAS_ext32u_i64);
+    tcg_out_op_rr(s, INDEX_op_ext32u_i64, rd, rs);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     CASE_32_64(neg)      /* Optional (TCG_TARGET_HAS_neg_*). */
     CASE_32_64(not)      /* Optional (TCG_TARGET_HAS_not_*). */
-    CASE_64(ext32u)      /* Optional (TCG_TARGET_HAS_ext32u_i64). */
     CASE_64(ext_i32)
     CASE_64(extu_i32)
     CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext32u_i64:
     default:
         g_assert_not_reached();
     }
--
2.34.1
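
As with the sign-extend hook, all of the tcg_out_ext32u implementations
above share one contract: keep bits 0-31 and clear bits 32-63. A
standalone model (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t ext32u(uint64_t rs)
    {
        return (uint32_t)rs;    /* truncate, then widen with zero fill */
    }

    int main(void)
    {
        assert(ext32u(0xffffffff80000000ull) == 0x0000000080000000ull);
        assert(ext32u(0x00000000deadbeefull) == 0xdeadbeefull);
        return 0;
    }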
Pass the address of the last byte to be changed, rather than
the first address past the last byte. This avoids overflow
when the last page of the address space is involved.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
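[Not for commit: the `addr | ~TARGET_PAGE_MASK` idiom used below yields
the inclusive last byte of a page without ever forming a one-past-the-end
address. A standalone sketch, with a 4 KiB page for concreteness:]

    #include <assert.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK ((uint32_t)-1 << TARGET_PAGE_BITS)

    int main(void)
    {
        uint32_t addr = 0xfffff123u;                       /* in the last page */
        assert((addr & TARGET_PAGE_MASK) == 0xfffff000u);  /* page start */
        assert((addr | ~TARGET_PAGE_MASK) == 0xffffffffu); /* page last byte */
        return 0;
    }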
 accel/tcg/tb-maint.c | 28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -XXX,XX +XXX,XX @@ static void tb_remove(TranslationBlock *tb)
 }
 
 /* TODO: For now, still shared with translate-all.c for system mode. */
-#define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N)    \
-    for (T = foreach_tb_first(start, end),              \
-         N = foreach_tb_next(T, start, end);            \
+#define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N)   \
+    for (T = foreach_tb_first(start, last),             \
+         N = foreach_tb_next(T, start, last);           \
         T != NULL;                                      \
-         T = N, N = foreach_tb_next(N, start, end))
+         T = N, N = foreach_tb_next(N, start, last))
 
 typedef TranslationBlock *PageForEachNext;
 
 static PageForEachNext foreach_tb_first(tb_page_addr_t start,
-                                        tb_page_addr_t end)
+                                        tb_page_addr_t last)
 {
-    IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1);
+    IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last);
     return n ? container_of(n, TranslationBlock, itree) : NULL;
 }
 
 static PageForEachNext foreach_tb_next(PageForEachNext tb,
                                        tb_page_addr_t start,
-                                       tb_page_addr_t end)
+                                       tb_page_addr_t last)
 {
     IntervalTreeNode *n;
 
     if (tb) {
-        n = interval_tree_iter_next(&tb->itree, start, end - 1);
+        n = interval_tree_iter_next(&tb->itree, start, last);
         if (n) {
             return container_of(n, TranslationBlock, itree);
         }
@@ -XXX,XX +XXX,XX @@ struct page_collection {
 };
 
 typedef int PageForEachNext;
-#define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \
+#define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \
     TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
 
 #ifdef CONFIG_DEBUG_TCG
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
 {
     TranslationBlock *tb;
     PageForEachNext n;
+    tb_page_addr_t last = end - 1;
 
     assert_memory_lock();
 
-    PAGE_FOR_EACH_TB(start, end, unused, tb, n) {
+    PAGE_FOR_EACH_TB(start, last, unused, tb, n) {
         tb_phys_invalidate__locked(tb);
     }
 }
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
     bool current_tb_modified;
     TranslationBlock *tb;
     PageForEachNext n;
+    tb_page_addr_t last;
 
     /*
      * Without precise smc semantics, or when outside of a TB,
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
     assert_memory_lock();
     current_tb = tcg_tb_lookup(pc);
 
+    last = addr | ~TARGET_PAGE_MASK;
     addr &= TARGET_PAGE_MASK;
     current_tb_modified = false;
 
-    PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) {
+    PAGE_FOR_EACH_TB(addr, last, unused, tb, n) {
         if (current_tb == tb &&
             (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
             /*
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
     bool current_tb_modified = false;
     TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
 #endif /* TARGET_HAS_PRECISE_SMC */
+    tb_page_addr_t last G_GNUC_UNUSED = end - 1;
 
     /*
      * We remove all the TBs in the range [start, end[.
      * XXX: see if in some cases it could be faster to invalidate all the code
      */
-    PAGE_FOR_EACH_TB(start, end, p, tb, n) {
+    PAGE_FOR_EACH_TB(start, last, p, tb, n) {
         /* NOTE: this is subtle as a TB may span two physical pages */
         if (n == 0) {
             /* NOTE: tb_end may be after the end of the page, but
--
2.34.1


We will need a backend interface for type extension with sign.
Use it in tcg_reg_alloc_op in the meantime.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        |  4 ++++
 tcg/aarch64/tcg-target.c.inc     |  9 ++++++---
 tcg/arm/tcg-target.c.inc         |  5 +++++
 tcg/i386/tcg-target.c.inc        |  9 ++++++---
 tcg/loongarch64/tcg-target.c.inc |  7 ++++++-
 tcg/mips/tcg-target.c.inc        |  7 ++++++-
 tcg/ppc/tcg-target.c.inc         |  9 ++++++---
 tcg/riscv/tcg-target.c.inc       |  7 ++++++-
 tcg/s390x/tcg-target.c.inc       |  9 ++++++---
 tcg/sparc64/tcg-target.c.inc     |  9 ++++++---
 tcg/tci/tcg-target.c.inc         |  7 ++++++-
 11 files changed, 63 insertions(+), 19 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_ext32u_i64:
         tcg_out_ext32u(s, new_args[0], new_args[1]);
         break;
+    case INDEX_op_ext_i32_i64:
+        tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
+        break;
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
     tcg_out_sxt(s, TCG_TYPE_I64, MO_32, rd, rn);
 }
 
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    tcg_out_ext32s(s, rd, rn);
+}
+
 static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits,
                                TCGReg rd, TCGReg rn)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
-    case INDEX_op_ext_i32_i64:
-        tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
-        break;
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, a0, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext32s_i64:
     case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
     g_assert_not_reached();
 }
 
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    g_assert_not_reached();
+}
+
 static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                             TCGReg rd, TCGReg rn, int flags)
 {
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
     tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
 }
 
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
+{
+    tcg_out_ext32s(s, dest, src);
+}
+
 static inline void tcg_out_bswap64(TCGContext *s, int reg)
 {
     tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_extrl_i64_i32:
         tcg_out_ext32u(s, a0, a1);
         break;
-    case INDEX_op_ext_i32_i64:
-        tcg_out_ext32s(s, a0, a1);
-        break;
     case INDEX_op_extrh_i64_i32:
         tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
     case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
     tcg_out_opc_addi_w(s, ret, arg, 0);
 }
 
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+    tcg_out_ext32s(s, ret, arg);
+}
+
 static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
                            TCGReg a0, TCGReg a1, TCGReg a2,
                            bool c2, bool is_32bit)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_extrl_i64_i32:
-    case INDEX_op_ext_i32_i64:
         tcg_out_ext32s(s, a0, a1);
         break;
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i32:
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
+    case INDEX_op_ext_i32_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_opc_sa(s, OPC_SLL, rd, rs, 0);
 }
 
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_out_ext32s(s, rd, rs);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_extrh_i64_i32:
         tcg_out_dsra(s, a0, a1, 32);
         break;
-    case INDEX_op_ext_i32_i64:
     case INDEX_op_extrl_i64_i32:
         tcg_out_ext32s(s, a0, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16s_i64:
     case INDEX_op_ext32s_i64:
     case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
     tcg_out_rld(s, RLDICL, dst, src, 0, 32);
 }
 
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
+{
+    tcg_out_ext32s(s, dst, src);
+}
+
 static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
 {
     tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_qemu_st(s, args, true);
         break;
 
-    case INDEX_op_ext_i32_i64:
-        tcg_out_ext32s(s, args[0], args[1]);
-        break;
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
     case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
     tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
 }
 
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+    tcg_out_ext32s(s, ret, arg);
+}
+
 static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                          TCGReg addr, intptr_t offset)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_extrl_i64_i32:
-    case INDEX_op_ext_i32_i64:
         tcg_out_ext32s(s, a0, a1);
         break;
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
     case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
     tcg_out_insn(s, RRE, LLGFR, dest, src);
 }
 
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
+{
+    tcg_out_ext32s(s, dest, src);
+}
+
 static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
 {
     int msb, lsb;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
 
-    case INDEX_op_ext_i32_i64:
-        tcg_out_ext32s(s, args[0], args[1]);
-        break;
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
     case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
 }
 
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_out_ext32s(s, rd, rs);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_divu_i64:
         c = ARITH_UDIVX;
         goto gen_arith;
-    case INDEX_op_ext_i32_i64:
-        tcg_out_ext32s(s, a0, a1);
-        break;
     case INDEX_op_extu_i32_i64:
         tcg_out_ext32u(s, a0, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
     case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_op_rr(s, INDEX_op_ext32u_i64, rd, rs);
 }
 
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
+{
+    tcg_out_ext32s(s, rd, rs);
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     CASE_32_64(neg)      /* Optional (TCG_TARGET_HAS_neg_*). */
     CASE_32_64(not)      /* Optional (TCG_TARGET_HAS_not_*). */
-    CASE_64(ext_i32)
     CASE_64(extu_i32)
     CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
     case INDEX_op_bswap32_i32:  /* Optional (TCG_TARGET_HAS_bswap32_i32). */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext16u_i64:
     case INDEX_op_ext32s_i64:
     case INDEX_op_ext32u_i64:
+    case INDEX_op_ext_i32_i64:
     default:
         g_assert_not_reached();
     }
--
2.34.1
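
One design note on the new hook: every 64-bit backend above implements
tcg_out_exts_i32_i64 by just forwarding to tcg_out_ext32s. The value of
the indirection is that a backend which already keeps 32-bit values
sign-extended in registers can later turn this into a plain move, as the
riscv patch at the end of this series does. The operation itself, as a
standalone model (illustrative only; two's-complement assumed):

    #include <assert.h>
    #include <stdint.h>

    /* ext_i32_i64: widen an i32 temporary to i64 with sign. */
    static uint64_t ext_i32_i64(uint32_t lo)
    {
        return (uint64_t)(int64_t)(int32_t)lo;
    }

    int main(void)
    {
        assert(ext_i32_i64(0xfffffffeu) == 0xfffffffffffffffeull);  /* -2 */
        assert(ext_i32_i64(0x00000002u) == 2ull);
        return 0;
    }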
Change the semantics to be the last byte of the guest va, rather
than the following byte. This avoids some overflow conditions.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
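[Not for commit: two details of the cpu-all.h hunk below are easy to
misread. First, `(reserved_va ? : GUEST_ADDR_MAX_)` is the GNU C
conditional with an omitted middle operand, equivalent to
`reserved_va ? reserved_va : GUEST_ADDR_MAX_`. Second, with last-byte
semantics a full 4 GiB guest space fits in 32 bits, where the old
byte-count form needs 33. Standalone sketch (GNU C):]

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long reserved_va = 0xfffffffful;  /* last byte of a 4 GiB space */

        /* GNU ?: with omitted middle operand, as in the new GUEST_ADDR_MAX. */
        unsigned long guest_addr_max = reserved_va ? : 0x7ffffffful;
        assert(guest_addr_max == 0xfffffffful);

        /* The size needs 33 bits; the last byte needs only 32. */
        printf("size = 0x%llx, last = 0x%lx\n",
               (unsigned long long)reserved_va + 1, reserved_va);
        return 0;
    }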
 include/exec/cpu-all.h      | 11 ++++++++++-
 linux-user/arm/target_cpu.h |  2 +-
 bsd-user/main.c             | 10 +++-------
 bsd-user/mmap.c             |  4 ++--
 linux-user/elfload.c        | 21 +++++++++++----------
 linux-user/main.c           | 27 +++++++++++++--------------
 linux-user/mmap.c           |  4 ++--
 7 files changed, 42 insertions(+), 37 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ static inline void tswap64s(uint64_t *s)
  */
 extern uintptr_t guest_base;
 extern bool have_guest_base;
+
+/*
+ * If non-zero, the guest virtual address space is a contiguous subset
+ * of the host virtual address space, i.e. '-R reserved_va' is in effect
+ * either from the command-line or by default.  The value is the last
+ * byte of the guest address space e.g. UINT32_MAX.
+ *
+ * If zero, the host and guest virtual address spaces are intermingled.
+ */
 extern unsigned long reserved_va;
 
 /*
@@ -XXX,XX +XXX,XX @@ extern unsigned long reserved_va;
 #define GUEST_ADDR_MAX_ \
     ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
      UINT32_MAX : ~0ul)
-#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
+#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
 
 #else
 
diff --git a/linux-user/arm/target_cpu.h b/linux-user/arm/target_cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/arm/target_cpu.h
+++ b/linux-user/arm/target_cpu.h
@@ -XXX,XX +XXX,XX @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
          * the high addresses.  Restrict linux-user to the
          * cached write-back RAM in the system map.
          */
-        return 0x80000000ul;
+        return 0x7ffffffful;
     } else {
         /*
          * We need to be able to map the commpage.
diff --git a/bsd-user/main.c b/bsd-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -XXX,XX +XXX,XX @@ bool have_guest_base;
 # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
 #  if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
       (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/*
- * There are a number of places where we assign reserved_va to a variable
- * of type abi_ulong and expect it to fit. Avoid the last page.
- */
-#   define MAX_RESERVED_VA  (0xfffffffful & TARGET_PAGE_MASK)
+#   define MAX_RESERVED_VA  0xfffffffful
 #  else
-#   define MAX_RESERVED_VA  (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+#   define MAX_RESERVED_VA  ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
 #  endif
 # else
 #  define MAX_RESERVED_VA  0
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
     envlist_free(envlist);
 
     if (reserved_va) {
-        mmap_next_start = reserved_va;
+        mmap_next_start = reserved_va + 1;
     }
 
     {
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -XXX,XX +XXX,XX @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
     size = HOST_PAGE_ALIGN(size) + alignment;
     end_addr = start + size;
     if (end_addr > reserved_va) {
-        end_addr = reserved_va;
+        end_addr = reserved_va + 1;
     }
     addr = end_addr - qemu_host_page_size;
 
@@ -XXX,XX +XXX,XX @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
         if (looped) {
             return (abi_ulong)-1;
         }
-        end_addr = reserved_va;
+        end_addr = reserved_va + 1;
         addr = end_addr - qemu_host_page_size;
         looped = 1;
         continue;
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
      * has specified -R reserved_va, which would trigger an assert().
      */
     if (reserved_va != 0 &&
-        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
+        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
         error_report("Cannot allocate vsyscall page");
         exit(EXIT_FAILURE);
     }
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
 
     /* Sanity check the guest binary. */
     if (reserved_va) {
-        if (guest_hiaddr > reserved_va) {
+        if (guest_hiaddr - 1 > reserved_va) {
             error_report("%s: requires more than reserved virtual "
                          "address space (0x%" PRIx64 " > 0x%lx)",
-                         image_name, (uint64_t)guest_hiaddr, reserved_va);
+                         image_name, (uint64_t)guest_hiaddr - 1,
+                         reserved_va);
             exit(EXIT_FAILURE);
         }
     } else {
@@ -XXX,XX +XXX,XX @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
     if (reserved_va) {
         guest_loaddr = (guest_base >= mmap_min_addr ? 0
                         : mmap_min_addr - guest_base);
-        guest_hiaddr = reserved_va;
+        guest_hiaddr = reserved_va + 1;
     }
 
     /* Reserve the address space for the binary, or reserved_va. */
@@ -XXX,XX +XXX,XX @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
     int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
     void *addr, *test;
 
-    if (guest_hiaddr > reserved_va) {
+    if (guest_hiaddr - 1 > reserved_va) {
         error_report("%s: requires more than reserved virtual "
                      "address space (0x%" PRIx64 " > 0x%lx)",
-                     image_name, (uint64_t)guest_hiaddr, reserved_va);
+                     image_name, (uint64_t)guest_hiaddr - 1, reserved_va);
         exit(EXIT_FAILURE);
     }
 
     /* Widen the "image" to the entire reserved address space. */
-    pgb_static(image_name, 0, reserved_va, align);
+    pgb_static(image_name, 0, reserved_va + 1, align);
 
     /* osdep.h defines this as 0 if it's missing */
     flags |= MAP_FIXED_NOREPLACE;
@@ -XXX,XX +XXX,XX @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
     /* Reserve the memory on the host. */
     assert(guest_base != 0);
     test = g2h_untagged(0);
-    addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
+    addr = mmap(test, reserved_va + 1, PROT_NONE, flags, -1, 0);
     if (addr == MAP_FAILED || addr != test) {
         error_report("Unable to reserve 0x%lx bytes of virtual address "
                      "space at %p (%s) for use as guest address space (check your "
                      "virtual memory ulimit setting, min_mmap_addr or reserve less "
-                     "using -R option)", reserved_va, test, strerror(errno));
+                     "using -R option)", reserved_va + 1, test, strerror(errno));
         exit(EXIT_FAILURE);
     }
 
     qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
-                  __func__, addr, reserved_va);
+                  __func__, addr, reserved_va + 1);
 }
 
 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
diff --git a/linux-user/main.c b/linux-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -XXX,XX +XXX,XX @@ static const char *last_log_filename;
 # if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
 #  if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
       (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/* There are a number of places where we assign reserved_va to a variable


We will need a backend interface for type extension with zero.
Use it in tcg_reg_alloc_op in the meantime.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        |  4 ++++
 tcg/aarch64/tcg-target.c.inc     | 10 ++++++----
 tcg/arm/tcg-target.c.inc         |  5 +++++
 tcg/i386/tcg-target.c.inc        |  7 ++++++-
 tcg/loongarch64/tcg-target.c.inc | 10 ++++++----
 tcg/mips/tcg-target.c.inc        |  9 ++++++---
 tcg/ppc/tcg-target.c.inc         | 10 ++++++----
 tcg/riscv/tcg-target.c.inc       | 10 ++++++----
 tcg/s390x/tcg-target.c.inc       | 10 ++++++----
 tcg/sparc64/tcg-target.c.inc     |  9 ++++++---
 tcg/tci/tcg-target.c.inc         |  7 ++++++-
 11 files changed, 63 insertions(+), 28 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_ext_i32_i64:
         tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
         break;
+    case INDEX_op_extu_i32_i64:
+        tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
+        break;
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
     tcg_out_movr(s, TCG_TYPE_I32, rd, rn);
 }
 
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    tcg_out_ext32u(s, rd, rn);
+}
+
 static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
                             TCGReg rn, int64_t aimm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
         break;
-    case INDEX_op_extu_i32_i64:
-        tcg_out_ext32u(s, a0, a1);
-        break;
-
     case INDEX_op_deposit_i64:
     case INDEX_op_deposit_i32:
         tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext32s_i64:
     case INDEX_op_ext32u_i64:
     case INDEX_op_ext_i32_i64:
+    case INDEX_op_extu_i32_i64:
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
     g_assert_not_reached();
 }
 
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+    g_assert_not_reached();
+}
+
 static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                             TCGReg rd, TCGReg rn, int flags)
 {
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
     tcg_out_ext32s(s, dest, src);
 }
 
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
+{
+    tcg_out_ext32u(s, dest, src);
+}
+
 static inline void tcg_out_bswap64(TCGContext *s, int reg)
 {
     tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_bswap64_i64:
         tcg_out_bswap64(s, a0);
         break;
-    case INDEX_op_extu_i32_i64:
     case INDEX_op_extrl_i64_i32:
         tcg_out_ext32u(s, a0, a1);
         break;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ext32s_i64:
194
- of type abi_ulong and expect it to fit. Avoid the last page. */
119
case INDEX_op_ext32u_i64:
195
-# define MAX_RESERVED_VA(CPU) (0xfffffffful & TARGET_PAGE_MASK)
120
case INDEX_op_ext_i32_i64:
196
+# define MAX_RESERVED_VA(CPU) 0xfffffffful
121
+ case INDEX_op_extu_i32_i64:
197
# else
122
default:
198
-# define MAX_RESERVED_VA(CPU) (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
123
g_assert_not_reached();
199
+# define MAX_RESERVED_VA(CPU) ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
124
}
200
# endif
125
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
201
# else
126
index XXXXXXX..XXXXXXX 100644
202
# define MAX_RESERVED_VA(CPU) 0
127
--- a/tcg/loongarch64/tcg-target.c.inc
203
@@ -XXX,XX +XXX,XX @@ static void handle_arg_reserved_va(const char *arg)
128
+++ b/tcg/loongarch64/tcg-target.c.inc
204
{
129
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
205
char *p;
130
tcg_out_ext32s(s, ret, arg);
206
int shift = 0;
131
}
207
- reserved_va = strtoul(arg, &p, 0);
132
208
+ unsigned long val;
133
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
209
+
134
+{
210
+ val = strtoul(arg, &p, 0);
135
+ tcg_out_ext32u(s, ret, arg);
211
switch (*p) {
136
+}
212
case 'k':
137
+
213
case 'K':
138
static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
214
@@ -XXX,XX +XXX,XX @@ static void handle_arg_reserved_va(const char *arg)
139
TCGReg a0, TCGReg a1, TCGReg a2,
215
break;
140
bool c2, bool is_32bit)
216
}
141
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
217
if (shift) {
142
tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
218
- unsigned long unshifted = reserved_va;
143
break;
219
+ unsigned long unshifted = val;
144
220
p++;
145
- case INDEX_op_extu_i32_i64:
221
- reserved_va <<= shift;
146
- tcg_out_ext32u(s, a0, a1);
222
- if (reserved_va >> shift != unshifted) {
147
- break;
223
+ val <<= shift;
148
-
224
+ if (val >> shift != unshifted) {
149
case INDEX_op_extrl_i64_i32:
225
fprintf(stderr, "Reserved virtual address too big\n");
150
tcg_out_ext32s(s, a0, a1);
226
exit(EXIT_FAILURE);
151
break;
152
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
153
case INDEX_op_ext32s_i64:
154
case INDEX_op_ext32u_i64:
155
case INDEX_op_ext_i32_i64:
156
+ case INDEX_op_extu_i32_i64:
157
default:
158
g_assert_not_reached();
159
}
160
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
161
index XXXXXXX..XXXXXXX 100644
162
--- a/tcg/mips/tcg-target.c.inc
163
+++ b/tcg/mips/tcg-target.c.inc
164
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
165
tcg_out_ext32s(s, rd, rs);
166
}
167
168
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
169
+{
170
+ tcg_out_ext32u(s, rd, rs);
171
+}
172
+
173
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
174
tcg_target_long imm)
175
{
176
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
177
case INDEX_op_extrl_i64_i32:
178
tcg_out_ext32s(s, a0, a1);
179
break;
180
- case INDEX_op_extu_i32_i64:
181
- tcg_out_ext32u(s, a0, a1);
182
- break;
183
184
case INDEX_op_sar_i32:
185
i1 = OPC_SRAV, i2 = OPC_SRA;
186
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
187
case INDEX_op_ext32s_i64:
188
case INDEX_op_ext32u_i64:
189
case INDEX_op_ext_i32_i64:
190
+ case INDEX_op_extu_i32_i64:
191
default:
192
g_assert_not_reached();
193
}
194
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
195
index XXXXXXX..XXXXXXX 100644
196
--- a/tcg/ppc/tcg-target.c.inc
197
+++ b/tcg/ppc/tcg-target.c.inc
198
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
199
tcg_out_ext32s(s, dst, src);
200
}
201
202
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
203
+{
204
+ tcg_out_ext32u(s, dst, src);
205
+}
206
+
207
static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
208
{
209
tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
210
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
211
tcg_out_qemu_st(s, args, true);
212
break;
213
214
- case INDEX_op_extu_i32_i64:
215
- tcg_out_ext32u(s, args[0], args[1]);
216
- break;
217
-
218
case INDEX_op_setcond_i32:
219
tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
220
const_args[2]);
221
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
222
case INDEX_op_ext32s_i64:
223
case INDEX_op_ext32u_i64:
224
case INDEX_op_ext_i32_i64:
225
+ case INDEX_op_extu_i32_i64:
226
default:
227
g_assert_not_reached();
228
}
229
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
230
index XXXXXXX..XXXXXXX 100644
231
--- a/tcg/riscv/tcg-target.c.inc
232
+++ b/tcg/riscv/tcg-target.c.inc
233
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
234
tcg_out_ext32s(s, ret, arg);
235
}
236
237
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
238
+{
239
+ tcg_out_ext32u(s, ret, arg);
240
+}
241
+
242
static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
243
TCGReg addr, intptr_t offset)
244
{
245
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
246
tcg_out_qemu_st(s, args, true);
247
break;
248
249
- case INDEX_op_extu_i32_i64:
250
- tcg_out_ext32u(s, a0, a1);
251
- break;
252
-
253
case INDEX_op_extrl_i64_i32:
254
tcg_out_ext32s(s, a0, a1);
255
break;
256
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
257
case INDEX_op_ext32s_i64:
258
case INDEX_op_ext32u_i64:
259
case INDEX_op_ext_i32_i64:
260
+ case INDEX_op_extu_i32_i64:
261
default:
262
g_assert_not_reached();
263
}
264
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
265
index XXXXXXX..XXXXXXX 100644
266
--- a/tcg/s390x/tcg-target.c.inc
267
+++ b/tcg/s390x/tcg-target.c.inc
268
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
269
tcg_out_ext32s(s, dest, src);
270
}
271
272
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
273
+{
274
+ tcg_out_ext32u(s, dest, src);
275
+}
276
+
277
static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
278
{
279
int msb, lsb;
280
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
227
}
281
}
228
@@ -XXX,XX +XXX,XX @@ static void handle_arg_reserved_va(const char *arg)
282
break;
229
fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
283
230
exit(EXIT_FAILURE);
284
- case INDEX_op_extu_i32_i64:
231
}
285
- tcg_out_ext32u(s, args[0], args[1]);
232
+ /* The representation is size - 1, with 0 remaining "default". */
286
- break;
233
+ reserved_va = val ? val - 1 : 0;
287
-
234
}
288
case INDEX_op_add2_i64:
235
289
if (const_args[4]) {
236
static void handle_arg_singlestep(const char *arg)
290
if ((int64_t)args[4] >= 0) {
237
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
291
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
238
*/
292
case INDEX_op_ext32s_i64:
239
max_reserved_va = MAX_RESERVED_VA(cpu);
293
case INDEX_op_ext32u_i64:
240
if (reserved_va != 0) {
294
case INDEX_op_ext_i32_i64:
241
- if (reserved_va % qemu_host_page_size) {
295
+ case INDEX_op_extu_i32_i64:
242
+ if ((reserved_va + 1) % qemu_host_page_size) {
296
default:
243
char *s = size_to_str(qemu_host_page_size);
297
g_assert_not_reached();
244
fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
298
}
245
g_free(s);
299
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
246
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
300
index XXXXXXX..XXXXXXX 100644
247
exit(EXIT_FAILURE);
301
--- a/tcg/sparc64/tcg-target.c.inc
248
}
302
+++ b/tcg/sparc64/tcg-target.c.inc
249
} else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) {
303
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
250
- /*
304
tcg_out_ext32s(s, rd, rs);
251
- * reserved_va must be aligned with the host page size
305
}
252
- * as it is used with mmap()
306
253
- */
307
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
254
- reserved_va = max_reserved_va & qemu_host_page_mask;
308
+{
255
+ /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
309
+ tcg_out_ext32u(s, rd, rs);
256
+ reserved_va = max_reserved_va;
310
+}
257
}
311
+
258
312
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
259
{
313
tcg_target_long imm)
260
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
314
{
261
index XXXXXXX..XXXXXXX 100644
315
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
262
--- a/linux-user/mmap.c
316
case INDEX_op_divu_i64:
263
+++ b/linux-user/mmap.c
317
c = ARITH_UDIVX;
264
@@ -XXX,XX +XXX,XX @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
318
goto gen_arith;
265
end_addr = start + size;
319
- case INDEX_op_extu_i32_i64:
266
if (start > reserved_va - size) {
320
- tcg_out_ext32u(s, a0, a1);
267
/* Start at the top of the address space. */
321
- break;
268
- end_addr = ((reserved_va - size) & -align) + size;
322
case INDEX_op_extrl_i64_i32:
269
+ end_addr = ((reserved_va + 1 - size) & -align) + size;
323
tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
270
looped = true;
324
break;
271
}
325
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
272
326
case INDEX_op_ext32s_i64:
273
@@ -XXX,XX +XXX,XX @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
327
case INDEX_op_ext32u_i64:
274
return (abi_ulong)-1;
328
case INDEX_op_ext_i32_i64:
275
}
329
+ case INDEX_op_extu_i32_i64:
276
/* Re-start at the top of the address space. */
330
default:
277
- addr = end_addr = ((reserved_va - size) & -align) + size;
331
g_assert_not_reached();
278
+ addr = end_addr = ((reserved_va + 1 - size) & -align) + size;
332
}
279
looped = true;
333
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
280
} else {
334
index XXXXXXX..XXXXXXX 100644
281
prot = page_get_flags(addr);
335
--- a/tcg/tci/tcg-target.c.inc
336
+++ b/tcg/tci/tcg-target.c.inc
337
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
338
tcg_out_ext32s(s, rd, rs);
339
}
340
341
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
342
+{
343
+ tcg_out_ext32u(s, rd, rs);
344
+}
345
+
346
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
347
tcg_target_long imm)
348
{
349
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
350
351
CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */
352
CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */
353
- CASE_64(extu_i32)
354
CASE_32_64(ctpop) /* Optional (TCG_TARGET_HAS_ctpop_*). */
355
case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
356
case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
357
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
358
case INDEX_op_ext32s_i64:
359
case INDEX_op_ext32u_i64:
360
case INDEX_op_ext_i32_i64:
361
+ case INDEX_op_extu_i32_i64:
362
default:
363
g_assert_not_reached();
364
}
282
--
365
--
283
2.34.1
366
2.34.1
284
367
285
368
1
Pass the address of the last byte to be changed, rather than
1
We will need a backend interface for type truncation. For those backends
2
the first address past the last byte. This avoids overflow
2
that did not enable TCG_TARGET_HAS_extrl_i64_i32, use tcg_out_mov.
3
when the last page of the address space is involved.
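As a minimal illustration of the overflow being avoided (not code from
the patch; assumes a 32-bit guest address space):

```
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t start = 0xfffff000u;     /* first byte of the last page */
    uint32_t len   = 0x1000u;

    uint32_t end  = start + len;      /* wraps to 0: "addr < end" never runs */
    uint32_t last = start + len - 1;  /* 0xffffffff: "addr <= last" still works */

    printf("end = 0x%" PRIx32 ", last = 0x%" PRIx32 "\n", end, last);
    return 0;
}
```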
3
Use it in tcg_reg_alloc_op in the meantime.
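For context, a rough C analogue of what the new hook computes; this is
illustrative only, not the backend code:

```
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* extrl_i64_i32: keep the low 32 bits of a 64-bit value.  On most
 * hosts this is a plain register move; backends whose canonical
 * 32-bit form is sign-extended in a 64-bit register (riscv, mips,
 * loongarch64 below) use tcg_out_ext32s instead. */
static uint32_t extrl_i64_i32(uint64_t x)
{
    return (uint32_t)x;
}

int main(void)
{
    printf("0x%" PRIx32 "\n", extrl_i64_i32(0x123456789abcdef0ull));
    return 0;   /* prints 0x9abcdef0 */
}
```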
4
4
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
include/exec/cpu-all.h | 2 +-
8
tcg/tcg.c | 4 ++++
9
accel/tcg/user-exec.c | 11 +++++------
9
tcg/aarch64/tcg-target.c.inc | 6 ++++++
10
linux-user/mmap.c | 2 +-
10
tcg/arm/tcg-target.c.inc | 5 +++++
11
3 files changed, 7 insertions(+), 8 deletions(-)
11
tcg/i386/tcg-target.c.inc | 9 ++++++---
12
tcg/loongarch64/tcg-target.c.inc | 10 ++++++----
13
tcg/mips/tcg-target.c.inc | 9 ++++++---
14
tcg/ppc/tcg-target.c.inc | 7 +++++++
15
tcg/riscv/tcg-target.c.inc | 10 ++++++----
16
tcg/s390x/tcg-target.c.inc | 6 ++++++
17
tcg/sparc64/tcg-target.c.inc | 9 ++++++---
18
tcg/tci/tcg-target.c.inc | 7 +++++++
19
11 files changed, 65 insertions(+), 17 deletions(-)
12
20
13
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
21
diff --git a/tcg/tcg.c b/tcg/tcg.c
14
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/cpu-all.h
23
--- a/tcg/tcg.c
16
+++ b/include/exec/cpu-all.h
24
+++ b/tcg/tcg.c
17
@@ -XXX,XX +XXX,XX @@ int walk_memory_regions(void *, walk_memory_regions_fn);
25
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
18
26
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
19
int page_get_flags(target_ulong address);
27
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
20
void page_set_flags(target_ulong start, target_ulong last, int flags);
28
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
21
-void page_reset_target_data(target_ulong start, target_ulong end);
29
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg);
22
+void page_reset_target_data(target_ulong start, target_ulong last);
30
static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
23
int page_check_range(target_ulong start, target_ulong len, int flags);
31
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
24
32
static void tcg_out_goto_tb(TCGContext *s, int which);
25
/**
33
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
26
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
34
case INDEX_op_extu_i32_i64:
27
index XXXXXXX..XXXXXXX 100644
35
tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
28
--- a/accel/tcg/user-exec.c
36
break;
29
+++ b/accel/tcg/user-exec.c
37
+ case INDEX_op_extrl_i64_i32:
30
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
38
+ tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
31
}
39
+ break;
32
40
default:
33
if (!flags || reset) {
41
if (def->flags & TCG_OPF_VECTOR) {
34
- page_reset_target_data(start, last + 1);
42
tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
35
+ page_reset_target_data(start, last);
43
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
36
inval_tb |= pageflags_unset(start, last);
44
index XXXXXXX..XXXXXXX 100644
37
}
45
--- a/tcg/aarch64/tcg-target.c.inc
38
if (flags) {
46
+++ b/tcg/aarch64/tcg-target.c.inc
39
@@ -XXX,XX +XXX,XX @@ typedef struct TargetPageDataNode {
47
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
40
48
tcg_out_ext32u(s, rd, rn);
41
static IntervalTreeRoot targetdata_root;
49
}
42
50
43
-void page_reset_target_data(target_ulong start, target_ulong end)
51
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
44
+void page_reset_target_data(target_ulong start, target_ulong last)
52
+{
45
{
53
+ tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
46
IntervalTreeNode *n, *next;
54
+}
47
- target_ulong last;
55
+
48
56
static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
49
assert_memory_lock();
57
TCGReg rn, int64_t aimm)
50
58
{
51
- start = start & TARGET_PAGE_MASK;
59
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
52
- last = TARGET_PAGE_ALIGN(end) - 1;
60
case INDEX_op_ext32u_i64:
53
+ start &= TARGET_PAGE_MASK;
61
case INDEX_op_ext_i32_i64:
54
+ last |= ~TARGET_PAGE_MASK;
62
case INDEX_op_extu_i32_i64:
55
63
+ case INDEX_op_extrl_i64_i32:
56
for (n = interval_tree_iter_first(&targetdata_root, start, last),
64
default:
57
next = n ? interval_tree_iter_next(n, start, last) : NULL;
65
g_assert_not_reached();
58
@@ -XXX,XX +XXX,XX @@ void *page_get_target_data(target_ulong address)
66
}
59
return t->data[(page - region) >> TARGET_PAGE_BITS];
67
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
60
}
68
index XXXXXXX..XXXXXXX 100644
61
#else
69
--- a/tcg/arm/tcg-target.c.inc
62
-void page_reset_target_data(target_ulong start, target_ulong end) { }
70
+++ b/tcg/arm/tcg-target.c.inc
63
+void page_reset_target_data(target_ulong start, target_ulong last) { }
71
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
64
#endif /* TARGET_PAGE_DATA_SIZE */
72
g_assert_not_reached();
65
73
}
66
/* The softmmu versions of these helpers are in cputlb.c. */
74
67
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
75
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
68
index XXXXXXX..XXXXXXX 100644
76
+{
69
--- a/linux-user/mmap.c
77
+ g_assert_not_reached();
70
+++ b/linux-user/mmap.c
78
+}
71
@@ -XXX,XX +XXX,XX @@ abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
79
+
72
if (can_passthrough_madvise(start, end)) {
80
static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
73
ret = get_errno(madvise(g2h_untagged(start), len, advice));
81
TCGReg rd, TCGReg rn, int flags)
74
if ((advice == MADV_DONTNEED) && (ret == 0)) {
82
{
75
- page_reset_target_data(start, start + len);
83
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
76
+ page_reset_target_data(start, start + len - 1);
84
index XXXXXXX..XXXXXXX 100644
77
}
85
--- a/tcg/i386/tcg-target.c.inc
78
}
86
+++ b/tcg/i386/tcg-target.c.inc
87
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
88
tcg_out_ext32u(s, dest, src);
89
}
90
91
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src)
92
+{
93
+ tcg_out_ext32u(s, dest, src);
94
+}
95
+
96
static inline void tcg_out_bswap64(TCGContext *s, int reg)
97
{
98
tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
99
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
100
case INDEX_op_bswap64_i64:
101
tcg_out_bswap64(s, a0);
102
break;
103
- case INDEX_op_extrl_i64_i32:
104
- tcg_out_ext32u(s, a0, a1);
105
- break;
106
case INDEX_op_extrh_i64_i32:
107
tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
108
break;
109
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
110
case INDEX_op_ext32u_i64:
111
case INDEX_op_ext_i32_i64:
112
case INDEX_op_extu_i32_i64:
113
+ case INDEX_op_extrl_i64_i32:
114
default:
115
g_assert_not_reached();
116
}
117
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
118
index XXXXXXX..XXXXXXX 100644
119
--- a/tcg/loongarch64/tcg-target.c.inc
120
+++ b/tcg/loongarch64/tcg-target.c.inc
121
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
122
tcg_out_ext32u(s, ret, arg);
123
}
124
125
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
126
+{
127
+ tcg_out_ext32s(s, ret, arg);
128
+}
129
+
130
static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
131
TCGReg a0, TCGReg a1, TCGReg a2,
132
bool c2, bool is_32bit)
133
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
134
tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
135
break;
136
137
- case INDEX_op_extrl_i64_i32:
138
- tcg_out_ext32s(s, a0, a1);
139
- break;
140
-
141
case INDEX_op_extrh_i64_i32:
142
tcg_out_opc_srai_d(s, a0, a1, 32);
143
break;
144
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
145
case INDEX_op_ext32u_i64:
146
case INDEX_op_ext_i32_i64:
147
case INDEX_op_extu_i32_i64:
148
+ case INDEX_op_extrl_i64_i32:
149
default:
150
g_assert_not_reached();
151
}
152
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
153
index XXXXXXX..XXXXXXX 100644
154
--- a/tcg/mips/tcg-target.c.inc
155
+++ b/tcg/mips/tcg-target.c.inc
156
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
157
tcg_out_ext32u(s, rd, rs);
158
}
159
160
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
161
+{
162
+ tcg_out_ext32s(s, rd, rs);
163
+}
164
+
165
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
166
tcg_target_long imm)
167
{
168
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
169
case INDEX_op_extrh_i64_i32:
170
tcg_out_dsra(s, a0, a1, 32);
171
break;
172
- case INDEX_op_extrl_i64_i32:
173
- tcg_out_ext32s(s, a0, a1);
174
- break;
175
176
case INDEX_op_sar_i32:
177
i1 = OPC_SRAV, i2 = OPC_SRA;
178
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
179
case INDEX_op_ext32u_i64:
180
case INDEX_op_ext_i32_i64:
181
case INDEX_op_extu_i32_i64:
182
+ case INDEX_op_extrl_i64_i32:
183
default:
184
g_assert_not_reached();
185
}
186
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
187
index XXXXXXX..XXXXXXX 100644
188
--- a/tcg/ppc/tcg-target.c.inc
189
+++ b/tcg/ppc/tcg-target.c.inc
190
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
191
tcg_out_ext32u(s, dst, src);
192
}
193
194
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
195
+{
196
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
197
+ tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
198
+}
199
+
200
static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
201
{
202
tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
203
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
204
case INDEX_op_ext32u_i64:
205
case INDEX_op_ext_i32_i64:
206
case INDEX_op_extu_i32_i64:
207
+ case INDEX_op_extrl_i64_i32:
208
default:
209
g_assert_not_reached();
210
}
211
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
212
index XXXXXXX..XXXXXXX 100644
213
--- a/tcg/riscv/tcg-target.c.inc
214
+++ b/tcg/riscv/tcg-target.c.inc
215
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
216
tcg_out_ext32u(s, ret, arg);
217
}
218
219
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
220
+{
221
+ tcg_out_ext32s(s, ret, arg);
222
+}
223
+
224
static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
225
TCGReg addr, intptr_t offset)
226
{
227
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
228
tcg_out_qemu_st(s, args, true);
229
break;
230
231
- case INDEX_op_extrl_i64_i32:
232
- tcg_out_ext32s(s, a0, a1);
233
- break;
234
-
235
case INDEX_op_extrh_i64_i32:
236
tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
237
break;
238
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
239
case INDEX_op_ext32u_i64:
240
case INDEX_op_ext_i32_i64:
241
case INDEX_op_extu_i32_i64:
242
+ case INDEX_op_extrl_i64_i32:
243
default:
244
g_assert_not_reached();
245
}
246
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
247
index XXXXXXX..XXXXXXX 100644
248
--- a/tcg/s390x/tcg-target.c.inc
249
+++ b/tcg/s390x/tcg-target.c.inc
250
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
251
tcg_out_ext32u(s, dest, src);
252
}
253
254
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src)
255
+{
256
+ tcg_out_mov(s, TCG_TYPE_I32, dest, src);
257
+}
258
+
259
static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
260
{
261
int msb, lsb;
262
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
263
case INDEX_op_ext32u_i64:
264
case INDEX_op_ext_i32_i64:
265
case INDEX_op_extu_i32_i64:
266
+ case INDEX_op_extrl_i64_i32:
267
default:
268
g_assert_not_reached();
269
}
270
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
271
index XXXXXXX..XXXXXXX 100644
272
--- a/tcg/sparc64/tcg-target.c.inc
273
+++ b/tcg/sparc64/tcg-target.c.inc
274
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
275
tcg_out_ext32u(s, rd, rs);
276
}
277
278
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
279
+{
280
+ tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
281
+}
282
+
283
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
284
tcg_target_long imm)
285
{
286
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
287
case INDEX_op_divu_i64:
288
c = ARITH_UDIVX;
289
goto gen_arith;
290
- case INDEX_op_extrl_i64_i32:
291
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
292
- break;
293
case INDEX_op_extrh_i64_i32:
294
tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
295
break;
296
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
297
case INDEX_op_ext32u_i64:
298
case INDEX_op_ext_i32_i64:
299
case INDEX_op_extu_i32_i64:
300
+ case INDEX_op_extrl_i64_i32:
301
default:
302
g_assert_not_reached();
303
}
304
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
305
index XXXXXXX..XXXXXXX 100644
306
--- a/tcg/tci/tcg-target.c.inc
307
+++ b/tcg/tci/tcg-target.c.inc
308
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
309
tcg_out_ext32u(s, rd, rs);
310
}
311
312
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
313
+{
314
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
315
+ tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
316
+}
317
+
318
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
319
tcg_target_long imm)
320
{
321
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
322
case INDEX_op_ext32u_i64:
323
case INDEX_op_ext_i32_i64:
324
case INDEX_op_extu_i32_i64:
325
+ case INDEX_op_extrl_i64_i32:
326
default:
327
g_assert_not_reached();
79
}
328
}
80
--
329
--
81
2.34.1
330
2.34.1
82
331
83
332
1
Pass the address of the last byte to be changed, rather than
1
This is common code in most qemu_{ld,st} slow paths, extending the
2
the first address past the last byte. This avoids overflow
2
input value for the store helper data argument or extending the
3
when the last page of the address space is involved.
3
return value from the load helper.
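A toy model of the dispatch this centralizes; the constants and logic
here are simplified for illustration and are not QEMU's MemOp encoding:

```
#include <stdio.h>

enum ext { UB, SB, UW, SW, UL, SL, UQ };   /* toy stand-ins for MO_* */

/* Which extension a (64-bit dest?, source extension) pair selects,
 * roughly following the tcg_out_movext switch added below. */
static const char *pick(int dst_is_64, enum ext e)
{
    switch (e) {
    case UB: return "ext8u";
    case SB: return "ext8s";
    case UW: return "ext16u";
    case SW: return "ext16s";
    case UL: return dst_is_64 ? "extu_i32_i64" : "mov";
    case SL: return dst_is_64 ? "exts_i32_i64" : "mov";
    case UQ: return dst_is_64 ? "mov" : "extrl_i64_i32";
    }
    return "?";
}

int main(void)
{
    printf("%s, %s\n", pick(1, SW), pick(0, UQ));
    return 0;   /* prints: ext16s, extrl_i64_i32 */
}
```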
4
5
Fixes a bug in the loop comparison where "<= end" would lock
6
one more page than required.
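A tiny sketch of that off-by-one, assuming a 4 KiB page size
(illustration only, not the QEMU loop):

```
#include <stdio.h>

#define PAGE_BITS 12

int main(void)
{
    unsigned start = 0x1000, end = 0x3000;  /* half-open [0x1000, 0x3000) */
    unsigned last = end - 1;
    unsigned idx;

    for (idx = start >> PAGE_BITS; idx <= end >> PAGE_BITS; idx++) {
        printf("old: lock page %u\n", idx); /* 1, 2, 3: page 3 holds no data */
    }
    for (idx = start >> PAGE_BITS; idx <= last >> PAGE_BITS; idx++) {
        printf("new: lock page %u\n", idx); /* 1, 2 */
    }
    return 0;
}
```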
7
4
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
7
---
11
accel/tcg/tb-maint.c | 22 +++++++++++-----------
8
tcg/tcg.c | 63 ++++++++++++++++++++++++++++++++
12
1 file changed, 11 insertions(+), 11 deletions(-)
9
tcg/aarch64/tcg-target.c.inc | 8 +---
10
tcg/arm/tcg-target.c.inc | 16 ++------
11
tcg/i386/tcg-target.c.inc | 30 +++------------
12
tcg/loongarch64/tcg-target.c.inc | 53 ++++-----------------------
13
tcg/ppc/tcg-target.c.inc | 38 +++++--------------
14
tcg/riscv/tcg-target.c.inc | 13 +------
15
tcg/s390x/tcg-target.c.inc | 19 ++--------
16
tcg/sparc64/tcg-target.c.inc | 31 +++-------------
17
9 files changed, 103 insertions(+), 168 deletions(-)
13
18
14
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
19
diff --git a/tcg/tcg.c b/tcg/tcg.c
15
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
16
--- a/accel/tcg/tb-maint.c
21
--- a/tcg/tcg.c
17
+++ b/accel/tcg/tb-maint.c
22
+++ b/tcg/tcg.c
18
@@ -XXX,XX +XXX,XX @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
23
@@ -XXX,XX +XXX,XX @@ void tcg_raise_tb_overflow(TCGContext *s)
24
siglongjmp(s->jmp_trans, -2);
19
}
25
}
20
26
21
/*
27
+/**
22
- * Lock a range of pages ([@start,@end[) as well as the pages of all
28
+ * tcg_out_movext -- move and extend
23
+ * Lock a range of pages ([@start,@last]) as well as the pages of all
29
+ * @s: tcg context
24
* intersecting TBs.
30
+ * @dst_type: integral type for destination
25
* Locking order: acquire locks in ascending order of page index.
31
+ * @dst: destination register
26
*/
32
+ * @src_type: integral type for source
27
static struct page_collection *page_collection_lock(tb_page_addr_t start,
33
+ * @src_ext: extension to apply to source
28
- tb_page_addr_t end)
34
+ * @src: source register
29
+ tb_page_addr_t last)
35
+ *
36
+ * Move or extend @src into @dst, depending on @src_ext and the types.
37
+ */
38
+static void __attribute__((unused))
39
+tcg_out_movext(TCGContext *s, TCGType dst_type, TCGReg dst,
40
+ TCGType src_type, MemOp src_ext, TCGReg src)
41
+{
42
+ switch (src_ext) {
43
+ case MO_UB:
44
+ tcg_out_ext8u(s, dst, src);
45
+ break;
46
+ case MO_SB:
47
+ tcg_out_ext8s(s, dst_type, dst, src);
48
+ break;
49
+ case MO_UW:
50
+ tcg_out_ext16u(s, dst, src);
51
+ break;
52
+ case MO_SW:
53
+ tcg_out_ext16s(s, dst_type, dst, src);
54
+ break;
55
+ case MO_UL:
56
+ case MO_SL:
57
+ if (dst_type == TCG_TYPE_I32) {
58
+ if (src_type == TCG_TYPE_I32) {
59
+ tcg_out_mov(s, TCG_TYPE_I32, dst, src);
60
+ } else {
61
+ tcg_out_extrl_i64_i32(s, dst, src);
62
+ }
63
+ } else if (src_type == TCG_TYPE_I32) {
64
+ if (src_ext & MO_SIGN) {
65
+ tcg_out_exts_i32_i64(s, dst, src);
66
+ } else {
67
+ tcg_out_extu_i32_i64(s, dst, src);
68
+ }
69
+ } else {
70
+ if (src_ext & MO_SIGN) {
71
+ tcg_out_ext32s(s, dst, src);
72
+ } else {
73
+ tcg_out_ext32u(s, dst, src);
74
+ }
75
+ }
76
+ break;
77
+ case MO_UQ:
78
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
79
+ if (dst_type == TCG_TYPE_I32) {
80
+ tcg_out_extrl_i64_i32(s, dst, src);
81
+ } else {
82
+ tcg_out_mov(s, TCG_TYPE_I64, dst, src);
83
+ }
84
+ break;
85
+ default:
86
+ g_assert_not_reached();
87
+ }
88
+}
89
+
90
#define C_PFX1(P, A) P##A
91
#define C_PFX2(P, A, B) P##A##_##B
92
#define C_PFX3(P, A, B, C) P##A##_##B##_##C
93
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
94
index XXXXXXX..XXXXXXX 100644
95
--- a/tcg/aarch64/tcg-target.c.inc
96
+++ b/tcg/aarch64/tcg-target.c.inc
97
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
30
{
98
{
31
struct page_collection *set = g_malloc(sizeof(*set));
99
MemOpIdx oi = lb->oi;
32
tb_page_addr_t index;
100
MemOp opc = get_memop(oi);
33
PageDesc *pd;
101
- MemOp size = opc & MO_SIZE;
34
102
35
start >>= TARGET_PAGE_BITS;
103
if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
36
- end >>= TARGET_PAGE_BITS;
104
return false;
37
- g_assert(start <= end);
105
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
38
+ last >>= TARGET_PAGE_BITS;
106
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, oi);
39
+ g_assert(start <= last);
107
tcg_out_adr(s, TCG_REG_X3, lb->raddr);
40
108
tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
41
set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
109
- if (opc & MO_SIGN) {
42
page_entry_destroy);
110
- tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0);
43
@@ -XXX,XX +XXX,XX @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
111
- } else {
44
retry:
112
- tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0);
45
q_tree_foreach(set->tree, page_entry_lock, NULL);
113
- }
46
114
47
- for (index = start; index <= end; index++) {
115
+ tcg_out_movext(s, lb->type, lb->datalo_reg,
48
+ for (index = start; index <= last; index++) {
116
+ TCG_TYPE_REG, opc & MO_SSIZE, TCG_REG_X0);
49
TranslationBlock *tb;
117
tcg_out_goto(s, lb->raddr);
50
PageForEachNext n;
118
return true;
51
119
}
52
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
120
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
53
void tb_invalidate_phys_page(tb_page_addr_t addr)
121
index XXXXXXX..XXXXXXX 100644
122
--- a/tcg/arm/tcg-target.c.inc
123
+++ b/tcg/arm/tcg-target.c.inc
124
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
125
126
datalo = lb->datalo_reg;
127
datahi = lb->datahi_reg;
128
- switch (opc & MO_SSIZE) {
129
- case MO_SB:
130
- tcg_out_ext8s(s, TCG_TYPE_I32, datalo, TCG_REG_R0);
131
- break;
132
- case MO_SW:
133
- tcg_out_ext16s(s, TCG_TYPE_I32, datalo, TCG_REG_R0);
134
- break;
135
- default:
136
- tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
137
- break;
138
- case MO_UQ:
139
+ if ((opc & MO_SIZE) == MO_64) {
140
if (datalo != TCG_REG_R1) {
141
tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
142
tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
143
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
144
tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
145
tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
146
}
147
- break;
148
+ } else {
149
+ tcg_out_movext(s, TCG_TYPE_I32, datalo,
150
+ TCG_TYPE_I32, opc & MO_SSIZE, TCG_REG_R0);
151
}
152
153
tcg_out_goto(s, COND_AL, lb->raddr);
154
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
155
index XXXXXXX..XXXXXXX 100644
156
--- a/tcg/i386/tcg-target.c.inc
157
+++ b/tcg/i386/tcg-target.c.inc
158
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
159
tcg_out_branch(s, 1, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
160
161
data_reg = l->datalo_reg;
162
- switch (opc & MO_SSIZE) {
163
- case MO_SB:
164
- tcg_out_ext8s(s, l->type, data_reg, TCG_REG_EAX);
165
- break;
166
- case MO_SW:
167
- tcg_out_ext16s(s, l->type, data_reg, TCG_REG_EAX);
168
- break;
169
-#if TCG_TARGET_REG_BITS == 64
170
- case MO_SL:
171
- tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
172
- break;
173
-#endif
174
- case MO_UB:
175
- case MO_UW:
176
- /* Note that the helpers have zero-extended to tcg_target_long. */
177
- case MO_UL:
178
- tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
179
- break;
180
- case MO_UQ:
181
- if (TCG_TARGET_REG_BITS == 64) {
182
- tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
183
- } else if (data_reg == TCG_REG_EDX) {
184
+ if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
185
+ if (data_reg == TCG_REG_EDX) {
186
/* xchg %edx, %eax */
187
tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
188
tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
189
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
190
tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
191
tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
192
}
193
- break;
194
- default:
195
- g_assert_not_reached();
196
+ } else {
197
+ tcg_out_movext(s, l->type, data_reg,
198
+ TCG_TYPE_REG, opc & MO_SSIZE, TCG_REG_EAX);
199
}
200
201
/* Jump to the code corresponding to next IR of qemu_st */
202
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
203
index XXXXXXX..XXXXXXX 100644
204
--- a/tcg/loongarch64/tcg-target.c.inc
205
+++ b/tcg/loongarch64/tcg-target.c.inc
206
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
207
MemOpIdx oi = l->oi;
208
MemOp opc = get_memop(oi);
209
MemOp size = opc & MO_SIZE;
210
- TCGType type = l->type;
211
212
/* resolve label address */
213
if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
214
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
215
216
tcg_out_call_int(s, qemu_ld_helpers[size], false);
217
218
- switch (opc & MO_SSIZE) {
219
- case MO_SB:
220
- tcg_out_ext8s(s, type, l->datalo_reg, TCG_REG_A0);
221
- break;
222
- case MO_SW:
223
- tcg_out_ext16s(s, type, l->datalo_reg, TCG_REG_A0);
224
- break;
225
- case MO_SL:
226
- tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
227
- break;
228
- case MO_UL:
229
- if (type == TCG_TYPE_I32) {
230
- /* MO_UL loads of i32 should be sign-extended too */
231
- tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
232
- break;
233
- }
234
- /* fallthrough */
235
- default:
236
- tcg_out_mov(s, type, l->datalo_reg, TCG_REG_A0);
237
- break;
238
- }
239
-
240
+ tcg_out_movext(s, l->type, l->datalo_reg,
241
+ TCG_TYPE_REG, opc & MO_SSIZE, TCG_REG_A0);
242
return tcg_out_goto(s, l->raddr);
243
}
244
245
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
246
/* call store helper */
247
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
248
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
249
- switch (size) {
250
- case MO_8:
251
- tcg_out_ext8u(s, TCG_REG_A2, l->datalo_reg);
252
- break;
253
- case MO_16:
254
- tcg_out_ext16u(s, TCG_REG_A2, l->datalo_reg);
255
- break;
256
- case MO_32:
257
- tcg_out_ext32u(s, TCG_REG_A2, l->datalo_reg);
258
- break;
259
- case MO_64:
260
- tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, l->datalo_reg);
261
- break;
262
- default:
263
- g_assert_not_reached();
264
- break;
265
- }
266
+ tcg_out_movext(s, size == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, TCG_REG_A2,
267
+ l->type, size, l->datalo_reg);
268
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi);
269
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr);
270
271
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
272
}
273
}
274
275
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
276
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGType type)
54
{
277
{
55
struct page_collection *pages;
278
TCGReg addr_regl;
56
- tb_page_addr_t start, end;
279
TCGReg data_regl;
57
+ tb_page_addr_t start, last;
280
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
58
PageDesc *p;
281
tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
59
282
base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
60
p = page_find(addr >> TARGET_PAGE_BITS);
283
tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
61
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
284
- add_qemu_ldst_label(s, 0, oi,
62
}
285
- 0, /* type param is unused for stores */
63
286
+ add_qemu_ldst_label(s, 0, oi, type,
64
start = addr & TARGET_PAGE_MASK;
287
data_regl, addr_regl,
65
- end = start + TARGET_PAGE_SIZE;
288
s->code_ptr, label_ptr);
66
- pages = page_collection_lock(start, end);
289
#else
67
- tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
290
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
68
+ last = addr | ~TARGET_PAGE_MASK;
291
tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
69
+ pages = page_collection_lock(start, last);
292
break;
70
+ tb_invalidate_phys_page_range__locked(pages, p, start, last + 1, 0);
293
case INDEX_op_qemu_st_i32:
71
page_collection_unlock(pages);
294
- tcg_out_qemu_st(s, args);
295
+ tcg_out_qemu_st(s, args, TCG_TYPE_I32);
296
break;
297
case INDEX_op_qemu_st_i64:
298
- tcg_out_qemu_st(s, args);
299
+ tcg_out_qemu_st(s, args, TCG_TYPE_I64);
300
break;
301
302
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
303
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
304
index XXXXXXX..XXXXXXX 100644
305
--- a/tcg/ppc/tcg-target.c.inc
306
+++ b/tcg/ppc/tcg-target.c.inc
307
@@ -XXX,XX +XXX,XX @@ static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
308
[MO_BSWAP | MO_UQ] = STDBRX,
309
};
310
311
-static const uint32_t qemu_exts_opc[4] = {
312
- EXTSB, EXTSH, EXTSW, 0
313
-};
314
-
315
#if defined (CONFIG_SOFTMMU)
316
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
317
* int mmu_idx, uintptr_t ra)
318
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
319
if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
320
tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
321
tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
322
- } else if (opc & MO_SIGN) {
323
- uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
324
- tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
325
} else {
326
- tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
327
+ tcg_out_movext(s, lb->type, lo,
328
+ TCG_TYPE_REG, opc & MO_SSIZE, TCG_REG_R3);
329
}
330
331
tcg_out_b(s, 0, lb->raddr);
332
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
333
334
lo = lb->datalo_reg;
335
hi = lb->datahi_reg;
336
- if (TCG_TARGET_REG_BITS == 32) {
337
- switch (s_bits) {
338
- case MO_64:
339
- arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
340
- tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
341
- /* FALLTHRU */
342
- case MO_32:
343
- tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
344
- break;
345
- default:
346
- tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
347
- break;
348
- }
349
+ if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
350
+ arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
351
+ tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
352
+ tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
353
} else {
354
- if (s_bits == MO_64) {
355
- tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
356
- } else {
357
- tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
358
- }
359
+ tcg_out_movext(s, s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32,
360
+ arg++, lb->type, s_bits, lo);
361
}
362
363
tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
364
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
365
} else {
366
insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
367
tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
368
- insn = qemu_exts_opc[s_bits];
369
- tcg_out32(s, insn | RA(datalo) | RS(datalo));
370
+ tcg_out_movext(s, TCG_TYPE_REG, datalo,
371
+ TCG_TYPE_REG, opc & MO_SSIZE, datalo);
372
}
373
}
374
375
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
376
index XXXXXXX..XXXXXXX 100644
377
--- a/tcg/riscv/tcg-target.c.inc
378
+++ b/tcg/riscv/tcg-target.c.inc
379
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
380
/* call store helper */
381
tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
382
tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
383
- tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg);
384
- switch (s_bits) {
385
- case MO_8:
386
- tcg_out_ext8u(s, a2, a2);
387
- break;
388
- case MO_16:
389
- tcg_out_ext16u(s, a2, a2);
390
- break;
391
- default:
392
- break;
393
- }
394
+ tcg_out_movext(s, s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, a2,
395
+ l->type, s_bits, l->datalo_reg);
396
tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
397
tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);
398
399
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
400
index XXXXXXX..XXXXXXX 100644
401
--- a/tcg/s390x/tcg-target.c.inc
402
+++ b/tcg/s390x/tcg-target.c.inc
403
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
404
TCGReg data_reg = lb->datalo_reg;
405
MemOpIdx oi = lb->oi;
406
MemOp opc = get_memop(oi);
407
+ MemOp size = opc & MO_SIZE;
408
409
if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
410
(intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
411
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
412
if (TARGET_LONG_BITS == 64) {
413
tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
414
}
415
- switch (opc & MO_SIZE) {
416
- case MO_UB:
417
- tcg_out_ext8u(s, TCG_REG_R4, data_reg);
418
- break;
419
- case MO_UW:
420
- tcg_out_ext16u(s, TCG_REG_R4, data_reg);
421
- break;
422
- case MO_UL:
423
- tcg_out_ext32u(s, TCG_REG_R4, data_reg);
424
- break;
425
- case MO_UQ:
426
- tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
427
- break;
428
- default:
429
- g_assert_not_reached();
430
- }
431
+ tcg_out_movext(s, size == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32,
432
+ TCG_REG_R4, lb->type, size, data_reg);
433
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
434
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
435
tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
436
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
437
index XXXXXXX..XXXXXXX 100644
438
--- a/tcg/sparc64/tcg-target.c.inc
439
+++ b/tcg/sparc64/tcg-target.c.inc
440
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
441
static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
442
static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];
443
444
-static void emit_extend(TCGContext *s, TCGReg r, int op)
445
-{
446
- /* Emit zero extend of 8, 16 or 32 bit data as
447
- * required by the MO_* value op; do nothing for 64 bit.
448
- */
449
- switch (op & MO_SIZE) {
450
- case MO_8:
451
- tcg_out_ext8u(s, r, r);
452
- break;
453
- case MO_16:
454
- tcg_out_ext16u(s, r, r);
455
- break;
456
- case MO_32:
457
- tcg_out_ext32u(s, r, r);
458
- break;
459
- case MO_64:
460
- break;
461
- }
462
-}
463
-
464
static void build_trampolines(TCGContext *s)
465
{
466
static void * const qemu_ld_helpers[] = {
467
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
468
}
469
qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
470
471
- emit_extend(s, TCG_REG_O2, i);
472
-
473
/* Set the retaddr operand. */
474
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);
475
476
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
72
}
477
}
73
478
74
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
479
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
75
struct page_collection *pages;
480
- MemOpIdx oi)
76
tb_page_addr_t next;
481
+ MemOpIdx oi, TCGType data_type)
77
78
- pages = page_collection_lock(start, end);
79
+ pages = page_collection_lock(start, end - 1);
80
for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
81
start < end;
82
start = next, next += TARGET_PAGE_SIZE) {
83
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
84
{
482
{
85
struct page_collection *pages;
483
MemOp memop = get_memop(oi);
86
484
tcg_insn_unit *label_ptr;
87
- pages = page_collection_lock(ram_addr, ram_addr + size);
485
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
88
+ pages = page_collection_lock(ram_addr, ram_addr + size - 1);
486
/* TLB Miss. */
89
tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
487
90
page_collection_unlock(pages);
488
tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
91
}
489
- tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);
490
+ tcg_out_movext(s, (memop & MO_SIZE) == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32,
491
+ TCG_REG_O2, data_type, memop & MO_SIZE, data);
492
493
func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
494
tcg_debug_assert(func != NULL);
495
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
496
tcg_out_qemu_ld(s, a0, a1, a2, true);
497
break;
498
case INDEX_op_qemu_st_i32:
499
+ tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
500
+ break;
501
case INDEX_op_qemu_st_i64:
502
- tcg_out_qemu_st(s, a0, a1, a2);
503
+ tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
504
break;
505
506
case INDEX_op_ld32s_i64:
92
--
507
--
93
2.34.1
508
2.34.1
94
509
95
510
1
From: Emilio Cota <cota@braap.org>
1
We will want a backend interface for register swapping.
2
2
This is only properly defined for x86; all others get a
3
qemu-user can hang in a multi-threaded fork. One common
3
stub version that always indicates failure.
4
reason is that when creating a TB, between fork and exec
4
5
we manipulate a GTree whose memory allocator (GSlice) is
6
not fork-safe.
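On the register-swapping interface above: only the i386 backend
implements it below (a single XCHG); every other backend declines.
A toy model of the intended contract, hypothetical since this patch
adds no caller yet (hence the __attribute__((unused)) in tcg.c):

```
#include <stdbool.h>
#include <stdio.h>

/* Toy model, not TCG code: a backend either swaps in place or
 * returns false, leaving the caller to go through a scratch. */
static bool try_xchg(int *r1, int *r2, bool backend_has_xchg)
{
    if (backend_has_xchg) {
        int t = *r1;
        *r1 = *r2;
        *r2 = t;
        return true;
    }
    return false;
}

int main(void)
{
    int a = 1, b = 2, scratch;

    if (!try_xchg(&a, &b, false)) {
        scratch = a;                /* fallback via scratch register */
        a = b;
        b = scratch;
    }
    printf("%d %d\n", a, b);        /* prints: 2 1 */
    return 0;
}
```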
7
8
Although POSIX does not mandate it, the system's allocator
9
(e.g. tcmalloc, libc malloc) is probably fork-safe.
10
11
Fix some of these hangs by using QTree, which uses the system's
12
allocator regardless of the GLib version that we used at
13
configuration time.
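The call-site conversion below is mechanical, since QTree mirrors the
GTree calls one for one (q_tree_new_full, q_tree_lookup, q_tree_insert,
q_tree_foreach, q_tree_destroy). A minimal usage sketch, assuming only
the g_/q_ prefix changes:

```
#include "qemu/qtree.h"

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer data)
{
    return *(const int *)a - *(const int *)b;
}

static void qtree_example(void)
{
    QTree *t = q_tree_new_full(int_cmp, NULL, NULL, g_free);
    int key = 42;

    q_tree_insert(t, &key, g_strdup("value"));
    g_assert(q_tree_lookup(t, &key) != NULL);
    q_tree_destroy(t);              /* frees values via g_free */
}
```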
14
15
Tested with the test program in the original bug report, i.e.:
16
```
17
18
#include <cstdlib>
#include <sys/wait.h>
#include <thread>
#include <unistd.h>

void garble() {
19
int pid = fork();
20
if (pid == 0) {
21
exit(0);
22
} else {
23
int wstatus;
24
waitpid(pid, &wstatus, 0);
25
}
26
}
27
28
void supragarble(unsigned depth) {
29
if (depth == 0)
30
return;
31
32
std::thread a(supragarble, depth-1);
33
std::thread b(supragarble, depth-1);
34
garble();
35
a.join();
36
b.join();
37
}
38
39
int main() {
40
supragarble(10);
41
}
42
```
43
44
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/285
45
Reported-by: Valentin David <me@valentindavid.com>
46
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
47
Signed-off-by: Emilio Cota <cota@braap.org>
48
Message-Id: <20230205163758.416992-3-cota@braap.org>
49
[rth: Add QEMU_DISABLE_CFI for all callback using functions.]
50
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
51
---
7
---
52
accel/tcg/tb-maint.c | 17 +++++++++--------
8
tcg/tcg.c | 2 ++
53
tcg/region.c | 19 ++++++++++---------
9
tcg/aarch64/tcg-target.c.inc | 5 +++++
54
util/qtree.c | 8 ++++----
10
tcg/arm/tcg-target.c.inc | 5 +++++
55
3 files changed, 23 insertions(+), 21 deletions(-)
11
tcg/i386/tcg-target.c.inc | 8 ++++++++
56
12
tcg/loongarch64/tcg-target.c.inc | 5 +++++
57
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
13
tcg/mips/tcg-target.c.inc | 5 +++++
58
index XXXXXXX..XXXXXXX 100644
14
tcg/ppc/tcg-target.c.inc | 5 +++++
59
--- a/accel/tcg/tb-maint.c
15
tcg/riscv/tcg-target.c.inc | 5 +++++
60
+++ b/accel/tcg/tb-maint.c
16
tcg/s390x/tcg-target.c.inc | 5 +++++
61
@@ -XXX,XX +XXX,XX @@
 tcg/sparc64/tcg-target.c.inc | 5 +++++
 tcg/tci/tcg-target.c.inc | 5 +++++
 11 files changed, 55 insertions(+)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg);
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+    __attribute__((unused));
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
     tcg_out_insn(s, 3305, LDR, 0, rd);
 }
 
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
     tcg_out_movi32(s, COND_AL, ret, arg);
 }
 
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
 #define OPC_VPTERNLOGQ  (0x25 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
 #define OPC_VZEROUPPER  (0x77 | P_EXT)
 #define OPC_XCHG_ax_r32 (0x90)
+#define OPC_XCHG_EvGv   (0x87)
 
 #define OPC_GRP3_Eb     (0xf6)
 #define OPC_GRP3_Ev     (0xf7)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
     }
 }
 
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+    tcg_out_modrm(s, OPC_XCHG_EvGv + rexw, r1, r2);
+    return true;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
     }
 }
 
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_ext32s(s, rd, rs);
 }
 
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
     }
 }
 
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
     tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
 }
 
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
     return false;
 }
 
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
 }
 
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
     tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
 }
 
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
+{
+    return false;
+}
+
 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                              tcg_target_long imm)
 {
--
2.34.1

 #include "qemu/osdep.h"
 #include "qemu/interval-tree.h"
+#include "qemu/qtree.h"
 #include "exec/cputlb.h"
 #include "exec/log.h"
 #include "exec/exec-all.h"
@@ -XXX,XX +XXX,XX @@ struct page_entry {
  * See also: page_collection_lock().
  */
 struct page_collection {
-    GTree *tree;
+    QTree *tree;
     struct page_entry *max;
 };
 
@@ -XXX,XX +XXX,XX @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
     struct page_entry *pe;
     PageDesc *pd;
 
-    pe = g_tree_lookup(set->tree, &index);
+    pe = q_tree_lookup(set->tree, &index);
     if (pe) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
     }
 
     pe = page_entry_new(pd, index);
-    g_tree_insert(set->tree, &pe->index, pe);
+    q_tree_insert(set->tree, &pe->index, pe);
 
     /*
      * If this is either (1) the first insertion or (2) a page whose index
@@ -XXX,XX +XXX,XX @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
     end >>= TARGET_PAGE_BITS;
     g_assert(start <= end);
 
-    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
+    set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                 page_entry_destroy);
     set->max = NULL;
     assert_no_pages_locked();
 
 retry:
-    g_tree_foreach(set->tree, page_entry_lock, NULL);
+    q_tree_foreach(set->tree, page_entry_lock, NULL);
 
     for (index = start; index <= end; index++) {
         TranslationBlock *tb;
@@ -XXX,XX +XXX,XX @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
             continue;
         }
         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
-            g_tree_foreach(set->tree, page_entry_unlock, NULL);
+            q_tree_foreach(set->tree, page_entry_unlock, NULL);
             goto retry;
         }
         assert_page_locked(pd);
@@ -XXX,XX +XXX,XX @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
             (tb_page_addr1(tb) != -1 &&
              page_trylock_add(set, tb_page_addr1(tb)))) {
             /* drop all locks, and reacquire in order */
-            g_tree_foreach(set->tree, page_entry_unlock, NULL);
+            q_tree_foreach(set->tree, page_entry_unlock, NULL);
             goto retry;
         }
     }
@@ -XXX,XX +XXX,XX @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
 static void page_collection_unlock(struct page_collection *set)
 {
     /* entries are unlocked and freed via page_entry_destroy */
-    g_tree_destroy(set->tree);
+    q_tree_destroy(set->tree);
     g_free(set);
 }
 
diff --git a/tcg/region.c b/tcg/region.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/region.c
+++ b/tcg/region.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/mprotect.h"
 #include "qemu/memalign.h"
 #include "qemu/cacheinfo.h"
+#include "qemu/qtree.h"
 #include "qapi/error.h"
 #include "exec/exec-all.h"
 #include "tcg/tcg.h"
@@ -XXX,XX +XXX,XX @@
 
 struct tcg_region_tree {
     QemuMutex lock;
-    GTree *tree;
+    QTree *tree;
     /* padding to avoid false sharing is computed at run-time */
 };
 
@@ -XXX,XX +XXX,XX @@ static void tcg_region_trees_init(void)
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
         qemu_mutex_init(&rt->lock);
-        rt->tree = g_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
+        rt->tree = q_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ void tcg_tb_insert(TranslationBlock *tb)
 
     g_assert(rt != NULL);
     qemu_mutex_lock(&rt->lock);
-    g_tree_insert(rt->tree, &tb->tc, tb);
+    q_tree_insert(rt->tree, &tb->tc, tb);
     qemu_mutex_unlock(&rt->lock);
 }
 
@@ -XXX,XX +XXX,XX @@ void tcg_tb_remove(TranslationBlock *tb)
 
     g_assert(rt != NULL);
     qemu_mutex_lock(&rt->lock);
-    g_tree_remove(rt->tree, &tb->tc);
+    q_tree_remove(rt->tree, &tb->tc);
     qemu_mutex_unlock(&rt->lock);
 }
 
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
     }
 
     qemu_mutex_lock(&rt->lock);
-    tb = g_tree_lookup(rt->tree, &s);
+    tb = q_tree_lookup(rt->tree, &s);
     qemu_mutex_unlock(&rt->lock);
     return tb;
 }
 
@@ -XXX,XX +XXX,XX @@ void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
     for (i = 0; i < region.n; i++) {
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
-        g_tree_foreach(rt->tree, func, user_data);
+        q_tree_foreach(rt->tree, func, user_data);
     }
     tcg_region_tree_unlock_all();
 }
 
@@ -XXX,XX +XXX,XX @@ size_t tcg_nb_tbs(void)
     for (i = 0; i < region.n; i++) {
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
-        nb_tbs += g_tree_nnodes(rt->tree);
+        nb_tbs += q_tree_nnodes(rt->tree);
     }
     tcg_region_tree_unlock_all();
     return nb_tbs;
@@ -XXX,XX +XXX,XX @@ static void tcg_region_tree_reset_all(void)
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
         /* Increment the refcount first so that destroy acts as a reset */
-        g_tree_ref(rt->tree);
-        g_tree_destroy(rt->tree);
+        q_tree_ref(rt->tree);
+        q_tree_destroy(rt->tree);
     }
     tcg_region_tree_unlock_all();
 }
diff --git a/util/qtree.c b/util/qtree.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qtree.c
+++ b/util/qtree.c
@@ -XXX,XX +XXX,XX @@ q_tree_node_next(QTreeNode *node)
  *
  * Since: 2.70 in GLib. Internal in Qtree, i.e. not in the public API.
  */
-static void
+static void QEMU_DISABLE_CFI
 q_tree_remove_all(QTree *tree)
 {
     QTreeNode *node;
@@ -XXX,XX +XXX,XX @@ q_tree_replace(QTree *tree,
 }
 
 /* internal insert routine */
-static QTreeNode *
+static QTreeNode * QEMU_DISABLE_CFI
 q_tree_insert_internal(QTree *tree,
                        gpointer key,
                        gpointer value,
@@ -XXX,XX +XXX,XX @@ q_tree_steal(QTree *tree,
 }
 
 /* internal remove routine */
-static gboolean
+static gboolean QEMU_DISABLE_CFI
 q_tree_remove_internal(QTree *tree,
                        gconstpointer key,
                        gboolean steal)
@@ -XXX,XX +XXX,XX @@ q_tree_node_balance(QTreeNode *node)
     return node;
 }
 
-static QTreeNode *
+static QTreeNode * QEMU_DISABLE_CFI
 q_tree_find_node(QTree *tree,
                  gconstpointer key)
 {
--
2.34.1
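An aside on the mechanical conversion above: the q_tree_* calls are one-for-one
substitutions for their g_tree_* counterparts, so every call site keeps its
shape. A minimal sketch of the API under that assumption; the comparator,
key type, and qtree_demo function are invented for illustration and are not
taken from the patch:

#include "qemu/osdep.h"
#include "qemu/qtree.h"

/* Hypothetical comparator; matches the GCompareDataFunc shape that
   q_tree_new_full() takes as its first argument in the hunks above. */
static gint uint_cmp(gconstpointer a, gconstpointer b, gpointer unused)
{
    const unsigned *ua = a, *ub = b;
    return *ua < *ub ? -1 : *ua > *ub ? 1 : 0;
}

static void qtree_demo(void)
{
    /* NULL key destroy, g_free as value destroy, as in the patch's usage. */
    QTree *t = q_tree_new_full(uint_cmp, NULL, NULL, g_free);
    unsigned *v = g_new(unsigned, 1);

    *v = 42;
    q_tree_insert(t, v, v);              /* key and value share storage */
    g_assert(q_tree_lookup(t, v) == v);
    q_tree_destroy(t);                   /* runs g_free on each value */
}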
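For the tcg_out_xchg patch earlier in this series, the boolean return is the
whole contract: only the i386 backend can exchange two registers in place
(XCHG, opcode 0x87 with an optional REX.W), every other backend returns false
and the caller is expected to fall back to moves. A sketch of such a caller,
assuming a caller-owned scratch register; exchange_or_fallback is illustrative
and not part of the series:

/* Illustrative only: rotate r1/r2 through a scratch register when
   the backend cannot exchange them directly. */
static void exchange_or_fallback(TCGContext *s, TCGType type,
                                 TCGReg r1, TCGReg r2, TCGReg scratch)
{
    if (!tcg_out_xchg(s, type, r1, r2)) {
        tcg_out_mov(s, type, scratch, r1);   /* save r1 */
        tcg_out_mov(s, type, r1, r2);        /* r1 = r2 */
        tcg_out_mov(s, type, r2, scratch);   /* r2 = old r1 */
    }
}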
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-ldst.c.inc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-ldst.c.inc
+++ b/tcg/tcg-ldst.c.inc
@@ -XXX,XX +XXX,XX @@ static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s)
 {
     TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l));
 
+    memset(l, 0, sizeof(*l));
     QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next);
 
     return l;
--
2.34.1
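The memset makes new_ldst_label behave like g_new0 rather than g_new: backends
can set only the fields they need and rely on the rest reading as zero. A
hedged sketch of the resulting idiom; the register variables are hypothetical,
while the field names are from TCGLabelQemuLdst:

TCGLabelQemuLdst *l = new_ldst_label(s);

l->is_ld = true;
l->datalo_reg = data_reg;    /* data_reg/addr_reg: invented for the example */
l->addrlo_reg = addr_reg;
/* Fields left untouched, e.g. l->datahi_reg or l->addrhi_reg, now
   read reliably as zero instead of stale bytes from the tcg_malloc pool. */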
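The first patch below adds a guard on -R against the ARM commpage. A worked
example of its arithmetic, assuming 4KiB target pages and the commpage landing
on the page at 0xffff0000; the concrete numbers are illustrative, not from the
patch:

/*
 *   qemu-arm -R 0x10000000 ...
 *       reserved_va     = 0x0fffffff   (last-byte semantics)
 *       want            = 0xffff0000   (HI_COMMPAGE & TARGET_PAGE_MASK)
 *       want + 4096 - 1 = 0xffff0fff  >  reserved_va
 *   => "Allocating guest commpage: -R 0x10000000 too small"
 * instead of the previous assertion failure in page_set_flags.
 */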
User setting of -R reserved_va can lead to an assertion
failure in page_set_flags. Sanity check the value of
reserved_va and print an error message instead. Do not
allocate a commpage at all for m-profile cpus.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/elfload.c | 37 +++++++++++++++++++++++++++----------
 1 file changed, 27 insertions(+), 10 deletions(-)

diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ enum {
 
 static bool init_guest_commpage(void)
 {
-    abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
-    void *want = g2h_untagged(commpage);
-    void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
-                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+    ARMCPU *cpu = ARM_CPU(thread_cpu);
+    abi_ptr want = HI_COMMPAGE & TARGET_PAGE_MASK;
+    abi_ptr addr;
 
-    if (addr == MAP_FAILED) {
+    /*
+     * M-profile allocates maximum of 2GB address space, so can never
+     * allocate the commpage. Skip it.
+     */
+    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+        return true;
+    }
+
+    /*
+     * If reserved_va does not cover the commpage, we get an assert
+     * in page_set_flags. Produce an intelligent error instead.
+     */
+    if (reserved_va != 0 && want + TARGET_PAGE_SIZE - 1 > reserved_va) {
+        error_report("Allocating guest commpage: -R 0x%" PRIx64 " too small",
+                     (uint64_t)reserved_va + 1);
+        exit(EXIT_FAILURE);
+    }
+
+    addr = target_mmap(want, TARGET_PAGE_SIZE, PROT_READ | PROT_WRITE,
+                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+
+    if (addr == -1) {
         perror("Allocating guest commpage");
         exit(EXIT_FAILURE);
     }
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
     }
 
     /* Set kernel helper versions; rest of page is 0. */
-    __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
+    put_user_u32(5, 0xffff0ffcu);
 
-    if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
+    if (target_mprotect(addr, qemu_host_page_size, PROT_READ | PROT_EXEC)) {
         perror("Protecting guest commpage");
         exit(EXIT_FAILURE);
     }
-
-    page_set_flags(commpage, commpage | ~qemu_host_page_mask,
-                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
     return true;
 }
 
--
2.34.1

Since TCG_TYPE_I32 values are kept sign-extended in registers, via "w"
instructions, we don't need to extend if the register matches.
This is already relied upon by comparisons.

Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
 
 static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
 {
-    tcg_out_ext32s(s, ret, arg);
+    if (ret != arg) {
+        tcg_out_ext32s(s, ret, arg);
+    }
 }
 
 static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
--
2.34.1
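The invariant behind that last patch, sketched; illustrative only, and note
that sext.w is the standard assembler alias for addiw rd, rs, 0:

/*
 * On RV64, the 32-bit ALU forms write sign-extended results into
 * the full 64-bit register:
 *
 *     addw  a0, a1, a2      # bits 63..32 of a0 = copies of bit 31
 *
 * so a TCG_TYPE_I32 value already equals its own 64-bit sign
 * extension.  When ret == arg, the sext.w that tcg_out_ext32s would
 * emit cannot change the register, hence the new early-out.
 */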