The following changes since commit 3e08b2b9cb64bff2b73fa9128c0e49bfcde0dd40:

  Merge remote-tracking branch 'remotes/philmd-gitlab/tags/edk2-next-20200121' into staging (2020-01-21 15:29:25 +0000)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20200121

for you to fetch changes up to 75fa376cdab5e5db2c7fdd107358e16f95503ac6:

  scripts/git.orderfile: Display decodetree before C source (2020-01-21 15:26:09 -1000)

----------------------------------------------------------------
Remove another limit to NB_MMU_MODES.
Fix compilation using uclibc.
Fix defaulting of -accel parameters.
Tidy cputlb basic routines.
Adjust git.orderfile for decodetree.

----------------------------------------------------------------
Carlos Santos (1):
      util/cacheinfo: fix crash when compiling with uClibc

Philippe Mathieu-Daudé (1):
      scripts/git.orderfile: Display decodetree before C source

Richard Henderson (14):
      cputlb: Handle NB_MMU_MODES > TARGET_PAGE_BITS_MIN
      vl: Remove unused variable in configure_accelerators
      vl: Reduce scope of variables in configure_accelerators
      vl: Remove useless test in configure_accelerators
      vl: Only choose enabled accelerators in configure_accelerators
      cputlb: Merge tlb_table_flush_by_mmuidx into tlb_flush_one_mmuidx_locked
      cputlb: Make tlb_n_entries private to cputlb.c
      cputlb: Pass CPUTLBDescFast to tlb_n_entries and sizeof_tlb
      cputlb: Hoist tlb portions in tlb_mmu_resize_locked
      cputlb: Hoist tlb portions in tlb_flush_one_mmuidx_locked
      cputlb: Split out tlb_mmu_flush_locked
      cputlb: Partially merge tlb_dyn_init into tlb_init
      cputlb: Initialize tlbs as flushed
      cputlb: Hoist timestamp outside of loops over tlbs

 include/exec/cpu_ldst.h |   5 -
 accel/tcg/cputlb.c      | 287 +++++++++++++++++++++++++++++++++---------------
 util/cacheinfo.c        |  10 +-
 vl.c                    |  27 +++--
 scripts/git.orderfile   |   3 +
 5 files changed, 223 insertions(+), 109 deletions(-)

The following changes since commit 40c67636f67c2a89745f2e698522fe917326a952:

  Merge remote-tracking branch 'remotes/kraxel/tags/usb-20200317-pull-request' into staging (2020-03-17 14:00:56 +0000)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20200317

for you to fetch changes up to 0270bd503e3699b7202200a2d693ad1feb57473f:

  tcg: Remove tcg-runtime-gvec.c DO_CMP0 (2020-03-17 08:41:07 -0700)

----------------------------------------------------------------
Fix tcg/i386 bug vs sari_vec.
Fix tcg-runtime-gvec.c vs i386 without avx.

----------------------------------------------------------------
Richard Henderson (5):
      tcg/i386: Bound shift count expanding sari_vec
      tcg: Remove CONFIG_VECTOR16
      tcg: Tidy tcg-runtime-gvec.c types
      tcg: Tidy tcg-runtime-gvec.c DUP*
      tcg: Remove tcg-runtime-gvec.c DO_CMP0

 configure                    |  56 --------
 accel/tcg/tcg-runtime-gvec.c | 298 +++++++++++++++++--------------------------
 tcg/i386/tcg-target.inc.c    |   9 +-
 3 files changed, 122 insertions(+), 241 deletions(-)
Deleted patch
In target/arm we will shortly have "too many" mmu_idx.
The current minimum barrier is caused by the way in which
tlb_flush_page_by_mmuidx is coded.

We can remove this limitation by allocating memory for
consumption by the worker.  Let us assume that this is
the unlikely case, as will be the case for the majority
of targets which have so far satisfied the BUILD_BUG_ON,
and only allocate memory when necessary.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 167 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 132 insertions(+), 35 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
     }
 }

-/* As we are going to hijack the bottom bits of the page address for a
- * mmuidx bit mask we need to fail to build if we can't do that
+/**
+ * tlb_flush_page_by_mmuidx_async_0:
+ * @cpu: cpu on which to flush
+ * @addr: page of virtual address to flush
+ * @idxmap: set of mmu_idx to flush
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
+ * at @addr from the tlbs indicated by @idxmap from @cpu.
  */
-QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
-
-static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
-                                                run_on_cpu_data data)
+static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
+                                             target_ulong addr,
+                                             uint16_t idxmap)
 {
     CPUArchState *env = cpu->env_ptr;
-    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
-    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
-    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
     int mmu_idx;

     assert_cpu_is_self(cpu);

-    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
-              addr, mmu_idx_bitmap);
+    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
+        if ((idxmap >> mmu_idx) & 1) {
             tlb_flush_page_locked(env, mmu_idx, addr);
         }
     }
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
     tb_flush_jmp_cache(cpu, addr);
 }

+/**
+ * tlb_flush_page_by_mmuidx_async_1:
+ * @cpu: cpu on which to flush
+ * @data: encoded addr + idxmap
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
+ * async_run_on_cpu.  The idxmap parameter is encoded in the page
+ * offset of the target_ptr field.  This limits the set of mmu_idx
+ * that can be passed via this method.
+ */
+static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
+                                             run_on_cpu_data data)
+{
+    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
+    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
+    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
+
+    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
+}
+
+typedef struct {
+    target_ulong addr;
+    uint16_t idxmap;
+} TLBFlushPageByMMUIdxData;
+
+/**
+ * tlb_flush_page_by_mmuidx_async_2:
+ * @cpu: cpu on which to flush
+ * @data: allocated addr + idxmap
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
+ * async_run_on_cpu.  The addr+idxmap parameters are stored in a
+ * TLBFlushPageByMMUIdxData structure that has been allocated
+ * specifically for this helper.  Free the structure when done.
+ */
+static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
+                                             run_on_cpu_data data)
+{
+    TLBFlushPageByMMUIdxData *d = data.host_ptr;
+
+    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
+    g_free(d);
+}
+
 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
 {
-    target_ulong addr_and_mmu_idx;
-
     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

     /* This should already be page aligned */
-    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
-    addr_and_mmu_idx |= idxmap;
+    addr &= TARGET_PAGE_MASK;

-    if (!qemu_cpu_is_self(cpu)) {
-        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
-                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+    if (qemu_cpu_is_self(cpu)) {
+        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
+    } else if (idxmap < TARGET_PAGE_SIZE) {
+        /*
+         * Most targets have only a few mmu_idx.  In the case where
+         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
+         * allocating memory for this operation.
+         */
+        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
+                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
     } else {
-        tlb_flush_page_by_mmuidx_async_work(
-            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
+
+        /* Otherwise allocate a structure, freed by the worker. */
+        d->addr = addr;
+        d->idxmap = idxmap;
+        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
+                         RUN_ON_CPU_HOST_PTR(d));
     }
 }

@@ -XXX,XX +XXX,XX @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                        uint16_t idxmap)
 {
-    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
-    target_ulong addr_and_mmu_idx;
-
     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

     /* This should already be page aligned */
-    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
-    addr_and_mmu_idx |= idxmap;
+    addr &= TARGET_PAGE_MASK;

-    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
-    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+    /*
+     * Allocate memory to hold addr+idxmap only when needed.
+     * See tlb_flush_page_by_mmuidx for details.
+     */
+    if (idxmap < TARGET_PAGE_SIZE) {
+        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+    } else {
+        CPUState *dst_cpu;
+
+        /* Allocate a separate data block for each destination cpu. */
+        CPU_FOREACH(dst_cpu) {
+            if (dst_cpu != src_cpu) {
+                TLBFlushPageByMMUIdxData *d
+                    = g_new(TLBFlushPageByMMUIdxData, 1);
+
+                d->addr = addr;
+                d->idxmap = idxmap;
+                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
+                                 RUN_ON_CPU_HOST_PTR(d));
+            }
+        }
+    }
+
+    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
 }

 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               uint16_t idxmap)
 {
-    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
-    target_ulong addr_and_mmu_idx;
-
     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

     /* This should already be page aligned */
-    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
-    addr_and_mmu_idx |= idxmap;
+    addr &= TARGET_PAGE_MASK;

-    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
-    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+    /*
+     * Allocate memory to hold addr+idxmap only when needed.
+     * See tlb_flush_page_by_mmuidx for details.
+     */
+    if (idxmap < TARGET_PAGE_SIZE) {
+        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+    } else {
+        CPUState *dst_cpu;
+        TLBFlushPageByMMUIdxData *d;
+
+        /* Allocate a separate data block for each destination cpu. */
+        CPU_FOREACH(dst_cpu) {
+            if (dst_cpu != src_cpu) {
+                d = g_new(TLBFlushPageByMMUIdxData, 1);
+                d->addr = addr;
+                d->idxmap = idxmap;
+                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
+                                 RUN_ON_CPU_HOST_PTR(d));
+            }
+        }
+
+        d = g_new(TLBFlushPageByMMUIdxData, 1);
+        d->addr = addr;
+        d->idxmap = idxmap;
+        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
+                              RUN_ON_CPU_HOST_PTR(d));
+    }
 }

 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
--
2.20.1
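A standalone sketch of why the "idxmap < TARGET_PAGE_SIZE" test above makes the packed encoding lossless (illustrative page-size constants, not QEMU's headers): a page-aligned address has its low TARGET_PAGE_BITS clear, so a small idxmap fits there and both halves can be recovered exactly.

    #include <assert.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS 12                      /* assumed 4 KiB pages */
    #define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)
    #define TARGET_PAGE_MASK (~(uint64_t)(TARGET_PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t addr = 0x7f0000442000;              /* page aligned */
        uint16_t idxmap = 0x0005;                    /* flush mmu_idx 0 and 2 */

        assert(idxmap < TARGET_PAGE_SIZE);           /* fits in the page offset */
        uint64_t enc = addr | idxmap;                /* the async_1 encoding */

        assert((enc & TARGET_PAGE_MASK) == addr);              /* addr recovered */
        assert((uint16_t)(enc & ~TARGET_PAGE_MASK) == idxmap); /* idxmap recovered */
        return 0;
    }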
Deleted patch
From: Carlos Santos <casantos@redhat.com>

uClibc defines _SC_LEVEL1_ICACHE_LINESIZE and _SC_LEVEL1_DCACHE_LINESIZE
but the corresponding sysconf calls return -1, which is a valid result,
meaning that the limit is indeterminate.

Handle this situation using the fallback values instead of crashing due
to an assertion failure.

Signed-off-by: Carlos Santos <casantos@redhat.com>
Message-Id: <20191017123713.30192-1-casantos@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 util/cacheinfo.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/util/cacheinfo.c b/util/cacheinfo.c
index XXXXXXX..XXXXXXX 100644
--- a/util/cacheinfo.c
+++ b/util/cacheinfo.c
@@ -XXX,XX +XXX,XX @@ static void sys_cache_info(int *isize, int *dsize)
 static void sys_cache_info(int *isize, int *dsize)
 {
 # ifdef _SC_LEVEL1_ICACHE_LINESIZE
-    *isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
+    int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
+    if (tmp_isize > 0) {
+        *isize = tmp_isize;
+    }
 # endif
 # ifdef _SC_LEVEL1_DCACHE_LINESIZE
-    *dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+    int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+    if (tmp_dsize > 0) {
+        *dsize = tmp_dsize;
+    }
 # endif
 }
 #endif /* sys_cache_info */
--
2.20.1
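For reference, a minimal sketch of the sysconf(3) behavior the fix handles, separate from QEMU: POSIX allows a return of -1 with errno left unchanged to mean the limit is indeterminate, which is distinct from an error.

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    #ifdef _SC_LEVEL1_DCACHE_LINESIZE
        errno = 0;
        long linesize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
        if (linesize > 0) {
            printf("D-cache line: %ld bytes\n", linesize);
        } else if (errno == 0) {
            puts("indeterminate -- keep the fallback value");
        } else {
            perror("sysconf");
        }
    #endif
        return 0;
    }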
Deleted patch
The accel_initialised variable no longer has any setters.

Fixes: 6f6e1698a68c
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 vl.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/vl.c b/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/vl.c
+++ b/vl.c
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
 {
     const char *accel;
     char **accel_list, **tmp;
-    bool accel_initialised = false;
     bool init_failed = false;

     qemu_opts_foreach(qemu_find_opts("icount"),
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)

     accel_list = g_strsplit(accel, ":", 0);

-    for (tmp = accel_list; !accel_initialised && tmp && *tmp; tmp++) {
+    for (tmp = accel_list; tmp && *tmp; tmp++) {
         /*
          * Filter invalid accelerators here, to prevent obscenities
          * such as "-machine accel=tcg,,thread=single".
--
2.20.1
Deleted patch
The accel_list and tmp variables are only used when manufacturing
-machine accel options based on -accel.

Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 vl.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vl.c b/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/vl.c
+++ b/vl.c
@@ -XXX,XX +XXX,XX @@ static int do_configure_accelerator(void *opaque, QemuOpts *opts, Error **errp)
 static void configure_accelerators(const char *progname)
 {
     const char *accel;
-    char **accel_list, **tmp;
     bool init_failed = false;

     qemu_opts_foreach(qemu_find_opts("icount"),
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)

     accel = qemu_opt_get(qemu_get_machine_opts(), "accel");
     if (QTAILQ_EMPTY(&qemu_accel_opts.head)) {
+        char **accel_list, **tmp;
+
         if (accel == NULL) {
             /* Select the default accelerator */
             if (!accel_find("tcg") && !accel_find("kvm")) {
--
2.20.1
Deleted patch
The result of g_strsplit is never NULL.

Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 vl.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vl.c b/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/vl.c
+++ b/vl.c
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)

     accel_list = g_strsplit(accel, ":", 0);

-    for (tmp = accel_list; tmp && *tmp; tmp++) {
+    for (tmp = accel_list; *tmp; tmp++) {
         /*
          * Filter invalid accelerators here, to prevent obscenities
          * such as "-machine accel=tcg,,thread=single".
--
2.20.1
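A tiny GLib sketch of the property relied on above: g_strsplit() always returns a newly allocated, NULL-terminated vector (never NULL itself), so only the elements need testing.

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        gchar **list = g_strsplit("kvm:tcg", ":", 0);
        for (gchar **tmp = list; *tmp; tmp++) {      /* no NULL check on list */
            printf("accelerator candidate: %s\n", *tmp);
        }
        g_strfreev(list);                            /* frees vector and elements */
        return 0;
    }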
Deleted patch
By choosing "tcg:kvm" when kvm is not enabled, we generate
an incorrect warning: "invalid accelerator kvm".

At the same time, use g_str_has_suffix rather than open-coding
the same operation.

Presumably the inverse is also true with --disable-tcg.

Fixes: 28a0961757fc
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Aleksandar Markovic <amarkovic@wavecomp.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 vl.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/vl.c b/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/vl.c
+++ b/vl.c
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)

         if (accel == NULL) {
             /* Select the default accelerator */
-            if (!accel_find("tcg") && !accel_find("kvm")) {
-                error_report("No accelerator selected and"
-                             " no default accelerator available");
-                exit(1);
-            } else {
-                int pnlen = strlen(progname);
-                if (pnlen >= 3 && g_str_equal(&progname[pnlen - 3], "kvm")) {
+            bool have_tcg = accel_find("tcg");
+            bool have_kvm = accel_find("kvm");
+
+            if (have_tcg && have_kvm) {
+                if (g_str_has_suffix(progname, "kvm")) {
                     /* If the program name ends with "kvm", we prefer KVM */
                     accel = "kvm:tcg";
                 } else {
                     accel = "tcg:kvm";
                 }
+            } else if (have_kvm) {
+                accel = "kvm";
+            } else if (have_tcg) {
+                accel = "tcg";
+            } else {
+                error_report("No accelerator selected and"
+                             " no default accelerator available");
+                exit(1);
             }
         }
-
         accel_list = g_strsplit(accel, ":", 0);

         for (tmp = accel_list; *tmp; tmp++) {
--
2.20.1
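The open-coded suffix test that the patch replaces is equivalent to g_str_has_suffix; a standalone sketch of the equivalence (hypothetical program names):

    #include <assert.h>
    #include <glib.h>
    #include <string.h>

    /* The old open-coded form of g_str_has_suffix(s, "kvm"). */
    static gboolean ends_with_kvm(const char *s)
    {
        size_t len = strlen(s);
        return len >= 3 && g_str_equal(&s[len - 3], "kvm");
    }

    int main(void)
    {
        const char *names[] = { "qemu-kvm", "qemu-system-aarch64", "kvm" };
        for (int i = 0; i < 3; i++) {
            assert(ends_with_kvm(names[i]) == g_str_has_suffix(names[i], "kvm"));
        }
        return 0;
    }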
Deleted patch
There is only one caller for tlb_table_flush_by_mmuidx.  Place
the result at the earlier line number, due to an expected user
in the near future.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
     }
 }

-static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
 {
     tlb_mmu_resize_locked(env, mmu_idx);
-    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
     env_tlb(env)->d[mmu_idx].n_used_entries = 0;
+    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
+    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
+    env_tlb(env)->d[mmu_idx].vindex = 0;
+    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
+    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
+           sizeof(env_tlb(env)->d[0].vtable));
 }

 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
@@ -XXX,XX +XXX,XX @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
     *pelide = elide;
 }

-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
-{
-    tlb_table_flush_by_mmuidx(env, mmu_idx);
-    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
-    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
-    env_tlb(env)->d[mmu_idx].vindex = 0;
-    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
-           sizeof(env_tlb(env)->d[0].vtable));
-}
-
 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 {
     CPUArchState *env = cpu->env_ptr;
--
2.20.1
Deleted patch
There are no users of this function outside cputlb.c,
and its interface will change in the next patch.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst.h | 5 -----
 accel/tcg/cputlb.c      | 5 +++++
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -XXX,XX +XXX,XX @@ static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
     return (addr >> TARGET_PAGE_BITS) & size_mask;
 }

-static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
-{
-    return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
-}
-
 /* Find the TLB entry corresponding to the mmu_idx + address pair.  */
 static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                      target_ulong addr)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

+static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
+{
+    return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
+}
+
 static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
 {
     return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
--
2.20.1
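The two helpers are rearrangements of one identity: for a power-of-two table, the fast-path mask is (n_entries - 1) << CPU_TLB_ENTRY_BITS. A standalone check (CPU_TLB_ENTRY_BITS value assumed for illustration):

    #include <assert.h>
    #include <stddef.h>

    #define CPU_TLB_ENTRY_BITS 5            /* assumed: 32-byte CPUTLBEntry */

    int main(void)
    {
        for (size_t n = 1; n <= 4096; n <<= 1) {
            size_t mask = (n - 1) << CPU_TLB_ENTRY_BITS;
            /* tlb_n_entries: entry count recovered from the mask */
            assert((mask >> CPU_TLB_ENTRY_BITS) + 1 == n);
            /* sizeof_tlb: total table size in bytes */
            assert(mask + (1 << CPU_TLB_ENTRY_BITS) == n << CPU_TLB_ENTRY_BITS);
        }
        return 0;
    }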
Deleted patch
We do not need the entire CPUArchState to compute these values.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

-static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
+static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
 {
-    return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1;
+    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
 }

-static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
+static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
 {
-    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
+    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
 }

 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)
 static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
 {
     CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
-    size_t old_size = tlb_n_entries(env, mmu_idx);
+    size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
     size_t rate;
     size_t new_size = old_size;
     int64_t now = get_clock_realtime();
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
     env_tlb(env)->d[mmu_idx].large_page_addr = -1;
     env_tlb(env)->d[mmu_idx].large_page_mask = -1;
     env_tlb(env)->d[mmu_idx].vindex = 0;
-    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
+    memset(env_tlb(env)->f[mmu_idx].table, -1,
+           sizeof_tlb(&env_tlb(env)->f[mmu_idx]));
     memset(env_tlb(env)->d[mmu_idx].vtable, -1,
            sizeof(env_tlb(env)->d[0].vtable));
 }
@@ -XXX,XX +XXX,XX @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         unsigned int i;
-        unsigned int n = tlb_n_entries(env, mmu_idx);
+        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

         for (i = 0; i < n; i++) {
             tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
--
2.20.1
Deleted patch
No functional change, but the smaller expressions make
the code easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)

 /**
  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
- * @env: CPU that owns the TLB
- * @mmu_idx: MMU index of the TLB
+ * @desc: The CPUTLBDesc portion of the TLB
+ * @fast: The CPUTLBDescFast portion of the same TLB
  *
  * Called with tlb_lock_held.
  *
@@ -XXX,XX +XXX,XX @@ static void tlb_dyn_init(CPUArchState *env)
  * high), since otherwise we are likely to have a significant amount of
  * conflict misses.
  */
-static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
 {
-    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
-    size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
+    size_t old_size = tlb_n_entries(fast);
     size_t rate;
     size_t new_size = old_size;
     int64_t now = get_clock_realtime();
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
         return;
     }

-    g_free(env_tlb(env)->f[mmu_idx].table);
-    g_free(env_tlb(env)->d[mmu_idx].iotlb);
+    g_free(fast->table);
+    g_free(desc->iotlb);

     tlb_window_reset(desc, now, 0);
     /* desc->n_used_entries is cleared by the caller */
-    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
-    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
-    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
+    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+    fast->table = g_try_new(CPUTLBEntry, new_size);
+    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+
     /*
      * If the allocations fail, try smaller sizes. We just freed some
      * memory, so going back to half of new_size has a good chance of working.
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
      * allocations to fail though, so we progressively reduce the allocation
      * size, aborting if we cannot even allocate the smallest TLB we support.
      */
-    while (env_tlb(env)->f[mmu_idx].table == NULL ||
-           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
+    while (fast->table == NULL || desc->iotlb == NULL) {
         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
             error_report("%s: %s", __func__, strerror(errno));
             abort();
         }
         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
-        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

-        g_free(env_tlb(env)->f[mmu_idx].table);
-        g_free(env_tlb(env)->d[mmu_idx].iotlb);
-        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
-        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
+        g_free(fast->table);
+        g_free(desc->iotlb);
+        fast->table = g_try_new(CPUTLBEntry, new_size);
+        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
     }
 }

 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
 {
-    tlb_mmu_resize_locked(env, mmu_idx);
+    tlb_mmu_resize_locked(&env_tlb(env)->d[mmu_idx], &env_tlb(env)->f[mmu_idx]);
     env_tlb(env)->d[mmu_idx].n_used_entries = 0;
     env_tlb(env)->d[mmu_idx].large_page_addr = -1;
     env_tlb(env)->d[mmu_idx].large_page_mask = -1;
--
2.20.1
Deleted patch
No functional change, but the smaller expressions make
the code easier to read.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)

 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
 {
-    tlb_mmu_resize_locked(&env_tlb(env)->d[mmu_idx], &env_tlb(env)->f[mmu_idx]);
-    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
-    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
-    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
-    env_tlb(env)->d[mmu_idx].vindex = 0;
-    memset(env_tlb(env)->f[mmu_idx].table, -1,
-           sizeof_tlb(&env_tlb(env)->f[mmu_idx]));
-    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
-           sizeof(env_tlb(env)->d[0].vtable));
+    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
+    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
+
+    tlb_mmu_resize_locked(desc, fast);
+    desc->n_used_entries = 0;
+    desc->large_page_addr = -1;
+    desc->large_page_mask = -1;
+    desc->vindex = 0;
+    memset(fast->table, -1, sizeof_tlb(fast));
+    memset(desc->vtable, -1, sizeof(desc->vtable));
 }

 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
--
2.20.1
From: Philippe Mathieu-Daudé <philmd@redhat.com>

To avoid scrolling each instruction when reviewing tcg
helpers written for the decodetree script, display the
.decode files (similar to header declarations) before
the C source (implementation of previous declarations).

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20191230082856.30556-1-philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 scripts/git.orderfile | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/scripts/git.orderfile b/scripts/git.orderfile
index XXXXXXX..XXXXXXX 100644
--- a/scripts/git.orderfile
+++ b/scripts/git.orderfile
@@ -XXX,XX +XXX,XX @@ qga/*.json
 # headers
 *.h

+# decoding tree specification
+*.decode
+
 # code
 *.c
--
2.20.1

A given RISU testcase for SVE can produce

  tcg-op-vec.c:511: do_shifti: Assertion `i >= 0 && i < (8 << vece)' failed.

because expand_vec_sari gave a shift count of 32 to a MO_32
vector shift.

In 44f1441dbe1, we changed from direct expansion of vector opcodes
to re-use of the tcg expanders.  So while the comment correctly notes
that the hw will handle such a shift count, we now have to take our
own sanity checks into account.  Which is easy in this particular case.

Fixes: 44f1441dbe1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.inc.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void expand_vec_sari(TCGType type, unsigned vece,

     case MO_64:
         if (imm <= 32) {
-            /* We can emulate a small sign extend by performing an arithmetic
+            /*
+             * We can emulate a small sign extend by performing an arithmetic
              * 32-bit shift and overwriting the high half of a 64-bit logical
-             * shift (note that the ISA says shift of 32 is valid).
+             * shift.  Note that the ISA says shift of 32 is valid, but TCG
+             * does not, so we have to bound the smaller shift -- we get the
+             * same result in the high half either way.
              */
             t1 = tcg_temp_new_vec(type);
-            tcg_gen_sari_vec(MO_32, t1, v1, imm);
+            tcg_gen_sari_vec(MO_32, t1, v1, MIN(imm, 31));
             tcg_gen_shri_vec(MO_64, v0, v1, imm);
             vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32,
                       tcgv_vec_arg(v0), tcgv_vec_arg(v0),
--
2.20.1
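Why the clamp to 31 preserves the result: in the imm == 32 case the high half of a 64-bit arithmetic shift is pure sign replication, which a 32-bit arithmetic shift by 31 also produces. A scalar sketch of that identity (relies on arithmetic >> for signed types, which is the norm but formally implementation-defined in C):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t samples[] = { INT64_MIN, -1, 0, 1, INT64_MAX, 0x123456789abcdef0LL };
        for (int i = 0; i < 6; i++) {
            int64_t x = samples[i];
            int32_t hi = (int32_t)((uint64_t)x >> 32);      /* high 32-bit lane */
            /* high half of sar64(x, 32) ... */
            int32_t want = (int32_t)((uint64_t)(x >> 32) >> 32);
            /* ... equals sar32(hi, 31): all sign bits */
            assert(want == (hi >> 31));
        }
        return 0;
    }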
Do not call get_clock_realtime() in tlb_mmu_resize_locked,
but hoist outside of any loop over a set of tlbs.  There are
only two (indirect) callers, tlb_flush_by_mmuidx_async_work
and tlb_flush_page_locked, so this is not onerous.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
  * high), since otherwise we are likely to have a significant amount of
  * conflict misses.
  */
-static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
+                                  int64_t now)
 {
     size_t old_size = tlb_n_entries(fast);
     size_t rate;
     size_t new_size = old_size;
-    int64_t now = get_clock_realtime();
     int64_t window_len_ms = 100;
     int64_t window_len_ns = window_len_ms * 1000 * 1000;
     bool window_expired = now > desc->window_begin_ns + window_len_ns;
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
     memset(desc->vtable, -1, sizeof(desc->vtable));
 }

-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
+                                        int64_t now)
 {
     CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
     CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

-    tlb_mmu_resize_locked(desc, fast);
+    tlb_mmu_resize_locked(desc, fast, now);
     tlb_mmu_flush_locked(desc, fast);
 }

@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
     CPUArchState *env = cpu->env_ptr;
     uint16_t asked = data.host_int;
     uint16_t all_dirty, work, to_clean;
+    int64_t now = get_clock_realtime();

     assert_cpu_is_self(cpu);

@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)

     for (work = to_clean; work != 0; work &= work - 1) {
         int mmu_idx = ctz32(work);
-        tlb_flush_one_mmuidx_locked(env, mmu_idx);
+        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
     }

     qemu_spin_unlock(&env_tlb(env)->c.lock);
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
         tlb_debug("forcing full flush midx %d ("
                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                   midx, lp_addr, lp_mask);
-        tlb_flush_one_mmuidx_locked(env, midx);
+        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
     } else {
         if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
             tlb_n_used_entries_dec(env, midx);
--
2.20.1

The comment in tcg-runtime-gvec.c about CONFIG_VECTOR16 says that
tcg-op-gvec.c has eliminated size 8 vectors, and only passes on
multiples of 16.  This may have been true of the first few operations,
but is not true of all operations.

In particular, multiply, shift by scalar, and compare of 8- and 16-bit
elements are not expanded inline if host vector operations are not
supported.

For an x86_64 host that does not support AVX, this means that we will
fall back to the helper, which will attempt to use SSE instructions,
which will SEGV on an invalid 8-byte aligned memory operation.

This patch simply removes the CONFIG_VECTOR16 code and configuration
without further simplification.

Buglink: https://bugs.launchpad.net/bugs/1863508
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 configure                    | 56 ------------
 accel/tcg/tcg-runtime-gvec.c | 35 +---------------------
 2 files changed, 1 insertion(+), 90 deletions(-)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ if test "$plugins" = "yes" &&
     "for this purpose. You can't build with --static."
   fi

-########################################
-# See if 16-byte vector operations are supported.
-# Even without a vector unit the compiler may expand these.
-# There is a bug in old GCC for PPC that crashes here.
-# Unfortunately it's the system compiler for Centos 7.
-
-cat > $TMPC << EOF
-typedef unsigned char U1 __attribute__((vector_size(16)));
-typedef unsigned short U2 __attribute__((vector_size(16)));
-typedef unsigned int U4 __attribute__((vector_size(16)));
-typedef unsigned long long U8 __attribute__((vector_size(16)));
-typedef signed char S1 __attribute__((vector_size(16)));
-typedef signed short S2 __attribute__((vector_size(16)));
-typedef signed int S4 __attribute__((vector_size(16)));
-typedef signed long long S8 __attribute__((vector_size(16)));
-static U1 a1, b1;
-static U2 a2, b2;
-static U4 a4, b4;
-static U8 a8, b8;
-static S1 c1;
-static S2 c2;
-static S4 c4;
-static S8 c8;
-static int i;
-void helper(void *d, void *a, int shift, int i);
-void helper(void *d, void *a, int shift, int i)
-{
-    *(U1 *)(d + i) = *(U1 *)(a + i) << shift;
-    *(U2 *)(d + i) = *(U2 *)(a + i) << shift;
-    *(U4 *)(d + i) = *(U4 *)(a + i) << shift;
-    *(U8 *)(d + i) = *(U8 *)(a + i) << shift;
-}
-int main(void)
-{
-    a1 += b1; a2 += b2; a4 += b4; a8 += b8;
-    a1 -= b1; a2 -= b2; a4 -= b4; a8 -= b8;
-    a1 *= b1; a2 *= b2; a4 *= b4; a8 *= b8;
-    a1 &= b1; a2 &= b2; a4 &= b4; a8 &= b8;
-    a1 |= b1; a2 |= b2; a4 |= b4; a8 |= b8;
-    a1 ^= b1; a2 ^= b2; a4 ^= b4; a8 ^= b8;
-    a1 <<= i; a2 <<= i; a4 <<= i; a8 <<= i;
-    a1 >>= i; a2 >>= i; a4 >>= i; a8 >>= i;
-    c1 >>= i; c2 >>= i; c4 >>= i; c8 >>= i;
-    return 0;
-}
-EOF
-
-vector16=no
-if compile_prog "" "" ; then
-  vector16=yes
-fi
-
 ########################################
 # See if __attribute__((alias)) is supported.
 # This false for Xcode 9, but has been remedied for Xcode 10.
@@ -XXX,XX +XXX,XX @@ if test "$atomic64" = "yes" ; then
   echo "CONFIG_ATOMIC64=y" >> $config_host_mak
 fi

-if test "$vector16" = "yes" ; then
-  echo "CONFIG_VECTOR16=y" >> $config_host_mak
-fi
-
 if test "$attralias" = "yes" ; then
   echo "CONFIG_ATTRIBUTE_ALIAS=y" >> $config_host_mak
 fi
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-runtime-gvec.c
+++ b/accel/tcg/tcg-runtime-gvec.c
@@ -XXX,XX +XXX,XX @@
 #include "tcg/tcg-gvec-desc.h"


-/* Virtually all hosts support 16-byte vectors.  Those that don't can emulate
- * them via GCC's generic vector extension.  This turns out to be simpler and
- * more reliable than getting the compiler to autovectorize.
- *
- * In tcg-op-gvec.c, we asserted that both the size and alignment of the data
- * are multiples of 16.
- *
- * When the compiler does not support all of the operations we require, the
- * loops are written so that we can always fall back on the base types.
- */
-#ifdef CONFIG_VECTOR16
-typedef uint8_t vec8 __attribute__((vector_size(16)));
-typedef uint16_t vec16 __attribute__((vector_size(16)));
-typedef uint32_t vec32 __attribute__((vector_size(16)));
-typedef uint64_t vec64 __attribute__((vector_size(16)));
-
-typedef int8_t svec8 __attribute__((vector_size(16)));
-typedef int16_t svec16 __attribute__((vector_size(16)));
-typedef int32_t svec32 __attribute__((vector_size(16)));
-typedef int64_t svec64 __attribute__((vector_size(16)));
-
-#define DUP16(X) { X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X }
-#define DUP8(X)  { X, X, X, X, X, X, X, X }
-#define DUP4(X)  { X, X, X, X }
-#define DUP2(X)  { X, X }
-#else
 typedef uint8_t vec8;
 typedef uint16_t vec16;
 typedef uint32_t vec32;
@@ -XXX,XX +XXX,XX @@ typedef int64_t svec64;
 #define DUP8(X)  X
 #define DUP4(X)  X
 #define DUP2(X)  X
-#endif /* CONFIG_VECTOR16 */

 static inline void clear_high(void *d, intptr_t oprsz, uint32_t desc)
 {
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc)
     clear_high(d, oprsz, desc);
 }

-/* If vectors are enabled, the compiler fills in -1 for true.
-   Otherwise, we must take care of this by hand.  */
-#ifdef CONFIG_VECTOR16
-# define DO_CMP0(X) X
-#else
-# define DO_CMP0(X) -(X)
-#endif
+#define DO_CMP0(X) -(X)

 #define DO_CMP1(NAME, TYPE, OP)                                            \
 void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc)                \
--
2.20.1
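For context, what the removed configure probe was checking: GCC's generic vector extension, which gives fixed-size vector types with element-wise operators even on hosts without a vector unit. A minimal standalone sketch:

    /* 4 x 32-bit lanes in a 16-byte vector (GCC/clang extension). */
    typedef unsigned int U4 __attribute__((vector_size(16)));

    static U4 add4(U4 a, U4 b)
    {
        return a + b;                              /* element-wise add */
    }

    int main(void)
    {
        U4 a = { 1, 2, 3, 4 };
        U4 b = { 10, 20, 30, 40 };
        U4 c = add4(a, b);
        return c[0] == 11 && c[3] == 44 ? 0 : 1;   /* subscripting is supported */
    }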
We will want to be able to flush a tlb without resizing.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
     }
 }

-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
+static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
 {
-    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
-    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
-
-    tlb_mmu_resize_locked(desc, fast);
     desc->n_used_entries = 0;
     desc->large_page_addr = -1;
     desc->large_page_mask = -1;
     desc->vindex = 0;
     memset(fast->table, -1, sizeof_tlb(fast));
     memset(desc->vtable, -1, sizeof(desc->vtable));
 }

+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
+{
+    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
+    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
+
+    tlb_mmu_resize_locked(desc, fast);
+    tlb_mmu_flush_locked(desc, fast);
+}
+
 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
 {
     env_tlb(env)->d[mmu_idx].n_used_entries++;
--
2.20.1

Partial cleanup from the CONFIG_VECTOR16 removal.
Replace the vec* types with their scalar expansions.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tcg-runtime-gvec.c | 270 +++++++++++++++++------------------
 1 file changed, 130 insertions(+), 140 deletions(-)

diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-runtime-gvec.c
+++ b/accel/tcg/tcg-runtime-gvec.c
@@ -XXX,XX +XXX,XX @@
 #include "tcg/tcg-gvec-desc.h"


-typedef uint8_t vec8;
-typedef uint16_t vec16;
-typedef uint32_t vec32;
-typedef uint64_t vec64;
-
-typedef int8_t svec8;
-typedef int16_t svec16;
-typedef int32_t svec32;
-typedef int64_t svec64;
-
 #define DUP16(X) X
 #define DUP8(X)  X
 #define DUP4(X)  X
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_add8)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec8)) {
-        *(vec8 *)(d + i) = *(vec8 *)(a + i) + *(vec8 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)(d + i) = *(uint8_t *)(a + i) + *(uint8_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_add16)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec16)) {
-        *(vec16 *)(d + i) = *(vec16 *)(a + i) + *(vec16 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)(d + i) = *(uint16_t *)(a + i) + *(uint16_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_add32)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec32)) {
-        *(vec32 *)(d + i) = *(vec32 *)(a + i) + *(vec32 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+        *(uint32_t *)(d + i) = *(uint32_t *)(a + i) + *(uint32_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_add64)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = *(vec64 *)(a + i) + *(vec64 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) + *(uint64_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_add64)(void *d, void *a, void *b, uint32_t desc)
 void HELPER(gvec_adds8)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec8 vecb = (vec8)DUP16(b);
+    uint8_t vecb = (uint8_t)DUP16(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec8)) {
-        *(vec8 *)(d + i) = *(vec8 *)(a + i) + vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)(d + i) = *(uint8_t *)(a + i) + vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_adds8)(void *d, void *a, uint64_t b, uint32_t desc)
 void HELPER(gvec_adds16)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec16 vecb = (vec16)DUP8(b);
+    uint16_t vecb = (uint16_t)DUP8(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec16)) {
-        *(vec16 *)(d + i) = *(vec16 *)(a + i) + vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)(d + i) = *(uint16_t *)(a + i) + vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_adds16)(void *d, void *a, uint64_t b, uint32_t desc)
 void HELPER(gvec_adds32)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec32 vecb = (vec32)DUP4(b);
+    uint32_t vecb = (uint32_t)DUP4(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec32)) {
-        *(vec32 *)(d + i) = *(vec32 *)(a + i) + vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+        *(uint32_t *)(d + i) = *(uint32_t *)(a + i) + vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_adds32)(void *d, void *a, uint64_t b, uint32_t desc)
 void HELPER(gvec_adds64)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec64 vecb = (vec64)DUP2(b);
+    uint64_t vecb = (uint64_t)DUP2(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = *(vec64 *)(a + i) + vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) + vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sub8)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec8)) {
-        *(vec8 *)(d + i) = *(vec8 *)(a + i) - *(vec8 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)(d + i) = *(uint8_t *)(a + i) - *(uint8_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sub16)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec16)) {
-        *(vec16 *)(d + i) = *(vec16 *)(a + i) - *(vec16 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)(d + i) = *(uint16_t *)(a + i) - *(uint16_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sub32)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec32)) {
-        *(vec32 *)(d + i) = *(vec32 *)(a + i) - *(vec32 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+        *(uint32_t *)(d + i) = *(uint32_t *)(a + i) - *(uint32_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sub64)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = *(vec64 *)(a + i) - *(vec64 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) - *(uint64_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sub64)(void *d, void *a, void *b, uint32_t desc)
 void HELPER(gvec_subs8)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec8 vecb = (vec8)DUP16(b);
+    uint8_t vecb = (uint8_t)DUP16(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec8)) {
-        *(vec8 *)(d + i) = *(vec8 *)(a + i) - vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)(d + i) = *(uint8_t *)(a + i) - vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_subs8)(void *d, void *a, uint64_t b, uint32_t desc)
 void HELPER(gvec_subs16)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec16 vecb = (vec16)DUP8(b);
+    uint16_t vecb = (uint16_t)DUP8(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec16)) {
-        *(vec16 *)(d + i) = *(vec16 *)(a + i) - vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)(d + i) = *(uint16_t *)(a + i) - vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_subs16)(void *d, void *a, uint64_t b, uint32_t desc)
 void HELPER(gvec_subs32)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec32 vecb = (vec32)DUP4(b);
+    uint32_t vecb = (uint32_t)DUP4(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec32)) {
-        *(vec32 *)(d + i) = *(vec32 *)(a + i) - vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+        *(uint32_t *)(d + i) = *(uint32_t *)(a + i) - vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_subs32)(void *d, void *a, uint64_t b, uint32_t desc)
 void HELPER(gvec_subs64)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec64 vecb = (vec64)DUP2(b);
+    uint64_t vecb = (uint64_t)DUP2(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = *(vec64 *)(a + i) - vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) - vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_mul8)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec8)) {
-        *(vec8 *)(d + i) = *(vec8 *)(a + i) * *(vec8 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)(d + i) = *(uint8_t *)(a + i) * *(uint8_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_mul16)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec16)) {
-        *(vec16 *)(d + i) = *(vec16 *)(a + i) * *(vec16 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)(d + i) = *(uint16_t *)(a + i) * *(uint16_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_mul32)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec32)) {
-        *(vec32 *)(d + i) = *(vec32 *)(a + i) * *(vec32 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+        *(uint32_t *)(d + i) = *(uint32_t *)(a + i) * *(uint32_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_mul64)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = *(vec64 *)(a + i) * *(vec64 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) * *(uint64_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_mul64)(void *d, void *a, void *b, uint32_t desc)
 void HELPER(gvec_muls8)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec8 vecb = (vec8)DUP16(b);
+    uint8_t vecb = (uint8_t)DUP16(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec8)) {
-        *(vec8 *)(d + i) = *(vec8 *)(a + i) * vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)(d + i) = *(uint8_t *)(a + i) * vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_muls8)(void *d, void *a, uint64_t b, uint32_t desc)
 void HELPER(gvec_muls16)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec16 vecb = (vec16)DUP8(b);
+    uint16_t vecb = (uint16_t)DUP8(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec16)) {
-        *(vec16 *)(d + i) = *(vec16 *)(a + i) * vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)(d + i) = *(uint16_t *)(a + i) * vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_muls16)(void *d, void *a, uint64_t b, uint32_t desc)
 void HELPER(gvec_muls32)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec32 vecb = (vec32)DUP4(b);
+    uint32_t vecb = (uint32_t)DUP4(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec32)) {
-        *(vec32 *)(d + i) = *(vec32 *)(a + i) * vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+        *(uint32_t *)(d + i) = *(uint32_t *)(a + i) * vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_muls32)(void *d, void *a, uint64_t b, uint32_t desc)
 void HELPER(gvec_muls64)(void *d, void *a, uint64_t b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
-    vec64 vecb = (vec64)DUP2(b);
+    uint64_t vecb = (uint64_t)DUP2(b);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = *(vec64 *)(a + i) * vecb;
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) * vecb;
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_neg8)(void *d, void *a, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec8)) {
-        *(vec8 *)(d + i) = -*(vec8 *)(a + i);
+    for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+        *(uint8_t *)(d + i) = -*(uint8_t *)(a + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_neg16)(void *d, void *a, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec16)) {
-        *(vec16 *)(d + i) = -*(vec16 *)(a + i);
+    for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+        *(uint16_t *)(d + i) = -*(uint16_t *)(a + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_neg32)(void *d, void *a, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec32)) {
-        *(vec32 *)(d + i) = -*(vec32 *)(a + i);
+    for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+        *(uint32_t *)(d + i) = -*(uint32_t *)(a + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_neg64)(void *d, void *a, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = -*(vec64 *)(a + i);
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = -*(uint64_t *)(a + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_not)(void *d, void *a, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = ~*(vec64 *)(a + i);
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = ~*(uint64_t *)(a + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_and)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = *(vec64 *)(a + i) & *(vec64 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & *(uint64_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_or)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = *(vec64 *)(a + i) | *(vec64 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) | *(uint64_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_xor)(void *d, void *a, void *b, uint32_t desc)
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t i;

-    for (i = 0; i < oprsz; i += sizeof(vec64)) {
-        *(vec64 *)(d + i) = *(vec64 *)(a + i) ^ *(vec64 *)(b + i);
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) ^ *(uint64_t *)(b + i);
     }
     clear_high(d, oprsz, desc);
 }
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_andc)(void *d, void *a, void *b, uint32_t desc)
432
intptr_t oprsz = simd_oprsz(desc);
433
intptr_t i;
434
435
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
436
- *(vec64 *)(d + i) = *(vec64 *)(a + i) &~ *(vec64 *)(b + i);
437
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
438
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) &~ *(uint64_t *)(b + i);
439
}
440
clear_high(d, oprsz, desc);
441
}
442
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_orc)(void *d, void *a, void *b, uint32_t desc)
443
intptr_t oprsz = simd_oprsz(desc);
444
intptr_t i;
445
446
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
447
- *(vec64 *)(d + i) = *(vec64 *)(a + i) |~ *(vec64 *)(b + i);
448
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
449
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) |~ *(uint64_t *)(b + i);
450
}
451
clear_high(d, oprsz, desc);
452
}
453
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_nand)(void *d, void *a, void *b, uint32_t desc)
454
intptr_t oprsz = simd_oprsz(desc);
455
intptr_t i;
456
457
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
458
- *(vec64 *)(d + i) = ~(*(vec64 *)(a + i) & *(vec64 *)(b + i));
459
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
460
+ *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) & *(uint64_t *)(b + i));
461
}
462
clear_high(d, oprsz, desc);
463
}
464
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_nor)(void *d, void *a, void *b, uint32_t desc)
465
intptr_t oprsz = simd_oprsz(desc);
466
intptr_t i;
467
468
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
469
- *(vec64 *)(d + i) = ~(*(vec64 *)(a + i) | *(vec64 *)(b + i));
470
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
471
+ *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) | *(uint64_t *)(b + i));
472
}
473
clear_high(d, oprsz, desc);
474
}
475
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_eqv)(void *d, void *a, void *b, uint32_t desc)
476
intptr_t oprsz = simd_oprsz(desc);
477
intptr_t i;
478
479
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
480
- *(vec64 *)(d + i) = ~(*(vec64 *)(a + i) ^ *(vec64 *)(b + i));
481
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
482
+ *(uint64_t *)(d + i) = ~(*(uint64_t *)(a + i) ^ *(uint64_t *)(b + i));
483
}
484
clear_high(d, oprsz, desc);
485
}
486
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_eqv)(void *d, void *a, void *b, uint32_t desc)
487
void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc)
488
{
489
intptr_t oprsz = simd_oprsz(desc);
490
- vec64 vecb = (vec64)DUP2(b);
491
+ uint64_t vecb = (uint64_t)DUP2(b);
492
intptr_t i;
493
494
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
495
- *(vec64 *)(d + i) = *(vec64 *)(a + i) & vecb;
496
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
497
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & vecb;
498
}
499
clear_high(d, oprsz, desc);
500
}
501
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc)
502
void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
503
{
504
intptr_t oprsz = simd_oprsz(desc);
505
- vec64 vecb = (vec64)DUP2(b);
506
+ uint64_t vecb = (uint64_t)DUP2(b);
507
intptr_t i;
508
509
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
510
- *(vec64 *)(d + i) = *(vec64 *)(a + i) ^ vecb;
511
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
512
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) ^ vecb;
513
}
514
clear_high(d, oprsz, desc);
515
}
516
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
517
void HELPER(gvec_ors)(void *d, void *a, uint64_t b, uint32_t desc)
518
{
519
intptr_t oprsz = simd_oprsz(desc);
520
- vec64 vecb = (vec64)DUP2(b);
521
+ uint64_t vecb = (uint64_t)DUP2(b);
522
intptr_t i;
523
524
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
525
- *(vec64 *)(d + i) = *(vec64 *)(a + i) | vecb;
526
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
527
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) | vecb;
528
}
529
clear_high(d, oprsz, desc);
530
}
531
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_shl8i)(void *d, void *a, uint32_t desc)
532
int shift = simd_data(desc);
533
intptr_t i;
534
535
- for (i = 0; i < oprsz; i += sizeof(vec8)) {
536
- *(vec8 *)(d + i) = *(vec8 *)(a + i) << shift;
537
+ for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
538
+ *(uint8_t *)(d + i) = *(uint8_t *)(a + i) << shift;
539
}
540
clear_high(d, oprsz, desc);
541
}
542
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_shl16i)(void *d, void *a, uint32_t desc)
543
int shift = simd_data(desc);
544
intptr_t i;
545
546
- for (i = 0; i < oprsz; i += sizeof(vec16)) {
547
- *(vec16 *)(d + i) = *(vec16 *)(a + i) << shift;
548
+ for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
549
+ *(uint16_t *)(d + i) = *(uint16_t *)(a + i) << shift;
550
}
551
clear_high(d, oprsz, desc);
552
}
553
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_shl32i)(void *d, void *a, uint32_t desc)
554
int shift = simd_data(desc);
555
intptr_t i;
556
557
- for (i = 0; i < oprsz; i += sizeof(vec32)) {
558
- *(vec32 *)(d + i) = *(vec32 *)(a + i) << shift;
559
+ for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
560
+ *(uint32_t *)(d + i) = *(uint32_t *)(a + i) << shift;
561
}
562
clear_high(d, oprsz, desc);
563
}
564
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_shl64i)(void *d, void *a, uint32_t desc)
565
int shift = simd_data(desc);
566
intptr_t i;
567
568
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
569
- *(vec64 *)(d + i) = *(vec64 *)(a + i) << shift;
570
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
571
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) << shift;
572
}
573
clear_high(d, oprsz, desc);
574
}
575
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_shr8i)(void *d, void *a, uint32_t desc)
576
int shift = simd_data(desc);
577
intptr_t i;
578
579
- for (i = 0; i < oprsz; i += sizeof(vec8)) {
580
- *(vec8 *)(d + i) = *(vec8 *)(a + i) >> shift;
581
+ for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
582
+ *(uint8_t *)(d + i) = *(uint8_t *)(a + i) >> shift;
583
}
584
clear_high(d, oprsz, desc);
585
}
586
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_shr16i)(void *d, void *a, uint32_t desc)
587
int shift = simd_data(desc);
588
intptr_t i;
589
590
- for (i = 0; i < oprsz; i += sizeof(vec16)) {
591
- *(vec16 *)(d + i) = *(vec16 *)(a + i) >> shift;
592
+ for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
593
+ *(uint16_t *)(d + i) = *(uint16_t *)(a + i) >> shift;
594
}
595
clear_high(d, oprsz, desc);
596
}
597
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_shr32i)(void *d, void *a, uint32_t desc)
598
int shift = simd_data(desc);
599
intptr_t i;
600
601
- for (i = 0; i < oprsz; i += sizeof(vec32)) {
602
- *(vec32 *)(d + i) = *(vec32 *)(a + i) >> shift;
603
+ for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
604
+ *(uint32_t *)(d + i) = *(uint32_t *)(a + i) >> shift;
605
}
606
clear_high(d, oprsz, desc);
607
}
608
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_shr64i)(void *d, void *a, uint32_t desc)
609
int shift = simd_data(desc);
610
intptr_t i;
611
612
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
613
- *(vec64 *)(d + i) = *(vec64 *)(a + i) >> shift;
614
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
615
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) >> shift;
616
}
617
clear_high(d, oprsz, desc);
618
}
619
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sar8i)(void *d, void *a, uint32_t desc)
620
int shift = simd_data(desc);
621
intptr_t i;
622
623
- for (i = 0; i < oprsz; i += sizeof(vec8)) {
624
- *(svec8 *)(d + i) = *(svec8 *)(a + i) >> shift;
625
+ for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
626
+ *(int8_t *)(d + i) = *(int8_t *)(a + i) >> shift;
627
}
628
clear_high(d, oprsz, desc);
629
}
630
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sar16i)(void *d, void *a, uint32_t desc)
631
int shift = simd_data(desc);
632
intptr_t i;
633
634
- for (i = 0; i < oprsz; i += sizeof(vec16)) {
635
- *(svec16 *)(d + i) = *(svec16 *)(a + i) >> shift;
636
+ for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
637
+ *(int16_t *)(d + i) = *(int16_t *)(a + i) >> shift;
638
}
639
clear_high(d, oprsz, desc);
640
}
641
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sar32i)(void *d, void *a, uint32_t desc)
642
int shift = simd_data(desc);
643
intptr_t i;
644
645
- for (i = 0; i < oprsz; i += sizeof(vec32)) {
646
- *(svec32 *)(d + i) = *(svec32 *)(a + i) >> shift;
647
+ for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
648
+ *(int32_t *)(d + i) = *(int32_t *)(a + i) >> shift;
649
}
650
clear_high(d, oprsz, desc);
651
}
652
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sar64i)(void *d, void *a, uint32_t desc)
653
int shift = simd_data(desc);
654
intptr_t i;
655
656
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
657
- *(svec64 *)(d + i) = *(svec64 *)(a + i) >> shift;
658
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
659
+ *(int64_t *)(d + i) = *(int64_t *)(a + i) >> shift;
660
}
661
clear_high(d, oprsz, desc);
662
}
663
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc) \
664
}
665
666
#define DO_CMP2(SZ) \
667
- DO_CMP1(gvec_eq##SZ, vec##SZ, ==) \
668
- DO_CMP1(gvec_ne##SZ, vec##SZ, !=) \
669
- DO_CMP1(gvec_lt##SZ, svec##SZ, <) \
670
- DO_CMP1(gvec_le##SZ, svec##SZ, <=) \
671
- DO_CMP1(gvec_ltu##SZ, vec##SZ, <) \
672
- DO_CMP1(gvec_leu##SZ, vec##SZ, <=)
673
+ DO_CMP1(gvec_eq##SZ, uint##SZ##_t, ==) \
674
+ DO_CMP1(gvec_ne##SZ, uint##SZ##_t, !=) \
675
+ DO_CMP1(gvec_lt##SZ, int##SZ##_t, <) \
676
+ DO_CMP1(gvec_le##SZ, int##SZ##_t, <=) \
677
+ DO_CMP1(gvec_ltu##SZ, uint##SZ##_t, <) \
678
+ DO_CMP1(gvec_leu##SZ, uint##SZ##_t, <=)
679
680
DO_CMP2(8)
681
DO_CMP2(16)
682
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bitsel)(void *d, void *a, void *b, void *c, uint32_t desc)
683
intptr_t oprsz = simd_oprsz(desc);
684
intptr_t i;
685
686
- for (i = 0; i < oprsz; i += sizeof(vec64)) {
687
- vec64 aa = *(vec64 *)(a + i);
688
- vec64 bb = *(vec64 *)(b + i);
689
- vec64 cc = *(vec64 *)(c + i);
690
- *(vec64 *)(d + i) = (bb & aa) | (cc & ~aa);
691
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
692
+ uint64_t aa = *(uint64_t *)(a + i);
693
+ uint64_t bb = *(uint64_t *)(b + i);
694
+ uint64_t cc = *(uint64_t *)(c + i);
695
+ *(uint64_t *)(d + i) = (bb & aa) | (cc & ~aa);
696
}
697
clear_high(d, oprsz, desc);
698
}
45
--
699
--
46
2.20.1
700
2.20.1
47
701
48
702
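A note on the type choices in the patch above: the sar (arithmetic shift right) helpers load and store through intN_t rather than uintN_t because right-shifting a signed operand is what sign-extends (implementation-defined in ISO C, but arithmetic on every compiler QEMU supports), while the shr helpers keep uintN_t for the logical shift. A minimal standalone sketch of the byte case; the harness, buffer names and sizes here are invented for illustration, and like the helpers it leans on GNU C's arithmetic on void pointers:

    #include <stdint.h>
    #include <stdio.h>

    /* Same loop shape as gvec_sar8i: int8_t on both sides of the
     * assignment yields a sign-extending shift on each lane. */
    static void sar8i_sketch(void *d, void *a, intptr_t oprsz, int shift)
    {
        intptr_t i;

        for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
            *(int8_t *)(d + i) = *(int8_t *)(a + i) >> shift;
        }
    }

    int main(void)
    {
        int8_t a[4] = { -128, -2, 2, 127 };
        int8_t d[4];

        sar8i_sketch(d, a, sizeof(a), 1);
        for (int i = 0; i < 4; i++) {
            printf("%d ", d[i]);   /* prints: -64 -1 1 63 */
        }
        printf("\n");
        return 0;
    }

With uint8_t in place of int8_t, the first two lanes would come out as 64 and 127, which is why the signed/unsigned split in the patch matters.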
Merge into the only caller, but at the same time split
out tlb_mmu_init to initialize a single tlb entry.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
     desc->window_max_entries = max_entries;
 }
 
-static void tlb_dyn_init(CPUArchState *env)
-{
-    int i;
-
-    for (i = 0; i < NB_MMU_MODES; i++) {
-        CPUTLBDesc *desc = &env_tlb(env)->d[i];
-        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
-
-        tlb_window_reset(desc, get_clock_realtime(), 0);
-        desc->n_used_entries = 0;
-        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
-        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
-        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
-    }
-}
-
 /**
  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
  * @desc: The CPUTLBDesc portion of the TLB
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
     tlb_mmu_flush_locked(desc, fast);
 }
 
+static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
+{
+    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
+
+    tlb_window_reset(desc, now, 0);
+    desc->n_used_entries = 0;
+    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
+    fast->table = g_new(CPUTLBEntry, n_entries);
+    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+}
+
 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
 {
     env_tlb(env)->d[mmu_idx].n_used_entries++;
@@ -XXX,XX +XXX,XX @@ static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
 void tlb_init(CPUState *cpu)
 {
     CPUArchState *env = cpu->env_ptr;
+    int64_t now = get_clock_realtime();
+    int i;
 
     qemu_spin_init(&env_tlb(env)->c.lock);
 
     /* Ensure that cpu_reset performs a full flush. */
     env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
 
-    tlb_dyn_init(env);
+    for (i = 0; i < NB_MMU_MODES; i++) {
+        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
+    }
 }
 
 /* flush_all_helper: run fn across all cpus
--
2.20.1
clear_high(d, oprsz, desc);
77
}
78
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sub64)(void *d, void *a, void *b, uint32_t desc)
79
void HELPER(gvec_subs8)(void *d, void *a, uint64_t b, uint32_t desc)
80
{
81
intptr_t oprsz = simd_oprsz(desc);
82
- uint8_t vecb = (uint8_t)DUP16(b);
83
intptr_t i;
84
85
for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
86
- *(uint8_t *)(d + i) = *(uint8_t *)(a + i) - vecb;
87
+ *(uint8_t *)(d + i) = *(uint8_t *)(a + i) - (uint8_t)b;
88
}
89
clear_high(d, oprsz, desc);
90
}
91
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_subs8)(void *d, void *a, uint64_t b, uint32_t desc)
92
void HELPER(gvec_subs16)(void *d, void *a, uint64_t b, uint32_t desc)
93
{
94
intptr_t oprsz = simd_oprsz(desc);
95
- uint16_t vecb = (uint16_t)DUP8(b);
96
intptr_t i;
97
98
for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
99
- *(uint16_t *)(d + i) = *(uint16_t *)(a + i) - vecb;
100
+ *(uint16_t *)(d + i) = *(uint16_t *)(a + i) - (uint16_t)b;
101
}
102
clear_high(d, oprsz, desc);
103
}
104
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_subs16)(void *d, void *a, uint64_t b, uint32_t desc)
105
void HELPER(gvec_subs32)(void *d, void *a, uint64_t b, uint32_t desc)
106
{
107
intptr_t oprsz = simd_oprsz(desc);
108
- uint32_t vecb = (uint32_t)DUP4(b);
109
intptr_t i;
110
111
for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
112
- *(uint32_t *)(d + i) = *(uint32_t *)(a + i) - vecb;
113
+ *(uint32_t *)(d + i) = *(uint32_t *)(a + i) - (uint32_t)b;
114
}
115
clear_high(d, oprsz, desc);
116
}
117
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_subs32)(void *d, void *a, uint64_t b, uint32_t desc)
118
void HELPER(gvec_subs64)(void *d, void *a, uint64_t b, uint32_t desc)
119
{
120
intptr_t oprsz = simd_oprsz(desc);
121
- uint64_t vecb = (uint64_t)DUP2(b);
122
intptr_t i;
123
124
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
125
- *(uint64_t *)(d + i) = *(uint64_t *)(a + i) - vecb;
126
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) - b;
127
}
128
clear_high(d, oprsz, desc);
129
}
130
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_mul64)(void *d, void *a, void *b, uint32_t desc)
131
void HELPER(gvec_muls8)(void *d, void *a, uint64_t b, uint32_t desc)
132
{
133
intptr_t oprsz = simd_oprsz(desc);
134
- uint8_t vecb = (uint8_t)DUP16(b);
135
intptr_t i;
136
137
for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
138
- *(uint8_t *)(d + i) = *(uint8_t *)(a + i) * vecb;
139
+ *(uint8_t *)(d + i) = *(uint8_t *)(a + i) * (uint8_t)b;
140
}
141
clear_high(d, oprsz, desc);
142
}
143
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_muls8)(void *d, void *a, uint64_t b, uint32_t desc)
144
void HELPER(gvec_muls16)(void *d, void *a, uint64_t b, uint32_t desc)
145
{
146
intptr_t oprsz = simd_oprsz(desc);
147
- uint16_t vecb = (uint16_t)DUP8(b);
148
intptr_t i;
149
150
for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
151
- *(uint16_t *)(d + i) = *(uint16_t *)(a + i) * vecb;
152
+ *(uint16_t *)(d + i) = *(uint16_t *)(a + i) * (uint16_t)b;
153
}
154
clear_high(d, oprsz, desc);
155
}
156
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_muls16)(void *d, void *a, uint64_t b, uint32_t desc)
157
void HELPER(gvec_muls32)(void *d, void *a, uint64_t b, uint32_t desc)
158
{
159
intptr_t oprsz = simd_oprsz(desc);
160
- uint32_t vecb = (uint32_t)DUP4(b);
161
intptr_t i;
162
163
for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
164
- *(uint32_t *)(d + i) = *(uint32_t *)(a + i) * vecb;
165
+ *(uint32_t *)(d + i) = *(uint32_t *)(a + i) * (uint32_t)b;
166
}
167
clear_high(d, oprsz, desc);
168
}
169
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_muls32)(void *d, void *a, uint64_t b, uint32_t desc)
170
void HELPER(gvec_muls64)(void *d, void *a, uint64_t b, uint32_t desc)
171
{
172
intptr_t oprsz = simd_oprsz(desc);
173
- uint64_t vecb = (uint64_t)DUP2(b);
174
intptr_t i;
175
176
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
177
- *(uint64_t *)(d + i) = *(uint64_t *)(a + i) * vecb;
178
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) * b;
179
}
180
clear_high(d, oprsz, desc);
181
}
182
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_eqv)(void *d, void *a, void *b, uint32_t desc)
183
void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc)
184
{
185
intptr_t oprsz = simd_oprsz(desc);
186
- uint64_t vecb = (uint64_t)DUP2(b);
187
intptr_t i;
188
189
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
190
- *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & vecb;
191
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & b;
192
}
193
clear_high(d, oprsz, desc);
194
}
195
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc)
196
void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
197
{
198
intptr_t oprsz = simd_oprsz(desc);
199
- uint64_t vecb = (uint64_t)DUP2(b);
200
intptr_t i;
201
202
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
203
- *(uint64_t *)(d + i) = *(uint64_t *)(a + i) ^ vecb;
204
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) ^ b;
205
}
206
clear_high(d, oprsz, desc);
207
}
208
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
209
void HELPER(gvec_ors)(void *d, void *a, uint64_t b, uint32_t desc)
210
{
211
intptr_t oprsz = simd_oprsz(desc);
212
- uint64_t vecb = (uint64_t)DUP2(b);
213
intptr_t i;
214
215
for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
216
- *(uint64_t *)(d + i) = *(uint64_t *)(a + i) | vecb;
217
+ *(uint64_t *)(d + i) = *(uint64_t *)(a + i) | b;
218
}
219
clear_high(d, oprsz, desc);
220
}
76
--
221
--
77
2.20.1
222
2.20.1
78
223
79
224
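What makes the bare (uint8_t)b cast above equivalent to the old DUP-built temporary: with the vector types gone, DUP16 and friends had become identity macros, so vecb was just b narrowed to the lane width, and C's conversion to a narrow unsigned type keeps exactly the low-order bits. A small standalone check with invented values (illustrative only, not QEMU code):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        /* Hypothetical 64-bit scalar; only its low byte matters to an
         * 8-bit lane operation, whatever the upper bytes contain. */
        uint64_t b = 0x1122334455667788ULL;

        uint8_t vecb = (uint8_t)b;   /* old style: lane-sized temporary */
        uint8_t x = 0x10;

        /* New style casts at the point of use; results are identical. */
        assert((uint8_t)(x + vecb) == (uint8_t)(x + (uint8_t)b));
        assert((uint8_t)b == 0x88);
        return 0;
    }

For the 64-bit helpers the cast disappears entirely, since b already has the lane width.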
There's little point in leaving these data structures half initialized,
and relying on a flush to be done during reset.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
     fast->table = g_new(CPUTLBEntry, n_entries);
     desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+    tlb_mmu_flush_locked(desc, fast);
 }
 
 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
@@ -XXX,XX +XXX,XX @@ void tlb_init(CPUState *cpu)
 
     qemu_spin_init(&env_tlb(env)->c.lock);
 
-    /* Ensure that cpu_reset performs a full flush. */
-    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
+    /* All tlbs are initialized flushed. */
+    env_tlb(env)->c.dirty = 0;
 
     for (i = 0; i < NB_MMU_MODES; i++) {
         tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
--
2.20.1

Partial cleanup from the CONFIG_VECTOR16 removal.
Replace DO_CMP0 with its scalar expansion, a simple negation.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tcg-runtime-gvec.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-runtime-gvec.c
+++ b/accel/tcg/tcg-runtime-gvec.c
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_sar64v)(void *d, void *a, void *b, uint32_t desc)
     clear_high(d, oprsz, desc);
 }
 
-#define DO_CMP0(X) -(X)
-
 #define DO_CMP1(NAME, TYPE, OP) \
 void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc) \
 { \
     intptr_t oprsz = simd_oprsz(desc); \
     intptr_t i; \
     for (i = 0; i < oprsz; i += sizeof(TYPE)) { \
-        *(TYPE *)(d + i) = DO_CMP0(*(TYPE *)(a + i) OP *(TYPE *)(b + i)); \
+        *(TYPE *)(d + i) = -(*(TYPE *)(a + i) OP *(TYPE *)(b + i)); \
     } \
     clear_high(d, oprsz, desc); \
 }
@@ -XXX,XX +XXX,XX @@ DO_CMP2(16)
 DO_CMP2(32)
 DO_CMP2(64)
 
-#undef DO_CMP0
 #undef DO_CMP1
 #undef DO_CMP2
--
2.20.1
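The negation that replaces DO_CMP0 is the standard trick for building SIMD-style compare results in scalar C: a comparison evaluates to 0 or 1, and negating that in an N-bit unsigned element yields an all-zeros or all-ones mask. A standalone demonstration (illustrative values only, not QEMU code):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint8_t a = 3, b = 7;

        /* (a < b) == 1; -(1) wrapped to uint8_t is 0xff, an all-ones lane. */
        uint8_t t = -(a < b);
        assert(t == 0xff);

        /* (a == b) == 0; negating 0 leaves an all-zeros lane. */
        uint8_t f = -(a == b);
        assert(f == 0x00);
        return 0;
    }

This matches what the old GCC vector-extension comparisons produced per element, so the guest-visible results are unchanged.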