As promised, another pullreq... This one's mostly RTH's patches.

thanks
-- PMM

The following changes since commit 784c2e4f232adf5ef47a84a262ec72a07d068d6a:

  Merge remote-tracking branch 'remotes/jasowang/tags/net-pull-request' into staging (2018-10-19 15:30:40 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20181019

for you to fetch changes up to 88c9add25e7120e8622796c81ad3f3fb7f8d40e7:

  target/arm: Only flush tlb if ASID changes (2018-10-19 17:38:48 +0100)

----------------------------------------------------------------
target-arm queue:
 * ssi-sd: Make devices picking up backends unavailable with -device
 * Add support for VCPU event states
 * Move towards making ID registers the source of truth for
   whether a guest CPU implements a feature, rather than having
   parallel ID registers and feature bit flags (see the note
   after the diffstat)
 * Implement various HCR hypervisor trap/config bits
 * Get IL bit correct for v7 syndrome values
 * Report correct syndrome for FP/SIMD traps to Hyp mode
 * hw/arm/boot: Increase compliance with kernel arm64 boot protocol
 * Refactor A32 Neon to use generic vector infrastructure
 * Fix a bug in A32 VLD2 "(multiple 2-element structures)" insn
 * net: cadence_gem: Report features correctly in ID register
 * Avoid some unnecessary TLB flushes on TTBR register writes

----------------------------------------------------------------
Dongjiu Geng (1):
      target/arm: Add support for VCPU event states

Edgar E. Iglesias (2):
      net: cadence_gem: Announce availability of priority queues
      net: cadence_gem: Announce 64bit addressing support

Markus Armbruster (1):
      ssi-sd: Make devices picking up backends unavailable with -device

Peter Maydell (10):
      target/arm: Improve debug logging of AArch32 exception return
      target/arm: Make switch_mode() file-local
      target/arm: Implement HCR.FB
      target/arm: Implement HCR.DC
      target/arm: ISR_EL1 bits track virtual interrupts if IMO/FMO set
      target/arm: Implement HCR.VI and VF
      target/arm: Implement HCR.PTW
      target/arm: New utility function to extract EC from syndrome
      target/arm: Get IL bit correct for v7 syndrome values
      target/arm: Report correct syndrome for FP/SIMD traps to Hyp mode

Richard Henderson (30):
      target/arm: Move some system registers into a substructure
      target/arm: V8M should not imply V7VE
      target/arm: Convert v8 extensions from feature bits to isar tests
      target/arm: Convert division from feature bits to isar0 tests
      target/arm: Convert jazelle from feature bit to isar1 test
      target/arm: Convert t32ee from feature bit to isar3 test
      target/arm: Convert sve from feature bit to aa64pfr0 test
      target/arm: Convert v8.2-fp16 from feature bit to aa64pfr0 test
      target/arm: Hoist address increment for vector memory ops
      target/arm: Don't call tcg_clear_temp_count
      target/arm: Use tcg_gen_gvec_dup_i64 for LD[1-4]R
      target/arm: Promote consecutive memory ops for aa64
      target/arm: Mark some arrays const
      target/arm: Use gvec for NEON VDUP
      target/arm: Use gvec for NEON VMOV, VMVN, VBIC & VORR (immediate)
      target/arm: Use gvec for NEON_3R_LOGIC insns
      target/arm: Use gvec for NEON_3R_VADD_VSUB insns
      target/arm: Use gvec for NEON_2RM_VMN, NEON_2RM_VNEG
      target/arm: Use gvec for NEON_3R_VMUL
      target/arm: Use gvec for VSHR, VSHL
      target/arm: Use gvec for VSRA
      target/arm: Use gvec for VSRI, VSLI
      target/arm: Use gvec for NEON_3R_VML
      target/arm: Use gvec for NEON_3R_VTST_VCEQ, NEON_3R_VCGT, NEON_3R_VCGE
      target/arm: Use gvec for NEON VLD all lanes
      target/arm: Reorg NEON VLD/VST all elements
      target/arm: Promote consecutive memory ops for aa32
      target/arm: Reorg NEON VLD/VST single element to one lane
      target/arm: Remove writefn from TTBR0_EL3
      target/arm: Only flush tlb if ASID changes

Stewart Hildebrand (1):
      hw/arm/boot: Increase compliance with kernel arm64 boot protocol

 target/arm/cpu.h            |  227 ++++-
 target/arm/internals.h      |   45 +-
 target/arm/kvm_arm.h        |   24 +
 target/arm/translate.h      |   21 +
 hw/arm/boot.c               |   18 +
 hw/intc/armv7m_nvic.c       |   12 +-
 hw/net/cadence_gem.c        |    9 +-
 hw/sd/ssi-sd.c              |    2 +
 linux-user/aarch64/signal.c |    4 +-
 linux-user/elfload.c        |   60 +-
 linux-user/syscall.c        |   10 +-
 target/arm/cpu.c            |  242 ++++----
 target/arm/cpu64.c          |  148 +++--
 target/arm/helper.c         |  397 ++++++----
 target/arm/kvm.c            |   60 ++
 target/arm/kvm32.c          |   13 +
 target/arm/kvm64.c          |   15 +-
 target/arm/machine.c        |   28 +-
 target/arm/op_helper.c      |    2 +-
 target/arm/translate-a64.c  |  715 ++++-----------------
 target/arm/translate.c      | 1451 ++++++++++++++++++++++++++++---------------
 21 files changed, 2021 insertions(+), 1482 deletions(-)
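A note on the "ID registers as the source of truth" item above: the series
replaces ARM_FEATURE_* bit tests with predicates that read fields of the
architectural ID registers. A minimal sketch of that style of test, assuming
the FIELD helpers from hw/registerfields.h and the helper names that later
patches in the series introduce:

#include "hw/registerfields.h"

/* ID_ISAR0[27:24] is the Divide field (sketch; definition as in the
 * series' cpu.h changes).
 */
FIELD(ID_ISAR0, DIVIDE, 24, 4)

static inline bool isar_feature_thumb_div(const ARMISARegisters *id)
{
    /* Nonzero: SDIV/UDIV are implemented in the Thumb instruction set. */
    return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
}

static inline bool isar_feature_arm_div(const ARMISARegisters *id)
{
    /* 2 or greater: the A32 encodings are implemented as well. */
    return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
}

The point is a single source of truth: setting id_isar0 for a CPU model
automatically answers every feature query correctly, with no parallel
feature bitmap to keep in sync.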
From: Markus Armbruster <armbru@redhat.com>

Device models aren't supposed to go on fishing expeditions for
backends.  They should expose suitable properties for the user to set.
For onboard devices, board code sets them.

Device ssi-sd picks up its block backend in its init() method with
drive_get_next() instead.  This mistake is already marked FIXME since
commit af9e40a.

Unset user_creatable to remove the mistake from our external
interface.  Since the SSI bus doesn't support hotplug, only -device
can be affected.  Only certain ARM machines have ssi-sd and provide an
SSI bus for it; this patch breaks -device ssi-sd for these machines.
No actual use of -device ssi-sd is known.

Signed-off-by: Markus Armbruster <armbru@redhat.com>
Acked-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Acked-by: Thomas Huth <thuth@redhat.com>
Message-id: 20181009060835.4608-1-armbru@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/sd/ssi-sd.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/sd/ssi-sd.c
+++ b/hw/sd/ssi-sd.c
@@ -XXX,XX +XXX,XX @@ static void ssi_sd_class_init(ObjectClass *klass, void *data)
     k->cs_polarity = SSI_CS_LOW;
     dc->vmsd = &vmstate_ssi_sd;
     dc->reset = ssi_sd_reset;
+    /* Reason: init() method uses drive_get_next() */
+    dc->user_creatable = false;
 }

 static const TypeInfo ssi_sd_info = {
--
2.19.1
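For contrast with the drive_get_next() fishing expedition, the
property-based pattern the commit message recommends would look roughly
like this. The SSISDSketchState type and its blk field are invented for
illustration; the real ssi-sd has no such property, which is why this
patch hides the device from -device instead:

#include "qemu/osdep.h"
#include "hw/qdev-properties.h"
#include "sysemu/block-backend.h"

/* Minimal stand-in for the device state; field invented for the sketch. */
typedef struct SSISDSketchState {
    BlockBackend *blk;   /* backend wired up via the "drive" property */
} SSISDSketchState;

static Property ssi_sd_sketch_properties[] = {
    DEFINE_PROP_DRIVE("drive", SSISDSketchState, blk),
    DEFINE_PROP_END_OF_LIST(),
};

With such a property, board code would attach the backend explicitly via
qdev_prop_set_drive() rather than the device reaching out and grabbing
whatever drive_get_next() happens to return.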
From: Dongjiu Geng <gengdongjiu@huawei.com>

This patch extends the qemu-kvm state sync logic with support for
KVM_GET/SET_VCPU_EVENTS, giving access to the previously missing SError
exception, and adds support for migrating that exception state.

The SError exception state consists of a pending flag and an ESR value;
kvm_put/get_vcpu_events() is called when the system registers are set or
read. On migration, if the source machine has an SError pending, QEMU
migrates it regardless of whether the target machine supports specifying
a guest ESR value: a target that does not support that can still inject
the SError with a zero ESR value.

Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1538067351-23931-3-git-send-email-gengdongjiu@huawei.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h     |  7 ++++++
 target/arm/kvm_arm.h | 24 ++++++++++++++++++
 target/arm/kvm.c     | 60 ++++++++++++++++++++++++++++++++++++++++++++
 target/arm/kvm32.c   | 13 ++++++++++
 target/arm/kvm64.c   | 13 ++++++++++
 target/arm/machine.c | 22 ++++++++++++++++
 6 files changed, 139 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
          */
     } exception;

+    /* Information associated with an SError */
+    struct {
+        uint8_t pending;
+        uint8_t has_esr;
+        uint64_t esr;
+    } serror;
+
     /* Thumb-2 EE state. */
     uint32_t teecr;
     uint32_t teehbr;
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -XXX,XX +XXX,XX @@ bool write_kvmstate_to_list(ARMCPU *cpu);
  */
 void kvm_arm_reset_vcpu(ARMCPU *cpu);

+/**
+ * kvm_arm_init_serror_injection:
+ * @cs: CPUState
+ *
+ * Check whether KVM can set guest SError syndrome.
+ */
+void kvm_arm_init_serror_injection(CPUState *cs);
+
+/**
+ * kvm_get_vcpu_events:
+ * @cpu: ARMCPU
+ *
+ * Get VCPU related state from kvm.
+ */
+int kvm_get_vcpu_events(ARMCPU *cpu);
+
+/**
+ * kvm_put_vcpu_events:
+ * @cpu: ARMCPU
+ *
+ * Put VCPU related state to kvm.
+ */
+int kvm_put_vcpu_events(ARMCPU *cpu);
+
 #ifdef CONFIG_KVM
 /**
  * kvm_arm_create_scratch_host_vcpu:
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -XXX,XX +XXX,XX @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 };

 static bool cap_has_mp_state;
+static bool cap_has_inject_serror_esr;

 static ARMHostCPUFeatures arm_host_cpu_features;

@@ -XXX,XX +XXX,XX @@ int kvm_arm_vcpu_init(CPUState *cs)
     return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
 }

+void kvm_arm_init_serror_injection(CPUState *cs)
+{
+    cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
+                                    KVM_CAP_ARM_INJECT_SERROR_ESR);
+}
+
 bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                       int *fdarray,
                                       struct kvm_vcpu_init *init)
@@ -XXX,XX +XXX,XX @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
     return 0;
 }

+int kvm_put_vcpu_events(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    struct kvm_vcpu_events events;
+    int ret;
+
+    if (!kvm_has_vcpu_events()) {
+        return 0;
+    }
+
+    memset(&events, 0, sizeof(events));
+    events.exception.serror_pending = env->serror.pending;
+
+    /* Inject SError to guest with specified syndrome if host kernel
+     * supports it, otherwise inject SError without syndrome.
+     */
+    if (cap_has_inject_serror_esr) {
+        events.exception.serror_has_esr = env->serror.has_esr;
+        events.exception.serror_esr = env->serror.esr;
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+    if (ret) {
+        error_report("failed to put vcpu events");
+    }
+
+    return ret;
+}
+
+int kvm_get_vcpu_events(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    struct kvm_vcpu_events events;
+    int ret;
+
+    if (!kvm_has_vcpu_events()) {
+        return 0;
+    }
+
+    memset(&events, 0, sizeof(events));
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
+    if (ret) {
+        error_report("failed to get vcpu events");
+        return ret;
+    }
+
+    env->serror.pending = events.exception.serror_pending;
+    env->serror.has_esr = events.exception.serror_has_esr;
+    env->serror.esr = events.exception.serror_esr;
+
+    return 0;
+}
+
 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
 {
 }
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm32.c
+++ b/target/arm/kvm32.c
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
     }
     cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;

+    /* Check whether userspace can specify guest syndrome value */
+    kvm_arm_init_serror_injection(cs);
+
     return kvm_arm_init_cpreg_list(cpu);
 }

@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
         return ret;
     }

+    ret = kvm_put_vcpu_events(cpu);
+    if (ret) {
+        return ret;
+    }
+
     /* Note that we do not call write_cpustate_to_list()
      * here, so we are only writing the tuple list back to
      * KVM. This is safe because nothing can change the
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
     }
     vfp_set_fpscr(env, fpscr);

+    ret = kvm_get_vcpu_events(cpu);
+    if (ret) {
+        return ret;
+    }
+
     if (!write_kvmstate_to_list(cpu)) {
         return EINVAL;
     }
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)

     kvm_arm_init_debug(cs);

+    /* Check whether user space can specify guest syndrome value */
+    kvm_arm_init_serror_injection(cs);
+
     return kvm_arm_init_cpreg_list(cpu);
 }

@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
         return ret;
     }

+    ret = kvm_put_vcpu_events(cpu);
+    if (ret) {
+        return ret;
+    }
+
     if (!write_list_to_kvmstate(cpu, level)) {
         return EINVAL;
     }
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
     }
     vfp_set_fpcr(env, fpr);

+    ret = kvm_get_vcpu_events(cpu);
+    if (ret) {
+        return ret;
+    }
+
     if (!write_kvmstate_to_list(cpu)) {
         return EINVAL;
     }
diff --git a/target/arm/machine.c b/target/arm/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_sve = {
 };
 #endif /* AARCH64 */

+static bool serror_needed(void *opaque)
+{
+    ARMCPU *cpu = opaque;
+    CPUARMState *env = &cpu->env;
+
+    return env->serror.pending != 0;
+}
+
+static const VMStateDescription vmstate_serror = {
+    .name = "cpu/serror",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = serror_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8(env.serror.pending, ARMCPU),
+        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
+        VMSTATE_UINT64(env.serror.esr, ARMCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static bool m_needed(void *opaque)
 {
     ARMCPU *cpu = opaque;
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_arm_cpu = {
 #ifdef TARGET_AARCH64
         &vmstate_sve,
 #endif
+        &vmstate_serror,
         NULL
     }
 };
--
2.19.1
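For reference, the kernel ABI these helpers speak is the arm64
kvm_vcpu_events layout. An abridged sketch of the structure as declared
in the kernel UAPI headers of this era (treat the padding and offsets as
indicative):

struct kvm_vcpu_events {
    struct {
        __u8  serror_pending;   /* guest has an SError pending */
        __u8  serror_has_esr;   /* serror_esr below is valid */
        __u8  pad[6];
        __u64 serror_esr;       /* syndrome to inject, if supported */
    } exception;
    __u32 reserved[12];
};

env->serror in CPUARMState mirrors these three fields one-for-one, which
is what lets the vmstate_serror subsection above carry the state across
migration unchanged.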
From: Richard Henderson <richard.henderson@linaro.org>

Create struct ARMISARegisters, to be accessed during translation.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h      |  32 ++++----
 hw/intc/armv7m_nvic.c |  12 +--
 target/arm/cpu.c      | 178 +++++++++++++++++++++---------------------
 target/arm/cpu64.c    |  70 ++++++---------
 target/arm/helper.c   |  28 +++----
 5 files changed, 162 insertions(+), 158 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
      * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
      * is used for reset values of non-constant registers; no reset_
      * prefix means a constant register.
+     * Some of these registers are split out into a substructure that
+     * is shared with the translators to control the ISA.
      */
+    struct ARMISARegisters {
+        uint32_t id_isar0;
+        uint32_t id_isar1;
+        uint32_t id_isar2;
+        uint32_t id_isar3;
+        uint32_t id_isar4;
+        uint32_t id_isar5;
+        uint32_t id_isar6;
+        uint32_t mvfr0;
+        uint32_t mvfr1;
+        uint32_t mvfr2;
+        uint64_t id_aa64isar0;
+        uint64_t id_aa64isar1;
+        uint64_t id_aa64pfr0;
+        uint64_t id_aa64pfr1;
+    } isar;
     uint32_t midr;
     uint32_t revidr;
     uint32_t reset_fpsid;
-    uint32_t mvfr0;
-    uint32_t mvfr1;
-    uint32_t mvfr2;
     uint32_t ctr;
     uint32_t reset_sctlr;
     uint32_t id_pfr0;
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
     uint32_t id_mmfr2;
     uint32_t id_mmfr3;
     uint32_t id_mmfr4;
-    uint32_t id_isar0;
-    uint32_t id_isar1;
-    uint32_t id_isar2;
-    uint32_t id_isar3;
-    uint32_t id_isar4;
-    uint32_t id_isar5;
-    uint32_t id_isar6;
-    uint64_t id_aa64pfr0;
-    uint64_t id_aa64pfr1;
     uint64_t id_aa64dfr0;
     uint64_t id_aa64dfr1;
     uint64_t id_aa64afr0;
     uint64_t id_aa64afr1;
-    uint64_t id_aa64isar0;
-    uint64_t id_aa64isar1;
     uint64_t id_aa64mmfr0;
     uint64_t id_aa64mmfr1;
     uint32_t dbgdidr;
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
     case 0xd5c: /* MMFR3. */
         return cpu->id_mmfr3;
     case 0xd60: /* ISAR0. */
-        return cpu->id_isar0;
+        return cpu->isar.id_isar0;
     case 0xd64: /* ISAR1. */
-        return cpu->id_isar1;
+        return cpu->isar.id_isar1;
     case 0xd68: /* ISAR2. */
-        return cpu->id_isar2;
+        return cpu->isar.id_isar2;
     case 0xd6c: /* ISAR3. */
-        return cpu->id_isar3;
+        return cpu->isar.id_isar3;
     case 0xd70: /* ISAR4. */
-        return cpu->id_isar4;
+        return cpu->isar.id_isar4;
     case 0xd74: /* ISAR5. */
-        return cpu->id_isar5;
+        return cpu->isar.id_isar5;
     case 0xd78: /* CLIDR */
         return cpu->clidr;
     case 0xd7c: /* CTR */
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
     g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);

     env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
-    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
-    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
-    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
+    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
+    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
+    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

     cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON;
     s->halted = cpu->start_powered_off;
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
          * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
          */
         cpu->id_pfr1 &= ~0xf0;
-        cpu->id_aa64pfr0 &= ~0xf000;
+        cpu->isar.id_aa64pfr0 &= ~0xf000;
     }

     if (!cpu->has_el2) {
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
          * registers if we don't have EL2. These are id_pfr1[15:12] and
          * id_aa64pfr0_el1[11:8].
          */
-        cpu->id_aa64pfr0 &= ~0xf00;
+        cpu->isar.id_aa64pfr0 &= ~0xf00;
         cpu->id_pfr1 &= ~0xf000;
     }

@@ -XXX,XX +XXX,XX @@ static void arm1136_r2_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
     cpu->midr = 0x4107b362;
     cpu->reset_fpsid = 0x410120b4;
-    cpu->mvfr0 = 0x11111111;
-    cpu->mvfr1 = 0x00000000;
+    cpu->isar.mvfr0 = 0x11111111;
+    cpu->isar.mvfr1 = 0x00000000;
     cpu->ctr = 0x1dd20d2;
     cpu->reset_sctlr = 0x00050078;
     cpu->id_pfr0 = 0x111;
@@ -XXX,XX +XXX,XX @@ static void arm1136_r2_initfn(Object *obj)
     cpu->id_mmfr0 = 0x01130003;
     cpu->id_mmfr1 = 0x10030302;
     cpu->id_mmfr2 = 0x01222110;
-    cpu->id_isar0 = 0x00140011;
-    cpu->id_isar1 = 0x12002111;
-    cpu->id_isar2 = 0x11231111;
-    cpu->id_isar3 = 0x01102131;
-    cpu->id_isar4 = 0x141;
+    cpu->isar.id_isar0 = 0x00140011;
+    cpu->isar.id_isar1 = 0x12002111;
+    cpu->isar.id_isar2 = 0x11231111;
+    cpu->isar.id_isar3 = 0x01102131;
+    cpu->isar.id_isar4 = 0x141;
     cpu->reset_auxcr = 7;
 }

@@ -XXX,XX +XXX,XX @@ static void arm1136_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
     cpu->midr = 0x4117b363;
     cpu->reset_fpsid = 0x410120b4;
-    cpu->mvfr0 = 0x11111111;
-    cpu->mvfr1 = 0x00000000;
+    cpu->isar.mvfr0 = 0x11111111;
+    cpu->isar.mvfr1 = 0x00000000;
     cpu->ctr = 0x1dd20d2;
     cpu->reset_sctlr = 0x00050078;
     cpu->id_pfr0 = 0x111;
@@ -XXX,XX +XXX,XX @@ static void arm1136_initfn(Object *obj)
     cpu->id_mmfr0 = 0x01130003;
     cpu->id_mmfr1 = 0x10030302;
     cpu->id_mmfr2 = 0x01222110;
-    cpu->id_isar0 = 0x00140011;
-    cpu->id_isar1 = 0x12002111;
-    cpu->id_isar2 = 0x11231111;
-    cpu->id_isar3 = 0x01102131;
-    cpu->id_isar4 = 0x141;
+    cpu->isar.id_isar0 = 0x00140011;
+    cpu->isar.id_isar1 = 0x12002111;
+    cpu->isar.id_isar2 = 0x11231111;
+    cpu->isar.id_isar3 = 0x01102131;
+    cpu->isar.id_isar4 = 0x141;
     cpu->reset_auxcr = 7;
 }

@@ -XXX,XX +XXX,XX @@ static void arm1176_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_EL3);
     cpu->midr = 0x410fb767;
     cpu->reset_fpsid = 0x410120b5;
-    cpu->mvfr0 = 0x11111111;
-    cpu->mvfr1 = 0x00000000;
+    cpu->isar.mvfr0 = 0x11111111;
+    cpu->isar.mvfr1 = 0x00000000;
     cpu->ctr = 0x1dd20d2;
     cpu->reset_sctlr = 0x00050078;
     cpu->id_pfr0 = 0x111;
@@ -XXX,XX +XXX,XX @@ static void arm1176_initfn(Object *obj)
     cpu->id_mmfr0 = 0x01130003;
     cpu->id_mmfr1 = 0x10030302;
     cpu->id_mmfr2 = 0x01222100;
-    cpu->id_isar0 = 0x0140011;
-    cpu->id_isar1 = 0x12002111;
-    cpu->id_isar2 = 0x11231121;
-    cpu->id_isar3 = 0x01102131;
-    cpu->id_isar4 = 0x01141;
+    cpu->isar.id_isar0 = 0x0140011;
+    cpu->isar.id_isar1 = 0x12002111;
+    cpu->isar.id_isar2 = 0x11231121;
+    cpu->isar.id_isar3 = 0x01102131;
+    cpu->isar.id_isar4 = 0x01141;
     cpu->reset_auxcr = 7;
 }

@@ -XXX,XX +XXX,XX @@ static void arm11mpcore_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
     cpu->midr = 0x410fb022;
     cpu->reset_fpsid = 0x410120b4;
-    cpu->mvfr0 = 0x11111111;
-    cpu->mvfr1 = 0x00000000;
+    cpu->isar.mvfr0 = 0x11111111;
+    cpu->isar.mvfr1 = 0x00000000;
     cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
     cpu->id_pfr0 = 0x111;
     cpu->id_pfr1 = 0x1;
@@ -XXX,XX +XXX,XX @@ static void arm11mpcore_initfn(Object *obj)
     cpu->id_mmfr0 = 0x01100103;
     cpu->id_mmfr1 = 0x10020302;
     cpu->id_mmfr2 = 0x01222000;
-    cpu->id_isar0 = 0x00100011;
-    cpu->id_isar1 = 0x12002111;
-    cpu->id_isar2 = 0x11221011;
-    cpu->id_isar3 = 0x01102131;
-    cpu->id_isar4 = 0x141;
+    cpu->isar.id_isar0 = 0x00100011;
+    cpu->isar.id_isar1 = 0x12002111;
+    cpu->isar.id_isar2 = 0x11221011;
+    cpu->isar.id_isar3 = 0x01102131;
+    cpu->isar.id_isar4 = 0x141;
     cpu->reset_auxcr = 1;
 }

@@ -XXX,XX +XXX,XX @@ static void cortex_m3_initfn(Object *obj)
     cpu->id_mmfr1 = 0x00000000;
     cpu->id_mmfr2 = 0x00000000;
     cpu->id_mmfr3 = 0x00000000;
-    cpu->id_isar0 = 0x01141110;
-    cpu->id_isar1 = 0x02111000;
-    cpu->id_isar2 = 0x21112231;
-    cpu->id_isar3 = 0x01111110;
-    cpu->id_isar4 = 0x01310102;
-    cpu->id_isar5 = 0x00000000;
-    cpu->id_isar6 = 0x00000000;
+    cpu->isar.id_isar0 = 0x01141110;
+    cpu->isar.id_isar1 = 0x02111000;
+    cpu->isar.id_isar2 = 0x21112231;
+    cpu->isar.id_isar3 = 0x01111110;
+    cpu->isar.id_isar4 = 0x01310102;
+    cpu->isar.id_isar5 = 0x00000000;
+    cpu->isar.id_isar6 = 0x00000000;
 }

 static void cortex_m4_initfn(Object *obj)
@@ -XXX,XX +XXX,XX @@ static void cortex_m4_initfn(Object *obj)
     cpu->id_mmfr1 = 0x00000000;
     cpu->id_mmfr2 = 0x00000000;
     cpu->id_mmfr3 = 0x00000000;
-    cpu->id_isar0 = 0x01141110;
-    cpu->id_isar1 = 0x02111000;
-    cpu->id_isar2 = 0x21112231;
-    cpu->id_isar3 = 0x01111110;
-    cpu->id_isar4 = 0x01310102;
-    cpu->id_isar5 = 0x00000000;
-    cpu->id_isar6 = 0x00000000;
+    cpu->isar.id_isar0 = 0x01141110;
+    cpu->isar.id_isar1 = 0x02111000;
+    cpu->isar.id_isar2 = 0x21112231;
+    cpu->isar.id_isar3 = 0x01111110;
+    cpu->isar.id_isar4 = 0x01310102;
+    cpu->isar.id_isar5 = 0x00000000;
+    cpu->isar.id_isar6 = 0x00000000;
 }

 static void cortex_m33_initfn(Object *obj)
@@ -XXX,XX +XXX,XX @@ static void cortex_m33_initfn(Object *obj)
     cpu->id_mmfr1 = 0x00000000;
     cpu->id_mmfr2 = 0x01000000;
     cpu->id_mmfr3 = 0x00000000;
-    cpu->id_isar0 = 0x01101110;
-    cpu->id_isar1 = 0x02212000;
-    cpu->id_isar2 = 0x20232232;
-    cpu->id_isar3 = 0x01111131;
-    cpu->id_isar4 = 0x01310132;
-    cpu->id_isar5 = 0x00000000;
-    cpu->id_isar6 = 0x00000000;
+    cpu->isar.id_isar0 = 0x01101110;
+    cpu->isar.id_isar1 = 0x02212000;
+    cpu->isar.id_isar2 = 0x20232232;
+    cpu->isar.id_isar3 = 0x01111131;
+    cpu->isar.id_isar4 = 0x01310132;
+    cpu->isar.id_isar5 = 0x00000000;
+    cpu->isar.id_isar6 = 0x00000000;
     cpu->clidr = 0x00000000;
     cpu->ctr = 0x8000c000;
 }
@@ -XXX,XX +XXX,XX @@ static void cortex_r5_initfn(Object *obj)
     cpu->id_mmfr1 = 0x00000000;
     cpu->id_mmfr2 = 0x01200000;
     cpu->id_mmfr3 = 0x0211;
-    cpu->id_isar0 = 0x02101111;
-    cpu->id_isar1 = 0x13112111;
-    cpu->id_isar2 = 0x21232141;
-    cpu->id_isar3 = 0x01112131;
-    cpu->id_isar4 = 0x0010142;
-    cpu->id_isar5 = 0x0;
-    cpu->id_isar6 = 0x0;
+    cpu->isar.id_isar0 = 0x02101111;
+    cpu->isar.id_isar1 = 0x13112111;
+    cpu->isar.id_isar2 = 0x21232141;
+    cpu->isar.id_isar3 = 0x01112131;
+    cpu->isar.id_isar4 = 0x0010142;
+    cpu->isar.id_isar5 = 0x0;
+    cpu->isar.id_isar6 = 0x0;
     cpu->mp_is_up = true;
     cpu->pmsav7_dregion = 16;
     define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
@@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_EL3);
     cpu->midr = 0x410fc080;
     cpu->reset_fpsid = 0x410330c0;
-    cpu->mvfr0 = 0x11110222;
-    cpu->mvfr1 = 0x00011111;
+    cpu->isar.mvfr0 = 0x11110222;
+    cpu->isar.mvfr1 = 0x00011111;
     cpu->ctr = 0x82048004;
     cpu->reset_sctlr = 0x00c50078;
     cpu->id_pfr0 = 0x1031;
@@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj)
     cpu->id_mmfr1 = 0x20000000;
     cpu->id_mmfr2 = 0x01202000;
     cpu->id_mmfr3 = 0x11;
-    cpu->id_isar0 = 0x00101111;
-    cpu->id_isar1 = 0x12112111;
-    cpu->id_isar2 = 0x21232031;
-    cpu->id_isar3 = 0x11112131;
-    cpu->id_isar4 = 0x00111142;
+    cpu->isar.id_isar0 = 0x00101111;
+    cpu->isar.id_isar1 = 0x12112111;
+    cpu->isar.id_isar2 = 0x21232031;
+    cpu->isar.id_isar3 = 0x11112131;
+    cpu->isar.id_isar4 = 0x00111142;
     cpu->dbgdidr = 0x15141000;
     cpu->clidr = (1 << 27) | (2 << 24) | 3;
     cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
@@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_CBAR);
     cpu->midr = 0x410fc090;
     cpu->reset_fpsid = 0x41033090;
-    cpu->mvfr0 = 0x11110222;
-    cpu->mvfr1 = 0x01111111;
+    cpu->isar.mvfr0 = 0x11110222;
+    cpu->isar.mvfr1 = 0x01111111;
     cpu->ctr = 0x80038003;
     cpu->reset_sctlr = 0x00c50078;
     cpu->id_pfr0 = 0x1031;
@@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj)
     cpu->id_mmfr1 = 0x20000000;
     cpu->id_mmfr2 = 0x01230000;
     cpu->id_mmfr3 = 0x00002111;
-    cpu->id_isar0 = 0x00101111;
-    cpu->id_isar1 = 0x13112111;
-    cpu->id_isar2 = 0x21232041;
-    cpu->id_isar3 = 0x11112131;
-    cpu->id_isar4 = 0x00111142;
+    cpu->isar.id_isar0 = 0x00101111;
+    cpu->isar.id_isar1 = 0x13112111;
+    cpu->isar.id_isar2 = 0x21232041;
+    cpu->isar.id_isar3 = 0x11112131;
+    cpu->isar.id_isar4 = 0x00111142;
     cpu->dbgdidr = 0x35141000;
     cpu->clidr = (1 << 27) | (1 << 24) | 3;
     cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
@@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj)
     cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
     cpu->midr = 0x410fc075;
     cpu->reset_fpsid = 0x41023075;
-    cpu->mvfr0 = 0x10110222;
-    cpu->mvfr1 = 0x11111111;
+    cpu->isar.mvfr0 = 0x10110222;
+    cpu->isar.mvfr1 = 0x11111111;
     cpu->ctr = 0x84448003;
     cpu->reset_sctlr = 0x00c50078;
     cpu->id_pfr0 = 0x00001131;
@@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj)
     /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
      * table 4-41 gives 0x02101110, which includes the arm div insns.
      */
-    cpu->id_isar0 = 0x02101110;
-    cpu->id_isar1 = 0x13112111;
-    cpu->id_isar2 = 0x21232041;
-    cpu->id_isar3 = 0x11112131;
-    cpu->id_isar4 = 0x10011142;
+    cpu->isar.id_isar0 = 0x02101110;
+    cpu->isar.id_isar1 = 0x13112111;
+    cpu->isar.id_isar2 = 0x21232041;
+    cpu->isar.id_isar3 = 0x11112131;
+    cpu->isar.id_isar4 = 0x10011142;
     cpu->dbgdidr = 0x3515f005;
     cpu->clidr = 0x0a200023;
     cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
@@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj)
     cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
     cpu->midr = 0x412fc0f1;
     cpu->reset_fpsid = 0x410430f0;
-    cpu->mvfr0 = 0x10110222;
-    cpu->mvfr1 = 0x11111111;
+    cpu->isar.mvfr0 = 0x10110222;
+    cpu->isar.mvfr1 = 0x11111111;
     cpu->ctr = 0x8444c004;
     cpu->reset_sctlr = 0x00c50078;
     cpu->id_pfr0 = 0x00001131;
@@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj)
     cpu->id_mmfr1 = 0x20000000;
     cpu->id_mmfr2 = 0x01240000;
     cpu->id_mmfr3 = 0x02102211;
-    cpu->id_isar0 = 0x02101110;
-    cpu->id_isar1 = 0x13112111;
-    cpu->id_isar2 = 0x21232041;
-    cpu->id_isar3 = 0x11112131;
-    cpu->id_isar4 = 0x10011142;
+    cpu->isar.id_isar0 = 0x02101110;
+    cpu->isar.id_isar1 = 0x13112111;
+    cpu->isar.id_isar2 = 0x21232041;
+    cpu->isar.id_isar3 = 0x11112131;
+    cpu->isar.id_isar4 = 0x10011142;
     cpu->dbgdidr = 0x3515f021;
     cpu->clidr = 0x0a200023;
     cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
     cpu->midr = 0x411fd070;
     cpu->revidr = 0x00000000;
     cpu->reset_fpsid = 0x41034070;
-    cpu->mvfr0 = 0x10110222;
-    cpu->mvfr1 = 0x12111111;
-    cpu->mvfr2 = 0x00000043;
+    cpu->isar.mvfr0 = 0x10110222;
+    cpu->isar.mvfr1 = 0x12111111;
+    cpu->isar.mvfr2 = 0x00000043;
     cpu->ctr = 0x8444c004;
     cpu->reset_sctlr = 0x00c50838;
     cpu->id_pfr0 = 0x00000131;
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
     cpu->id_mmfr1 = 0x40000000;
     cpu->id_mmfr2 = 0x01260000;
     cpu->id_mmfr3 = 0x02102211;
-    cpu->id_isar0 = 0x02101110;
-    cpu->id_isar1 = 0x13112111;
-    cpu->id_isar2 = 0x21232042;
-    cpu->id_isar3 = 0x01112131;
-    cpu->id_isar4 = 0x00011142;
-    cpu->id_isar5 = 0x00011121;
-    cpu->id_isar6 = 0;
-    cpu->id_aa64pfr0 = 0x00002222;
+    cpu->isar.id_isar0 = 0x02101110;
+    cpu->isar.id_isar1 = 0x13112111;
+    cpu->isar.id_isar2 = 0x21232042;
+    cpu->isar.id_isar3 = 0x01112131;
+    cpu->isar.id_isar4 = 0x00011142;
+    cpu->isar.id_isar5 = 0x00011121;
+    cpu->isar.id_isar6 = 0;
+    cpu->isar.id_aa64pfr0 = 0x00002222;
     cpu->id_aa64dfr0 = 0x10305106;
     cpu->pmceid0 = 0x00000000;
     cpu->pmceid1 = 0x00000000;
-    cpu->id_aa64isar0 = 0x00011120;
+    cpu->isar.id_aa64isar0 = 0x00011120;
     cpu->id_aa64mmfr0 = 0x00001124;
     cpu->dbgdidr = 0x3516d000;
     cpu->clidr = 0x0a200023;
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
     cpu->midr = 0x410fd034;
     cpu->revidr = 0x00000000;
     cpu->reset_fpsid = 0x41034070;
-    cpu->mvfr0 = 0x10110222;
-    cpu->mvfr1 = 0x12111111;
-    cpu->mvfr2 = 0x00000043;
+    cpu->isar.mvfr0 = 0x10110222;
+    cpu->isar.mvfr1 = 0x12111111;
+    cpu->isar.mvfr2 = 0x00000043;
     cpu->ctr = 0x84448004; /* L1Ip = VIPT */
     cpu->reset_sctlr = 0x00c50838;
     cpu->id_pfr0 = 0x00000131;
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
     cpu->id_mmfr1 = 0x40000000;
     cpu->id_mmfr2 = 0x01260000;
     cpu->id_mmfr3 = 0x02102211;
-    cpu->id_isar0 = 0x02101110;
-    cpu->id_isar1 = 0x13112111;
-    cpu->id_isar2 = 0x21232042;
-    cpu->id_isar3 = 0x01112131;
-    cpu->id_isar4 = 0x00011142;
-    cpu->id_isar5 = 0x00011121;
-    cpu->id_isar6 = 0;
-    cpu->id_aa64pfr0 = 0x00002222;
+    cpu->isar.id_isar0 = 0x02101110;
+    cpu->isar.id_isar1 = 0x13112111;
+    cpu->isar.id_isar2 = 0x21232042;
+    cpu->isar.id_isar3 = 0x01112131;
+    cpu->isar.id_isar4 = 0x00011142;
+    cpu->isar.id_isar5 = 0x00011121;
+    cpu->isar.id_isar6 = 0;
+    cpu->isar.id_aa64pfr0 = 0x00002222;
     cpu->id_aa64dfr0 = 0x10305106;
-    cpu->id_aa64isar0 = 0x00011120;
+    cpu->isar.id_aa64isar0 = 0x00011120;
     cpu->id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
     cpu->dbgdidr = 0x3516d000;
     cpu->clidr = 0x0a200023;
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
     cpu->midr = 0x410fd083;
     cpu->revidr = 0x00000000;
     cpu->reset_fpsid = 0x41034080;
-    cpu->mvfr0 = 0x10110222;
-    cpu->mvfr1 = 0x12111111;
-    cpu->mvfr2 = 0x00000043;
+    cpu->isar.mvfr0 = 0x10110222;
+    cpu->isar.mvfr1 = 0x12111111;
+    cpu->isar.mvfr2 = 0x00000043;
     cpu->ctr = 0x8444c004;
     cpu->reset_sctlr = 0x00c50838;
     cpu->id_pfr0 = 0x00000131;
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
     cpu->id_mmfr1 = 0x40000000;
     cpu->id_mmfr2 = 0x01260000;
     cpu->id_mmfr3 = 0x02102211;
-    cpu->id_isar0 = 0x02101110;
-    cpu->id_isar1 = 0x13112111;
-    cpu->id_isar2 = 0x21232042;
-    cpu->id_isar3 = 0x01112131;
-    cpu->id_isar4 = 0x00011142;
-    cpu->id_isar5 = 0x00011121;
-    cpu->id_aa64pfr0 = 0x00002222;
+    cpu->isar.id_isar0 = 0x02101110;
+    cpu->isar.id_isar1 = 0x13112111;
+    cpu->isar.id_isar2 = 0x21232042;
+    cpu->isar.id_isar3 = 0x01112131;
+    cpu->isar.id_isar4 = 0x00011142;
+    cpu->isar.id_isar5 = 0x00011121;
+    cpu->isar.id_aa64pfr0 = 0x00002222;
     cpu->id_aa64dfr0 = 0x10305106;
     cpu->pmceid0 = 0x00000000;
     cpu->pmceid1 = 0x00000000;
-    cpu->id_aa64isar0 = 0x00011120;
+    cpu->isar.id_aa64isar0 = 0x00011120;
     cpu->id_aa64mmfr0 = 0x00001124;
     cpu->dbgdidr = 0x3516d000;
     cpu->clidr = 0x0a200023;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
-    uint64_t pfr0 = cpu->id_aa64pfr0;
+    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

     if (env->gicv3state) {
         pfr0 |= 1 << 24;
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar0 },
+              .resetvalue = cpu->isar.id_isar0 },
             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar1 },
+              .resetvalue = cpu->isar.id_isar1 },
             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar2 },
+              .resetvalue = cpu->isar.id_isar2 },
             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar3 },
+              .resetvalue = cpu->isar.id_isar3 },
             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar4 },
+              .resetvalue = cpu->isar.id_isar4 },
             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar5 },
+              .resetvalue = cpu->isar.id_isar5 },
             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
               .access = PL1_R, .type = ARM_CP_CONST,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
363
set_feature(&cpu->env, ARM_FEATURE_CBAR);
364
cpu->midr = 0x410fc090;
365
cpu->reset_fpsid = 0x41033090;
366
- cpu->mvfr0 = 0x11110222;
367
- cpu->mvfr1 = 0x01111111;
368
+ cpu->isar.mvfr0 = 0x11110222;
369
+ cpu->isar.mvfr1 = 0x01111111;
370
cpu->ctr = 0x80038003;
371
cpu->reset_sctlr = 0x00c50078;
372
cpu->id_pfr0 = 0x1031;
373
@@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj)
374
cpu->id_mmfr1 = 0x20000000;
375
cpu->id_mmfr2 = 0x01230000;
376
cpu->id_mmfr3 = 0x00002111;
377
- cpu->id_isar0 = 0x00101111;
378
- cpu->id_isar1 = 0x13112111;
379
- cpu->id_isar2 = 0x21232041;
380
- cpu->id_isar3 = 0x11112131;
381
- cpu->id_isar4 = 0x00111142;
382
+ cpu->isar.id_isar0 = 0x00101111;
383
+ cpu->isar.id_isar1 = 0x13112111;
384
+ cpu->isar.id_isar2 = 0x21232041;
385
+ cpu->isar.id_isar3 = 0x11112131;
386
+ cpu->isar.id_isar4 = 0x00111142;
387
cpu->dbgdidr = 0x35141000;
388
cpu->clidr = (1 << 27) | (1 << 24) | 3;
389
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
390
@@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj)
391
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
392
cpu->midr = 0x410fc075;
393
cpu->reset_fpsid = 0x41023075;
394
- cpu->mvfr0 = 0x10110222;
395
- cpu->mvfr1 = 0x11111111;
396
+ cpu->isar.mvfr0 = 0x10110222;
397
+ cpu->isar.mvfr1 = 0x11111111;
398
cpu->ctr = 0x84448003;
399
cpu->reset_sctlr = 0x00c50078;
400
cpu->id_pfr0 = 0x00001131;
401
@@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj)
402
/* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
403
* table 4-41 gives 0x02101110, which includes the arm div insns.
404
*/
405
- cpu->id_isar0 = 0x02101110;
406
- cpu->id_isar1 = 0x13112111;
407
- cpu->id_isar2 = 0x21232041;
408
- cpu->id_isar3 = 0x11112131;
409
- cpu->id_isar4 = 0x10011142;
410
+ cpu->isar.id_isar0 = 0x02101110;
411
+ cpu->isar.id_isar1 = 0x13112111;
412
+ cpu->isar.id_isar2 = 0x21232041;
413
+ cpu->isar.id_isar3 = 0x11112131;
414
+ cpu->isar.id_isar4 = 0x10011142;
415
cpu->dbgdidr = 0x3515f005;
416
cpu->clidr = 0x0a200023;
417
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
418
@@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj)
419
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
420
cpu->midr = 0x412fc0f1;
421
cpu->reset_fpsid = 0x410430f0;
422
- cpu->mvfr0 = 0x10110222;
423
- cpu->mvfr1 = 0x11111111;
424
+ cpu->isar.mvfr0 = 0x10110222;
425
+ cpu->isar.mvfr1 = 0x11111111;
426
cpu->ctr = 0x8444c004;
427
cpu->reset_sctlr = 0x00c50078;
428
cpu->id_pfr0 = 0x00001131;
429
@@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj)
430
cpu->id_mmfr1 = 0x20000000;
431
cpu->id_mmfr2 = 0x01240000;
432
cpu->id_mmfr3 = 0x02102211;
433
- cpu->id_isar0 = 0x02101110;
434
- cpu->id_isar1 = 0x13112111;
435
- cpu->id_isar2 = 0x21232041;
436
- cpu->id_isar3 = 0x11112131;
437
- cpu->id_isar4 = 0x10011142;
438
+ cpu->isar.id_isar0 = 0x02101110;
439
+ cpu->isar.id_isar1 = 0x13112111;
440
+ cpu->isar.id_isar2 = 0x21232041;
441
+ cpu->isar.id_isar3 = 0x11112131;
442
+ cpu->isar.id_isar4 = 0x10011142;
443
cpu->dbgdidr = 0x3515f021;
444
cpu->clidr = 0x0a200023;
445
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
446
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
277
index XXXXXXX..XXXXXXX 100644
447
index XXXXXXX..XXXXXXX 100644
278
--- a/target/arm/translate-sve.c
448
--- a/target/arm/cpu64.c
279
+++ b/target/arm/translate-sve.c
449
+++ b/target/arm/cpu64.c
280
@@ -XXX,XX +XXX,XX @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
450
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
281
static void do_ld_zpa(DisasContext *s, int zt, int pg,
451
cpu->midr = 0x411fd070;
282
TCGv_i64 addr, int dtype, int nreg)
452
cpu->revidr = 0x00000000;
453
cpu->reset_fpsid = 0x41034070;
454
- cpu->mvfr0 = 0x10110222;
455
- cpu->mvfr1 = 0x12111111;
456
- cpu->mvfr2 = 0x00000043;
457
+ cpu->isar.mvfr0 = 0x10110222;
458
+ cpu->isar.mvfr1 = 0x12111111;
459
+ cpu->isar.mvfr2 = 0x00000043;
460
cpu->ctr = 0x8444c004;
461
cpu->reset_sctlr = 0x00c50838;
462
cpu->id_pfr0 = 0x00000131;
463
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
464
cpu->id_mmfr1 = 0x40000000;
465
cpu->id_mmfr2 = 0x01260000;
466
cpu->id_mmfr3 = 0x02102211;
467
- cpu->id_isar0 = 0x02101110;
468
- cpu->id_isar1 = 0x13112111;
469
- cpu->id_isar2 = 0x21232042;
470
- cpu->id_isar3 = 0x01112131;
471
- cpu->id_isar4 = 0x00011142;
472
- cpu->id_isar5 = 0x00011121;
473
- cpu->id_isar6 = 0;
474
- cpu->id_aa64pfr0 = 0x00002222;
475
+ cpu->isar.id_isar0 = 0x02101110;
476
+ cpu->isar.id_isar1 = 0x13112111;
477
+ cpu->isar.id_isar2 = 0x21232042;
478
+ cpu->isar.id_isar3 = 0x01112131;
479
+ cpu->isar.id_isar4 = 0x00011142;
480
+ cpu->isar.id_isar5 = 0x00011121;
481
+ cpu->isar.id_isar6 = 0;
482
+ cpu->isar.id_aa64pfr0 = 0x00002222;
483
cpu->id_aa64dfr0 = 0x10305106;
484
cpu->pmceid0 = 0x00000000;
485
cpu->pmceid1 = 0x00000000;
486
- cpu->id_aa64isar0 = 0x00011120;
487
+ cpu->isar.id_aa64isar0 = 0x00011120;
488
cpu->id_aa64mmfr0 = 0x00001124;
489
cpu->dbgdidr = 0x3516d000;
490
cpu->clidr = 0x0a200023;
491
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
492
cpu->midr = 0x410fd034;
493
cpu->revidr = 0x00000000;
494
cpu->reset_fpsid = 0x41034070;
495
- cpu->mvfr0 = 0x10110222;
496
- cpu->mvfr1 = 0x12111111;
497
- cpu->mvfr2 = 0x00000043;
498
+ cpu->isar.mvfr0 = 0x10110222;
499
+ cpu->isar.mvfr1 = 0x12111111;
500
+ cpu->isar.mvfr2 = 0x00000043;
501
cpu->ctr = 0x84448004; /* L1Ip = VIPT */
502
cpu->reset_sctlr = 0x00c50838;
503
cpu->id_pfr0 = 0x00000131;
504
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
505
cpu->id_mmfr1 = 0x40000000;
506
cpu->id_mmfr2 = 0x01260000;
507
cpu->id_mmfr3 = 0x02102211;
508
- cpu->id_isar0 = 0x02101110;
509
- cpu->id_isar1 = 0x13112111;
510
- cpu->id_isar2 = 0x21232042;
511
- cpu->id_isar3 = 0x01112131;
512
- cpu->id_isar4 = 0x00011142;
513
- cpu->id_isar5 = 0x00011121;
514
- cpu->id_isar6 = 0;
515
- cpu->id_aa64pfr0 = 0x00002222;
516
+ cpu->isar.id_isar0 = 0x02101110;
517
+ cpu->isar.id_isar1 = 0x13112111;
518
+ cpu->isar.id_isar2 = 0x21232042;
519
+ cpu->isar.id_isar3 = 0x01112131;
520
+ cpu->isar.id_isar4 = 0x00011142;
521
+ cpu->isar.id_isar5 = 0x00011121;
522
+ cpu->isar.id_isar6 = 0;
523
+ cpu->isar.id_aa64pfr0 = 0x00002222;
524
cpu->id_aa64dfr0 = 0x10305106;
525
- cpu->id_aa64isar0 = 0x00011120;
526
+ cpu->isar.id_aa64isar0 = 0x00011120;
527
cpu->id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
528
cpu->dbgdidr = 0x3516d000;
529
cpu->clidr = 0x0a200023;
530
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
531
cpu->midr = 0x410fd083;
532
cpu->revidr = 0x00000000;
533
cpu->reset_fpsid = 0x41034080;
534
- cpu->mvfr0 = 0x10110222;
535
- cpu->mvfr1 = 0x12111111;
536
- cpu->mvfr2 = 0x00000043;
537
+ cpu->isar.mvfr0 = 0x10110222;
538
+ cpu->isar.mvfr1 = 0x12111111;
539
+ cpu->isar.mvfr2 = 0x00000043;
540
cpu->ctr = 0x8444c004;
541
cpu->reset_sctlr = 0x00c50838;
542
cpu->id_pfr0 = 0x00000131;
543
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
544
cpu->id_mmfr1 = 0x40000000;
545
cpu->id_mmfr2 = 0x01260000;
546
cpu->id_mmfr3 = 0x02102211;
547
- cpu->id_isar0 = 0x02101110;
548
- cpu->id_isar1 = 0x13112111;
549
- cpu->id_isar2 = 0x21232042;
550
- cpu->id_isar3 = 0x01112131;
551
- cpu->id_isar4 = 0x00011142;
552
- cpu->id_isar5 = 0x00011121;
553
- cpu->id_aa64pfr0 = 0x00002222;
554
+ cpu->isar.id_isar0 = 0x02101110;
555
+ cpu->isar.id_isar1 = 0x13112111;
556
+ cpu->isar.id_isar2 = 0x21232042;
557
+ cpu->isar.id_isar3 = 0x01112131;
558
+ cpu->isar.id_isar4 = 0x00011142;
559
+ cpu->isar.id_isar5 = 0x00011121;
560
+ cpu->isar.id_aa64pfr0 = 0x00002222;
561
cpu->id_aa64dfr0 = 0x10305106;
562
cpu->pmceid0 = 0x00000000;
563
cpu->pmceid1 = 0x00000000;
564
- cpu->id_aa64isar0 = 0x00011120;
565
+ cpu->isar.id_aa64isar0 = 0x00011120;
566
cpu->id_aa64mmfr0 = 0x00001124;
567
cpu->dbgdidr = 0x3516d000;
568
cpu->clidr = 0x0a200023;
569
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
-    uint64_t pfr0 = cpu->id_aa64pfr0;
+    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

     if (env->gicv3state) {
         pfr0 |= 1 << 24;
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar0 },
+              .resetvalue = cpu->isar.id_isar0 },
             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar1 },
+              .resetvalue = cpu->isar.id_isar1 },
             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar2 },
+              .resetvalue = cpu->isar.id_isar2 },
             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar3 },
+              .resetvalue = cpu->isar.id_isar3 },
             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar4 },
+              .resetvalue = cpu->isar.id_isar4 },
             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar5 },
+              .resetvalue = cpu->isar.id_isar5 },
             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
               .access = PL1_R, .type = ARM_CP_CONST,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_isar6 },
+              .resetvalue = cpu->isar.id_isar6 },
             REGINFO_SENTINEL
         };
         define_arm_cp_regs(cpu, v6_idregs);
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_aa64pfr1},
+              .resetvalue = cpu->isar.id_aa64pfr1},
             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
               .access = PL1_R, .type = ARM_CP_CONST,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_aa64isar0 },
+              .resetvalue = cpu->isar.id_aa64isar0 },
             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->id_aa64isar1 },
+              .resetvalue = cpu->isar.id_aa64isar1 },
             { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
               .access = PL1_R, .type = ARM_CP_CONST,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->mvfr0 },
+              .resetvalue = cpu->isar.mvfr0 },
             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->mvfr1 },
+              .resetvalue = cpu->isar.mvfr1 },
             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
               .access = PL1_R, .type = ARM_CP_CONST,
-              .resetvalue = cpu->mvfr2 },
+              .resetvalue = cpu->isar.mvfr2 },
             { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
               .access = PL1_R, .type = ARM_CP_CONST,
-- 
2.19.1

 {
-    static gen_helper_gvec_mem * const fns[16][4] = {
-        { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
-          gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
-        { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
-        { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
-        { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
+    static gen_helper_gvec_mem * const fns[2][16][4] = {
+        /* Little-endian */
+        { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
+            gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
+          { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },

-        { gen_helper_sve_ld1sds_r, NULL, NULL, NULL },
-        { gen_helper_sve_ld1hh_r, gen_helper_sve_ld2hh_r,
-          gen_helper_sve_ld3hh_r, gen_helper_sve_ld4hh_r },
-        { gen_helper_sve_ld1hsu_r, NULL, NULL, NULL },
-        { gen_helper_sve_ld1hdu_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
+            gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
+          { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },

-        { gen_helper_sve_ld1hds_r, NULL, NULL, NULL },
-        { gen_helper_sve_ld1hss_r, NULL, NULL, NULL },
-        { gen_helper_sve_ld1ss_r, gen_helper_sve_ld2ss_r,
-          gen_helper_sve_ld3ss_r, gen_helper_sve_ld4ss_r },
-        { gen_helper_sve_ld1sdu_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
+            gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
+          { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },

-        { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
-        { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
-        { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
-        { gen_helper_sve_ld1dd_r, gen_helper_sve_ld2dd_r,
-          gen_helper_sve_ld3dd_r, gen_helper_sve_ld4dd_r },
+          { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
+            gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },
+
+        /* Big-endian */
+        { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
+            gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
+          { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
+
+          { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
+            gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
+          { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },
+
+          { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
+            gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
+          { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },
+
+          { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
+          { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
+            gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } }
     };
-    gen_helper_gvec_mem *fn = fns[dtype][nreg];
+    gen_helper_gvec_mem *fn = fns[s->be_data == MO_BE][dtype][nreg];

     /* While there are holes in the table, they are not
      * accessible via the instruction encoding.
@@ -XXX,XX +XXX,XX @@ static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)

 static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a, uint32_t insn)
 {
-    static gen_helper_gvec_mem * const fns[16] = {
-        gen_helper_sve_ldff1bb_r,
-        gen_helper_sve_ldff1bhu_r,
-        gen_helper_sve_ldff1bsu_r,
-        gen_helper_sve_ldff1bdu_r,
+    static gen_helper_gvec_mem * const fns[2][16] = {
+        /* Little-endian */
+        { gen_helper_sve_ldff1bb_r,
+          gen_helper_sve_ldff1bhu_r,
+          gen_helper_sve_ldff1bsu_r,
+          gen_helper_sve_ldff1bdu_r,

-        gen_helper_sve_ldff1sds_r,
-        gen_helper_sve_ldff1hh_r,
-        gen_helper_sve_ldff1hsu_r,
-        gen_helper_sve_ldff1hdu_r,
+          gen_helper_sve_ldff1sds_le_r,
+          gen_helper_sve_ldff1hh_le_r,
+          gen_helper_sve_ldff1hsu_le_r,
+          gen_helper_sve_ldff1hdu_le_r,

-        gen_helper_sve_ldff1hds_r,
-        gen_helper_sve_ldff1hss_r,
-        gen_helper_sve_ldff1ss_r,
-        gen_helper_sve_ldff1sdu_r,
+          gen_helper_sve_ldff1hds_le_r,
+          gen_helper_sve_ldff1hss_le_r,
+          gen_helper_sve_ldff1ss_le_r,
+          gen_helper_sve_ldff1sdu_le_r,

-        gen_helper_sve_ldff1bds_r,
-        gen_helper_sve_ldff1bss_r,
-        gen_helper_sve_ldff1bhs_r,
-        gen_helper_sve_ldff1dd_r,
+          gen_helper_sve_ldff1bds_r,
+          gen_helper_sve_ldff1bss_r,
+          gen_helper_sve_ldff1bhs_r,
+          gen_helper_sve_ldff1dd_le_r },
+
+        /* Big-endian */
+        { gen_helper_sve_ldff1bb_r,
+          gen_helper_sve_ldff1bhu_r,
+          gen_helper_sve_ldff1bsu_r,
+          gen_helper_sve_ldff1bdu_r,
+
+          gen_helper_sve_ldff1sds_be_r,
+          gen_helper_sve_ldff1hh_be_r,
+          gen_helper_sve_ldff1hsu_be_r,
+          gen_helper_sve_ldff1hdu_be_r,
+
+          gen_helper_sve_ldff1hds_be_r,
+          gen_helper_sve_ldff1hss_be_r,
+          gen_helper_sve_ldff1ss_be_r,
+          gen_helper_sve_ldff1sdu_be_r,
+
+          gen_helper_sve_ldff1bds_r,
+          gen_helper_sve_ldff1bss_r,
+          gen_helper_sve_ldff1bhs_r,
+          gen_helper_sve_ldff1dd_be_r },
     };

     if (sve_access_check(s)) {
         TCGv_i64 addr = new_tmp_a64(s);
         tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
         tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
-        do_mem_zpa(s, a->rd, a->pg, addr, fns[a->dtype]);
+        do_mem_zpa(s, a->rd, a->pg, addr, fns[s->be_data == MO_BE][a->dtype]);
     }
     return true;
 }

 static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)
 {
-    static gen_helper_gvec_mem * const fns[16] = {
-        gen_helper_sve_ldnf1bb_r,
-        gen_helper_sve_ldnf1bhu_r,
-        gen_helper_sve_ldnf1bsu_r,
-        gen_helper_sve_ldnf1bdu_r,
+    static gen_helper_gvec_mem * const fns[2][16] = {
+        /* Little-endian */
+        { gen_helper_sve_ldnf1bb_r,
+          gen_helper_sve_ldnf1bhu_r,
+          gen_helper_sve_ldnf1bsu_r,
+          gen_helper_sve_ldnf1bdu_r,

-        gen_helper_sve_ldnf1sds_r,
-        gen_helper_sve_ldnf1hh_r,
-        gen_helper_sve_ldnf1hsu_r,
-        gen_helper_sve_ldnf1hdu_r,
+          gen_helper_sve_ldnf1sds_le_r,
+          gen_helper_sve_ldnf1hh_le_r,
+          gen_helper_sve_ldnf1hsu_le_r,
+          gen_helper_sve_ldnf1hdu_le_r,

-        gen_helper_sve_ldnf1hds_r,
-        gen_helper_sve_ldnf1hss_r,
-        gen_helper_sve_ldnf1ss_r,
-        gen_helper_sve_ldnf1sdu_r,
+          gen_helper_sve_ldnf1hds_le_r,
+          gen_helper_sve_ldnf1hss_le_r,
+          gen_helper_sve_ldnf1ss_le_r,
+          gen_helper_sve_ldnf1sdu_le_r,

-        gen_helper_sve_ldnf1bds_r,
-        gen_helper_sve_ldnf1bss_r,
-        gen_helper_sve_ldnf1bhs_r,
-        gen_helper_sve_ldnf1dd_r,
+          gen_helper_sve_ldnf1bds_r,
+          gen_helper_sve_ldnf1bss_r,
+          gen_helper_sve_ldnf1bhs_r,
+          gen_helper_sve_ldnf1dd_le_r },
+
+        /* Big-endian */
+        { gen_helper_sve_ldnf1bb_r,
+          gen_helper_sve_ldnf1bhu_r,
+          gen_helper_sve_ldnf1bsu_r,
+          gen_helper_sve_ldnf1bdu_r,
+
+          gen_helper_sve_ldnf1sds_be_r,
+          gen_helper_sve_ldnf1hh_be_r,
+          gen_helper_sve_ldnf1hsu_be_r,
+          gen_helper_sve_ldnf1hdu_be_r,
+
+          gen_helper_sve_ldnf1hds_be_r,
+          gen_helper_sve_ldnf1hss_be_r,
+          gen_helper_sve_ldnf1ss_be_r,
+          gen_helper_sve_ldnf1sdu_be_r,
+
+          gen_helper_sve_ldnf1bds_r,
+          gen_helper_sve_ldnf1bss_r,
+          gen_helper_sve_ldnf1bhs_r,
+          gen_helper_sve_ldnf1dd_be_r },
     };

     if (sve_access_check(s)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)
         TCGv_i64 addr = new_tmp_a64(s);

         tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
-        do_mem_zpa(s, a->rd, a->pg, addr, fns[a->dtype]);
+        do_mem_zpa(s, a->rd, a->pg, addr, fns[s->be_data == MO_BE][a->dtype]);
     }
     return true;
 }

 static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
 {
-    static gen_helper_gvec_mem * const fns[4] = {
-        gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_r,
-        gen_helper_sve_ld1ss_r, gen_helper_sve_ld1dd_r,
+    static gen_helper_gvec_mem * const fns[2][4] = {
+        { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_le_r,
+          gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld1dd_le_r },
+        { gen_helper_sve_ld1bb_r, gen_helper_sve_ld1hh_be_r,
+          gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld1dd_be_r },
     };
     unsigned vsz = vec_full_reg_size(s);
     TCGv_ptr t_pg;
@@ -XXX,XX +XXX,XX @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
     t_pg = tcg_temp_new_ptr();
     tcg_gen_addi_ptr(t_pg, cpu_env, poff);

-    fns[msz](cpu_env, t_pg, addr, desc);
+    fns[s->be_data == MO_BE][msz](cpu_env, t_pg, addr, desc);

     tcg_temp_free_ptr(t_pg);
     tcg_temp_free_i32(desc);
-- 
2.19.0
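A note on the dispatch pattern above: the extra leading index on each fns[] table selects between the little-endian and big-endian helper once, at translation time, so the endianness decision is not repeated per element access. A minimal sketch of the same idea (illustrative only, not part of the patch; the helper_le_*/helper_be_* names are placeholders):

    /* Sketch: pick an endian-specific helper at translate time. */
    static gen_helper_gvec_mem * const fns[2][4] = {
        { helper_le_b, helper_le_h, helper_le_s, helper_le_d }, /* LE */
        { helper_be_b, helper_be_h, helper_be_s, helper_be_d }, /* BE */
    };
    gen_helper_gvec_mem *fn = fns[s->be_data == MO_BE][msz];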
From: Richard Henderson <richard.henderson@linaro.org>

Instantiating mps2-an505 (cortex-m33) will fail make check when
V7VE asserts that ID_ISAR0.Divide includes ARM division.  It is
also wrong to include ARM_FEATURE_LPAE.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)

     /* Some features automatically imply others: */
     if (arm_feature(env, ARM_FEATURE_V8)) {
-        set_feature(env, ARM_FEATURE_V7VE);
+        if (arm_feature(env, ARM_FEATURE_M)) {
+            set_feature(env, ARM_FEATURE_V7);
+        } else {
+            set_feature(env, ARM_FEATURE_V7VE);
+        }
     }
     if (arm_feature(env, ARM_FEATURE_V7VE)) {
         /* v7 Virtualization Extensions. In real hardware this implies
-- 
2.19.1
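The failure mode named in the commit message comes from the consistency checks this series introduces: once features are derived from ID registers, realize-time code can assert that a feature bit and the corresponding ID field agree. Roughly (a sketch of the shape of that check, not the verbatim code from the series):

    /* Sketch: V7VE guarantees the A32 SDIV/UDIV encodings, so any CPU
     * with that feature bit must advertise them in ID_ISAR0.Divide.
     * An M-profile core such as cortex-m33 has Thumb divide only,
     * which is why V8 must not imply V7VE for M profile.
     */
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        assert(cpu_isar_feature(arm_div, cpu)); /* ID_ISAR0.Divide > 1 */
    }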
From: Richard Henderson <richard.henderson@linaro.org>

Most of the v8 extensions are self-contained within the ISAR
registers and are not implied by other feature bits, which
makes them the easiest to convert.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h           | 131 +++++++++++++++++++++++++++++++++----
 target/arm/translate.h     |   7 ++
 linux-user/elfload.c       |  46 ++++++++-----
 target/arm/cpu.c           |  27 +++++---
 target/arm/cpu64.c         |  57 +++++++++-------
 target/arm/translate-a64.c | 101 ++++++++++++++--------------
 target/arm/translate.c     |  36 +++++-----
 7 files changed, 273 insertions(+), 132 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef enum ARMPSCIState {
     PSCI_ON_PENDING = 2
 } ARMPSCIState;

+typedef struct ARMISARegisters ARMISARegisters;
+
 /**
  * ARMCPU:
  * @env: #CPUARMState
@@ -XXX,XX +XXX,XX @@ enum arm_features {
     ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
     ARM_FEATURE_V8,
     ARM_FEATURE_AARCH64, /* supports 64 bit mode */
-    ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
     ARM_FEATURE_CBAR, /* has cp15 CBAR */
     ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
     ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
     ARM_FEATURE_EL2, /* has EL2 Virtualization support */
     ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
-    ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
     ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
     ARM_FEATURE_PMU, /* has PMU support */
     ARM_FEATURE_VBAR, /* has cp15 VBAR */
     ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
     ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
     ARM_FEATURE_SVE, /* has Scalable Vector Extension */
-    ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */
-    ARM_FEATURE_V8_ATOMICS, /* ARMv8.1-Atomics feature */
-    ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */
-    ARM_FEATURE_V8_DOTPROD, /* implements v8.2 simd dot product */
     ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
-    ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */
     ARM_FEATURE_M_MAIN, /* M profile Main Extension */
 };

@@ -XXX,XX +XXX,XX @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
 /* Shared between translate-sve.c and sve_helper.c.  */
 extern const uint64_t pred_esz_masks[4];

+/*
+ * 32-bit feature tests via id registers.
+ */
+static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
+}
+
+static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1;
+}
+
+static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0;
+}
+
+static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0;
+}
+
+static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0;
+}
+
+static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0;
+}
+
+static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0;
+}
+
+static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
+}
+
+/*
+ * 64-bit feature tests via id registers.
+ */
+static inline bool isar_feature_aa64_aes(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
+}
+
+static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1;
+}
+
+static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0;
+}
+
+static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0;
+}
+
+static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1;
+}
+
+static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0;
+}
+
+static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0;
+}
+
+static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0;
+}
+
+static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0;
+}
+
+static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0;
+}
+
+static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0;
+}
+
+static inline bool isar_feature_aa64_dp(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0;
+}
+
+static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
+}
+
+/*
+ * Forward to the above feature tests given an ARMCPU pointer.
+ */
+#define cpu_isar_feature(name, cpu) \
+    ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })
+
 #endif
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@
 /* internal defines */
 typedef struct DisasContext {
     DisasContextBase base;
+    const ARMISARegisters *isar;

     target_ulong pc;
     target_ulong page_start;
@@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 get_ahp_flag(void)
     return ret;
 }

+/*
+ * Forward to the isar_feature_* tests given a DisasContext pointer.
+ */
+#define dc_isar_feature(name, ctx) \
+    ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
     /* probe for the extra features */
 #define GET_FEATURE(feat, hwcap) \
     do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
+
+#define GET_FEATURE_ID(feat, hwcap) \
+    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
     /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
     GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
     GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap2(void)
     ARMCPU *cpu = ARM_CPU(thread_cpu);
     uint32_t hwcaps = 0;

-    GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES);
-    GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL);
-    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1);
-    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2);
-    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32);
+    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
+    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
+    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
+    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
+    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
     return hwcaps;
 }

 #undef GET_FEATURE
+#undef GET_FEATURE_ID

 #else
 /* 64 bit ARM definitions */
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
     /* probe for the extra features */
 #define GET_FEATURE(feat, hwcap) \
     do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
-    GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES);
-    GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL);
-    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1);
-    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2);
-    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32);
-    GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3);
-    GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3);
-    GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4);
-    GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512);
+#define GET_FEATURE_ID(feat, hwcap) \
+    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
+    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
+    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
+    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
+    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
+    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
+    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
+    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
+    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
+    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
     GET_FEATURE(ARM_FEATURE_V8_FP16,
                 ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
-    GET_FEATURE(ARM_FEATURE_V8_ATOMICS, ARM_HWCAP_A64_ATOMICS);
-    GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM);
-    GET_FEATURE(ARM_FEATURE_V8_DOTPROD, ARM_HWCAP_A64_ASIMDDP);
-    GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA);
+    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
+    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
+    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
+    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
     GET_FEATURE(ARM_FEATURE_SVE, ARM_HWCAP_A64_SVE);
+
 #undef GET_FEATURE
+#undef GET_FEATURE_ID

     return hwcaps;
 }
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
         cortex_a15_initfn(obj);
 #ifdef CONFIG_USER_ONLY
         /* We don't set these in system emulation mode for the moment,
-         * since we don't correctly set the ID registers to advertise them,
+         * since we don't correctly set (all of) the ID registers to
+         * advertise them.
          */
         set_feature(&cpu->env, ARM_FEATURE_V8);
-        set_feature(&cpu->env, ARM_FEATURE_V8_AES);
-        set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
-        set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
-        set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
-        set_feature(&cpu->env, ARM_FEATURE_CRC);
-        set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
-        set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD);
-        set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
+        {
+            uint32_t t;
+
+            t = cpu->isar.id_isar5;
+            t = FIELD_DP32(t, ID_ISAR5, AES, 2);
+            t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
+            t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
+            t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
+            t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
+            t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
+            cpu->isar.id_isar5 = t;
+
+            t = cpu->isar.id_isar6;
+            t = FIELD_DP32(t, ID_ISAR6, DP, 1);
+            cpu->isar.id_isar6 = t;
+        }
 #endif
     }
 }
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
-    set_feature(&cpu->env, ARM_FEATURE_V8_AES);
-    set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
-    set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
-    set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
-    set_feature(&cpu->env, ARM_FEATURE_CRC);
     set_feature(&cpu->env, ARM_FEATURE_EL2);
     set_feature(&cpu->env, ARM_FEATURE_EL3);
     set_feature(&cpu->env, ARM_FEATURE_PMU);
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
-    set_feature(&cpu->env, ARM_FEATURE_V8_AES);
-    set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
-    set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
-    set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
-    set_feature(&cpu->env, ARM_FEATURE_CRC);
     set_feature(&cpu->env, ARM_FEATURE_EL2);
     set_feature(&cpu->env, ARM_FEATURE_EL3);
     set_feature(&cpu->env, ARM_FEATURE_PMU);
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
     set_feature(&cpu->env, ARM_FEATURE_AARCH64);
     set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
-    set_feature(&cpu->env, ARM_FEATURE_V8_AES);
-    set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
-    set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
-    set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
-    set_feature(&cpu->env, ARM_FEATURE_CRC);
     set_feature(&cpu->env, ARM_FEATURE_EL2);
     set_feature(&cpu->env, ARM_FEATURE_EL3);
     set_feature(&cpu->env, ARM_FEATURE_PMU);
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
     if (kvm_enabled()) {
         kvm_arm_set_cpu_features_from_host(cpu);
     } else {
+        uint64_t t;
+        uint32_t u;
         aarch64_a57_initfn(obj);
+
+        t = cpu->isar.id_aa64isar0;
+        t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
+        t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
+        t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
+        t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
+        t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);
+        t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);
+        t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);
+        t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);
+        t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
+        t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
+        cpu->isar.id_aa64isar0 = t;
+
+        t = cpu->isar.id_aa64isar1;
+        t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
+        cpu->isar.id_aa64isar1 = t;
+
+        /* Replicate the same data to the 32-bit id registers.  */
+        u = cpu->isar.id_isar5;
+        u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
+        u = FIELD_DP32(u, ID_ISAR5, SHA1, 1);
+        u = FIELD_DP32(u, ID_ISAR5, SHA2, 1);
+        u = FIELD_DP32(u, ID_ISAR5, CRC32, 1);
+        u = FIELD_DP32(u, ID_ISAR5, RDM, 1);
+        u = FIELD_DP32(u, ID_ISAR5, VCMA, 1);
+        cpu->isar.id_isar5 = u;
+
+        u = cpu->isar.id_isar6;
+        u = FIELD_DP32(u, ID_ISAR6, DP, 1);
+        cpu->isar.id_isar6 = u;
+
 #ifdef CONFIG_USER_ONLY
         /* We don't set these in system emulation mode for the moment,
          * since we don't correctly set the ID registers to advertise them,
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
          * whereas the architecture requires them to be present in both if
          * present in either.
          */
-        set_feature(&cpu->env, ARM_FEATURE_V8_SHA512);
-        set_feature(&cpu->env, ARM_FEATURE_V8_SHA3);
-        set_feature(&cpu->env, ARM_FEATURE_V8_SM3);
-        set_feature(&cpu->env, ARM_FEATURE_V8_SM4);
-        set_feature(&cpu->env, ARM_FEATURE_V8_ATOMICS);
-        set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
-        set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD);
         set_feature(&cpu->env, ARM_FEATURE_V8_FP16);
-        set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
         set_feature(&cpu->env, ARM_FEATURE_SVE);
         /* For usermode -cpu max we can use a larger and more efficient DCZ
          * blocksize since we don't have to follow what the hardware does.
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         }
         if (rt2 == 31
             && ((rt | rs) & 1) == 0
-            && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+            && dc_isar_feature(aa64_atomics, s)) {
             /* CASP / CASPL */
             gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
             return;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
         }
         if (rt2 == 31
             && ((rt | rs) & 1) == 0
-            && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+            && dc_isar_feature(aa64_atomics, s)) {
             /* CASPA / CASPAL */
             gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
             return;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
     case 0xb: /* CASL */
     case 0xe: /* CASA */
     case 0xf: /* CASAL */
-        if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+        if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
             gen_compare_and_swap(s, rs, rt, rn, size);
             return;
         }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
     int rs = extract32(insn, 16, 5);
     int rn = extract32(insn, 5, 5);
     int o3_opc = extract32(insn, 12, 4);
-    int feature = ARM_FEATURE_V8_ATOMICS;
     TCGv_i64 tcg_rn, tcg_rs;
     AtomicThreeOpFn *fn;

-    if (is_vector) {
+    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
         unallocated_encoding(s);
         return;
     }
-    if (!arm_dc_feature(s, feature)) {
-        unallocated_encoding(s);
-        return;
-    }

     if (rn == 31) {
         gen_check_sp_alignment(s);
@@ -XXX,XX +XXX,XX @@ static void handle_crc32(DisasContext *s,
     TCGv_i64 tcg_acc, tcg_val;
     TCGv_i32 tcg_bytes;

-    if (!arm_dc_feature(s, ARM_FEATURE_CRC)
+    if (!dc_isar_feature(aa64_crc32, s)
         || (sf == 1 && sz != 3)
         || (sf == 0 && sz == 3)) {
         unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
     bool u = extract32(insn, 29, 1);
     TCGv_i32 ele1, ele2, ele3;
     TCGv_i64 res;
-    int feature;
+    bool feature;

     switch (u * 16 + opcode) {
     case 0x10: /* SQRDMLAH (vector) */
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
             unallocated_encoding(s);
             return;
         }
-        feature = ARM_FEATURE_V8_RDM;
+        feature = dc_isar_feature(aa64_rdm, s);
         break;
     default:
         unallocated_encoding(s);
         return;
     }
-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
         return;
     }
     if (size == 3) {
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
+        if (!dc_isar_feature(aa64_pmull, s)) {
             unallocated_encoding(s);
             return;
         }
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
     int size = extract32(insn, 22, 2);
     bool u = extract32(insn, 29, 1);
     bool is_q = extract32(insn, 30, 1);
-    int feature, rot;
+    bool feature;
+    int rot;

     switch (u * 16 + opcode) {
     case 0x10: /* SQRDMLAH (vector) */
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             return;
         }
-        feature = ARM_FEATURE_V8_RDM;
+        feature = dc_isar_feature(aa64_rdm, s);
         break;
     case 0x02: /* SDOT (vector) */
     case 0x12: /* UDOT (vector) */
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             return;
         }
-        feature = ARM_FEATURE_V8_DOTPROD;
+        feature = dc_isar_feature(aa64_dp, s);
         break;
     case 0x18: /* FCMLA, #0 */
     case 0x19: /* FCMLA, #90 */
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             return;
         }
-        feature = ARM_FEATURE_V8_FCMA;
+        feature = dc_isar_feature(aa64_fcma, s);
         break;
     default:
         unallocated_encoding(s);
         return;
     }
-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
         break;
     case 0x1d: /* SQRDMLAH */
     case 0x1f: /* SQRDMLSH */
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
+        if (!dc_isar_feature(aa64_rdm, s)) {
             unallocated_encoding(s);
             return;
         }
         break;
     case 0x0e: /* SDOT */
     case 0x1e: /* UDOT */
-        if (size != MO_32 || !arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
+        if (size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
             unallocated_encoding(s);
             return;
         }
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
     case 0x13: /* FCMLA #90 */
     case 0x15: /* FCMLA #180 */
     case 0x17: /* FCMLA #270 */
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
+        if (!dc_isar_feature(aa64_fcma, s)) {
             unallocated_encoding(s);
             return;
         }
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_aes(DisasContext *s, uint32_t insn)
     TCGv_i32 tcg_decrypt;
     CryptoThreeOpIntFn *genfn;

-    if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
-        || size != 0) {
+    if (!dc_isar_feature(aa64_aes, s) || size != 0) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
     int rd = extract32(insn, 0, 5);
     CryptoThreeOpFn *genfn;
     TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
-    int feature = ARM_FEATURE_V8_SHA256;
+    bool feature;

     if (size != 0) {
         unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
     case 2: /* SHA1M */
     case 3: /* SHA1SU0 */
         genfn = NULL;
-        feature = ARM_FEATURE_V8_SHA1;
+        feature = dc_isar_feature(aa64_sha1, s);
         break;
     case 4: /* SHA256H */
         genfn = gen_helper_crypto_sha256h;
+        feature = dc_isar_feature(aa64_sha256, s);
         break;
     case 5: /* SHA256H2 */
         genfn = gen_helper_crypto_sha256h2;
+        feature = dc_isar_feature(aa64_sha256, s);
         break;
     case 6: /* SHA256SU1 */
         genfn = gen_helper_crypto_sha256su1;
+        feature = dc_isar_feature(aa64_sha256, s);
         break;
     default:
         unallocated_encoding(s);
         return;
     }

-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
     CryptoTwoOpFn *genfn;
-    int feature;
+    bool feature;
     TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;

     if (size != 0) {
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)

     switch (opcode) {
     case 0: /* SHA1H */
-        feature = ARM_FEATURE_V8_SHA1;
+        feature = dc_isar_feature(aa64_sha1, s);
         genfn = gen_helper_crypto_sha1h;
         break;
     case 1: /* SHA1SU1 */
-        feature = ARM_FEATURE_V8_SHA1;
+        feature = dc_isar_feature(aa64_sha1, s);
         genfn = gen_helper_crypto_sha1su1;
         break;
     case 2: /* SHA256SU0 */
-        feature = ARM_FEATURE_V8_SHA256;
+        feature = dc_isar_feature(aa64_sha256, s);
         genfn = gen_helper_crypto_sha256su0;
         break;
     default:
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
         return;
     }

-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
     int rm = extract32(insn, 16, 5);
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
-    int feature;
+    bool feature;
     CryptoThreeOpFn *genfn;

     if (o == 0) {
         switch (opcode) {
         case 0: /* SHA512H */
-            feature = ARM_FEATURE_V8_SHA512;
+            feature = dc_isar_feature(aa64_sha512, s);
             genfn = gen_helper_crypto_sha512h;
             break;
         case 1: /* SHA512H2 */
-            feature = ARM_FEATURE_V8_SHA512;
+            feature = dc_isar_feature(aa64_sha512, s);
             genfn = gen_helper_crypto_sha512h2;
             break;
         case 2: /* SHA512SU1 */
-            feature = ARM_FEATURE_V8_SHA512;
+            feature = dc_isar_feature(aa64_sha512, s);
             genfn = gen_helper_crypto_sha512su1;
             break;
         case 3: /* RAX1 */
-            feature = ARM_FEATURE_V8_SHA3;
+            feature = dc_isar_feature(aa64_sha3, s);
             genfn = NULL;
             break;
         }
     } else {
         switch (opcode) {
         case 0: /* SM3PARTW1 */
-            feature = ARM_FEATURE_V8_SM3;
+            feature = dc_isar_feature(aa64_sm3, s);
             genfn = gen_helper_crypto_sm3partw1;
             break;
         case 1: /* SM3PARTW2 */
-            feature = ARM_FEATURE_V8_SM3;
+            feature = dc_isar_feature(aa64_sm3, s);
             genfn = gen_helper_crypto_sm3partw2;
             break;
         case 2: /* SM4EKEY */
-            feature = ARM_FEATURE_V8_SM4;
+            feature = dc_isar_feature(aa64_sm4, s);
             genfn = gen_helper_crypto_sm4ekey;
             break;
         default:
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
         }
     }

-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
     TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
-    int feature;
+    bool feature;
     CryptoTwoOpFn *genfn;

     switch (opcode) {
     case 0: /* SHA512SU0 */
-        feature = ARM_FEATURE_V8_SHA512;
+        feature = dc_isar_feature(aa64_sha512, s);
         genfn = gen_helper_crypto_sha512su0;
         break;
     case 1: /* SM4E */
-        feature = ARM_FEATURE_V8_SM4;
+        feature = dc_isar_feature(aa64_sm4, s);
         genfn = gen_helper_crypto_sm4e;
         break;
     default:
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
         return;
     }

-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
     int ra = extract32(insn, 10, 5);
     int rn = extract32(insn, 5, 5);
     int rd = extract32(insn, 0, 5);
-    int feature;
+    bool feature;

     switch (op0) {
     case 0: /* EOR3 */
     case 1: /* BCAX */
-        feature = ARM_FEATURE_V8_SHA3;
+        feature = dc_isar_feature(aa64_sha3, s);
         break;
     case 2: /* SM3SS1 */
-        feature = ARM_FEATURE_V8_SM3;
+        feature = dc_isar_feature(aa64_sm3, s);
         break;
     default:
         unallocated_encoding(s);
         return;
     }

-    if (!arm_dc_feature(s, feature)) {
+    if (!feature) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn)
     TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
     int pass;

-    if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
+    if (!dc_isar_feature(aa64_sha3, s)) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
     TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
     TCGv_i32 tcg_imm2, tcg_opcode;

-    if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
+    if (!dc_isar_feature(aa64_sm3, s)) {
         unallocated_encoding(s);
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     ARMCPU *arm_cpu = arm_env_get_cpu(env);
     int bound;

+    dc->isar = &arm_cpu->isar;
     dc->pc = dc->base.pc_first;
     dc->condjmp = 0;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static const uint8_t neon_2rm_sizes[] = {
 static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                          int q, int rd, int rn, int rm)
 {
-    if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
+    if (dc_isar_feature(aa32_rdm, s)) {
         int opr_sz = (1 + q) * 8;
         tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                            vfp_reg_offset(1, rn),
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             return 1;
         }
         if (!u) { /* SHA-1 */
-            if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
+            if (!dc_isar_feature(aa32_sha1, s)) {
                 return 1;
             }
             ptr1 = vfp_reg_ptr(true, rd);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
             tcg_temp_free_i32(tmp4);
         } else { /* SHA-256 */
-            if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
+            if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                 return 1;
             }
             ptr1 = vfp_reg_ptr(true, rd);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         if (op == 14 && size == 2) {
             TCGv_i64 tcg_rn, tcg_rm, tcg_rd;

-            if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
+            if (!dc_isar_feature(aa32_pmull, s)) {
                 return 1;
             }
             tcg_rn = tcg_temp_new_i64();
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         {
             NeonGenThreeOpEnvFn *fn;

-            if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
+            if (!dc_isar_feature(aa32_rdm, s)) {
                 return 1;
             }
             if (u && ((rd | rn) & 1)) {
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             break;
         }
         case NEON_2RM_AESE: case NEON_2RM_AESMC:
-            if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
-                || ((rm | rd) & 1)) {
+            if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
                 return 1;
             }
             ptr1 = vfp_reg_ptr(true, rd);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             tcg_temp_free_i32(tmp3);
             break;
         case NEON_2RM_SHA1H:
-            if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
-                || ((rm | rd) & 1)) {
+            if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
                 return 1;
             }
             ptr1 = vfp_reg_ptr(true, rd);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             }
             /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
             if (q) {
-                if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
+                if (!dc_isar_feature(aa32_sha2, s)) {
                     return 1;
                 }
-            } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
+            } else if (!dc_isar_feature(aa32_sha1, s)) {
                 return 1;
             }
             ptr1 = vfp_reg_ptr(true, rd);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
         /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
         int size = extract32(insn, 20, 1);
         data = extract32(insn, 23, 2); /* rot */
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
+        if (!dc_isar_feature(aa32_vcma, s)
             || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
             return 1;
         }
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
         /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
         int size = extract32(insn, 20, 1);
         data = extract32(insn, 24, 1); /* rot */
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
+        if (!dc_isar_feature(aa32_vcma, s)
             || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
             return 1;
         }
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
     } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
         /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
         bool u = extract32(insn, 4, 1);
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
+        if (!dc_isar_feature(aa32_dp, s)) {
             return 1;
         }
         fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
         int size = extract32(insn, 23, 1);
         int index;

-        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
+        if (!dc_isar_feature(aa32_vcma, s)) {
             return 1;
         }
         if (size == 0) {
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
     } else if ((insn & 0xffb00f00) == 0xfe200d00) {
         /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
         int u = extract32(insn, 4, 1);
-        if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
+        if (!dc_isar_feature(aa32_dp, s)) {
             return 1;
         }
         fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
          * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
          * Bits 8, 10 and 11 should be zero.
          */
-        if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
-            (c & 0xd) != 0) {
+        if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
             goto illegal_op;
         }

@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
             case 0x28:
             case 0x29:
             case 0x2a:
-                if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
+                if (!dc_isar_feature(aa32_crc32, s)) {
                     goto illegal_op;
                 }
                 break;
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     CPUARMState *env = cs->env_ptr;
     ARMCPU *cpu = arm_env_get_cpu(env);

+    dc->isar = &cpu->isar;
     dc->pc = dc->base.pc_first;
     dc->condjmp = 0;
-- 
2.19.1

The Arm v8M architecture includes hardware stack limit checking.
When certain instructions update the stack pointer, if the new
value of SP is below the limit set in the associated limit register
then an exception is taken. Add a TB flag that tracks whether
the limit-checking code needs to be emitted.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20181002163556.10279-2-peter.maydell@linaro.org
---
 target/arm/cpu.h       |  7 +++++++
 target/arm/translate.h |  1 +
 target/arm/helper.c    | 10 ++++++++++
 target/arm/translate.c |  1 +
 4 files changed, 19 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_CCR, UNALIGN_TRP, 3, 1)
 FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
 FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
 FIELD(V7M_CCR, STKALIGN, 9, 1)
+FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
 FIELD(V7M_CCR, DC, 16, 1)
 FIELD(V7M_CCR, IC, 17, 1)
+FIELD(V7M_CCR, BP, 18, 1)

 /* V7M SCR bits */
 FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
@@ -XXX,XX +XXX,XX @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
 /* For M profile only, Handler (ie not Thread) mode */
 #define ARM_TBFLAG_HANDLER_SHIFT    21
 #define ARM_TBFLAG_HANDLER_MASK     (1 << ARM_TBFLAG_HANDLER_SHIFT)
+/* For M profile only, whether we should generate stack-limit checks */
+#define ARM_TBFLAG_STACKCHECK_SHIFT 22
+#define ARM_TBFLAG_STACKCHECK_MASK  (1 << ARM_TBFLAG_STACKCHECK_SHIFT)

 /* Bit usage when in AArch64 state */
 #define ARM_TBFLAG_TBI0_SHIFT 0        /* TBI0 for EL0/1 or TBI for EL2/3 */
@@ -XXX,XX +XXX,XX @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
     (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)
 #define ARM_TBFLAG_HANDLER(F) \
     (((F) & ARM_TBFLAG_HANDLER_MASK) >> ARM_TBFLAG_HANDLER_SHIFT)
+#define ARM_TBFLAG_STACKCHECK(F) \
+    (((F) & ARM_TBFLAG_STACKCHECK_MASK) >> ARM_TBFLAG_STACKCHECK_SHIFT)
 #define ARM_TBFLAG_TBI0(F) \
     (((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT)
 #define ARM_TBFLAG_TBI1(F) \
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     int vec_stride;
     bool v7m_handler_mode;
     bool v8m_secure; /* true if v8M and we're in Secure mode */
+    bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */
     /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
      * so that top level loop can generate correct syndrome information.
      */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
         flags |= ARM_TBFLAG_HANDLER_MASK;
     }

+    /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
+     * suppressing them because the requested execution priority is less than 0.
+     */
+    if (arm_feature(env, ARM_FEATURE_V8) &&
+        arm_feature(env, ARM_FEATURE_M) &&
+        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
+          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
+        flags |= ARM_TBFLAG_STACKCHECK_MASK;
+    }
+
     *pflags = flags;
     *cs_base = 0;
 }
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
     dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
         regime_is_secure(env, dc->mmu_idx);
+    dc->v8m_stackcheck = ARM_TBFLAG_STACKCHECK(dc->base.tb->flags);
     dc->cp_regs = cpu->cp_regs;
     dc->features = env->features;
-- 
2.19.0
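To see how the new v8m_stackcheck flag is meant to be consumed: once it is set, the translator can wrap SP-writing instructions with a limit comparison. A rough sketch of the usage the later stack-check patches in this queue build on (illustrative only; gen_helper_v8m_stackcheck is the helper those follow-up patches add, and newsp here is just a stand-in for the TCG temp holding the prospective SP):

    /* Sketch: guard an SP-writing instruction with a limit check. */
    if (s->v8m_stackcheck) {
        /* Takes the v8M STKOF UsageFault if newsp is below the limit. */
        gen_helper_v8m_stackcheck(cpu_env, newsp);
    }
    store_reg(s, 13, newsp);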
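A worked example of the ID-register tests introduced by the isar-feature patch above, using the ID_ISAR5 value 0x00011121 that this queue sets for the Cortex-A53/A72 (the local variable names are ours):

    /* ID_ISAR5 = 0x00011121: AES (bits [7:4]) = 2, SHA1 = 1, SHA2 = 1,
     * CRC32 = 1.  AES >= 1 means the AES instructions are present, and
     * AES >= 2 additionally means PMULL, so one field feeds two tests.
     */
    uint32_t isar5 = 0x00011121;
    bool aes   = FIELD_EX32(isar5, ID_ISAR5, AES) != 0; /* true */
    bool pmull = FIELD_EX32(isar5, ID_ISAR5, AES) > 1;  /* true */
    bool rdm   = FIELD_EX32(isar5, ID_ISAR5, RDM) != 0; /* false: bits [27:24] are 0 */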
From: Richard Henderson <richard.henderson@linaro.org>

Both arm and thumb2 division are controlled by the same ISAR field,
which takes care of the arm implies thumb case.  Having M imply
thumb2 division was wrong for cortex-m0, which is v6m and does not
have thumb2 at all, much less thumb2 division.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h       | 12 ++++++++++--
 linux-user/elfload.c   |  4 ++--
 target/arm/cpu.c       | 10 +---------
 target/arm/translate.c |  4 ++--
 4 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ enum arm_features {
     ARM_FEATURE_VFP3,
     ARM_FEATURE_VFP_FP16,
     ARM_FEATURE_NEON,
-    ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
     ARM_FEATURE_M, /* Microcontroller profile.  */
     ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling.  */
     ARM_FEATURE_THUMB2EE,
@@ -XXX,XX +XXX,XX @@ enum arm_features {
     ARM_FEATURE_V5,
     ARM_FEATURE_STRONGARM,
     ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
-    ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
     ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
     ARM_FEATURE_GENERIC_TIMER,
     ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
@@ -XXX,XX +XXX,XX @@ extern const uint64_t pred_esz_masks[4];
 /*
  * 32-bit feature tests via id registers.
  */
+static inline bool isar_feature_thumb_div(const ARMISARegisters *id)

From: Richard Henderson <richard.henderson@linaro.org>

This fixes the endianness problem for softmmu, and moves
the main loop out of a macro and into an inlined function.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181005175350.30752-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |  84 +++++++++----
 target/arm/sve_helper.c    | 225 ++++++++++++++++++++----------
 target/arm/translate-sve.c | 244 +++++++++++++++++++++++++------------
 3 files changed, 386 insertions(+), 167 deletions(-)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)

 DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldhsu_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldssu_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhsu_be_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_le_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_be_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
 DEF_HELPER_FLAGS_6(sve_ldbss_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldhss_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhss_le_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhss_be_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)

 DEF_HELPER_FLAGS_6(sve_ldbsu_zss, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldhsu_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhsu_le_zss, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldssu_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhsu_be_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_le_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_be_zss, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
 DEF_HELPER_FLAGS_6(sve_ldbss_zss, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldhss_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhss_le_zss, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhss_be_zss, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)

 DEF_HELPER_FLAGS_6(sve_ldbdu_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldhdu_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhdu_le_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldsdu_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhdu_be_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldddu_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldsdu_le_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_be_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_le_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_be_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
 DEF_HELPER_FLAGS_6(sve_ldbds_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldhds_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhds_le_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldsds_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhds_be_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_le_zsu, TCG_CALL_NO_WG,
+                   void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_be_zsu, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)

 DEF_HELPER_FLAGS_6(sve_ldbdu_zss, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldhdu_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhdu_le_zss, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldsdu_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldhdu_be_zss, TCG_CALL_NO_WG,
                    void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_ldddu_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss, TCG_CALL_NO_WG,
102
+DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss, TCG_CALL_NO_WG,
103
+ void, env, ptr, ptr, ptr, tl, i32)
104
+DEF_HELPER_FLAGS_6(sve_ldsdu_be_zss, TCG_CALL_NO_WG,
105
+ void, env, ptr, ptr, ptr, tl, i32)
106
+DEF_HELPER_FLAGS_6(sve_lddd_le_zss, TCG_CALL_NO_WG,
107
+ void, env, ptr, ptr, ptr, tl, i32)
108
+DEF_HELPER_FLAGS_6(sve_lddd_be_zss, TCG_CALL_NO_WG,
109
void, env, ptr, ptr, ptr, tl, i32)
110
DEF_HELPER_FLAGS_6(sve_ldbds_zss, TCG_CALL_NO_WG,
111
void, env, ptr, ptr, ptr, tl, i32)
112
-DEF_HELPER_FLAGS_6(sve_ldhds_zss, TCG_CALL_NO_WG,
113
+DEF_HELPER_FLAGS_6(sve_ldhds_le_zss, TCG_CALL_NO_WG,
114
void, env, ptr, ptr, ptr, tl, i32)
115
-DEF_HELPER_FLAGS_6(sve_ldsds_zss, TCG_CALL_NO_WG,
116
+DEF_HELPER_FLAGS_6(sve_ldhds_be_zss, TCG_CALL_NO_WG,
117
+ void, env, ptr, ptr, ptr, tl, i32)
118
+DEF_HELPER_FLAGS_6(sve_ldsds_le_zss, TCG_CALL_NO_WG,
119
+ void, env, ptr, ptr, ptr, tl, i32)
120
+DEF_HELPER_FLAGS_6(sve_ldsds_be_zss, TCG_CALL_NO_WG,
121
void, env, ptr, ptr, ptr, tl, i32)
122
123
DEF_HELPER_FLAGS_6(sve_ldbdu_zd, TCG_CALL_NO_WG,
124
void, env, ptr, ptr, ptr, tl, i32)
125
-DEF_HELPER_FLAGS_6(sve_ldhdu_zd, TCG_CALL_NO_WG,
126
+DEF_HELPER_FLAGS_6(sve_ldhdu_le_zd, TCG_CALL_NO_WG,
127
void, env, ptr, ptr, ptr, tl, i32)
128
-DEF_HELPER_FLAGS_6(sve_ldsdu_zd, TCG_CALL_NO_WG,
129
+DEF_HELPER_FLAGS_6(sve_ldhdu_be_zd, TCG_CALL_NO_WG,
130
void, env, ptr, ptr, ptr, tl, i32)
131
-DEF_HELPER_FLAGS_6(sve_ldddu_zd, TCG_CALL_NO_WG,
132
+DEF_HELPER_FLAGS_6(sve_ldsdu_le_zd, TCG_CALL_NO_WG,
133
+ void, env, ptr, ptr, ptr, tl, i32)
134
+DEF_HELPER_FLAGS_6(sve_ldsdu_be_zd, TCG_CALL_NO_WG,
135
+ void, env, ptr, ptr, ptr, tl, i32)
136
+DEF_HELPER_FLAGS_6(sve_lddd_le_zd, TCG_CALL_NO_WG,
137
+ void, env, ptr, ptr, ptr, tl, i32)
138
+DEF_HELPER_FLAGS_6(sve_lddd_be_zd, TCG_CALL_NO_WG,
139
void, env, ptr, ptr, ptr, tl, i32)
140
DEF_HELPER_FLAGS_6(sve_ldbds_zd, TCG_CALL_NO_WG,
141
void, env, ptr, ptr, ptr, tl, i32)
142
-DEF_HELPER_FLAGS_6(sve_ldhds_zd, TCG_CALL_NO_WG,
143
+DEF_HELPER_FLAGS_6(sve_ldhds_le_zd, TCG_CALL_NO_WG,
144
void, env, ptr, ptr, ptr, tl, i32)
145
-DEF_HELPER_FLAGS_6(sve_ldsds_zd, TCG_CALL_NO_WG,
146
+DEF_HELPER_FLAGS_6(sve_ldhds_be_zd, TCG_CALL_NO_WG,
147
+ void, env, ptr, ptr, ptr, tl, i32)
148
+DEF_HELPER_FLAGS_6(sve_ldsds_le_zd, TCG_CALL_NO_WG,
149
+ void, env, ptr, ptr, ptr, tl, i32)
150
+DEF_HELPER_FLAGS_6(sve_ldsds_be_zd, TCG_CALL_NO_WG,
151
void, env, ptr, ptr, ptr, tl, i32)
152
153
DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu, TCG_CALL_NO_WG,
154
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
155
index XXXXXXX..XXXXXXX 100644
156
--- a/target/arm/sve_helper.c
157
+++ b/target/arm/sve_helper.c
158
@@ -XXX,XX +XXX,XX @@ DO_STN_2(4, dd, 8, 8)
159
#undef DO_STN_1
160
#undef DO_STN_2
161
162
-/* Loads with a vector index. */
163
+/*
164
+ * Loads with a vector index.
165
+ */
166
167
-#define DO_LD1_ZPZ_S(NAME, TYPEI, TYPEM, FN) \
168
-void HELPER(NAME)(CPUARMState *env, void *vd, void *vg, void *vm, \
169
- target_ulong base, uint32_t desc) \
170
-{ \
171
- intptr_t i, oprsz = simd_oprsz(desc); \
172
- unsigned scale = simd_data(desc); \
173
- uintptr_t ra = GETPC(); \
174
- for (i = 0; i < oprsz; ) { \
175
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
176
- do { \
177
- TYPEM m = 0; \
178
- if (pg & 1) { \
179
- target_ulong off = *(TYPEI *)(vm + H1_4(i)); \
180
- m = FN(env, base + (off << scale), ra); \
181
- } \
182
- *(uint32_t *)(vd + H1_4(i)) = m; \
183
- i += 4, pg >>= 4; \
184
- } while (i & 15); \
185
- } \
186
+/*
187
+ * Load the element at @reg + @reg_ofs, sign or zero-extend as needed.
188
+ */
189
+typedef target_ulong zreg_off_fn(void *reg, intptr_t reg_ofs);
190
+
191
+static target_ulong off_zsu_s(void *reg, intptr_t reg_ofs)
192
+{
45
+{
193
+ return *(uint32_t *)(reg + H1_4(reg_ofs));
46
+ return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
194
}
195
196
-#define DO_LD1_ZPZ_D(NAME, TYPEI, TYPEM, FN) \
197
-void HELPER(NAME)(CPUARMState *env, void *vd, void *vg, void *vm, \
198
- target_ulong base, uint32_t desc) \
199
-{ \
200
- intptr_t i, oprsz = simd_oprsz(desc) / 8; \
201
- unsigned scale = simd_data(desc); \
202
- uintptr_t ra = GETPC(); \
203
- uint64_t *d = vd, *m = vm; uint8_t *pg = vg; \
204
- for (i = 0; i < oprsz; i++) { \
205
- TYPEM mm = 0; \
206
- if (pg[H1(i)] & 1) { \
207
- target_ulong off = (TYPEI)m[i]; \
208
- mm = FN(env, base + (off << scale), ra); \
209
- } \
210
- d[i] = mm; \
211
- } \
212
+static target_ulong off_zss_s(void *reg, intptr_t reg_ofs)
213
+{
214
+ return *(int32_t *)(reg + H1_4(reg_ofs));
215
}
216
217
-DO_LD1_ZPZ_S(sve_ldbsu_zsu, uint32_t, uint8_t, cpu_ldub_data_ra)
218
-DO_LD1_ZPZ_S(sve_ldhsu_zsu, uint32_t, uint16_t, cpu_lduw_data_ra)
219
-DO_LD1_ZPZ_S(sve_ldssu_zsu, uint32_t, uint32_t, cpu_ldl_data_ra)
220
-DO_LD1_ZPZ_S(sve_ldbss_zsu, uint32_t, int8_t, cpu_ldub_data_ra)
221
-DO_LD1_ZPZ_S(sve_ldhss_zsu, uint32_t, int16_t, cpu_lduw_data_ra)
222
+static target_ulong off_zsu_d(void *reg, intptr_t reg_ofs)
223
+{
224
+ return (uint32_t)*(uint64_t *)(reg + reg_ofs);
225
+}
226
227
-DO_LD1_ZPZ_S(sve_ldbsu_zss, int32_t, uint8_t, cpu_ldub_data_ra)
228
-DO_LD1_ZPZ_S(sve_ldhsu_zss, int32_t, uint16_t, cpu_lduw_data_ra)
229
-DO_LD1_ZPZ_S(sve_ldssu_zss, int32_t, uint32_t, cpu_ldl_data_ra)
230
-DO_LD1_ZPZ_S(sve_ldbss_zss, int32_t, int8_t, cpu_ldub_data_ra)
231
-DO_LD1_ZPZ_S(sve_ldhss_zss, int32_t, int16_t, cpu_lduw_data_ra)
232
+static target_ulong off_zss_d(void *reg, intptr_t reg_ofs)
233
+{
234
+ return (int32_t)*(uint64_t *)(reg + reg_ofs);
235
+}
236
237
-DO_LD1_ZPZ_D(sve_ldbdu_zsu, uint32_t, uint8_t, cpu_ldub_data_ra)
238
-DO_LD1_ZPZ_D(sve_ldhdu_zsu, uint32_t, uint16_t, cpu_lduw_data_ra)
239
-DO_LD1_ZPZ_D(sve_ldsdu_zsu, uint32_t, uint32_t, cpu_ldl_data_ra)
240
-DO_LD1_ZPZ_D(sve_ldddu_zsu, uint32_t, uint64_t, cpu_ldq_data_ra)
241
-DO_LD1_ZPZ_D(sve_ldbds_zsu, uint32_t, int8_t, cpu_ldub_data_ra)
242
-DO_LD1_ZPZ_D(sve_ldhds_zsu, uint32_t, int16_t, cpu_lduw_data_ra)
243
-DO_LD1_ZPZ_D(sve_ldsds_zsu, uint32_t, int32_t, cpu_ldl_data_ra)
244
+static target_ulong off_zd_d(void *reg, intptr_t reg_ofs)
245
+{
246
+ return *(uint64_t *)(reg + reg_ofs);
247
+}
248
249
-DO_LD1_ZPZ_D(sve_ldbdu_zss, int32_t, uint8_t, cpu_ldub_data_ra)
250
-DO_LD1_ZPZ_D(sve_ldhdu_zss, int32_t, uint16_t, cpu_lduw_data_ra)
251
-DO_LD1_ZPZ_D(sve_ldsdu_zss, int32_t, uint32_t, cpu_ldl_data_ra)
252
-DO_LD1_ZPZ_D(sve_ldddu_zss, int32_t, uint64_t, cpu_ldq_data_ra)
253
-DO_LD1_ZPZ_D(sve_ldbds_zss, int32_t, int8_t, cpu_ldub_data_ra)
254
-DO_LD1_ZPZ_D(sve_ldhds_zss, int32_t, int16_t, cpu_lduw_data_ra)
255
-DO_LD1_ZPZ_D(sve_ldsds_zss, int32_t, int32_t, cpu_ldl_data_ra)
256
+static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
257
+ target_ulong base, uint32_t desc, uintptr_t ra,
258
+ zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
259
+{
260
+ const int mmu_idx = cpu_mmu_index(env, false);
261
+ intptr_t i, oprsz = simd_oprsz(desc);
262
+ unsigned scale = simd_data(desc);
263
+ ARMVectorReg scratch = { };
264
265
-DO_LD1_ZPZ_D(sve_ldbdu_zd, uint64_t, uint8_t, cpu_ldub_data_ra)
266
-DO_LD1_ZPZ_D(sve_ldhdu_zd, uint64_t, uint16_t, cpu_lduw_data_ra)
267
-DO_LD1_ZPZ_D(sve_ldsdu_zd, uint64_t, uint32_t, cpu_ldl_data_ra)
268
-DO_LD1_ZPZ_D(sve_ldddu_zd, uint64_t, uint64_t, cpu_ldq_data_ra)
269
-DO_LD1_ZPZ_D(sve_ldbds_zd, uint64_t, int8_t, cpu_ldub_data_ra)
270
-DO_LD1_ZPZ_D(sve_ldhds_zd, uint64_t, int16_t, cpu_lduw_data_ra)
271
-DO_LD1_ZPZ_D(sve_ldsds_zd, uint64_t, int32_t, cpu_ldl_data_ra)
272
+ set_helper_retaddr(ra);
273
+ for (i = 0; i < oprsz; ) {
274
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
275
+ do {
276
+ if (likely(pg & 1)) {
277
+ target_ulong off = off_fn(vm, i);
278
+ tlb_fn(env, &scratch, i, base + (off << scale), mmu_idx, ra);
279
+ }
280
+ i += 4, pg >>= 4;
281
+ } while (i & 15);
282
+ }
283
+ set_helper_retaddr(0);
284
+
285
+ /* Wait until all exceptions have been raised to write back. */
286
+ memcpy(vd, &scratch, oprsz);
287
+}
47
+}
288
+
48
+
289
+static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
49
+static inline bool isar_feature_arm_div(const ARMISARegisters *id)
290
+ target_ulong base, uint32_t desc, uintptr_t ra,
291
+ zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
292
+{
50
+{
293
+ const int mmu_idx = cpu_mmu_index(env, false);
51
+ return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
294
+ intptr_t i, oprsz = simd_oprsz(desc) / 8;
295
+ unsigned scale = simd_data(desc);
296
+ ARMVectorReg scratch = { };
297
+
298
+ set_helper_retaddr(ra);
299
+ for (i = 0; i < oprsz; i++) {
300
+ uint8_t pg = *(uint8_t *)(vg + H1(i));
301
+ if (likely(pg & 1)) {
302
+ target_ulong off = off_fn(vm, i * 8);
303
+ tlb_fn(env, &scratch, i * 8, base + (off << scale), mmu_idx, ra);
304
+ }
305
+ }
306
+ set_helper_retaddr(0);
307
+
308
+ /* Wait until all exceptions have been raised to write back. */
309
+ memcpy(vd, &scratch, oprsz * 8);
310
+}
52
+}
311
+
53
+
312
+#define DO_LD1_ZPZ_S(MEM, OFS) \
54
static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
313
+void __attribute__((flatten)) HELPER(sve_ld##MEM##_##OFS) \
55
{
314
+ (CPUARMState *env, void *vd, void *vg, void *vm, \
56
return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
315
+ target_ulong base, uint32_t desc) \
57
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
316
+{ \
317
+ sve_ld1_zs(env, vd, vg, vm, base, desc, GETPC(), \
318
+ off_##OFS##_s, sve_ld1##MEM##_tlb); \
319
+}
320
+
321
+#define DO_LD1_ZPZ_D(MEM, OFS) \
322
+void __attribute__((flatten)) HELPER(sve_ld##MEM##_##OFS) \
323
+ (CPUARMState *env, void *vd, void *vg, void *vm, \
324
+ target_ulong base, uint32_t desc) \
325
+{ \
326
+ sve_ld1_zd(env, vd, vg, vm, base, desc, GETPC(), \
327
+ off_##OFS##_d, sve_ld1##MEM##_tlb); \
328
+}
329
+
330
+DO_LD1_ZPZ_S(bsu, zsu)
331
+DO_LD1_ZPZ_S(bsu, zss)
332
+DO_LD1_ZPZ_D(bdu, zsu)
333
+DO_LD1_ZPZ_D(bdu, zss)
334
+DO_LD1_ZPZ_D(bdu, zd)
335
+
336
+DO_LD1_ZPZ_S(bss, zsu)
337
+DO_LD1_ZPZ_S(bss, zss)
338
+DO_LD1_ZPZ_D(bds, zsu)
339
+DO_LD1_ZPZ_D(bds, zss)
340
+DO_LD1_ZPZ_D(bds, zd)
341
+
342
+DO_LD1_ZPZ_S(hsu_le, zsu)
343
+DO_LD1_ZPZ_S(hsu_le, zss)
344
+DO_LD1_ZPZ_D(hdu_le, zsu)
345
+DO_LD1_ZPZ_D(hdu_le, zss)
346
+DO_LD1_ZPZ_D(hdu_le, zd)
347
+
348
+DO_LD1_ZPZ_S(hsu_be, zsu)
349
+DO_LD1_ZPZ_S(hsu_be, zss)
350
+DO_LD1_ZPZ_D(hdu_be, zsu)
351
+DO_LD1_ZPZ_D(hdu_be, zss)
352
+DO_LD1_ZPZ_D(hdu_be, zd)
353
+
354
+DO_LD1_ZPZ_S(hss_le, zsu)
355
+DO_LD1_ZPZ_S(hss_le, zss)
356
+DO_LD1_ZPZ_D(hds_le, zsu)
357
+DO_LD1_ZPZ_D(hds_le, zss)
358
+DO_LD1_ZPZ_D(hds_le, zd)
359
+
360
+DO_LD1_ZPZ_S(hss_be, zsu)
361
+DO_LD1_ZPZ_S(hss_be, zss)
362
+DO_LD1_ZPZ_D(hds_be, zsu)
363
+DO_LD1_ZPZ_D(hds_be, zss)
364
+DO_LD1_ZPZ_D(hds_be, zd)
365
+
366
+DO_LD1_ZPZ_S(ss_le, zsu)
367
+DO_LD1_ZPZ_S(ss_le, zss)
368
+DO_LD1_ZPZ_D(sdu_le, zsu)
369
+DO_LD1_ZPZ_D(sdu_le, zss)
370
+DO_LD1_ZPZ_D(sdu_le, zd)
371
+
372
+DO_LD1_ZPZ_S(ss_be, zsu)
373
+DO_LD1_ZPZ_S(ss_be, zss)
374
+DO_LD1_ZPZ_D(sdu_be, zsu)
375
+DO_LD1_ZPZ_D(sdu_be, zss)
376
+DO_LD1_ZPZ_D(sdu_be, zd)
377
+
378
+DO_LD1_ZPZ_D(sds_le, zsu)
379
+DO_LD1_ZPZ_D(sds_le, zss)
380
+DO_LD1_ZPZ_D(sds_le, zd)
381
+
382
+DO_LD1_ZPZ_D(sds_be, zsu)
383
+DO_LD1_ZPZ_D(sds_be, zss)
384
+DO_LD1_ZPZ_D(sds_be, zd)
385
+
386
+DO_LD1_ZPZ_D(dd_le, zsu)
387
+DO_LD1_ZPZ_D(dd_le, zss)
388
+DO_LD1_ZPZ_D(dd_le, zd)
389
+
390
+DO_LD1_ZPZ_D(dd_be, zsu)
391
+DO_LD1_ZPZ_D(dd_be, zss)
392
+DO_LD1_ZPZ_D(dd_be, zd)
393
+
394
+#undef DO_LD1_ZPZ_S
395
+#undef DO_LD1_ZPZ_D
396
397
/* First fault loads with a vector index. */
398
399
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
400
index XXXXXXX..XXXXXXX 100644
58
index XXXXXXX..XXXXXXX 100644
401
--- a/target/arm/translate-sve.c
59
--- a/linux-user/elfload.c
402
+++ b/target/arm/translate-sve.c
60
+++ b/linux-user/elfload.c
403
@@ -XXX,XX +XXX,XX @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, int scale,
61
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
404
tcg_temp_free_i32(desc);
62
GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
405
}
63
GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
406
64
GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
407
-/* Indexed by [ff][xs][u][msz]. */
65
- GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA);
408
-static gen_helper_gvec_mem_scatter * const gather_load_fn32[2][2][2][3] = {
66
- GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT);
409
- { { { gen_helper_sve_ldbss_zsu,
67
+ GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
410
- gen_helper_sve_ldhss_zsu,
68
+ GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
411
- NULL, },
69
/* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
412
- { gen_helper_sve_ldbsu_zsu,
70
* Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
413
- gen_helper_sve_ldhsu_zsu,
71
* ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
414
- gen_helper_sve_ldssu_zsu, } },
72
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
415
- { { gen_helper_sve_ldbss_zss,
73
index XXXXXXX..XXXXXXX 100644
416
- gen_helper_sve_ldhss_zss,
74
--- a/target/arm/cpu.c
417
- NULL, },
75
+++ b/target/arm/cpu.c
418
- { gen_helper_sve_ldbsu_zss,
76
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
419
- gen_helper_sve_ldhsu_zss,
77
* Presence of EL2 itself is ARM_FEATURE_EL2, and of the
420
- gen_helper_sve_ldssu_zss, } } },
78
* Security Extensions is ARM_FEATURE_EL3.
421
+/* Indexed by [be][ff][xs][u][msz]. */
79
*/
422
+static gen_helper_gvec_mem_scatter * const gather_load_fn32[2][2][2][2][3] = {
80
- set_feature(env, ARM_FEATURE_ARM_DIV);
423
+ /* Little-endian */
81
+ assert(cpu_isar_feature(arm_div, cpu));
424
+ { { { { gen_helper_sve_ldbss_zsu,
82
set_feature(env, ARM_FEATURE_LPAE);
425
+ gen_helper_sve_ldhss_le_zsu,
83
set_feature(env, ARM_FEATURE_V7);
426
+ NULL, },
427
+ { gen_helper_sve_ldbsu_zsu,
428
+ gen_helper_sve_ldhsu_le_zsu,
429
+ gen_helper_sve_ldss_le_zsu, } },
430
+ { { gen_helper_sve_ldbss_zss,
431
+ gen_helper_sve_ldhss_le_zss,
432
+ NULL, },
433
+ { gen_helper_sve_ldbsu_zss,
434
+ gen_helper_sve_ldhsu_le_zss,
435
+ gen_helper_sve_ldss_le_zss, } } },
436
437
- { { { gen_helper_sve_ldffbss_zsu,
438
- gen_helper_sve_ldffhss_zsu,
439
- NULL, },
440
- { gen_helper_sve_ldffbsu_zsu,
441
- gen_helper_sve_ldffhsu_zsu,
442
- gen_helper_sve_ldffssu_zsu, } },
443
- { { gen_helper_sve_ldffbss_zss,
444
- gen_helper_sve_ldffhss_zss,
445
- NULL, },
446
- { gen_helper_sve_ldffbsu_zss,
447
- gen_helper_sve_ldffhsu_zss,
448
- gen_helper_sve_ldffssu_zss, } } }
449
+ /* First-fault */
450
+ { { { gen_helper_sve_ldffbss_zsu,
451
+ gen_helper_sve_ldffhss_zsu,
452
+ NULL, },
453
+ { gen_helper_sve_ldffbsu_zsu,
454
+ gen_helper_sve_ldffhsu_zsu,
455
+ gen_helper_sve_ldffssu_zsu, } },
456
+ { { gen_helper_sve_ldffbss_zss,
457
+ gen_helper_sve_ldffhss_zss,
458
+ NULL, },
459
+ { gen_helper_sve_ldffbsu_zss,
460
+ gen_helper_sve_ldffhsu_zss,
461
+ gen_helper_sve_ldffssu_zss, } } } },
462
+
463
+ /* Big-endian */
464
+ { { { { gen_helper_sve_ldbss_zsu,
465
+ gen_helper_sve_ldhss_be_zsu,
466
+ NULL, },
467
+ { gen_helper_sve_ldbsu_zsu,
468
+ gen_helper_sve_ldhsu_be_zsu,
469
+ gen_helper_sve_ldss_be_zsu, } },
470
+ { { gen_helper_sve_ldbss_zss,
471
+ gen_helper_sve_ldhss_be_zss,
472
+ NULL, },
473
+ { gen_helper_sve_ldbsu_zss,
474
+ gen_helper_sve_ldhsu_be_zss,
475
+ gen_helper_sve_ldss_be_zss, } } },
476
+
477
+ /* First-fault */
478
+ { { { gen_helper_sve_ldffbss_zsu,
479
+ gen_helper_sve_ldffhss_zsu,
480
+ NULL, },
481
+ { gen_helper_sve_ldffbsu_zsu,
482
+ gen_helper_sve_ldffhsu_zsu,
483
+ gen_helper_sve_ldffssu_zsu, } },
484
+ { { gen_helper_sve_ldffbss_zss,
485
+ gen_helper_sve_ldffhss_zss,
486
+ NULL, },
487
+ { gen_helper_sve_ldffbsu_zss,
488
+ gen_helper_sve_ldffhsu_zss,
489
+ gen_helper_sve_ldffssu_zss, } } } },
490
};
491
492
/* Note that we overload xs=2 to indicate 64-bit offset. */
493
-static gen_helper_gvec_mem_scatter * const gather_load_fn64[2][3][2][4] = {
494
- { { { gen_helper_sve_ldbds_zsu,
495
- gen_helper_sve_ldhds_zsu,
496
- gen_helper_sve_ldsds_zsu,
497
- NULL, },
498
- { gen_helper_sve_ldbdu_zsu,
499
- gen_helper_sve_ldhdu_zsu,
500
- gen_helper_sve_ldsdu_zsu,
501
- gen_helper_sve_ldddu_zsu, } },
502
- { { gen_helper_sve_ldbds_zss,
503
- gen_helper_sve_ldhds_zss,
504
- gen_helper_sve_ldsds_zss,
505
- NULL, },
506
- { gen_helper_sve_ldbdu_zss,
507
- gen_helper_sve_ldhdu_zss,
508
- gen_helper_sve_ldsdu_zss,
509
- gen_helper_sve_ldddu_zss, } },
510
- { { gen_helper_sve_ldbds_zd,
511
- gen_helper_sve_ldhds_zd,
512
- gen_helper_sve_ldsds_zd,
513
- NULL, },
514
- { gen_helper_sve_ldbdu_zd,
515
- gen_helper_sve_ldhdu_zd,
516
- gen_helper_sve_ldsdu_zd,
517
- gen_helper_sve_ldddu_zd, } } },
518
+static gen_helper_gvec_mem_scatter * const gather_load_fn64[2][2][3][2][4] = {
519
+ /* Little-endian */
520
+ { { { { gen_helper_sve_ldbds_zsu,
521
+ gen_helper_sve_ldhds_le_zsu,
522
+ gen_helper_sve_ldsds_le_zsu,
523
+ NULL, },
524
+ { gen_helper_sve_ldbdu_zsu,
525
+ gen_helper_sve_ldhdu_le_zsu,
526
+ gen_helper_sve_ldsdu_le_zsu,
527
+ gen_helper_sve_lddd_le_zsu, } },
528
+ { { gen_helper_sve_ldbds_zss,
529
+ gen_helper_sve_ldhds_le_zss,
530
+ gen_helper_sve_ldsds_le_zss,
531
+ NULL, },
532
+ { gen_helper_sve_ldbdu_zss,
533
+ gen_helper_sve_ldhdu_le_zss,
534
+ gen_helper_sve_ldsdu_le_zss,
535
+ gen_helper_sve_lddd_le_zss, } },
536
+ { { gen_helper_sve_ldbds_zd,
537
+ gen_helper_sve_ldhds_le_zd,
538
+ gen_helper_sve_ldsds_le_zd,
539
+ NULL, },
540
+ { gen_helper_sve_ldbdu_zd,
541
+ gen_helper_sve_ldhdu_le_zd,
542
+ gen_helper_sve_ldsdu_le_zd,
543
+ gen_helper_sve_lddd_le_zd, } } },
544
545
- { { { gen_helper_sve_ldffbds_zsu,
546
- gen_helper_sve_ldffhds_zsu,
547
- gen_helper_sve_ldffsds_zsu,
548
- NULL, },
549
- { gen_helper_sve_ldffbdu_zsu,
550
- gen_helper_sve_ldffhdu_zsu,
551
- gen_helper_sve_ldffsdu_zsu,
552
- gen_helper_sve_ldffddu_zsu, } },
553
- { { gen_helper_sve_ldffbds_zss,
554
- gen_helper_sve_ldffhds_zss,
555
- gen_helper_sve_ldffsds_zss,
556
- NULL, },
557
- { gen_helper_sve_ldffbdu_zss,
558
- gen_helper_sve_ldffhdu_zss,
559
- gen_helper_sve_ldffsdu_zss,
560
- gen_helper_sve_ldffddu_zss, } },
561
- { { gen_helper_sve_ldffbds_zd,
562
- gen_helper_sve_ldffhds_zd,
563
- gen_helper_sve_ldffsds_zd,
564
- NULL, },
565
- { gen_helper_sve_ldffbdu_zd,
566
- gen_helper_sve_ldffhdu_zd,
567
- gen_helper_sve_ldffsdu_zd,
568
- gen_helper_sve_ldffddu_zd, } } }
569
+ /* First-fault */
570
+ { { { gen_helper_sve_ldffbds_zsu,
571
+ gen_helper_sve_ldffhds_zsu,
572
+ gen_helper_sve_ldffsds_zsu,
573
+ NULL, },
574
+ { gen_helper_sve_ldffbdu_zsu,
575
+ gen_helper_sve_ldffhdu_zsu,
576
+ gen_helper_sve_ldffsdu_zsu,
577
+ gen_helper_sve_ldffddu_zsu, } },
578
+ { { gen_helper_sve_ldffbds_zss,
579
+ gen_helper_sve_ldffhds_zss,
580
+ gen_helper_sve_ldffsds_zss,
581
+ NULL, },
582
+ { gen_helper_sve_ldffbdu_zss,
583
+ gen_helper_sve_ldffhdu_zss,
584
+ gen_helper_sve_ldffsdu_zss,
585
+ gen_helper_sve_ldffddu_zss, } },
586
+ { { gen_helper_sve_ldffbds_zd,
587
+ gen_helper_sve_ldffhds_zd,
588
+ gen_helper_sve_ldffsds_zd,
589
+ NULL, },
590
+ { gen_helper_sve_ldffbdu_zd,
591
+ gen_helper_sve_ldffhdu_zd,
592
+ gen_helper_sve_ldffsdu_zd,
593
+ gen_helper_sve_ldffddu_zd, } } } },
594
+
595
+ /* Big-endian */
596
+ { { { { gen_helper_sve_ldbds_zsu,
597
+ gen_helper_sve_ldhds_be_zsu,
598
+ gen_helper_sve_ldsds_be_zsu,
599
+ NULL, },
600
+ { gen_helper_sve_ldbdu_zsu,
601
+ gen_helper_sve_ldhdu_be_zsu,
602
+ gen_helper_sve_ldsdu_be_zsu,
603
+ gen_helper_sve_lddd_be_zsu, } },
604
+ { { gen_helper_sve_ldbds_zss,
605
+ gen_helper_sve_ldhds_be_zss,
606
+ gen_helper_sve_ldsds_be_zss,
607
+ NULL, },
608
+ { gen_helper_sve_ldbdu_zss,
609
+ gen_helper_sve_ldhdu_be_zss,
610
+ gen_helper_sve_ldsdu_be_zss,
611
+ gen_helper_sve_lddd_be_zss, } },
612
+ { { gen_helper_sve_ldbds_zd,
613
+ gen_helper_sve_ldhds_be_zd,
614
+ gen_helper_sve_ldsds_be_zd,
615
+ NULL, },
616
+ { gen_helper_sve_ldbdu_zd,
617
+ gen_helper_sve_ldhdu_be_zd,
618
+ gen_helper_sve_ldsdu_be_zd,
619
+ gen_helper_sve_lddd_be_zd, } } },
620
+
621
+ /* First-fault */
622
+ { { { gen_helper_sve_ldffbds_zsu,
623
+ gen_helper_sve_ldffhds_zsu,
624
+ gen_helper_sve_ldffsds_zsu,
625
+ NULL, },
626
+ { gen_helper_sve_ldffbdu_zsu,
627
+ gen_helper_sve_ldffhdu_zsu,
628
+ gen_helper_sve_ldffsdu_zsu,
629
+ gen_helper_sve_ldffddu_zsu, } },
630
+ { { gen_helper_sve_ldffbds_zss,
631
+ gen_helper_sve_ldffhds_zss,
632
+ gen_helper_sve_ldffsds_zss,
633
+ NULL, },
634
+ { gen_helper_sve_ldffbdu_zss,
635
+ gen_helper_sve_ldffhdu_zss,
636
+ gen_helper_sve_ldffsdu_zss,
637
+ gen_helper_sve_ldffddu_zss, } },
638
+ { { gen_helper_sve_ldffbds_zd,
639
+ gen_helper_sve_ldffhds_zd,
640
+ gen_helper_sve_ldffsds_zd,
641
+ NULL, },
642
+ { gen_helper_sve_ldffbdu_zd,
643
+ gen_helper_sve_ldffhdu_zd,
644
+ gen_helper_sve_ldffsdu_zd,
645
+ gen_helper_sve_ldffddu_zd, } } } },
646
};
647
648
static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a, uint32_t insn)
649
{
650
gen_helper_gvec_mem_scatter *fn = NULL;
651
+ int be = s->be_data == MO_BE;
652
653
if (!sve_access_check(s)) {
654
return true;
655
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a, uint32_t insn)
656
657
switch (a->esz) {
658
case MO_32:
659
- fn = gather_load_fn32[a->ff][a->xs][a->u][a->msz];
660
+ fn = gather_load_fn32[be][a->ff][a->xs][a->u][a->msz];
661
break;
662
case MO_64:
663
- fn = gather_load_fn64[a->ff][a->xs][a->u][a->msz];
664
+ fn = gather_load_fn64[be][a->ff][a->xs][a->u][a->msz];
665
break;
666
}
84
}
667
assert(fn != NULL);
85
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
668
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a, uint32_t insn)
86
if (arm_feature(env, ARM_FEATURE_V5)) {
669
static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn)
87
set_feature(env, ARM_FEATURE_V4T);
670
{
671
gen_helper_gvec_mem_scatter *fn = NULL;
672
+ int be = s->be_data == MO_BE;
673
TCGv_i64 imm;
674
675
if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
676
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn)
677
678
switch (a->esz) {
679
case MO_32:
680
- fn = gather_load_fn32[a->ff][0][a->u][a->msz];
681
+ fn = gather_load_fn32[be][a->ff][0][a->u][a->msz];
682
break;
683
case MO_64:
684
- fn = gather_load_fn64[a->ff][2][a->u][a->msz];
685
+ fn = gather_load_fn64[be][a->ff][2][a->u][a->msz];
686
break;
687
}
88
}
688
assert(fn != NULL);
89
- if (arm_feature(env, ARM_FEATURE_M)) {
90
- set_feature(env, ARM_FEATURE_THUMB_DIV);
91
- }
92
- if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
93
- set_feature(env, ARM_FEATURE_THUMB_DIV);
94
- }
95
if (arm_feature(env, ARM_FEATURE_VFP4)) {
96
set_feature(env, ARM_FEATURE_VFP3);
97
set_feature(env, ARM_FEATURE_VFP_FP16);
98
@@ -XXX,XX +XXX,XX @@ static void cortex_r5_initfn(Object *obj)
99
ARMCPU *cpu = ARM_CPU(obj);
100
101
set_feature(&cpu->env, ARM_FEATURE_V7);
102
- set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV);
103
- set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
104
set_feature(&cpu->env, ARM_FEATURE_V7MP);
105
set_feature(&cpu->env, ARM_FEATURE_PMSA);
106
cpu->midr = 0x411fc153; /* r1p3 */
107
diff --git a/target/arm/translate.c b/target/arm/translate.c
108
index XXXXXXX..XXXXXXX 100644
109
--- a/target/arm/translate.c
110
+++ b/target/arm/translate.c
111
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
112
case 1:
113
case 3:
114
/* SDIV, UDIV */
115
- if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
116
+ if (!dc_isar_feature(arm_div, s)) {
117
goto illegal_op;
118
}
119
if (((insn >> 5) & 7) || (rd != 15)) {
120
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
121
tmp2 = load_reg(s, rm);
122
if ((op & 0x50) == 0x10) {
123
/* sdiv, udiv */
124
- if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
125
+ if (!dc_isar_feature(thumb_div, s)) {
126
goto illegal_op;
127
}
128
if (op & 0x20)
--
2.19.0

--
2.19.1

From: Richard Henderson <richard.henderson@linaro.org>

SVE vector length can change when changing EL, or when writing
to one of the ZCR_ELn registers.

For correctness, our implementation requires that predicate bits
that are inaccessible are never set. Which means noticing length
changes and zeroing the appropriate register bits.

Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181005175350.30752-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h       |   4 ++
 target/arm/cpu64.c     |  42 -------
 target/arm/helper.c    | 133 +++++++++++++++++++++++++++++++++----
 target/arm/op_helper.c |   1 +
 4 files changed, 125 insertions(+), 55 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Having V6 alone imply jazelle was wrong for cortex-m0.
Change to an assertion for V6 & !M.

This was harmless, because the only place we tested ARM_FEATURE_JAZELLE
was for 'bxj' in disas_arm(), which is unreachable for M-profile cores.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-6-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h       |  6 +++++-
 target/arm/cpu.c       | 17 ++++++++++++++---
 target/arm/translate.c |  2 +-
 3 files changed, 20 insertions(+), 5 deletions(-)
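To see the predicate-zeroing arithmetic the first commit message describes: each quadword (VQ unit, 16 bytes) of vector length owns 16 predicate bits, so four VQ units fill one 64-bit predicate word. The standalone sketch below, plain C and illustrative only, prints the mask computation that aarch64_sve_narrow_vq() in the diff applies to the first partially-kept word; all later words are cleared entirely.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (unsigned vq = 1; vq <= 8; vq++) {
        /* Keep 16 bits per surviving VQ unit within word vq/4;
         * a vq that is a multiple of 4 keeps nothing in that word.
         */
        uint64_t pmask = (vq & 3) ? ~(-1ULL << (16 * (vq & 3))) : 0;
        printf("vq=%u: word %u masked with 0x%016" PRIx64 "\n",
               vq, vq / 4, pmask);
    }
    return 0;
}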
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
23
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
24
--- a/target/arm/cpu.h
22
--- a/target/arm/cpu.h
25
+++ b/target/arm/cpu.h
23
+++ b/target/arm/cpu.h
26
@@ -XXX,XX +XXX,XX @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
24
@@ -XXX,XX +XXX,XX @@ enum arm_features {
27
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
25
ARM_FEATURE_PMU, /* has PMU support */
28
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
26
ARM_FEATURE_VBAR, /* has cp15 VBAR */
29
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
27
ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
30
+void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el);
28
- ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
31
+#else
29
ARM_FEATURE_SVE, /* has Scalable Vector Extension */
32
+static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
30
ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
33
+static inline void aarch64_sve_change_el(CPUARMState *env, int o, int n) { }
31
ARM_FEATURE_M_MAIN, /* M profile Main Extension */
34
#endif
32
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_arm_div(const ARMISARegisters *id)
35
33
return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
36
target_ulong do_arm_semihosting(CPUARMState *env);
37
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/cpu64.c
40
+++ b/target/arm/cpu64.c
41
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_register_types(void)
42
}
34
}
43
35
44
type_init(aarch64_cpu_register_types)
36
+static inline bool isar_feature_jazelle(const ARMISARegisters *id)
45
-
46
-/* The manual says that when SVE is enabled and VQ is widened the
47
- * implementation is allowed to zero the previously inaccessible
48
- * portion of the registers. The corollary to that is that when
49
- * SVE is enabled and VQ is narrowed we are also allowed to zero
50
- * the now inaccessible portion of the registers.
51
- *
52
- * The intent of this is that no predicate bit beyond VQ is ever set.
53
- * Which means that some operations on predicate registers themselves
54
- * may operate on full uint64_t or even unrolled across the maximum
55
- * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
56
- * may well be cheaper than conditionals to restrict the operation
57
- * to the relevant portion of a uint16_t[16].
58
- *
59
- * TODO: Need to call this for changes to the real system registers
60
- * and EL state changes.
61
- */
62
-void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
63
-{
64
- int i, j;
65
- uint64_t pmask;
66
-
67
- assert(vq >= 1 && vq <= ARM_MAX_VQ);
68
- assert(vq <= arm_env_get_cpu(env)->sve_max_vq);
69
-
70
- /* Zap the high bits of the zregs. */
71
- for (i = 0; i < 32; i++) {
72
- memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
73
- }
74
-
75
- /* Zap the high bits of the pregs and ffr. */
76
- pmask = 0;
77
- if (vq & 3) {
78
- pmask = ~(-1ULL << (16 * (vq & 3)));
79
- }
80
- for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
81
- for (i = 0; i < 17; ++i) {
82
- env->vfp.pregs[i].p[j] &= pmask;
83
- }
84
- pmask = 0;
85
- }
86
-}
87
diff --git a/target/arm/helper.c b/target/arm/helper.c
88
index XXXXXXX..XXXXXXX 100644
89
--- a/target/arm/helper.c
90
+++ b/target/arm/helper.c
91
@@ -XXX,XX +XXX,XX @@ static int sve_exception_el(CPUARMState *env, int el)
92
return 0;
93
}
94
95
+/*
96
+ * Given that SVE is enabled, return the vector length for EL.
97
+ */
98
+static uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
99
+{
37
+{
100
+ ARMCPU *cpu = arm_env_get_cpu(env);
38
+ return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
101
+ uint32_t zcr_len = cpu->sve_max_vq - 1;
102
+
103
+ if (el <= 1) {
104
+ zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
105
+ }
106
+ if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
107
+ zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
108
+ }
109
+ if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
110
+ zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
111
+ }
112
+ return zcr_len;
113
+}
39
+}
114
+
40
+
115
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
41
static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
116
uint64_t value)
117
{
42
{
118
+ int cur_el = arm_current_el(env);
43
return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
119
+ int old_len = sve_zcr_len_for_el(env, cur_el);
44
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
120
+ int new_len;
45
index XXXXXXX..XXXXXXX 100644
121
+
46
--- a/target/arm/cpu.c
122
/* Bits other than [3:0] are RAZ/WI. */
47
+++ b/target/arm/cpu.c
123
raw_write(env, ri, value & 0xf);
48
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
49
}
50
if (arm_feature(env, ARM_FEATURE_V6)) {
51
set_feature(env, ARM_FEATURE_V5);
52
- set_feature(env, ARM_FEATURE_JAZELLE);
53
if (!arm_feature(env, ARM_FEATURE_M)) {
54
+ assert(cpu_isar_feature(jazelle, cpu));
55
set_feature(env, ARM_FEATURE_AUXCR);
56
}
57
}
58
@@ -XXX,XX +XXX,XX @@ static void arm926_initfn(Object *obj)
59
set_feature(&cpu->env, ARM_FEATURE_VFP);
60
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
61
set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
62
- set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
63
cpu->midr = 0x41069265;
64
cpu->reset_fpsid = 0x41011090;
65
cpu->ctr = 0x1dd20d2;
66
cpu->reset_sctlr = 0x00090078;
124
+
67
+
125
+ /*
68
+ /*
126
+ * Because we arrived here, we know both FP and SVE are enabled;
69
+ * ARMv5 does not have the ID_ISAR registers, but we can still
127
+ * otherwise we would have trapped access to the ZCR_ELn register.
70
+ * set the field to indicate Jazelle support within QEMU.
128
+ */
71
+ */
129
+ new_len = sve_zcr_len_for_el(env, cur_el);
72
+ cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
130
+ if (new_len < old_len) {
131
+ aarch64_sve_narrow_vq(env, new_len + 1);
132
+ }
133
}
73
}
134
74
135
static const ARMCPRegInfo zcr_el1_reginfo = {
75
static void arm946_initfn(Object *obj)
136
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
76
@@ -XXX,XX +XXX,XX @@ static void arm1026_initfn(Object *obj)
137
unsigned int new_el = env->exception.target_el;
77
set_feature(&cpu->env, ARM_FEATURE_AUXCR);
138
target_ulong addr = env->cp15.vbar_el[new_el];
78
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
139
unsigned int new_mode = aarch64_pstate_mode(new_el, true);
79
set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
140
+ unsigned int cur_el = arm_current_el(env);
80
- set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
141
81
cpu->midr = 0x4106a262;
142
- if (arm_current_el(env) < new_el) {
82
cpu->reset_fpsid = 0x410110a0;
143
+ aarch64_sve_change_el(env, cur_el, new_el);
83
cpu->ctr = 0x1dd20d2;
144
+
84
cpu->reset_sctlr = 0x00090078;
145
+ if (cur_el < new_el) {
85
cpu->reset_auxcr = 1;
146
/* Entry vector offset depends on whether the implemented EL
147
* immediately lower than the target level is using AArch32 or AArch64
148
*/
149
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
150
if (sve_el != 0 && fp_el == 0) {
151
zcr_len = 0;
152
} else {
153
- ARMCPU *cpu = arm_env_get_cpu(env);
154
-
155
- zcr_len = cpu->sve_max_vq - 1;
156
- if (current_el <= 1) {
157
- zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
158
- }
159
- if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
160
- zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
161
- }
162
- if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
163
- zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
164
- }
165
+ zcr_len = sve_zcr_len_for_el(env, current_el);
166
}
167
flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;
168
flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
169
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
170
*pflags = flags;
171
*cs_base = 0;
172
}
173
+
174
+#ifdef TARGET_AARCH64
175
+/*
176
+ * The manual says that when SVE is enabled and VQ is widened the
177
+ * implementation is allowed to zero the previously inaccessible
178
+ * portion of the registers. The corollary to that is that when
179
+ * SVE is enabled and VQ is narrowed we are also allowed to zero
180
+ * the now inaccessible portion of the registers.
181
+ *
182
+ * The intent of this is that no predicate bit beyond VQ is ever set.
183
+ * Which means that some operations on predicate registers themselves
184
+ * may operate on full uint64_t or even unrolled across the maximum
185
+ * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
186
+ * may well be cheaper than conditionals to restrict the operation
187
+ * to the relevant portion of a uint16_t[16].
188
+ */
189
+void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
190
+{
191
+ int i, j;
192
+ uint64_t pmask;
193
+
194
+ assert(vq >= 1 && vq <= ARM_MAX_VQ);
195
+ assert(vq <= arm_env_get_cpu(env)->sve_max_vq);
196
+
197
+ /* Zap the high bits of the zregs. */
198
+ for (i = 0; i < 32; i++) {
199
+ memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
200
+ }
201
+
202
+ /* Zap the high bits of the pregs and ffr. */
203
+ pmask = 0;
204
+ if (vq & 3) {
205
+ pmask = ~(-1ULL << (16 * (vq & 3)));
206
+ }
207
+ for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
208
+ for (i = 0; i < 17; ++i) {
209
+ env->vfp.pregs[i].p[j] &= pmask;
210
+ }
211
+ pmask = 0;
212
+ }
213
+}
214
+
215
+/*
216
+ * Notice a change in SVE vector size when changing EL.
217
+ */
218
+void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el)
219
+{
220
+ int old_len, new_len;
221
+
222
+ /* Nothing to do if no SVE. */
223
+ if (!arm_feature(env, ARM_FEATURE_SVE)) {
224
+ return;
225
+ }
226
+
227
+ /* Nothing to do if FP is disabled in either EL. */
228
+ if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
229
+ return;
230
+ }
231
+
86
+
232
+ /*
87
+ /*
233
+ * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
88
+ * ARMv5 does not have the ID_ISAR registers, but we can still
234
+ * at ELx, or not available because the EL is in AArch32 state, then
89
+ * set the field to indicate Jazelle support within QEMU.
235
+ * for all purposes other than a direct read, the ZCR_ELx.LEN field
236
+ * has an effective value of 0".
237
+ *
238
+ * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
239
+ * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
240
+ * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
241
+ * we already have the correct register contents when encountering the
242
+ * vq0->vq0 transition between EL0->EL1.
243
+ */
90
+ */
244
+ old_len = (arm_el_is_aa64(env, old_el) && !sve_exception_el(env, old_el)
91
+ cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
245
+ ? sve_zcr_len_for_el(env, old_el) : 0);
246
+ new_len = (arm_el_is_aa64(env, new_el) && !sve_exception_el(env, new_el)
247
+ ? sve_zcr_len_for_el(env, new_el) : 0);
248
+
92
+
249
+ /* When changing vector length, clear inaccessible state. */
93
{
250
+ if (new_len < old_len) {
94
/* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
251
+ aarch64_sve_narrow_vq(env, new_len + 1);
95
ARMCPRegInfo ifar = {
252
+ }
96
diff --git a/target/arm/translate.c b/target/arm/translate.c
253
+}
254
+#endif
255
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
256
index XXXXXXX..XXXXXXX 100644
97
index XXXXXXX..XXXXXXX 100644
257
--- a/target/arm/op_helper.c
98
--- a/target/arm/translate.c
258
+++ b/target/arm/op_helper.c
99
+++ b/target/arm/translate.c
259
@@ -XXX,XX +XXX,XX @@ void HELPER(exception_return)(CPUARMState *env)
100
@@ -XXX,XX +XXX,XX @@
260
"AArch64 EL%d PC 0x%" PRIx64 "\n",
101
#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
261
cur_el, new_el, env->pc);
102
/* currently all emulated v5 cores are also v5TE, so don't bother */
262
}
103
#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
263
+ aarch64_sve_change_el(env, cur_el, new_el);
104
-#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
264
105
+#define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
265
qemu_mutex_lock_iothread();
106
#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
266
arm_call_el_change_hook(arm_env_get_cpu(env));
107
#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
108
#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
267
--
2.19.0

--
2.19.1

From: Richard Henderson <richard.henderson@linaro.org>

Given that the only field defined for this new register may only
be 0, we don't actually need to change anything except the name.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181005175350.30752-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-7-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h     | 6 +++++-
 linux-user/elfload.c | 2 +-
 target/arm/cpu.c     | 4 ----
 target/arm/helper.c  | 2 +-
 target/arm/machine.c | 3 +--
 5 files changed, 8 insertions(+), 9 deletions(-)
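The t32ee conversion below is written against the series' cpu_isar_feature() forwarding macro, which is defined in an earlier patch not shown here. The following self-contained sketch shows one plausible shape for that pattern; the macro body, the FakeCPU scaffolding, and the assumed T32EE field position in ID_ISAR3[31:28] are all illustrative assumptions, not the series' exact code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-CPU copy of the ID registers. */
typedef struct {
    uint32_t id_isar3;
} ARMISARegisters;

typedef struct {
    ARMISARegisters isar;
} FakeCPU;

/* T32EE (ThumbEE) is assumed here to live in ID_ISAR3[31:28]. */
static inline bool isar_feature_t32ee(const ARMISARegisters *id)
{
    return ((id->id_isar3 >> 28) & 0xf) != 0;
}

/* One plausible shape for the forwarding macro. */
#define cpu_isar_feature(feat, cpu) isar_feature_##feat(&(cpu)->isar)

int main(void)
{
    FakeCPU cpu = { .isar = { .id_isar3 = 0x10000000 } };
    printf("t32ee: %d\n", cpu_isar_feature(t32ee, &cpu));
    return 0;
}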
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ enum arm_features {
21
ARM_FEATURE_NEON,
22
ARM_FEATURE_M, /* Microcontroller profile. */
23
ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
24
- ARM_FEATURE_THUMB2EE,
25
ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */
26
ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */
27
ARM_FEATURE_V4T,
28
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_jazelle(const ARMISARegisters *id)
29
return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
30
}
31
32
+static inline bool isar_feature_t32ee(const ARMISARegisters *id)
33
+{
34
+ return FIELD_EX32(id->id_isar3, ID_ISAR3, T32EE) != 0;
35
+}
36
+
37
static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
38
{
39
return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
40
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/linux-user/elfload.c
43
+++ b/linux-user/elfload.c
44
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
45
GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
46
GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
47
GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
48
- GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
49
+ GET_FEATURE_ID(t32ee, ARM_HWCAP_ARM_THUMBEE);
50
GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
51
GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
52
GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
53
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/arm/cpu.c
56
+++ b/target/arm/cpu.c
57
@@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj)
58
set_feature(&cpu->env, ARM_FEATURE_V7);
59
set_feature(&cpu->env, ARM_FEATURE_VFP3);
60
set_feature(&cpu->env, ARM_FEATURE_NEON);
61
- set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
62
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
63
set_feature(&cpu->env, ARM_FEATURE_EL3);
64
cpu->midr = 0x410fc080;
65
@@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj)
66
set_feature(&cpu->env, ARM_FEATURE_VFP3);
67
set_feature(&cpu->env, ARM_FEATURE_VFP_FP16);
68
set_feature(&cpu->env, ARM_FEATURE_NEON);
69
- set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
70
set_feature(&cpu->env, ARM_FEATURE_EL3);
71
/* Note that A9 supports the MP extensions even for
72
* A9UP and single-core A9MP (which are both different
73
@@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj)
74
set_feature(&cpu->env, ARM_FEATURE_V7VE);
75
set_feature(&cpu->env, ARM_FEATURE_VFP4);
76
set_feature(&cpu->env, ARM_FEATURE_NEON);
77
- set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
78
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
79
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
80
set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
81
@@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj)
82
set_feature(&cpu->env, ARM_FEATURE_V7VE);
83
set_feature(&cpu->env, ARM_FEATURE_VFP4);
84
set_feature(&cpu->env, ARM_FEATURE_NEON);
85
- set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
86
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
87
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
88
set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
89
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
90
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
91
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
92
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
93
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
20
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
94
define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
21
.access = PL1_R, .type = ARM_CP_CONST,
95
define_arm_cp_regs(cpu, vmsa_cp_reginfo);
22
.resetvalue = 0 },
96
}
23
- { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
97
- if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
24
+ { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
98
+ if (cpu_isar_feature(t32ee, cpu)) {
25
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
99
define_arm_cp_regs(cpu, t2ee_cp_reginfo);
26
.access = PL1_R, .type = ARM_CP_CONST,
100
}
27
+ /* At present, only SVEver == 0 is defined anyway. */
101
if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
28
.resetvalue = 0 },
102
diff --git a/target/arm/machine.c b/target/arm/machine.c
29
{ .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
103
index XXXXXXX..XXXXXXX 100644
30
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
104
--- a/target/arm/machine.c
105
+++ b/target/arm/machine.c
106
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m = {
107
static bool thumb2ee_needed(void *opaque)
108
{
109
ARMCPU *cpu = opaque;
110
- CPUARMState *env = &cpu->env;
111
112
- return arm_feature(env, ARM_FEATURE_THUMB2EE);
113
+ return cpu_isar_feature(t32ee, cpu);
114
}
115
116
static const VMStateDescription vmstate_thumb2ee = {
31
--
117
--
32
2.19.0
118
2.19.1
33
119
34
120
From: Richard Henderson <richard.henderson@linaro.org>

Use the existing helpers to determine if (1) the fpu is enabled,
(2) sve state is enabled, and (3) the current sve vector length.

Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181005175350.30752-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h           |  4 ++++
 target/arm/helper.c        |  6 +++---
 target/arm/translate-a64.c |  8 ++++++--
 3 files changed, 13 insertions(+), 5 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-8-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h            | 16 +++++++++++++++-
 linux-user/aarch64/signal.c |  4 ++--
 linux-user/elfload.c        |  2 +-
 linux-user/syscall.c        | 10 ++++++----
 target/arm/cpu64.c          |  5 ++++-
 target/arm/helper.c         |  9 ++++++---
 target/arm/machine.c        |  3 +--
 target/arm/translate-a64.c  |  4 ++--
 8 files changed, 37 insertions(+), 16 deletions(-)
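Since the first patch below exports sve_zcr_len_for_el(), a standalone restatement of its arithmetic may help: the effective vector length (expressed as LEN, i.e. vq minus one) is the CPU maximum clamped by ZCR_ELx.LEN at the current EL and every higher implemented EL. The sketch below is plain C with illustrative names, not the QEMU function itself.

#include <stdint.h>
#include <stdio.h>

/* Minimum of ZCR_ELx.LEN over the current and all higher implemented
 * ELs, capped by the CPU maximum (max_vq - 1).
 */
static uint32_t effective_zcr_len(uint32_t max_vq, int el,
                                  const uint32_t zcr[4],
                                  int have_el2, int have_el3)
{
    uint32_t len = max_vq - 1;

    if (el <= 1 && (zcr[1] & 0xf) < len) {
        len = zcr[1] & 0xf;
    }
    if (el < 2 && have_el2 && (zcr[2] & 0xf) < len) {
        len = zcr[2] & 0xf;
    }
    if (el < 3 && have_el3 && (zcr[3] & 0xf) < len) {
        len = zcr[3] & 0xf;
    }
    return len;
}

int main(void)
{
    uint32_t zcr[4] = { 0, 0, 3, 0 };  /* ZCR_EL1.LEN=0, ZCR_EL2.LEN=3 */

    /* At EL1 the EL1 setting clamps to len=0 (vq=1); at EL2 only the
     * EL2 setting applies, so len=3 (vq=4).
     */
    printf("EL1: len=%u\n", effective_zcr_len(4, 1, zcr, 1, 0));
    printf("EL2: len=%u\n", effective_zcr_len(4, 2, zcr, 1, 0));
    return 0;
}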
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
21
--- a/target/arm/cpu.h
20
+++ b/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
21
@@ -XXX,XX +XXX,XX @@ target_ulong do_arm_semihosting(CPUARMState *env);
23
@@ -XXX,XX +XXX,XX @@ FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
22
void aarch64_sync_32_to_64(CPUARMState *env);
24
FIELD(ID_AA64ISAR1, SB, 36, 4)
23
void aarch64_sync_64_to_32(CPUARMState *env);
25
FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
24
26
25
+int fp_exception_el(CPUARMState *env, int cur_el);
27
+FIELD(ID_AA64PFR0, EL0, 0, 4)
26
+int sve_exception_el(CPUARMState *env, int cur_el);
28
+FIELD(ID_AA64PFR0, EL1, 4, 4)
27
+uint32_t sve_zcr_len_for_el(CPUARMState *env, int el);
29
+FIELD(ID_AA64PFR0, EL2, 8, 4)
28
+
30
+FIELD(ID_AA64PFR0, EL3, 12, 4)
29
static inline bool is_a64(CPUARMState *env)
31
+FIELD(ID_AA64PFR0, FP, 16, 4)
30
{
32
+FIELD(ID_AA64PFR0, ADVSIMD, 20, 4)
31
return env->aarch64;
33
+FIELD(ID_AA64PFR0, GIC, 24, 4)
34
+FIELD(ID_AA64PFR0, RAS, 28, 4)
35
+FIELD(ID_AA64PFR0, SVE, 32, 4)
36
+
37
QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
38
39
/* If adding a feature bit which corresponds to a Linux ELF
40
@@ -XXX,XX +XXX,XX @@ enum arm_features {
41
ARM_FEATURE_PMU, /* has PMU support */
42
ARM_FEATURE_VBAR, /* has cp15 VBAR */
43
ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
44
- ARM_FEATURE_SVE, /* has Scalable Vector Extension */
45
ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
46
ARM_FEATURE_M_MAIN, /* M profile Main Extension */
47
};
48
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
49
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
50
}
51
52
+static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
53
+{
54
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
55
+}
56
+
57
/*
58
* Forward to the above feature tests given an ARMCPU pointer.
59
*/
60
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/linux-user/aarch64/signal.c
63
+++ b/linux-user/aarch64/signal.c
64
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
65
break;
66
67
case TARGET_SVE_MAGIC:
68
- if (arm_feature(env, ARM_FEATURE_SVE)) {
69
+ if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
70
vq = (env->vfp.zcr_el[1] & 0xf) + 1;
71
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
72
if (!sve && size == sve_size) {
73
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
74
&layout);
75
76
/* SVE state needs saving only if it exists. */
77
- if (arm_feature(env, ARM_FEATURE_SVE)) {
78
+ if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
79
vq = (env->vfp.zcr_el[1] & 0xf) + 1;
80
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
81
sve_ofs = alloc_sigframe_space(sve_size, &layout);
82
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
83
index XXXXXXX..XXXXXXX 100644
84
--- a/linux-user/elfload.c
85
+++ b/linux-user/elfload.c
86
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
87
GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
88
GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
89
GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
90
- GET_FEATURE(ARM_FEATURE_SVE, ARM_HWCAP_A64_SVE);
91
+ GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
92
93
#undef GET_FEATURE
94
#undef GET_FEATURE_ID
95
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
 * even though the current architectural maximum is VQ=16.
 */
 ret = -TARGET_EINVAL;
- if (arm_feature(cpu_env, ARM_FEATURE_SVE)
+ if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
 CPUARMState *env = cpu_env;
 ARMCPU *cpu = arm_env_get_cpu(env);
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
 return ret;
 case TARGET_PR_SVE_GET_VL:
 ret = -TARGET_EINVAL;
- if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
- CPUARMState *env = cpu_env;
- ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
+ {
+ ARMCPU *cpu = arm_env_get_cpu(cpu_env);
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
+ }
 }
 return ret;
 #endif /* AARCH64 */
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
 t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
 cpu->isar.id_aa64isar1 = t;

+ t = cpu->isar.id_aa64pfr0;
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
+ cpu->isar.id_aa64pfr0 = t;
+
 /* Replicate the same data to the 32-bit id registers. */
 u = cpu->isar.id_isar5;
 u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
 * present in either.
 */
 set_feature(&cpu->env, ARM_FEATURE_V8_FP16);
- set_feature(&cpu->env, ARM_FEATURE_SVE);
 /* For usermode -cpu max we can use a larger and more efficient DCZ
 * blocksize since we don't have to follow what the hardware does.
 */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
 define_one_arm_cp_reg(cpu, &sctlr);
 }

- if (arm_feature(env, ARM_FEATURE_SVE)) {
+ if (cpu_isar_feature(aa64_sve, cpu)) {
 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
 if (arm_feature(env, ARM_FEATURE_EL2)) {
 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
 uint32_t flags;

 if (is_a64(env)) {
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
 *pc = env->pc;
 flags = ARM_TBFLAG_AARCH64_STATE_MASK;
 /* Get control bits for tagged addresses */
 flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
 flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);

- if (arm_feature(env, ARM_FEATURE_SVE)) {
+ if (cpu_isar_feature(aa64_sve, cpu)) {
 int sve_el = sve_exception_el(env, current_el);
 uint32_t zcr_len;

@@ -XXX,XX +XXX,XX @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
 void aarch64_sve_change_el(CPUARMState *env, int old_el,
 int new_el, bool el0_a64)
 {
+ ARMCPU *cpu = arm_env_get_cpu(env);
 int old_len, new_len;
 bool old_a64, new_a64;

 /* Nothing to do if no SVE. */
- if (!arm_feature(env, ARM_FEATURE_SVE)) {
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
 return;
 }

diff --git a/target/arm/machine.c b/target/arm/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_iwmmxt = {
 static bool sve_needed(void *opaque)
 {
 ARMCPU *cpu = opaque;
- CPUARMState *env = &cpu->env;

- return arm_feature(env, ARM_FEATURE_SVE);
+ return cpu_isar_feature(aa64_sve, cpu);
 }

 /* The first two words of each Zreg is stored in VFP state. */
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
 cpu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
 vfp_get_fpcr(env), vfp_get_fpsr(env));

- if (arm_feature(env, ARM_FEATURE_SVE) && sve_exception_el(env, el) == 0) {
+ if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
 int j, zcr_len = sve_zcr_len_for_el(env, el);

 for (i = 0; i <= FFR_PRED_NUM; i++) {
 bool eol;
@@ -XXX,XX +XXX,XX @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
 unallocated_encoding(s);
 break;
 case 0x2:
- if (!arm_dc_feature(s, ARM_FEATURE_SVE) || !disas_sve(s, insn)) {
+ if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
 unallocated_encoding(s);
 }
 break;
--
2.19.1


diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
-static int sve_exception_el(CPUARMState *env, int el)
+int sve_exception_el(CPUARMState *env, int el)
 {
 #ifndef CONFIG_USER_ONLY
 if (el <= 1) {
@@ -XXX,XX +XXX,XX @@ static int sve_exception_el(CPUARMState *env, int el)
 /*
 * Given that SVE is enabled, return the vector length for EL.
 */
-static uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
+uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
 {
 ARMCPU *cpu = arm_env_get_cpu(env);
 uint32_t zcr_len = cpu->sve_max_vq - 1;
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
 /* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
-static int fp_exception_el(CPUARMState *env, int cur_el)
+int fp_exception_el(CPUARMState *env, int cur_el)
 {
 #ifndef CONFIG_USER_ONLY
 int fpen;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
 cpu_fprintf(f, "\n");
 return;
 }
+ if (fp_exception_el(env, el) != 0) {
+ cpu_fprintf(f, " FPU disabled\n");
+ return;
+ }
 cpu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
 vfp_get_fpcr(env), vfp_get_fpsr(env));

- if (arm_feature(env, ARM_FEATURE_SVE)) {
- int j, zcr_len = env->vfp.zcr_el[1] & 0xf; /* fix for system mode */
+ if (arm_feature(env, ARM_FEATURE_SVE) && sve_exception_el(env, el) == 0) {
+ int j, zcr_len = sve_zcr_len_for_el(env, el);

 for (i = 0; i <= FFR_PRED_NUM; i++) {
 bool eol;
--
2.19.0
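As a standalone illustration of what these cpu_isar_feature(aa64_sve, ...) tests boil down to, here is a minimal sketch in plain C. The extract_field() and sketch_aa64_sve() helpers are invented for this example (they are not QEMU's API); the field position, ID_AA64PFR0 bits [35:32] for SVE, is the architectural one the FIELD_DP64/FIELD_EX64 macros encode.

/* Sketch only: an isar feature test is just a field extract on an
 * ID register value, compared against the "implemented" encoding. */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t extract_field(uint64_t reg, unsigned shift,
                                     unsigned width)
{
    return (reg >> shift) & ((1ull << width) - 1);
}

static inline int sketch_aa64_sve(uint64_t id_aa64pfr0)
{
    /* ID_AA64PFR0.SVE is bits [35:32]; nonzero means SVE is present */
    return extract_field(id_aa64pfr0, 32, 4) != 0;
}

int main(void)
{
    uint64_t pfr0 = 1ull << 32;   /* pretend the CPU advertises SVE = 1 */
    printf("SVE present: %d\n", sketch_aa64_sve(pfr0));
    return 0;
}

The point of the conversion above is that this one ID-register field, not a parallel ARM_FEATURE_* flag, becomes the single source of truth for both the translator and the guest-visible ID registers.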
Define EXCP_STKOF, and arrange for it to cause us to take
a UsageFault with CFSR.STKOF set.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002163556.10279-3-peter.maydell@linaro.org
---
target/arm/cpu.h | 2 ++
target/arm/helper.c | 5 +++++
2 files changed, 7 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@
 #define EXCP_SEMIHOST 16 /* semihosting call */
 #define EXCP_NOCP 17 /* v7M NOCP UsageFault */
 #define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */
+#define EXCP_STKOF 19 /* v8M STKOF UsageFault */
 /* NB: add new EXCP_ defines to the array in arm_log_exception() too */

 #define ARMV7M_EXCP_RESET 1
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1)
 FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
 FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
 FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
+FIELD(V7M_CFSR, STKOF, 16 + 4, 1)
 FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
 FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void arm_log_exception(int idx)
 [EXCP_SEMIHOST] = "Semihosting call",
 [EXCP_NOCP] = "v7M NOCP UsageFault",
 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
+ [EXCP_STKOF] = "v8M STKOF UsageFault",
 };

 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
 break;
+ case EXCP_STKOF:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+ break;
 case EXCP_SWI:
 /* The PC already points to the next instruction. */
 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
--
2.19.0


From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-9-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.h | 17 +++++++++++++++-
linux-user/elfload.c | 6 +-----
target/arm/cpu64.c | 16 ++++++++-------
target/arm/helper.c | 2 +-
target/arm/translate-a64.c | 40 +++++++++++++++++++-------------------
target/arm/translate.c | 6 +++---
6 files changed, 50 insertions(+), 37 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ enum arm_features {
 ARM_FEATURE_PMU, /* has PMU support */
 ARM_FEATURE_VBAR, /* has cp15 VBAR */
 ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
- ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
 ARM_FEATURE_M_MAIN, /* M profile Main Extension */
 };

@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
 return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
 }

+static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
+{
+ /*
+ * This is a placeholder for use by VCMA until the rest of
+ * the ARMv8.2-FP16 extension is implemented for aa32 mode.
+ * At which point we can properly set and check MVFR1.FPHP.
+ */
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
+}
+
 /*
 * 64-bit feature tests via id registers.
 */
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
 return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
 }

+static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
+{
+ /* We always set the AdvSIMD and FP fields identically wrt FP16. */
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
+}
+
 static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
 {
 return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
 hwcaps |= ARM_HWCAP_A64_ASIMD;

 /* probe for the extra features */
-#define GET_FEATURE(feat, hwcap) \
- do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
 #define GET_FEATURE_ID(feat, hwcap) \
 do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
 GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
 GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
 GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
- GET_FEATURE(ARM_FEATURE_V8_FP16,
- ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
+ GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
 GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
 GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
 GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
 GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
 GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);

-#undef GET_FEATURE
 #undef GET_FEATURE_ID

 return hwcaps;
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)

 t = cpu->isar.id_aa64pfr0;
 t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
+ t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
+ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
 cpu->isar.id_aa64pfr0 = t;

 /* Replicate the same data to the 32-bit id registers. */
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
 u = FIELD_DP32(u, ID_ISAR6, DP, 1);
 cpu->isar.id_isar6 = u;

-#ifdef CONFIG_USER_ONLY
- /* We don't set these in system emulation mode for the moment,
- * since we don't correctly set the ID registers to advertise them,
- * and in some cases they're only available in AArch64 and not AArch32,
- * whereas the architecture requires them to be present in both if
- * present in either.
+ /*
+ * FIXME: We do not yet support ARMv8.2-fp16 for AArch32 yet,
+ * so do not set MVFR1.FPHP. Strictly speaking this is not legal,
+ * but it is also not legal to enable SVE without support for FP16,
+ * and enabling SVE in system mode is more useful in the short term.
 */
- set_feature(&cpu->env, ARM_FEATURE_V8_FP16);
+
+#ifdef CONFIG_USER_ONLY
 /* For usermode -cpu max we can use a larger and more efficient DCZ
 * blocksize since we don't have to follow what the hardware does.
 */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
 uint32_t changed;

 /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
- if (!arm_feature(env, ARM_FEATURE_V8_FP16)) {
+ if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
 val &= ~FPCR_FZ16;
 }

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_fp_compare(DisasContext *s, uint32_t insn)
 break;
 case 3:
 size = MO_16;
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
 break;
 }
 /* fallthru */
@@ -XXX,XX +XXX,XX @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
 break;
 case 3:
 size = MO_16;
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
 break;
 }
 /* fallthru */
@@ -XXX,XX +XXX,XX @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
 break;
 case 3:
 sz = MO_16;
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
 break;
 }
 /* fallthru */
@@ -XXX,XX +XXX,XX @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
 handle_fp_1src_double(s, opcode, rd, rn);
 break;
 case 3:
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
 unallocated_encoding(s);
 return;
 }
@@ -XXX,XX +XXX,XX @@ static void disas_fp_2src(DisasContext *s, uint32_t insn)
 handle_fp_2src_double(s, opcode, rd, rn, rm);
 break;
 case 3:
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
 unallocated_encoding(s);
 return;
 }
@@ -XXX,XX +XXX,XX @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
 break;
 case 3:
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
 unallocated_encoding(s);
 return;
 }
@@ -XXX,XX +XXX,XX @@ static void disas_fp_imm(DisasContext *s, uint32_t insn)
 break;
 case 3:
 sz = MO_16;
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
 break;
 }
 /* fallthru */
@@ -XXX,XX +XXX,XX @@ static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
 case 1: /* float64 */
 break;
 case 3: /* float16 */
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
 break;
 }
 /* fallthru */
@@ -XXX,XX +XXX,XX @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
 break;
 case 0x6: /* 16-bit float, 32-bit int */
 case 0xe: /* 16-bit float, 64-bit int */
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
 break;
 }
 /* fallthru */
@@ -XXX,XX +XXX,XX @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
 case 1: /* float64 */
 break;
 case 3: /* float16 */
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
 break;
 }
 /* fallthru */
@@ -XXX,XX +XXX,XX @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
 */
 is_min = extract32(size, 1, 1);
 is_fp = true;
- if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!is_u && dc_isar_feature(aa64_fp16, s)) {
 size = 1;
 } else if (!is_u || !is_q || extract32(size, 0, 1)) {
 unallocated_encoding(s);
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)

 if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
 /* Check for FMOV (vector, immediate) - half-precision */
- if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) {
+ if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
 unallocated_encoding(s);
 return;
 }
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
 case 0x2f: /* FMINP */
 /* FP op, size[0] is 32 or 64 bit*/
 if (!u) {
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
 unallocated_encoding(s);
 return;
 } else {
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
 size = MO_32;
 } else if (immh & 2) {
 size = MO_16;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
 unallocated_encoding(s);
 return;
 }
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
 size = MO_32;
 } else if (immh & 0x2) {
 size = MO_16;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
 unallocated_encoding(s);
 return;
 }
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
 return;
 }

- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
 unallocated_encoding(s);
 }

@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
 TCGv_ptr fpst;
 bool pairwise = false;

- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
 unallocated_encoding(s);
 return;
 }
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
 case 0x1c: /* FCADD, #90 */
 case 0x1e: /* FCADD, #270 */
 if (size == 0
- || (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))
+ || (size == 1 && !dc_isar_feature(aa64_fp16, s))
 || (size == 3 && !is_q)) {
 unallocated_encoding(s);
 return;
@@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
 bool need_fpst = true;
 int rmode;

- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
 unallocated_encoding(s);
 return;
 }
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
 }
 break;
 }
- if (is_fp16 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
 unallocated_encoding(s);
 return;
 }
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
 int size = extract32(insn, 20, 1);
 data = extract32(insn, 23, 2); /* rot */
 if (!dc_isar_feature(aa32_vcma, s)
- || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
+ || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
 return 1;
 }
 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
 int size = extract32(insn, 20, 1);
 data = extract32(insn, 24, 1); /* rot */
 if (!dc_isar_feature(aa32_vcma, s)
- || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
+ || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
 return 1;
 }
 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
 return 1;
 }
 if (size == 0) {
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
 return 1;
 }
 /* For fp16, rm is just Vm, and index is M. */
--
2.19.1
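For the STKOF plumbing above, the only architectural state involved is one status bit: the UsageFault status byte occupies CFSR bits [31:16], and STKOF is UFSR bit 4, which is exactly what the FIELD(V7M_CFSR, STKOF, 16 + 4, 1) definition encodes. A toy model (not QEMU's code; the macro name is invented) of what taking EXCP_STKOF does to the CFSR:

#include <stdint.h>
#include <stdio.h>

#define CFSR_STKOF_MASK (1u << (16 + 4))   /* UFSR bit 4 within CFSR */

int main(void)
{
    uint32_t cfsr = 0;
    cfsr |= CFSR_STKOF_MASK;               /* what the EXCP_STKOF case does */
    printf("CFSR = 0x%08x (STKOF %s)\n", cfsr,
           (cfsr & CFSR_STKOF_MASK) ? "set" : "clear");
    return 0;
}

The pend-then-set ordering in the EXCP_STKOF case mirrors the other v7M/v8M UsageFault causes: the NVIC pend and the CFSR status update always travel together.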
Add code to insert calls to a helper function to do the stack
limit checking when we handle these forms of instruction
that write to SP:
 * ADD (SP plus immediate)
 * ADD (SP plus register)
 * SUB (SP minus immediate)
 * SUB (SP minus register)
 * MOV (register)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002163556.10279-5-peter.maydell@linaro.org
---
target/arm/helper.h | 2 ++
target/arm/internals.h | 14 ++++++++
target/arm/op_helper.c | 19 ++++++++++
target/arm/translate.c | 80 +++++++++++++++++++++++++++++++++++++-----
4 files changed, 106 insertions(+), 9 deletions(-)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(v7m_blxns, void, env, i32)

 DEF_HELPER_3(v7m_tt, i32, env, i32, i32)

+DEF_HELPER_2(v8m_stackcheck, void, env, i32)
+
 DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
 DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
 DEF_HELPER_2(get_cp_reg, i32, env, ptr)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline bool v7m_using_psp(CPUARMState *env)
 env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
 }

+/**
+ * v7m_sp_limit: Return SP limit for current CPU state
+ * Return the SP limit value for the current CPU security state
+ * and stack pointer.
+ */
+static inline uint32_t v7m_sp_limit(CPUARMState *env)
+{
+ if (v7m_using_psp(env)) {
+ return env->v7m.psplim[env->v7m.secure];
+ } else {
+ return env->v7m.msplim[env->v7m.secure];
+ }
+}
+
 #endif
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,

 #endif /* !defined(CONFIG_USER_ONLY) */

+void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
+{
+ /*
+ * Perform the v8M stack limit check for SP updates from translated code,
+ * raising an exception if the limit is breached.
+ */
+ if (newvalue < v7m_sp_limit(env)) {
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
+ /*
+ * Stack limit exceptions are a rare case, so rather than syncing
+ * PC/condbits before the call, we use cpu_restore_state() to
+ * get them right before raising the exception.
+ */
+ cpu_restore_state(cs, GETPC(), true);
+ raise_exception(env, EXCP_STKOF, 0, 1);
+ }
+}
+
 uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
 {
 uint32_t res = a + b;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
 tcg_temp_free_i32(var);
 }

+/*
+ * Variant of store_reg which applies v8M stack-limit checks before updating
+ * SP. If the check fails this will result in an exception being taken.
+ * We disable the stack checks for CONFIG_USER_ONLY because we have
+ * no idea what the stack limits should be in that case.
+ * If stack checking is not being done this just acts like store_reg().
+ */
+static void store_sp_checked(DisasContext *s, TCGv_i32 var)
+{
+#ifndef CONFIG_USER_ONLY
+ if (s->v8m_stackcheck) {
+ gen_helper_v8m_stackcheck(cpu_env, var);
+ }
+#endif
+ store_reg(s, 13, var);
+}
+
 /* Value extensions. */
 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
 goto illegal_op;
 tcg_temp_free_i32(tmp2);
- if (rd != 15) {
+ if (rd == 13 &&
+ ((op == 2 && rn == 15) ||
+ (op == 8 && rn == 13) ||
+ (op == 13 && rn == 13))) {
+ /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
+ store_sp_checked(s, tmp);
+ } else if (rd != 15) {
 store_reg(s, rd, tmp);
 } else {
 tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
 gen_jmp(s, s->pc + offset);
 }
 } else {
- /* Data processing immediate. */
+ /*
+ * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
+ * - Data-processing (modified immediate, plain binary immediate)
+ */
 if (insn & (1 << 25)) {
+ /*
+ * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
+ * - Data-processing (plain binary immediate)
+ */
 if (insn & (1 << 24)) {
 if (insn & (1 << 20))
 goto illegal_op;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
 tmp = tcg_temp_new_i32();
 tcg_gen_movi_i32(tmp, imm);
 }
+ store_reg(s, rd, tmp);
 } else {
 /* Add/sub 12-bit immediate. */
 if (rn == 15) {
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
 offset += imm;
 tmp = tcg_temp_new_i32();
 tcg_gen_movi_i32(tmp, offset);
+ store_reg(s, rd, tmp);
 } else {
 tmp = load_reg(s, rn);
 if (insn & (1 << 23))
 tcg_gen_subi_i32(tmp, tmp, imm);
 else
 tcg_gen_addi_i32(tmp, tmp, imm);
+ if (rn == 13 && rd == 13) {
+ /* ADD SP, SP, imm or SUB SP, SP, imm */
+ store_sp_checked(s, tmp);
+ } else {
+ store_reg(s, rd, tmp);
+ }
 }
 }
- store_reg(s, rd, tmp);
 }
 } else {
+ /*
+ * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
+ * - Data-processing (modified immediate)
+ */
 int shifter_out = 0;
 /* modified 12-bit immediate. */
 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
 goto illegal_op;
 tcg_temp_free_i32(tmp2);
 rd = (insn >> 8) & 0xf;
- if (rd != 15) {
+ if (rd == 13 && rn == 13
+ && (op == 8 || op == 13)) {
+ /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
+ store_sp_checked(s, tmp);
+ } else if (rd != 15) {
 store_reg(s, rd, tmp);
 } else {
 tcg_temp_free_i32(tmp);
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
 tmp2 = load_reg(s, rm);
 tcg_gen_add_i32(tmp, tmp, tmp2);
 tcg_temp_free_i32(tmp2);
- store_reg(s, rd, tmp);
+ if (rd == 13) {
+ /* ADD SP, SP, reg */
+ store_sp_checked(s, tmp);
+ } else {
+ store_reg(s, rd, tmp);
+ }
 break;
 case 1: /* cmp */
 tmp = load_reg(s, rd);
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
 break;
 case 2: /* mov/cpy */
 tmp = load_reg(s, rm);
- store_reg(s, rd, tmp);
+ if (rd == 13) {
+ /* MOV SP, reg */
+ store_sp_checked(s, tmp);
+ } else {
+ store_reg(s, rd, tmp);
+ }
 break;
 case 3:
 {
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
 break;

 case 10:
- /* add to high reg */
+ /*
+ * 0b1010_xxxx_xxxx_xxxx
+ * - Add PC/SP (immediate)
+ */
 rd = (insn >> 8) & 7;
 if (insn & (1 << 11)) {
 /* SP */
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
 op = (insn >> 8) & 0xf;
 switch (op) {
 case 0:
- /* adjust stack pointer */
+ /*
+ * 0b1011_0000_xxxx_xxxx
+ * - ADD (SP plus immediate)
+ * - SUB (SP minus immediate)
+ */
 tmp = load_reg(s, 13);
 val = (insn & 0x7f) * 4;
 if (insn & (1 << 7))
 val = -(int32_t)val;
 tcg_gen_addi_i32(tmp, tmp, val);
- store_reg(s, 13, tmp);
+ store_sp_checked(s, tmp);
 break;

 case 2: /* sign/zero extend. */
--
2.19.0


For AArch32, exception return happens through certain kinds
of CPSR write. We don't currently have any CPU_LOG_INT logging
of these events (unlike AArch64, where we log in the ERET
instruction). Add some suitable logging.

This will log exception returns like this:
Exception return from AArch32 hyp to usr PC 0x80100374

paralleling the existing logging in the exception_return
helper for AArch64 exception returns:
Exception return from AArch64 EL2 to AArch64 EL0 PC 0x8003045c
Exception return from AArch64 EL2 to AArch32 EL0 PC 0x8003045c

(Note that an AArch32 exception return can only be
AArch32->AArch32, never to AArch64.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181012144235.19646-2-peter.maydell@linaro.org
---
target/arm/internals.h | 18 ++++++++++++++++++
target/arm/helper.c | 10 ++++++++++
target/arm/translate.c | 7 +------
3 files changed, 29 insertions(+), 6 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline uint32_t v7m_sp_limit(CPUARMState *env)
 }
 }

+/**
+ * aarch32_mode_name(): Return name of the AArch32 CPU mode
+ * @psr: Program Status Register indicating CPU mode
+ *
+ * Returns, for debug logging purposes, a printable representation
+ * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
+ * the low bits of the specified PSR.
+ */
+static inline const char *aarch32_mode_name(uint32_t psr)
+{
+ static const char cpu_mode_names[16][4] = {
+ "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
+ "???", "???", "hyp", "und", "???", "???", "???", "sys"
+ };
+
+ return cpu_mode_names[psr & 0xf];
+}
+
 #endif
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
 mask |= CPSR_IL;
 val |= CPSR_IL;
 }
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Illegal AArch32 mode switch attempt from %s to %s\n",
+ aarch32_mode_name(env->uncached_cpsr),
+ aarch32_mode_name(val));
 } else {
+ qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
+ write_type == CPSRWriteExceptionReturn ?
+ "Exception return from AArch32" :
+ "AArch32 mode switch from",
+ aarch32_mode_name(env->uncached_cpsr),
+ aarch32_mode_name(val), env->regs[15]);
 switch_mode(env, val & CPSR_M);
 }
 }
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
 translator_loop(ops, &dc.base, cpu, tb);
 }

-static const char *cpu_mode_names[16] = {
- "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
- "???", "???", "hyp", "und", "???", "???", "???", "sys"
-};
-
 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
 int flags)
 {
@@ -XXX,XX +XXX,XX @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
 psr & CPSR_V ? 'V' : '-',
 psr & CPSR_T ? 'T' : 'A',
 ns_status,
- cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
+ aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
 }

 if (flags & CPU_DUMP_FPU) {
--
2.19.1
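The mode-name lookup in the logging patch is self-contained enough to demo on its own. This standalone copy mirrors the table from the patch; the main() harness and the sample PSR values are illustrative only. The M field of usr mode is 0b10000 and of hyp mode 0b11010, so masking with 0xf indexes "usr" and "hyp" respectively:

#include <stdint.h>
#include <stdio.h>

static const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };
    return cpu_mode_names[psr & 0xf];
}

int main(void)
{
    /* reproduces the example log line from the commit message */
    printf("Exception return from AArch32 %s to %s PC 0x%08x\n",
           aarch32_mode_name(0x1a), aarch32_mode_name(0x10), 0x80100374);
    return 0;
}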
We're going to want v7m_using_psp() in op_helper.c in the
next patch, so move it from helper.c to internals.h.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002163556.10279-4-peter.maydell@linaro.org
---
target/arm/internals.h | 16 ++++++++++++++++
target/arm/helper.c | 12 ------------
2 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
 */
 #define MEMOPIDX_SHIFT 8

+/**
+ * v7m_using_psp: Return true if using process stack pointer
+ * Return true if the CPU is currently using the process stack
+ * pointer, or false if it is using the main stack pointer.
+ */
+static inline bool v7m_using_psp(CPUARMState *env)
+{
+ /* Handler mode always uses the main stack; for thread mode
+ * the CONTROL.SPSEL bit determines the answer.
+ * Note that in v7M it is not possible to be in Handler mode with
+ * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
+ */
+ return !arm_v7m_is_handler_mode(env) &&
+ env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
+}
+
 #endif
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ pend_fault:
 return false;
 }

-/* Return true if we're using the process stack pointer (not the MSP) */
-static bool v7m_using_psp(CPUARMState *env)
-{
- /* Handler mode always uses the main stack; for thread mode
- * the CONTROL.SPSEL bit determines the answer.
- * Note that in v7M it is not possible to be in Handler mode with
- * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
- */
- return !arm_v7m_is_handler_mode(env) &&
- env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
-}
-
 /* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
--
2.19.0


The switch_mode() function is defined in target/arm/helper.c and used
only in that file and nowhere else, so we can make it file-local
rather than global.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181012144235.19646-3-peter.maydell@linaro.org
---
target/arm/internals.h | 1 -
target/arm/helper.c | 6 ++++--
2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline int bank_number(int mode)
 g_assert_not_reached();
 }

-void switch_mode(CPUARMState *, int);
 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
 void arm_translate_init(void);

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
 V8M_SAttributes *sattrs);
 #endif

+static void switch_mode(CPUARMState *env, int mode);
+
 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
 {
 int nregs;
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
 return 0;
 }

-void switch_mode(CPUARMState *env, int mode)
+static void switch_mode(CPUARMState *env, int mode)
 {
 ARMCPU *cpu = arm_env_get_cpu(env);

@@ -XXX,XX +XXX,XX @@ void aarch64_sync_64_to_32(CPUARMState *env)

 #else

-void switch_mode(CPUARMState *env, int mode)
+static void switch_mode(CPUARMState *env, int mode)
 {
 int old_mode;
 int i;
--
2.19.1
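The v7m_using_psp() rule being moved above is a two-input decision, which the following toy restates. The bool parameters are simplified stand-ins for QEMU's CPUARMState fields, purely for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool using_psp(bool handler_mode, bool control_spsel)
{
    /* handler mode always uses MSP; thread mode follows CONTROL.SPSEL */
    return !handler_mode && control_spsel;
}

int main(void)
{
    printf("thread, SPSEL=1  -> PSP? %d\n", using_psp(false, true)); /* 1 */
    printf("handler, SPSEL=1 -> PSP? %d\n", using_psp(true, true));  /* 0 */
    return 0;
}

The second case is why the comment insists on checking both inputs: v8M, unlike v7M, can legitimately sit in handler mode with CONTROL.SPSEL set, and must still use the main stack.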
The HCR.FB virtualization configuration register bit requests that
TLB maintenance, branch predictor invalidate-all and icache
invalidate-all operations performed in NS EL1 should be upgraded
from "local CPU only" to "broadcast within Inner Shareable domain".
For QEMU we NOP the branch predictor and icache operations, so
we only need to upgrade the TLB invalidates:
AArch32 TLBIALL, TLBIMVA, TLBIASID, DTLBIALL, DTLBIMVA, DTLBIASID,
ITLBIALL, ITLBIMVA, ITLBIASID, TLBIMVAA, TLBIMVAL, TLBIMVAAL
AArch64 TLBI VMALLE1, TLBI VAE1, TLBI ASIDE1, TLBI VAAE1,
TLBI VALE1, TLBI VAALE1

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181012144235.19646-4-peter.maydell@linaro.org
---
target/arm/helper.c | 191 +++++++++++++++++++++++++++-----------------
1 file changed, 116 insertions(+), 75 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 raw_write(env, ri, value);
 }

-static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate all (TLBIALL) */
- ARMCPU *cpu = arm_env_get_cpu(env);
-
- tlb_flush(CPU(cpu));
-}
-
-static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
- ARMCPU *cpu = arm_env_get_cpu(env);
-
- tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
-}
-
-static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate by ASID (TLBIASID) */
- ARMCPU *cpu = arm_env_get_cpu(env);
-
- tlb_flush(CPU(cpu));
-}
-
-static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
- ARMCPU *cpu = arm_env_get_cpu(env);
-
- tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
-}
-
 /* IS variants of TLB operations must affect all cores */
 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 uint64_t value)
@@ -XXX,XX +XXX,XX @@ static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 }

+/*
+ * Non-IS variants of TLB operations are upgraded to
+ * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
+ * force broadcast of these operations.
+ */
+static bool tlb_force_broadcast(CPUARMState *env)
+{
+ return (env->cp15.hcr_el2 & HCR_FB) &&
+ arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
+}
+
+static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate all (TLBIALL) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlbiall_is_write(env, NULL, value);
+ return;
+ }
+
+ tlb_flush(CPU(cpu));
+}
+
+static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlbimva_is_write(env, NULL, value);
+ return;
+ }
+
+ tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
+}
+
+static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by ASID (TLBIASID) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlbiasid_is_write(env, NULL, value);
+ return;
+ }
+
+ tlb_flush(CPU(cpu));
+}
+
+static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlbimvaa_is_write(env, NULL, value);
+ return;
+ }
+
+ tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
+}
+
 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
 uint64_t value)
 {
@@ -XXX,XX +XXX,XX @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
 * Page D4-1736 (DDI0487A.b)
 */

-static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = ENV_GET_CPU(env);
-
- if (arm_is_secure_below_el3(env)) {
- tlb_flush_by_mmuidx(cs,
- ARMMMUIdxBit_S1SE1 |
- ARMMMUIdxBit_S1SE0);
- } else {
- tlb_flush_by_mmuidx(cs,
- ARMMMUIdxBit_S12NSE1 |
- ARMMMUIdxBit_S12NSE0);
- }
-}
-
 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 uint64_t value)
 {
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 }
 }

+static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlbi_aa64_vmalle1is_write(env, NULL, value);
+ return;
+ }
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_by_mmuidx(cs,
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
+ } else {
+ tlb_flush_by_mmuidx(cs,
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
+ }
+}
+
 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
 uint64_t value)
 {
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
 }

-static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate by VA, EL1&0 (AArch64 version).
- * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
- * since we don't support flush-for-specific-ASID-only or
- * flush-last-level-only.
- */
- ARMCPU *cpu = arm_env_get_cpu(env);
- CPUState *cs = CPU(cpu);
- uint64_t pageaddr = sextract64(value << 12, 0, 56);
-
- if (arm_is_secure_below_el3(env)) {
- tlb_flush_page_by_mmuidx(cs, pageaddr,
- ARMMMUIdxBit_S1SE1 |
- ARMMMUIdxBit_S1SE0);
- } else {
- tlb_flush_page_by_mmuidx(cs, pageaddr,
- ARMMMUIdxBit_S12NSE1 |
- ARMMMUIdxBit_S12NSE0);
- }
-}
-
 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
 uint64_t value)
 {
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 }
 }

+static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by VA, EL1&0 (AArch64 version).
+ * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ if (tlb_force_broadcast(env)) {
+ tlbi_aa64_vae1is_write(env, NULL, value);
+ return;
+ }
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_page_by_mmuidx(cs, pageaddr,
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
+ } else {
+ tlb_flush_page_by_mmuidx(cs, pageaddr,
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
+ }
+}
+
 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 uint64_t value)
 {
--
2.19.1
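The whole patch hangs off one predicate, which is easy to model outside QEMU. In this sketch the state is passed as plain parameters rather than read from CPUARMState, and the broadcast condition follows the commit message's description (NS EL1 with HCR_EL2.FB set); HCR_EL2.FB is architecturally bit 9:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HCR_FB (1ull << 9)   /* HCR_EL2.FB */

/* sketch: should a non-IS TLB op be treated as broadcast? */
static bool sketch_force_broadcast(uint64_t hcr_el2, int current_el,
                                   bool nonsecure)
{
    return (hcr_el2 & HCR_FB) && current_el == 1 && nonsecure;
}

int main(void)
{
    printf("NS EL1, FB set -> broadcast? %d\n",
           sketch_force_broadcast(HCR_FB, 1, true));   /* 1 */
    printf("NS EL2, FB set -> broadcast? %d\n",
           sketch_force_broadcast(HCR_FB, 2, true));   /* 0: EL2 ops stay local */
    return 0;
}

The design choice in the patch is worth noting: rather than duplicating flush logic, each non-IS write function simply tail-calls its existing IS counterpart when the predicate fires, so the broadcast path stays in one place.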
Add checks for breaches of the v8M stack limit when the
stack pointer is decremented to push the exception frame
for exception entry.

Note that the exception-entry case is unique in that the
stack pointer is updated to be the limit value if the limit
is hit (per rule R_ZLZG).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002163556.10279-7-peter.maydell@linaro.org
---
target/arm/helper.c | 54 ++++++++++++++++++++++++++++++++++++++-------
1 file changed, 46 insertions(+), 8 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
 uint32_t frameptr;
 ARMMMUIdx mmu_idx;
 bool stacked_ok;
+ uint32_t limit;
+ bool want_psp;

 if (dotailchain) {
 bool mode = lr & R_V7M_EXCRET_MODE_MASK;
@@ -XXX,XX +XXX,XX @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
 frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
 lr & R_V7M_EXCRET_SPSEL_MASK);
+ want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
+ if (want_psp) {
+ limit = env->v7m.psplim[M_REG_S];
+ } else {
+ limit = env->v7m.msplim[M_REG_S];
+ }
 } else {
 mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
 frame_sp_p = &env->regs[13];
+ limit = v7m_sp_limit(env);
 }

 frameptr = *frame_sp_p - 0x28;
+ if (frameptr < limit) {
+ /*
+ * Stack limit failure: set SP to the limit value, and generate
+ * STKOF UsageFault. Stack pushes below the limit must not be
+ * performed. It is IMPDEF whether pushes above the limit are
+ * performed; we choose not to.
+ */
+ qemu_log_mask(CPU_LOG_INT,
+ "...STKOF during callee-saves register stacking\n");
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ *frame_sp_p = limit;
+ return true;
+ }

 /* Write as much of the stack frame as we can. A write failure may
 * cause us to pend a derived exception.
@@ -XXX,XX +XXX,XX @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
 v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
 ignore_faults);

- /* Update SP regardless of whether any of the stack accesses failed.
- * When we implement v8M stack limit checking then this attempt to
- * update SP might also fail and result in a derived exception.
- */
+ /* Update SP regardless of whether any of the stack accesses failed. */
 *frame_sp_p = frameptr;

 return !stacked_ok;
@@ -XXX,XX +XXX,XX @@ static bool v7m_push_stack(ARMCPU *cpu)

 frameptr -= 0x20;

+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ uint32_t limit = v7m_sp_limit(env);
+
+ if (frameptr < limit) {
+ /*
+ * Stack limit failure: set SP to the limit value, and generate
+ * STKOF UsageFault. Stack pushes below the limit must not be
+ * performed. It is IMPDEF whether pushes above the limit are
+ * performed; we choose not to.
+ */
+ qemu_log_mask(CPU_LOG_INT,
+ "...STKOF during stacking\n");
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ env->regs[13] = limit;
+ return true;
+ }
+ }
+
 /* Write as much of the stack frame as we can. If we fail a stack
 * write this will result in a derived exception being pended
 * (which may be taken in preference to the one we started with
@@ -XXX,XX +XXX,XX @@ static bool v7m_push_stack(ARMCPU *cpu)
 v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
 v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

- /* Update SP regardless of whether any of the stack accesses failed.
- * When we implement v8M stack limit checking then this attempt to
- * update SP might also fail and result in a derived exception.
- */
+ /* Update SP regardless of whether any of the stack accesses failed. */
 env->regs[13] = frameptr;

 return !stacked_ok;
--
2.19.0


The HCR.DC virtualization configuration register bit has the
following effects:
 * SCTLR.M behaves as if it is 0 for all purposes except
 direct reads of the bit
 * HCR.VM behaves as if it is 1 for all purposes except
 direct reads of the bit
 * the memory type produced by the first stage of the EL1&EL0
 translation regime is Normal Non-Shareable,
 Inner Write-Back Read-Allocate Write-Allocate,
 Outer Write-Back Read-Allocate Write-Allocate.

Implement this behaviour.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181012144235.19646-5-peter.maydell@linaro.org
---
target/arm/helper.c | 23 +++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
 * * The Non-secure TTBCR.EAE bit is set to 1
 * * The implementation includes EL2, and the value of HCR.VM is 1
 *
+ * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
+ *
 * ATS1Hx always uses the 64bit format (not supported yet).
 */
 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

 if (arm_feature(env, ARM_FEATURE_EL2)) {
 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- format64 |= env->cp15.hcr_el2 & HCR_VM;
+ format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
 } else {
 format64 |= arm_current_el(env) == 2;
 }
@@ -XXX,XX +XXX,XX @@ static inline bool regime_translation_disabled(CPUARMState *env,
 }

 if (mmu_idx == ARMMMUIdx_S2NS) {
- return (env->cp15.hcr_el2 & HCR_VM) == 0;
+ /* HCR.DC means HCR.VM behaves as 1 */
+ return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
 }

 if (env->cp15.hcr_el2 & HCR_TGE) {
@@ -XXX,XX +XXX,XX @@ static inline bool regime_translation_disabled(CPUARMState *env,
 }
 }

+ if ((env->cp15.hcr_el2 & HCR_DC) &&
+ (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
+ /* HCR.DC means SCTLR_EL1.M behaves as 0 */
+ return true;
+ }
+
 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
 }

@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,

 /* Combine the S1 and S2 cache attributes, if needed */
 if (!ret && cacheattrs != NULL) {
+ if (env->cp15.hcr_el2 & HCR_DC) {
+ /*
+ * HCR.DC forces the first stage attributes to
+ * Normal Non-Shareable,
+ * Inner Write-Back Read-Allocate Write-Allocate,
+ * Outer Write-Back Read-Allocate Write-Allocate.
+ */
+ cacheattrs->attrs = 0xff;
+ cacheattrs->shareability = 0;
+ }
 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
 }

--
2.19.1
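A worked sketch of the R_ZLZG behaviour that the stack-limit patch above implements on exception entry: if the frame pointer would drop below the limit, SP is clamped to the limit itself, the pushes are skipped, and the caller pends the STKOF UsageFault. This is a standalone toy, not QEMU's code; the function and the example addresses are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* returns true if the frame was stacked, false on a limit breach */
static bool push_frame(uint32_t *sp, uint32_t limit, uint32_t framesize)
{
    uint32_t frameptr = *sp - framesize;

    if (frameptr < limit) {
        *sp = limit;     /* unique to exception entry (rule R_ZLZG) */
        return false;    /* caller pends the STKOF UsageFault */
    }
    *sp = frameptr;      /* normal case: frame fits above the limit */
    return true;
}

int main(void)
{
    uint32_t sp = 0x20001010, limit = 0x20001000;
    bool ok = push_frame(&sp, limit, 0x28);   /* 0x28-byte callee frame */
    printf("ok=%d new SP=0x%08x\n", ok, sp);  /* ok=0, SP forced to limit */
    return 0;
}

For the ordinary store_sp_checked() paths elsewhere in the series, a failed check leaves SP untouched; exception entry is the one place where the architecture mandates writing the limit value back.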
In v7m_exception_taken() we were incorrectly using a
"LR bit EXCRET.ES is 1" check when it should be 0
(compare the pseudocode ExceptionTaken() function).
This meant we didn't stack the callee-saved registers
when tailchaining from a NonSecure to a Secure exception.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002145940.30931-1-peter.maydell@linaro.org
---
target/arm/helper.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
 * not already saved.
 */
 if (lr & R_V7M_EXCRET_DCRS_MASK &&
- !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
+ !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
 push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
 ignore_stackfaults);
 }
--
2.19.0


The A/I/F bits in ISR_EL1 should track the virtual interrupt
status, not the physical interrupt status, if the associated
HCR_EL2.AMO/IMO/FMO bit is set. Implement this, rather than
always showing the physical interrupt status.

We don't currently implement anything to do with external
aborts, so this applies only to the I and F bits (though it
ought to be possible for the outer guest to present a virtual
external abort to the inner guest, even if QEMU doesn't
emulate physical external aborts, so there is missing
functionality in this area).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181012144235.19646-6-peter.maydell@linaro.org
---
target/arm/helper.c | 22 ++++++++++++++++++----
1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 CPUState *cs = ENV_GET_CPU(env);
 uint64_t ret = 0;

- if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
- ret |= CPSR_I;
+ if (arm_hcr_el2_imo(env)) {
+ if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
+ ret |= CPSR_I;
+ }
+ } else {
+ if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ ret |= CPSR_I;
+ }
 }
- if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
- ret |= CPSR_F;
+
+ if (arm_hcr_el2_fmo(env)) {
+ if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
+ ret |= CPSR_F;
+ }
+ } else {
+ if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
+ ret |= CPSR_F;
+ }
 }
+
 /* External aborts are not possible in QEMU so A bit is always clear */
 return ret;
 }
--
2.19.1
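The ISR_EL1 change reduces to a pair of two-way selects, shown here as a toy model. CPSR_I (bit 7) and CPSR_F (bit 6) are the architectural PSTATE mask positions; the function name and flat bool parameters are invented for this sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPSR_I (1u << 7)
#define CPSR_F (1u << 6)

static uint32_t isr_bits(bool imo, bool fmo,
                         bool irq, bool virq, bool fiq, bool vfiq)
{
    uint32_t ret = 0;
    if (imo ? virq : irq) ret |= CPSR_I;   /* IMO set: report vIRQ line */
    if (fmo ? vfiq : fiq) ret |= CPSR_F;   /* FMO set: report vFIQ line */
    return ret;
}

int main(void)
{
    /* physical IRQ pending but IMO set: ISR shows only the vIRQ state */
    printf("ISR=0x%02x\n", isr_bits(true, false, true, false, false, false));
    return 0;
}

The expected output is ISR=0x00: with IMO set, a pending physical IRQ is invisible to the guest reading ISR_EL1, which is exactly the behaviour the patch introduces.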
Updating the NS stack pointer via MSR to SP_NS should include
1
The HCR_EL2 VI and VF bits are supposed to track whether there is
2
a check whether the new SP value is below the stack limit.
2
a pending virtual IRQ or virtual FIQ. For QEMU we store the
3
No other kinds of update to the various stack pointer and
3
pending VIRQ/VFIQ status in cs->interrupt_request, so this means:
4
limit registers via MSR should perform a check.
4
* if the register is read we must get these bit values from
5
cs->interrupt_request
6
* if the register is written then we must write the bit
7
values back into cs->interrupt_request
5
8
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20181002163556.10279-14-peter.maydell@linaro.org
11
Message-id: 20181012144235.19646-7-peter.maydell@linaro.org
10
---
12
---
11
target/arm/helper.c | 14 +++++++++++++-
13
target/arm/helper.c | 47 +++++++++++++++++++++++++++++++++++++++++----
12
1 file changed, 13 insertions(+), 1 deletion(-)
14
1 file changed, 43 insertions(+), 4 deletions(-)
13
15
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
18
--- a/target/arm/helper.c
17
+++ b/target/arm/helper.c
19
+++ b/target/arm/helper.c
18
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
20
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
19
* currently in handler mode or not, using the NS CONTROL.SPSEL.
21
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
20
*/
22
{
21
bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
23
ARMCPU *cpu = arm_env_get_cpu(env);
22
+ bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
24
+ CPUState *cs = ENV_GET_CPU(env);
23
+ uint32_t limit;
25
uint64_t valid_mask = HCR_MASK;
24
26
25
if (!env->v7m.secure) {
27
if (arm_feature(env, ARM_FEATURE_EL3)) {
26
return;
28
@@ -XXX,XX +XXX,XX @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
27
}
29
/* Clear RES0 bits. */
28
- if (!arm_v7m_is_handler_mode(env) && spsel) {
30
value &= valid_mask;
31
32
+ /*
33
+ * VI and VF are kept in cs->interrupt_request. Modifying that
34
+ * requires that we have the iothread lock, which is done by
35
+ * marking the reginfo structs as ARM_CP_IO.
36
+ * Note that if a write to HCR pends a VIRQ or VFIQ it is never
37
+ * possible for it to be taken immediately, because VIRQ and
38
+ * VFIQ are masked unless running at EL0 or EL1, and HCR
39
+ * can only be written at EL2.
40
+ */
41
+ g_assert(qemu_mutex_iothread_locked());
42
+ if (value & HCR_VI) {
43
+ cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
44
+ } else {
45
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
46
+ }
47
+ if (value & HCR_VF) {
48
+ cs->interrupt_request |= CPU_INTERRUPT_VFIQ;
49
+ } else {
50
+ cs->interrupt_request &= ~CPU_INTERRUPT_VFIQ;
51
+ }
52
+ value &= ~(HCR_VI | HCR_VF);
29
+
53
+
30
+ limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
54
/* These bits change the MMU setup:
55
* HCR_VM enables stage 2 translation
56
* HCR_PTW forbids certain page-table setups
57
@@ -XXX,XX +XXX,XX @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
58
hcr_write(env, NULL, value);
59
}
60
61
+static uint64_t hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
62
+{
63
+ /* The VI and VF bits live in cs->interrupt_request */
64
+ uint64_t ret = env->cp15.hcr_el2 & ~(HCR_VI | HCR_VF);
65
+ CPUState *cs = ENV_GET_CPU(env);
31
+
66
+
32
+ if (val < limit) {
67
+ if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
33
+ CPUState *cs = CPU(arm_env_get_cpu(env));
68
+ ret |= HCR_VI;
69
+ }
70
+ if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
71
+ ret |= HCR_VF;
72
+ }
73
+ return ret;
74
+}
34
+
75
+
35
+ cpu_restore_state(cs, GETPC(), true);
76
static const ARMCPRegInfo el2_cp_reginfo[] = {
36
+ raise_exception(env, EXCP_STKOF, 0, 1);
77
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
37
+ }
78
+ .type = ARM_CP_IO,
38
+
79
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
39
+ if (is_psp) {
80
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
40
env->v7m.other_ss_psp = val;
81
- .writefn = hcr_write },
41
} else {
82
+ .writefn = hcr_write, .readfn = hcr_read },
42
env->v7m.other_ss_msp = val;
83
{ .name = "HCR", .state = ARM_CP_STATE_AA32,
84
- .type = ARM_CP_ALIAS,
85
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
86
.cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
87
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
88
- .writefn = hcr_writelow },
89
+ .writefn = hcr_writelow, .readfn = hcr_read },
90
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
91
.type = ARM_CP_ALIAS,
92
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
93
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
94
95
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
96
{ .name = "HCR2", .state = ARM_CP_STATE_AA32,
97
- .type = ARM_CP_ALIAS,
98
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
99
.cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
100
.access = PL2_RW,
101
.fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
43
--
102
--
44
2.19.0
103
2.19.1
45
104
46
105
diff view generated by jsdifflib
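
As a quick illustration of the guest-visible behaviour the HCR_EL2 patch
models: a minimal sketch only, assuming the architectural HCR_EL2 bit
positions (VF is bit 6, VI is bit 7); the helper names here are invented
for illustration and are not part of the patch:

    #include <stdint.h>

    #define HCR_VF (1ULL << 6)   /* virtual FIQ pending */
    #define HCR_VI (1ULL << 7)   /* virtual IRQ pending */

    static inline uint64_t read_hcr_el2(void)
    {
        uint64_t v;
        __asm__ volatile("mrs %0, hcr_el2" : "=r"(v));
        return v;
    }

    static inline void write_hcr_el2(uint64_t v)
    {
        __asm__ volatile("msr hcr_el2, %0" : : "r"(v));
    }

    /* EL2 software pends a virtual IRQ for its EL1 guest. With the patch
     * above, QEMU records the write as CPU_INTERRUPT_VIRQ, and a later
     * read of HCR_EL2 reports VI set again. */
    static void pend_virtual_irq(void)
    {
        write_hcr_el2(read_hcr_el2() | HCR_VI);
    }
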
A cut-and-paste error meant we were reading r4 from the v8M
callee-saves exception stack frame twice. This is harmless
since it just meant we did two memory accesses to the same
location, but it's unnecessary. Delete it.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002150304.2287-1-peter.maydell@linaro.org
---
target/arm/helper.c | 1 -
1 file changed, 1 deletion(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}

pop_ok = pop_ok &&
- v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
--
2.19.0


If the HCR_EL2 PTW virtualization configuration register bit
is set, then this means that a stage 2 Permission fault must
be generated if a stage 1 translation table access is made
to an address that is mapped as Device memory in stage 2.
Implement this.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181012144235.19646-8-peter.maydell@linaro.org
---
target/arm/helper.c | 21 ++++++++++++++++++++-
1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
hwaddr s2pa;
int s2prot;
int ret;
+ ARMCacheAttrs cacheattrs = {};
+ ARMCacheAttrs *pcacheattrs = NULL;
+
+ if (env->cp15.hcr_el2 & HCR_PTW) {
+ /*
+ * PTW means we must fault if this S1 walk touches S2 Device
+ * memory; otherwise we don't care about the attributes and can
+ * save the S2 translation the effort of computing them.
+ */
+ pcacheattrs = &cacheattrs;
+ }

ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
- &txattrs, &s2prot, &s2size, fi, NULL);
+ &txattrs, &s2prot, &s2size, fi, pcacheattrs);
if (ret) {
assert(fi->type != ARMFault_None);
fi->s2addr = addr;
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
fi->s1ptw = true;
return ~0;
}
+ if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
+ /* Access was to Device memory: generate Permission fault */
+ fi->type = ARMFault_Permission;
+ fi->s2addr = addr;
+ fi->stage2 = true;
+ fi->s1ptw = true;
+ return ~0;
+ }
addr = s2pa;
}
return addr;
--
2.19.1
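
A note on the attribute test in the hunk above: a minimal sketch of the
check, assuming the stage 2 memory attribute encoding in which a zero
upper nibble of the attrs byte denotes Device memory (the helper name is
invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stage 2 MemAttr: attrs[7:4] == 0b0000 covers the Device-nGnRnE
     * through Device-GRE types, which is what HCR_EL2.PTW must fault. */
    static bool s2_attrs_are_device(uint8_t attrs)
    {
        return (attrs & 0xf0) == 0;
    }
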
Check the v8M stack limits when pushing the frame for a
non-secure function call via BLXNS.

In order to be able to generate the exception we need to
promote raise_exception() from being local to op_helper.c
so we can call it from helper.c.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002163556.10279-8-peter.maydell@linaro.org
---
target/arm/internals.h | 9 +++++++++
target/arm/helper.c | 4 ++++
target/arm/op_helper.c | 4 ++--
3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

+/**
+ * raise_exception: Raise the specified exception.
+ * Raise a guest exception with the specified value, syndrome register
+ * and target exception level. This should be called from helper functions,
+ * and never returns because we will longjump back up to the CPU main loop.
+ */
+void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el);
+
/*
* For AArch64, map a given EL to an index in the banked_spsr array.
* Note that this mapping and the AArch32 mapping defined in bank_number()
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
"BLXNS with misaligned SP is UNPREDICTABLE\n");
}

+ if (sp < v7m_sp_limit(env)) {
+ raise_exception(env, EXCP_STKOF, 0, 1);
+ }
+
saved_psr = env->v7m.exception;
if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
saved_psr |= XPSR_SFPA;
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

-static void raise_exception(CPUARMState *env, uint32_t excp,
- uint32_t syndrome, uint32_t target_el)
+void raise_exception(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el)
{
CPUState *cs = CPU(arm_env_get_cpu(env));

--
2.19.0


Create and use a utility function to extract the EC field
from a syndrome, rather than open-coding the shift.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181012144235.19646-9-peter.maydell@linaro.org
---
target/arm/internals.h | 5 +++++
target/arm/helper.c | 4 ++--
target/arm/kvm64.c | 2 +-
target/arm/op_helper.c | 2 +-
4 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ enum arm_exception_class {
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)

+static inline uint32_t syn_get_ec(uint32_t syn)
+{
+ return syn >> ARM_EL_EC_SHIFT;
+}
+
/* Utility functions for constructing various kinds of syndrome value.
* Note that in general we follow the AArch64 syndrome values; in a
* few cases the value in HSR for exceptions taken to AArch32 Hyp
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
uint32_t moe;

/* If this is a debug exception we must update the DBGDSCR.MOE bits */
- switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
+ switch (syn_get_ec(env->exception.syndrome)) {
case EC_BREAKPOINT:
case EC_BREAKPOINT_SAME_EL:
moe = 1;
@@ -XXX,XX +XXX,XX @@ void arm_cpu_do_interrupt(CPUState *cs)
if (qemu_loglevel_mask(CPU_LOG_INT)
&& !excp_is_internal(cs->exception_index)) {
qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
- env->exception.syndrome >> ARM_EL_EC_SHIFT,
+ syn_get_ec(env->exception.syndrome),
env->exception.syndrome);
}

diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
- int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT;
+ int hsr_ec = syn_get_ec(debug_exit->hsr);
ARMCPU *cpu = ARM_CPU(cs);
CPUClass *cc = CPU_GET_CLASS(cs);
CPUARMState *env = &cpu->env;
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -XXX,XX +XXX,XX @@ void raise_exception(CPUARMState *env, uint32_t excp,
* (see DDI0478C.a D1.10.4)
*/
target_el = 2;
- if (syndrome >> ARM_EL_EC_SHIFT == EC_ADVSIMDFPACCESSTRAP) {
+ if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
syndrome = syn_uncategorized();
}
}
--
2.19.1
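
A worked example of the new syn_get_ec() helper, assuming the v8A ESR
layout in which EC occupies bits [31:26] (the sample value is
illustrative):

    uint32_t syn = 0x5a000000;   /* EC = 0x16 (HVC from AArch64), IL = 1 */
    assert(syn_get_ec(syn) == 0x16);
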
Coverity complains (CID 1395628) that the multiply in the calculation
of the framebuffer base is performed as 32x32 but then used in a
context that takes a 64-bit hwaddr. This can't actually ever
overflow the 32-bit result, because of the constraints placed on
the s->config values in bcm2835_fb_validate_config(). But we
can placate Coverity anyway, by explicitly casting one of the
inputs to a hwaddr, so the whole expression is calculated with
64-bit arithmetic.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20181005133012.26490-1-peter.maydell@linaro.org
---
hw/display/bcm2835_fb.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/display/bcm2835_fb.c b/hw/display/bcm2835_fb.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/display/bcm2835_fb.c
+++ b/hw/display/bcm2835_fb.c
@@ -XXX,XX +XXX,XX @@ static void fb_update_display(void *opaque)
}

if (s->invalidate) {
- hwaddr base = s->config.base + xoff + yoff * src_width;
+ hwaddr base = s->config.base + xoff + (hwaddr)yoff * src_width;
framebuffer_update_memory_section(&s->fbsection, s->dma_mr,
base,
s->config.yres, src_width);

--
2.19.0


For the v7 version of the Arm architecture, the IL bit in
syndrome register values where the field is not valid was
defined to be UNK/SBZP. In v8 this is RES1, which is what
QEMU currently implements. Handle the desired v7 behaviour
by squashing the IL bit for the affected cases:
* EC == EC_UNCATEGORIZED
* prefetch aborts
* data aborts where ISV is 0

(The fourth case listed in the v8 Arm ARM DDI 0487C.a in
section G7.2.70, "illegal state exception", can't happen
on a v7 CPU.)

This deals with a corner case noted in a comment.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181012144235.19646-10-peter.maydell@linaro.org
---
target/arm/internals.h | 7 ++-----
target/arm/helper.c | 13 +++++++++++++
2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_get_ec(uint32_t syn)
/* Utility functions for constructing various kinds of syndrome value.
* Note that in general we follow the AArch64 syndrome values; in a
* few cases the value in HSR for exceptions taken to AArch32 Hyp
- * mode differs slightly, so if we ever implemented Hyp mode then the
- * syndrome value would need some massaging on exception entry.
- * (One example of this is that AArch64 defaults to IL bit set for
- * exceptions which don't specifically indicate information about the
- * trapping instruction, whereas AArch32 defaults to IL bit clear.)
+ * mode differs slightly, and we fix this up when populating HSR in
+ * arm_cpu_do_interrupt_aarch32_hyp().
*/
static inline uint32_t syn_uncategorized(void)
{
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
}

if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ /*
+ * QEMU syndrome values are v8-style. v7 has the IL bit
+ * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
+ * If this is a v7 CPU, squash the IL bit in those cases.
+ */
+ if (cs->exception_index == EXCP_PREFETCH_ABORT ||
+ (cs->exception_index == EXCP_DATA_ABORT &&
+ !(env->exception.syndrome & ARM_EL_ISV)) ||
+ syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
+ env->exception.syndrome &= ~ARM_EL_IL;
+ }
+ }
env->cp15.esr_el[2] = env->exception.syndrome;
}

--
2.19.1
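
The bcm2835_fb fix above is an instance of a general C pattern: cast one
operand so a 32x32 multiply is performed in the width of the result. A
minimal sketch with illustrative values:

    uint32_t yoff = 0x10000, src_width = 0x10000;
    uint64_t wrapped = yoff * src_width;            /* 32-bit multiply wraps to 0 */
    uint64_t correct = (uint64_t)yoff * src_width;  /* full product, 1ULL << 32 */
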
From: Richard Henderson <richard.henderson@linaro.org>

There is quite a lot of code required to compute cpu_mmu_index,
or even put together the full TCGMemOpIdx. This can easily be
done at translation time.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181005175350.30752-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/internals.h | 5 ++
target/arm/sve_helper.c | 138 +++++++++++++------------
target/arm/translate-sve.c | 67 +++++++++++-------
3 files changed, 121 insertions(+), 89 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
}
}

+/* Note make_memop_idx reserves 4 bits for mmu_idx, and MO_BSWAP is bit 3.
+ * Thus a TCGMemOpIdx, without any MO_ALIGN bits, fits in 8 bits.
+ */
+#define MEMOPIDX_SHIFT 8
+
#endif
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@

#include "qemu/osdep.h"
#include "cpu.h"
+#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
@@ -XXX,XX +XXX,XX @@ typedef intptr_t sve_ld1_host_fn(void *vd, void *vg, void *host,
* The controlling predicate is known to be true.
*/
typedef void sve_ld1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off,
- target_ulong vaddr, int mmu_idx, uintptr_t ra);
+ target_ulong vaddr, TCGMemOpIdx oi, uintptr_t ra);
typedef sve_ld1_tlb_fn sve_st1_tlb_fn;

/*
@@ -XXX,XX +XXX,XX @@ static intptr_t sve_##NAME##_host(void *vd, void *vg, void *host, \
#ifdef CONFIG_SOFTMMU
#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, HOST, MOEND, TLB) \
static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
- target_ulong addr, int mmu_idx, uintptr_t ra) \
+ target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \
{ \
- TCGMemOpIdx oi = make_memop_idx(ctz32(sizeof(TYPEM)) | MOEND, mmu_idx); \
TYPEM val = TLB(env, addr, oi, ra); \
*(TYPEE *)(vd + H(reg_off)) = val; \
}
#else
#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, HOST, MOEND, TLB) \
static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
- target_ulong addr, int mmu_idx, uintptr_t ra) \
+ target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \
{ \
TYPEM val = HOST(g2h(addr)); \
*(TYPEE *)(vd + H(reg_off)) = val; \
@@ -XXX,XX +XXX,XX @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
sve_ld1_host_fn *host_fn,
sve_ld1_tlb_fn *tlb_fn)
{
- void *vd = &env->vfp.zregs[simd_data(desc)];
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const int mmu_idx = get_mmuidx(oi);
+ const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
+ void *vd = &env->vfp.zregs[rd];
const int diffsz = esz - msz;
const intptr_t reg_max = simd_oprsz(desc);
const intptr_t mem_max = reg_max >> diffsz;
- const int mmu_idx = cpu_mmu_index(env, false);
ARMVectorReg scratch;
void *host;
intptr_t split, reg_off, mem_off;
@@ -XXX,XX +XXX,XX @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
* on I/O memory, it may succeed but not bring in the TLB entry.
* But even then we have still made forward progress.
*/
- tlb_fn(env, &scratch, reg_off, addr + mem_off, mmu_idx, retaddr);
+ tlb_fn(env, &scratch, reg_off, addr + mem_off, oi, retaddr);
reg_off += 1 << esz;
}
#endif
@@ -XXX,XX +XXX,XX @@ static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
uint32_t desc, int size, uintptr_t ra,
sve_ld1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
intptr_t i, oprsz = simd_oprsz(desc);
- unsigned rd = simd_data(desc);
ARMVectorReg scratch[2] = { };

set_helper_retaddr(ra);
@@ -XXX,XX +XXX,XX @@ static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
do {
if (pg & 1) {
- tlb_fn(env, &scratch[0], i, addr, mmu_idx, ra);
- tlb_fn(env, &scratch[1], i, addr + size, mmu_idx, ra);
+ tlb_fn(env, &scratch[0], i, addr, oi, ra);
+ tlb_fn(env, &scratch[1], i, addr + size, oi, ra);
}
i += size, pg >>= size;
addr += 2 * size;
@@ -XXX,XX +XXX,XX @@ static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
uint32_t desc, int size, uintptr_t ra,
sve_ld1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
intptr_t i, oprsz = simd_oprsz(desc);
- unsigned rd = simd_data(desc);
ARMVectorReg scratch[3] = { };

set_helper_retaddr(ra);
@@ -XXX,XX +XXX,XX @@ static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
do {
if (pg & 1) {
- tlb_fn(env, &scratch[0], i, addr, mmu_idx, ra);
- tlb_fn(env, &scratch[1], i, addr + size, mmu_idx, ra);
- tlb_fn(env, &scratch[2], i, addr + 2 * size, mmu_idx, ra);
+ tlb_fn(env, &scratch[0], i, addr, oi, ra);
+ tlb_fn(env, &scratch[1], i, addr + size, oi, ra);
+ tlb_fn(env, &scratch[2], i, addr + 2 * size, oi, ra);
}
i += size, pg >>= size;
addr += 3 * size;
@@ -XXX,XX +XXX,XX @@ static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
uint32_t desc, int size, uintptr_t ra,
sve_ld1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
intptr_t i, oprsz = simd_oprsz(desc);
- unsigned rd = simd_data(desc);
ARMVectorReg scratch[4] = { };

set_helper_retaddr(ra);
@@ -XXX,XX +XXX,XX @@ static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
do {
if (pg & 1) {
- tlb_fn(env, &scratch[0], i, addr, mmu_idx, ra);
- tlb_fn(env, &scratch[1], i, addr + size, mmu_idx, ra);
- tlb_fn(env, &scratch[2], i, addr + 2 * size, mmu_idx, ra);
- tlb_fn(env, &scratch[3], i, addr + 3 * size, mmu_idx, ra);
+ tlb_fn(env, &scratch[0], i, addr, oi, ra);
+ tlb_fn(env, &scratch[1], i, addr + size, oi, ra);
+ tlb_fn(env, &scratch[2], i, addr + 2 * size, oi, ra);
+ tlb_fn(env, &scratch[3], i, addr + 3 * size, oi, ra);
}
i += size, pg >>= size;
addr += 4 * size;
@@ -XXX,XX +XXX,XX @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
sve_ld1_host_fn *host_fn,
sve_ld1_tlb_fn *tlb_fn)
{
- void *vd = &env->vfp.zregs[simd_data(desc)];
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const int mmu_idx = get_mmuidx(oi);
+ const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
+ void *vd = &env->vfp.zregs[rd];
const int diffsz = esz - msz;
const intptr_t reg_max = simd_oprsz(desc);
const intptr_t mem_max = reg_max >> diffsz;
- const int mmu_idx = cpu_mmu_index(env, false);
intptr_t split, reg_off, mem_off;
void *host;

@@ -XXX,XX +XXX,XX @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
* Perform one normal read, which will fault or not.
* But it is likely to bring the page into the tlb.
*/
- tlb_fn(env, vd, reg_off, addr + mem_off, mmu_idx, retaddr);
+ tlb_fn(env, vd, reg_off, addr + mem_off, oi, retaddr);

/* After any fault, zero any leading predicated false elts. */
swap_memzero(vd, reg_off);
@@ -XXX,XX +XXX,XX @@ static void sve_ldnf1_r(CPUARMState *env, void *vg, const target_ulong addr,
uint32_t desc, const int esz, const int msz,
sve_ld1_host_fn *host_fn)
{
- void *vd = &env->vfp.zregs[simd_data(desc)];
+ const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
+ void *vd = &env->vfp.zregs[rd];
const int diffsz = esz - msz;
const intptr_t reg_max = simd_oprsz(desc);
const intptr_t mem_max = reg_max >> diffsz;
@@ -XXX,XX +XXX,XX @@ DO_LDFF1_LDNF1_2(dd, 3, 3)
#ifdef CONFIG_SOFTMMU
#define DO_ST_TLB(NAME, H, TYPEM, HOST, MOEND, TLB) \
static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
- target_ulong addr, int mmu_idx, uintptr_t ra) \
+ target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \
{ \
- TCGMemOpIdx oi = make_memop_idx(ctz32(sizeof(TYPEM)) | MOEND, mmu_idx); \
TLB(env, addr, *(TYPEM *)(vd + H(reg_off)), oi, ra); \
}
#else
#define DO_ST_TLB(NAME, H, TYPEM, HOST, MOEND, TLB) \
static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
- target_ulong addr, int mmu_idx, uintptr_t ra) \
+ target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \
{ \
HOST(g2h(addr), *(TYPEM *)(vd + H(reg_off))); \
}
@@ -XXX,XX +XXX,XX @@ static void sve_st1_r(CPUARMState *env, void *vg, target_ulong addr,
const int esize, const int msize,
sve_st1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
intptr_t i, oprsz = simd_oprsz(desc);
- unsigned rd = simd_data(desc);
void *vd = &env->vfp.zregs[rd];

set_helper_retaddr(ra);
@@ -XXX,XX +XXX,XX @@ static void sve_st1_r(CPUARMState *env, void *vg, target_ulong addr,
uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
do {
if (pg & 1) {
- tlb_fn(env, vd, i, addr, mmu_idx, ra);
+ tlb_fn(env, vd, i, addr, oi, ra);
}
i += esize, pg >>= esize;
addr += msize;
@@ -XXX,XX +XXX,XX @@ static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
const int esize, const int msize,
sve_st1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
intptr_t i, oprsz = simd_oprsz(desc);
- unsigned rd = simd_data(desc);
void *d1 = &env->vfp.zregs[rd];
void *d2 = &env->vfp.zregs[(rd + 1) & 31];

@@ -XXX,XX +XXX,XX @@ static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
do {
if (pg & 1) {
- tlb_fn(env, d1, i, addr, mmu_idx, ra);
- tlb_fn(env, d2, i, addr + msize, mmu_idx, ra);
+ tlb_fn(env, d1, i, addr, oi, ra);
+ tlb_fn(env, d2, i, addr + msize, oi, ra);
}
i += esize, pg >>= esize;
addr += 2 * msize;
@@ -XXX,XX +XXX,XX @@ static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
const int esize, const int msize,
sve_st1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
intptr_t i, oprsz = simd_oprsz(desc);
- unsigned rd = simd_data(desc);
void *d1 = &env->vfp.zregs[rd];
void *d2 = &env->vfp.zregs[(rd + 1) & 31];
void *d3 = &env->vfp.zregs[(rd + 2) & 31];
@@ -XXX,XX +XXX,XX @@ static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
do {
if (pg & 1) {
- tlb_fn(env, d1, i, addr, mmu_idx, ra);
- tlb_fn(env, d2, i, addr + msize, mmu_idx, ra);
- tlb_fn(env, d3, i, addr + 2 * msize, mmu_idx, ra);
+ tlb_fn(env, d1, i, addr, oi, ra);
+ tlb_fn(env, d2, i, addr + msize, oi, ra);
+ tlb_fn(env, d3, i, addr + 2 * msize, oi, ra);
}
i += esize, pg >>= esize;
addr += 3 * msize;
@@ -XXX,XX +XXX,XX @@ static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
const int esize, const int msize,
sve_st1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
intptr_t i, oprsz = simd_oprsz(desc);
- unsigned rd = simd_data(desc);
void *d1 = &env->vfp.zregs[rd];
void *d2 = &env->vfp.zregs[(rd + 1) & 31];
void *d3 = &env->vfp.zregs[(rd + 2) & 31];
@@ -XXX,XX +XXX,XX @@ static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
do {
if (pg & 1) {
- tlb_fn(env, d1, i, addr, mmu_idx, ra);
- tlb_fn(env, d2, i, addr + msize, mmu_idx, ra);
- tlb_fn(env, d3, i, addr + 2 * msize, mmu_idx, ra);
- tlb_fn(env, d4, i, addr + 3 * msize, mmu_idx, ra);
+ tlb_fn(env, d1, i, addr, oi, ra);
+ tlb_fn(env, d2, i, addr + msize, oi, ra);
+ tlb_fn(env, d3, i, addr + 2 * msize, oi, ra);
+ tlb_fn(env, d4, i, addr + 3 * msize, oi, ra);
}
i += esize, pg >>= esize;
addr += 4 * msize;
@@ -XXX,XX +XXX,XX @@ static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
target_ulong base, uint32_t desc, uintptr_t ra,
zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
intptr_t i, oprsz = simd_oprsz(desc);
- unsigned scale = simd_data(desc);
ARMVectorReg scratch = { };

set_helper_retaddr(ra);
@@ -XXX,XX +XXX,XX @@ static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
do {
if (likely(pg & 1)) {
target_ulong off = off_fn(vm, i);
- tlb_fn(env, &scratch, i, base + (off << scale), mmu_idx, ra);
+ tlb_fn(env, &scratch, i, base + (off << scale), oi, ra);
}
i += 4, pg >>= 4;
} while (i & 15);
@@ -XXX,XX +XXX,XX @@ static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
target_ulong base, uint32_t desc, uintptr_t ra,
zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
intptr_t i, oprsz = simd_oprsz(desc) / 8;
- unsigned scale = simd_data(desc);
ARMVectorReg scratch = { };

set_helper_retaddr(ra);
@@ -XXX,XX +XXX,XX @@ static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
uint8_t pg = *(uint8_t *)(vg + H1(i));
if (likely(pg & 1)) {
target_ulong off = off_fn(vm, i * 8);
- tlb_fn(env, &scratch, i * 8, base + (off << scale), mmu_idx, ra);
+ tlb_fn(env, &scratch, i * 8, base + (off << scale), oi, ra);
}
}
set_helper_retaddr(0);
@@ -XXX,XX +XXX,XX @@ typedef bool sve_ld1_nf_fn(CPUARMState *env, void *vd, intptr_t reg_off,
#ifdef CONFIG_SOFTMMU
#define DO_LD_NF(NAME, H, TYPEE, TYPEM, HOST) \
static bool sve_ld##NAME##_nf(CPUARMState *env, void *vd, intptr_t reg_off, \
- target_ulong addr, int mmu_idx) \
+ target_ulong addr, int mmu_idx) \
{ \
target_ulong next_page = -(addr | TARGET_PAGE_MASK); \
if (likely(next_page - addr >= sizeof(TYPEM))) { \
@@ -XXX,XX +XXX,XX @@ static inline void sve_ldff1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn,
sve_ld1_nf_fn *nonfault_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const int mmu_idx = get_mmuidx(oi);
+ const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
intptr_t reg_off, reg_max = simd_oprsz(desc);
- unsigned scale = simd_data(desc);
target_ulong addr;

/* Skip to the first true predicate. */
@@ -XXX,XX +XXX,XX @@ static inline void sve_ldff1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
set_helper_retaddr(ra);
addr = off_fn(vm, reg_off);
addr = base + (addr << scale);
- tlb_fn(env, vd, reg_off, addr, mmu_idx, ra);
+ tlb_fn(env, vd, reg_off, addr, oi, ra);

/* The rest of the reads will be non-faulting. */
set_helper_retaddr(0);
@@ -XXX,XX +XXX,XX @@ static inline void sve_ldff1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn,
sve_ld1_nf_fn *nonfault_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const int mmu_idx = get_mmuidx(oi);
+ const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
intptr_t reg_off, reg_max = simd_oprsz(desc);
- unsigned scale = simd_data(desc);
target_ulong addr;

/* Skip to the first true predicate. */
@@ -XXX,XX +XXX,XX @@ static inline void sve_ldff1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
set_helper_retaddr(ra);
addr = off_fn(vm, reg_off);
addr = base + (addr << scale);
- tlb_fn(env, vd, reg_off, addr, mmu_idx, ra);
+ tlb_fn(env, vd, reg_off, addr, oi, ra);

/* The rest of the reads will be non-faulting. */
set_helper_retaddr(0);
@@ -XXX,XX +XXX,XX @@ static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
target_ulong base, uint32_t desc, uintptr_t ra,
zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
intptr_t i, oprsz = simd_oprsz(desc);
- unsigned scale = simd_data(desc);

set_helper_retaddr(ra);
for (i = 0; i < oprsz; ) {
@@ -XXX,XX +XXX,XX @@ static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
do {
if (likely(pg & 1)) {
target_ulong off = off_fn(vm, i);
- tlb_fn(env, vd, i, base + (off << scale), mmu_idx, ra);
+ tlb_fn(env, vd, i, base + (off << scale), oi, ra);
}
i += 4, pg >>= 4;
} while (i & 15);
@@ -XXX,XX +XXX,XX @@ static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
target_ulong base, uint32_t desc, uintptr_t ra,
zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
{
- const int mmu_idx = cpu_mmu_index(env, false);
+ const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+ const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
intptr_t i, oprsz = simd_oprsz(desc) / 8;
- unsigned scale = simd_data(desc);

set_helper_retaddr(ra);
for (i = 0; i < oprsz; i++) {
uint8_t pg = *(uint8_t *)(vg + H1(i));
if (likely(pg & 1)) {
target_ulong off = off_fn(vm, i * 8);
- tlb_fn(env, vd, i * 8, base + (off << scale), mmu_idx, ra);
+ tlb_fn(env, vd, i * 8, base + (off << scale), oi, ra);
}
}
set_helper_retaddr(0);
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static const uint8_t dtype_esz[16] = {
3, 2, 1, 3
};

+static TCGMemOpIdx sve_memopidx(DisasContext *s, int dtype)
+{
+ return make_memop_idx(s->be_data | dtype_mop[dtype], get_mem_index(s));
+}
+
static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
- gen_helper_gvec_mem *fn)
+ int dtype, gen_helper_gvec_mem *fn)
{
unsigned vsz = vec_full_reg_size(s);
TCGv_ptr t_pg;
- TCGv_i32 desc;
+ TCGv_i32 t_desc;
+ int desc;

/* For e.g. LD4, there are not enough arguments to pass all 4
* registers as pointers, so encode the regno into the data field.
* For consistency, do this even for LD1.
*/
- desc = tcg_const_i32(simd_desc(vsz, vsz, zt));
+ desc = sve_memopidx(s, dtype);
+ desc |= zt << MEMOPIDX_SHIFT;
+ desc = simd_desc(vsz, vsz, desc);
+ t_desc = tcg_const_i32(desc);
t_pg = tcg_temp_new_ptr();

tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
- fn(cpu_env, t_pg, addr, desc);
+ fn(cpu_env, t_pg, addr, t_desc);

tcg_temp_free_ptr(t_pg);
- tcg_temp_free_i32(desc);
+ tcg_temp_free_i32(t_desc);
}

static void do_ld_zpa(DisasContext *s, int zt, int pg,
@@ -XXX,XX +XXX,XX @@ static void do_ld_zpa(DisasContext *s, int zt, int pg,
* accessible via the instruction encoding.
*/
assert(fn != NULL);
- do_mem_zpa(s, zt, pg, addr, fn);
+ do_mem_zpa(s, zt, pg, addr, dtype, fn);
}

static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a, uint32_t insn)
@@ -XXX,XX +XXX,XX @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a, uint32_t insn)
TCGv_i64 addr = new_tmp_a64(s);
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
- do_mem_zpa(s, a->rd, a->pg, addr, fns[s->be_data == MO_BE][a->dtype]);
+ do_mem_zpa(s, a->rd, a->pg, addr, a->dtype,
+ fns[s->be_data == MO_BE][a->dtype]);
}
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)
TCGv_i64 addr = new_tmp_a64(s);

tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
- do_mem_zpa(s, a->rd, a->pg, addr, fns[s->be_data == MO_BE][a->dtype]);
+ do_mem_zpa(s, a->rd, a->pg, addr, a->dtype,
+ fns[s->be_data == MO_BE][a->dtype]);
}
return true;
}
@@ -XXX,XX +XXX,XX @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
};
unsigned vsz = vec_full_reg_size(s);
TCGv_ptr t_pg;
- TCGv_i32 desc;
- int poff;
+ TCGv_i32 t_desc;
+ int desc, poff;

/* Load the first quadword using the normal predicated load helpers. */
- desc = tcg_const_i32(simd_desc(16, 16, zt));
+ desc = sve_memopidx(s, msz_dtype(msz));
+ desc |= zt << MEMOPIDX_SHIFT;
+ desc = simd_desc(16, 16, desc);
+ t_desc = tcg_const_i32(desc);

poff = pred_full_reg_offset(s, pg);
if (vsz > 16) {
@@ -XXX,XX +XXX,XX @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
t_pg = tcg_temp_new_ptr();
tcg_gen_addi_ptr(t_pg, cpu_env, poff);

- fns[s->be_data == MO_BE][msz](cpu_env, t_pg, addr, desc);
+ fns[s->be_data == MO_BE][msz](cpu_env, t_pg, addr, t_desc);

tcg_temp_free_ptr(t_pg);
- tcg_temp_free_i32(desc);
+ tcg_temp_free_i32(t_desc);

/* Replicate that first quadword. */
if (vsz > 16) {
@@ -XXX,XX +XXX,XX @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
fn = fn_multiple[be][nreg - 1][msz];
}
assert(fn != NULL);
- do_mem_zpa(s, zt, pg, addr, fn);
+ do_mem_zpa(s, zt, pg, addr, msz_dtype(msz), fn);
}

static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a, uint32_t insn)
@@ -XXX,XX +XXX,XX @@ static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a, uint32_t insn)
*** SVE gather loads / scatter stores
*/

-static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, int scale,
- TCGv_i64 scalar, gen_helper_gvec_mem_scatter *fn)
+static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
+ int scale, TCGv_i64 scalar, int msz,
+ gen_helper_gvec_mem_scatter *fn)
{
unsigned vsz = vec_full_reg_size(s);
- TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, scale));
TCGv_ptr t_zm = tcg_temp_new_ptr();
TCGv_ptr t_pg = tcg_temp_new_ptr();
TCGv_ptr t_zt = tcg_temp_new_ptr();
+ TCGv_i32 t_desc;
+ int desc;
+
+ desc = sve_memopidx(s, msz_dtype(msz));
+ desc |= scale << MEMOPIDX_SHIFT;
+ desc = simd_desc(vsz, vsz, desc);
+ t_desc = tcg_const_i32(desc);

tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
- fn(cpu_env, t_zt, t_pg, t_zm, scalar, desc);
+ fn(cpu_env, t_zt, t_pg, t_zm, scalar, t_desc);

tcg_temp_free_ptr(t_zt);
tcg_temp_free_ptr(t_zm);
tcg_temp_free_ptr(t_pg);
- tcg_temp_free_i32(desc);
+ tcg_temp_free_i32(t_desc);
}

/* Indexed by [be][ff][xs][u][msz]. */
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a, uint32_t insn)
assert(fn != NULL);

do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
- cpu_reg_sp(s, a->rn), fn);
+ cpu_reg_sp(s, a->rn), a->msz, fn);
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn)
* by loading the immediate into the scalar parameter.
*/
imm = tcg_const_i64(a->imm << a->msz);
- do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, fn);
+ do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, fn);
tcg_temp_free_i64(imm);
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
g_assert_not_reached();
}
do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
- cpu_reg_sp(s, a->rn), fn);
+ cpu_reg_sp(s, a->rn), a->msz, fn);
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a, uint32_t insn)
* by loading the immediate into the scalar parameter.
*/
imm = tcg_const_i64(a->imm << a->msz);
- do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, fn);
+ do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, fn);
tcg_temp_free_i64(imm);
return true;
}
--
2.19.0


For traps of FP/SIMD instructions to AArch32 Hyp mode, the syndrome
provided in HSR has more information than is reported to AArch64.
Specifically, there are extra fields TA and coproc which indicate
whether the trapped instruction was FP or SIMD. Add this extra
information to the syndromes we construct, and mask it out when
taking the exception to AArch64.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181012144235.19646-11-peter.maydell@linaro.org
---
target/arm/internals.h | 14 +++++++++++++-
target/arm/helper.c | 9 +++++++++
target/arm/translate.c | 8 ++++----
3 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_get_ec(uint32_t syn)
* few cases the value in HSR for exceptions taken to AArch32 Hyp
* mode differs slightly, and we fix this up when populating HSR in
* arm_cpu_do_interrupt_aarch32_hyp().
+ * The exception is FP/SIMD access traps -- these report extra information
+ * when taking an exception to AArch32. For those we include the extra coproc
+ * and TA fields, and mask them out when taking the exception to AArch64.
*/
static inline uint32_t syn_uncategorized(void)
{
@@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,

static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
+ /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
- | (cv << 24) | (cond << 20);
+ | (cv << 24) | (cond << 20) | 0xa;
+}
+
+static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
+{
+ /* AArch32 SIMD trap: TA == 1 coproc == 0 */
+ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (1 << 5);
}

static inline uint32_t syn_sve_access_trap(void)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
case EXCP_HVC:
case EXCP_HYP_TRAP:
case EXCP_SMC:
+ if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
+ /*
+ * QEMU internal FP/SIMD syndromes from AArch32 include the
+ * TA and coproc fields which are only exposed if the exception
+ * is taken to AArch32 Hyp mode. Mask them out to get a valid
+ * AArch64 format syndrome.
+ */
+ env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
+ }
env->cp15.esr_el[new_el] = env->exception.syndrome;
break;
case EXCP_IRQ:
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
*/
if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}

@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
*/
if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
if (!s->vfp_enabled) {
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)

if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
if (!s->vfp_enabled) {
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)

if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
if (!s->vfp_enabled) {
--
2.19.1
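
For reference, the descriptor packing the SVE patch above introduces can
be sketched as follows, using the patch's own field layout (the
TCGMemOpIdx occupies the low MEMOPIDX_SHIFT bits of the simd_data field,
with the register number or gather/scatter scale above it); this is an
illustration, not additional code from the patch:

    /* encode, at translation time */
    desc = sve_memopidx(s, dtype);
    desc |= zt << MEMOPIDX_SHIFT;
    desc = simd_desc(vsz, vsz, desc);

    /* decode, in the helper */
    TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
    int mmu_idx = get_mmuidx(oi);
    unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
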
From: Stewart Hildebrand <Stewart.Hildebrand@dornerworks.com>

"The Image must be placed text_offset bytes from a 2MB aligned base
address anywhere in usable system RAM and called there."

For the virt board, we write our startup bootloader at the very
bottom of RAM, so that bit can't be used for the image. To avoid
overlap in case the image requests to be loaded at an offset
smaller than our bootloader, we increment the load offset to the
next 2MB.

This fixes a boot failure for Xen AArch64.

Signed-off-by: Stewart Hildebrand <stewart.hildebrand@dornerworks.com>
Tested-by: Andre Przywara <andre.przywara@arm.com>
Message-id: b8a89518794b4436af0c151ed10de4fa@dornerworks.com
[PMM: Rephrased a comment a bit]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/arm/boot.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)

diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "exec/address-spaces.h"
+#include "qemu/units.h"

/* Kernel boot protocol is specified in the kernel docs
* Documentation/arm/Booting and Documentation/arm64/booting.txt
@@ -XXX,XX +XXX,XX @@
#define ARM64_TEXT_OFFSET_OFFSET 8
#define ARM64_MAGIC_OFFSET 56

+#define BOOTLOADER_MAX_SIZE (4 * KiB)
+
AddressSpace *arm_boot_address_space(ARMCPU *cpu,
const struct arm_boot_info *info)
{
@@ -XXX,XX +XXX,XX @@ static void write_bootloader(const char *name, hwaddr addr,
code[i] = tswap32(insn);
}

+ assert((len * sizeof(uint32_t)) < BOOTLOADER_MAX_SIZE);
+
rom_add_blob_fixed_as(name, code, len * sizeof(uint32_t), addr, as);

g_free(code);
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals));
if (hdrvals[1] != 0) {
kernel_load_offset = le64_to_cpu(hdrvals[0]);
+
+ /*
+ * We write our startup "bootloader" at the very bottom of RAM,
+ * so that bit can't be used for the image. Luckily the Image
+ * format specification is that the image requests only an offset
+ * from a 2MB boundary, not an absolute load address. So if the
+ * image requests an offset that might mean it overlaps with the
+ * bootloader, we can just load it starting at 2MB+offset rather
+ * than 0MB + offset.
+ */
+ if (kernel_load_offset < BOOTLOADER_MAX_SIZE) {
+ kernel_load_offset += 2 * MiB;
+ }
}
}

--
2.19.1
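
A worked example of the offset adjustment in the boot.c patch above,
assuming a 2 MiB aligned mem_base (the values are illustrative): an
Image whose header requests text_offset 0x0 would land on top of our
bootloader blob, so it gets loaded at 2 MiB instead, which still
satisfies "2 MiB aligned base plus text_offset":

    uint64_t kernel_load_offset = 0x0;          /* from the Image header */
    if (kernel_load_offset < 4 * 1024) {        /* BOOTLOADER_MAX_SIZE */
        kernel_load_offset += 2 * 1024 * 1024;  /* now 2 MiB + 0x0 */
    }
    /* entry point = mem_base + kernel_load_offset */
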
In commit c79c0a314c43b78 we enabled emulation of external aborts
when the guest attempts to access a physical address with no
mapped device. In commit 4672cbd7bed88dc6 we suppress this for
most legacy boards to prevent breakage of previously working
guests, but we didn't suppress it in the 'virt' board, with
the rationale "we know that guests won't try to prod devices
that we don't describe in the device tree or ACPI tables". This
is mostly true, but we've had a report of a Linux guest image
that this did break. The problem seems to be that the guest
is (incorrectly) configured with a DEBUG_UART_PHYS value that
tells it there is a uart at 0x10009000 (which is true for
vexpress but not for virt), so in early bootup the kernel
probes this bogus address.

This is a misconfigured guest, so we don't need to worry
about it too much, but we can arrange that guests that ran
on QEMU v2.10 (before c79c0a314c43b78) will still run on
the "virt-2.10" board model, by suppressing external aborts
only for that version and earlier. This seems a reasonable
compromise: "virt-2.10" is supposed to behave the same way
that "virt" did in the 2.10 release, and making it do that
provides a usable workaround for guests with bugs like this.

Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180925144127.31965-1-peter.maydell@linaro.org
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
hw/arm/virt.c | 2 ++
1 file changed, 2 insertions(+)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -XXX,XX +XXX,XX @@ static void virt_machine_2_10_options(MachineClass *mc)
{
virt_machine_2_11_options(mc);
SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_10);
+ /* before 2.11 we never faulted accesses to bad addresses */
+ mc->ignore_memory_transaction_failures = true;
}
DEFINE_VIRT_MACHINE(2, 10)

--
2.19.0


From: Richard Henderson <rth@twiddle.net>

This can reduce the number of opcodes required for certain
complex forms of load-multiple (e.g. ld4.16b).

Signed-off-by: Richard Henderson <rth@twiddle.net>
Message-id: 20181011205206.3552-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
bool is_store = !extract32(insn, 22, 1);
bool is_postidx = extract32(insn, 23, 1);
bool is_q = extract32(insn, 30, 1);
- TCGv_i64 tcg_addr, tcg_rn;
+ TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;

int ebytes = 1 << size;
int elements = (is_q ? 128 : 64) / (8 << size);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
tcg_rn = cpu_reg_sp(s, rn);
tcg_addr = tcg_temp_new_i64();
tcg_gen_mov_i64(tcg_addr, tcg_rn);
+ tcg_ebytes = tcg_const_i64(ebytes);

for (r = 0; r < rpt; r++) {
int e;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
clear_vec_high(s, is_q, tt);
}
}
- tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
+ tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
tt = (tt + 1) % 32;
}
}
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
}
}
+ tcg_temp_free_i64(tcg_ebytes);
tcg_temp_free_i64(tcg_addr);
}

@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
bool replicate = false;
int index = is_q << 3 | S << 2 | size;
int ebytes, xs;
- TCGv_i64 tcg_addr, tcg_rn;
+ TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;

switch (scale) {
case 3:
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
tcg_rn = cpu_reg_sp(s, rn);
tcg_addr = tcg_temp_new_i64();
tcg_gen_mov_i64(tcg_addr, tcg_rn);
+ tcg_ebytes = tcg_const_i64(ebytes);

for (xs = 0; xs < selem; xs++) {
if (replicate) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
do_vec_st(s, rt, index, tcg_addr, scale);
}
}
- tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
+ tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
rt = (rt + 1) % 32;
}

@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
}
}
+ tcg_temp_free_i64(tcg_ebytes);
tcg_temp_free_i64(tcg_addr);
}

--
2.19.1
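
The translate-a64.c change above is the usual TCG constant-hoisting
pattern, in miniature (a sketch, not the patch itself): materialise the
element-size constant once and reuse a register-register add inside the
loop, instead of emitting a fresh add-immediate every iteration:

    TCGv_i64 tcg_ebytes = tcg_const_i64(ebytes);  /* one constant */
    for (xs = 0; xs < selem; xs++) {
        /* ...load or store one element... */
        tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
    }
    tcg_temp_free_i64(tcg_ebytes);
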
From: Richard Henderson <richard.henderson@linaro.org>

This is done generically in translator_loop.

Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20181011205206.3552-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.c | 1 -
target/arm/translate.c | 1 -
2 files changed, 2 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,

static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
- tcg_clear_temp_count();
}

static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_movi_i32(tmp, 0);
store_cpu_field(tmp, condexec_bits);
}
- tcg_clear_temp_count();
}

static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)


From: Richard Henderson <richard.henderson@linaro.org>

Use the same *_tlb primitives as we use for ld1.

For linux-user, this hoists the set of helper_retaddr. For softmmu,
it hoists the computation of the current mmu_idx outside the loop,
fixes the endianness problem, and moves the main loop out of a
macro and into an inlined function.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181005175350.30752-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/sve_helper.c | 210 ++++++++++++++++++++------------
1 file changed, 117 insertions(+), 93 deletions(-)

diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_LD1_2(ld1dd, 3, 3)
#undef DO_LD1_1
#undef DO_LD1_2

-#define DO_LD2(NAME, FN, TYPEE, TYPEM, H) \
-void HELPER(NAME)(CPUARMState *env, void *vg, \
- target_ulong addr, uint32_t desc) \
-{ \
- intptr_t i, oprsz = simd_oprsz(desc); \
- intptr_t ra = GETPC(); \
- unsigned rd = simd_data(desc); \
- void *d1 = &env->vfp.zregs[rd]; \
- void *d2 = &env->vfp.zregs[(rd + 1) & 31]; \
- for (i = 0; i < oprsz; ) { \
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
- do { \
- TYPEM m1 = 0, m2 = 0; \
- if (pg & 1) { \
- m1 = FN(env, addr, ra); \
- m2 = FN(env, addr + sizeof(TYPEM), ra); \
- } \
- *(TYPEE *)(d1 + H(i)) = m1; \
- *(TYPEE *)(d2 + H(i)) = m2; \
- i += sizeof(TYPEE), pg >>= sizeof(TYPEE); \
- addr += 2 * sizeof(TYPEM); \
- } while (i & 15); \
- } \
+/*
+ * Common helpers for all contiguous 2,3,4-register predicated loads.
+ */
+static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
+ uint32_t desc, int size, uintptr_t ra,
+ sve_ld1_tlb_fn *tlb_fn)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ intptr_t i, oprsz = simd_oprsz(desc);
+ unsigned rd = simd_data(desc);
+ ARMVectorReg scratch[2] = { };
+
+ set_helper_retaddr(ra);
+ for (i = 0; i < oprsz; ) {
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
+ do {
+ if (pg & 1) {
+ tlb_fn(env, &scratch[0], i, addr, mmu_idx, ra);
+ tlb_fn(env, &scratch[1], i, addr + size, mmu_idx, ra);
+ }
+ i += size, pg >>= size;
+ addr += 2 * size;
+ } while (i & 15);
+ }
+ set_helper_retaddr(0);
+
+ /* Wait until all exceptions have been raised to write back. */
+ memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
+ memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz);
}

-#define DO_LD3(NAME, FN, TYPEE, TYPEM, H) \
-void HELPER(NAME)(CPUARMState *env, void *vg, \
- target_ulong addr, uint32_t desc) \
-{ \
- intptr_t i, oprsz = simd_oprsz(desc); \
- intptr_t ra = GETPC(); \
- unsigned rd = simd_data(desc); \
- void *d1 = &env->vfp.zregs[rd]; \
- void *d2 = &env->vfp.zregs[(rd + 1) & 31]; \
- void *d3 = &env->vfp.zregs[(rd + 2) & 31]; \
- for (i = 0; i < oprsz; ) { \
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
- do { \
- TYPEM m1 = 0, m2 = 0, m3 = 0; \
- if (pg & 1) { \
- m1 = FN(env, addr, ra); \
- m2 = FN(env, addr + sizeof(TYPEM), ra); \
- m3 = FN(env, addr + 2 * sizeof(TYPEM), ra); \
- } \
- *(TYPEE *)(d1 + H(i)) = m1; \
- *(TYPEE *)(d2 + H(i)) = m2; \
- *(TYPEE *)(d3 + H(i)) = m3; \
- i += sizeof(TYPEE), pg >>= sizeof(TYPEE); \
- addr += 3 * sizeof(TYPEM); \
- } while (i & 15); \
- } \
+static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
+ uint32_t desc, int size, uintptr_t ra,
+ sve_ld1_tlb_fn *tlb_fn)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ intptr_t i, oprsz = simd_oprsz(desc);
+ unsigned rd = simd_data(desc);
+ ARMVectorReg scratch[3] = { };
+
+ set_helper_retaddr(ra);
+ for (i = 0; i < oprsz; ) {
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
+ do {
+ if (pg & 1) {
+ tlb_fn(env, &scratch[0], i, addr, mmu_idx, ra);
+ tlb_fn(env, &scratch[1], i, addr + size, mmu_idx, ra);
+ tlb_fn(env, &scratch[2], i, addr + 2 * size, mmu_idx, ra);
+ }
+ i += size, pg >>= size;
+ addr += 3 * size;
+ } while (i & 15);
+ }
+ set_helper_retaddr(0);
+
+ /* Wait until all exceptions have been raised to write back. */
+ memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
+ memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz);
+ memcpy(&env->vfp.zregs[(rd + 2) & 31], &scratch[2], oprsz);
}

-#define DO_LD4(NAME, FN, TYPEE, TYPEM, H) \
-void HELPER(NAME)(CPUARMState *env, void *vg, \
- target_ulong addr, uint32_t desc) \
-{ \
- intptr_t i, oprsz = simd_oprsz(desc); \
- intptr_t ra = GETPC(); \
-void HELPER(NAME)(CPUARMState *env, void *vg, \
139
- target_ulong addr, uint32_t desc) \
140
-{ \
141
- intptr_t i, oprsz = simd_oprsz(desc); \
142
- intptr_t ra = GETPC(); \
143
- unsigned rd = simd_data(desc); \
144
- void *d1 = &env->vfp.zregs[rd]; \
145
- void *d2 = &env->vfp.zregs[(rd + 1) & 31]; \
146
- void *d3 = &env->vfp.zregs[(rd + 2) & 31]; \
147
- void *d4 = &env->vfp.zregs[(rd + 3) & 31]; \
148
- for (i = 0; i < oprsz; ) { \
149
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
150
- do { \
151
- TYPEM m1 = 0, m2 = 0, m3 = 0, m4 = 0; \
152
- if (pg & 1) { \
153
- m1 = FN(env, addr, ra); \
154
- m2 = FN(env, addr + sizeof(TYPEM), ra); \
155
- m3 = FN(env, addr + 2 * sizeof(TYPEM), ra); \
156
- m4 = FN(env, addr + 3 * sizeof(TYPEM), ra); \
157
- } \
158
- *(TYPEE *)(d1 + H(i)) = m1; \
159
- *(TYPEE *)(d2 + H(i)) = m2; \
160
- *(TYPEE *)(d3 + H(i)) = m3; \
161
- *(TYPEE *)(d4 + H(i)) = m4; \
162
- i += sizeof(TYPEE), pg >>= sizeof(TYPEE); \
163
- addr += 4 * sizeof(TYPEM); \
164
- } while (i & 15); \
165
- } \
166
+static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
167
+ uint32_t desc, int size, uintptr_t ra,
168
+ sve_ld1_tlb_fn *tlb_fn)
169
+{
170
+ const int mmu_idx = cpu_mmu_index(env, false);
171
+ intptr_t i, oprsz = simd_oprsz(desc);
172
+ unsigned rd = simd_data(desc);
173
+ ARMVectorReg scratch[4] = { };
174
+
175
+ set_helper_retaddr(ra);
176
+ for (i = 0; i < oprsz; ) {
177
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
178
+ do {
179
+ if (pg & 1) {
180
+ tlb_fn(env, &scratch[0], i, addr, mmu_idx, ra);
181
+ tlb_fn(env, &scratch[1], i, addr + size, mmu_idx, ra);
182
+ tlb_fn(env, &scratch[2], i, addr + 2 * size, mmu_idx, ra);
183
+ tlb_fn(env, &scratch[3], i, addr + 3 * size, mmu_idx, ra);
184
+ }
185
+ i += size, pg >>= size;
186
+ addr += 4 * size;
187
+ } while (i & 15);
188
+ }
189
+ set_helper_retaddr(0);
190
+
191
+ /* Wait until all exceptions have been raised to write back. */
192
+ memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
193
+ memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz);
194
+ memcpy(&env->vfp.zregs[(rd + 2) & 31], &scratch[2], oprsz);
195
+ memcpy(&env->vfp.zregs[(rd + 3) & 31], &scratch[3], oprsz);
196
}
197
198
-DO_LD2(sve_ld2bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
199
-DO_LD3(sve_ld3bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
200
-DO_LD4(sve_ld4bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
201
+#define DO_LDN_1(N) \
202
+void __attribute__((flatten)) HELPER(sve_ld##N##bb_r) \
203
+ (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \
204
+{ \
205
+ sve_ld##N##_r(env, vg, addr, desc, 1, GETPC(), sve_ld1bb_tlb); \
206
+}
207
208
-DO_LD2(sve_ld2hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
209
-DO_LD3(sve_ld3hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
210
-DO_LD4(sve_ld4hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
211
+#define DO_LDN_2(N, SUFF, SIZE) \
212
+void __attribute__((flatten)) HELPER(sve_ld##N##SUFF##_r) \
213
+ (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \
214
+{ \
215
+ sve_ld##N##_r(env, vg, addr, desc, SIZE, GETPC(), \
216
+ arm_cpu_data_is_big_endian(env) \
217
+ ? sve_ld1##SUFF##_be_tlb : sve_ld1##SUFF##_le_tlb); \
218
+}
219
220
-DO_LD2(sve_ld2ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
221
-DO_LD3(sve_ld3ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
222
-DO_LD4(sve_ld4ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
223
+DO_LDN_1(2)
224
+DO_LDN_1(3)
225
+DO_LDN_1(4)
226
227
-DO_LD2(sve_ld2dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
228
-DO_LD3(sve_ld3dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
229
-DO_LD4(sve_ld4dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
230
+DO_LDN_2(2, hh, 2)
231
+DO_LDN_2(3, hh, 2)
232
+DO_LDN_2(4, hh, 2)
233
234
-#undef DO_LD2
235
-#undef DO_LD3
236
-#undef DO_LD4
237
+DO_LDN_2(2, ss, 4)
238
+DO_LDN_2(3, ss, 4)
239
+DO_LDN_2(4, ss, 4)
240
+
241
+DO_LDN_2(2, dd, 8)
242
+DO_LDN_2(3, dd, 8)
243
+DO_LDN_2(4, dd, 8)
244
+
245
+#undef DO_LDN_1
246
+#undef DO_LDN_2
247
248
/*
249
* Load contiguous data, first-fault and no-fault.
250
--
40
--
251
2.19.0
41
2.19.1
252
42
253
43
diff view generated by jsdifflib
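A toy model of why the per-target hooks can drop the call: the generic loop performs the reset once before invoking the target hook. The names below are stand-ins for illustration only, not QEMU's real translator API:

#include <stdio.h>

static int temp_count = 3;   /* pretend leak counter, arbitrary start value */

static void target_tb_start(void)
{
    /* previously reset temp_count here; now done by the caller */
}

static void translator_loop(void (*tb_start)(void))
{
    temp_count = 0;          /* generic reset, once per translation block */
    tb_start();
}

int main(void)
{
    translator_loop(target_tb_start);
    printf("temp_count = %d\n", temp_count);
    return 0;
}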
From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 28 +++-------------------------
 1 file changed, 3 insertions(+), 25 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     for (xs = 0; xs < selem; xs++) {
         if (replicate) {
             /* Load and replicate to all elements */
-            uint64_t mulconst;
             TCGv_i64 tcg_tmp = tcg_temp_new_i64();
 
             tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
                                 get_mem_index(s), s->be_data + scale);
-            switch (scale) {
-            case 0:
-                mulconst = 0x0101010101010101ULL;
-                break;
-            case 1:
-                mulconst = 0x0001000100010001ULL;
-                break;
-            case 2:
-                mulconst = 0x0000000100000001ULL;
-                break;
-            case 3:
-                mulconst = 0;
-                break;
-            default:
-                g_assert_not_reached();
-            }
-            if (mulconst) {
-                tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
-            }
-            write_vec_element(s, tcg_tmp, rt, 0, MO_64);
-            if (is_q) {
-                write_vec_element(s, tcg_tmp, rt, 1, MO_64);
-            }
+            tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
+                                 (is_q + 1) * 8, vec_full_reg_size(s),
+                                 tcg_tmp);
             tcg_temp_free_i64(tcg_tmp);
-            clear_vec_high(s, is_q, rt);
         } else {
             /* Load/store one element per register */
             if (is_load) {
--
2.19.1


diff view generated by jsdifflib
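For reference, the arithmetic the deleted mulconst code depended on: multiplying an element by the matching replication constant smears it across a 64-bit lane, which is exactly what tcg_gen_gvec_dup_i64 now expresses directly. A standalone check with demo values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t b = 0xab, h = 0x1234, s = 0xdeadbeef;

    /* scale 0: replicate a byte into all 8 byte positions */
    assert(b * 0x0101010101010101ULL == 0xababababababababULL);
    /* scale 1: replicate a halfword into all 4 positions */
    assert(h * 0x0001000100010001ULL == 0x1234123412341234ULL);
    /* scale 2: replicate a word into both halves */
    assert(s * 0x0000000100000001ULL == 0xdeadbeefdeadbeefULL);

    printf("replication identities hold\n");
    return 0;
}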
From: Richard Henderson <richard.henderson@linaro.org>

The 16-byte load only uses 16 predicate bits.  But while
reusing the other load infrastructure, we find other bits
that are set and trigger an assert.  To avoid this and
retain the assert, zero-extend the predicate that we pass
to the LD1 helper.

Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181005175350.30752-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-sve.c | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
     unsigned vsz = vec_full_reg_size(s);
     TCGv_ptr t_pg;
     TCGv_i32 desc;
+    int poff;
 
     /* Load the first quadword using the normal predicated load helpers.  */
     desc = tcg_const_i32(simd_desc(16, 16, zt));
-    t_pg = tcg_temp_new_ptr();
 
-    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+    poff = pred_full_reg_offset(s, pg);
+    if (vsz > 16) {
+        /*
+         * Zero-extend the first 16 bits of the predicate into a temporary.
+         * This avoids triggering an assert making sure we don't have bits
+         * set within a predicate beyond VQ, but we have lowered VQ to 1
+         * for this load operation.
+         */
+        TCGv_i64 tmp = tcg_temp_new_i64();
+#ifdef HOST_WORDS_BIGENDIAN
+        poff += 6;
+#endif
+        tcg_gen_ld16u_i64(tmp, cpu_env, poff);
+
+        poff = offsetof(CPUARMState, vfp.preg_tmp);
+        tcg_gen_st_i64(tmp, cpu_env, poff);
+        tcg_temp_free_i64(tmp);
+    }
+
+    t_pg = tcg_temp_new_ptr();
+    tcg_gen_addi_ptr(t_pg, cpu_env, poff);
+
     fns[msz](cpu_env, t_pg, addr, desc);
 
     tcg_temp_free_ptr(t_pg);
--
2.19.0


From: Richard Henderson <richard.henderson@linaro.org>

For a sequence of loads or stores from a single register,
little-endian operations can be promoted to an 8-byte op.
This can reduce the number of operations by a factor of 8.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 66 +++++++++++++++++++++++---------------
 1 file changed, 40 insertions(+), 26 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
 
 /* Store from vector register to memory */
 static void do_vec_st(DisasContext *s, int srcidx, int element,
-                      TCGv_i64 tcg_addr, int size)
+                      TCGv_i64 tcg_addr, int size, TCGMemOp endian)
 {
-    TCGMemOp memop = s->be_data + size;
     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
 
     read_vec_element(s, tcg_tmp, srcidx, element, size);
-    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
+    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
 
     tcg_temp_free_i64(tcg_tmp);
 }
 
 /* Load from memory to vector register */
 static void do_vec_ld(DisasContext *s, int destidx, int element,
-                      TCGv_i64 tcg_addr, int size)
+                      TCGv_i64 tcg_addr, int size, TCGMemOp endian)
 {
-    TCGMemOp memop = s->be_data + size;
     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
 
-    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
+    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
     write_vec_element(s, tcg_tmp, destidx, element, size);
 
     tcg_temp_free_i64(tcg_tmp);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     bool is_postidx = extract32(insn, 23, 1);
     bool is_q = extract32(insn, 30, 1);
     TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;
+    TCGMemOp endian = s->be_data;
 
-    int ebytes = 1 << size;
-    int elements = (is_q ? 128 : 64) / (8 << size);
+    int ebytes;   /* bytes per element */
+    int elements; /* elements per vector */
     int rpt;    /* num iterations */
     int selem;  /* structure elements */
     int r;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
         gen_check_sp_alignment(s);
     }
 
+    /* For our purposes, bytes are always little-endian.  */
+    if (size == 0) {
+        endian = MO_LE;
+    }
+
+    /* Consecutive little-endian elements from a single register
+     * can be promoted to a larger little-endian operation.
+     */
+    if (selem == 1 && endian == MO_LE) {
+        size = 3;
+    }
+    ebytes = 1 << size;
+    elements = (is_q ? 16 : 8) / ebytes;
+
     tcg_rn = cpu_reg_sp(s, rn);
     tcg_addr = tcg_temp_new_i64();
     tcg_gen_mov_i64(tcg_addr, tcg_rn);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     for (r = 0; r < rpt; r++) {
         int e;
         for (e = 0; e < elements; e++) {
-            int tt = (rt + r) % 32;
             int xs;
             for (xs = 0; xs < selem; xs++) {
+                int tt = (rt + r + xs) % 32;
                 if (is_store) {
-                    do_vec_st(s, tt, e, tcg_addr, size);
+                    do_vec_st(s, tt, e, tcg_addr, size, endian);
                 } else {
-                    do_vec_ld(s, tt, e, tcg_addr, size);
-
-                    /* For non-quad operations, setting a slice of the low
-                     * 64 bits of the register clears the high 64 bits (in
-                     * the ARM ARM pseudocode this is implicit in the fact
-                     * that 'rval' is a 64 bit wide variable).
-                     * For quad operations, we might still need to zero the
-                     * high bits of SVE.  We optimize by noticing that we only
-                     * need to do this the first time we touch a register.
-                     */
-                    if (e == 0 && (r == 0 || xs == selem - 1)) {
-                        clear_vec_high(s, is_q, tt);
-                    }
+                    do_vec_ld(s, tt, e, tcg_addr, size, endian);
                 }
                 tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
-                tt = (tt + 1) % 32;
             }
         }
     }
 
+    if (!is_store) {
+        /* For non-quad operations, setting a slice of the low
+         * 64 bits of the register clears the high 64 bits (in
+         * the ARM ARM pseudocode this is implicit in the fact
+         * that 'rval' is a 64 bit wide variable).
+         * For quad operations, we might still need to zero the
+         * high bits of SVE.
+         */
+        for (r = 0; r < rpt * selem; r++) {
+            int tt = (rt + r) % 32;
+            clear_vec_high(s, is_q, tt);
+        }
+    }
+
     if (is_postidx) {
         int rm = extract32(insn, 16, 5);
         if (rm == 31) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
         } else {
             /* Load/store one element per register */
             if (is_load) {
-                do_vec_ld(s, rt, index, tcg_addr, scale);
+                do_vec_ld(s, rt, index, tcg_addr, scale, s->be_data);
             } else {
-                do_vec_st(s, rt, index, tcg_addr, scale);
+                do_vec_st(s, rt, index, tcg_addr, scale, s->be_data);
             }
         }
         tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
--
2.19.1


diff view generated by jsdifflib
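A standalone sketch of why the promotion above is only legal for little-endian element order: on a little-endian view, eight consecutive 1-byte element accesses and one 8-byte access touch the same bytes in the same order. Buffer contents are arbitrary demo values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint8_t mem[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    uint64_t wide, narrow = 0;

    memcpy(&wide, mem, 8);            /* one promoted 8-byte load (host order) */
    for (int e = 0; e < 8; e++) {     /* eight 1-byte element loads */
        narrow |= (uint64_t)mem[e] << (8 * e);   /* little-endian placement */
    }
    /* prints 1 on a little-endian host; on a big-endian host the
     * promotion would change the result, which is why it is not done */
    printf("promoted equals elementwise: %d\n", wide == narrow);
    return 0;
}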
From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20181011205206.3552-6-richard.henderson@linaro.org
[PMM: drop change to now-deleted cpu_mode_names array]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 cpu_F0d, cpu_F1d;
 
 #include "exec/gen-icount.h"
 
-static const char *regnames[] =
+static const char * const regnames[] =
     { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
 
@@ -XXX,XX +XXX,XX @@ static struct {
     int nregs;
     int interleave;
    int spacing;
-} neon_ls_element_type[11] = {
+} const neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
--
2.19.1


diff view generated by jsdifflib
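A two-line reminder of what the extra const buys (illustrative sketch, not QEMU code): with only the inner const the table's pointer elements remain writable, so the array cannot be placed in read-only storage; the second const makes the table itself immutable.

static const char *writable_table[] = { "r0", "r1" };
static const char * const readonly_table[] = { "r0", "r1" };

int main(void)
{
    writable_table[0] = "sp";        /* legal: elements are non-const pointers */
    /* readonly_table[0] = "sp"; */  /* compile error: the pointers are const */
    return readonly_table[0] == writable_table[1];  /* arbitrary use */
}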
From: Richard Henderson <richard.henderson@linaro.org>

Also introduces neon_element_offset to find the env offset
of a specific element within a neon register.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-7-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 63 ++++++++++++++++++++++++------------------
 1 file changed, 36 insertions(+), 27 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ neon_reg_offset (int reg, int n)
     return vfp_reg_offset(0, sreg);
 }
 
+/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
+ * where 0 is the least significant end of the register.
+ */
+static inline long
+neon_element_offset(int reg, int element, TCGMemOp size)
+{
+    int element_size = 1 << size;
+    int ofs = element * element_size;
+#ifdef HOST_WORDS_BIGENDIAN
+    /* Calculate the offset assuming fully little-endian,
+     * then XOR to account for the order of the 8-byte units.
+     */
+    if (element_size < 8) {
+        ofs ^= 8 - element_size;
+    }
+#endif
+    return neon_reg_offset(reg, 0) + ofs;
+}
+
 static TCGv_i32 neon_load_reg(int reg, int pass)
 {
     TCGv_i32 tmp = tcg_temp_new_i32();
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
                 tmp = load_reg(s, rd);
                 if (insn & (1 << 23)) {
                     /* VDUP */
-                    if (size == 0) {
-                        gen_neon_dup_u8(tmp, 0);
-                    } else if (size == 1) {
-                        gen_neon_dup_low16(tmp);
-                    }
-                    for (n = 0; n <= pass * 2; n++) {
-                        tmp2 = tcg_temp_new_i32();
-                        tcg_gen_mov_i32(tmp2, tmp);
-                        neon_store_reg(rn, n, tmp2);
-                    }
-                    neon_store_reg(rn, n, tmp);
+                    int vec_size = pass ? 16 : 8;
+                    tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
+                                         vec_size, vec_size, tmp);
+                    tcg_temp_free_i32(tmp);
                 } else {
                     /* VMOV */
                     switch (size) {
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             tcg_temp_free_i32(tmp);
         } else if ((insn & 0x380) == 0) {
             /* VDUP */
+            int element;
+            TCGMemOp size;
+
             if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
                 return 1;
             }
-            if (insn & (1 << 19)) {
-                tmp = neon_load_reg(rm, 1);
-            } else {
-                tmp = neon_load_reg(rm, 0);
-            }
             if (insn & (1 << 16)) {
-                gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
+                size = MO_8;
+                element = (insn >> 17) & 7;
             } else if (insn & (1 << 17)) {
-                if ((insn >> 18) & 1)
-                    gen_neon_dup_high16(tmp);
-                else
-                    gen_neon_dup_low16(tmp);
+                size = MO_16;
+                element = (insn >> 18) & 3;
+            } else {
+                size = MO_32;
+                element = (insn >> 19) & 1;
             }
-            for (pass = 0; pass < (q ? 4 : 2); pass++) {
-                tmp2 = tcg_temp_new_i32();
-                tcg_gen_mov_i32(tmp2, tmp);
-                neon_store_reg(rd, pass, tmp2);
-            }
-            tcg_temp_free_i32(tmp);
+            tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
+                                 neon_element_offset(rm, element, size),
+                                 q ? 16 : 8, q ? 16 : 8);
         } else {
             return 1;
         }
--
2.19.1


diff view generated by jsdifflib
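A standalone model of the HOST_WORDS_BIGENDIAN adjustment in neon_element_offset: compute the little-endian byte offset of an element, then XOR with (8 - element_size) to land on the same element when each 8-byte unit is stored big-endian. Sizes follow TCGMemOp, i.e. 1 << size bytes per element:

#include <stdio.h>

static long element_offset(int element, int size, int host_big_endian)
{
    int element_size = 1 << size;
    long ofs = (long)element * element_size;

    if (host_big_endian && element_size < 8) {
        ofs ^= 8 - element_size;    /* reorder within the 8-byte unit */
    }
    return ofs;
}

int main(void)
{
    /* 16-bit elements (size = 1) within one 8-byte unit: the BE offsets
     * come out reversed (6, 4, 2, 0) relative to the LE ones (0, 2, 4, 6) */
    for (int e = 0; e < 4; e++) {
        printf("h%d: LE offset %ld, BE offset %ld\n", e,
               element_offset(e, 1, 0), element_offset(e, 1, 1));
    }
    return 0;
}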
From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-8-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 67 ++++++++++++++++++++++++------------------
 1 file changed, 39 insertions(+), 28 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 return 1;
             }
         } else { /* (insn & 0x00380080) == 0 */
-            int invert;
+            int invert, reg_ofs, vec_size;
+
             if (q && (rd & 1)) {
                 return 1;
             }
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 break;
             case 14:
                 imm |= (imm << 8) | (imm << 16) | (imm << 24);
-                if (invert)
+                if (invert) {
                     imm = ~imm;
+                }
                 break;
             case 15:
                 if (invert) {
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                       | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                 break;
             }
-            if (invert)
+            if (invert) {
                 imm = ~imm;
+            }
 
-            for (pass = 0; pass < (q ? 4 : 2); pass++) {
-                if (op & 1 && op < 12) {
-                    tmp = neon_load_reg(rd, pass);
-                    if (invert) {
-                        /* The immediate value has already been inverted, so
-                           BIC becomes AND.  */
-                        tcg_gen_andi_i32(tmp, tmp, imm);
-                    } else {
-                        tcg_gen_ori_i32(tmp, tmp, imm);
-                    }
+            reg_ofs = neon_reg_offset(rd, 0);
+            vec_size = q ? 16 : 8;
+
+            if (op & 1 && op < 12) {
+                if (invert) {
+                    /* The immediate value has already been inverted,
+                     * so BIC becomes AND.
+                     */
+                    tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
+                                      vec_size, vec_size);
                 } else {
-                    /* VMOV, VMVN.  */
-                    tmp = tcg_temp_new_i32();
-                    if (op == 14 && invert) {
-                        int n;
-                        uint32_t val;
-                        val = 0;
-                        for (n = 0; n < 4; n++) {
-                            if (imm & (1 << (n + (pass & 1) * 4)))
-                                val |= 0xff << (n * 8);
-                        }
-                        tcg_gen_movi_i32(tmp, val);
-                    } else {
-                        tcg_gen_movi_i32(tmp, imm);
-                    }
+                    tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
+                                     vec_size, vec_size);
+                }
+            } else {
+                /* VMOV, VMVN.  */
+                if (op == 14 && invert) {
+                    TCGv_i64 t64 = tcg_temp_new_i64();
+
+                    for (pass = 0; pass <= q; ++pass) {
+                        uint64_t val = 0;
+                        int n;
+
+                        for (n = 0; n < 8; n++) {
+                            if (imm & (1 << (n + pass * 8))) {
+                                val |= 0xffull << (n * 8);
+                            }
+                        }
+                        tcg_gen_movi_i64(t64, val);
+                        neon_store_reg64(t64, rd + pass);
+                    }
+                    tcg_temp_free_i64(t64);
+                } else {
+                    tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
                 }
-                neon_store_reg(rd, pass, tmp);
             }
         }
     } else { /* (insn & 0x00800010 == 0x00800000) */
--
2.19.1


diff view generated by jsdifflib
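A standalone model of the op == 14 && invert case above: each of the low 16 immediate bits selects a 0xff byte in the 128-bit result, expanded 8 bits per 64-bit half exactly as the new loop does. The immediate is a made-up demo value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t imm = 0xa5f0;              /* hypothetical decoded immediate */

    for (int pass = 0; pass <= 1; pass++) {
        uint64_t val = 0;
        for (int n = 0; n < 8; n++) {
            if (imm & (1u << (n + pass * 8))) {
                val |= 0xffull << (n * 8);   /* bit -> whole byte */
            }
        }
        printf("d%d = 0x%016llx\n", pass, (unsigned long long)val);
    }
    return 0;
}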
From: Richard Henderson <richard.henderson@linaro.org>

Move expanders for VBSL, VBIT, and VBIF from translate-a64.c.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-9-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h | 6 ++
 target/arm/translate-a64.c | 61 --------------
 target/arm/translate.c | 162 +++++++++++++++++++++++++++----------
 3 files changed, 124 insertions(+), 105 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 get_ahp_flag(void)
     return ret;
 }
 
+
+/* Vector operations shared between ARM and AArch64.  */
+extern const GVecGen3 bsl_op;
+extern const GVecGen3 bit_op;
+extern const GVecGen3 bif_op;
+
 /*
  * Forward to the isar_feature_* tests given a DisasContext pointer.
  */
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
     }
 }
 
-static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
-{
-    tcg_gen_xor_i64(rn, rn, rm);
-    tcg_gen_and_i64(rn, rn, rd);
-    tcg_gen_xor_i64(rd, rm, rn);
-}
-
-static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
-{
-    tcg_gen_xor_i64(rn, rn, rd);
-    tcg_gen_and_i64(rn, rn, rm);
-    tcg_gen_xor_i64(rd, rd, rn);
-}
-
-static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
-{
-    tcg_gen_xor_i64(rn, rn, rd);
-    tcg_gen_andc_i64(rn, rn, rm);
-    tcg_gen_xor_i64(rd, rd, rn);
-}
-
-static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
-{
-    tcg_gen_xor_vec(vece, rn, rn, rm);
-    tcg_gen_and_vec(vece, rn, rn, rd);
-    tcg_gen_xor_vec(vece, rd, rm, rn);
-}
-
-static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
-{
-    tcg_gen_xor_vec(vece, rn, rn, rd);
-    tcg_gen_and_vec(vece, rn, rn, rm);
-    tcg_gen_xor_vec(vece, rd, rd, rn);
-}
-
-static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
-{
-    tcg_gen_xor_vec(vece, rn, rn, rd);
-    tcg_gen_andc_vec(vece, rn, rn, rm);
-    tcg_gen_xor_vec(vece, rd, rd, rn);
-}
-
 /* Logic op (opcode == 3) subgroup of C3.6.16. */
 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
 {
-    static const GVecGen3 bsl_op = {
-        .fni8 = gen_bsl_i64,
-        .fniv = gen_bsl_vec,
-        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-        .load_dest = true
-    };
-    static const GVecGen3 bit_op = {
-        .fni8 = gen_bit_i64,
-        .fniv = gen_bit_vec,
-        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-        .load_dest = true
-    };
-    static const GVecGen3 bif_op = {
-        .fni8 = gen_bif_i64,
-        .fniv = gen_bif_vec,
-        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-        .load_dest = true
-    };
-
     int rd = extract32(insn, 0, 5);
     int rn = extract32(insn, 5, 5);
     int rm = extract32(insn, 16, 5);
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     return 0;
 }
 
-/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
-static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
-{
-    tcg_gen_and_i32(t, t, c);
-    tcg_gen_andc_i32(f, f, c);
-    tcg_gen_or_i32(dest, t, f);
-}
-
 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
 {
     switch (size) {
@@ -XXX,XX +XXX,XX @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
     return 1;
 }
 
+/*
+ * Expanders for VBitOps_VBIF, VBIT, VBSL.
+ */
+static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    tcg_gen_xor_i64(rn, rn, rm);
+    tcg_gen_and_i64(rn, rn, rd);
+    tcg_gen_xor_i64(rd, rm, rn);
+}
+
+static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    tcg_gen_xor_i64(rn, rn, rd);
+    tcg_gen_and_i64(rn, rn, rm);
+    tcg_gen_xor_i64(rd, rd, rn);
+}
+
+static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    tcg_gen_xor_i64(rn, rn, rd);
+    tcg_gen_andc_i64(rn, rn, rm);
+    tcg_gen_xor_i64(rd, rd, rn);
+}
+
+static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+    tcg_gen_xor_vec(vece, rn, rn, rm);
+    tcg_gen_and_vec(vece, rn, rn, rd);
+    tcg_gen_xor_vec(vece, rd, rm, rn);
+}
+
+static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+    tcg_gen_xor_vec(vece, rn, rn, rd);
+    tcg_gen_and_vec(vece, rn, rn, rm);
+    tcg_gen_xor_vec(vece, rd, rd, rn);
+}
+
+static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+    tcg_gen_xor_vec(vece, rn, rn, rd);
+    tcg_gen_andc_vec(vece, rn, rn, rm);
+    tcg_gen_xor_vec(vece, rd, rd, rn);
+}
+
+const GVecGen3 bsl_op = {
+    .fni8 = gen_bsl_i64,
+    .fniv = gen_bsl_vec,
+    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+    .load_dest = true
+};
+
+const GVecGen3 bit_op = {
+    .fni8 = gen_bit_i64,
+    .fniv = gen_bit_vec,
+    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+    .load_dest = true
+};
+
+const GVecGen3 bif_op = {
+    .fni8 = gen_bif_i64,
+    .fniv = gen_bif_vec,
+    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+    .load_dest = true
+};
+
+
 /* Translate a NEON data processing instruction.  Return nonzero if the
    instruction is invalid.
    We process data in a mixture of 32-bit and 64-bit chunks.
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
 {
     int op;
     int q;
-    int rd, rn, rm;
+    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
     int size;
     int shift;
     int pass;
     int count;
     int pairwise;
     int u;
+    int vec_size;
     uint32_t imm, mask;
     TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
     TCGv_ptr ptr1, ptr2, ptr3;
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     VFP_DREG_N(rn, insn);
     VFP_DREG_M(rm, insn);
     size = (insn >> 20) & 3;
+    vec_size = q ? 16 : 8;
+    rd_ofs = neon_reg_offset(rd, 0);
+    rn_ofs = neon_reg_offset(rn, 0);
+    rm_ofs = neon_reg_offset(rm, 0);
+
     if ((insn & (1 << 23)) == 0) {
         /* Three register same length.  */
         op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                                q, rd, rn, rm);
             }
             return 1;
+
+        case NEON_3R_LOGIC: /* Logic ops.  */
+            switch ((u << 2) | size) {
+            case 0: /* VAND */
+                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
+                                 vec_size, vec_size);
+                break;
+            case 1: /* VBIC */
+                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
+                                  vec_size, vec_size);
+                break;
+            case 2:
+                if (rn == rm) {
+                    /* VMOV */
+                    tcg_gen_gvec_mov(0, rd_ofs, rn_ofs, vec_size, vec_size);
+                } else {
+                    /* VORR */
+                    tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
+                                    vec_size, vec_size);
+                }
+                break;
+            case 3: /* VORN */
+                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
+                                 vec_size, vec_size);
+                break;
+            case 4: /* VEOR */
+                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
+                                 vec_size, vec_size);
+                break;
+            case 5: /* VBSL */
+                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
+                               vec_size, vec_size, &bsl_op);
+                break;
+            case 6: /* VBIT */
+                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
+                               vec_size, vec_size, &bit_op);
+                break;
+            case 7: /* VBIF */
+                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
+                               vec_size, vec_size, &bif_op);
+                break;
+            }
+            return 0;
         }
-        if (size == 3 && op != NEON_3R_LOGIC) {
+        if (size == 3) {
             /* 64-bit element instructions.  */
             for (pass = 0; pass < (q ? 2 : 1); pass++) {
                 neon_load_reg64(cpu_V0, rn + pass);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         case NEON_3R_VRHADD:
             GEN_NEON_INTEGER_OP(rhadd);
             break;
-        case NEON_3R_LOGIC: /* Logic ops.  */
-            switch ((u << 2) | size) {
-            case 0: /* VAND */
-                tcg_gen_and_i32(tmp, tmp, tmp2);
-                break;
-            case 1: /* BIC */
-                tcg_gen_andc_i32(tmp, tmp, tmp2);
-                break;
-            case 2: /* VORR */
-                tcg_gen_or_i32(tmp, tmp, tmp2);
-                break;
-            case 3: /* VORN */
-                tcg_gen_orc_i32(tmp, tmp, tmp2);
-                break;
-            case 4: /* VEOR */
-                tcg_gen_xor_i32(tmp, tmp, tmp2);
-                break;
-            case 5: /* VBSL */
-                tmp3 = neon_load_reg(rd, pass);
-                gen_neon_bsl(tmp, tmp, tmp2, tmp3);
-                tcg_temp_free_i32(tmp3);
-                break;
-            case 6: /* VBIT */
-                tmp3 = neon_load_reg(rd, pass);
-                gen_neon_bsl(tmp, tmp, tmp3, tmp2);
-                tcg_temp_free_i32(tmp3);
-                break;
-            case 7: /* VBIF */
-                tmp3 = neon_load_reg(rd, pass);
-                gen_neon_bsl(tmp, tmp3, tmp, tmp2);
-                tcg_temp_free_i32(tmp3);
-                break;
-            }
-            break;
         case NEON_3R_VHSUB:
             GEN_NEON_INTEGER_OP(hsub);
             break;
--
2.19.1


diff view generated by jsdifflib
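A quick standalone check of the three-xor formulation used by gen_bsl_i64 against the naive bitwise select d ? n : m; VBIT and VBIF are the same operation with operands permuted so the selector comes from a different register. Demo constants only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t d = 0xff00ff00ff00ff00ULL;   /* selector (destination register) */
    uint64_t n = 0x0123456789abcdefULL;
    uint64_t m = 0xfedcba9876543210ULL;

    uint64_t naive = (n & d) | (m & ~d);  /* bitwise select, the obvious way */
    /* gen_bsl_i64 sequence: rn ^= rm; rn &= rd; rd = rm ^ rn; */
    uint64_t bsl = m ^ ((n ^ m) & d);

    printf("bsl matches select: %d\n", bsl == naive);
    return 0;
}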
Add the v8M stack checks for the VLDM/VSTM
(aka VPUSH/VPOP) instructions. This code is currently
unreachable because we haven't yet implemented M profile
floating point support, but since the change is simple,
we add it now because otherwise we're likely to forget to
do it later.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002163556.10279-13-peter.maydell@linaro.org
---
 target/arm/translate.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
             if (insn & (1 << 24)) /* pre-decrement */
                 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
 
+            if (s->v8m_stackcheck && rn == 13 && w) {
+                /*
+                 * Here 'addr' is the lowest address we will store to,
+                 * and is either the old SP (if post-increment) or
+                 * the new SP (if pre-decrement). For post-increment
+                 * where the old value is below the limit and the new
+                 * value is above, it is UNKNOWN whether the limit check
+                 * triggers; we choose to trigger.
+                 */
+                gen_helper_v8m_stackcheck(cpu_env, addr);
+            }
+
             if (dp)
                 offset = 8;
             else
--
2.19.0


From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-10-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 29 ++++++++++-------------------
 1 file changed, 10 insertions(+), 19 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 break;
             }
             return 0;
+
+        case NEON_3R_VADD_VSUB:
+            if (u) {
+                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
+                                 vec_size, vec_size);
+            } else {
+                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
+                                 vec_size, vec_size);
+            }
+            return 0;
         }
         if (size == 3) {
             /* 64-bit element instructions.  */
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                                      cpu_V1, cpu_V0);
                 }
                 break;
-            case NEON_3R_VADD_VSUB:
-                if (u) {
-                    tcg_gen_sub_i64(CPU_V001);
-                } else {
-                    tcg_gen_add_i64(CPU_V001);
-                }
-                break;
             default:
                 abort();
             }
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             tmp2 = neon_load_reg(rd, pass);
             gen_neon_add(size, tmp, tmp2);
             break;
-        case NEON_3R_VADD_VSUB:
-            if (!u) { /* VADD */
-                gen_neon_add(size, tmp, tmp2);
-            } else { /* VSUB */
-                switch (size) {
-                case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
-                case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
-                case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
-                default: abort();
-                }
-            }
-            break;
         case NEON_3R_VTST_VCEQ:
             if (!u) { /* VTST */
                 switch (size) {
--
2.19.1


diff view generated by jsdifflib
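A standalone model of the rule the VLDM/VSTM hunk implements: the helper is handed the lowest address the instruction will touch, the old SP for post-increment or SP minus the transfer size for pre-decrement, and the limit check fires before any access below the limit happens. All numbers here are illustrative, not from QEMU:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t sp = 0x20001000, limit = 0x20000ff0;   /* demo values */
    int nregs = 8, pre_decrement = 1;

    /* lowest address the transfer will store to or load from */
    uint32_t addr = pre_decrement ? sp - nregs * 4u : sp;

    if (addr < limit) {
        printf("stack limit violation: 0x%08x < 0x%08x\n",
               (unsigned)addr, (unsigned)limit);
    } else {
        printf("access at 0x%08x is above the limit\n", (unsigned)addr);
    }
    return 0;
}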
Add v8M stack checks for the 16-bit Thumb push/pop
encodings: STMDB, STMFD, LDM, LDMIA, LDMFD.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002163556.10279-12-peter.maydell@linaro.org
---
 target/arm/translate.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
         store_reg(s, rd, tmp);
         break;
     case 4: case 5: case 0xc: case 0xd:
-        /* push/pop */
+        /*
+         * 0b1011_x10x_xxxx_xxxx
+         *  - push/pop
+         */
         addr = load_reg(s, 13);
         if (insn & (1 << 8))
             offset = 4;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
         if ((insn & (1 << 11)) == 0) {
             tcg_gen_addi_i32(addr, addr, -offset);
         }
+
+        if (s->v8m_stackcheck) {
+            /*
+             * Here 'addr' is the lower of "old SP" and "new SP";
+             * if this is a pop that starts below the limit and ends
+             * above it, it is UNKNOWN whether the limit check triggers;
+             * we choose to trigger.
+             */
+            gen_helper_v8m_stackcheck(cpu_env, addr);
+        }
+
         for (i = 0; i < 8; i++) {
             if (insn & (1 << i)) {
                 if (insn & (1 << 11)) {
--
2.19.0


From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-11-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             tcg_temp_free_ptr(ptr1);
             tcg_temp_free_ptr(ptr2);
             break;
+
+        case NEON_2RM_VMVN:
+            tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
+            break;
+        case NEON_2RM_VNEG:
+            tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
+            break;
+
         default:
         elementwise:
             for (pass = 0; pass < (q ? 4 : 2); pass++) {
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 case NEON_2RM_VCNT:
                     gen_helper_neon_cnt_u8(tmp, tmp);
                     break;
-                case NEON_2RM_VMVN:
-                    tcg_gen_not_i32(tmp, tmp);
-                    break;
                 case NEON_2RM_VQABS:
                     switch (size) {
                     case 0:
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     default: abort();
                     }
                     break;
-                case NEON_2RM_VNEG:
-                    tmp2 = tcg_const_i32(0);
-                    gen_neon_rsb(size, tmp, tmp2);
-                    tcg_temp_free_i32(tmp2);
-                    break;
                 case NEON_2RM_VCGT0_F:
                 {
                     TCGv_ptr fpstatus = get_fpstatus_ptr(1);
--
2.19.1


diff view generated by jsdifflib
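What tcg_gen_gvec_neg(size, ...) amounts to per element: each (1 << size)-byte lane of the vector is negated independently. A tiny standalone model with 16-bit lanes (size == 1) and arbitrary demo contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int16_t q[4] = {1, -2, 300, -400};   /* one 8-byte vector of halfwords */

    for (int e = 0; e < 4; e++) {
        q[e] = -q[e];                    /* per-element negation */
    }
    for (int e = 0; e < 4; e++) {
        printf("%d ", q[e]);
    }
    printf("\n");
    return 0;
}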
Add v8M stack checks for the instructions in the T32
"load/store single" encoding class: these are the
"immediate pre-indexed" and "immediate, post-indexed"
LDR and STR instructions.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002163556.10279-11-peter.maydell@linaro.org
---
target/arm/translate.c | 23 ++++++++++++++++++++++-
1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
imm = -imm;
/* Fall through. */
case 0xf: /* Pre-increment. */
- tcg_gen_addi_i32(addr, addr, imm);
writeback = 1;
break;
default:
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)

issinfo = writeback ? ISSInvalid : rs;

+ if (s->v8m_stackcheck && rn == 13 && writeback) {
+ /*
+ * Stackcheck. Here we know 'addr' is the current SP;
+ * if imm is +ve we're moving SP up, else down. It is
+ * UNKNOWN whether the limit check triggers when SP starts
+ * below the limit and ends up above it; we chose to do so.
+ */
+ if ((int32_t)imm < 0) {
+ TCGv_i32 newsp = tcg_temp_new_i32();
+
+ tcg_gen_addi_i32(newsp, addr, imm);
+ gen_helper_v8m_stackcheck(cpu_env, newsp);
+ tcg_temp_free_i32(newsp);
+ } else {
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+ }
+
+ if (writeback && !postinc) {
+ tcg_gen_addi_i32(addr, addr, imm);
+ }
+
if (insn & (1 << 20)) {
/* Load. */
tmp = tcg_temp_new_i32();
--
2.19.0

From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-12-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 31 +++++++++++++++----------------
1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
vec_size, vec_size);
}
return 0;
+
+ case NEON_3R_VMUL: /* VMUL */
+ if (u) {
+ /* Polynomial case allows only P8 and is handled below. */
+ if (size != 0) {
+ return 1;
+ }
+ } else {
+ tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
+ return 0;
+ }
+ break;
}
if (size == 3) {
/* 64-bit element instructions. */
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
return 1;
}
break;
- case NEON_3R_VMUL:
- if (u && (size != 0)) {
- /* UNDEF on invalid size for polynomial subcase */
- return 1;
- }
- break;
case NEON_3R_VFM_VQRDMLSH:
if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
return 1;
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
break;
case NEON_3R_VMUL:
- if (u) { /* polynomial */
- gen_helper_neon_mul_p8(tmp, tmp, tmp2);
- } else { /* Integer */
- switch (size) {
- case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
- case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
- default: abort();
- }
- }
+ /* VMUL.P8; other cases already eliminated. */
+ gen_helper_neon_mul_p8(tmp, tmp, tmp2);
break;
case NEON_3R_VPMAX:
GEN_NEON_INTEGER_OP(pmax);
--
2.19.1

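For reference, the VMUL.P8 operation that the patch above leaves with
gen_helper_neon_mul_p8() is a polynomial (carry-less) multiply over GF(2).
A one-lane C model, written purely for exposition and not taken from the
QEMU helpers:

    #include <stdint.h>

    /* Carry-less 8-bit multiply: partial products are combined with XOR
     * rather than ADD, and the result is truncated to 8 bits per lane. */
    static uint8_t pmul8(uint8_t a, uint8_t b)
    {
        uint8_t r = 0;
        for (int i = 0; i < 8; i++) {
            if (b & (1u << i)) {
                r ^= (uint8_t)(a << i);
            }
        }
        return r;
    }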
Add the v8M stack checks for:
 * LDRD (immediate)
 * STRD (immediate)

Loads and stores are more complicated than ADD/SUB/MOV, because we
must ensure that memory accesses below the stack limit are not
performed, so we can't simply do the check when we actually update
SP.

For these instructions, if the stack limit check triggers
we must not:
 * perform any memory access below the SP limit
 * update PC, SP or the load/store base register
but it is IMPDEF whether we:
 * perform any accesses above or equal to the SP limit
 * update destination registers for loads

For QEMU we choose to always check the limit before doing any other
part of the load or store, so we won't update any registers or
perform any memory accesses.

It is UNKNOWN whether the limit check triggers for a load or store
where the initial SP value is below the limit and one of the stores
would be below the limit, but the writeback moves SP to above the
limit. For QEMU we choose to trigger the check in this situation.

Note that limit checks happen only for loads and stores which update
SP via writeback; they do not happen for loads and stores which
simply use SP as a base register.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181002163556.10279-9-peter.maydell@linaro.org
---
target/arm/translate.c | 27 +++++++++++++++++++++++++--
1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
* 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
* - load/store dual (pre-indexed)
*/
+ bool wback = extract32(insn, 21, 1);
+
if (rn == 15) {
if (insn & (1 << 21)) {
/* UNPREDICTABLE */
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
addr = load_reg(s, rn);
}
offset = (insn & 0xff) * 4;
- if ((insn & (1 << 23)) == 0)
+ if ((insn & (1 << 23)) == 0) {
offset = -offset;
+ }
+
+ if (s->v8m_stackcheck && rn == 13 && wback) {
+ /*
+ * Here 'addr' is the current SP; if offset is +ve we're
+ * moving SP up, else down. It is UNKNOWN whether the limit
+ * check triggers when SP starts below the limit and ends
+ * up above it; check whichever of the current and final
+ * SP is lower, so QEMU will trigger in that situation.
+ */
+ if ((int32_t)offset < 0) {
+ TCGv_i32 newsp = tcg_temp_new_i32();
+
+ tcg_gen_addi_i32(newsp, addr, offset);
+ gen_helper_v8m_stackcheck(cpu_env, newsp);
+ tcg_temp_free_i32(newsp);
+ } else {
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+ }
+
if (insn & (1 << 24)) {
tcg_gen_addi_i32(addr, addr, offset);
offset = 0;
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
- if (insn & (1 << 21)) {
+ if (wback) {
/* Base writeback. */
tcg_gen_addi_i32(addr, addr, offset - 4);
store_reg(s, rn, addr);
--
2.19.0

From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-13-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.c | 70 +++++++++++++++++++++++++++++-------------
1 file changed, 48 insertions(+), 22 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
size--;
}
shift = (insn >> 16) & ((1 << (3 + size)) - 1);
- /* To avoid excessive duplication of ops we implement shift
- by immediate using the variable shift operations. */
if (op < 8) {
/* Shift by immediate:
VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
/* Right shifts are encoded as N - shift, where N is the
element size in bits. */
- if (op <= 4)
+ if (op <= 4) {
shift = shift - (1 << (size + 3));
+ }
+
+ switch (op) {
+ case 0: /* VSHR */
+ /* Right shift comes here negative. */
+ shift = -shift;
+ /* Shifts larger than the element size are architecturally
+ * valid. Unsigned results in all zeros; signed results
+ * in all sign bits.
+ */
+ if (!u) {
+ tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
+ MIN(shift, (8 << size) - 1),
+ vec_size, vec_size);
+ } else if (shift >= 8 << size) {
+ tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
+ } else {
+ tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
+ vec_size, vec_size);
+ }
+ return 0;
+
+ case 5: /* VSHL, VSLI */
+ if (!u) { /* VSHL */
+ /* Shifts larger than the element size are
+ * architecturally valid and results in zero.
+ */
+ if (shift >= 8 << size) {
+ tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
+ } else {
+ tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
+ vec_size, vec_size);
+ }
+ return 0;
+ }
+ break;
+ }
+
if (size == 3) {
count = q + 1;
} else {
count = q ? 4: 2;
}
- switch (size) {
- case 0:
- imm = (uint8_t) shift;
- imm |= imm << 8;
- imm |= imm << 16;
- break;
- case 1:
- imm = (uint16_t) shift;
- imm |= imm << 16;
- break;
- case 2:
- case 3:
- imm = shift;
- break;
- default:
- abort();
- }
+
+ /* To avoid excessive duplication of ops we implement shift
+ * by immediate using the variable shift operations.
+ */
+ imm = dup_const(size, shift);

for (pass = 0; pass < count; pass++) {
if (size == 3) {
neon_load_reg64(cpu_V0, rm + pass);
tcg_gen_movi_i64(cpu_V1, imm);
switch (op) {
- case 0: /* VSHR */
case 1: /* VSRA */
if (u)
gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
cpu_V0, cpu_V1);
}
break;
+ default:
+ g_assert_not_reached();
}
if (op == 1 || op == 3) {
/* Accumulate. */
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, imm);
switch (op) {
- case 0: /* VSHR */
case 1: /* VSRA */
GEN_NEON_INTEGER_OP(shl);
break;
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
case 7: /* VQSHL */
GEN_NEON_INTEGER_OP_ENV(qshl);
break;
+ default:
+ g_assert_not_reached();
}
tcg_temp_free_i32(tmp2);
--
2.19.1

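The ordering requirements in the LDRD/STRD commit message above can be
sketched in C. All three callees here are hypothetical stand-ins invented
for exposition, not QEMU functions; the sketch only illustrates that the
limit check happens before any memory access or register update, using the
lower of the old and new SP.

    #include <stdint.h>

    static void do_check(uint32_t sp) { (void)sp; } /* faults on failure */
    static void do_loads_or_stores(void) { }
    static void do_writeback(uint32_t newsp) { (void)newsp; }

    static void ldrd_strd_with_stackcheck(uint32_t sp, int32_t imm)
    {
        uint32_t newsp = sp + (uint32_t)imm;

        /* Check whichever of the current and final SP is lower... */
        do_check(imm < 0 ? newsp : sp);
        /* ...and only then perform the accesses and the writeback. */
        do_loads_or_stores();
        do_writeback(newsp);
    }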
New patch
From: Richard Henderson <richard.henderson@linaro.org>

Move ssra_op and usra_op expanders from translate-a64.c.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-14-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.h | 2 +
target/arm/translate-a64.c | 106 ----------------------------
target/arm/translate.c | 139 ++++++++++++++++++++++++++++++++++---
3 files changed, 130 insertions(+), 117 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 get_ahp_flag(void)
extern const GVecGen3 bsl_op;
extern const GVecGen3 bit_op;
extern const GVecGen3 bif_op;
+extern const GVecGen2i ssra_op[4];
+extern const GVecGen2i usra_op[4];

/*
 * Forward to the isar_feature_* tests given a DisasContext pointer.
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
}
}

-static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_sar8i_i64(a, a, shift);
- tcg_gen_vec_add8_i64(d, d, a);
-}
-
-static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_sar16i_i64(a, a, shift);
- tcg_gen_vec_add16_i64(d, d, a);
-}
-
-static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
- tcg_gen_sari_i32(a, a, shift);
- tcg_gen_add_i32(d, d, a);
-}
-
-static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_sari_i64(a, a, shift);
- tcg_gen_add_i64(d, d, a);
-}
-
-static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
- tcg_gen_sari_vec(vece, a, a, sh);
- tcg_gen_add_vec(vece, d, d, a);
-}
-
-static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_shr8i_i64(a, a, shift);
- tcg_gen_vec_add8_i64(d, d, a);
-}
-
-static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_shr16i_i64(a, a, shift);
- tcg_gen_vec_add16_i64(d, d, a);
-}
-
-static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
- tcg_gen_shri_i32(a, a, shift);
- tcg_gen_add_i32(d, d, a);
-}
-
-static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_shri_i64(a, a, shift);
- tcg_gen_add_i64(d, d, a);
-}
-
-static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
- tcg_gen_shri_vec(vece, a, a, sh);
- tcg_gen_add_vec(vece, d, d, a);
-}
-
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
uint64_t mask = dup_const(MO_8, 0xff >> shift);
@@ -XXX,XX +XXX,XX @@ static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
int immh, int immb, int opcode, int rn, int rd)
{
- static const GVecGen2i ssra_op[4] = {
- { .fni8 = gen_ssra8_i64,
- .fniv = gen_ssra_vec,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_8 },
- { .fni8 = gen_ssra16_i64,
- .fniv = gen_ssra_vec,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_16 },
- { .fni4 = gen_ssra32_i32,
- .fniv = gen_ssra_vec,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_32 },
- { .fni8 = gen_ssra64_i64,
- .fniv = gen_ssra_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_64 },
- };
- static const GVecGen2i usra_op[4] = {
- { .fni8 = gen_usra8_i64,
- .fniv = gen_usra_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_8, },
- { .fni8 = gen_usra16_i64,
- .fniv = gen_usra_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_16, },
- { .fni4 = gen_usra32_i32,
- .fniv = gen_usra_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_32, },
- { .fni8 = gen_usra64_i64,
- .fniv = gen_usra_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_64, },
- };
static const GVecGen2i sri_op[4] = {
{ .fni8 = gen_shr8_ins_i64,
.fniv = gen_shr_ins_vec,
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ const GVecGen3 bif_op = {
.load_dest = true
};

+static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_sar8i_i64(a, a, shift);
+ tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_sar16i_i64(a, a, shift);
+ tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_sari_i32(a, a, shift);
+ tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_sari_i64(a, a, shift);
+ tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ tcg_gen_sari_vec(vece, a, a, sh);
+ tcg_gen_add_vec(vece, d, d, a);
+}
+
+const GVecGen2i ssra_op[4] = {
+ { .fni8 = gen_ssra8_i64,
+ .fniv = gen_ssra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_8 },
+ { .fni8 = gen_ssra16_i64,
+ .fniv = gen_ssra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_16 },
+ { .fni4 = gen_ssra32_i32,
+ .fniv = gen_ssra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_32 },
+ { .fni8 = gen_ssra64_i64,
+ .fniv = gen_ssra_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_64 },
+};
+
+static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_shr8i_i64(a, a, shift);
+ tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_shr16i_i64(a, a, shift);
+ tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_shri_i32(a, a, shift);
+ tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_shri_i64(a, a, shift);
+ tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ tcg_gen_shri_vec(vece, a, a, sh);
+ tcg_gen_add_vec(vece, d, d, a);
+}
+
+const GVecGen2i usra_op[4] = {
+ { .fni8 = gen_usra8_i64,
+ .fniv = gen_usra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_8, },
+ { .fni8 = gen_usra16_i64,
+ .fniv = gen_usra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_16, },
+ { .fni4 = gen_usra32_i32,
+ .fniv = gen_usra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_32, },
+ { .fni8 = gen_usra64_i64,
+ .fniv = gen_usra_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_64, },
+};

/* Translate a NEON data processing instruction. Return nonzero if the
instruction is invalid.
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
return 0;

+ case 1: /* VSRA */
+ /* Right shift comes here negative. */
+ shift = -shift;
+ /* Shifts larger than the element size are architecturally
+ * valid. Unsigned results in all zeros; signed results
+ * in all sign bits.
+ */
+ if (!u) {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+ MIN(shift, (8 << size) - 1),
+ &ssra_op[size]);
+ } else if (shift >= 8 << size) {
+ /* rd += 0 */
+ } else {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+ shift, &usra_op[size]);
+ }
+ return 0;
+
case 5: /* VSHL, VSLI */
if (!u) { /* VSHL */
/* Shifts larger than the element size are
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
neon_load_reg64(cpu_V0, rm + pass);
tcg_gen_movi_i64(cpu_V1, imm);
switch (op) {
- case 1: /* VSRA */
- if (u)
- gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
- else
- gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
- break;
case 2: /* VRSHR */
case 3: /* VRSRA */
if (u)
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
default:
g_assert_not_reached();
}
- if (op == 1 || op == 3) {
+ if (op == 3) {
/* Accumulate. */
neon_load_reg64(cpu_V1, rd + pass);
tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, imm);
switch (op) {
- case 1: /* VSRA */
- GEN_NEON_INTEGER_OP(shl);
- break;
case 2: /* VRSHR */
case 3: /* VRSRA */
GEN_NEON_INTEGER_OP(rshl);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
tcg_temp_free_i32(tmp2);

- if (op == 1 || op == 3) {
+ if (op == 3) {
/* Accumulate. */
tmp2 = neon_load_reg(rd, pass);
gen_neon_add(size, tmp, tmp2);
--
2.19.1

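The accumulating shifts being vectorized above have straightforward
elementwise semantics. A reference model for the 8-bit lane size, for
exposition only (it assumes the usual arithmetic behaviour of signed right
shifts, which the C standard leaves implementation-defined):

    #include <stdint.h>

    /* SSRA: d[i] += a[i] >> shift, with a signed (arithmetic) shift. */
    static void ssra_ref_s8(int8_t *d, const int8_t *a, int shift, int n)
    {
        for (int i = 0; i < n; i++) {
            d[i] += a[i] >> shift;
        }
    }

    /* USRA: d[i] += a[i] >> shift, with an unsigned (logical) shift. */
    static void usra_ref_u8(uint8_t *d, const uint8_t *a, int shift, int n)
    {
        for (int i = 0; i < n; i++) {
            d[i] += a[i] >> shift;
        }
    }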
From: Richard Henderson <richard.henderson@linaro.org>

We can choose the endianness at translation time, rather than
re-computing it at execution time.

Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181005175350.30752-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-sve.h | 48 +++++++++++++++++--------
target/arm/sve_helper.c | 11 ++++--
target/arm/translate-sve.c | 72 +++++++++++++++++++++++++++++---------
3 files changed, 96 insertions(+), 35 deletions(-)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_st2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)

-DEF_HELPER_FLAGS_4(sve_st1hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
-DEF_HELPER_FLAGS_4(sve_st2hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
-DEF_HELPER_FLAGS_4(sve_st3hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
-DEF_HELPER_FLAGS_4(sve_st4hh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)

-DEF_HELPER_FLAGS_4(sve_st1ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
-DEF_HELPER_FLAGS_4(sve_st2ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
-DEF_HELPER_FLAGS_4(sve_st3ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
-DEF_HELPER_FLAGS_4(sve_st4ss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)

-DEF_HELPER_FLAGS_4(sve_st1dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
-DEF_HELPER_FLAGS_4(sve_st2dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
-DEF_HELPER_FLAGS_4(sve_st3dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
-DEF_HELPER_FLAGS_4(sve_st4dd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)

DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)

-DEF_HELPER_FLAGS_4(sve_st1hs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
-DEF_HELPER_FLAGS_4(sve_st1hd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hs_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hs_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)

-DEF_HELPER_FLAGS_4(sve_st1sd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1sd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)

DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ void __attribute__((flatten)) HELPER(sve_st##N##NAME##_r) \
}

#define DO_STN_2(N, NAME, ESIZE, MSIZE) \
-void __attribute__((flatten)) HELPER(sve_st##N##NAME##_r) \
+void __attribute__((flatten)) HELPER(sve_st##N##NAME##_le_r) \
(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \
{ \
sve_st##N##_r(env, vg, addr, desc, GETPC(), ESIZE, MSIZE, \
- arm_cpu_data_is_big_endian(env) \
- ? sve_st1##NAME##_be_tlb : sve_st1##NAME##_le_tlb); \
+ sve_st1##NAME##_le_tlb); \
+} \
+void __attribute__((flatten)) HELPER(sve_st##N##NAME##_be_r) \
+ (CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \
+{ \
+ sve_st##N##_r(env, vg, addr, desc, GETPC(), ESIZE, MSIZE, \
+ sve_st1##NAME##_be_tlb); \
}

DO_STN_1(1, bb, 1)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)
static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
int msz, int esz, int nreg)
{
- static gen_helper_gvec_mem * const fn_single[4][4] = {
- { gen_helper_sve_st1bb_r, gen_helper_sve_st1bh_r,
- gen_helper_sve_st1bs_r, gen_helper_sve_st1bd_r },
- { NULL, gen_helper_sve_st1hh_r,
- gen_helper_sve_st1hs_r, gen_helper_sve_st1hd_r },
- { NULL, NULL,
- gen_helper_sve_st1ss_r, gen_helper_sve_st1sd_r },
- { NULL, NULL, NULL, gen_helper_sve_st1dd_r },
+ static gen_helper_gvec_mem * const fn_single[2][4][4] = {
+ { { gen_helper_sve_st1bb_r,
+ gen_helper_sve_st1bh_r,
+ gen_helper_sve_st1bs_r,
+ gen_helper_sve_st1bd_r },
+ { NULL,
+ gen_helper_sve_st1hh_le_r,
+ gen_helper_sve_st1hs_le_r,
+ gen_helper_sve_st1hd_le_r },
+ { NULL, NULL,
+ gen_helper_sve_st1ss_le_r,
+ gen_helper_sve_st1sd_le_r },
+ { NULL, NULL, NULL,
+ gen_helper_sve_st1dd_le_r } },
+ { { gen_helper_sve_st1bb_r,
+ gen_helper_sve_st1bh_r,
+ gen_helper_sve_st1bs_r,
+ gen_helper_sve_st1bd_r },
+ { NULL,
+ gen_helper_sve_st1hh_be_r,
+ gen_helper_sve_st1hs_be_r,
+ gen_helper_sve_st1hd_be_r },
+ { NULL, NULL,
+ gen_helper_sve_st1ss_be_r,
+ gen_helper_sve_st1sd_be_r },
+ { NULL, NULL, NULL,
+ gen_helper_sve_st1dd_be_r } },
};
- static gen_helper_gvec_mem * const fn_multiple[3][4] = {
- { gen_helper_sve_st2bb_r, gen_helper_sve_st2hh_r,
- gen_helper_sve_st2ss_r, gen_helper_sve_st2dd_r },
- { gen_helper_sve_st3bb_r, gen_helper_sve_st3hh_r,
- gen_helper_sve_st3ss_r, gen_helper_sve_st3dd_r },
- { gen_helper_sve_st4bb_r, gen_helper_sve_st4hh_r,
- gen_helper_sve_st4ss_r, gen_helper_sve_st4dd_r },
+ static gen_helper_gvec_mem * const fn_multiple[2][3][4] = {
+ { { gen_helper_sve_st2bb_r,
+ gen_helper_sve_st2hh_le_r,
+ gen_helper_sve_st2ss_le_r,
+ gen_helper_sve_st2dd_le_r },
+ { gen_helper_sve_st3bb_r,
+ gen_helper_sve_st3hh_le_r,
+ gen_helper_sve_st3ss_le_r,
+ gen_helper_sve_st3dd_le_r },
+ { gen_helper_sve_st4bb_r,
+ gen_helper_sve_st4hh_le_r,
+ gen_helper_sve_st4ss_le_r,
+ gen_helper_sve_st4dd_le_r } },
+ { { gen_helper_sve_st2bb_r,
+ gen_helper_sve_st2hh_be_r,
+ gen_helper_sve_st2ss_be_r,
+ gen_helper_sve_st2dd_be_r },
+ { gen_helper_sve_st3bb_r,
+ gen_helper_sve_st3hh_be_r,
+ gen_helper_sve_st3ss_be_r,
+ gen_helper_sve_st3dd_be_r },
+ { gen_helper_sve_st4bb_r,
+ gen_helper_sve_st4hh_be_r,
+ gen_helper_sve_st4ss_be_r,
+ gen_helper_sve_st4dd_be_r } },
};
gen_helper_gvec_mem *fn;
+ int be = s->be_data == MO_BE;

if (nreg == 0) {
/* ST1 */
- fn = fn_single[msz][esz];
+ fn = fn_single[be][msz][esz];
} else {
/* ST2, ST3, ST4 -- msz == esz, enforced by encoding */
assert(msz == esz);
- fn = fn_multiple[nreg - 1][msz];
+ fn = fn_multiple[be][nreg - 1][msz];
}
assert(fn != NULL);
do_mem_zpa(s, zt, pg, addr, fn);
--
2.19.0

From: Richard Henderson <richard.henderson@linaro.org>

Move shi_op and sli_op expanders from translate-a64.c.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-15-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.h | 2 +
target/arm/translate-a64.c | 152 +----------------------
target/arm/translate.c | 244 ++++++++++++++++++++++++++-----------
3 files changed, 179 insertions(+), 219 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ extern const GVecGen3 bit_op;
extern const GVecGen3 bif_op;
extern const GVecGen2i ssra_op[4];
extern const GVecGen2i usra_op[4];
+extern const GVecGen2i sri_op[4];
+extern const GVecGen2i sli_op[4];

/*
 * Forward to the isar_feature_* tests given a DisasContext pointer.
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
}
}

-static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- uint64_t mask = dup_const(MO_8, 0xff >> shift);
- TCGv_i64 t = tcg_temp_new_i64();
-
- tcg_gen_shri_i64(t, a, shift);
- tcg_gen_andi_i64(t, t, mask);
- tcg_gen_andi_i64(d, d, ~mask);
- tcg_gen_or_i64(d, d, t);
- tcg_temp_free_i64(t);
-}
-
-static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- uint64_t mask = dup_const(MO_16, 0xffff >> shift);
- TCGv_i64 t = tcg_temp_new_i64();
-
- tcg_gen_shri_i64(t, a, shift);
- tcg_gen_andi_i64(t, t, mask);
- tcg_gen_andi_i64(d, d, ~mask);
- tcg_gen_or_i64(d, d, t);
- tcg_temp_free_i64(t);
-}
-
-static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
- tcg_gen_shri_i32(a, a, shift);
- tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
-}
-
-static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_shri_i64(a, a, shift);
- tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
-}
-
-static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
- uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_temp_new_vec_matching(d);
-
- tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
- tcg_gen_shri_vec(vece, t, a, sh);
- tcg_gen_and_vec(vece, d, d, m);
- tcg_gen_or_vec(vece, d, d, t);
-
- tcg_temp_free_vec(t);
- tcg_temp_free_vec(m);
-}
-
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
int immh, int immb, int opcode, int rn, int rd)
{
- static const GVecGen2i sri_op[4] = {
- { .fni8 = gen_shr8_ins_i64,
- .fniv = gen_shr_ins_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_8 },
- { .fni8 = gen_shr16_ins_i64,
- .fniv = gen_shr_ins_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_16 },
- { .fni4 = gen_shr32_ins_i32,
- .fniv = gen_shr_ins_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_32 },
- { .fni8 = gen_shr64_ins_i64,
- .fniv = gen_shr_ins_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_64 },
- };
-
int size = 32 - clz32(immh) - 1;
int immhb = immh << 3 | immb;
int shift = 2 * (8 << size) - immhb;
@@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
clear_vec_high(s, is_q, rd);
}

-static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- uint64_t mask = dup_const(MO_8, 0xff << shift);
- TCGv_i64 t = tcg_temp_new_i64();
-
- tcg_gen_shli_i64(t, a, shift);
- tcg_gen_andi_i64(t, t, mask);
- tcg_gen_andi_i64(d, d, ~mask);
- tcg_gen_or_i64(d, d, t);
- tcg_temp_free_i64(t);
-}
-
-static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- uint64_t mask = dup_const(MO_16, 0xffff << shift);
- TCGv_i64 t = tcg_temp_new_i64();
-
- tcg_gen_shli_i64(t, a, shift);
- tcg_gen_andi_i64(t, t, mask);
- tcg_gen_andi_i64(d, d, ~mask);
- tcg_gen_or_i64(d, d, t);
- tcg_temp_free_i64(t);
-}
-
-static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
- tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
-}
-
-static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
-}
-
-static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
- uint64_t mask = (1ull << sh) - 1;
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_temp_new_vec_matching(d);
-
- tcg_gen_dupi_vec(vece, m, mask);
- tcg_gen_shli_vec(vece, t, a, sh);
- tcg_gen_and_vec(vece, d, d, m);
- tcg_gen_or_vec(vece, d, d, t);
-
- tcg_temp_free_vec(t);
- tcg_temp_free_vec(m);
-}
-
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
int immh, int immb, int opcode, int rn, int rd)
{
- static const GVecGen2i shi_op[4] = {
- { .fni8 = gen_shl8_ins_i64,
- .fniv = gen_shl_ins_vec,
- .opc = INDEX_op_shli_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_8 },
- { .fni8 = gen_shl16_ins_i64,
- .fniv = gen_shl_ins_vec,
- .opc = INDEX_op_shli_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_16 },
- { .fni4 = gen_shl32_ins_i32,
- .fniv = gen_shl_ins_vec,
- .opc = INDEX_op_shli_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_32 },
- { .fni8 = gen_shl64_ins_i64,
- .fniv = gen_shl_ins_vec,
- .opc = INDEX_op_shli_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_64 },
- };
int size = 32 - clz32(immh) - 1;
int immhb = immh << 3 | immb;
int shift = immhb - (8 << size);
@@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
}

if (insert) {
- gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]);
+ gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]);
} else {
gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
}
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ const GVecGen2i usra_op[4] = {
.vece = MO_64, },
};

+static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_8, 0xff >> shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_16, 0xffff >> shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_shri_i32(a, a, shift);
+ tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
+}
+
+static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_shri_i64(a, a, shift);
+ tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
+}
+
+static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ if (sh == 0) {
+ tcg_gen_mov_vec(d, a);
+ } else {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
+ tcg_gen_shri_vec(vece, t, a, sh);
+ tcg_gen_and_vec(vece, d, d, m);
+ tcg_gen_or_vec(vece, d, d, t);
+
+ tcg_temp_free_vec(t);
+ tcg_temp_free_vec(m);
+ }
+}
+
+const GVecGen2i sri_op[4] = {
+ { .fni8 = gen_shr8_ins_i64,
+ .fniv = gen_shr_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_8 },
+ { .fni8 = gen_shr16_ins_i64,
+ .fniv = gen_shr_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_16 },
+ { .fni4 = gen_shr32_ins_i32,
+ .fniv = gen_shr_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_32 },
+ { .fni8 = gen_shr64_ins_i64,
+ .fniv = gen_shr_ins_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_64 },
+};
+
+static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_8, 0xff << shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shli_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_16, 0xffff << shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shli_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
+}
+
+static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
+}
+
+static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ if (sh == 0) {
+ tcg_gen_mov_vec(d, a);
+ } else {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
+ tcg_gen_shli_vec(vece, t, a, sh);
+ tcg_gen_and_vec(vece, d, d, m);
+ tcg_gen_or_vec(vece, d, d, t);
+
+ tcg_temp_free_vec(t);
+ tcg_temp_free_vec(m);
+ }
+}
+
+const GVecGen2i sli_op[4] = {
+ { .fni8 = gen_shl8_ins_i64,
+ .fniv = gen_shl_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shli_vec,
+ .vece = MO_8 },
+ { .fni8 = gen_shl16_ins_i64,
+ .fniv = gen_shl_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shli_vec,
+ .vece = MO_16 },
+ { .fni4 = gen_shl32_ins_i32,
+ .fniv = gen_shl_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shli_vec,
+ .vece = MO_32 },
+ { .fni8 = gen_shl64_ins_i64,
+ .fniv = gen_shl_ins_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opc = INDEX_op_shli_vec,
+ .vece = MO_64 },
+};
+
/* Translate a NEON data processing instruction. Return nonzero if the
instruction is invalid.
We process data in a mixture of 32-bit and 64-bit chunks.
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
int pairwise;
int u;
int vec_size;
- uint32_t imm, mask;
+ uint32_t imm;
TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
TCGv_ptr ptr1, ptr2, ptr3;
TCGv_i64 tmp64;
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
return 0;

+ case 4: /* VSRI */
+ if (!u) {
+ return 1;
+ }
+ /* Right shift comes here negative. */
+ shift = -shift;
+ /* Shift out of range leaves destination unchanged. */
+ if (shift < 8 << size) {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+ shift, &sri_op[size]);
+ }
+ return 0;
+
case 5: /* VSHL, VSLI */
- if (!u) { /* VSHL */
+ if (u) { /* VSLI */
+ /* Shift out of range leaves destination unchanged. */
+ if (shift < 8 << size) {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
+ vec_size, shift, &sli_op[size]);
+ }
+ } else { /* VSHL */
/* Shifts larger than the element size are
* architecturally valid and results in zero.
*/
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
vec_size, vec_size);
}
- return 0;
}
- break;
+ return 0;
}

if (size == 3) {
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
else
gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
break;
- case 4: /* VSRI */
- case 5: /* VSHL, VSLI */
- gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
- break;
case 6: /* VQSHLU */
gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
cpu_V0, cpu_V1);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
/* Accumulate. */
neon_load_reg64(cpu_V1, rd + pass);
tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
- } else if (op == 4 || (op == 5 && u)) {
- /* Insert */
- neon_load_reg64(cpu_V1, rd + pass);
- uint64_t mask;
- if (shift < -63 || shift > 63) {
- mask = 0;
- } else {
- if (op == 4) {
- mask = 0xffffffffffffffffull >> -shift;
- } else {
- mask = 0xffffffffffffffffull << shift;
- }
- }
- tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
- tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
}
neon_store_reg64(cpu_V0, rd + pass);
} else { /* size < 3 */
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
case 3: /* VRSRA */
GEN_NEON_INTEGER_OP(rshl);
break;
- case 4: /* VSRI */
- case 5: /* VSHL, VSLI */
- switch (size) {
- case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
- default: abort();
- }
- break;
case 6: /* VQSHLU */
switch (size) {
case 0:
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tmp2 = neon_load_reg(rd, pass);
gen_neon_add(size, tmp, tmp2);
tcg_temp_free_i32(tmp2);
- } else if (op == 4 || (op == 5 && u)) {
- /* Insert */
- switch (size) {
- case 0:
- if (op == 4)
- mask = 0xff >> -shift;
- else
- mask = (uint8_t)(0xff << shift);
- mask |= mask << 8;
- mask |= mask << 16;
- break;
- case 1:
- if (op == 4)
- mask = 0xffff >> -shift;
- else
- mask = (uint16_t)(0xffff << shift);
- mask |= mask << 16;
- break;
- case 2:
- if (shift < -31 || shift > 31) {
- mask = 0;
- } else {
- if (op == 4)
- mask = 0xffffffffu >> -shift;
- else
- mask = 0xffffffffu << shift;
- }
- break;
- default:
- abort();
- }
- tmp2 = neon_load_reg(rd, pass);
- tcg_gen_andi_i32(tmp, tmp, mask);
- tcg_gen_andi_i32(tmp2, tmp2, ~mask);
- tcg_gen_or_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
}
neon_store_reg(rd, pass, tmp);
}
--
2.19.1

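The insert shifts moved above behave differently from ordinary shifts: only
the bit positions that receive shifted-in data are written, and the rest of
the destination is preserved, which is what the mask arithmetic in
sri_op/sli_op implements. A one-lane 8-bit C model, for exposition only:

    #include <stdint.h>

    /* VSRI: insert a >> sh into d, keeping the top sh bits of d. */
    static uint8_t sri_ref_u8(uint8_t d, uint8_t a, int sh)
    {
        uint8_t mask = (uint8_t)(0xff >> sh);
        return (uint8_t)((d & ~mask) | ((uint8_t)(a >> sh) & mask));
    }

    /* VSLI: insert a << sh into d, keeping the low sh bits of d. */
    static uint8_t sli_ref_u8(uint8_t d, uint8_t a, int sh)
    {
        uint8_t mask = (uint8_t)(0xff << sh);
        return (uint8_t)((d & ~mask) | ((uint8_t)(a << sh) & mask));
    }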
From: Richard Henderson <richard.henderson@linaro.org>

This fixes the endianness problem for softmmu, and moves
the main loop out of a macro and into an inlined function.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181005175350.30752-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-sve.h | 52 ++++++++++----
target/arm/sve_helper.c | 139 ++++++++++++++++++++++++-------------
target/arm/translate-sve.c | 74 +++++++++++++-------
3 files changed, 177 insertions(+), 88 deletions(-)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(sve_ldffsds_zd, TCG_CALL_NO_WG,

DEF_HELPER_FLAGS_6(sve_stbs_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_sths_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_sths_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_stss_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_sths_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)

DEF_HELPER_FLAGS_6(sve_stbs_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_sths_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_sths_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_stss_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_sths_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)

DEF_HELPER_FLAGS_6(sve_stbd_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_sthd_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_sthd_le_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_stsd_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_sthd_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_stdd_zsu, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_stsd_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)

DEF_HELPER_FLAGS_6(sve_stbd_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_sthd_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_sthd_le_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_stsd_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_sthd_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_stdd_zss, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_stsd_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zss, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)

DEF_HELPER_FLAGS_6(sve_stbd_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_sthd_zd, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_sthd_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_stsd_zd, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_sthd_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
-DEF_HELPER_FLAGS_6(sve_stdd_zd, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_6(sve_stsd_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_LDFF1_ZPZ_D(sve_ldffsds_zd, uint64_t, int32_t, cpu_ldl_data_ra)

/* Stores with a vector index. */

-#define DO_ST1_ZPZ_S(NAME, TYPEI, FN) \
-void HELPER(NAME)(CPUARMState *env, void *vd, void *vg, void *vm, \
- target_ulong base, uint32_t desc) \
-{ \
- intptr_t i, oprsz = simd_oprsz(desc); \
- unsigned scale = simd_data(desc); \
- uintptr_t ra = GETPC(); \
- for (i = 0; i < oprsz; ) { \
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
- do { \
- if (likely(pg & 1)) { \
- target_ulong off = *(TYPEI *)(vm + H1_4(i)); \
- uint32_t d = *(uint32_t *)(vd + H1_4(i)); \
- FN(env, base + (off << scale), d, ra); \
- } \
- i += sizeof(uint32_t), pg >>= sizeof(uint32_t); \
- } while (i & 15); \
- } \
+static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
+ target_ulong base, uint32_t desc, uintptr_t ra,
+ zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ intptr_t i, oprsz = simd_oprsz(desc);
+ unsigned scale = simd_data(desc);
+
+ set_helper_retaddr(ra);
+ for (i = 0; i < oprsz; ) {
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
+ do {
+ if (likely(pg & 1)) {
+ target_ulong off = off_fn(vm, i);
+ tlb_fn(env, vd, i, base + (off << scale), mmu_idx, ra);
+ }
+ i += 4, pg >>= 4;
+ } while (i & 15);
+ }
+ set_helper_retaddr(0);
}

-#define DO_ST1_ZPZ_D(NAME, TYPEI, FN) \
-void HELPER(NAME)(CPUARMState *env, void *vd, void *vg, void *vm, \
- target_ulong base, uint32_t desc) \
-{ \
- intptr_t i, oprsz = simd_oprsz(desc) / 8; \
- unsigned scale = simd_data(desc); \
- uintptr_t ra = GETPC(); \
- uint64_t *d = vd, *m = vm; uint8_t *pg = vg; \
- for (i = 0; i < oprsz; i++) { \
- if (likely(pg[H1(i)] & 1)) { \
- target_ulong off = (target_ulong)(TYPEI)m[i] << scale; \
- FN(env, base + off, d[i], ra); \
- } \
- } \
+static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
+ target_ulong base, uint32_t desc, uintptr_t ra,
+ zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ intptr_t i, oprsz = simd_oprsz(desc) / 8;
+ unsigned scale = simd_data(desc);
+
+ set_helper_retaddr(ra);
+ for (i = 0; i < oprsz; i++) {
+ uint8_t pg = *(uint8_t *)(vg + H1(i));
+ if (likely(pg & 1)) {
+ target_ulong off = off_fn(vm, i * 8);
+ tlb_fn(env, vd, i * 8, base + (off << scale), mmu_idx, ra);
+ }
+ }
+ set_helper_retaddr(0);
}

-DO_ST1_ZPZ_S(sve_stbs_zsu, uint32_t, cpu_stb_data_ra)
-DO_ST1_ZPZ_S(sve_sths_zsu, uint32_t, cpu_stw_data_ra)
-DO_ST1_ZPZ_S(sve_stss_zsu, uint32_t, cpu_stl_data_ra)
+#define DO_ST1_ZPZ_S(MEM, OFS) \
+void __attribute__((flatten)) HELPER(sve_st##MEM##_##OFS) \
+ (CPUARMState *env, void *vd, void *vg, void *vm, \
+ target_ulong base, uint32_t desc) \
+{ \
+ sve_st1_zs(env, vd, vg, vm, base, desc, GETPC(), \
+ off_##OFS##_s, sve_st1##MEM##_tlb); \
+}

-DO_ST1_ZPZ_S(sve_stbs_zss, int32_t, cpu_stb_data_ra)
-DO_ST1_ZPZ_S(sve_sths_zss, int32_t, cpu_stw_data_ra)
-DO_ST1_ZPZ_S(sve_stss_zss, int32_t, cpu_stl_data_ra)
+#define DO_ST1_ZPZ_D(MEM, OFS) \
+void __attribute__((flatten)) HELPER(sve_st##MEM##_##OFS) \
+ (CPUARMState *env, void *vd, void *vg, void *vm, \
+ target_ulong base, uint32_t desc) \
+{ \
+ sve_st1_zd(env, vd, vg, vm, base, desc, GETPC(), \
+ off_##OFS##_d, sve_st1##MEM##_tlb); \
+}

-DO_ST1_ZPZ_D(sve_stbd_zsu, uint32_t, cpu_stb_data_ra)
-DO_ST1_ZPZ_D(sve_sthd_zsu, uint32_t, cpu_stw_data_ra)
-DO_ST1_ZPZ_D(sve_stsd_zsu, uint32_t, cpu_stl_data_ra)
-DO_ST1_ZPZ_D(sve_stdd_zsu, uint32_t, cpu_stq_data_ra)
+DO_ST1_ZPZ_S(bs, zsu)
+DO_ST1_ZPZ_S(hs_le, zsu)
+DO_ST1_ZPZ_S(hs_be, zsu)
+DO_ST1_ZPZ_S(ss_le, zsu)
+DO_ST1_ZPZ_S(ss_be, zsu)

-DO_ST1_ZPZ_D(sve_stbd_zss, int32_t, cpu_stb_data_ra)
-DO_ST1_ZPZ_D(sve_sthd_zss, int32_t, cpu_stw_data_ra)
-DO_ST1_ZPZ_D(sve_stsd_zss, int32_t, cpu_stl_data_ra)
-DO_ST1_ZPZ_D(sve_stdd_zss, int32_t, cpu_stq_data_ra)
+DO_ST1_ZPZ_S(bs, zss)
+DO_ST1_ZPZ_S(hs_le, zss)
+DO_ST1_ZPZ_S(hs_be, zss)
+DO_ST1_ZPZ_S(ss_le, zss)
+DO_ST1_ZPZ_S(ss_be, zss)

-DO_ST1_ZPZ_D(sve_stbd_zd, uint64_t, cpu_stb_data_ra)
-DO_ST1_ZPZ_D(sve_sthd_zd, uint64_t, cpu_stw_data_ra)
-DO_ST1_ZPZ_D(sve_stsd_zd, uint64_t, cpu_stl_data_ra)
-DO_ST1_ZPZ_D(sve_stdd_zd, uint64_t, cpu_stq_data_ra)
+DO_ST1_ZPZ_D(bd, zsu)
+DO_ST1_ZPZ_D(hd_le, zsu)
+DO_ST1_ZPZ_D(hd_be, zsu)
+DO_ST1_ZPZ_D(sd_le, zsu)
+DO_ST1_ZPZ_D(sd_be, zsu)
+DO_ST1_ZPZ_D(dd_le, zsu)
+DO_ST1_ZPZ_D(dd_be, zsu)
+
+DO_ST1_ZPZ_D(bd, zss)
+DO_ST1_ZPZ_D(hd_le, zss)
+DO_ST1_ZPZ_D(hd_be, zss)
+DO_ST1_ZPZ_D(sd_le, zss)
+DO_ST1_ZPZ_D(sd_be, zss)
+DO_ST1_ZPZ_D(dd_le, zss)
+DO_ST1_ZPZ_D(dd_be, zss)
+
+DO_ST1_ZPZ_D(bd, zd)
+DO_ST1_ZPZ_D(hd_le, zd)
+DO_ST1_ZPZ_D(hd_be, zd)
+DO_ST1_ZPZ_D(sd_le, zd)
+DO_ST1_ZPZ_D(sd_be, zd)
+DO_ST1_ZPZ_D(dd_le, zd)
+DO_ST1_ZPZ_D(dd_be, zd)
+
+#undef DO_ST1_ZPZ_S
+#undef DO_ST1_ZPZ_D
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c

From: Richard Henderson <richard.henderson@linaro.org>

Move mla_op and mls_op expanders from translate-a64.c.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-16-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate.h | 2 +
target/arm/translate-a64.c | 106 -----------------------------
target/arm/translate.c | 134 ++++++++++++++++++++++++++++++++-----
3 files changed, 120 insertions(+), 122 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ extern const GVecGen3 bit_op;
extern const GVecGen3 bif_op;
+extern const GVecGen3 mla_op[4];
+extern const GVecGen3 mls_op[4];
extern const GVecGen2i ssra_op[4];
extern const GVecGen2i usra_op[4];
extern const GVecGen2i sri_op[4];
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
}
}

-static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- gen_helper_neon_mul_u8(a, a, b);
- gen_helper_neon_add_u8(d, d, a);
-}
-
-static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- gen_helper_neon_mul_u16(a, a, b);
- gen_helper_neon_add_u16(d, d, a);
-}
-
-static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- tcg_gen_mul_i32(a, a, b);
- tcg_gen_add_i32(d, d, a);
-}
-
-static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
-{
- tcg_gen_mul_i64(a, a, b);
- tcg_gen_add_i64(d, d, a);
-}
-
-static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
-{
- tcg_gen_mul_vec(vece, a, a, b);
- tcg_gen_add_vec(vece, d, d, a);
-}
-
-static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- gen_helper_neon_mul_u8(a, a, b);
- gen_helper_neon_sub_u8(d, d, a);
-}
-
-static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- gen_helper_neon_mul_u16(a, a, b);
- gen_helper_neon_sub_u16(d, d, a);
-}
-
-static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- tcg_gen_mul_i32(a, a, b);
- tcg_gen_sub_i32(d, d, a);
-}
-
-static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
-{
- tcg_gen_mul_i64(a, a, b);
- tcg_gen_sub_i64(d, d, a);
-}
-
-static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
-{
- tcg_gen_mul_vec(vece, a, a, b);
- tcg_gen_sub_vec(vece, d, d, a);
-}
-
/* Integer op subgroup of C3.6.16. */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
.vece = MO_64 },
};
- static const GVecGen3 mla_op[4] = {
- { .fni4 = gen_mla8_i32,
- .fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_8 },
- { .fni4 = gen_mla16_i32,
- .fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_16 },
- { .fni4 = gen_mla32_i32,
- .fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_32 },
- { .fni8 = gen_mla64_i64,
- .fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_64 },
- };
- static const GVecGen3 mls_op[4] = {
- { .fni4 = gen_mls8_i32,
- .fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_8 },
- { .fni4 = gen_mls16_i32,
- .fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_16 },
- { .fni4 = gen_mls32_i32,
- .fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_32 },
- { .fni8 = gen_mls64_i64,
- .fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_64 },
- };

int is_q = extract32(insn, 30, 1);
151
int u = extract32(insn, 29, 1);
152
diff --git a/target/arm/translate.c b/target/arm/translate.c
258
index XXXXXXX..XXXXXXX 100644
153
index XXXXXXX..XXXXXXX 100644
259
--- a/target/arm/translate-sve.c
154
--- a/target/arm/translate.c
260
+++ b/target/arm/translate-sve.c
155
+++ b/target/arm/translate.c
261
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn)
156
@@ -XXX,XX +XXX,XX @@ static void gen_neon_narrow_op(int op, int u, int size,
262
return true;
157
#define NEON_3R_VABA 15
263
}
158
#define NEON_3R_VADD_VSUB 16
264
159
#define NEON_3R_VTST_VCEQ 17
265
-/* Indexed by [xs][msz]. */
160
-#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
266
-static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][3] = {
161
+#define NEON_3R_VML 18 /* VMLA, VMLS */
267
- { gen_helper_sve_stbs_zsu,
162
#define NEON_3R_VMUL 19
268
- gen_helper_sve_sths_zsu,
163
#define NEON_3R_VPMAX 20
269
- gen_helper_sve_stss_zsu, },
164
#define NEON_3R_VPMIN 21
270
- { gen_helper_sve_stbs_zss,
165
@@ -XXX,XX +XXX,XX @@ const GVecGen2i sli_op[4] = {
271
- gen_helper_sve_sths_zss,
166
.vece = MO_64 },
272
- gen_helper_sve_stss_zss, },
273
+/* Indexed by [be][xs][msz]. */
274
+static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][3] = {
275
+ /* Little-endian */
276
+ { { gen_helper_sve_stbs_zsu,
277
+ gen_helper_sve_sths_le_zsu,
278
+ gen_helper_sve_stss_le_zsu, },
279
+ { gen_helper_sve_stbs_zss,
280
+ gen_helper_sve_sths_le_zss,
281
+ gen_helper_sve_stss_le_zss, } },
282
+ /* Big-endian */
283
+ { { gen_helper_sve_stbs_zsu,
284
+ gen_helper_sve_sths_be_zsu,
285
+ gen_helper_sve_stss_be_zsu, },
286
+ { gen_helper_sve_stbs_zss,
287
+ gen_helper_sve_sths_be_zss,
288
+ gen_helper_sve_stss_be_zss, } },
289
};
167
};
290
168
291
/* Note that we overload xs=2 to indicate 64-bit offset. */
169
+static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
292
-static gen_helper_gvec_mem_scatter * const scatter_store_fn64[3][4] = {
170
+{
293
- { gen_helper_sve_stbd_zsu,
171
+ gen_helper_neon_mul_u8(a, a, b);
294
- gen_helper_sve_sthd_zsu,
172
+ gen_helper_neon_add_u8(d, d, a);
295
- gen_helper_sve_stsd_zsu,
173
+}
296
- gen_helper_sve_stdd_zsu, },
174
+
297
- { gen_helper_sve_stbd_zss,
175
+static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
298
- gen_helper_sve_sthd_zss,
176
+{
299
- gen_helper_sve_stsd_zss,
177
+ gen_helper_neon_mul_u8(a, a, b);
300
- gen_helper_sve_stdd_zss, },
178
+ gen_helper_neon_sub_u8(d, d, a);
301
- { gen_helper_sve_stbd_zd,
179
+}
302
- gen_helper_sve_sthd_zd,
180
+
303
- gen_helper_sve_stsd_zd,
181
+static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
304
- gen_helper_sve_stdd_zd, },
182
+{
305
+static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][3][4] = {
183
+ gen_helper_neon_mul_u16(a, a, b);
306
+ /* Little-endian */
184
+ gen_helper_neon_add_u16(d, d, a);
307
+ { { gen_helper_sve_stbd_zsu,
185
+}
308
+ gen_helper_sve_sthd_le_zsu,
186
+
309
+ gen_helper_sve_stsd_le_zsu,
187
+static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
310
+ gen_helper_sve_stdd_le_zsu, },
188
+{
311
+ { gen_helper_sve_stbd_zss,
189
+ gen_helper_neon_mul_u16(a, a, b);
312
+ gen_helper_sve_sthd_le_zss,
190
+ gen_helper_neon_sub_u16(d, d, a);
313
+ gen_helper_sve_stsd_le_zss,
191
+}
314
+ gen_helper_sve_stdd_le_zss, },
192
+
315
+ { gen_helper_sve_stbd_zd,
193
+static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
316
+ gen_helper_sve_sthd_le_zd,
194
+{
317
+ gen_helper_sve_stsd_le_zd,
195
+ tcg_gen_mul_i32(a, a, b);
318
+ gen_helper_sve_stdd_le_zd, } },
196
+ tcg_gen_add_i32(d, d, a);
319
+ /* Big-endian */
197
+}
320
+ { { gen_helper_sve_stbd_zsu,
198
+
321
+ gen_helper_sve_sthd_be_zsu,
199
+static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
322
+ gen_helper_sve_stsd_be_zsu,
200
+{
323
+ gen_helper_sve_stdd_be_zsu, },
201
+ tcg_gen_mul_i32(a, a, b);
324
+ { gen_helper_sve_stbd_zss,
202
+ tcg_gen_sub_i32(d, d, a);
325
+ gen_helper_sve_sthd_be_zss,
203
+}
326
+ gen_helper_sve_stsd_be_zss,
204
+
327
+ gen_helper_sve_stdd_be_zss, },
205
+static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
328
+ { gen_helper_sve_stbd_zd,
206
+{
329
+ gen_helper_sve_sthd_be_zd,
207
+ tcg_gen_mul_i64(a, a, b);
330
+ gen_helper_sve_stsd_be_zd,
208
+ tcg_gen_add_i64(d, d, a);
331
+ gen_helper_sve_stdd_be_zd, } },
209
+}
332
};
210
+
333
211
+static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
334
static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
212
+{
335
{
213
+ tcg_gen_mul_i64(a, a, b);
336
gen_helper_gvec_mem_scatter *fn;
214
+ tcg_gen_sub_i64(d, d, a);
337
+ int be = s->be_data == MO_BE;
215
+}
338
216
+
339
if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
217
+static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
340
return false;
218
+{
341
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
219
+ tcg_gen_mul_vec(vece, a, a, b);
342
}
220
+ tcg_gen_add_vec(vece, d, d, a);
343
switch (a->esz) {
221
+}
344
case MO_32:
222
+
345
- fn = scatter_store_fn32[a->xs][a->msz];
223
+static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
346
+ fn = scatter_store_fn32[be][a->xs][a->msz];
224
+{
347
break;
225
+ tcg_gen_mul_vec(vece, a, a, b);
348
case MO_64:
226
+ tcg_gen_sub_vec(vece, d, d, a);
349
- fn = scatter_store_fn64[a->xs][a->msz];
227
+}
350
+ fn = scatter_store_fn64[be][a->xs][a->msz];
228
+
351
break;
229
+/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
352
default:
230
+ * these tables are shared with AArch64 which does support them.
353
g_assert_not_reached();
231
+ */
354
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
232
+const GVecGen3 mla_op[4] = {
355
static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a, uint32_t insn)
233
+ { .fni4 = gen_mla8_i32,
356
{
234
+ .fniv = gen_mla_vec,
357
gen_helper_gvec_mem_scatter *fn = NULL;
235
+ .opc = INDEX_op_mul_vec,
358
+ int be = s->be_data == MO_BE;
236
+ .load_dest = true,
359
TCGv_i64 imm;
237
+ .vece = MO_8 },
360
238
+ { .fni4 = gen_mla16_i32,
361
if (a->esz < a->msz) {
239
+ .fniv = gen_mla_vec,
362
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a, uint32_t insn)
240
+ .opc = INDEX_op_mul_vec,
363
241
+ .load_dest = true,
364
switch (a->esz) {
242
+ .vece = MO_16 },
365
case MO_32:
243
+ { .fni4 = gen_mla32_i32,
366
- fn = scatter_store_fn32[0][a->msz];
244
+ .fniv = gen_mla_vec,
367
+ fn = scatter_store_fn32[be][0][a->msz];
245
+ .opc = INDEX_op_mul_vec,
368
break;
246
+ .load_dest = true,
369
case MO_64:
247
+ .vece = MO_32 },
370
- fn = scatter_store_fn64[2][a->msz];
248
+ { .fni8 = gen_mla64_i64,
371
+ fn = scatter_store_fn64[be][2][a->msz];
249
+ .fniv = gen_mla_vec,
372
break;
250
+ .opc = INDEX_op_mul_vec,
373
}
251
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
374
assert(fn != NULL);
252
+ .load_dest = true,
253
+ .vece = MO_64 },
254
+};
255
+
256
+const GVecGen3 mls_op[4] = {
257
+ { .fni4 = gen_mls8_i32,
258
+ .fniv = gen_mls_vec,
259
+ .opc = INDEX_op_mul_vec,
260
+ .load_dest = true,
261
+ .vece = MO_8 },
262
+ { .fni4 = gen_mls16_i32,
263
+ .fniv = gen_mls_vec,
264
+ .opc = INDEX_op_mul_vec,
265
+ .load_dest = true,
266
+ .vece = MO_16 },
267
+ { .fni4 = gen_mls32_i32,
268
+ .fniv = gen_mls_vec,
269
+ .opc = INDEX_op_mul_vec,
270
+ .load_dest = true,
271
+ .vece = MO_32 },
272
+ { .fni8 = gen_mls64_i64,
273
+ .fniv = gen_mls_vec,
274
+ .opc = INDEX_op_mul_vec,
275
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
276
+ .load_dest = true,
277
+ .vece = MO_64 },
278
+};
279
+
280
/* Translate a NEON data processing instruction. Return nonzero if the
281
instruction is invalid.
282
We process data in a mixture of 32-bit and 64-bit chunks.
283
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
284
return 0;
285
}
286
break;
287
+
288
+ case NEON_3R_VML: /* VMLA, VMLS */
289
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
290
+ u ? &mls_op[size] : &mla_op[size]);
291
+ return 0;
292
}
293
+
294
if (size == 3) {
295
/* 64-bit element instructions. */
296
for (pass = 0; pass < (q ? 2 : 1); pass++) {
297
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
298
}
299
}
300
break;
301
- case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
302
- switch (size) {
303
- case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
304
- case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
305
- case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
306
- default: abort();
307
- }
308
- tcg_temp_free_i32(tmp2);
309
- tmp2 = neon_load_reg(rd, pass);
310
- if (u) { /* VMLS */
311
- gen_neon_rsb(size, tmp, tmp2);
312
- } else { /* VMLA */
313
- gen_neon_add(size, tmp, tmp2);
314
- }
315
- break;
316
case NEON_3R_VMUL:
317
/* VMUL.P8; other cases already eliminated. */
318
gen_helper_neon_mul_p8(tmp, tmp, tmp2);
375
--
319
--
376
2.19.0
320
2.19.1
377
321
378
322
diff view generated by jsdifflib
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Move cmtst_op expanders from translate-a64.c.
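
(For reference: CMTST is the "test" compare, which sets each element to all-ones when (X & Y) != 0 and to all-zeros otherwise. A minimal scalar sketch of that per-element semantics — illustrative only, not the TCG expander being moved; needs <stdint.h>:)

    static inline uint32_t cmtst32(uint32_t x, uint32_t y)
    {
        /* All-ones if any tested bit is set, else all-zeros. */
        return (x & y) != 0 ? UINT32_MAX : 0;
    }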
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20181011205206.3552-17-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
target/arm/translate.h | 2 +
11
target/arm/translate-a64.c | 38 ------------------
12
target/arm/translate.c | 81 +++++++++++++++++++++++++++-----------
13
3 files changed, 60 insertions(+), 61 deletions(-)
14
15
diff --git a/target/arm/translate.h b/target/arm/translate.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate.h
18
+++ b/target/arm/translate.h
19
@@ -XXX,XX +XXX,XX @@ extern const GVecGen3 bit_op;
20
extern const GVecGen3 bif_op;
21
extern const GVecGen3 mla_op[4];
22
extern const GVecGen3 mls_op[4];
23
+extern const GVecGen3 cmtst_op[4];
24
extern const GVecGen2i ssra_op[4];
25
extern const GVecGen2i usra_op[4];
26
extern const GVecGen2i sri_op[4];
27
extern const GVecGen2i sli_op[4];
28
+void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
29
30
/*
31
* Forward to the isar_feature_* tests given a DisasContext pointer.
32
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/translate-a64.c
35
+++ b/target/arm/translate-a64.c
36
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
37
}
38
}
39
40
-/* CMTST : test is "if (X & Y != 0)". */
41
-static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
42
-{
43
- tcg_gen_and_i32(d, a, b);
44
- tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
45
- tcg_gen_neg_i32(d, d);
46
-}
47
-
48
-static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
49
-{
50
- tcg_gen_and_i64(d, a, b);
51
- tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
52
- tcg_gen_neg_i64(d, d);
53
-}
54
-
55
-static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
56
-{
57
- tcg_gen_and_vec(vece, d, a, b);
58
- tcg_gen_dupi_vec(vece, a, 0);
59
- tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
60
-}
61
-
62
static void handle_3same_64(DisasContext *s, int opcode, bool u,
63
TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
64
{
65
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
66
/* Integer op subgroup of C3.6.16. */
67
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
68
{
69
- static const GVecGen3 cmtst_op[4] = {
70
- { .fni4 = gen_helper_neon_tst_u8,
71
- .fniv = gen_cmtst_vec,
72
- .vece = MO_8 },
73
- { .fni4 = gen_helper_neon_tst_u16,
74
- .fniv = gen_cmtst_vec,
75
- .vece = MO_16 },
76
- { .fni4 = gen_cmtst_i32,
77
- .fniv = gen_cmtst_vec,
78
- .vece = MO_32 },
79
- { .fni8 = gen_cmtst_i64,
80
- .fniv = gen_cmtst_vec,
81
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
82
- .vece = MO_64 },
83
- };
84
-
85
int is_q = extract32(insn, 30, 1);
86
int u = extract32(insn, 29, 1);
87
int size = extract32(insn, 22, 2);
88
diff --git a/target/arm/translate.c b/target/arm/translate.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/arm/translate.c
91
+++ b/target/arm/translate.c
92
@@ -XXX,XX +XXX,XX @@ const GVecGen3 mls_op[4] = {
93
.vece = MO_64 },
94
};
95
96
+/* CMTST : test is "if (X & Y != 0)". */
97
+static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
98
+{
99
+ tcg_gen_and_i32(d, a, b);
100
+ tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
101
+ tcg_gen_neg_i32(d, d);
102
+}
103
+
104
+void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
105
+{
106
+ tcg_gen_and_i64(d, a, b);
107
+ tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
108
+ tcg_gen_neg_i64(d, d);
109
+}
110
+
111
+static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
112
+{
113
+ tcg_gen_and_vec(vece, d, a, b);
114
+ tcg_gen_dupi_vec(vece, a, 0);
115
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
116
+}
117
+
118
+const GVecGen3 cmtst_op[4] = {
119
+ { .fni4 = gen_helper_neon_tst_u8,
120
+ .fniv = gen_cmtst_vec,
121
+ .vece = MO_8 },
122
+ { .fni4 = gen_helper_neon_tst_u16,
123
+ .fniv = gen_cmtst_vec,
124
+ .vece = MO_16 },
125
+ { .fni4 = gen_cmtst_i32,
126
+ .fniv = gen_cmtst_vec,
127
+ .vece = MO_32 },
128
+ { .fni8 = gen_cmtst_i64,
129
+ .fniv = gen_cmtst_vec,
130
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
131
+ .vece = MO_64 },
132
+};
133
+
134
/* Translate a NEON data processing instruction. Return nonzero if the
135
instruction is invalid.
136
We process data in a mixture of 32-bit and 64-bit chunks.
137
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
138
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
139
u ? &mls_op[size] : &mla_op[size]);
140
return 0;
141
+
142
+ case NEON_3R_VTST_VCEQ:
143
+ if (u) { /* VCEQ */
144
+ tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
145
+ vec_size, vec_size);
146
+ } else { /* VTST */
147
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
148
+ vec_size, vec_size, &cmtst_op[size]);
149
+ }
150
+ return 0;
151
+
152
+ case NEON_3R_VCGT:
153
+ tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
154
+ rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
155
+ return 0;
156
+
157
+ case NEON_3R_VCGE:
158
+ tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
159
+ rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
160
+ return 0;
161
}
162
163
if (size == 3) {
164
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
165
case NEON_3R_VQSUB:
166
GEN_NEON_INTEGER_OP_ENV(qsub);
167
break;
168
- case NEON_3R_VCGT:
169
- GEN_NEON_INTEGER_OP(cgt);
170
- break;
171
- case NEON_3R_VCGE:
172
- GEN_NEON_INTEGER_OP(cge);
173
- break;
174
case NEON_3R_VSHL:
175
GEN_NEON_INTEGER_OP(shl);
176
break;
177
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
178
tmp2 = neon_load_reg(rd, pass);
179
gen_neon_add(size, tmp, tmp2);
180
break;
181
- case NEON_3R_VTST_VCEQ:
182
- if (!u) { /* VTST */
183
- switch (size) {
184
- case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
185
- case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
186
- case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
187
- default: abort();
188
- }
189
- } else { /* VCEQ */
190
- switch (size) {
191
- case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
192
- case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
193
- case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
194
- default: abort();
195
- }
196
- }
197
- break;
198
case NEON_3R_VMUL:
199
/* VMUL.P8; other cases already eliminated. */
200
gen_helper_neon_mul_p8(tmp, tmp, tmp2);
201
--
202
2.19.1
203
204
diff view generated by jsdifflib
1
Add the v8M stack checks for:
1
From: Richard Henderson <richard.henderson@linaro.org>
2
* LDM (T2 encoding)
3
* STM (T2 encoding)
4
2
5
This includes the 32-bit encodings of the instructions listed
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
in v8M ARM ARM rule R_YVWT as
4
Message-id: 20181011205206.3552-18-richard.henderson@linaro.org
7
* LDM, LDMIA, LDMFD
5
[PMM: added parens in ?: expression]
8
* LDMDB, LDMEA
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
* POP (multiple registers)
10
* PUSH (multiple registers)
11
* STM, STMIA, STMEA
12
* STMDB, STMFD
13
14
We perform the stack limit before doing any other part
15
of the load or store.
16
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
19
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
20
Message-id: 20181002163556.10279-10-peter.maydell@linaro.org
21
---
8
---
22
target/arm/translate.c | 19 ++++++++++++++++++-
9
target/arm/translate.c | 81 ++++++++++++++----------------------------
23
1 file changed, 18 insertions(+), 1 deletion(-)
10
1 file changed, 26 insertions(+), 55 deletions(-)
24
11
25
diff --git a/target/arm/translate.c b/target/arm/translate.c
12
diff --git a/target/arm/translate.c b/target/arm/translate.c
26
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/translate.c
14
--- a/target/arm/translate.c
28
+++ b/target/arm/translate.c
15
+++ b/target/arm/translate.c
29
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
16
@@ -XXX,XX +XXX,XX @@ static void gen_vfp_msr(TCGv_i32 tmp)
30
} else {
17
tcg_temp_free_i32(tmp);
31
int i, loaded_base = 0;
18
}
32
TCGv_i32 loaded_var;
19
33
+ bool wback = extract32(insn, 21, 1);
20
-static void gen_neon_dup_u8(TCGv_i32 var, int shift)
34
/* Load/store multiple. */
21
-{
35
addr = load_reg(s, rn);
22
- TCGv_i32 tmp = tcg_temp_new_i32();
36
offset = 0;
23
- if (shift)
37
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
24
- tcg_gen_shri_i32(var, var, shift);
38
if (insn & (1 << i))
25
- tcg_gen_ext8u_i32(var, var);
39
offset += 4;
26
- tcg_gen_shli_i32(tmp, var, 8);
27
- tcg_gen_or_i32(var, var, tmp);
28
- tcg_gen_shli_i32(tmp, var, 16);
29
- tcg_gen_or_i32(var, var, tmp);
30
- tcg_temp_free_i32(tmp);
31
-}
32
-
33
static void gen_neon_dup_low16(TCGv_i32 var)
34
{
35
TCGv_i32 tmp = tcg_temp_new_i32();
36
@@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var)
37
tcg_temp_free_i32(tmp);
38
}
39
40
-static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
41
-{
42
- /* Load a single Neon element and replicate into a 32 bit TCG reg */
43
- TCGv_i32 tmp = tcg_temp_new_i32();
44
- switch (size) {
45
- case 0:
46
- gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
47
- gen_neon_dup_u8(tmp, 0);
48
- break;
49
- case 1:
50
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
51
- gen_neon_dup_low16(tmp);
52
- break;
53
- case 2:
54
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
55
- break;
56
- default: /* Avoid compiler warnings. */
57
- abort();
58
- }
59
- return tmp;
60
-}
61
-
62
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
63
uint32_t dp)
64
{
65
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
66
int load;
67
int shift;
68
int n;
69
+ int vec_size;
70
TCGv_i32 addr;
71
TCGv_i32 tmp;
72
TCGv_i32 tmp2;
73
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
74
}
75
addr = tcg_temp_new_i32();
76
load_reg_var(s, addr, rn);
77
- if (nregs == 1) {
78
- /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
79
- tmp = gen_load_and_replicate(s, addr, size);
80
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
81
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
82
- if (insn & (1 << 5)) {
83
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
84
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
85
- }
86
- tcg_temp_free_i32(tmp);
87
- } else {
88
- /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
89
- stride = (insn & (1 << 5)) ? 2 : 1;
90
- for (reg = 0; reg < nregs; reg++) {
91
- tmp = gen_load_and_replicate(s, addr, size);
92
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
93
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
94
- tcg_temp_free_i32(tmp);
95
- tcg_gen_addi_i32(addr, addr, 1 << size);
96
- rd += stride;
97
+
98
+ /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
99
+ * VLD2/3/4 to all lanes: bit 5 indicates register stride.
100
+ */
101
+ stride = (insn & (1 << 5)) ? 2 : 1;
102
+ vec_size = nregs == 1 ? stride * 8 : 8;
103
+
104
+ tmp = tcg_temp_new_i32();
105
+ for (reg = 0; reg < nregs; reg++) {
106
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
107
+ s->be_data | size);
108
+ if ((rd & 1) && vec_size == 16) {
109
+ /* We cannot write 16 bytes at once because the
110
+ * destination is unaligned.
111
+ */
112
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
113
+ 8, 8, tmp);
114
+ tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
115
+ neon_reg_offset(rd, 0), 8, 8);
116
+ } else {
117
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
118
+ vec_size, vec_size, tmp);
40
}
119
}
41
+
120
+ tcg_gen_addi_i32(addr, addr, 1 << size);
42
if (insn & (1 << 24)) {
121
+ rd += stride;
43
tcg_gen_addi_i32(addr, addr, -offset);
122
}
44
}
123
+ tcg_temp_free_i32(tmp);
45
124
tcg_temp_free_i32(addr);
46
+ if (s->v8m_stackcheck && rn == 13 && wback) {
125
stride = (1 << size) * nregs;
47
+ /*
126
} else {
48
+ * If the writeback is incrementing SP rather than
49
+ * decrementing it, and the initial SP is below the
50
+ * stack limit but the final written-back SP would
51
+ * be above, then we must not perform any memory
52
+ * accesses, but it is IMPDEF whether we generate
53
+ * an exception. We choose to do so in this case.
54
+ * At this point 'addr' is the lowest address, so
55
+ * either the original SP (if incrementing) or our
56
+ * final SP (if decrementing), so that's what we check.
57
+ */
58
+ gen_helper_v8m_stackcheck(cpu_env, addr);
59
+ }
60
+
61
loaded_var = NULL;
62
for (i = 0; i < 16; i++) {
63
if ((insn & (1 << i)) == 0)
64
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
65
if (loaded_base) {
66
store_reg(s, rn, loaded_var);
67
}
68
- if (insn & (1 << 21)) {
69
+ if (wback) {
70
/* Base register writeback. */
71
if (insn & (1 << 24)) {
72
tcg_gen_addi_i32(addr, addr, -offset);
73
--
127
--
74
2.19.0
128
2.19.1
75
129
76
130
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This implements the feature for softmmu, and moves the
3
Instead of shifts and masks, use direct loads and stores from the neon
4
main loop out of a macro and into a function.
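
(A condensed sketch of the first-fault policy this implements — helper names other than record_fault(), which does appear in the patch, are invented here: only the first active element may take a real fault; later elements are probed non-faulting, and a failure is recorded rather than raised:)

    bool first = true;
    for (i = first_active(vg); i < reg_max; i = next_active(vg, i)) {
        if (first) {
            load_one_faulting(env, vd, i, addr_of(i));    /* may fault */
            first = false;
        } else if (!load_one_nonfault(env, vd, i, addr_of(i))) {
            record_fault(env, i, reg_max);                /* clip via FFR */
            break;
        }
    }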
4
register file. Mirror the iteration structure of the ARM pseudocode
5
5
more closely. Correct the parameters of the VLD2 A2 insn.
6
7
Note that this includes a bugfix for handling of the insn
8
"VLD2 (multiple 2-element structures)" -- we were using an
9
incorrect stride value.
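
(A hedged before/after illustration of the change in approach — conceptual C, not the patch's exact code; env_get_reg32() is an invented helper, while neon_element_offset() is the patch's real offset function:)

    /* Before (conceptually): assemble a 16-bit element with shift/mask. */
    uint32_t word = env_get_reg32(reg);
    uint16_t elt  = (word >> (ele * 16)) & 0xffff;

    /* After: read the element directly at its byte offset within the
     * register file, as the ARM pseudocode's per-element loop does.
     */
    long off = neon_element_offset(reg, ele, MO_16);
    uint16_t elt2 = *(uint16_t *)((char *)env + off);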
10
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20181011205206.3552-19-richard.henderson@linaro.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20181005175350.30752-15-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
15
---
12
target/arm/helper-sve.h | 84 ++++++++---
16
target/arm/translate.c | 170 ++++++++++++++++++-----------------------
13
target/arm/sve_helper.c | 290 +++++++++++++++++++++++++++----------
17
1 file changed, 74 insertions(+), 96 deletions(-)
14
target/arm/translate-sve.c | 84 +++++------
18
15
3 files changed, 321 insertions(+), 137 deletions(-)
19
diff --git a/target/arm/translate.c b/target/arm/translate.c
16
17
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
18
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper-sve.h
21
--- a/target/arm/translate.c
20
+++ b/target/arm/helper-sve.h
22
+++ b/target/arm/translate.c
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(sve_ldsds_be_zd, TCG_CALL_NO_WG,
23
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 neon_load_reg(int reg, int pass)
22
24
return tmp;
23
DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu, TCG_CALL_NO_WG,
24
void, env, ptr, ptr, ptr, tl, i32)
25
-DEF_HELPER_FLAGS_6(sve_ldffhsu_zsu, TCG_CALL_NO_WG,
26
+DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zsu, TCG_CALL_NO_WG,
27
void, env, ptr, ptr, ptr, tl, i32)
28
-DEF_HELPER_FLAGS_6(sve_ldffssu_zsu, TCG_CALL_NO_WG,
29
+DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zsu, TCG_CALL_NO_WG,
30
+ void, env, ptr, ptr, ptr, tl, i32)
31
+DEF_HELPER_FLAGS_6(sve_ldffss_le_zsu, TCG_CALL_NO_WG,
32
+ void, env, ptr, ptr, ptr, tl, i32)
33
+DEF_HELPER_FLAGS_6(sve_ldffss_be_zsu, TCG_CALL_NO_WG,
34
void, env, ptr, ptr, ptr, tl, i32)
35
DEF_HELPER_FLAGS_6(sve_ldffbss_zsu, TCG_CALL_NO_WG,
36
void, env, ptr, ptr, ptr, tl, i32)
37
-DEF_HELPER_FLAGS_6(sve_ldffhss_zsu, TCG_CALL_NO_WG,
38
+DEF_HELPER_FLAGS_6(sve_ldffhss_le_zsu, TCG_CALL_NO_WG,
39
+ void, env, ptr, ptr, ptr, tl, i32)
40
+DEF_HELPER_FLAGS_6(sve_ldffhss_be_zsu, TCG_CALL_NO_WG,
41
void, env, ptr, ptr, ptr, tl, i32)
42
43
DEF_HELPER_FLAGS_6(sve_ldffbsu_zss, TCG_CALL_NO_WG,
44
void, env, ptr, ptr, ptr, tl, i32)
45
-DEF_HELPER_FLAGS_6(sve_ldffhsu_zss, TCG_CALL_NO_WG,
46
+DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zss, TCG_CALL_NO_WG,
47
void, env, ptr, ptr, ptr, tl, i32)
48
-DEF_HELPER_FLAGS_6(sve_ldffssu_zss, TCG_CALL_NO_WG,
49
+DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zss, TCG_CALL_NO_WG,
50
+ void, env, ptr, ptr, ptr, tl, i32)
51
+DEF_HELPER_FLAGS_6(sve_ldffss_le_zss, TCG_CALL_NO_WG,
52
+ void, env, ptr, ptr, ptr, tl, i32)
53
+DEF_HELPER_FLAGS_6(sve_ldffss_be_zss, TCG_CALL_NO_WG,
54
void, env, ptr, ptr, ptr, tl, i32)
55
DEF_HELPER_FLAGS_6(sve_ldffbss_zss, TCG_CALL_NO_WG,
56
void, env, ptr, ptr, ptr, tl, i32)
57
-DEF_HELPER_FLAGS_6(sve_ldffhss_zss, TCG_CALL_NO_WG,
58
+DEF_HELPER_FLAGS_6(sve_ldffhss_le_zss, TCG_CALL_NO_WG,
59
+ void, env, ptr, ptr, ptr, tl, i32)
60
+DEF_HELPER_FLAGS_6(sve_ldffhss_be_zss, TCG_CALL_NO_WG,
61
void, env, ptr, ptr, ptr, tl, i32)
62
63
DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu, TCG_CALL_NO_WG,
64
void, env, ptr, ptr, ptr, tl, i32)
65
-DEF_HELPER_FLAGS_6(sve_ldffhdu_zsu, TCG_CALL_NO_WG,
66
+DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zsu, TCG_CALL_NO_WG,
67
void, env, ptr, ptr, ptr, tl, i32)
68
-DEF_HELPER_FLAGS_6(sve_ldffsdu_zsu, TCG_CALL_NO_WG,
69
+DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zsu, TCG_CALL_NO_WG,
70
void, env, ptr, ptr, ptr, tl, i32)
71
-DEF_HELPER_FLAGS_6(sve_ldffddu_zsu, TCG_CALL_NO_WG,
72
+DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zsu, TCG_CALL_NO_WG,
73
+ void, env, ptr, ptr, ptr, tl, i32)
74
+DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zsu, TCG_CALL_NO_WG,
75
+ void, env, ptr, ptr, ptr, tl, i32)
76
+DEF_HELPER_FLAGS_6(sve_ldffdd_le_zsu, TCG_CALL_NO_WG,
77
+ void, env, ptr, ptr, ptr, tl, i32)
78
+DEF_HELPER_FLAGS_6(sve_ldffdd_be_zsu, TCG_CALL_NO_WG,
79
void, env, ptr, ptr, ptr, tl, i32)
80
DEF_HELPER_FLAGS_6(sve_ldffbds_zsu, TCG_CALL_NO_WG,
81
void, env, ptr, ptr, ptr, tl, i32)
82
-DEF_HELPER_FLAGS_6(sve_ldffhds_zsu, TCG_CALL_NO_WG,
83
+DEF_HELPER_FLAGS_6(sve_ldffhds_le_zsu, TCG_CALL_NO_WG,
84
void, env, ptr, ptr, ptr, tl, i32)
85
-DEF_HELPER_FLAGS_6(sve_ldffsds_zsu, TCG_CALL_NO_WG,
86
+DEF_HELPER_FLAGS_6(sve_ldffhds_be_zsu, TCG_CALL_NO_WG,
87
+ void, env, ptr, ptr, ptr, tl, i32)
88
+DEF_HELPER_FLAGS_6(sve_ldffsds_le_zsu, TCG_CALL_NO_WG,
89
+ void, env, ptr, ptr, ptr, tl, i32)
90
+DEF_HELPER_FLAGS_6(sve_ldffsds_be_zsu, TCG_CALL_NO_WG,
91
void, env, ptr, ptr, ptr, tl, i32)
92
93
DEF_HELPER_FLAGS_6(sve_ldffbdu_zss, TCG_CALL_NO_WG,
94
void, env, ptr, ptr, ptr, tl, i32)
95
-DEF_HELPER_FLAGS_6(sve_ldffhdu_zss, TCG_CALL_NO_WG,
96
+DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zss, TCG_CALL_NO_WG,
97
void, env, ptr, ptr, ptr, tl, i32)
98
-DEF_HELPER_FLAGS_6(sve_ldffsdu_zss, TCG_CALL_NO_WG,
99
+DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zss, TCG_CALL_NO_WG,
100
void, env, ptr, ptr, ptr, tl, i32)
101
-DEF_HELPER_FLAGS_6(sve_ldffddu_zss, TCG_CALL_NO_WG,
102
+DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zss, TCG_CALL_NO_WG,
103
+ void, env, ptr, ptr, ptr, tl, i32)
104
+DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zss, TCG_CALL_NO_WG,
105
+ void, env, ptr, ptr, ptr, tl, i32)
106
+DEF_HELPER_FLAGS_6(sve_ldffdd_le_zss, TCG_CALL_NO_WG,
107
+ void, env, ptr, ptr, ptr, tl, i32)
108
+DEF_HELPER_FLAGS_6(sve_ldffdd_be_zss, TCG_CALL_NO_WG,
109
void, env, ptr, ptr, ptr, tl, i32)
110
DEF_HELPER_FLAGS_6(sve_ldffbds_zss, TCG_CALL_NO_WG,
111
void, env, ptr, ptr, ptr, tl, i32)
112
-DEF_HELPER_FLAGS_6(sve_ldffhds_zss, TCG_CALL_NO_WG,
113
+DEF_HELPER_FLAGS_6(sve_ldffhds_le_zss, TCG_CALL_NO_WG,
114
void, env, ptr, ptr, ptr, tl, i32)
115
-DEF_HELPER_FLAGS_6(sve_ldffsds_zss, TCG_CALL_NO_WG,
116
+DEF_HELPER_FLAGS_6(sve_ldffhds_be_zss, TCG_CALL_NO_WG,
117
+ void, env, ptr, ptr, ptr, tl, i32)
118
+DEF_HELPER_FLAGS_6(sve_ldffsds_le_zss, TCG_CALL_NO_WG,
119
+ void, env, ptr, ptr, ptr, tl, i32)
120
+DEF_HELPER_FLAGS_6(sve_ldffsds_be_zss, TCG_CALL_NO_WG,
121
void, env, ptr, ptr, ptr, tl, i32)
122
123
DEF_HELPER_FLAGS_6(sve_ldffbdu_zd, TCG_CALL_NO_WG,
124
void, env, ptr, ptr, ptr, tl, i32)
125
-DEF_HELPER_FLAGS_6(sve_ldffhdu_zd, TCG_CALL_NO_WG,
126
+DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zd, TCG_CALL_NO_WG,
127
void, env, ptr, ptr, ptr, tl, i32)
128
-DEF_HELPER_FLAGS_6(sve_ldffsdu_zd, TCG_CALL_NO_WG,
129
+DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zd, TCG_CALL_NO_WG,
130
void, env, ptr, ptr, ptr, tl, i32)
131
-DEF_HELPER_FLAGS_6(sve_ldffddu_zd, TCG_CALL_NO_WG,
132
+DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zd, TCG_CALL_NO_WG,
133
+ void, env, ptr, ptr, ptr, tl, i32)
134
+DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zd, TCG_CALL_NO_WG,
135
+ void, env, ptr, ptr, ptr, tl, i32)
136
+DEF_HELPER_FLAGS_6(sve_ldffdd_le_zd, TCG_CALL_NO_WG,
137
+ void, env, ptr, ptr, ptr, tl, i32)
138
+DEF_HELPER_FLAGS_6(sve_ldffdd_be_zd, TCG_CALL_NO_WG,
139
void, env, ptr, ptr, ptr, tl, i32)
140
DEF_HELPER_FLAGS_6(sve_ldffbds_zd, TCG_CALL_NO_WG,
141
void, env, ptr, ptr, ptr, tl, i32)
142
-DEF_HELPER_FLAGS_6(sve_ldffhds_zd, TCG_CALL_NO_WG,
143
+DEF_HELPER_FLAGS_6(sve_ldffhds_le_zd, TCG_CALL_NO_WG,
144
void, env, ptr, ptr, ptr, tl, i32)
145
-DEF_HELPER_FLAGS_6(sve_ldffsds_zd, TCG_CALL_NO_WG,
146
+DEF_HELPER_FLAGS_6(sve_ldffhds_be_zd, TCG_CALL_NO_WG,
147
+ void, env, ptr, ptr, ptr, tl, i32)
148
+DEF_HELPER_FLAGS_6(sve_ldffsds_le_zd, TCG_CALL_NO_WG,
149
+ void, env, ptr, ptr, ptr, tl, i32)
150
+DEF_HELPER_FLAGS_6(sve_ldffsds_be_zd, TCG_CALL_NO_WG,
151
void, env, ptr, ptr, ptr, tl, i32)
152
153
DEF_HELPER_FLAGS_6(sve_stbs_zsu, TCG_CALL_NO_WG,
154
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
155
index XXXXXXX..XXXXXXX 100644
156
--- a/target/arm/sve_helper.c
157
+++ b/target/arm/sve_helper.c
158
@@ -XXX,XX +XXX,XX @@ DO_LD1_ZPZ_D(dd_be, zd)
159
160
/* First fault loads with a vector index. */
161
162
-#ifdef CONFIG_USER_ONLY
163
+/* Load one element into VD+REG_OFF from (ENV,VADDR) without faulting.
164
+ * The controlling predicate is known to be true. Return true if the
165
+ * load was successful.
166
+ */
167
+typedef bool sve_ld1_nf_fn(CPUARMState *env, void *vd, intptr_t reg_off,
168
+ target_ulong vaddr, int mmu_idx);
169
170
-#define DO_LDFF1_ZPZ(NAME, TYPEE, TYPEI, TYPEM, FN, H) \
171
-void HELPER(NAME)(CPUARMState *env, void *vd, void *vg, void *vm, \
172
- target_ulong base, uint32_t desc) \
173
-{ \
174
- intptr_t i, oprsz = simd_oprsz(desc); \
175
- unsigned scale = simd_data(desc); \
176
- uintptr_t ra = GETPC(); \
177
- bool first = true; \
178
- mmap_lock(); \
179
- for (i = 0; i < oprsz; ) { \
180
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
181
- do { \
182
- TYPEM m = 0; \
183
- if (pg & 1) { \
184
- target_ulong off = *(TYPEI *)(vm + H(i)); \
185
- target_ulong addr = base + (off << scale); \
186
- if (!first && \
187
- page_check_range(addr, sizeof(TYPEM), PAGE_READ)) { \
188
- record_fault(env, i, oprsz); \
189
- goto exit; \
190
- } \
191
- m = FN(env, addr, ra); \
192
- first = false; \
193
- } \
194
- *(TYPEE *)(vd + H(i)) = m; \
195
- i += sizeof(TYPEE), pg >>= sizeof(TYPEE); \
196
- } while (i & 15); \
197
- } \
198
- exit: \
199
- mmap_unlock(); \
200
+#ifdef CONFIG_SOFTMMU
201
+#define DO_LD_NF(NAME, H, TYPEE, TYPEM, HOST) \
202
+static bool sve_ld##NAME##_nf(CPUARMState *env, void *vd, intptr_t reg_off, \
203
+ target_ulong addr, int mmu_idx) \
204
+{ \
205
+ target_ulong next_page = -(addr | TARGET_PAGE_MASK); \
206
+ if (likely(next_page - addr >= sizeof(TYPEM))) { \
207
+ void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx); \
208
+ if (likely(host)) { \
209
+ TYPEM val = HOST(host); \
210
+ *(TYPEE *)(vd + H(reg_off)) = val; \
211
+ return true; \
212
+ } \
213
+ } \
214
+ return false; \
215
}
25
}
216
-
26
217
#else
27
+static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
218
-
219
-#define DO_LDFF1_ZPZ(NAME, TYPEE, TYPEI, TYPEM, FN, H) \
220
-void HELPER(NAME)(CPUARMState *env, void *vd, void *vg, void *vm, \
221
- target_ulong base, uint32_t desc) \
222
-{ \
223
- g_assert_not_reached(); \
224
+#define DO_LD_NF(NAME, H, TYPEE, TYPEM, HOST) \
225
+static bool sve_ld##NAME##_nf(CPUARMState *env, void *vd, intptr_t reg_off, \
226
+ target_ulong addr, int mmu_idx) \
227
+{ \
228
+ if (likely(page_check_range(addr, sizeof(TYPEM), PAGE_READ))) { \
229
+ TYPEM val = HOST(g2h(addr)); \
230
+ *(TYPEE *)(vd + H(reg_off)) = val; \
231
+ return true; \
232
+ } \
233
+ return false; \
234
}
235
-
236
#endif
237
238
-#define DO_LDFF1_ZPZ_S(NAME, TYPEI, TYPEM, FN) \
239
- DO_LDFF1_ZPZ(NAME, uint32_t, TYPEI, TYPEM, FN, H1_4)
240
-#define DO_LDFF1_ZPZ_D(NAME, TYPEI, TYPEM, FN) \
241
- DO_LDFF1_ZPZ(NAME, uint64_t, TYPEI, TYPEM, FN, )
242
+DO_LD_NF(bsu, H1_4, uint32_t, uint8_t, ldub_p)
243
+DO_LD_NF(bss, H1_4, uint32_t, int8_t, ldsb_p)
244
+DO_LD_NF(bdu, , uint64_t, uint8_t, ldub_p)
245
+DO_LD_NF(bds, , uint64_t, int8_t, ldsb_p)
246
247
-DO_LDFF1_ZPZ_S(sve_ldffbsu_zsu, uint32_t, uint8_t, cpu_ldub_data_ra)
248
-DO_LDFF1_ZPZ_S(sve_ldffhsu_zsu, uint32_t, uint16_t, cpu_lduw_data_ra)
249
-DO_LDFF1_ZPZ_S(sve_ldffssu_zsu, uint32_t, uint32_t, cpu_ldl_data_ra)
250
-DO_LDFF1_ZPZ_S(sve_ldffbss_zsu, uint32_t, int8_t, cpu_ldub_data_ra)
251
-DO_LDFF1_ZPZ_S(sve_ldffhss_zsu, uint32_t, int16_t, cpu_lduw_data_ra)
252
+DO_LD_NF(hsu_le, H1_4, uint32_t, uint16_t, lduw_le_p)
253
+DO_LD_NF(hss_le, H1_4, uint32_t, int16_t, ldsw_le_p)
254
+DO_LD_NF(hsu_be, H1_4, uint32_t, uint16_t, lduw_be_p)
255
+DO_LD_NF(hss_be, H1_4, uint32_t, int16_t, ldsw_be_p)
256
+DO_LD_NF(hdu_le, , uint64_t, uint16_t, lduw_le_p)
257
+DO_LD_NF(hds_le, , uint64_t, int16_t, ldsw_le_p)
258
+DO_LD_NF(hdu_be, , uint64_t, uint16_t, lduw_be_p)
259
+DO_LD_NF(hds_be, , uint64_t, int16_t, ldsw_be_p)
260
261
-DO_LDFF1_ZPZ_S(sve_ldffbsu_zss, int32_t, uint8_t, cpu_ldub_data_ra)
262
-DO_LDFF1_ZPZ_S(sve_ldffhsu_zss, int32_t, uint16_t, cpu_lduw_data_ra)
263
-DO_LDFF1_ZPZ_S(sve_ldffssu_zss, int32_t, uint32_t, cpu_ldl_data_ra)
264
-DO_LDFF1_ZPZ_S(sve_ldffbss_zss, int32_t, int8_t, cpu_ldub_data_ra)
265
-DO_LDFF1_ZPZ_S(sve_ldffhss_zss, int32_t, int16_t, cpu_lduw_data_ra)
266
+DO_LD_NF(ss_le, H1_4, uint32_t, uint32_t, ldl_le_p)
267
+DO_LD_NF(ss_be, H1_4, uint32_t, uint32_t, ldl_be_p)
268
+DO_LD_NF(sdu_le, , uint64_t, uint32_t, ldl_le_p)
269
+DO_LD_NF(sds_le, , uint64_t, int32_t, ldl_le_p)
270
+DO_LD_NF(sdu_be, , uint64_t, uint32_t, ldl_be_p)
271
+DO_LD_NF(sds_be, , uint64_t, int32_t, ldl_be_p)
272
273
-DO_LDFF1_ZPZ_D(sve_ldffbdu_zsu, uint32_t, uint8_t, cpu_ldub_data_ra)
274
-DO_LDFF1_ZPZ_D(sve_ldffhdu_zsu, uint32_t, uint16_t, cpu_lduw_data_ra)
275
-DO_LDFF1_ZPZ_D(sve_ldffsdu_zsu, uint32_t, uint32_t, cpu_ldl_data_ra)
276
-DO_LDFF1_ZPZ_D(sve_ldffddu_zsu, uint32_t, uint64_t, cpu_ldq_data_ra)
277
-DO_LDFF1_ZPZ_D(sve_ldffbds_zsu, uint32_t, int8_t, cpu_ldub_data_ra)
278
-DO_LDFF1_ZPZ_D(sve_ldffhds_zsu, uint32_t, int16_t, cpu_lduw_data_ra)
279
-DO_LDFF1_ZPZ_D(sve_ldffsds_zsu, uint32_t, int32_t, cpu_ldl_data_ra)
280
+DO_LD_NF(dd_le, , uint64_t, uint64_t, ldq_le_p)
281
+DO_LD_NF(dd_be, , uint64_t, uint64_t, ldq_be_p)
282
283
-DO_LDFF1_ZPZ_D(sve_ldffbdu_zss, int32_t, uint8_t, cpu_ldub_data_ra)
284
-DO_LDFF1_ZPZ_D(sve_ldffhdu_zss, int32_t, uint16_t, cpu_lduw_data_ra)
285
-DO_LDFF1_ZPZ_D(sve_ldffsdu_zss, int32_t, uint32_t, cpu_ldl_data_ra)
286
-DO_LDFF1_ZPZ_D(sve_ldffddu_zss, int32_t, uint64_t, cpu_ldq_data_ra)
287
-DO_LDFF1_ZPZ_D(sve_ldffbds_zss, int32_t, int8_t, cpu_ldub_data_ra)
288
-DO_LDFF1_ZPZ_D(sve_ldffhds_zss, int32_t, int16_t, cpu_lduw_data_ra)
289
-DO_LDFF1_ZPZ_D(sve_ldffsds_zss, int32_t, int32_t, cpu_ldl_data_ra)
290
+/*
291
+ * Common helper for all gather first-faulting loads.
292
+ */
293
+static inline void sve_ldff1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
294
+ target_ulong base, uint32_t desc, uintptr_t ra,
295
+ zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn,
296
+ sve_ld1_nf_fn *nonfault_fn)
297
+{
28
+{
298
+ const int mmu_idx = cpu_mmu_index(env, false);
29
+ long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
299
+ intptr_t reg_off, reg_max = simd_oprsz(desc);
30
+
300
+ unsigned scale = simd_data(desc);
31
+ switch (mop) {
301
+ target_ulong addr;
32
+ case MO_UB:
302
33
+ tcg_gen_ld8u_i64(var, cpu_env, offset);
303
-DO_LDFF1_ZPZ_D(sve_ldffbdu_zd, uint64_t, uint8_t, cpu_ldub_data_ra)
34
+ break;
304
-DO_LDFF1_ZPZ_D(sve_ldffhdu_zd, uint64_t, uint16_t, cpu_lduw_data_ra)
35
+ case MO_UW:
305
-DO_LDFF1_ZPZ_D(sve_ldffsdu_zd, uint64_t, uint32_t, cpu_ldl_data_ra)
36
+ tcg_gen_ld16u_i64(var, cpu_env, offset);
306
-DO_LDFF1_ZPZ_D(sve_ldffddu_zd, uint64_t, uint64_t, cpu_ldq_data_ra)
37
+ break;
307
-DO_LDFF1_ZPZ_D(sve_ldffbds_zd, uint64_t, int8_t, cpu_ldub_data_ra)
38
+ case MO_UL:
308
-DO_LDFF1_ZPZ_D(sve_ldffhds_zd, uint64_t, int16_t, cpu_lduw_data_ra)
39
+ tcg_gen_ld32u_i64(var, cpu_env, offset);
309
-DO_LDFF1_ZPZ_D(sve_ldffsds_zd, uint64_t, int32_t, cpu_ldl_data_ra)
40
+ break;
310
+ /* Skip to the first true predicate. */
41
+ case MO_Q:
311
+ reg_off = find_next_active(vg, 0, reg_max, MO_32);
42
+ tcg_gen_ld_i64(var, cpu_env, offset);
312
+ if (likely(reg_off < reg_max)) {
43
+ break;
313
+ /* Perform one normal read, which will fault or not. */
44
+ default:
314
+ set_helper_retaddr(ra);
45
+ g_assert_not_reached();
315
+ addr = off_fn(vm, reg_off);
316
+ addr = base + (addr << scale);
317
+ tlb_fn(env, vd, reg_off, addr, mmu_idx, ra);
318
+
319
+ /* The rest of the reads will be non-faulting. */
320
+ set_helper_retaddr(0);
321
+ }
322
+
323
+ /* After any fault, zero the leading predicated false elements. */
324
+ swap_memzero(vd, reg_off);
325
+
326
+ while (likely((reg_off += 4) < reg_max)) {
327
+ uint64_t pg = *(uint64_t *)(vg + (reg_off >> 6) * 8);
328
+ if (likely((pg >> (reg_off & 63)) & 1)) {
329
+ addr = off_fn(vm, reg_off);
330
+ addr = base + (addr << scale);
331
+ if (!nonfault_fn(env, vd, reg_off, addr, mmu_idx)) {
332
+ record_fault(env, reg_off, reg_max);
333
+ break;
334
+ }
335
+ } else {
336
+ *(uint32_t *)(vd + H1_4(reg_off)) = 0;
337
+ }
338
+ }
46
+ }
339
+}
47
+}
340
+
48
+
341
+static inline void sve_ldff1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
49
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
342
+ target_ulong base, uint32_t desc, uintptr_t ra,
50
{
343
+ zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn,
51
tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
344
+ sve_ld1_nf_fn *nonfault_fn)
52
tcg_temp_free_i32(var);
53
}
54
55
+static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
345
+{
56
+{
346
+ const int mmu_idx = cpu_mmu_index(env, false);
57
+ long offset = neon_element_offset(reg, ele, size);
347
+ intptr_t reg_off, reg_max = simd_oprsz(desc);
58
+
348
+ unsigned scale = simd_data(desc);
59
+ switch (size) {
349
+ target_ulong addr;
60
+ case MO_8:
350
+
61
+ tcg_gen_st8_i64(var, cpu_env, offset);
351
+ /* Skip to the first true predicate. */
62
+ break;
352
+ reg_off = find_next_active(vg, 0, reg_max, MO_64);
63
+ case MO_16:
353
+ if (likely(reg_off < reg_max)) {
64
+ tcg_gen_st16_i64(var, cpu_env, offset);
354
+ /* Perform one normal read, which will fault or not. */
65
+ break;
355
+ set_helper_retaddr(ra);
66
+ case MO_32:
356
+ addr = off_fn(vm, reg_off);
67
+ tcg_gen_st32_i64(var, cpu_env, offset);
357
+ addr = base + (addr << scale);
68
+ break;
358
+ tlb_fn(env, vd, reg_off, addr, mmu_idx, ra);
69
+ case MO_64:
359
+
70
+ tcg_gen_st_i64(var, cpu_env, offset);
360
+ /* The rest of the reads will be non-faulting. */
71
+ break;
361
+ set_helper_retaddr(0);
72
+ default:
362
+ }
73
+ g_assert_not_reached();
363
+
364
+ /* After any fault, zero the leading predicated false elements. */
365
+ swap_memzero(vd, reg_off);
366
+
367
+ while (likely((reg_off += 8) < reg_max)) {
368
+ uint8_t pg = *(uint8_t *)(vg + H1(reg_off >> 3));
369
+ if (likely(pg & 1)) {
370
+ addr = off_fn(vm, reg_off);
371
+ addr = base + (addr << scale);
372
+ if (!nonfault_fn(env, vd, reg_off, addr, mmu_idx)) {
373
+ record_fault(env, reg_off, reg_max);
374
+ break;
375
+ }
376
+ } else {
377
+ *(uint64_t *)(vd + reg_off) = 0;
378
+ }
379
+ }
74
+ }
380
+}
75
+}
381
+
76
+
382
+#define DO_LDFF1_ZPZ_S(MEM, OFS) \
77
static inline void neon_load_reg64(TCGv_i64 var, int reg)
383
+void HELPER(sve_ldff##MEM##_##OFS) \
78
{
384
+ (CPUARMState *env, void *vd, void *vg, void *vm, \
79
tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
385
+ target_ulong base, uint32_t desc) \
80
@@ -XXX,XX +XXX,XX @@ static struct {
386
+{ \
81
int interleave;
387
+ sve_ldff1_zs(env, vd, vg, vm, base, desc, GETPC(), \
82
int spacing;
388
+ off_##OFS##_s, sve_ld1##MEM##_tlb, sve_ld##MEM##_nf); \
83
} const neon_ls_element_type[11] = {
389
+}
84
- {4, 4, 1},
390
+
85
- {4, 4, 2},
391
+#define DO_LDFF1_ZPZ_D(MEM, OFS) \
86
+ {1, 4, 1},
392
+void HELPER(sve_ldff##MEM##_##OFS) \
87
+ {1, 4, 2},
393
+ (CPUARMState *env, void *vd, void *vg, void *vm, \
88
{4, 1, 1},
394
+ target_ulong base, uint32_t desc) \
89
- {4, 2, 1},
395
+{ \
90
- {3, 3, 1},
396
+ sve_ldff1_zd(env, vd, vg, vm, base, desc, GETPC(), \
91
- {3, 3, 2},
397
+ off_##OFS##_d, sve_ld1##MEM##_tlb, sve_ld##MEM##_nf); \
92
+ {2, 2, 2},
398
+}
93
+ {1, 3, 1},
399
+
94
+ {1, 3, 2},
400
+DO_LDFF1_ZPZ_S(bsu, zsu)
95
{3, 1, 1},
401
+DO_LDFF1_ZPZ_S(bsu, zss)
96
{1, 1, 1},
402
+DO_LDFF1_ZPZ_D(bdu, zsu)
97
- {2, 2, 1},
403
+DO_LDFF1_ZPZ_D(bdu, zss)
98
- {2, 2, 2},
404
+DO_LDFF1_ZPZ_D(bdu, zd)
99
+ {1, 2, 1},
405
+
100
+ {1, 2, 2},
406
+DO_LDFF1_ZPZ_S(bss, zsu)
101
{2, 1, 1}
407
+DO_LDFF1_ZPZ_S(bss, zss)
408
+DO_LDFF1_ZPZ_D(bds, zsu)
409
+DO_LDFF1_ZPZ_D(bds, zss)
410
+DO_LDFF1_ZPZ_D(bds, zd)
411
+
412
+DO_LDFF1_ZPZ_S(hsu_le, zsu)
413
+DO_LDFF1_ZPZ_S(hsu_le, zss)
414
+DO_LDFF1_ZPZ_D(hdu_le, zsu)
415
+DO_LDFF1_ZPZ_D(hdu_le, zss)
416
+DO_LDFF1_ZPZ_D(hdu_le, zd)
417
+
418
+DO_LDFF1_ZPZ_S(hsu_be, zsu)
419
+DO_LDFF1_ZPZ_S(hsu_be, zss)
420
+DO_LDFF1_ZPZ_D(hdu_be, zsu)
421
+DO_LDFF1_ZPZ_D(hdu_be, zss)
422
+DO_LDFF1_ZPZ_D(hdu_be, zd)
423
+
424
+DO_LDFF1_ZPZ_S(hss_le, zsu)
425
+DO_LDFF1_ZPZ_S(hss_le, zss)
426
+DO_LDFF1_ZPZ_D(hds_le, zsu)
427
+DO_LDFF1_ZPZ_D(hds_le, zss)
428
+DO_LDFF1_ZPZ_D(hds_le, zd)
429
+
430
+DO_LDFF1_ZPZ_S(hss_be, zsu)
431
+DO_LDFF1_ZPZ_S(hss_be, zss)
432
+DO_LDFF1_ZPZ_D(hds_be, zsu)
433
+DO_LDFF1_ZPZ_D(hds_be, zss)
434
+DO_LDFF1_ZPZ_D(hds_be, zd)
435
+
436
+DO_LDFF1_ZPZ_S(ss_le, zsu)
437
+DO_LDFF1_ZPZ_S(ss_le, zss)
438
+DO_LDFF1_ZPZ_D(sdu_le, zsu)
439
+DO_LDFF1_ZPZ_D(sdu_le, zss)
440
+DO_LDFF1_ZPZ_D(sdu_le, zd)
441
+
442
+DO_LDFF1_ZPZ_S(ss_be, zsu)
443
+DO_LDFF1_ZPZ_S(ss_be, zss)
444
+DO_LDFF1_ZPZ_D(sdu_be, zsu)
445
+DO_LDFF1_ZPZ_D(sdu_be, zss)
446
+DO_LDFF1_ZPZ_D(sdu_be, zd)
447
+
448
+DO_LDFF1_ZPZ_D(sds_le, zsu)
449
+DO_LDFF1_ZPZ_D(sds_le, zss)
450
+DO_LDFF1_ZPZ_D(sds_le, zd)
451
+
452
+DO_LDFF1_ZPZ_D(sds_be, zsu)
453
+DO_LDFF1_ZPZ_D(sds_be, zss)
454
+DO_LDFF1_ZPZ_D(sds_be, zd)
455
+
456
+DO_LDFF1_ZPZ_D(dd_le, zsu)
457
+DO_LDFF1_ZPZ_D(dd_le, zss)
458
+DO_LDFF1_ZPZ_D(dd_le, zd)
459
+
460
+DO_LDFF1_ZPZ_D(dd_be, zsu)
461
+DO_LDFF1_ZPZ_D(dd_be, zss)
462
+DO_LDFF1_ZPZ_D(dd_be, zd)
463
464
/* Stores with a vector index. */
465
466
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
467
index XXXXXXX..XXXXXXX 100644
468
--- a/target/arm/translate-sve.c
469
+++ b/target/arm/translate-sve.c
470
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_mem_scatter * const gather_load_fn32[2][2][2][2][3] = {
471
472
/* First-fault */
473
{ { { gen_helper_sve_ldffbss_zsu,
474
- gen_helper_sve_ldffhss_zsu,
475
+ gen_helper_sve_ldffhss_le_zsu,
476
NULL, },
477
{ gen_helper_sve_ldffbsu_zsu,
478
- gen_helper_sve_ldffhsu_zsu,
479
- gen_helper_sve_ldffssu_zsu, } },
480
+ gen_helper_sve_ldffhsu_le_zsu,
481
+ gen_helper_sve_ldffss_le_zsu, } },
482
{ { gen_helper_sve_ldffbss_zss,
483
- gen_helper_sve_ldffhss_zss,
484
+ gen_helper_sve_ldffhss_le_zss,
485
NULL, },
486
{ gen_helper_sve_ldffbsu_zss,
487
- gen_helper_sve_ldffhsu_zss,
488
- gen_helper_sve_ldffssu_zss, } } } },
489
+ gen_helper_sve_ldffhsu_le_zss,
490
+ gen_helper_sve_ldffss_le_zss, } } } },
491
492
/* Big-endian */
493
{ { { { gen_helper_sve_ldbss_zsu,
494
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_mem_scatter * const gather_load_fn32[2][2][2][2][3] = {
495
496
/* First-fault */
497
{ { { gen_helper_sve_ldffbss_zsu,
498
- gen_helper_sve_ldffhss_zsu,
499
+ gen_helper_sve_ldffhss_be_zsu,
500
NULL, },
501
{ gen_helper_sve_ldffbsu_zsu,
502
- gen_helper_sve_ldffhsu_zsu,
503
- gen_helper_sve_ldffssu_zsu, } },
504
+ gen_helper_sve_ldffhsu_be_zsu,
505
+ gen_helper_sve_ldffss_be_zsu, } },
506
{ { gen_helper_sve_ldffbss_zss,
507
- gen_helper_sve_ldffhss_zss,
508
+ gen_helper_sve_ldffhss_be_zss,
509
NULL, },
510
{ gen_helper_sve_ldffbsu_zss,
511
- gen_helper_sve_ldffhsu_zss,
512
- gen_helper_sve_ldffssu_zss, } } } },
513
+ gen_helper_sve_ldffhsu_be_zss,
514
+ gen_helper_sve_ldffss_be_zss, } } } },
515
};
102
};
516
103
517
/* Note that we overload xs=2 to indicate 64-bit offset. */
104
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
518
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_mem_scatter * const gather_load_fn64[2][2][3][2][4] = {
105
int shift;
519
106
int n;
520
/* First-fault */
107
int vec_size;
521
{ { { gen_helper_sve_ldffbds_zsu,
108
+ int mmu_idx;
522
- gen_helper_sve_ldffhds_zsu,
109
+ TCGMemOp endian;
523
- gen_helper_sve_ldffsds_zsu,
110
TCGv_i32 addr;
524
+ gen_helper_sve_ldffhds_le_zsu,
111
TCGv_i32 tmp;
525
+ gen_helper_sve_ldffsds_le_zsu,
112
TCGv_i32 tmp2;
526
NULL, },
113
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
527
{ gen_helper_sve_ldffbdu_zsu,
114
rn = (insn >> 16) & 0xf;
528
- gen_helper_sve_ldffhdu_zsu,
115
rm = insn & 0xf;
529
- gen_helper_sve_ldffsdu_zsu,
116
load = (insn & (1 << 21)) != 0;
530
- gen_helper_sve_ldffddu_zsu, } },
117
+ endian = s->be_data;
531
+ gen_helper_sve_ldffhdu_le_zsu,
118
+ mmu_idx = get_mem_index(s);
532
+ gen_helper_sve_ldffsdu_le_zsu,
119
if ((insn & (1 << 23)) == 0) {
533
+ gen_helper_sve_ldffdd_le_zsu, } },
120
/* Load store all elements. */
534
{ { gen_helper_sve_ldffbds_zss,
121
op = (insn >> 8) & 0xf;
535
- gen_helper_sve_ldffhds_zss,
122
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
536
- gen_helper_sve_ldffsds_zss,
123
nregs = neon_ls_element_type[op].nregs;
537
+ gen_helper_sve_ldffhds_le_zss,
124
interleave = neon_ls_element_type[op].interleave;
538
+ gen_helper_sve_ldffsds_le_zss,
125
spacing = neon_ls_element_type[op].spacing;
539
NULL, },
126
- if (size == 3 && (interleave | spacing) != 1)
540
{ gen_helper_sve_ldffbdu_zss,
127
+ if (size == 3 && (interleave | spacing) != 1) {
541
- gen_helper_sve_ldffhdu_zss,
128
return 1;
542
- gen_helper_sve_ldffsdu_zss,
129
+ }
543
- gen_helper_sve_ldffddu_zss, } },
130
+ tmp64 = tcg_temp_new_i64();
544
+ gen_helper_sve_ldffhdu_le_zss,
131
addr = tcg_temp_new_i32();
545
+ gen_helper_sve_ldffsdu_le_zss,
132
+ tmp2 = tcg_const_i32(1 << size);
546
+ gen_helper_sve_ldffdd_le_zss, } },
133
load_reg_var(s, addr, rn);
547
{ { gen_helper_sve_ldffbds_zd,
134
- stride = (1 << size) * interleave;
548
- gen_helper_sve_ldffhds_zd,
135
for (reg = 0; reg < nregs; reg++) {
549
- gen_helper_sve_ldffsds_zd,
136
- if (interleave > 2 || (interleave == 2 && nregs == 2)) {
550
+ gen_helper_sve_ldffhds_le_zd,
137
- load_reg_var(s, addr, rn);
551
+ gen_helper_sve_ldffsds_le_zd,
138
- tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
552
NULL, },
139
- } else if (interleave == 2 && nregs == 4 && reg == 2) {
553
{ gen_helper_sve_ldffbdu_zd,
140
- load_reg_var(s, addr, rn);
554
- gen_helper_sve_ldffhdu_zd,
141
- tcg_gen_addi_i32(addr, addr, 1 << size);
555
- gen_helper_sve_ldffsdu_zd,
142
- }
556
- gen_helper_sve_ldffddu_zd, } } } },
143
- if (size == 3) {
557
+ gen_helper_sve_ldffhdu_le_zd,
144
- tmp64 = tcg_temp_new_i64();
558
+ gen_helper_sve_ldffsdu_le_zd,
145
- if (load) {
559
+ gen_helper_sve_ldffdd_le_zd, } } } },
146
- gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
560
147
- neon_store_reg64(tmp64, rd);
561
/* Big-endian */
148
- } else {
562
{ { { { gen_helper_sve_ldbds_zsu,
149
- neon_load_reg64(tmp64, rd);
563
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_mem_scatter * const gather_load_fn64[2][2][3][2][4] = {
150
- gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
564
151
- }
565
/* First-fault */
152
- tcg_temp_free_i64(tmp64);
566
{ { { gen_helper_sve_ldffbds_zsu,
153
- tcg_gen_addi_i32(addr, addr, stride);
567
- gen_helper_sve_ldffhds_zsu,
154
- } else {
568
- gen_helper_sve_ldffsds_zsu,
155
- for (pass = 0; pass < 2; pass++) {
569
+ gen_helper_sve_ldffhds_be_zsu,
156
- if (size == 2) {
570
+ gen_helper_sve_ldffsds_be_zsu,
157
- if (load) {
571
NULL, },
158
- tmp = tcg_temp_new_i32();
572
{ gen_helper_sve_ldffbdu_zsu,
159
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
573
- gen_helper_sve_ldffhdu_zsu,
160
- neon_store_reg(rd, pass, tmp);
574
- gen_helper_sve_ldffsdu_zsu,
161
- } else {
575
- gen_helper_sve_ldffddu_zsu, } },
162
- tmp = neon_load_reg(rd, pass);
576
+ gen_helper_sve_ldffhdu_be_zsu,
163
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
577
+ gen_helper_sve_ldffsdu_be_zsu,
164
- tcg_temp_free_i32(tmp);
578
+ gen_helper_sve_ldffdd_be_zsu, } },
165
- }
579
{ { gen_helper_sve_ldffbds_zss,
166
- tcg_gen_addi_i32(addr, addr, stride);
580
- gen_helper_sve_ldffhds_zss,
167
- } else if (size == 1) {
581
- gen_helper_sve_ldffsds_zss,
168
- if (load) {
582
+ gen_helper_sve_ldffhds_be_zss,
169
- tmp = tcg_temp_new_i32();
583
+ gen_helper_sve_ldffsds_be_zss,
170
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
584
NULL, },
171
- tcg_gen_addi_i32(addr, addr, stride);
585
{ gen_helper_sve_ldffbdu_zss,
172
- tmp2 = tcg_temp_new_i32();
586
- gen_helper_sve_ldffhdu_zss,
173
- gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
587
- gen_helper_sve_ldffsdu_zss,
174
- tcg_gen_addi_i32(addr, addr, stride);
588
- gen_helper_sve_ldffddu_zss, } },
175
- tcg_gen_shli_i32(tmp2, tmp2, 16);
589
+ gen_helper_sve_ldffhdu_be_zss,
176
- tcg_gen_or_i32(tmp, tmp, tmp2);
590
+ gen_helper_sve_ldffsdu_be_zss,
177
- tcg_temp_free_i32(tmp2);
591
+ gen_helper_sve_ldffdd_be_zss, } },
178
- neon_store_reg(rd, pass, tmp);
592
{ { gen_helper_sve_ldffbds_zd,
179
- } else {
593
- gen_helper_sve_ldffhds_zd,
180
- tmp = neon_load_reg(rd, pass);
594
- gen_helper_sve_ldffsds_zd,
181
- tmp2 = tcg_temp_new_i32();
595
+ gen_helper_sve_ldffhds_be_zd,
182
- tcg_gen_shri_i32(tmp2, tmp, 16);
596
+ gen_helper_sve_ldffsds_be_zd,
183
- gen_aa32_st16(s, tmp, addr, get_mem_index(s));
597
NULL, },
184
- tcg_temp_free_i32(tmp);
598
{ gen_helper_sve_ldffbdu_zd,
185
- tcg_gen_addi_i32(addr, addr, stride);
599
- gen_helper_sve_ldffhdu_zd,
186
- gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
600
- gen_helper_sve_ldffsdu_zd,
187
- tcg_temp_free_i32(tmp2);
601
- gen_helper_sve_ldffddu_zd, } } } },
188
- tcg_gen_addi_i32(addr, addr, stride);
602
+ gen_helper_sve_ldffhdu_be_zd,
189
- }
603
+ gen_helper_sve_ldffsdu_be_zd,
190
- } else /* size == 0 */ {
604
+ gen_helper_sve_ldffdd_be_zd, } } } },
191
- if (load) {
605
};
192
- tmp2 = NULL;
606
193
- for (n = 0; n < 4; n++) {
607
static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a, uint32_t insn)
194
- tmp = tcg_temp_new_i32();
195
- gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
196
- tcg_gen_addi_i32(addr, addr, stride);
197
- if (n == 0) {
198
- tmp2 = tmp;
199
- } else {
200
- tcg_gen_shli_i32(tmp, tmp, n * 8);
201
- tcg_gen_or_i32(tmp2, tmp2, tmp);
202
- tcg_temp_free_i32(tmp);
203
- }
204
- }
205
- neon_store_reg(rd, pass, tmp2);
206
- } else {
207
- tmp2 = neon_load_reg(rd, pass);
208
- for (n = 0; n < 4; n++) {
209
- tmp = tcg_temp_new_i32();
210
- if (n == 0) {
211
- tcg_gen_mov_i32(tmp, tmp2);
212
- } else {
213
- tcg_gen_shri_i32(tmp, tmp2, n * 8);
214
- }
215
- gen_aa32_st8(s, tmp, addr, get_mem_index(s));
216
- tcg_temp_free_i32(tmp);
217
- tcg_gen_addi_i32(addr, addr, stride);
218
- }
219
- tcg_temp_free_i32(tmp2);
220
- }
221
+ for (n = 0; n < 8 >> size; n++) {
222
+ int xs;
223
+ for (xs = 0; xs < interleave; xs++) {
224
+ int tt = rd + reg + spacing * xs;
225
+
226
+ if (load) {
227
+ gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
228
+ neon_store_element64(tt, n, size, tmp64);
229
+ } else {
230
+ neon_load_element64(tmp64, tt, n, size);
231
+ gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
232
}
233
+ tcg_gen_add_i32(addr, addr, tmp2);
234
}
235
}
236
- rd += spacing;
237
}
238
tcg_temp_free_i32(addr);
239
- stride = nregs * 8;
240
+ tcg_temp_free_i32(tmp2);
241
+ tcg_temp_free_i64(tmp64);
242
+ stride = nregs * interleave * 8;
243
} else {
244
size = (insn >> 10) & 3;
245
if (size == 3) {
608
--
246
--
609
2.19.0
247
2.19.1
610
248
611
249
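As a quick sanity check on the loop bound in the rewritten code above (a standalone sketch, not QEMU code): with size being log2 of the element width in bytes, one 64-bit access covers 8 >> size elements.

#include <stdio.h>

int main(void)
{
    /* size is log2 of the element width in bytes (0=8b, 1=16b, 2=32b, 3=64b) */
    for (int size = 0; size <= 3; size++) {
        printf("size=%d: %d element(s) per 64-bit access\n", size, 8 >> size);
    }
    return 0;
}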
1
Add some comments to the Thumb decoder indicating what bits
1
From: Richard Henderson <richard.henderson@linaro.org>
2
of the instruction have been decoded at various points in
3
the code.
4
2
5
This is not an exhaustive set of comments; we're gradually
3
For a sequence of loads or stores from a single register,
6
adding comments as we work with particular bits of the code.
4
little-endian operations can be promoted to an 8-byte op.
5
This can reduce the number of operations by a factor of 8.
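To make the promotion concrete, here is a minimal standalone sketch (invented helper names, not the QEMU implementation): on a little-endian host, eight consecutive byte elements carry exactly the same bits as a single 8-byte load, which is why the translator below can force size to 3 when interleave == 1.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Eight single-byte operations, one per element. */
static uint64_t load_8x1(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v |= (uint64_t)p[i] << (8 * i);   /* little-endian element order */
    }
    return v;
}

/* The promoted form: one 8-byte operation (little-endian host assumed). */
static uint64_t load_1x8(const uint8_t *p)
{
    uint64_t v;
    memcpy(&v, p, 8);
    return v;
}

int main(void)
{
    uint8_t buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    printf("%d\n", load_8x1(buf) == load_1x8(buf));   /* prints 1 on an LE host */
    return 0;
}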
7
6
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20181011205206.3552-20-richard.henderson@linaro.org
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20181002163556.10279-6-peter.maydell@linaro.org
12
---
12
---
13
target/arm/translate.c | 20 +++++++++++++++++---
13
target/arm/translate.c | 10 ++++++++++
14
1 file changed, 17 insertions(+), 3 deletions(-)
14
1 file changed, 10 insertions(+)
15
15
16
diff --git a/target/arm/translate.c b/target/arm/translate.c
16
diff --git a/target/arm/translate.c b/target/arm/translate.c
17
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate.c
18
--- a/target/arm/translate.c
19
+++ b/target/arm/translate.c
19
+++ b/target/arm/translate.c
20
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
20
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
21
tmp2 = load_reg(s, rm);
21
if (size == 3 && (interleave | spacing) != 1) {
22
if ((insn & 0x70) != 0)
22
return 1;
23
goto illegal_op;
24
+ /*
25
+ * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
26
+ * - MOV, MOVS (register-shifted register), flagsetting
27
+ */
28
op = (insn >> 21) & 3;
29
logic_cc = (insn & (1 << 20)) != 0;
30
gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
31
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
32
rd = insn & 7;
33
op = (insn >> 11) & 3;
34
if (op == 3) {
35
- /* add/subtract */
36
+ /*
37
+ * 0b0001_1xxx_xxxx_xxxx
38
+ * - Add, subtract (three low registers)
39
+ * - Add, subtract (two low registers and immediate)
40
+ */
41
rn = (insn >> 3) & 7;
42
tmp = load_reg(s, rn);
43
if (insn & (1 << 10)) {
44
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
45
}
23
}
46
break;
24
+ /* For our purposes, bytes are always little-endian. */
47
case 2: case 3:
25
+ if (size == 0) {
48
- /* arithmetic large immediate */
26
+ endian = MO_LE;
49
+ /*
27
+ }
50
+ * 0b001x_xxxx_xxxx_xxxx
28
+ /* Consecutive little-endian elements from a single register
51
+ * - Add, subtract, compare, move (one low register and immediate)
29
+ * can be promoted to a larger little-endian operation.
52
+ */
30
+ */
53
op = (insn >> 11) & 3;
31
+ if (interleave == 1 && endian == MO_LE) {
54
rd = (insn >> 8) & 0x7;
32
+ size = 3;
55
if (op == 0) { /* mov */
33
+ }
56
@@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
34
tmp64 = tcg_temp_new_i64();
57
break;
35
addr = tcg_temp_new_i32();
58
}
36
tmp2 = tcg_const_i32(1 << size);
59
60
- /* data processing register */
61
+ /*
62
+ * 0b0100_00xx_xxxx_xxxx
63
+ * - Data-processing (two low registers)
64
+ */
65
rd = insn & 7;
66
rm = (insn >> 3) & 7;
67
op = (insn >> 6) & 0xf;
68
--
37
--
69
2.19.0
38
2.19.1
70
39
71
40
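For readers following the new 0b0001_1xxx_xxxx_xxxx comments in the Thumb patch above, this standalone sketch unpacks the add/subtract fields the same way the decoder does; the struct and function names are invented for illustration.

#include <stdint.h>
#include <stdio.h>

struct thumb_addsub {
    int rd;         /* bits [2:0]  destination */
    int rn;         /* bits [5:3]  first source */
    int rm_or_imm3; /* bits [8:6]  register or 3-bit immediate */
    int is_imm;     /* bit  [10]   immediate form? */
    int is_sub;     /* bit  [9]    subtract? */
};

static struct thumb_addsub decode_addsub(uint16_t insn)
{
    struct thumb_addsub f;
    f.rd         = insn & 7;
    f.rn         = (insn >> 3) & 7;
    f.rm_or_imm3 = (insn >> 6) & 7;
    f.is_imm     = (insn >> 10) & 1;
    f.is_sub     = (insn >> 9) & 1;
    return f;
}

int main(void)
{
    struct thumb_addsub f = decode_addsub(0x1888);  /* ADDS r0, r1, r2 */
    printf("rd=%d rn=%d rm=%d imm=%d sub=%d\n",
           f.rd, f.rn, f.rm_or_imm3, f.is_imm, f.is_sub);
    return 0;
}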
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Use tlb_vaddr_to_host for correct operation with softmmu.
3
Instead of shifts and masks, use direct loads and stores from
4
Optimize for accesses within a single page or pair of pages.
4
the neon register file.
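A hedged illustration of the second message above (shifts and masks versus direct element access): the union layout here is invented for the sketch and assumes a little-endian host, so both accessors agree.

#include <stdint.h>
#include <stdio.h>

/* A 128-bit vector register modelled as a union (illustration only). */
typedef union {
    uint64_t d[2];
    uint32_t w[4];
    uint16_t h[8];
    uint8_t  b[16];
} vreg;

/* Old style: load a 32-bit word, then shift and mask. */
static uint16_t load_h_shifted(const vreg *r, int ele)
{
    uint32_t word = r->w[ele >> 1];
    return (uint16_t)(word >> ((ele & 1) * 16));
}

/* New style: address the 16-bit element directly at its offset. */
static uint16_t load_h_direct(const vreg *r, int ele)
{
    return r->h[ele];
}

int main(void)
{
    vreg r = { .h = {0x1111, 0x2222, 0x3333, 0x4444} };
    printf("%d\n", load_h_shifted(&r, 2) == load_h_direct(&r, 2)); /* 1 */
    return 0;
}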
5
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20181011205206.3552-21-richard.henderson@linaro.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20181005175350.30752-8-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
target/arm/sve_helper.c | 731 +++++++++++++++++++++++++++++++---------
11
target/arm/translate.c | 92 +++++++++++++++++++++++-------------------
12
1 file changed, 569 insertions(+), 162 deletions(-)
12
1 file changed, 50 insertions(+), 42 deletions(-)
13
13
14
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
14
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/sve_helper.c
16
--- a/target/arm/translate.c
17
+++ b/target/arm/sve_helper.c
17
+++ b/target/arm/translate.c
18
@@ -XXX,XX +XXX,XX @@ static void swap_memmove(void *vd, void *vs, size_t n)
18
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 neon_load_reg(int reg, int pass)
19
}
19
return tmp;
20
}
20
}
21
21
22
+/* Similarly for memset of 0. */
22
+static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
23
+static void swap_memzero(void *vd, size_t n)
24
+{
23
+{
25
+ uintptr_t d = (uintptr_t)vd;
24
+ long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
26
+ uintptr_t o = (d | n) & 7;
27
+ size_t i;
28
+
25
+
29
+ /* Usually, the first bit of a predicate is set, so N is 0. */
26
+ switch (mop) {
30
+ if (likely(n == 0)) {
27
+ case MO_UB:
31
+ return;
28
+ tcg_gen_ld8u_i32(var, cpu_env, offset);
32
+ }
33
+
34
+#ifndef HOST_WORDS_BIGENDIAN
35
+ o = 0;
36
+#endif
37
+ switch (o) {
38
+ case 0:
39
+ memset(vd, 0, n);
40
+ break;
29
+ break;
41
+
30
+ case MO_UW:
42
+ case 4:
31
+ tcg_gen_ld16u_i32(var, cpu_env, offset);
43
+ for (i = 0; i < n; i += 4) {
44
+ *(uint32_t *)H1_4(d + i) = 0;
45
+ }
46
+ break;
32
+ break;
47
+
33
+ case MO_UL:
48
+ case 2:
34
+ tcg_gen_ld_i32(var, cpu_env, offset);
49
+ case 6:
50
+ for (i = 0; i < n; i += 2) {
51
+ *(uint16_t *)H1_2(d + i) = 0;
52
+ }
53
+ break;
35
+ break;
54
+
55
+ default:
36
+ default:
56
+ for (i = 0; i < n; i++) {
37
+ g_assert_not_reached();
57
+ *(uint8_t *)H1(d + i) = 0;
58
+ }
59
+ break;
60
+ }
38
+ }
61
+}
39
+}
62
+
40
+
63
void HELPER(sve_ext)(void *vd, void *vn, void *vm, uint32_t desc)
41
static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
64
{
42
{
65
intptr_t opr_sz = simd_oprsz(desc);
43
long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
66
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_fcmla_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc)
44
@@ -XXX,XX +XXX,XX @@ static void neon_store_reg(int reg, int pass, TCGv_i32 var)
67
/*
45
tcg_temp_free_i32(var);
68
* Load contiguous data, protected by a governing predicate.
46
}
69
*/
47
70
-#define DO_LD1(NAME, FN, TYPEE, TYPEM, H) \
48
+static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
71
-static void do_##NAME(CPUARMState *env, void *vd, void *vg, \
49
+{
72
- target_ulong addr, intptr_t oprsz, \
50
+ long offset = neon_element_offset(reg, ele, size);
73
- uintptr_t ra) \
74
-{ \
75
- intptr_t i = 0; \
76
- do { \
77
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
78
- do { \
79
- TYPEM m = 0; \
80
- if (pg & 1) { \
81
- m = FN(env, addr, ra); \
82
- } \
83
- *(TYPEE *)(vd + H(i)) = m; \
84
- i += sizeof(TYPEE), pg >>= sizeof(TYPEE); \
85
- addr += sizeof(TYPEM); \
86
- } while (i & 15); \
87
- } while (i < oprsz); \
88
-} \
89
-void HELPER(NAME)(CPUARMState *env, void *vg, \
90
- target_ulong addr, uint32_t desc) \
91
-{ \
92
- do_##NAME(env, &env->vfp.zregs[simd_data(desc)], vg, \
93
- addr, simd_oprsz(desc), GETPC()); \
94
+
51
+
95
+/*
52
+ switch (size) {
96
+ * Load elements into @vd, controlled by @vg, from @host + @mem_ofs.
53
+ case MO_8:
97
+ * Memory is valid through @host + @mem_max. The register element
54
+ tcg_gen_st8_i32(var, cpu_env, offset);
98
+ * indicies are inferred from @mem_ofs, as modified by the types for
55
+ break;
99
+ * which the helper is built. Return the @mem_ofs of the first element
56
+ case MO_16:
100
+ * not loaded (which is @mem_max if they are all loaded).
57
+ tcg_gen_st16_i32(var, cpu_env, offset);
101
+ *
58
+ break;
102
+ * For softmmu, we have fully validated the guest page. For user-only,
59
+ case MO_32:
103
+ * we cannot fully validate without taking the mmap lock, but since we
60
+ tcg_gen_st_i32(var, cpu_env, offset);
104
+ * know the access is within one host page, if any access is valid they
61
+ break;
105
+ * all must be valid. However, when @vg is all false, it may be that
62
+ default:
106
+ * no access is valid.
63
+ g_assert_not_reached();
107
+ */
108
+typedef intptr_t sve_ld1_host_fn(void *vd, void *vg, void *host,
109
+ intptr_t mem_ofs, intptr_t mem_max);
110
+
111
+/*
112
+ * Load one element into @vd + @reg_off from (@env, @vaddr, @ra).
113
+ * The controlling predicate is known to be true.
114
+ */
115
+typedef void sve_ld1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off,
116
+ target_ulong vaddr, int mmu_idx, uintptr_t ra);
117
+
118
+/*
119
+ * Generate the above primitives.
120
+ */
121
+
122
+#define DO_LD_HOST(NAME, H, TYPEE, TYPEM, HOST) \
123
+static intptr_t sve_##NAME##_host(void *vd, void *vg, void *host, \
124
+ intptr_t mem_off, const intptr_t mem_max) \
125
+{ \
126
+ intptr_t reg_off = mem_off * (sizeof(TYPEE) / sizeof(TYPEM)); \
127
+ uint64_t *pg = vg; \
128
+ while (mem_off + sizeof(TYPEM) <= mem_max) { \
129
+ TYPEM val = 0; \
130
+ if (likely((pg[reg_off >> 6] >> (reg_off & 63)) & 1)) { \
131
+ val = HOST(host + mem_off); \
132
+ } \
133
+ *(TYPEE *)(vd + H(reg_off)) = val; \
134
+ mem_off += sizeof(TYPEM), reg_off += sizeof(TYPEE); \
135
+ } \
136
+ return mem_off; \
137
}
138
139
+#ifdef CONFIG_SOFTMMU
140
+#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, HOST, MOEND, TLB) \
141
+static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
142
+ target_ulong addr, int mmu_idx, uintptr_t ra) \
143
+{ \
144
+ TCGMemOpIdx oi = make_memop_idx(ctz32(sizeof(TYPEM)) | MOEND, mmu_idx); \
145
+ TYPEM val = TLB(env, addr, oi, ra); \
146
+ *(TYPEE *)(vd + H(reg_off)) = val; \
147
+}
148
+#else
149
+#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, HOST, MOEND, TLB) \
150
+static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
151
+ target_ulong addr, int mmu_idx, uintptr_t ra) \
152
+{ \
153
+ TYPEM val = HOST(g2h(addr)); \
154
+ *(TYPEE *)(vd + H(reg_off)) = val; \
155
+}
156
+#endif
157
+
158
+#define DO_LD_PRIM_1(NAME, H, TE, TM) \
159
+ DO_LD_HOST(NAME, H, TE, TM, ldub_p) \
160
+ DO_LD_TLB(NAME, H, TE, TM, ldub_p, 0, helper_ret_ldub_mmu)
161
+
162
+DO_LD_PRIM_1(ld1bb, H1, uint8_t, uint8_t)
163
+DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t)
164
+DO_LD_PRIM_1(ld1bhs, H1_2, uint16_t, int8_t)
165
+DO_LD_PRIM_1(ld1bsu, H1_4, uint32_t, uint8_t)
166
+DO_LD_PRIM_1(ld1bss, H1_4, uint32_t, int8_t)
167
+DO_LD_PRIM_1(ld1bdu, , uint64_t, uint8_t)
168
+DO_LD_PRIM_1(ld1bds, , uint64_t, int8_t)
169
+
170
+#define DO_LD_PRIM_2(NAME, end, MOEND, H, TE, TM, PH, PT) \
171
+ DO_LD_HOST(NAME##_##end, H, TE, TM, PH##_##end##_p) \
172
+ DO_LD_TLB(NAME##_##end, H, TE, TM, PH##_##end##_p, \
173
+ MOEND, helper_##end##_##PT##_mmu)
174
+
175
+DO_LD_PRIM_2(ld1hh, le, MO_LE, H1_2, uint16_t, uint16_t, lduw, lduw)
176
+DO_LD_PRIM_2(ld1hsu, le, MO_LE, H1_4, uint32_t, uint16_t, lduw, lduw)
177
+DO_LD_PRIM_2(ld1hss, le, MO_LE, H1_4, uint32_t, int16_t, lduw, lduw)
178
+DO_LD_PRIM_2(ld1hdu, le, MO_LE, , uint64_t, uint16_t, lduw, lduw)
179
+DO_LD_PRIM_2(ld1hds, le, MO_LE, , uint64_t, int16_t, lduw, lduw)
180
+
181
+DO_LD_PRIM_2(ld1ss, le, MO_LE, H1_4, uint32_t, uint32_t, ldl, ldul)
182
+DO_LD_PRIM_2(ld1sdu, le, MO_LE, , uint64_t, uint32_t, ldl, ldul)
183
+DO_LD_PRIM_2(ld1sds, le, MO_LE, , uint64_t, int32_t, ldl, ldul)
184
+
185
+DO_LD_PRIM_2(ld1dd, le, MO_LE, , uint64_t, uint64_t, ldq, ldq)
186
+
187
+DO_LD_PRIM_2(ld1hh, be, MO_BE, H1_2, uint16_t, uint16_t, lduw, lduw)
188
+DO_LD_PRIM_2(ld1hsu, be, MO_BE, H1_4, uint32_t, uint16_t, lduw, lduw)
189
+DO_LD_PRIM_2(ld1hss, be, MO_BE, H1_4, uint32_t, int16_t, lduw, lduw)
190
+DO_LD_PRIM_2(ld1hdu, be, MO_BE, , uint64_t, uint16_t, lduw, lduw)
191
+DO_LD_PRIM_2(ld1hds, be, MO_BE, , uint64_t, int16_t, lduw, lduw)
192
+
193
+DO_LD_PRIM_2(ld1ss, be, MO_BE, H1_4, uint32_t, uint32_t, ldl, ldul)
194
+DO_LD_PRIM_2(ld1sdu, be, MO_BE, , uint64_t, uint32_t, ldl, ldul)
195
+DO_LD_PRIM_2(ld1sds, be, MO_BE, , uint64_t, int32_t, ldl, ldul)
196
+
197
+DO_LD_PRIM_2(ld1dd, be, MO_BE, , uint64_t, uint64_t, ldq, ldq)
198
+
199
+#undef DO_LD_TLB
200
+#undef DO_LD_HOST
201
+#undef DO_LD_PRIM_1
202
+#undef DO_LD_PRIM_2
203
+
204
+/*
205
+ * Skip through a sequence of inactive elements in the guarding predicate @vg,
206
+ * beginning at @reg_off bounded by @reg_max. Return the offset of the active
207
+ * element >= @reg_off, or @reg_max if there were no active elements at all.
208
+ */
209
+static intptr_t find_next_active(uint64_t *vg, intptr_t reg_off,
210
+ intptr_t reg_max, int esz)
211
+{
212
+ uint64_t pg_mask = pred_esz_masks[esz];
213
+ uint64_t pg = (vg[reg_off >> 6] & pg_mask) >> (reg_off & 63);
214
+
215
+ /* In normal usage, the first element is active. */
216
+ if (likely(pg & 1)) {
217
+ return reg_off;
218
+ }
64
+ }
219
+
220
+ if (pg == 0) {
221
+ reg_off &= -64;
222
+ do {
223
+ reg_off += 64;
224
+ if (unlikely(reg_off >= reg_max)) {
225
+ /* The entire predicate was false. */
226
+ return reg_max;
227
+ }
228
+ pg = vg[reg_off >> 6] & pg_mask;
229
+ } while (pg == 0);
230
+ }
231
+ reg_off += ctz64(pg);
232
+
233
+ /* We should never see an out of range predicate bit set. */
234
+ tcg_debug_assert(reg_off < reg_max);
235
+ return reg_off;
236
+}
65
+}
237
+
66
+
238
+/*
67
static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
239
+ * Return the maximum offset <= @mem_max which is still within the page
68
{
240
+ * referenced by @base + @mem_off.
69
long offset = neon_element_offset(reg, ele, size);
241
+ */
70
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
242
+static intptr_t max_for_page(target_ulong base, intptr_t mem_off,
71
int stride;
243
+ intptr_t mem_max)
72
int size;
244
+{
73
int reg;
245
+ target_ulong addr = base + mem_off;
74
- int pass;
246
+ intptr_t split = -(intptr_t)(addr | TARGET_PAGE_MASK);
75
int load;
247
+ return MIN(split, mem_max - mem_off) + mem_off;
76
- int shift;
248
+}
77
int n;
249
+
78
int vec_size;
250
+static inline void set_helper_retaddr(uintptr_t ra)
79
int mmu_idx;
251
+{
80
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
252
+#ifdef CONFIG_USER_ONLY
81
} else {
253
+ helper_retaddr = ra;
82
/* Single element. */
254
+#endif
83
int idx = (insn >> 4) & 0xf;
255
+}
84
- pass = (insn >> 7) & 1;
256
+
85
+ int reg_idx;
257
+/*
86
switch (size) {
258
+ * The result of tlb_vaddr_to_host for user-only is just g2h(x),
87
case 0:
259
+ * which is always non-null. Elide the useless test.
88
- shift = ((insn >> 5) & 3) * 8;
260
+ */
89
+ reg_idx = (insn >> 5) & 7;
261
+static inline bool test_host_page(void *host)
90
stride = 1;
262
+{
91
break;
263
+#ifdef CONFIG_USER_ONLY
92
case 1:
264
+ return true;
93
- shift = ((insn >> 6) & 1) * 16;
265
+#else
94
+ reg_idx = (insn >> 6) & 3;
266
+ return likely(host != NULL);
95
stride = (insn & (1 << 5)) ? 2 : 1;
267
+#endif
96
break;
268
+}
97
case 2:
269
+
98
- shift = 0;
270
+/*
99
+ reg_idx = (insn >> 7) & 1;
271
+ * Common helper for all contiguous one-register predicated loads.
100
stride = (insn & (1 << 6)) ? 2 : 1;
272
+ */
101
break;
273
+static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
102
default:
274
+ uint32_t desc, const uintptr_t retaddr,
103
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
275
+ const int esz, const int msz,
104
*/
276
+ sve_ld1_host_fn *host_fn,
105
return 1;
277
+ sve_ld1_tlb_fn *tlb_fn)
106
}
278
+{
107
+ tmp = tcg_temp_new_i32();
279
+ void *vd = &env->vfp.zregs[simd_data(desc)];
108
addr = tcg_temp_new_i32();
280
+ const int diffsz = esz - msz;
109
load_reg_var(s, addr, rn);
281
+ const intptr_t reg_max = simd_oprsz(desc);
110
for (reg = 0; reg < nregs; reg++) {
282
+ const intptr_t mem_max = reg_max >> diffsz;
111
if (load) {
283
+ const int mmu_idx = cpu_mmu_index(env, false);
112
- tmp = tcg_temp_new_i32();
284
+ ARMVectorReg scratch;
113
- switch (size) {
285
+ void *host;
114
- case 0:
286
+ intptr_t split, reg_off, mem_off;
115
- gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
287
+
116
- break;
288
+ /* Find the first active element. */
117
- case 1:
289
+ reg_off = find_next_active(vg, 0, reg_max, esz);
118
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
290
+ if (unlikely(reg_off == reg_max)) {
119
- break;
291
+ /* The entire predicate was false; no load occurs. */
120
- case 2:
292
+ memset(vd, 0, reg_max);
121
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
293
+ return;
122
- break;
294
+ }
123
- default: /* Avoid compiler warnings. */
295
+ mem_off = reg_off >> diffsz;
124
- abort();
296
+ set_helper_retaddr(retaddr);
125
- }
297
+
126
- if (size != 2) {
298
+ /*
127
- tmp2 = neon_load_reg(rd, pass);
299
+ * If the (remaining) load is entirely within a single page, then:
128
- tcg_gen_deposit_i32(tmp, tmp2, tmp,
300
+ * For softmmu, and the tlb hits, then no faults will occur;
129
- shift, size ? 16 : 8);
301
+ * For user-only, either the first load will fault or none will.
130
- tcg_temp_free_i32(tmp2);
302
+ * We can thus perform the load directly to the destination and
131
- }
303
+ * Vd will be unmodified on any exception path.
132
- neon_store_reg(rd, pass, tmp);
304
+ */
133
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
305
+ split = max_for_page(addr, mem_off, mem_max);
134
+ s->be_data | size);
306
+ if (likely(split == mem_max)) {
135
+ neon_store_element(rd, reg_idx, size, tmp);
307
+ host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
136
} else { /* Store */
308
+ if (test_host_page(host)) {
137
- tmp = neon_load_reg(rd, pass);
309
+ mem_off = host_fn(vd, vg, host - mem_off, mem_off, mem_max);
138
- if (shift)
310
+ tcg_debug_assert(mem_off == mem_max);
139
- tcg_gen_shri_i32(tmp, tmp, shift);
311
+ set_helper_retaddr(0);
140
- switch (size) {
312
+ /* After having taken any fault, zero leading inactive elements. */
141
- case 0:
313
+ swap_memzero(vd, reg_off);
142
- gen_aa32_st8(s, tmp, addr, get_mem_index(s));
314
+ return;
143
- break;
315
+ }
144
- case 1:
316
+ }
145
- gen_aa32_st16(s, tmp, addr, get_mem_index(s));
317
+
146
- break;
318
+ /*
147
- case 2:
319
+ * Perform the predicated read into a temporary, thus ensuring
148
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
320
+ * if the load of the last element faults, Vd is not modified.
149
- break;
321
+ */
150
- }
322
+#ifdef CONFIG_USER_ONLY
151
- tcg_temp_free_i32(tmp);
323
+ swap_memzero(&scratch, reg_off);
152
+ neon_load_element(tmp, rd, reg_idx, size);
324
+ host_fn(&scratch, vg, g2h(addr), mem_off, mem_max);
153
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
325
+#else
154
+ s->be_data | size);
326
+ memset(&scratch, 0, reg_max);
155
}
327
+ goto start;
156
rd += stride;
328
+ while (1) {
157
tcg_gen_addi_i32(addr, addr, 1 << size);
329
+ reg_off = find_next_active(vg, reg_off, reg_max, esz);
158
}
330
+ if (reg_off >= reg_max) {
159
tcg_temp_free_i32(addr);
331
+ break;
160
+ tcg_temp_free_i32(tmp);
332
+ }
161
stride = nregs * (1 << size);
333
+ mem_off = reg_off >> diffsz;
162
}
334
+ split = max_for_page(addr, mem_off, mem_max);
335
+
336
+ start:
337
+ if (split - mem_off >= (1 << msz)) {
338
+ /* At least one whole element on this page. */
339
+ host = tlb_vaddr_to_host(env, addr + mem_off,
340
+ MMU_DATA_LOAD, mmu_idx);
341
+ if (host) {
342
+ mem_off = host_fn(&scratch, vg, host - mem_off,
343
+ mem_off, split);
344
+ reg_off = mem_off << diffsz;
345
+ continue;
346
+ }
347
+ }
348
+
349
+ /*
350
+ * Perform one normal read. This may fault, longjmping out to the
351
+ * main loop in order to raise an exception. It may succeed, and
352
+ * as a side-effect load the TLB entry for the next round. Finally,
353
+ * in the extremely unlikely case we're performing this operation
354
+ * on I/O memory, it may succeed but not bring in the TLB entry.
355
+ * But even then we have still made forward progress.
356
+ */
357
+ tlb_fn(env, &scratch, reg_off, addr + mem_off, mmu_idx, retaddr);
358
+ reg_off += 1 << esz;
359
+ }
360
+#endif
361
+
362
+ set_helper_retaddr(0);
363
+ memcpy(vd, &scratch, reg_max);
364
+}
365
+
366
+#define DO_LD1_1(NAME, ESZ) \
367
+void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
368
+ target_ulong addr, uint32_t desc) \
369
+{ \
370
+ sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, 0, \
371
+ sve_##NAME##_host, sve_##NAME##_tlb); \
372
+}
373
+
374
+/* TODO: Propagate the endian check back to the translator. */
375
+#define DO_LD1_2(NAME, ESZ, MSZ) \
376
+void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
377
+ target_ulong addr, uint32_t desc) \
378
+{ \
379
+ if (arm_cpu_data_is_big_endian(env)) { \
380
+ sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \
381
+ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
382
+ } else { \
383
+ sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \
384
+ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
385
+ } \
386
+}
387
+
388
+DO_LD1_1(ld1bb, 0)
389
+DO_LD1_1(ld1bhu, 1)
390
+DO_LD1_1(ld1bhs, 1)
391
+DO_LD1_1(ld1bsu, 2)
392
+DO_LD1_1(ld1bss, 2)
393
+DO_LD1_1(ld1bdu, 3)
394
+DO_LD1_1(ld1bds, 3)
395
+
396
+DO_LD1_2(ld1hh, 1, 1)
397
+DO_LD1_2(ld1hsu, 2, 1)
398
+DO_LD1_2(ld1hss, 2, 1)
399
+DO_LD1_2(ld1hdu, 3, 1)
400
+DO_LD1_2(ld1hds, 3, 1)
401
+
402
+DO_LD1_2(ld1ss, 2, 2)
403
+DO_LD1_2(ld1sdu, 3, 2)
404
+DO_LD1_2(ld1sds, 3, 2)
405
+
406
+DO_LD1_2(ld1dd, 3, 3)
407
+
408
+#undef DO_LD1_1
409
+#undef DO_LD1_2
410
+
411
#define DO_LD2(NAME, FN, TYPEE, TYPEM, H) \
412
void HELPER(NAME)(CPUARMState *env, void *vg, \
413
target_ulong addr, uint32_t desc) \
414
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(CPUARMState *env, void *vg, \
415
} \
416
}
417
418
-DO_LD1(sve_ld1bhu_r, cpu_ldub_data_ra, uint16_t, uint8_t, H1_2)
419
-DO_LD1(sve_ld1bhs_r, cpu_ldsb_data_ra, uint16_t, int8_t, H1_2)
420
-DO_LD1(sve_ld1bsu_r, cpu_ldub_data_ra, uint32_t, uint8_t, H1_4)
421
-DO_LD1(sve_ld1bss_r, cpu_ldsb_data_ra, uint32_t, int8_t, H1_4)
422
-DO_LD1(sve_ld1bdu_r, cpu_ldub_data_ra, uint64_t, uint8_t, )
423
-DO_LD1(sve_ld1bds_r, cpu_ldsb_data_ra, uint64_t, int8_t, )
424
-
425
-DO_LD1(sve_ld1hsu_r, cpu_lduw_data_ra, uint32_t, uint16_t, H1_4)
426
-DO_LD1(sve_ld1hss_r, cpu_ldsw_data_ra, uint32_t, int16_t, H1_4)
427
-DO_LD1(sve_ld1hdu_r, cpu_lduw_data_ra, uint64_t, uint16_t, )
428
-DO_LD1(sve_ld1hds_r, cpu_ldsw_data_ra, uint64_t, int16_t, )
429
-
430
-DO_LD1(sve_ld1sdu_r, cpu_ldl_data_ra, uint64_t, uint32_t, )
431
-DO_LD1(sve_ld1sds_r, cpu_ldl_data_ra, uint64_t, int32_t, )
432
-
433
-DO_LD1(sve_ld1bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
434
DO_LD2(sve_ld2bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
435
DO_LD3(sve_ld3bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
436
DO_LD4(sve_ld4bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
437
438
-DO_LD1(sve_ld1hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
439
DO_LD2(sve_ld2hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
440
DO_LD3(sve_ld3hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
441
DO_LD4(sve_ld4hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
442
443
-DO_LD1(sve_ld1ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
444
DO_LD2(sve_ld2ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
445
DO_LD3(sve_ld3ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
446
DO_LD4(sve_ld4ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
447
448
-DO_LD1(sve_ld1dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
449
DO_LD2(sve_ld2dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
450
DO_LD3(sve_ld3dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
451
DO_LD4(sve_ld4dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
452
453
-#undef DO_LD1
454
#undef DO_LD2
455
#undef DO_LD3
456
#undef DO_LD4
457
458
/*
459
* Load contiguous data, first-fault and no-fault.
460
+ *
461
+ * For user-only, one could argue that we should hold the mmap_lock during
462
+ * the operation so that there is no race between page_check_range and the
463
+ * load operation. However, unmapping pages out from under a running thread
464
+ * is extraordinarily unlikely. This theoretical race condition also affects
465
+ * linux-user/ in its get_user/put_user macros.
466
+ *
467
+ * TODO: Construct some helpers, written in assembly, that interact with
468
+ * handle_cpu_signal to produce memory ops which can properly report errors
469
+ * without racing.
470
*/
471
472
-#ifdef CONFIG_USER_ONLY
473
-
474
/* Fault on byte I. All bits in FFR from I are cleared. The vector
475
* result from I is CONSTRAINED UNPREDICTABLE; we choose the MERGE
476
* option, which leaves subsequent data unchanged.
477
@@ -XXX,XX +XXX,XX @@ static void record_fault(CPUARMState *env, uintptr_t i, uintptr_t oprsz)
478
}
163
}
479
}
480
481
-/* Hold the mmap lock during the operation so that there is no race
482
- * between page_check_range and the load operation. We expect the
483
- * usual case to have no faults at all, so we check the whole range
484
- * first and if successful defer to the normal load operation.
485
- *
486
- * TODO: Change mmap_lock to a rwlock so that multiple readers
487
- * can run simultaneously. This will probably help other uses
488
- * within QEMU as well.
489
+/*
490
+ * Common helper for all contiguous first-fault loads.
491
*/
492
-#define DO_LDFF1(PART, FN, TYPEE, TYPEM, H) \
493
-static void do_sve_ldff1##PART(CPUARMState *env, void *vd, void *vg, \
494
- target_ulong addr, intptr_t oprsz, \
495
- bool first, uintptr_t ra) \
496
-{ \
497
- intptr_t i = 0; \
498
- do { \
499
- uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
500
- do { \
501
- TYPEM m = 0; \
502
- if (pg & 1) { \
503
- if (!first && \
504
- unlikely(page_check_range(addr, sizeof(TYPEM), \
505
- PAGE_READ))) { \
506
- record_fault(env, i, oprsz); \
507
- return; \
508
- } \
509
- m = FN(env, addr, ra); \
510
- first = false; \
511
- } \
512
- *(TYPEE *)(vd + H(i)) = m; \
513
- i += sizeof(TYPEE), pg >>= sizeof(TYPEE); \
514
- addr += sizeof(TYPEM); \
515
- } while (i & 15); \
516
- } while (i < oprsz); \
517
-} \
518
-void HELPER(sve_ldff1##PART)(CPUARMState *env, void *vg, \
519
- target_ulong addr, uint32_t desc) \
520
-{ \
521
- intptr_t oprsz = simd_oprsz(desc); \
522
- unsigned rd = simd_data(desc); \
523
- void *vd = &env->vfp.zregs[rd]; \
524
- mmap_lock(); \
525
- if (likely(page_check_range(addr, oprsz, PAGE_READ) == 0)) { \
526
- do_sve_ld1##PART(env, vd, vg, addr, oprsz, GETPC()); \
527
- } else { \
528
- do_sve_ldff1##PART(env, vd, vg, addr, oprsz, true, GETPC()); \
529
- } \
530
- mmap_unlock(); \
531
-}
532
+static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
533
+ uint32_t desc, const uintptr_t retaddr,
534
+ const int esz, const int msz,
535
+ sve_ld1_host_fn *host_fn,
536
+ sve_ld1_tlb_fn *tlb_fn)
537
+{
538
+ void *vd = &env->vfp.zregs[simd_data(desc)];
539
+ const int diffsz = esz - msz;
540
+ const intptr_t reg_max = simd_oprsz(desc);
541
+ const intptr_t mem_max = reg_max >> diffsz;
542
+ const int mmu_idx = cpu_mmu_index(env, false);
543
+ intptr_t split, reg_off, mem_off;
544
+ void *host;
545
546
-/* No-fault loads are like first-fault loads without the
547
- * first faulting special case.
548
- */
549
-#define DO_LDNF1(PART) \
550
-void HELPER(sve_ldnf1##PART)(CPUARMState *env, void *vg, \
551
- target_ulong addr, uint32_t desc) \
552
-{ \
553
- intptr_t oprsz = simd_oprsz(desc); \
554
- unsigned rd = simd_data(desc); \
555
- void *vd = &env->vfp.zregs[rd]; \
556
- mmap_lock(); \
557
- if (likely(page_check_range(addr, oprsz, PAGE_READ) == 0)) { \
558
- do_sve_ld1##PART(env, vd, vg, addr, oprsz, GETPC()); \
559
- } else { \
560
- do_sve_ldff1##PART(env, vd, vg, addr, oprsz, false, GETPC()); \
561
- } \
562
- mmap_unlock(); \
563
-}
564
+ /* Skip to the first active element. */
565
+ reg_off = find_next_active(vg, 0, reg_max, esz);
566
+ if (unlikely(reg_off == reg_max)) {
567
+ /* The entire predicate was false; no load occurs. */
568
+ memset(vd, 0, reg_max);
569
+ return;
570
+ }
571
+ mem_off = reg_off >> diffsz;
572
+ set_helper_retaddr(retaddr);
573
574
+ /*
575
+ * If the (remaining) load is entirely within a single page, then:
576
+ * For softmmu, and the tlb hits, then no faults will occur;
577
+ * For user-only, either the first load will fault or none will.
578
+ * We can thus perform the load directly to the destination and
579
+ * Vd will be unmodified on any exception path.
580
+ */
581
+ split = max_for_page(addr, mem_off, mem_max);
582
+ if (likely(split == mem_max)) {
583
+ host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
584
+ if (test_host_page(host)) {
585
+ mem_off = host_fn(vd, vg, host - mem_off, mem_off, mem_max);
586
+ tcg_debug_assert(mem_off == mem_max);
587
+ set_helper_retaddr(0);
588
+ /* After any fault, zero any leading inactive elements. */
589
+ swap_memzero(vd, reg_off);
590
+ return;
591
+ }
592
+ }
593
+
594
+#ifdef CONFIG_USER_ONLY
595
+ /*
596
+ * The page(s) containing this first element at ADDR+MEM_OFF must
597
+ * be valid. Considering that this first element may be misaligned
598
+ * and cross a page boundary itself, take the rest of the page from
599
+ * the last byte of the element.
600
+ */
601
+ split = max_for_page(addr, mem_off + (1 << msz) - 1, mem_max);
602
+ mem_off = host_fn(vd, vg, g2h(addr), mem_off, split);
603
+
604
+ /* After any fault, zero any leading inactive elements. */
605
+ swap_memzero(vd, reg_off);
606
+ reg_off = mem_off << diffsz;
607
#else
608
+ /*
609
+ * Perform one normal read, which will fault or not.
610
+ * But it is likely to bring the page into the tlb.
611
+ */
612
+ tlb_fn(env, vd, reg_off, addr + mem_off, mmu_idx, retaddr);
613
614
-/* TODO: System mode is not yet supported.
615
- * This would probably use tlb_vaddr_to_host.
616
- */
617
-#define DO_LDFF1(PART, FN, TYPEE, TYPEM, H) \
618
-void HELPER(sve_ldff1##PART)(CPUARMState *env, void *vg, \
619
- target_ulong addr, uint32_t desc) \
620
-{ \
621
- g_assert_not_reached(); \
622
-}
623
-
624
-#define DO_LDNF1(PART) \
625
-void HELPER(sve_ldnf1##PART)(CPUARMState *env, void *vg, \
626
- target_ulong addr, uint32_t desc) \
627
-{ \
628
- g_assert_not_reached(); \
629
-}
630
+ /* After any fault, zero any leading predicated false elts. */
631
+ swap_memzero(vd, reg_off);
632
+ mem_off += 1 << msz;
633
+ reg_off += 1 << esz;
634
635
+ /* Try again to read the balance of the page. */
636
+ split = max_for_page(addr, mem_off - 1, mem_max);
637
+ if (split >= (1 << msz)) {
638
+ host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
639
+ if (host) {
640
+ mem_off = host_fn(vd, vg, host - mem_off, mem_off, split);
641
+ reg_off = mem_off << diffsz;
642
+ }
643
+ }
644
#endif
645
646
-DO_LDFF1(bb_r, cpu_ldub_data_ra, uint8_t, uint8_t, H1)
647
-DO_LDFF1(bhu_r, cpu_ldub_data_ra, uint16_t, uint8_t, H1_2)
648
-DO_LDFF1(bhs_r, cpu_ldsb_data_ra, uint16_t, int8_t, H1_2)
649
-DO_LDFF1(bsu_r, cpu_ldub_data_ra, uint32_t, uint8_t, H1_4)
650
-DO_LDFF1(bss_r, cpu_ldsb_data_ra, uint32_t, int8_t, H1_4)
651
-DO_LDFF1(bdu_r, cpu_ldub_data_ra, uint64_t, uint8_t, )
652
-DO_LDFF1(bds_r, cpu_ldsb_data_ra, uint64_t, int8_t, )
653
+ set_helper_retaddr(0);
654
+ record_fault(env, reg_off, reg_max);
655
+}
656
657
-DO_LDFF1(hh_r, cpu_lduw_data_ra, uint16_t, uint16_t, H1_2)
658
-DO_LDFF1(hsu_r, cpu_lduw_data_ra, uint32_t, uint16_t, H1_4)
659
-DO_LDFF1(hss_r, cpu_ldsw_data_ra, uint32_t, int8_t, H1_4)
660
-DO_LDFF1(hdu_r, cpu_lduw_data_ra, uint64_t, uint16_t, )
661
-DO_LDFF1(hds_r, cpu_ldsw_data_ra, uint64_t, int16_t, )
662
+/*
663
+ * Common helper for all contiguous no-fault loads.
664
+ */
665
+static void sve_ldnf1_r(CPUARMState *env, void *vg, const target_ulong addr,
666
+ uint32_t desc, const int esz, const int msz,
667
+ sve_ld1_host_fn *host_fn)
668
+{
669
+ void *vd = &env->vfp.zregs[simd_data(desc)];
670
+ const int diffsz = esz - msz;
671
+ const intptr_t reg_max = simd_oprsz(desc);
672
+ const intptr_t mem_max = reg_max >> diffsz;
673
+ const int mmu_idx = cpu_mmu_index(env, false);
674
+ intptr_t split, reg_off, mem_off;
675
+ void *host;
676
677
-DO_LDFF1(ss_r, cpu_ldl_data_ra, uint32_t, uint32_t, H1_4)
678
-DO_LDFF1(sdu_r, cpu_ldl_data_ra, uint64_t, uint32_t, )
679
-DO_LDFF1(sds_r, cpu_ldl_data_ra, uint64_t, int32_t, )
680
+#ifdef CONFIG_USER_ONLY
681
+ host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
682
+ if (likely(page_check_range(addr, mem_max, PAGE_READ) == 0)) {
683
+ /* The entire operation is valid and will not fault. */
684
+ host_fn(vd, vg, host, 0, mem_max);
685
+ return;
686
+ }
687
+#endif
688
689
-DO_LDFF1(dd_r, cpu_ldq_data_ra, uint64_t, uint64_t, )
690
+ /* There will be no fault, so we may modify in advance. */
691
+ memset(vd, 0, reg_max);
692
693
-#undef DO_LDFF1
694
+ /* Skip to the first active element. */
695
+ reg_off = find_next_active(vg, 0, reg_max, esz);
696
+ if (unlikely(reg_off == reg_max)) {
697
+ /* The entire predicate was false; no load occurs. */
698
+ return;
699
+ }
700
+ mem_off = reg_off >> diffsz;
701
702
-DO_LDNF1(bb_r)
703
-DO_LDNF1(bhu_r)
704
-DO_LDNF1(bhs_r)
705
-DO_LDNF1(bsu_r)
706
-DO_LDNF1(bss_r)
707
-DO_LDNF1(bdu_r)
708
-DO_LDNF1(bds_r)
709
+#ifdef CONFIG_USER_ONLY
710
+ if (page_check_range(addr + mem_off, 1 << msz, PAGE_READ) == 0) {
711
+ /* At least one load is valid; take the rest of the page. */
712
+ split = max_for_page(addr, mem_off + (1 << msz) - 1, mem_max);
713
+ mem_off = host_fn(vd, vg, host, mem_off, split);
714
+ reg_off = mem_off << diffsz;
715
+ }
716
+#else
717
+ /*
718
+ * If the address is not in the TLB, we have no way to bring the
719
+ * entry into the TLB without also risking a fault. Note that
720
+ * the corollary is that we never load from an address not in RAM.
721
+ *
722
+ * This last is out of spec, in a weird corner case.
723
+ * Per the MemNF/MemSingleNF pseudocode, a NF load from Device memory
724
+ * must not actually hit the bus -- it returns UNKNOWN data instead.
725
+ * But if you map non-RAM with Normal memory attributes and do a NF
726
+ * load then it should access the bus. (Nobody ought actually do this
727
+ * in the real world, obviously.)
728
+ *
729
+ * Then there are the annoying special cases with watchpoints...
730
+ *
731
+ * TODO: Add a form of tlb_fill that does not raise an exception,
732
+ * with a form of tlb_vaddr_to_host and a set of loads to match.
733
+ * The non_fault_vaddr_to_host would handle everything, usually,
734
+ * and the loads would handle the iomem path for watchpoints.
735
+ */
736
+ host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
737
+ split = max_for_page(addr, mem_off, mem_max);
738
+ if (host && split >= (1 << msz)) {
739
+ mem_off = host_fn(vd, vg, host - mem_off, mem_off, split);
740
+ reg_off = mem_off << diffsz;
741
+ }
742
+#endif
743
744
-DO_LDNF1(hh_r)
745
-DO_LDNF1(hsu_r)
746
-DO_LDNF1(hss_r)
747
-DO_LDNF1(hdu_r)
748
-DO_LDNF1(hds_r)
749
+ record_fault(env, reg_off, reg_max);
750
+}
751
752
-DO_LDNF1(ss_r)
753
-DO_LDNF1(sdu_r)
754
-DO_LDNF1(sds_r)
755
+#define DO_LDFF1_LDNF1_1(PART, ESZ) \
756
+void HELPER(sve_ldff1##PART##_r)(CPUARMState *env, void *vg, \
757
+ target_ulong addr, uint32_t desc) \
758
+{ \
759
+ sve_ldff1_r(env, vg, addr, desc, GETPC(), ESZ, 0, \
760
+ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \
761
+} \
762
+void HELPER(sve_ldnf1##PART##_r)(CPUARMState *env, void *vg, \
763
+ target_ulong addr, uint32_t desc) \
764
+{ \
765
+ sve_ldnf1_r(env, vg, addr, desc, ESZ, 0, sve_ld1##PART##_host); \
766
+}
767
768
-DO_LDNF1(dd_r)
769
+/* TODO: Propagate the endian check back to the translator. */
770
+#define DO_LDFF1_LDNF1_2(PART, ESZ, MSZ) \
771
+void HELPER(sve_ldff1##PART##_r)(CPUARMState *env, void *vg, \
772
+ target_ulong addr, uint32_t desc) \
773
+{ \
774
+ if (arm_cpu_data_is_big_endian(env)) { \
775
+ sve_ldff1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \
776
+ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \
777
+ } else { \
778
+ sve_ldff1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \
779
+ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \
780
+ } \
781
+} \
782
+void HELPER(sve_ldnf1##PART##_r)(CPUARMState *env, void *vg, \
783
+ target_ulong addr, uint32_t desc) \
784
+{ \
785
+ if (arm_cpu_data_is_big_endian(env)) { \
786
+ sve_ldnf1_r(env, vg, addr, desc, ESZ, MSZ, \
787
+ sve_ld1##PART##_be_host); \
788
+ } else { \
789
+ sve_ldnf1_r(env, vg, addr, desc, ESZ, MSZ, \
790
+ sve_ld1##PART##_le_host); \
791
+ } \
792
+}
793
794
-#undef DO_LDNF1
795
+DO_LDFF1_LDNF1_1(bb, 0)
796
+DO_LDFF1_LDNF1_1(bhu, 1)
797
+DO_LDFF1_LDNF1_1(bhs, 1)
798
+DO_LDFF1_LDNF1_1(bsu, 2)
799
+DO_LDFF1_LDNF1_1(bss, 2)
800
+DO_LDFF1_LDNF1_1(bdu, 3)
801
+DO_LDFF1_LDNF1_1(bds, 3)
802
+
803
+DO_LDFF1_LDNF1_2(hh, 1, 1)
804
+DO_LDFF1_LDNF1_2(hsu, 2, 1)
805
+DO_LDFF1_LDNF1_2(hss, 2, 1)
806
+DO_LDFF1_LDNF1_2(hdu, 3, 1)
807
+DO_LDFF1_LDNF1_2(hds, 3, 1)
808
+
809
+DO_LDFF1_LDNF1_2(ss, 2, 2)
810
+DO_LDFF1_LDNF1_2(sdu, 3, 2)
811
+DO_LDFF1_LDNF1_2(sds, 3, 2)
812
+
813
+DO_LDFF1_LDNF1_2(dd, 3, 3)
814
+
815
+#undef DO_LDFF1_LDNF1_1
816
+#undef DO_LDFF1_LDNF1_2
817
818
/*
819
* Store contiguous data, protected by a governing predicate.
820
--
164
--
821
2.19.0
165
2.19.1
822
166
823
167
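The predicate scan at the heart of the SVE helpers above can be read on its own. This simplified standalone sketch of find_next_active drops the per-element-size predicate mask (pred_esz_masks) that the real helper applies.

#include <stdint.h>
#include <stdio.h>

/* Scan the predicate bitmap vg for the next set bit at or after reg_off,
 * returning reg_max if the remaining predicate is wholly false. */
static intptr_t find_next_active_sketch(const uint64_t *vg,
                                        intptr_t reg_off, intptr_t reg_max)
{
    uint64_t pg = vg[reg_off >> 6] >> (reg_off & 63);

    if (pg & 1) {
        return reg_off;                 /* common case: already active */
    }
    if (pg == 0) {
        reg_off &= -64;                 /* realign to the 64-bit word */
        do {
            reg_off += 64;
            if (reg_off >= reg_max) {
                return reg_max;         /* predicate wholly false */
            }
            pg = vg[reg_off >> 6];
        } while (pg == 0);
    }
    return reg_off + __builtin_ctzll(pg);
}

int main(void)
{
    uint64_t vg[2] = { 0, 1ull << 5 };  /* first active element: bit 69 */
    printf("%ld\n", (long)find_next_active_sketch(vg, 0, 128)); /* 69 */
    return 0;
}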
New patch
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
2
3
Announce the availability of the various priority queues.
4
This fixes an issue where guest kernels would fail to
5
configure secondary queues due to improper feature bits.
6
7
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
8
Message-id: 20181017213932.19973-2-edgar.iglesias@gmail.com
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
hw/net/cadence_gem.c | 8 +++++++-
13
1 file changed, 7 insertions(+), 1 deletion(-)
14
15
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/net/cadence_gem.c
18
+++ b/hw/net/cadence_gem.c
19
@@ -XXX,XX +XXX,XX @@ static void gem_reset(DeviceState *d)
20
int i;
21
CadenceGEMState *s = CADENCE_GEM(d);
22
const uint8_t *a;
23
+ uint32_t queues_mask = 0;
24
25
DB_PRINT("\n");
26
27
@@ -XXX,XX +XXX,XX @@ static void gem_reset(DeviceState *d)
28
s->regs[GEM_DESCONF] = 0x02500111;
29
s->regs[GEM_DESCONF2] = 0x2ab13fff;
30
s->regs[GEM_DESCONF5] = 0x002f2045;
31
- s->regs[GEM_DESCONF6] = 0x00000200;
32
+ s->regs[GEM_DESCONF6] = 0x0;
33
+
34
+ if (s->num_priority_queues > 1) {
35
+ queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
36
+ s->regs[GEM_DESCONF6] |= queues_mask;
37
+ }
38
39
/* Set MAC address */
40
a = &s->conf.macaddr.a[0];
41
--
42
2.19.1
43
44
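To see what the patch above writes into DESCONF6, here is a self-contained sketch; MAKE_64BIT_MASK is reproduced from QEMU's include/qemu/bitops.h so the program stands alone.

#include <stdint.h>
#include <stdio.h>

#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

int main(void)
{
    /* One DESCONF6 bit per secondary queue; queue 0 is always present,
     * so only bits 1..(n-1) are set. */
    for (int n = 1; n <= 8; n++) {
        uint32_t desconf6 = 0;
        if (n > 1) {
            desconf6 |= MAKE_64BIT_MASK(1, n - 1);
        }
        printf("%d queue(s) -> DESCONF6 queue bits = 0x%02x\n", n, desconf6);
    }
    return 0;
}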
New patch
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
2
3
Announce 64-bit addressing support.
4
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Message-id: 20181017213932.19973-3-edgar.iglesias@gmail.com
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
hw/net/cadence_gem.c | 3 ++-
12
1 file changed, 2 insertions(+), 1 deletion(-)
13
14
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/net/cadence_gem.c
17
+++ b/hw/net/cadence_gem.c
18
@@ -XXX,XX +XXX,XX @@
19
#define GEM_DESCONF4 (0x0000028C/4)
20
#define GEM_DESCONF5 (0x00000290/4)
21
#define GEM_DESCONF6 (0x00000294/4)
22
+#define GEM_DESCONF6_64B_MASK (1U << 23)
23
#define GEM_DESCONF7 (0x00000298/4)
24
25
#define GEM_INT_Q1_STATUS (0x00000400 / 4)
26
@@ -XXX,XX +XXX,XX @@ static void gem_reset(DeviceState *d)
27
s->regs[GEM_DESCONF] = 0x02500111;
28
s->regs[GEM_DESCONF2] = 0x2ab13fff;
29
s->regs[GEM_DESCONF5] = 0x002f2045;
30
- s->regs[GEM_DESCONF6] = 0x0;
31
+ s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK;
32
33
if (s->num_priority_queues > 1) {
34
queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
35
--
36
2.19.1
37
38
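Combining the two cadence_gem patches, DESCONF6 ends up with bit 23 (the 64-bit capability) plus one bit per secondary queue. A short standalone check, with an example queue count:

#include <stdint.h>
#include <stdio.h>

#define GEM_DESCONF6_64B_MASK (1U << 23)            /* from the patch */
#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

int main(void)
{
    int num_priority_queues = 4;                    /* example value */
    uint32_t desconf6 = GEM_DESCONF6_64B_MASK;      /* 64-bit capable */
    if (num_priority_queues > 1) {
        desconf6 |= MAKE_64BIT_MASK(1, num_priority_queues - 1);
    }
    printf("DESCONF6 = 0x%08x\n", desconf6);        /* 0x0080000e */
    return 0;
}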
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
We are going to want to determine whether SVE is enabled
3
The EL3 version of this register does not include an ASID,
4
for an EL other than the current one.
4
and so the tlb_flush performed by vmsa_ttbr_write is not needed.
5
5
6
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
6
Reviewed-by: Aaron Lindsay <aaron@os.amperecomputing.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20181019015617.22583-2-richard.henderson@linaro.org
9
Message-id: 20181005175350.30752-4-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
11
---
12
target/arm/helper.c | 21 +++++++++------------
12
target/arm/helper.c | 2 +-
13
1 file changed, 9 insertions(+), 12 deletions(-)
13
1 file changed, 1 insertion(+), 1 deletion(-)
14
14
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
17
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
18
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
19
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
20
* take care of raising that exception.
20
.fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
21
* C.f. the ARM pseudocode function CheckSVEEnabled.
21
{ .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
22
*/
22
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
23
-static int sve_exception_el(CPUARMState *env)
23
- .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
24
+static int sve_exception_el(CPUARMState *env, int el)
24
+ .access = PL3_RW, .resetvalue = 0,
25
{
25
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
26
#ifndef CONFIG_USER_ONLY
26
{ .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
27
- unsigned current_el = arm_current_el(env);
27
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
28
-
29
- if (current_el <= 1) {
30
+ if (el <= 1) {
31
bool disabled = false;
32
33
/* The CPACR.ZEN controls traps to EL1:
34
@@ -XXX,XX +XXX,XX @@ static int sve_exception_el(CPUARMState *env)
35
if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
36
disabled = true;
37
} else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
38
- disabled = current_el == 0;
39
+ disabled = el == 0;
40
}
41
if (disabled) {
42
/* route_to_el2 */
43
@@ -XXX,XX +XXX,XX @@ static int sve_exception_el(CPUARMState *env)
44
if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
45
disabled = true;
46
} else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
47
- disabled = current_el == 0;
48
+ disabled = el == 0;
49
}
50
if (disabled) {
51
return 0;
52
@@ -XXX,XX +XXX,XX @@ static int sve_exception_el(CPUARMState *env)
53
/* CPTR_EL2. Since TZ and TFP are positive,
54
* they will be zero when EL2 is not present.
55
*/
56
- if (current_el <= 2 && !arm_is_secure_below_el3(env)) {
57
+ if (el <= 2 && !arm_is_secure_below_el3(env)) {
58
if (env->cp15.cptr_el[2] & CPTR_TZ) {
59
return 2;
60
}
61
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
62
/* Return the exception level to which FP-disabled exceptions should
63
* be taken, or 0 if FP is enabled.
64
*/
65
-static inline int fp_exception_el(CPUARMState *env)
66
+static int fp_exception_el(CPUARMState *env, int cur_el)
67
{
68
#ifndef CONFIG_USER_ONLY
69
int fpen;
70
- int cur_el = arm_current_el(env);
71
72
/* CPACR and the CPTR registers don't exist before v6, so FP is
73
* always accessible
74
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
75
target_ulong *cs_base, uint32_t *pflags)
76
{
77
ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
78
- int fp_el = fp_exception_el(env);
79
+ int current_el = arm_current_el(env);
80
+ int fp_el = fp_exception_el(env, current_el);
81
uint32_t flags;
82
83
if (is_a64(env)) {
84
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
85
flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
86
87
if (arm_feature(env, ARM_FEATURE_SVE)) {
88
- int sve_el = sve_exception_el(env);
89
+ int sve_el = sve_exception_el(env, current_el);
90
uint32_t zcr_len;
91
92
/* If SVE is disabled, but FP is enabled,
93
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
94
if (sve_el != 0 && fp_el == 0) {
95
zcr_len = 0;
96
} else {
97
- int current_el = arm_current_el(env);
98
ARMCPU *cpu = arm_env_get_cpu(env);
99
100
zcr_len = cpu->sve_max_vq - 1;
101
--
28
--
102
2.19.0
29
2.19.1
103
30
104
31
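The CPACR_EL1.ZEN encoding that the rewritten sve_exception_el above walks through can be tabulated with a small standalone sketch (the function name is invented): ZEN is bits [17:16]; 0b00 and 0b10 trap EL0 and EL1, 0b01 traps only EL0, and 0b11 traps nothing.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool zen_disables_sve(uint64_t cpacr_el1, int el)
{
    if (!((cpacr_el1 >> 16) & 1)) {
        return true;               /* ZEN = 0b00 or 0b10 */
    }
    if (!((cpacr_el1 >> 17) & 1)) {
        return el == 0;            /* ZEN = 0b01 */
    }
    return false;                  /* ZEN = 0b11 */
}

int main(void)
{
    for (uint64_t zen = 0; zen < 4; zen++) {
        printf("ZEN=%llu: EL0 %s, EL1 %s\n", (unsigned long long)zen,
               zen_disables_sve(zen << 16, 0) ? "traps" : "ok",
               zen_disables_sve(zen << 16, 1) ? "traps" : "ok");
    }
    return 0;
}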
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Check for EL3 before testing CPTR_EL3.EZ. Return 0 when the exception
3
Since QEMU does not implement ASIDs, changes to the ASID must flush the
4
should be routed via AdvSIMDFPAccessTrap. Mirror the structure of
4
TLB. However, if the ASID does not change, there is no reason to flush.
5
CheckSVEEnabled more closely.
6
5
7
Fixes: 5be5e8eda78
6
In testing a boot of the Ubuntu installer to the first menu, this reduces
7
the number of flushes by 30%, or nearly 600k instances.
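A standalone sketch of the change-detection test the patch adds below: the ASID lives in bits [63:48] of a 64-bit TTBR write, so XOR-ing the old and new values and extracting that field tells us whether the ASID actually changed.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ttbr_asid_changed(uint64_t old_val, uint64_t new_val)
{
    /* XOR leaves only the differing bits; test the ASID field. */
    return (((old_val ^ new_val) >> 48) & 0xffff) != 0;
}

int main(void)
{
    uint64_t ttbr = (0x1234ull << 48) | 0x40000000;
    printf("%d\n", ttbr_asid_changed(ttbr, ttbr | 0x1000));     /* 0: skip flush */
    printf("%d\n", ttbr_asid_changed(ttbr, 0x4321ull << 48));   /* 1: flush */
    return 0;
}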
8
9
Reviewed-by: Aaron Lindsay <aaron@os.amperecomputing.com>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
12
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20181019015617.22583-3-richard.henderson@linaro.org
11
Message-id: 20181005175350.30752-3-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
15
---
14
target/arm/helper.c | 96 ++++++++++++++++++++++-----------------------
16
target/arm/helper.c | 8 +++-----
15
1 file changed, 46 insertions(+), 50 deletions(-)
17
1 file changed, 3 insertions(+), 5 deletions(-)
16
18
17
diff --git a/target/arm/helper.c b/target/arm/helper.c
19
diff --git a/target/arm/helper.c b/target/arm/helper.c
18
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper.c
21
--- a/target/arm/helper.c
20
+++ b/target/arm/helper.c
22
+++ b/target/arm/helper.c
21
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
23
@@ -XXX,XX +XXX,XX @@ static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
22
REGINFO_SENTINEL
24
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
23
};
25
uint64_t value)
24
25
-/* Return the exception level to which SVE-disabled exceptions should
26
- * be taken, or 0 if SVE is enabled.
27
+/* Return the exception level to which exceptions should be taken
28
+ * via SVEAccessTrap. If an exception should be routed through
29
+ * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
30
+ * take care of raising that exception.
31
+ * C.f. the ARM pseudocode function CheckSVEEnabled.
32
*/
33
static int sve_exception_el(CPUARMState *env)
34
{
26
{
35
#ifndef CONFIG_USER_ONLY
27
- /* 64 bit accesses to the TTBRs can change the ASID and so we
36
unsigned current_el = arm_current_el(env);
28
- * must flush the TLB.
37
29
- */
38
- /* The CPACR.ZEN controls traps to EL1:
30
- if (cpreg_field_is_64bit(ri)) {
39
- * 0, 2 : trap EL0 and EL1 accesses
31
+ /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
40
- * 1 : trap only EL0 accesses
32
+ if (cpreg_field_is_64bit(ri) &&
41
- * 3 : trap no accesses
33
+ extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
42
+ if (current_el <= 1) {
34
ARMCPU *cpu = arm_env_get_cpu(env);
43
+ bool disabled = false;
35
-
44
+
36
tlb_flush(CPU(cpu));
45
+ /* The CPACR.ZEN controls traps to EL1:
46
+ * 0, 2 : trap EL0 and EL1 accesses
47
+ * 1 : trap only EL0 accesses
48
+ * 3 : trap no accesses
49
+ */
50
+ if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
51
+ disabled = true;
52
+ } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
53
+ disabled = current_el == 0;
54
+ }
55
+ if (disabled) {
56
+ /* route_to_el2 */
57
+ return (arm_feature(env, ARM_FEATURE_EL2)
58
+ && !arm_is_secure(env)
59
+ && (env->cp15.hcr_el2 & HCR_TGE) ? 2 : 1);
60
+ }
61
+
62
+ /* Check CPACR.FPEN. */
63
+ if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
64
+ disabled = true;
65
+ } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
66
+ disabled = current_el == 0;
67
+ }
68
+ if (disabled) {
69
+ return 0;
70
+ }
71
+ }
72
+
73
+ /* CPTR_EL2. Since TZ and TFP are positive,
74
+ * they will be zero when EL2 is not present.
75
*/
76
- switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
77
- default:
78
- if (current_el <= 1) {
79
- /* Trap to PL1, which might be EL1 or EL3 */
80
- if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
81
- return 3;
82
- }
83
- return 1;
84
+ if (current_el <= 2 && !arm_is_secure_below_el3(env)) {
85
+ if (env->cp15.cptr_el[2] & CPTR_TZ) {
86
+ return 2;
87
}
88
- break;
89
- case 1:
90
- if (current_el == 0) {
91
- return 1;
92
+ if (env->cp15.cptr_el[2] & CPTR_TFP) {
93
+ return 0;
94
}
95
- break;
96
- case 3:
97
- break;
98
}
37
}
99
38
raw_write(env, ri, value);
100
- /* Similarly for CPACR.FPEN, after having checked ZEN. */
101
- switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
102
- default:
103
- if (current_el <= 1) {
104
- if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
105
- return 3;
106
- }
107
- return 1;
108
- }
109
- break;
110
- case 1:
111
- if (current_el == 0) {
112
- return 1;
113
- }
114
- break;
115
- case 3:
116
- break;
117
- }
118
-
119
- /* CPTR_EL2. Check both TZ and TFP. */
120
- if (current_el <= 2
121
- && (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ))
122
- && !arm_is_secure_below_el3(env)) {
123
- return 2;
124
- }
125
-
126
- /* CPTR_EL3. Check both EZ and TFP. */
127
- if (!(env->cp15.cptr_el[3] & CPTR_EZ)
128
- || (env->cp15.cptr_el[3] & CPTR_TFP)) {
129
+ /* CPTR_EL3. Since EZ is negative we must check for EL3. */
130
+ if (arm_feature(env, ARM_FEATURE_EL3)
131
+ && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
132
return 3;
133
}
134
#endif
135
--
39
--
136
2.19.0
40
2.19.1
137
41
138
42