I don't have anything else queued up at the moment, so this is just
Richard's SME patches.

thanks
-- PMM

The following changes since commit 63b38f6c85acd312c2cab68554abf33adf4ee2b3:

  Merge tag 'pull-target-arm-20220707' of https://git.linaro.org/people/pmaydell/qemu-arm into staging (2022-07-08 06:17:11 +0530)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220711

for you to fetch changes up to f9982ceaf26df27d15547a3a7990a95019e9e3a8:

  linux-user/aarch64: Add SME related hwcap entries (2022-07-11 13:43:52 +0100)

----------------------------------------------------------------
target-arm:
 * Implement SME emulation, for both system and linux-user

----------------------------------------------------------------
Richard Henderson (45):
      target/arm: Handle SME in aarch64_cpu_dump_state
      target/arm: Add infrastructure for disas_sme
      target/arm: Trap non-streaming usage when Streaming SVE is active
      target/arm: Mark ADR as non-streaming
      target/arm: Mark RDFFR, WRFFR, SETFFR as non-streaming
      target/arm: Mark BDEP, BEXT, BGRP, COMPACT, FEXPA, FTSSEL as non-streaming
      target/arm: Mark PMULL, FMMLA as non-streaming
      target/arm: Mark FTSMUL, FTMAD, FADDA as non-streaming
      target/arm: Mark SMMLA, UMMLA, USMMLA as non-streaming
      target/arm: Mark string/histo/crypto as non-streaming
      target/arm: Mark gather/scatter load/store as non-streaming
      target/arm: Mark gather prefetch as non-streaming
      target/arm: Mark LDFF1 and LDNF1 as non-streaming
      target/arm: Mark LD1RO as non-streaming
      target/arm: Add SME enablement checks
      target/arm: Handle SME in sve_access_check
      target/arm: Implement SME RDSVL, ADDSVL, ADDSPL
      target/arm: Implement SME ZERO
      target/arm: Implement SME MOVA
      target/arm: Implement SME LD1, ST1
      target/arm: Export unpredicated ld/st from translate-sve.c
      target/arm: Implement SME LDR, STR
      target/arm: Implement SME ADDHA, ADDVA
      target/arm: Implement FMOPA, FMOPS (non-widening)
      target/arm: Implement BFMOPA, BFMOPS
      target/arm: Implement FMOPA, FMOPS (widening)
      target/arm: Implement SME integer outer product
      target/arm: Implement PSEL
      target/arm: Implement REVD
      target/arm: Implement SCLAMP, UCLAMP
      target/arm: Reset streaming sve state on exception boundaries
      target/arm: Enable SME for -cpu max
      linux-user/aarch64: Clear tpidr2_el0 if CLONE_SETTLS
      linux-user/aarch64: Reset PSTATE.SM on syscalls
      linux-user/aarch64: Add SM bit to SVE signal context
      linux-user/aarch64: Tidy target_restore_sigframe error return
      linux-user/aarch64: Do not allow duplicate or short sve records
      linux-user/aarch64: Verify extra record lock succeeded
      linux-user/aarch64: Move sve record checks into restore
      linux-user/aarch64: Implement SME signal handling
      linux-user: Rename sve prctls
      linux-user/aarch64: Implement PR_SME_GET_VL, PR_SME_SET_VL
      target/arm: Only set ZEN in reset if SVE present
      target/arm: Enable SME for user-only
      linux-user/aarch64: Add SME related hwcap entries

 docs/system/arm/emulation.rst     |    4 +
 linux-user/aarch64/target_cpu.h   |    5 +-
 linux-user/aarch64/target_prctl.h |   62 +-
 target/arm/cpu.h                  |    7 +
 target/arm/helper-sme.h           |  126 ++++
 target/arm/helper-sve.h           |    4 +
 target/arm/helper.h               |   18 +
 target/arm/translate-a64.h        |   45 ++
 target/arm/translate.h            |   16 +
 target/arm/sme-fa64.decode        |   60 ++
 target/arm/sme.decode             |   88 +++
 target/arm/sve.decode             |   41 +-
 linux-user/aarch64/cpu_loop.c     |    9 +
 linux-user/aarch64/signal.c       |  243 ++++--
 linux-user/elfload.c              |   20 +
 linux-user/syscall.c              |   28 +-
 target/arm/cpu.c                  |   35 +-
 target/arm/cpu64.c                |   11 +
 target/arm/helper.c               |   56 +-
 target/arm/sme_helper.c           | 1140 +++++++++++++++++++++++++++++++++++++
 target/arm/sve_helper.c           |   28 +
 target/arm/translate-a64.c        |  103 +++-
 target/arm/translate-sme.c        |  373 ++++++++++++
 target/arm/translate-sve.c        |  393 ++++++++---
 target/arm/translate-vfp.c        |   12 +
 target/arm/translate.c            |    2 +
 target/arm/vec_helper.c           |   24 +
 target/arm/meson.build            |    3 +
 28 files changed, 2821 insertions(+), 135 deletions(-)
 create mode 100644 target/arm/sme-fa64.decode
 create mode 100644 target/arm/sme.decode
 create mode 100644 target/arm/translate-sme.c
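
For anyone who wants to try the series before it lands: one way to apply a
pull request like this locally (using the repository URL and tag quoted
above) is

  git fetch https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220711
  git merge FETCH_HEAD
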
From: Richard Henderson <richard.henderson@linaro.org>

Dump SVCR, plus use the correct access check for Streaming Mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     int i;
     int el = arm_current_el(env);
     const char *ns_status;
+    bool sve;
 
     qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
     for (i = 0; i < 32; i++) {
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
                  el,
                  psr & PSTATE_SP ? 'h' : 't');
 
+    if (cpu_isar_feature(aa64_sme, cpu)) {
+        qemu_fprintf(f, " SVCR=%08" PRIx64 " %c%c",
+                     env->svcr,
+                     (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'),
+                     (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
+    }
     if (cpu_isar_feature(aa64_bti, cpu)) {
         qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
     }
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
                  vfp_get_fpcr(env), vfp_get_fpsr(env));
 
-    if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
+    if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) {
+        sve = sme_exception_el(env, el) == 0;
+    } else if (cpu_isar_feature(aa64_sve, cpu)) {
+        sve = sve_exception_el(env, el) == 0;
+    } else {
+        sve = false;
+    }
+
+    if (sve) {
         int j, zcr_len = sve_vqm1_for_el(env, el);
 
         for (i = 0; i <= FFR_PRED_NUM; i++) {
-- 
2.25.1
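
A note on what the cpu.c hunk above buys you: with SME present, register
dumps (e.g. "info registers" or -d cpu logging) gain an SVCR line. As a
rough illustration (hand-written, not captured from a real run), a guest
with both SVCR.ZA and SVCR.SM set (SVCR == 0x3, SM being bit 0 and ZA bit 1)
would show

   SVCR=00000003 ZS

while one with streaming mode and ZA storage both off would show

   SVCR=00000000 --
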
From: Richard Henderson <richard.henderson@linaro.org>

This includes the build rules for the decoder, and the
new file for translation, but excludes any instructions.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h |  1 +
 target/arm/sme.decode      | 20 ++++++++++++++++++++
 target/arm/translate-a64.c |  7 ++++++-
 target/arm/translate-sme.c | 35 +++++++++++++++++++++++++++++++++++
 target/arm/meson.build     |  2 ++
 5 files changed, 64 insertions(+), 1 deletion(-)
 create mode 100644 target/arm/sme.decode
 create mode 100644 target/arm/translate-sme.c

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s)
 }
 
 bool disas_sve(DisasContext *, uint32_t);
+bool disas_sme(DisasContext *, uint32_t);
 
 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@
+# AArch64 SME instruction descriptions
+#
+# Copyright (c) 2022 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     }
 
     switch (extract32(insn, 25, 4)) {
-    case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
+    case 0x0:
+        if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
+            unallocated_encoding(s);
+        }
+        break;
+    case 0x1: case 0x3: /* UNALLOCATED */
         unallocated_encoding(s);
         break;
     case 0x2:
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * AArch64 SME translation
+ *
+ * Copyright (c) 2022 Linaro, Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "translate.h"
+#include "exec/helper-gen.h"
+#include "translate-a64.h"
+#include "fpu/softfloat.h"
+
+
+/*
+ * Include the generated decoder.
+ */
+
+#include "decode-sme.c.inc"
diff --git a/target/arm/meson.build b/target/arm/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -XXX,XX +XXX,XX @@
 gen = [
   decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
+  decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
   decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
   decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
   decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
@@ -XXX,XX +XXX,XX @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
   'sme_helper.c',
   'translate-a64.c',
   'translate-sve.c',
+  'translate-sme.c',
 ))
 
 arm_softmmu_ss = ss.source_set()
-- 
2.25.1
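
Some orientation for readers who have not used QEMU's decodetree before
(background, not part of the patch): each pattern line in a .decode file
maps an instruction bit pattern to a trans_<NAME>() callback, which the
generated decoder calls with the extracted fields. A made-up example with
entirely hypothetical names:

  # hypothetical pattern: 22 fixed opcode bits plus two 5-bit register fields
  FOO     0001 0010 0000 0000 0000 00 rn:5 rd:5

scripts/decodetree.py turns this into a decoder that, for a matching
instruction word, fills in an arg_FOO struct holding rn and rd and calls
trans_FOO(s, &a). If no pattern matches (or the callback returns false),
the generated disas_sme() returns false, which is why the translate-a64.c
hunk above falls back to unallocated_encoding() in that case.
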
From: Richard Henderson <richard.henderson@linaro.org>

This new behaviour is in the ARM pseudocode function
AArch64.CheckFPAdvSIMDEnabled, which applies to AArch32
via AArch32.CheckAdvSIMDOrFPEnabled when the EL to which
the trap would be delivered is in AArch64 mode.

Given that ARMv9 drops support for AArch32 outside EL0, the trap EL
detection ought to be trivially true, but the pseudocode still contains
a number of conditions, and QEMU has not yet committed to dropping A32
support for EL[12] when v9 features are present.

Since the computation of SME_TRAP_NONSTREAMING is necessarily different
for the two modes, we might as well preserve bits within TBFLAG_ANY and
allocate separate bits within TBFLAG_A32 and TBFLAG_A64 instead.

Note that DDI0616A.a has typos for bits [22:21] of LD1RO in the table
of instructions illegal in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h           |  7 +++
 target/arm/translate.h     |  4 ++
 target/arm/sme-fa64.decode | 90 ++++++++++++++++++++++++++++++++++++++
 target/arm/helper.c        | 41 +++++++++++++++++
 target/arm/translate-a64.c | 40 ++++++++++++++++-
 target/arm/translate-vfp.c | 12 +++++
 target/arm/translate.c     |  2 +
 target/arm/meson.build     |  1 +
 8 files changed, 195 insertions(+), 2 deletions(-)
 create mode 100644 target/arm/sme-fa64.decode

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
  * the same thing as the current security state of the processor!
  */
 FIELD(TBFLAG_A32, NS, 10, 1)
+/*
+ * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not.
+ * This requires an SME trap from AArch32 mode when using NEON.
+ */
+FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1)
 
 /*
  * Bit usage when in AArch32 state, for M-profile only.
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2)
 FIELD(TBFLAG_A64, PSTATE_SM, 22, 1)
 FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1)
 FIELD(TBFLAG_A64, SVL, 24, 4)
+/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */
+FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1)
 
 /*
  * Helpers for using the above.
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     bool pstate_sm;
     /* True if PSTATE.ZA is set. */
     bool pstate_za;
+    /* True if non-streaming insns should raise an SME Streaming exception. */
+    bool sme_trap_nonstreaming;
+    /* True if the current instruction is non-streaming. */
+    bool is_nonstreaming;
     /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
     bool mve_no_pred;
     /*
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@
+# AArch64 SME allowed instruction decoding
+#
+# Copyright (c) 2022 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+# These patterns are taken from Appendix E1.1 of DDI0616 A.a,
+# Arm Architecture Reference Manual Supplement,
+# The Scalable Matrix Extension (SME), for Armv9-A
+
+{
+  [
+    OK  0-00 1110 0000 0001 0010 11-- ---- ----   # SMOV W|Xd,Vn.B[0]
+    OK  0-00 1110 0000 0010 0010 11-- ---- ----   # SMOV W|Xd,Vn.H[0]
+    OK  0100 1110 0000 0100 0010 11-- ---- ----   # SMOV Xd,Vn.S[0]
+    OK  0000 1110 0000 0001 0011 11-- ---- ----   # UMOV Wd,Vn.B[0]
+    OK  0000 1110 0000 0010 0011 11-- ---- ----   # UMOV Wd,Vn.H[0]
+    OK  0000 1110 0000 0100 0011 11-- ---- ----   # UMOV Wd,Vn.S[0]
+    OK  0100 1110 0000 1000 0011 11-- ---- ----   # UMOV Xd,Vn.D[0]
+  ]
+  FAIL  0--0 111- ---- ---- ---- ---- ---- ----   # Advanced SIMD vector operations
+}
+
+{
+  [
+    OK  0101 1110 --1- ---- 11-1 11-- ---- ----   # FMULX/FRECPS/FRSQRTS (scalar)
+    OK  0101 1110 -10- ---- 00-1 11-- ---- ----   # FMULX/FRECPS/FRSQRTS (scalar, FP16)
+    OK  01-1 1110 1-10 0001 11-1 10-- ---- ----   # FRECPE/FRSQRTE/FRECPX (scalar)
+    OK  01-1 1110 1111 1001 11-1 10-- ---- ----   # FRECPE/FRSQRTE/FRECPX (scalar, FP16)
+  ]
+  FAIL  01-1 111- ---- ---- ---- ---- ---- ----   # Advanced SIMD single-element operations
+}
+
+FAIL  0-00 110- ---- ---- ---- ---- ---- ----   # Advanced SIMD structure load/store
+FAIL  1100 1110 ---- ---- ---- ---- ---- ----   # Advanced SIMD cryptography extensions
+FAIL  0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
+
+# These are the "avoidance of doubt" final table of Illegal Advanced SIMD instructions
+# We don't actually need to include these, as the default is OK.
+#       -001 111- ---- ---- ---- ---- ---- ----   # Scalar floating-point operations
+#       --10 110- ---- ---- ---- ---- ---- ----   # Load/store pair of FP registers
+#       --01 1100 ---- ---- ---- ---- ---- ----   # Load FP register (PC-relative literal)
+#       --11 1100 --0- ---- ---- ---- ---- ----   # Load/store FP register (unscaled imm)
+#       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
+#       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
+
+FAIL  0000 0100 --1- ---- 1010 ---- ---- ----   # ADR
+FAIL  0000 0100 --1- ---- 1011 -0-- ---- ----   # FTSSEL, FEXPA
+FAIL  0000 0101 --10 0001 100- ---- ---- ----   # COMPACT
+FAIL  0010 0101 --01 100- 1111 000- ---0 ----   # RDFFR, RDFFRS
+FAIL  0010 0101 --10 1--- 1001 ---- ---- ----   # WRFFR, SETFFR
+FAIL  0100 0101 --0- ---- 1011 ---- ---- ----   # BDEP, BEXT, BGRP
+FAIL  0100 0101 000- ---- 0110 1--- ---- ----   # PMULLB, PMULLT (128b result)
+FAIL  0110 0100 --1- ---- 1110 01-- ---- ----   # FMMLA, BFMMLA
+FAIL  0110 0101 --0- ---- 0000 11-- ---- ----   # FTSMUL
+FAIL  0110 0101 --01 0--- 100- ---- ---- ----   # FTMAD
+FAIL  0110 0101 --01 1--- 001- ---- ---- ----   # FADDA
+FAIL  0100 0101 --0- ---- 1001 10-- ---- ----   # SMMLA, UMMLA, USMMLA
+FAIL  0100 0101 --1- ---- 1--- ---- ---- ----   # SVE2 string/histo/crypto instructions
+FAIL  1000 010- -00- ---- 10-- ---- ---- ----   # SVE2 32-bit gather NT load (vector+scalar)
+FAIL  1000 010- -00- ---- 111- ---- ---- ----   # SVE 32-bit gather prefetch (vector+imm)
+FAIL  1000 0100 0-1- ---- 0--- ---- ---- ----   # SVE 32-bit gather prefetch (scalar+vector)
+FAIL  1000 010- -01- ---- 1--- ---- ---- ----   # SVE 32-bit gather load (vector+imm)
+FAIL  1000 0100 0-0- ---- 0--- ---- ---- ----   # SVE 32-bit gather load byte (scalar+vector)
+FAIL  1000 0100 1--- ---- 0--- ---- ---- ----   # SVE 32-bit gather load half (scalar+vector)
+FAIL  1000 0101 0--- ---- 0--- ---- ---- ----   # SVE 32-bit gather load word (scalar+vector)
+FAIL  1010 010- ---- ---- 011- ---- ---- ----   # SVE contiguous FF load (scalar+scalar)
+FAIL  1010 010- ---1 ---- 101- ---- ---- ----   # SVE contiguous NF load (scalar+imm)
+FAIL  1010 010- -01- ---- 000- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+scalar)
+FAIL  1010 010- -010 ---- 001- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+imm)
+FAIL  1100 010- ---- ---- ---- ---- ---- ----   # SVE 64-bit gather load/prefetch
+FAIL  1110 010- -00- ---- 001- ---- ---- ----   # SVE2 64-bit scatter NT store (vector+scalar)
+FAIL  1110 010- -10- ---- 001- ---- ---- ----   # SVE2 32-bit scatter NT store (vector+scalar)
+FAIL  1110 010- ---- ---- 1-0- ---- ---- ----   # SVE scatter store (scalar+32-bit vector)
+FAIL  1110 010- ---- ---- 101- ---- ---- ----   # SVE scatter store (misc)
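
(A reading aid, not part of the patch: within these patterns a '0' or '1'
bit must match exactly and '-' is don't-care, so

  FAIL  0000 0100 --1- ---- 1010 ---- ---- ----   # ADR

is equivalent to the mask/value test (insn & 0xff20f000) == 0x0420a000.
Anything that matches decodes as FAIL, whose handler in the translate-a64.c
hunk below simply marks the instruction non-streaming. Patterns inside a
{ } overlap group are tried in order, which is how the specific [ OK ... ]
encodings win over the broad FAIL pattern that follows them.)
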
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ int sme_exception_el(CPUARMState *env, int el)
     return 0;
 }
 
+/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
+static bool sme_fa64(CPUARMState *env, int el)
+{
+    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
+        return false;
+    }
+
+    if (el <= 1 && !el_is_in_host(env, el)) {
+        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
+            return false;
+        }
+    }
+    if (el <= 2 && arm_is_el2_enabled(env)) {
+        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
+            return false;
+        }
+    }
+    if (arm_feature(env, ARM_FEATURE_EL3)) {
+        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
 /*
  * Given that SVE is enabled, return the vector length for EL.
  */
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
         DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
     }
 
+    /*
+     * The SME exception we are testing for is raised via
+     * AArch64.CheckFPAdvSIMDEnabled(), as called from
+     * AArch32.CheckAdvSIMDOrFPEnabled().
+     */
+    if (el == 0
+        && FIELD_EX64(env->svcr, SVCR, SM)
+        && (!arm_is_el2_enabled(env)
+            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
+        && arm_el_is_aa64(env, 1)
+        && !sme_fa64(env, el)) {
+        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
+    }
+
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }
 
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     }
 
     if (FIELD_EX64(env->svcr, SVCR, SM)) {
         DP_TBFLAG_A64(flags, PSTATE_SM, 1);
+        DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
     }
     DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
 }
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void do_vec_ld(DisasContext *s, int destidx, int element,
  * unallocated-encoding checks (otherwise the syndrome information
  * for the resulting exception will be incorrect).
  */
-static bool fp_access_check(DisasContext *s)
+static bool fp_access_check_only(DisasContext *s)
 {
     if (s->fp_excp_el) {
         assert(!s->fp_access_checked);
@@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s)
     return true;
 }
 
+static bool fp_access_check(DisasContext *s)
+{
+    if (!fp_access_check_only(s)) {
+        return false;
+    }
+    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_smetrap(SME_ET_Streaming, false));
+        return false;
+    }
+    return true;
+}
+
 /* Check that SVE access is enabled.  If it is, return true.
  * If not, emit code to generate an appropriate exception and return false.
  */
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
     default:
         g_assert_not_reached();
     }
-    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
+    if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
         return;
     } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
         return;
@@ -XXX,XX +XXX,XX @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
     }
 }
 
+/*
+ * Include the generated SME FA64 decoder.
+ */
+
+#include "decode-sme-fa64.c.inc"
+
+static bool trans_OK(DisasContext *s, arg_OK *a)
+{
+    return true;
+}
+
+static bool trans_FAIL(DisasContext *s, arg_OK *a)
+{
+    s->is_nonstreaming = true;
+    return true;
+}
+
 /**
  * is_guarded_page:
  * @env: The cpu environment
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
     dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
     dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
+    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
     dc->vec_len = 0;
     dc->vec_stride = 0;
     dc->cp_regs = arm_cpu->cp_regs;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         }
     }
 
+    s->is_nonstreaming = false;
+    if (s->sme_trap_nonstreaming) {
+        disas_sme_fa64(s, insn);
+    }
+
     switch (extract32(insn, 25, 4)) {
     case 0x0:
         if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
         return false;
     }
 
+    /*
+     * Note that rebuild_hflags_a32 has already accounted for being in EL0
+     * and the higher EL in A64 mode, etc.  Unlike A64 mode, there do not
+     * appear to be any insns which touch VFP which are allowed.
+     */
+    if (s->sme_trap_nonstreaming) {
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_smetrap(SME_ET_Streaming,
+                                       s->base.pc_next - s->pc_curr == 2));
+        return false;
+    }
+
     if (!s->vfp_enabled && !ignore_vfp_enabled) {
         assert(!arm_dc_feature(s, ARM_FEATURE_M));
         unallocated_encoding(s);
513
+ if (!feature) {
514
unallocated_encoding(s);
515
return;
516
}
517
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
518
return;
519
}
520
if (size == 3) {
521
- if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
522
+ if (!dc_isar_feature(aa64_pmull, s)) {
523
unallocated_encoding(s);
524
return;
525
}
526
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
527
int size = extract32(insn, 22, 2);
528
bool u = extract32(insn, 29, 1);
529
bool is_q = extract32(insn, 30, 1);
530
- int feature, rot;
531
+ bool feature;
532
+ int rot;
533
534
switch (u * 16 + opcode) {
535
case 0x10: /* SQRDMLAH (vector) */
536
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
537
unallocated_encoding(s);
538
return;
539
}
540
- feature = ARM_FEATURE_V8_RDM;
541
+ feature = dc_isar_feature(aa64_rdm, s);
542
break;
543
case 0x02: /* SDOT (vector) */
544
case 0x12: /* UDOT (vector) */
545
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
546
unallocated_encoding(s);
547
return;
548
}
549
- feature = ARM_FEATURE_V8_DOTPROD;
550
+ feature = dc_isar_feature(aa64_dp, s);
551
break;
552
case 0x18: /* FCMLA, #0 */
553
case 0x19: /* FCMLA, #90 */
554
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
555
unallocated_encoding(s);
556
return;
557
}
558
- feature = ARM_FEATURE_V8_FCMA;
559
+ feature = dc_isar_feature(aa64_fcma, s);
560
break;
561
default:
562
unallocated_encoding(s);
563
return;
564
}
565
- if (!arm_dc_feature(s, feature)) {
566
+ if (!feature) {
567
unallocated_encoding(s);
568
return;
569
}
570
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
571
break;
572
case 0x1d: /* SQRDMLAH */
573
case 0x1f: /* SQRDMLSH */
574
- if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
575
+ if (!dc_isar_feature(aa64_rdm, s)) {
576
unallocated_encoding(s);
577
return;
578
}
579
break;
580
case 0x0e: /* SDOT */
581
case 0x1e: /* UDOT */
582
- if (size != MO_32 || !arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
583
+ if (size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
584
unallocated_encoding(s);
585
return;
586
}
587
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
588
case 0x13: /* FCMLA #90 */
589
case 0x15: /* FCMLA #180 */
590
case 0x17: /* FCMLA #270 */
591
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
592
+ if (!dc_isar_feature(aa64_fcma, s)) {
593
unallocated_encoding(s);
594
return;
595
}
596
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_aes(DisasContext *s, uint32_t insn)
597
TCGv_i32 tcg_decrypt;
598
CryptoThreeOpIntFn *genfn;
599
600
- if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
601
- || size != 0) {
602
+ if (!dc_isar_feature(aa64_aes, s) || size != 0) {
603
unallocated_encoding(s);
604
return;
605
}
606
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
607
int rd = extract32(insn, 0, 5);
608
CryptoThreeOpFn *genfn;
609
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
610
- int feature = ARM_FEATURE_V8_SHA256;
611
+ bool feature;
612
613
if (size != 0) {
614
unallocated_encoding(s);
615
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
616
case 2: /* SHA1M */
617
case 3: /* SHA1SU0 */
618
genfn = NULL;
619
- feature = ARM_FEATURE_V8_SHA1;
620
+ feature = dc_isar_feature(aa64_sha1, s);
621
break;
622
case 4: /* SHA256H */
623
genfn = gen_helper_crypto_sha256h;
624
+ feature = dc_isar_feature(aa64_sha256, s);
625
break;
626
case 5: /* SHA256H2 */
627
genfn = gen_helper_crypto_sha256h2;
628
+ feature = dc_isar_feature(aa64_sha256, s);
629
break;
630
case 6: /* SHA256SU1 */
631
genfn = gen_helper_crypto_sha256su1;
632
+ feature = dc_isar_feature(aa64_sha256, s);
633
break;
634
default:
635
unallocated_encoding(s);
636
return;
637
}
638
639
- if (!arm_dc_feature(s, feature)) {
640
+ if (!feature) {
641
unallocated_encoding(s);
642
return;
643
}
644
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
645
int rn = extract32(insn, 5, 5);
646
int rd = extract32(insn, 0, 5);
647
CryptoTwoOpFn *genfn;
648
- int feature;
649
+ bool feature;
650
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
651
652
if (size != 0) {
653
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
654
655
switch (opcode) {
656
case 0: /* SHA1H */
657
- feature = ARM_FEATURE_V8_SHA1;
658
+ feature = dc_isar_feature(aa64_sha1, s);
659
genfn = gen_helper_crypto_sha1h;
660
break;
661
case 1: /* SHA1SU1 */
662
- feature = ARM_FEATURE_V8_SHA1;
663
+ feature = dc_isar_feature(aa64_sha1, s);
664
genfn = gen_helper_crypto_sha1su1;
665
break;
666
case 2: /* SHA256SU0 */
667
- feature = ARM_FEATURE_V8_SHA256;
668
+ feature = dc_isar_feature(aa64_sha256, s);
669
genfn = gen_helper_crypto_sha256su0;
670
break;
671
default:
672
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
673
return;
674
}
675
676
- if (!arm_dc_feature(s, feature)) {
677
+ if (!feature) {
678
unallocated_encoding(s);
679
return;
680
}
681
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
682
int rm = extract32(insn, 16, 5);
683
int rn = extract32(insn, 5, 5);
684
int rd = extract32(insn, 0, 5);
685
- int feature;
686
+ bool feature;
687
CryptoThreeOpFn *genfn;
688
689
if (o == 0) {
690
switch (opcode) {
691
case 0: /* SHA512H */
692
- feature = ARM_FEATURE_V8_SHA512;
693
+ feature = dc_isar_feature(aa64_sha512, s);
694
genfn = gen_helper_crypto_sha512h;
695
break;
696
case 1: /* SHA512H2 */
697
- feature = ARM_FEATURE_V8_SHA512;
698
+ feature = dc_isar_feature(aa64_sha512, s);
699
genfn = gen_helper_crypto_sha512h2;
700
break;
701
case 2: /* SHA512SU1 */
702
- feature = ARM_FEATURE_V8_SHA512;
703
+ feature = dc_isar_feature(aa64_sha512, s);
704
genfn = gen_helper_crypto_sha512su1;
705
break;
706
case 3: /* RAX1 */
707
- feature = ARM_FEATURE_V8_SHA3;
708
+ feature = dc_isar_feature(aa64_sha3, s);
709
genfn = NULL;
710
break;
711
}
712
} else {
713
switch (opcode) {
714
case 0: /* SM3PARTW1 */
715
- feature = ARM_FEATURE_V8_SM3;
716
+ feature = dc_isar_feature(aa64_sm3, s);
717
genfn = gen_helper_crypto_sm3partw1;
718
break;
719
case 1: /* SM3PARTW2 */
720
- feature = ARM_FEATURE_V8_SM3;
721
+ feature = dc_isar_feature(aa64_sm3, s);
722
genfn = gen_helper_crypto_sm3partw2;
723
break;
724
case 2: /* SM4EKEY */
725
- feature = ARM_FEATURE_V8_SM4;
726
+ feature = dc_isar_feature(aa64_sm4, s);
727
genfn = gen_helper_crypto_sm4ekey;
728
break;
729
default:
730
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
731
}
732
}
733
734
- if (!arm_dc_feature(s, feature)) {
735
+ if (!feature) {
736
unallocated_encoding(s);
737
return;
738
}
739
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
740
int rn = extract32(insn, 5, 5);
741
int rd = extract32(insn, 0, 5);
742
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
743
- int feature;
744
+ bool feature;
745
CryptoTwoOpFn *genfn;
746
747
switch (opcode) {
748
case 0: /* SHA512SU0 */
749
- feature = ARM_FEATURE_V8_SHA512;
750
+ feature = dc_isar_feature(aa64_sha512, s);
751
genfn = gen_helper_crypto_sha512su0;
752
break;
753
case 1: /* SM4E */
754
- feature = ARM_FEATURE_V8_SM4;
755
+ feature = dc_isar_feature(aa64_sm4, s);
756
genfn = gen_helper_crypto_sm4e;
757
break;
758
default:
759
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
760
return;
761
}
762
763
- if (!arm_dc_feature(s, feature)) {
764
+ if (!feature) {
765
unallocated_encoding(s);
766
return;
767
}
768
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
769
int ra = extract32(insn, 10, 5);
770
int rn = extract32(insn, 5, 5);
771
int rd = extract32(insn, 0, 5);
772
- int feature;
773
+ bool feature;
774
775
switch (op0) {
776
case 0: /* EOR3 */
777
case 1: /* BCAX */
778
- feature = ARM_FEATURE_V8_SHA3;
779
+ feature = dc_isar_feature(aa64_sha3, s);
780
break;
781
case 2: /* SM3SS1 */
782
- feature = ARM_FEATURE_V8_SM3;
783
+ feature = dc_isar_feature(aa64_sm3, s);
784
break;
785
default:
786
unallocated_encoding(s);
787
return;
788
}
789
790
- if (!arm_dc_feature(s, feature)) {
791
+ if (!feature) {
792
unallocated_encoding(s);
793
return;
794
}
795
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn)
796
TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
797
int pass;
798
799
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
800
+ if (!dc_isar_feature(aa64_sha3, s)) {
801
unallocated_encoding(s);
802
return;
803
}
804
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
805
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
806
TCGv_i32 tcg_imm2, tcg_opcode;
807
808
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
809
+ if (!dc_isar_feature(aa64_sm3, s)) {
810
unallocated_encoding(s);
811
return;
812
}
813
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
814
ARMCPU *arm_cpu = arm_env_get_cpu(env);
815
int bound;
816
817
+ dc->isar = &arm_cpu->isar;
818
dc->pc = dc->base.pc_first;
819
dc->condjmp = 0;
820
821
diff --git a/target/arm/translate.c b/target/arm/translate.c
347
diff --git a/target/arm/translate.c b/target/arm/translate.c
822
index XXXXXXX..XXXXXXX 100644
348
index XXXXXXX..XXXXXXX 100644
823
--- a/target/arm/translate.c
349
--- a/target/arm/translate.c
824
+++ b/target/arm/translate.c
350
+++ b/target/arm/translate.c
825
@@ -XXX,XX +XXX,XX @@ static const uint8_t neon_2rm_sizes[] = {
351
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
826
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
352
dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
827
int q, int rd, int rn, int rm)
353
dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
828
{
829
- if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
830
+ if (dc_isar_feature(aa32_rdm, s)) {
831
int opr_sz = (1 + q) * 8;
832
tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
833
vfp_reg_offset(1, rn),
834
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
835
return 1;
836
}
837
if (!u) { /* SHA-1 */
838
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
839
+ if (!dc_isar_feature(aa32_sha1, s)) {
840
return 1;
841
}
842
ptr1 = vfp_reg_ptr(true, rd);
843
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
844
gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
845
tcg_temp_free_i32(tmp4);
846
} else { /* SHA-256 */
847
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
848
+ if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
849
return 1;
850
}
851
ptr1 = vfp_reg_ptr(true, rd);
852
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
853
if (op == 14 && size == 2) {
854
TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
855
856
- if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
857
+ if (!dc_isar_feature(aa32_pmull, s)) {
858
return 1;
859
}
860
tcg_rn = tcg_temp_new_i64();
861
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
862
{
863
NeonGenThreeOpEnvFn *fn;
864
865
- if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
866
+ if (!dc_isar_feature(aa32_rdm, s)) {
867
return 1;
868
}
869
if (u && ((rd | rn) & 1)) {
870
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
871
break;
872
}
873
case NEON_2RM_AESE: case NEON_2RM_AESMC:
874
- if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
875
- || ((rm | rd) & 1)) {
876
+ if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
877
return 1;
878
}
879
ptr1 = vfp_reg_ptr(true, rd);
880
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
881
tcg_temp_free_i32(tmp3);
882
break;
883
case NEON_2RM_SHA1H:
884
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
885
- || ((rm | rd) & 1)) {
886
+ if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
887
return 1;
888
}
889
ptr1 = vfp_reg_ptr(true, rd);
890
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
891
}
892
/* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
893
if (q) {
894
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
895
+ if (!dc_isar_feature(aa32_sha2, s)) {
896
return 1;
897
}
898
- } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
899
+ } else if (!dc_isar_feature(aa32_sha1, s)) {
900
return 1;
901
}
902
ptr1 = vfp_reg_ptr(true, rd);
903
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
904
/* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
905
int size = extract32(insn, 20, 1);
906
data = extract32(insn, 23, 2); /* rot */
907
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
908
+ if (!dc_isar_feature(aa32_vcma, s)
909
|| (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
910
return 1;
911
}
354
}
912
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
355
+ dc->sme_trap_nonstreaming =
913
/* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
356
+ EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
914
int size = extract32(insn, 20, 1);
357
}
915
data = extract32(insn, 24, 1); /* rot */
358
dc->cp_regs = cpu->cp_regs;
916
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
359
dc->features = env->features;
917
+ if (!dc_isar_feature(aa32_vcma, s)
360
diff --git a/target/arm/meson.build b/target/arm/meson.build
918
|| (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
361
index XXXXXXX..XXXXXXX 100644
919
return 1;
362
--- a/target/arm/meson.build
920
}
363
+++ b/target/arm/meson.build
921
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
364
@@ -XXX,XX +XXX,XX @@
922
} else if ((insn & 0xfeb00f00) == 0xfc200d00) {
365
gen = [
923
/* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
366
decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
924
bool u = extract32(insn, 4, 1);
367
decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
925
- if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
368
+ decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'),
926
+ if (!dc_isar_feature(aa32_dp, s)) {
369
decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
927
return 1;
370
decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
928
}
371
decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
929
fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
930
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
931
int size = extract32(insn, 23, 1);
932
int index;
933
934
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
935
+ if (!dc_isar_feature(aa32_vcma, s)) {
936
return 1;
937
}
938
if (size == 0) {
939
@@ -XXX,XX +XXX,XX @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
940
} else if ((insn & 0xffb00f00) == 0xfe200d00) {
941
/* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
942
int u = extract32(insn, 4, 1);
943
- if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
944
+ if (!dc_isar_feature(aa32_dp, s)) {
945
return 1;
946
}
947
fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
948
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
949
* op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
950
* Bits 8, 10 and 11 should be zero.
951
*/
952
- if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
953
- (c & 0xd) != 0) {
954
+ if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
955
goto illegal_op;
956
}
957
958
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
959
case 0x28:
960
case 0x29:
961
case 0x2a:
962
- if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
963
+ if (!dc_isar_feature(aa32_crc32, s)) {
964
goto illegal_op;
965
}
966
break;
967
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
968
CPUARMState *env = cs->env_ptr;
969
ARMCPU *cpu = arm_env_get_cpu(env);
970
971
+ dc->isar = &cpu->isar;
972
dc->pc = dc->base.pc_first;
973
dc->condjmp = 0;
974
975
--
372
--
976
2.19.1
373
2.25.1
977
978
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Move cmtst_op expanders from translate-a64.c.
3
Mark ADR as a non-streaming instruction, which should trap
4
if full a64 support is not enabled in streaming mode.
4
5
6
Removing entries from sme-fa64.decode is an easy way to see
7
what remains to be done.
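As a rough illustration (hand-expanding the TRANS_FEAT_NONSTREAMING
macro that this patch adds to translate.h; the expansion itself is
not part of the patch), the generated trans function for ADR_p32
becomes:

static bool trans_ADR_p32(DisasContext *s, arg_ADR_p32 *a)
{
    /* Record that this encoding is illegal in streaming mode, so
     * that the access check can raise the SME trap when full a64
     * support is not implemented or not enabled.
     */
    s->is_nonstreaming = true;
    return dc_isar_feature(aa64_sve, s)
        && do_adr(s, a, gen_helper_sve_adr_p32);
}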
8
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20181011205206.3552-17-richard.henderson@linaro.org
11
Message-id: 20220708151540.18136-5-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
13
---
10
target/arm/translate.h | 2 +
14
target/arm/translate.h | 7 +++++++
11
target/arm/translate-a64.c | 38 ------------------
15
target/arm/sme-fa64.decode | 1 -
12
target/arm/translate.c | 81 +++++++++++++++++++++++++++-----------
16
target/arm/translate-sve.c | 8 ++++----
13
3 files changed, 60 insertions(+), 61 deletions(-)
17
3 files changed, 11 insertions(+), 5 deletions(-)
14
18
15
diff --git a/target/arm/translate.h b/target/arm/translate.h
19
diff --git a/target/arm/translate.h b/target/arm/translate.h
16
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate.h
21
--- a/target/arm/translate.h
18
+++ b/target/arm/translate.h
22
+++ b/target/arm/translate.h
19
@@ -XXX,XX +XXX,XX @@ extern const GVecGen3 bit_op;
23
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
20
extern const GVecGen3 bif_op;
24
static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
21
extern const GVecGen3 mla_op[4];
25
{ return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }
22
extern const GVecGen3 mls_op[4];
26
23
+extern const GVecGen3 cmtst_op[4];
27
+#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) \
24
extern const GVecGen2i ssra_op[4];
28
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
25
extern const GVecGen2i usra_op[4];
29
+ { \
26
extern const GVecGen2i sri_op[4];
30
+ s->is_nonstreaming = true; \
27
extern const GVecGen2i sli_op[4];
31
+ return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \
28
+void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
32
+ }
33
+
34
#endif /* TARGET_ARM_TRANSLATE_H */
35
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/sme-fa64.decode
38
+++ b/target/arm/sme-fa64.decode
39
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
40
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
41
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
42
43
-FAIL 0000 0100 --1- ---- 1010 ---- ---- ---- # ADR
44
FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA
45
FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT
46
FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS
47
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/translate-sve.c
50
+++ b/target/arm/translate-sve.c
51
@@ -XXX,XX +XXX,XX @@ static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
52
return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
53
}
54
55
-TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
56
-TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
57
-TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
58
-TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
59
+TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
60
+TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
61
+TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
62
+TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
29
63
30
/*
64
/*
31
* Forward to the isar_feature_* tests given a DisasContext pointer.
65
*** SVE Integer Misc - Unpredicated Group
32
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/translate-a64.c
35
+++ b/target/arm/translate-a64.c
36
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
37
}
38
}
39
40
-/* CMTST : test is "if (X & Y != 0)". */
41
-static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
42
-{
43
- tcg_gen_and_i32(d, a, b);
44
- tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
45
- tcg_gen_neg_i32(d, d);
46
-}
47
-
48
-static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
49
-{
50
- tcg_gen_and_i64(d, a, b);
51
- tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
52
- tcg_gen_neg_i64(d, d);
53
-}
54
-
55
-static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
56
-{
57
- tcg_gen_and_vec(vece, d, a, b);
58
- tcg_gen_dupi_vec(vece, a, 0);
59
- tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
60
-}
61
-
62
static void handle_3same_64(DisasContext *s, int opcode, bool u,
63
TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
64
{
65
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
66
/* Integer op subgroup of C3.6.16. */
67
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
68
{
69
- static const GVecGen3 cmtst_op[4] = {
70
- { .fni4 = gen_helper_neon_tst_u8,
71
- .fniv = gen_cmtst_vec,
72
- .vece = MO_8 },
73
- { .fni4 = gen_helper_neon_tst_u16,
74
- .fniv = gen_cmtst_vec,
75
- .vece = MO_16 },
76
- { .fni4 = gen_cmtst_i32,
77
- .fniv = gen_cmtst_vec,
78
- .vece = MO_32 },
79
- { .fni8 = gen_cmtst_i64,
80
- .fniv = gen_cmtst_vec,
81
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
82
- .vece = MO_64 },
83
- };
84
-
85
int is_q = extract32(insn, 30, 1);
86
int u = extract32(insn, 29, 1);
87
int size = extract32(insn, 22, 2);
88
diff --git a/target/arm/translate.c b/target/arm/translate.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/arm/translate.c
91
+++ b/target/arm/translate.c
92
@@ -XXX,XX +XXX,XX @@ const GVecGen3 mls_op[4] = {
93
.vece = MO_64 },
94
};
95
96
+/* CMTST : test is "if (X & Y != 0)". */
97
+static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
98
+{
99
+ tcg_gen_and_i32(d, a, b);
100
+ tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
101
+ tcg_gen_neg_i32(d, d);
102
+}
103
+
104
+void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
105
+{
106
+ tcg_gen_and_i64(d, a, b);
107
+ tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
108
+ tcg_gen_neg_i64(d, d);
109
+}
110
+
111
+static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
112
+{
113
+ tcg_gen_and_vec(vece, d, a, b);
114
+ tcg_gen_dupi_vec(vece, a, 0);
115
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
116
+}
117
+
118
+const GVecGen3 cmtst_op[4] = {
119
+ { .fni4 = gen_helper_neon_tst_u8,
120
+ .fniv = gen_cmtst_vec,
121
+ .vece = MO_8 },
122
+ { .fni4 = gen_helper_neon_tst_u16,
123
+ .fniv = gen_cmtst_vec,
124
+ .vece = MO_16 },
125
+ { .fni4 = gen_cmtst_i32,
126
+ .fniv = gen_cmtst_vec,
127
+ .vece = MO_32 },
128
+ { .fni8 = gen_cmtst_i64,
129
+ .fniv = gen_cmtst_vec,
130
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
131
+ .vece = MO_64 },
132
+};
133
+
134
/* Translate a NEON data processing instruction. Return nonzero if the
135
instruction is invalid.
136
We process data in a mixture of 32-bit and 64-bit chunks.
137
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
138
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
139
u ? &mls_op[size] : &mla_op[size]);
140
return 0;
141
+
142
+ case NEON_3R_VTST_VCEQ:
143
+ if (u) { /* VCEQ */
144
+ tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
145
+ vec_size, vec_size);
146
+ } else { /* VTST */
147
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
148
+ vec_size, vec_size, &cmtst_op[size]);
149
+ }
150
+ return 0;
151
+
152
+ case NEON_3R_VCGT:
153
+ tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
154
+ rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
155
+ return 0;
156
+
157
+ case NEON_3R_VCGE:
158
+ tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
159
+ rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
160
+ return 0;
161
}
162
163
if (size == 3) {
164
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
165
case NEON_3R_VQSUB:
166
GEN_NEON_INTEGER_OP_ENV(qsub);
167
break;
168
- case NEON_3R_VCGT:
169
- GEN_NEON_INTEGER_OP(cgt);
170
- break;
171
- case NEON_3R_VCGE:
172
- GEN_NEON_INTEGER_OP(cge);
173
- break;
174
case NEON_3R_VSHL:
175
GEN_NEON_INTEGER_OP(shl);
176
break;
177
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
178
tmp2 = neon_load_reg(rd, pass);
179
gen_neon_add(size, tmp, tmp2);
180
break;
181
- case NEON_3R_VTST_VCEQ:
182
- if (!u) { /* VTST */
183
- switch (size) {
184
- case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
185
- case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
186
- case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
187
- default: abort();
188
- }
189
- } else { /* VCEQ */
190
- switch (size) {
191
- case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
192
- case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
193
- case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
194
- default: abort();
195
- }
196
- }
197
- break;
198
case NEON_3R_VMUL:
199
/* VMUL.P8; other cases already eliminated. */
200
gen_helper_neon_mul_p8(tmp, tmp, tmp2);
201
--
66
--
202
2.19.1
67
2.25.1
203
204
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Mark these as non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Message-id: 20181011205206.3552-18-richard.henderson@linaro.org
8
Message-id: 20220708151540.18136-6-richard.henderson@linaro.org
5
[PMM: added parens in ?: expression]
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
---
10
---
9
target/arm/translate.c | 81 ++++++++++++++----------------------------
11
target/arm/sme-fa64.decode | 2 --
10
1 file changed, 26 insertions(+), 55 deletions(-)
12
target/arm/translate-sve.c | 9 ++++++---
13
2 files changed, 6 insertions(+), 5 deletions(-)
11
14
12
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
13
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
14
--- a/target/arm/translate.c
17
--- a/target/arm/sme-fa64.decode
15
+++ b/target/arm/translate.c
18
+++ b/target/arm/sme-fa64.decode
16
@@ -XXX,XX +XXX,XX @@ static void gen_vfp_msr(TCGv_i32 tmp)
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
17
tcg_temp_free_i32(tmp);
20
21
FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA
22
FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT
23
-FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS
24
-FAIL 0010 0101 --10 1--- 1001 ---- ---- ---- # WRFFR, SETFFR
25
FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP
26
FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result)
27
FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA
28
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/translate-sve.c
31
+++ b/target/arm/translate-sve.c
32
@@ -XXX,XX +XXX,XX @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
33
TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)
34
35
/* Note pat == 31 is #all, to set all elements. */
36
-TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false)
37
+TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve,
38
+ do_predset, 0, FFR_PRED_NUM, 31, false)
39
40
/* Note pat == 32 is #unimp, to set no elements. */
41
TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false)
42
@@ -XXX,XX +XXX,XX @@ static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
43
.rd = a->rd, .pg = a->pg, .s = a->s,
44
.rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
45
};
46
+
47
+ s->is_nonstreaming = true;
48
return trans_AND_pppp(s, &alt_a);
18
}
49
}
19
50
20
-static void gen_neon_dup_u8(TCGv_i32 var, int shift)
51
-TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
21
-{
52
-TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
22
- TCGv_i32 tmp = tcg_temp_new_i32();
53
+TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
23
- if (shift)
54
+TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
24
- tcg_gen_shri_i32(var, var, shift);
55
25
- tcg_gen_ext8u_i32(var, var);
56
static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
26
- tcg_gen_shli_i32(tmp, var, 8);
57
void (*gen_fn)(TCGv_i32, TCGv_ptr,
27
- tcg_gen_or_i32(var, var, tmp);
28
- tcg_gen_shli_i32(tmp, var, 16);
29
- tcg_gen_or_i32(var, var, tmp);
30
- tcg_temp_free_i32(tmp);
31
-}
32
-
33
static void gen_neon_dup_low16(TCGv_i32 var)
34
{
35
TCGv_i32 tmp = tcg_temp_new_i32();
36
@@ -XXX,XX +XXX,XX @@ static void gen_neon_dup_high16(TCGv_i32 var)
37
tcg_temp_free_i32(tmp);
38
}
39
40
-static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
41
-{
42
- /* Load a single Neon element and replicate into a 32 bit TCG reg */
43
- TCGv_i32 tmp = tcg_temp_new_i32();
44
- switch (size) {
45
- case 0:
46
- gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
47
- gen_neon_dup_u8(tmp, 0);
48
- break;
49
- case 1:
50
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
51
- gen_neon_dup_low16(tmp);
52
- break;
53
- case 2:
54
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
55
- break;
56
- default: /* Avoid compiler warnings. */
57
- abort();
58
- }
59
- return tmp;
60
-}
61
-
62
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
63
uint32_t dp)
64
{
65
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
66
int load;
67
int shift;
68
int n;
69
+ int vec_size;
70
TCGv_i32 addr;
71
TCGv_i32 tmp;
72
TCGv_i32 tmp2;
73
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
74
}
75
addr = tcg_temp_new_i32();
76
load_reg_var(s, addr, rn);
77
- if (nregs == 1) {
78
- /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
79
- tmp = gen_load_and_replicate(s, addr, size);
80
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
81
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
82
- if (insn & (1 << 5)) {
83
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
84
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
85
- }
86
- tcg_temp_free_i32(tmp);
87
- } else {
88
- /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
89
- stride = (insn & (1 << 5)) ? 2 : 1;
90
- for (reg = 0; reg < nregs; reg++) {
91
- tmp = gen_load_and_replicate(s, addr, size);
92
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
93
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
94
- tcg_temp_free_i32(tmp);
95
- tcg_gen_addi_i32(addr, addr, 1 << size);
96
- rd += stride;
97
+
98
+ /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
99
+ * VLD2/3/4 to all lanes: bit 5 indicates register stride.
100
+ */
101
+ stride = (insn & (1 << 5)) ? 2 : 1;
102
+ vec_size = nregs == 1 ? stride * 8 : 8;
103
+
104
+ tmp = tcg_temp_new_i32();
105
+ for (reg = 0; reg < nregs; reg++) {
106
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
107
+ s->be_data | size);
108
+ if ((rd & 1) && vec_size == 16) {
109
+ /* We cannot write 16 bytes at once because the
110
+ * destination is unaligned.
111
+ */
112
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
113
+ 8, 8, tmp);
114
+ tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
115
+ neon_reg_offset(rd, 0), 8, 8);
116
+ } else {
117
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
118
+ vec_size, vec_size, tmp);
119
}
120
+ tcg_gen_addi_i32(addr, addr, 1 << size);
121
+ rd += stride;
122
}
123
+ tcg_temp_free_i32(tmp);
124
tcg_temp_free_i32(addr);
125
stride = (1 << size) * nregs;
126
} else {
127
--
58
--
128
2.19.1
59
2.25.1
129
130
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Since QEMU does not implement ASIDs, changes to the ASID must flush the
3
Mark these as non-streaming instructions, which should trap
4
TLB. However, if the ASID does not change, there is no reason to flush.
4
if full a64 support is not enabled in streaming mode.
5
5
6
In testing a boot of the Ubuntu installer to the first menu, this reduces
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
the number of flushes by 30%, or nearly 600k instances.
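For reference (an illustration, not part of the patch itself): the
16-bit ASID field lives in TTBR bits [63:48], so the new test XORs
the old and new register values and looks only at that field:

    uint64_t old = raw_read(env, ri);
    /* non-zero iff the 16-bit ASID field differs */
    bool asid_changed = extract64(old ^ value, 48, 16) != 0;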
8
9
Reviewed-by: Aaron Lindsay <aaron@os.amperecomputing.com>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Message-id: 20220708151540.18136-7-richard.henderson@linaro.org
12
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
13
Message-id: 20181019015617.22583-3-richard.henderson@linaro.org
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
10
---
16
target/arm/helper.c | 8 +++-----
11
target/arm/sme-fa64.decode | 3 ---
17
1 file changed, 3 insertions(+), 5 deletions(-)
12
target/arm/translate-sve.c | 22 ++++++++++++----------
13
2 files changed, 12 insertions(+), 13 deletions(-)
18
14
19
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
20
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/helper.c
17
--- a/target/arm/sme-fa64.decode
22
+++ b/target/arm/helper.c
18
+++ b/target/arm/sme-fa64.decode
23
@@ -XXX,XX +XXX,XX @@ static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
24
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
25
uint64_t value)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
26
{
22
27
- /* 64 bit accesses to the TTBRs can change the ASID and so we
23
-FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA
28
- * must flush the TLB.
24
-FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT
29
- */
25
-FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP
30
- if (cpreg_field_is_64bit(ri)) {
26
FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result)
31
+ /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
27
FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA
32
+ if (cpreg_field_is_64bit(ri) &&
28
FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL
33
+ extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
29
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
34
ARMCPU *cpu = arm_env_get_cpu(env);
30
index XXXXXXX..XXXXXXX 100644
35
-
31
--- a/target/arm/translate-sve.c
36
tlb_flush(CPU(cpu));
32
+++ b/target/arm/translate-sve.c
37
}
33
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_2 * const fexpa_fns[4] = {
38
raw_write(env, ri, value);
34
NULL, gen_helper_sve_fexpa_h,
35
gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
36
};
37
-TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz,
38
- fexpa_fns[a->esz], a->rd, a->rn, 0)
39
+TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz,
40
+ fexpa_fns[a->esz], a->rd, a->rn, 0)
41
42
static gen_helper_gvec_3 * const ftssel_fns[4] = {
43
NULL, gen_helper_sve_ftssel_h,
44
gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
45
};
46
-TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0)
47
+TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz,
48
+ ftssel_fns[a->esz], a, 0)
49
50
/*
51
*** SVE Predicate Logical Operations Group
52
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
53
static gen_helper_gvec_3 * const compact_fns[4] = {
54
NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
55
};
56
-TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0)
57
+TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz,
58
+ compact_fns[a->esz], a, 0)
59
60
/* Call the helper that computes the ARM LastActiveElement pseudocode
61
* function, scaled by the element size. This includes the not found
62
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3 * const bext_fns[4] = {
63
gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
64
gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
65
};
66
-TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
67
- bext_fns[a->esz], a, 0)
68
+TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
69
+ bext_fns[a->esz], a, 0)
70
71
static gen_helper_gvec_3 * const bdep_fns[4] = {
72
gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
73
gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
74
};
75
-TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
76
- bdep_fns[a->esz], a, 0)
77
+TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
78
+ bdep_fns[a->esz], a, 0)
79
80
static gen_helper_gvec_3 * const bgrp_fns[4] = {
81
gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
82
gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
83
};
84
-TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
85
- bgrp_fns[a->esz], a, 0)
86
+TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
87
+ bgrp_fns[a->esz], a, 0)
88
89
static gen_helper_gvec_3 * const cadd_fns[4] = {
90
gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
39
--
91
--
40
2.19.1
92
2.25.1
41
42
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The EL3 version of this register does not include an ASID,
3
Mark these as non-streaming instructions, which should trap
4
and so the tlb_flush performed by vmsa_ttbr_write is not needed.
4
if full a64 support is not enabled in streaming mode.
5
5
6
Reviewed-by: Aaron Lindsay <aaron@os.amperecomputing.com>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Message-id: 20220708151540.18136-8-richard.henderson@linaro.org
9
Message-id: 20181019015617.22583-2-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
10
---
12
target/arm/helper.c | 2 +-
11
target/arm/sme-fa64.decode | 2 --
13
1 file changed, 1 insertion(+), 1 deletion(-)
12
target/arm/translate-sve.c | 24 +++++++++++++++---------
13
2 files changed, 15 insertions(+), 11 deletions(-)
14
14
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/helper.c
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
.fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
{ .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
22
23
- .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
23
-FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result)
24
+ .access = PL3_RW, .resetvalue = 0,
24
-FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA
25
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
25
FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL
26
{ .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
26
FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD
27
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
27
FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA
28
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/translate-sve.c
31
+++ b/target/arm/translate-sve.c
32
@@ -XXX,XX +XXX,XX @@ static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
33
gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
34
NULL, gen_helper_sve2_pmull_d,
35
};
36
- if (a->esz == 0
37
- ? !dc_isar_feature(aa64_sve2_pmull128, s)
38
- : !dc_isar_feature(aa64_sve, s)) {
39
+
40
+ if (a->esz == 0) {
41
+ if (!dc_isar_feature(aa64_sve2_pmull128, s)) {
42
+ return false;
43
+ }
44
+ s->is_nonstreaming = true;
45
+ } else if (!dc_isar_feature(aa64_sve, s)) {
46
return false;
47
}
48
return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
49
@@ -XXX,XX +XXX,XX @@ DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz)
50
* SVE Integer Multiply-Add (unpredicated)
51
*/
52
53
-TRANS_FEAT(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_s,
54
- a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
55
-TRANS_FEAT(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_d,
56
- a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
57
+TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz,
58
+ gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra,
59
+ 0, FPST_FPCR)
60
+TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz,
61
+ gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra,
62
+ 0, FPST_FPCR)
63
64
static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
65
NULL, gen_helper_sve2_sqdmlal_zzzw_h,
66
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
67
TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
68
gen_helper_gvec_bfdot_idx, a)
69
70
-TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
71
- gen_helper_gvec_bfmmla, a, 0)
72
+TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
73
+ gen_helper_gvec_bfmmla, a, 0)
74
75
static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
76
{
28
--
77
--
29
2.19.1
78
2.25.1
30
31
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Announce 64-bit addressing support.
3
Mark these as non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
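For the hand-coded trans_FADDA touched below, the flag is set after
the usual feature checks but before sve_access_check(), which is
where the trap is actually raised (assuming sve_access_check()
consults s->is_nonstreaming, as wired up earlier in this series).
A minimal sketch of the pattern:

    if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
        return false;              /* plain UNDEF, no SME trap */
    }
    s->is_nonstreaming = true;     /* must precede the access check */
    if (!sve_access_check(s)) {
        return true;               /* trapped or disabled: done */
    }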
4
5
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
7
Message-id: 20181017213932.19973-3-edgar.iglesias@gmail.com
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-9-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
hw/net/cadence_gem.c | 3 ++-
11
target/arm/sme-fa64.decode | 3 ---
12
1 file changed, 2 insertions(+), 1 deletion(-)
12
target/arm/translate-sve.c | 15 +++++++++++----
13
2 files changed, 11 insertions(+), 7 deletions(-)
13
14
14
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/net/cadence_gem.c
17
--- a/target/arm/sme-fa64.decode
17
+++ b/hw/net/cadence_gem.c
18
+++ b/target/arm/sme-fa64.decode
18
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
19
#define GEM_DESCONF4 (0x0000028C/4)
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
20
#define GEM_DESCONF5 (0x00000290/4)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
21
#define GEM_DESCONF6 (0x00000294/4)
22
22
+#define GEM_DESCONF6_64B_MASK (1U << 23)
23
-FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL
23
#define GEM_DESCONF7 (0x00000298/4)
24
-FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD
24
25
-FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA
25
#define GEM_INT_Q1_STATUS (0x00000400 / 4)
26
FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA
26
@@ -XXX,XX +XXX,XX @@ static void gem_reset(DeviceState *d)
27
FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
27
s->regs[GEM_DESCONF] = 0x02500111;
28
FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
28
s->regs[GEM_DESCONF2] = 0x2ab13fff;
29
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
29
s->regs[GEM_DESCONF5] = 0x002f2045;
30
index XXXXXXX..XXXXXXX 100644
30
- s->regs[GEM_DESCONF6] = 0x0;
31
--- a/target/arm/translate-sve.c
31
+ s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK;
32
+++ b/target/arm/translate-sve.c
32
33
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = {
33
if (s->num_priority_queues > 1) {
34
NULL, gen_helper_sve_ftmad_h,
34
queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
35
gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d,
36
};
37
-TRANS_FEAT(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
38
- ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
39
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
40
+TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
41
+ ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
42
+ a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
43
44
/*
45
*** SVE Floating Point Accumulating Reduction Group
46
@@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
47
if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
48
return false;
49
}
50
+ s->is_nonstreaming = true;
51
if (!sve_access_check(s)) {
52
return true;
53
}
54
@@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
55
DO_FP3(FADD_zzz, fadd)
56
DO_FP3(FSUB_zzz, fsub)
57
DO_FP3(FMUL_zzz, fmul)
58
-DO_FP3(FTSMUL, ftsmul)
59
DO_FP3(FRECPS, recps)
60
DO_FP3(FRSQRTS, rsqrts)
61
62
#undef DO_FP3
63
64
+static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = {
65
+ NULL, gen_helper_gvec_ftsmul_h,
66
+ gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d
67
+};
68
+TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz,
69
+ ftsmul_fns[a->esz], a, 0)
70
+
71
/*
72
*** SVE Floating Point Arithmetic - Predicated Group
73
*/
35
--
74
--
36
2.19.1
75
2.25.1
37
38
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
For a sequence of loads or stores from a single register,
3
Mark these as a non-streaming instructions, which should trap
4
little-endian operations can be promoted to an 8-byte op.
4
if full a64 support is not enabled in streaming mode.
5
This can reduce the number of operations by a factor of 8.
6
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20181011205206.3552-20-richard.henderson@linaro.org
8
Message-id: 20220708151540.18136-10-richard.henderson@linaro.org
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
10
---
13
target/arm/translate.c | 10 ++++++++++
11
target/arm/sme-fa64.decode | 1 -
14
1 file changed, 10 insertions(+)
12
target/arm/translate-sve.c | 12 ++++++------
13
2 files changed, 6 insertions(+), 7 deletions(-)
15
14
16
diff --git a/target/arm/translate.c b/target/arm/translate.c
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
17
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate.c
17
--- a/target/arm/sme-fa64.decode
19
+++ b/target/arm/translate.c
18
+++ b/target/arm/sme-fa64.decode
20
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
21
if (size == 3 && (interleave | spacing) != 1) {
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
22
return 1;
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
23
}
22
24
+ /* For our purposes, bytes are always little-endian. */
23
-FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA
25
+ if (size == 0) {
24
FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
26
+ endian = MO_LE;
25
FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
27
+ }
26
FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
28
+ /* Consecutive little-endian elements from a single register
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
29
+ * can be promoted to a larger little-endian operation.
28
index XXXXXXX..XXXXXXX 100644
30
+ */
29
--- a/target/arm/translate-sve.c
31
+ if (interleave == 1 && endian == MO_LE) {
30
+++ b/target/arm/translate-sve.c
32
+ size = 3;
31
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true)
33
+ }
32
TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false)
34
tmp64 = tcg_temp_new_i64();
33
TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true)
35
addr = tcg_temp_new_i32();
34
36
tmp2 = tcg_const_i32(1 << size);
35
-TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
36
- gen_helper_gvec_smmla_b, a, 0)
37
-TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
38
- gen_helper_gvec_usmmla_b, a, 0)
39
-TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
40
- gen_helper_gvec_ummla_b, a, 0)
41
+TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
42
+ gen_helper_gvec_smmla_b, a, 0)
43
+TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
44
+ gen_helper_gvec_usmmla_b, a, 0)
45
+TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
46
+ gen_helper_gvec_ummla_b, a, 0)
47
48
TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
49
gen_helper_gvec_bfdot, a, 0)
37
--
50
--
38
2.19.1
51
2.25.1
39
40
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Mark these as non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Message-id: 20181011205206.3552-13-richard.henderson@linaro.org
8
Message-id: 20220708151540.18136-11-richard.henderson@linaro.org
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
10
---
8
target/arm/translate.c | 70 +++++++++++++++++++++++++++++-------------
11
target/arm/sme-fa64.decode | 1 -
9
 target/arm/translate-sve.c | 35 ++++++++++++++++++-----------------
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    0100 0101 --1- ---- 1--- ---- ---- ----   # SVE2 string/histo/crypto instructions
 FAIL    1000 010- -00- ---- 10-- ---- ---- ----   # SVE2 32-bit gather NT load (vector+scalar)
 FAIL    1000 010- -00- ---- 111- ---- ---- ----   # SVE 32-bit gather prefetch (vector+imm)
 FAIL    1000 0100 0-1- ---- 0--- ---- ---- ----   # SVE 32-bit gather prefetch (scalar+vector)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
 static gen_helper_gvec_flags_4 * const match_fns[4] = {
     gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL
 };
-TRANS_FEAT(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
+TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
 
 static gen_helper_gvec_flags_4 * const nmatch_fns[4] = {
     gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL
 };
-TRANS_FEAT(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
+TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
 
 static gen_helper_gvec_4 * const histcnt_fns[4] = {
     NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
 };
-TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
-           histcnt_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
+                        histcnt_fns[a->esz], a, 0)
 
-TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
-           a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
+TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
+                        a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
 
 DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz)
 DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz)
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
 TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
            a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)
 
-TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
-           gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
+TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
+                        gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
 
-TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
-           gen_helper_crypto_aese, a, false)
-TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
-           gen_helper_crypto_aese, a, true)
+TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
+                        gen_helper_crypto_aese, a, false)
+TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
+                        gen_helper_crypto_aese, a, true)
 
-TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
-           gen_helper_crypto_sm4e, a, 0)
-TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
-           gen_helper_crypto_sm4ekey, a, 0)
+TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
+                        gen_helper_crypto_sm4e, a, 0)
+TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
+                        gen_helper_crypto_sm4ekey, a, 0)
 
-TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a)
+TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz,
+                        gen_gvec_rax1, a)
 
 TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz,
            gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR)
-- 
2.25.1

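For orientation: TRANS_FEAT_NONSTREAMING was introduced a few patches earlier in this series. A sketch of its expansion follows; treat the exact expansion as an approximation, but the shape is the point: the trans_* function is generated just as TRANS_FEAT would, with the DisasContext flagged before the feature test runs.

/*
 * Approximate expansion of TRANS_FEAT_NONSTREAMING (sketch, not a
 * verbatim copy of the series): same as TRANS_FEAT, plus setting
 * s->is_nonstreaming so that the access-check path can raise the SME
 * "illegal when Streaming SVE active" trap if FEAT_SME_FA64 is absent.
 */
#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...)            \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a)      \
    {                                                             \
        s->is_nonstreaming = true;                                \
        return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__);  \
    }
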
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 9 ---------
 target/arm/translate-sve.c | 6 ++++++
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    1000 010- -00- ---- 10-- ---- ---- ----   # SVE2 32-bit gather NT load (vector+scalar)
 FAIL    1000 010- -00- ---- 111- ---- ---- ----   # SVE 32-bit gather prefetch (vector+imm)
 FAIL    1000 0100 0-1- ---- 0--- ---- ---- ----   # SVE 32-bit gather prefetch (scalar+vector)
-FAIL    1000 010- -01- ---- 1--- ---- ---- ----   # SVE 32-bit gather load (vector+imm)
-FAIL    1000 0100 0-0- ---- 0--- ---- ---- ----   # SVE 32-bit gather load byte (scalar+vector)
-FAIL    1000 0100 1--- ---- 0--- ---- ---- ----   # SVE 32-bit gather load half (scalar+vector)
-FAIL    1000 0101 0--- ---- 0--- ---- ---- ----   # SVE 32-bit gather load word (scalar+vector)
 FAIL    1010 010- ---- ---- 011- ---- ---- ----   # SVE contiguous FF load (scalar+scalar)
 FAIL    1010 010- ---1 ---- 101- ---- ---- ----   # SVE contiguous NF load (scalar+imm)
 FAIL    1010 010- -01- ---- 000- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+scalar)
 FAIL    1010 010- -010 ---- 001- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+imm)
 FAIL    1100 010- ---- ---- ---- ---- ---- ----   # SVE 64-bit gather load/prefetch
-FAIL    1110 010- -00- ---- 001- ---- ---- ----   # SVE2 64-bit scatter NT store (vector+scalar)
-FAIL    1110 010- -10- ---- 001- ---- ---- ----   # SVE2 32-bit scatter NT store (vector+scalar)
-FAIL    1110 010- ---- ---- 1-0- ---- ---- ----   # SVE scatter store (scalar+32-bit vector)
-FAIL    1110 010- ---- ---- 101- ---- ---- ----   # SVE scatter store (misc)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
     if (!dc_isar_feature(aa64_sve, s)) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (!sve_access_check(s)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
     if (!dc_isar_feature(aa64_sve, s)) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (!sve_access_check(s)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
     if (!dc_isar_feature(aa64_sve2, s)) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (!sve_access_check(s)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
     if (!dc_isar_feature(aa64_sve, s)) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (!sve_access_check(s)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
     if (!dc_isar_feature(aa64_sve, s)) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (!sve_access_check(s)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
     if (!dc_isar_feature(aa64_sve2, s)) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (!sve_access_check(s)) {
         return true;
     }
-- 
2.25.1

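The s->is_nonstreaming flag set in each hand-written trans_* function above is consumed later, on the access-check path. The sketch below shows the assumed shape of that consumer; the sme_trap_nonstreaming tb-flag name comes from earlier in this series, and the exact placement in the real code may differ.

/*
 * Sketch of the consumer of s->is_nonstreaming (assumed shape):
 * after the ordinary FP enablement check, a flagged instruction
 * traps when Streaming SVE mode is active without FEAT_SME_FA64.
 */
static bool fp_access_check(DisasContext *s)
{
    if (!fp_access_check_only(s)) {
        return false;
    }
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }
    return true;
}
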
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap if full
a64 support is not enabled in streaming mode.  In this case, introduce
PRF_ns (prefetch non-streaming) to handle the checks.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode |  3 ---
 target/arm/sve.decode      | 10 +++++-----
 target/arm/translate-sve.c | 11 +++++++++++
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    1000 010- -00- ---- 111- ---- ---- ----   # SVE 32-bit gather prefetch (vector+imm)
-FAIL    1000 0100 0-1- ---- 0--- ---- ---- ----   # SVE 32-bit gather prefetch (scalar+vector)
 FAIL    1010 010- ---- ---- 011- ---- ---- ----   # SVE contiguous FF load (scalar+scalar)
 FAIL    1010 010- ---1 ---- 101- ---- ---- ----   # SVE contiguous NF load (scalar+imm)
 FAIL    1010 010- -01- ---- 000- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+scalar)
 FAIL    1010 010- -010 ---- 001- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+imm)
-FAIL    1100 010- ---- ---- ---- ---- ---- ----   # SVE 64-bit gather load/prefetch
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ LD1RO_zpri      1010010 .. 01 0.... 001 ... ..... ..... \
                 @rpri_load_msz nreg=0
 
 # SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets)
-PRF             1000010 00 -1 ----- 0-- --- ----- 0 ----
+PRF_ns          1000010 00 -1 ----- 0-- --- ----- 0 ----
 
 # SVE 32-bit gather prefetch (vector plus immediate)
-PRF             1000010 -- 00 ----- 111 --- ----- 0 ----
+PRF_ns          1000010 -- 00 ----- 111 --- ----- 0 ----
 
 # SVE contiguous prefetch (scalar plus immediate)
 PRF             1000010 11 1- ----- 0-- --- ----- 0 ----
@@ -XXX,XX +XXX,XX @@ LD1_zpiz        1100010 .. 01 ..... 1.. ... ..... ..... \
                 @rpri_g_load esz=3
 
 # SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets)
-PRF             1100010 00 11 ----- 1-- --- ----- 0 ----
+PRF_ns          1100010 00 11 ----- 1-- --- ----- 0 ----
 
 # SVE 64-bit gather prefetch (scalar plus unpacked 32-bit scaled offsets)
-PRF             1100010 00 -1 ----- 0-- --- ----- 0 ----
+PRF_ns          1100010 00 -1 ----- 0-- --- ----- 0 ----
 
 # SVE 64-bit gather prefetch (vector plus immediate)
-PRF             1100010 -- 00 ----- 111 --- ----- 0 ----
+PRF_ns          1100010 -- 00 ----- 111 --- ----- 0 ----
 
 ### SVE Memory Store Group
 
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
     return true;
 }
 
+static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a)
+{
+    if (!dc_isar_feature(aa64_sve, s)) {
+        return false;
+    }
+    /* Prefetch is a nop within QEMU.  */
+    s->is_nonstreaming = true;
+    (void)sve_access_check(s);
+    return true;
+}
+
 /*
  * Move Prefix
  *
-- 
2.25.1

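Why a dedicated handler for an instruction QEMU treats as a nop? Because the enablement traps must still fire. The same control flow as trans_PRF_ns above, spelled out as an illustrative rewrite:

/*
 * Equivalent control flow of trans_PRF_ns() (illustration only):
 * the access check still runs because it may raise an SVE or SME
 * exception, but its result is ignored since a prefetch emits nothing.
 */
static bool prf_ns_expanded(DisasContext *s)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;                 /* wrong decode space: UNDEF path */
    }
    s->is_nonstreaming = true;        /* arm the FA64 streaming trap */
    if (sve_access_check(s)) {
        /* access OK: nothing to emit; prefetch is a nop in QEMU */
    }
    return true;                      /* insn handled either way */
}
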
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 2 --
 target/arm/translate-sve.c | 2 ++
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    1010 010- ---- ---- 011- ---- ---- ----   # SVE contiguous FF load (scalar+scalar)
-FAIL    1010 010- ---1 ---- 101- ---- ---- ----   # SVE contiguous NF load (scalar+imm)
 FAIL    1010 010- -01- ---- 000- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+scalar)
 FAIL    1010 010- -010 ---- 001- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+imm)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
     if (!dc_isar_feature(aa64_sve, s)) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (sve_access_check(s)) {
         TCGv_i64 addr = new_tmp_a64(s);
         tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
@@ -XXX,XX +XXX,XX @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
     if (!dc_isar_feature(aa64_sve, s)) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (sve_access_check(s)) {
         int vsz = vec_full_reg_size(s);
         int elements = vsz >> dtype_esz[a->dtype];
-- 
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 3 ---
 target/arm/translate-sve.c | 2 ++
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --0- ---- ---- ---- ---- ----   # Load/store FP register (unscaled imm)
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
-
-FAIL    1010 010- -01- ---- 000- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+scalar)
-FAIL    1010 010- -010 ---- 001- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+imm)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
     if (a->rm == 31) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (sve_access_check(s)) {
         TCGv_i64 addr = new_tmp_a64(s);
         tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
     if (!dc_isar_feature(aa64_sve_f64mm, s)) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (sve_access_check(s)) {
         TCGv_i64 addr = new_tmp_a64(s);
         tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
-- 
2.25.1

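A detail shared by the hand-written handlers in the patches above: s->is_nonstreaming is always set before sve_access_check() is called, since the access check is where any trap is actually emitted. The common template, as a sketch:

/*
 * Sketch of the pattern used by these hand-written trans_* functions:
 * decode-level feature tests first, then flag the insn as
 * non-streaming, and only then run the access check, which is the
 * point where the SME trap (if any) is generated.
 */
static bool trans_nonstreaming_template(DisasContext *s)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;                /* wrong decode space: UNDEF */
    }
    s->is_nonstreaming = true;       /* must precede the check */
    if (!sve_access_check(s)) {
        return true;                 /* exception already raised */
    }
    /* ... emit the actual load here ... */
    return true;
}
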
From: Richard Henderson <richard.henderson@linaro.org>

These functions will be used to verify that the cpu
is in the correct state for a given instruction.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h | 21 +++++++++++++++++++++
 target/arm/translate-a64.c | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
 bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
                             unsigned int imms, unsigned int immr);
 bool sve_access_check(DisasContext *s);
+bool sme_enabled_check(DisasContext *s);
+bool sme_enabled_check_with_svcr(DisasContext *s, unsigned);
+
+/* This function corresponds to CheckStreamingSVEEnabled. */
+static inline bool sme_sm_enabled_check(DisasContext *s)
+{
+    return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK);
+}
+
+/* This function corresponds to CheckSMEAndZAEnabled. */
+static inline bool sme_za_enabled_check(DisasContext *s)
+{
+    return sme_enabled_check_with_svcr(s, R_SVCR_ZA_MASK);
+}
+
+/* Note that this function corresponds to CheckStreamingSVEAndZAEnabled. */
+static inline bool sme_smza_enabled_check(DisasContext *s)
+{
+    return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
+}
+
 TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
 TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                         bool tag_checked, int log2_size);
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool sme_access_check(DisasContext *s)
     return true;
 }
 
+/* This function corresponds to CheckSMEEnabled. */
+bool sme_enabled_check(DisasContext *s)
+{
+    /*
+     * Note that unlike sve_excp_el, we have not constrained sme_excp_el
+     * to be zero when fp_excp_el has priority.  This is because we need
+     * sme_excp_el by itself for cpregs access checks.
+     */
+    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
+        s->fp_access_checked = true;
+        return sme_access_check(s);
+    }
+    return fp_access_check_only(s);
+}
+
+/* Common subroutine for CheckSMEAnd*Enabled. */
+bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
+{
+    if (!sme_enabled_check(s)) {
+        return false;
+    }
+    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_smetrap(SME_ET_NotStreaming, false));
+        return false;
+    }
+    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_smetrap(SME_ET_InactiveZA, false));
+        return false;
+    }
+    return true;
+}
+
 /*
  * This utility function is for doing register extension with an
  * optional shift.  You will likely want to pass a temporary for the
-- 
2.25.1

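A hypothetical caller of the new checks, showing the intended usage pattern for an instruction that requires both streaming mode and active ZA; the names trans_ZA_OP_sketch and arg_za_op are illustrative, not from this series:

/*
 * Hypothetical user of the checks above (names are illustrative):
 * an SME instruction touching ZA from streaming mode gates on both
 * SVCR.SM and SVCR.ZA before emitting any TCG ops.
 */
typedef struct arg_za_op arg_za_op;   /* illustrative argument struct */

static bool trans_ZA_OP_sketch(DisasContext *s, arg_za_op *a)
{
    if (!dc_isar_feature(aa64_sme, s)) {
        return false;
    }
    if (!sme_smza_enabled_check(s)) {
        return true;     /* an exception was raised; insn is handled */
    }
    /* ... emit TCG ops that read/write ZA storage ... */
    return true;
}
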
From: Richard Henderson <richard.henderson@linaro.org>

The pseudocode for CheckSVEEnabled gains a check for Streaming
SVE mode, and for SME present but SVE absent.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s)
     return true;
 }
 
-/* Check that SVE access is enabled.  If it is, return true.
+/*
+ * Check that SVE access is enabled.  If it is, return true.
  * If not, emit code to generate an appropriate exception and return false.
+ * This function corresponds to CheckSVEEnabled().
  */
 bool sve_access_check(DisasContext *s)
 {
-    if (s->sve_excp_el) {
-        assert(!s->sve_access_checked);
-        s->sve_access_checked = true;
-
+    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
+        assert(dc_isar_feature(aa64_sme, s));
+        if (!sme_sm_enabled_check(s)) {
+            goto fail_exit;
+        }
+    } else if (s->sve_excp_el) {
         gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
                               syn_sve_access_trap(), s->sve_excp_el);
-        return false;
+        goto fail_exit;
     }
     s->sve_access_checked = true;
     return fp_access_check(s);
+
+ fail_exit:
+    /* Assert that we only raise one exception per instruction. */
+    assert(!s->sve_access_checked);
+    s->sve_access_checked = true;
+    return false;
 }
 
 /*
-- 
2.25.1

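The new branch order is easier to see outside diff form; here is a self-contained toy model (not QEMU code) of the same decision logic:

#include <stdbool.h>

/* Toy model of the check ordering above; fields mirror DisasContext. */
typedef struct {
    bool pstate_sm;     /* PSTATE.SM: streaming SVE mode active */
    bool have_sve;      /* CPU implements SVE */
    bool sve_trap;      /* some EL traps SVE accesses (CPACR/CPTR) */
} CheckModel;

typedef enum { ROUTE_SME, ROUTE_SVE_TRAP, ROUTE_FP } Route;

static Route sve_access_route(const CheckModel *m)
{
    if (m->pstate_sm || !m->have_sve) {
        return ROUTE_SME;       /* SME owns the check in streaming mode */
    }
    if (m->sve_trap) {
        return ROUTE_SVE_TRAP;  /* classic SVE access trap */
    }
    return ROUTE_FP;            /* plain FP/SIMD enablement check */
}
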
From: Richard Henderson <richard.henderson@linaro.org>

These SME instructions are nominally within the SVE decode space,
so we add them to sve.decode and translate-sve.c.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h | 12 ++++++++++++
 target/arm/sve.decode      |  5 ++++-
 target/arm/translate-sve.c | 38 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 54 insertions(+), 1 deletion(-)

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_size(DisasContext *s)
     return s->vl;
 }
 
+/* Return the byte size of the vector register, SVL / 8. */
+static inline int streaming_vec_reg_size(DisasContext *s)
+{
+    return s->svl;
+}
+
 /*
  * Return the offset info CPUARMState of the predicate vector register Pn.
  * Note for this purpose, FFR is P16.
@@ -XXX,XX +XXX,XX @@ static inline int pred_full_reg_size(DisasContext *s)
     return s->vl >> 3;
 }
 
+/* Return the byte size of the predicate register, SVL / 64. */
+static inline int streaming_pred_reg_size(DisasContext *s)
+{
+    return s->svl >> 3;
+}
+
 /*
  * Round up the size of a register to a size allowed by
  * the tcg vector infrastructure.  Any operation which uses this
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ INDEX_ri        00000100 esz:2 1 imm:s5 010001 rn:5 rd:5
 # SVE index generation (register start, register increment)
 INDEX_rr        00000100 .. 1 ..... 010011 ..... .....          @rd_rn_rm
 
-### SVE Stack Allocation Group
+### SVE / Streaming SVE Stack Allocation Group
 
 # SVE stack frame adjustment
 ADDVL           00000100 001 ..... 01010 ...... .....           @rd_rn_i6
+ADDSVL          00000100 001 ..... 01011 ...... .....           @rd_rn_i6
 ADDPL           00000100 011 ..... 01010 ...... .....           @rd_rn_i6
+ADDSPL          00000100 011 ..... 01011 ...... .....           @rd_rn_i6
 
 # SVE stack frame size
 RDVL            00000100 101 11111 01010 imm:s6 rd:5
+RDSVL           00000100 101 11111 01011 imm:s6 rd:5
 
 ### SVE Bitwise Shift - Unpredicated Group
 
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
     return true;
 }
 
+static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a)
+{
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (sme_enabled_check(s)) {
+        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
+        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
+        tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s));
+    }
+    return true;
+}
+
 static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
 {
     if (!dc_isar_feature(aa64_sve, s)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
     return true;
 }
 
+static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a)
+{
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (sme_enabled_check(s)) {
+        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
+        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
+        tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s));
+    }
+    return true;
+}
+
 static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
 {
     if (!dc_isar_feature(aa64_sve, s)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
     return true;
 }
 
+static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a)
+{
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (sme_enabled_check(s)) {
+        TCGv_i64 reg = cpu_reg(s, a->rd);
+        tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s));
+    }
+    return true;
+}
+
 /*
  *** SVE Compute Vector Address Group
  */
-- 
2.25.1

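A worked example of the scaling these helpers implement, runnable stand-alone; the SVL of 512 bits is just an example value:

#include <stdio.h>

/* s->svl caches the streaming vector length in bytes, so a predicate
 * register is svl/8 bytes (one predicate bit per vector byte). */
int main(void)
{
    int svl_bits = 512;              /* example streaming vector length */
    int svl_bytes = svl_bits / 8;    /* streaming_vec_reg_size()  -> 64 */
    int pred_bytes = svl_bytes / 8;  /* streaming_pred_reg_size() ->  8 */

    /* RDSVL Xd, #imm computes imm * SVL-in-bytes, e.g. imm = 2: */
    printf("RDSVL #2  -> %d\n", 2 * svl_bytes);      /* 128 */
    /* ADDSPL Xd, Xn, #imm adds imm * predicate bytes, e.g. imm = -1: */
    printf("ADDSPL #-1 -> %d\n", -1 * pred_bytes);   /* -8 */
    return 0;
}
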
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
For a sequence of loads or stores from a single register,
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
little-endian operations can be promoted to an 8-byte op.
5
This can reduce the number of operations by a factor of 8.
6
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20181011205206.3552-5-richard.henderson@linaro.org
5
Message-id: 20220708151540.18136-19-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
7
---
12
target/arm/translate-a64.c | 66 +++++++++++++++++++++++---------------
8
target/arm/helper-sme.h | 2 ++
13
1 file changed, 40 insertions(+), 26 deletions(-)
9
target/arm/sme.decode | 4 ++++
10
target/arm/sme_helper.c | 25 +++++++++++++++++++++++++
11
target/arm/translate-sme.c | 13 +++++++++++++
12
4 files changed, 44 insertions(+)
14
13
15
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
14
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-a64.c
16
--- a/target/arm/helper-sme.h
18
+++ b/target/arm/translate-a64.c
17
+++ b/target/arm/helper-sme.h
19
@@ -XXX,XX +XXX,XX @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
18
@@ -XXX,XX +XXX,XX @@
20
19
21
/* Store from vector register to memory */
20
DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
22
static void do_vec_st(DisasContext *s, int srcidx, int element,
21
DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
23
- TCGv_i64 tcg_addr, int size)
22
+
24
+ TCGv_i64 tcg_addr, int size, TCGMemOp endian)
23
+DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32)
25
{
24
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
26
- TCGMemOp memop = s->be_data + size;
25
index XXXXXXX..XXXXXXX 100644
27
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
26
--- a/target/arm/sme.decode
28
27
+++ b/target/arm/sme.decode
29
read_vec_element(s, tcg_tmp, srcidx, element, size);
28
@@ -XXX,XX +XXX,XX @@
30
- tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
29
#
31
+ tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
30
# This file is processed by scripts/decodetree.py
32
31
#
33
tcg_temp_free_i64(tcg_tmp);
32
+
33
+### SME Misc
34
+
35
+ZERO 11000000 00 001 00000000000 imm:8
36
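
The ZERO pattern above fixes bits 31:8 and takes bits 7:0 as imm. A hand-rolled equivalent of the test that the decoder generated by scripts/decodetree.py performs might look like this (a sketch under that reading of the pattern, not the generated code):

#include <stdbool.h>
#include <stdint.h>

/* Bits [31:8] fixed at 1100 0000 0000 1000 0000 0000, imm in [7:0]. */
static bool decode_zero(uint32_t insn, uint32_t *imm)
{
    if ((insn & 0xffffff00u) != 0xc0080000u) {
        return false;
    }
    *imm = insn & 0xffu;
    return true;
}
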
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/sme_helper.c
39
+++ b/target/arm/sme_helper.c
40
@@ -XXX,XX +XXX,XX @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i)
41
memset(env->zarray, 0, sizeof(env->zarray));
42
}
34
}
43
}
35
44
+
36
/* Load from memory to vector register */
45
+void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
37
static void do_vec_ld(DisasContext *s, int destidx, int element,
46
+{
38
- TCGv_i64 tcg_addr, int size)
47
+ uint32_t i;
39
+ TCGv_i64 tcg_addr, int size, TCGMemOp endian)
48
+
40
{
49
+ /*
41
- TCGMemOp memop = s->be_data + size;
50
+ * Special case clearing the entire ZA space.
42
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
51
+ * This falls into the CONSTRAINED UNPREDICTABLE zeroing of any
43
52
+ * parts of the ZA storage outside of SVL.
44
- tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
53
+ */
45
+ tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
54
+ if (imm == 0xff) {
46
write_vec_element(s, tcg_tmp, destidx, element, size);
55
+ memset(env->zarray, 0, sizeof(env->zarray));
47
56
+ return;
48
tcg_temp_free_i64(tcg_tmp);
49
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
50
bool is_postidx = extract32(insn, 23, 1);
51
bool is_q = extract32(insn, 30, 1);
52
TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;
53
+ TCGMemOp endian = s->be_data;
54
55
- int ebytes = 1 << size;
56
- int elements = (is_q ? 128 : 64) / (8 << size);
57
+ int ebytes; /* bytes per element */
58
+ int elements; /* elements per vector */
59
int rpt; /* num iterations */
60
int selem; /* structure elements */
61
int r;
62
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
63
gen_check_sp_alignment(s);
64
}
65
66
+ /* For our purposes, bytes are always little-endian. */
67
+ if (size == 0) {
68
+ endian = MO_LE;
69
+ }
57
+ }
70
+
58
+
71
+ /* Consecutive little-endian elements from a single register
59
+ /*
72
+ * can be promoted to a larger little-endian operation.
60
+ * Recall that ZAnH.D[m] is spread across ZA[n+8*m],
61
+ * so each row is discontiguous within ZA[].
73
+ */
62
+ */
74
+ if (selem == 1 && endian == MO_LE) {
63
+ for (i = 0; i < svl; i++) {
75
+ size = 3;
64
+ if (imm & (1 << (i % 8))) {
76
+ }
65
+ memset(&env->zarray[i], 0, svl);
77
+ ebytes = 1 << size;
78
+ elements = (is_q ? 16 : 8) / ebytes;
79
+
80
tcg_rn = cpu_reg_sp(s, rn);
81
tcg_addr = tcg_temp_new_i64();
82
tcg_gen_mov_i64(tcg_addr, tcg_rn);
83
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
84
for (r = 0; r < rpt; r++) {
85
int e;
86
for (e = 0; e < elements; e++) {
87
- int tt = (rt + r) % 32;
88
int xs;
89
for (xs = 0; xs < selem; xs++) {
90
+ int tt = (rt + r + xs) % 32;
91
if (is_store) {
92
- do_vec_st(s, tt, e, tcg_addr, size);
93
+ do_vec_st(s, tt, e, tcg_addr, size, endian);
94
} else {
95
- do_vec_ld(s, tt, e, tcg_addr, size);
96
-
97
- /* For non-quad operations, setting a slice of the low
98
- * 64 bits of the register clears the high 64 bits (in
99
- * the ARM ARM pseudocode this is implicit in the fact
100
- * that 'rval' is a 64 bit wide variable).
101
- * For quad operations, we might still need to zero the
102
- * high bits of SVE. We optimize by noticing that we only
103
- * need to do this the first time we touch a register.
104
- */
105
- if (e == 0 && (r == 0 || xs == selem - 1)) {
106
- clear_vec_high(s, is_q, tt);
107
- }
108
+ do_vec_ld(s, tt, e, tcg_addr, size, endian);
109
}
110
tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
111
- tt = (tt + 1) % 32;
112
}
113
}
114
}
115
116
+ if (!is_store) {
117
+ /* For non-quad operations, setting a slice of the low
118
+ * 64 bits of the register clears the high 64 bits (in
119
+ * the ARM ARM pseudocode this is implicit in the fact
120
+ * that 'rval' is a 64 bit wide variable).
121
+ * For quad operations, we might still need to zero the
122
+ * high bits of SVE.
123
+ */
124
+ for (r = 0; r < rpt * selem; r++) {
125
+ int tt = (rt + r) % 32;
126
+ clear_vec_high(s, is_q, tt);
127
+ }
66
+ }
128
+ }
67
+ }
68
+}
69
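
The zeroing loop in helper_sme_zero relies on the ZA tile interleaving: there are eight .D tiles and tile ZAn.D owns every ZA row r with r % 8 == n, so bit n of the immediate clears exactly those rows. A standalone sketch of that mapping (the function name and printf output are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Print the ZA rows cleared by ZERO { mask } for an svl-byte vector;
 * ZA row r belongs to tile ZA(r % 8).D. */
static void zero_rows(uint8_t imm, unsigned svl_bytes)
{
    for (unsigned r = 0; r < svl_bytes; r++) {
        if (imm & (1u << (r % 8))) {
            printf("clear row %u (tile ZA%u.D)\n", r, r % 8);
        }
    }
}

int main(void)
{
    zero_rows(0x01, 32);    /* SVL = 256 bits: rows 0, 8, 16, 24 */
    return 0;
}
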
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/translate-sme.c
72
+++ b/target/arm/translate-sme.c
73
@@ -XXX,XX +XXX,XX @@
74
*/
75
76
#include "decode-sme.c.inc"
129
+
77
+
130
if (is_postidx) {
78
+
131
int rm = extract32(insn, 16, 5);
79
+static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
132
if (rm == 31) {
80
+{
133
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
81
+ if (!dc_isar_feature(aa64_sme, s)) {
134
} else {
82
+ return false;
135
/* Load/store one element per register */
83
+ }
136
if (is_load) {
84
+ if (sme_za_enabled_check(s)) {
137
- do_vec_ld(s, rt, index, tcg_addr, scale);
85
+ gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm),
138
+ do_vec_ld(s, rt, index, tcg_addr, scale, s->be_data);
86
+ tcg_constant_i32(streaming_vec_reg_size(s)));
139
} else {
87
+ }
140
- do_vec_st(s, rt, index, tcg_addr, scale);
88
+ return true;
141
+ do_vec_st(s, rt, index, tcg_addr, scale, s->be_data);
89
+}
142
}
143
}
144
tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
145
--
2.19.1
--
2.25.1
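
The promotion described in the load/store-multiple commit message above rests on a simple identity: reading N consecutive little-endian elements from consecutive addresses assembles the same bits as a single little-endian access of N times the size. A self-contained check of the byte case (a sketch that assumes a little-endian host for the memcpy):

#include <stdint.h>
#include <string.h>
#include <assert.h>

int main(void)
{
    uint8_t mem[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    uint64_t wide, narrow = 0;

    /* One 8-byte little-endian load... */
    memcpy(&wide, mem, 8);          /* assumes a little-endian host */

    /* ...equals eight 1-byte loads placed in ascending lanes. */
    for (int i = 0; i < 8; i++) {
        narrow |= (uint64_t)mem[i] << (8 * i);
    }
    assert(wide == narrow);
    return 0;
}
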
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Having V6 alone imply jazelle was wrong for cortex-m0.
3
We can reuse the SVE functions for implementing moves to/from
4
Change to an assertion for V6 & !M.
4
horizontal tile slices, but we need new ones for moves to/from
5
vertical tile slices.
5
6
6
This was harmless, because the only place we tested ARM_FEATURE_JAZELLE
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
was for 'bxj' in disas_arm(), which is unreachable for M-profile cores.
8
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20181016223115.24100-6-richard.henderson@linaro.org
9
Message-id: 20220708151540.18136-20-richard.henderson@linaro.org
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
11
---
15
target/arm/cpu.h | 6 +++++-
12
target/arm/helper-sme.h | 12 +++
16
target/arm/cpu.c | 17 ++++++++++++++---
13
target/arm/helper-sve.h | 2 +
17
target/arm/translate.c | 2 +-
14
target/arm/translate-a64.h | 8 ++
18
3 files changed, 20 insertions(+), 5 deletions(-)
15
target/arm/translate.h | 5 ++
16
target/arm/sme.decode | 15 ++++
17
target/arm/sme_helper.c | 151 ++++++++++++++++++++++++++++++++++++-
18
target/arm/sve_helper.c | 12 +++
19
target/arm/translate-sme.c | 127 +++++++++++++++++++++++++++++++
20
8 files changed, 331 insertions(+), 1 deletion(-)
19
21
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
22
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
21
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
24
--- a/target/arm/helper-sme.h
23
+++ b/target/arm/cpu.h
25
+++ b/target/arm/helper-sme.h
24
@@ -XXX,XX +XXX,XX @@ enum arm_features {
26
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
25
ARM_FEATURE_PMU, /* has PMU support */
27
DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
26
ARM_FEATURE_VBAR, /* has cp15 VBAR */
28
27
ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
29
DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32)
28
- ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
30
+
29
ARM_FEATURE_SVE, /* has Scalable Vector Extension */
31
+/* Move to/from vertical array slices, i.e. columns, so 'c'. */
30
ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
32
+DEF_HELPER_FLAGS_4(sme_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
31
ARM_FEATURE_M_MAIN, /* M profile Main Extension */
33
+DEF_HELPER_FLAGS_4(sme_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
32
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_arm_div(const ARMISARegisters *id)
34
+DEF_HELPER_FLAGS_4(sme_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
33
return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
35
+DEF_HELPER_FLAGS_4(sme_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
36
+DEF_HELPER_FLAGS_4(sme_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
37
+DEF_HELPER_FLAGS_4(sme_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
38
+DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
39
+DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
40
+DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
41
+DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
42
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/helper-sve.h
45
+++ b/target/arm/helper-sve.h
46
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG,
47
void, ptr, ptr, ptr, ptr, i32)
48
DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG,
49
void, ptr, ptr, ptr, ptr, i32)
50
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_q, TCG_CALL_NO_RWG,
51
+ void, ptr, ptr, ptr, ptr, i32)
52
53
DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG,
54
void, ptr, ptr, ptr, ptr, i32)
55
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/translate-a64.h
58
+++ b/target/arm/translate-a64.h
59
@@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s)
60
return size_for_gvec(pred_full_reg_size(s));
34
}
61
}
35
62
36
+static inline bool isar_feature_jazelle(const ARMISARegisters *id)
63
+/* Return a newly allocated pointer to the predicate register. */
37
+{
64
+static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno)
38
+ return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
65
+{
39
+}
66
+ TCGv_ptr ret = tcg_temp_new_ptr();
40
+
67
+ tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno));
41
static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
68
+ return ret;
69
+}
70
+
71
bool disas_sve(DisasContext *, uint32_t);
72
bool disas_sme(DisasContext *, uint32_t);
73
74
diff --git a/target/arm/translate.h b/target/arm/translate.h
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/translate.h
77
+++ b/target/arm/translate.h
78
@@ -XXX,XX +XXX,XX @@ static inline int plus_2(DisasContext *s, int x)
79
return x + 2;
80
}
81
82
+static inline int plus_12(DisasContext *s, int x)
83
+{
84
+ return x + 12;
85
+}
86
+
87
static inline int times_2(DisasContext *s, int x)
42
{
88
{
43
return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
89
return x * 2;
44
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
90
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
45
index XXXXXXX..XXXXXXX 100644
91
index XXXXXXX..XXXXXXX 100644
46
--- a/target/arm/cpu.c
92
--- a/target/arm/sme.decode
47
+++ b/target/arm/cpu.c
93
+++ b/target/arm/sme.decode
48
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
94
@@ -XXX,XX +XXX,XX @@
49
}
95
### SME Misc
50
if (arm_feature(env, ARM_FEATURE_V6)) {
96
51
set_feature(env, ARM_FEATURE_V5);
97
ZERO 11000000 00 001 00000000000 imm:8
52
- set_feature(env, ARM_FEATURE_JAZELLE);
98
+
53
if (!arm_feature(env, ARM_FEATURE_M)) {
99
+### SME Move into/from Array
54
+ assert(cpu_isar_feature(jazelle, cpu));
100
+
55
set_feature(env, ARM_FEATURE_AUXCR);
101
+%mova_rs 13:2 !function=plus_12
102
+&mova esz rs pg zr za_imm v:bool to_vec:bool
103
+
104
+MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \
105
+ &mova to_vec=0 rs=%mova_rs
106
+MOVA 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \
107
+ &mova to_vec=0 rs=%mova_rs esz=4
108
+
109
+MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \
110
+ &mova to_vec=1 rs=%mova_rs
111
+MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \
112
+ &mova to_vec=1 rs=%mova_rs esz=4
113
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
114
index XXXXXXX..XXXXXXX 100644
115
--- a/target/arm/sme_helper.c
116
+++ b/target/arm/sme_helper.c
117
@@ -XXX,XX +XXX,XX @@
118
119
#include "qemu/osdep.h"
120
#include "cpu.h"
121
-#include "internals.h"
122
+#include "tcg/tcg-gvec-desc.h"
123
#include "exec/helper-proto.h"
124
+#include "qemu/int128.h"
125
+#include "vec_internal.h"
126
127
/* ResetSVEState */
128
void arm_reset_sve_state(CPUARMState *env)
129
@@ -XXX,XX +XXX,XX @@ void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
56
}
130
}
57
}
131
}
58
@@ -XXX,XX +XXX,XX @@ static void arm926_initfn(Object *obj)
132
}
59
set_feature(&cpu->env, ARM_FEATURE_VFP);
133
+
60
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
134
+
61
set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
135
+/*
62
- set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
136
+ * When considering the ZA storage as an array of elements of
63
cpu->midr = 0x41069265;
137
+ * type T, the index within that array of the Nth element of
64
cpu->reset_fpsid = 0x41011090;
138
+ * a vertical slice of a tile can be calculated like this,
65
cpu->ctr = 0x1dd20d2;
139
+ * regardless of the size of type T. This is because the tiles
66
cpu->reset_sctlr = 0x00090078;
140
+ * are interleaved, so if type T is size N bytes then row 1 of
141
+ * the tile is N rows away from row 0. The division by N to
142
+ * convert a byte offset into an array index and the multiplication
143
+ * by N to convert from vslice-index-within-the-tile to
144
+ * the index within the ZA storage cancel out.
145
+ */
146
+#define tile_vslice_index(i) ((i) * sizeof(ARMVectorReg))
147
+
148
+/*
149
+ * When doing byte arithmetic on the ZA storage, the element
150
+ * byteoff bytes away in a tile vertical slice is always this
151
+ * many bytes away in the ZA storage, regardless of the
152
+ * size of the tile element, assuming that byteoff is a multiple
153
+ * of the element size. Again this is because of the interleaving
154
+ * of the tiles. For instance if we have 1 byte per element then
155
+ * each row of the ZA storage has one byte of the vslice data,
156
+ * and (counting from 0) byte 8 goes in row 8 of the storage
157
+ * at offset (8 * row-size-in-bytes).
158
+ * If we have 8 bytes per element then each row of the ZA storage
159
+ * has 8 bytes of the data, but there are 8 interleaved tiles and
160
+ * so byte 8 of the data goes into row 1 of the tile,
161
+ * which is again row 8 of the storage, so the offset is still
162
+ * (8 * row-size-in-bytes). Similarly for other element sizes.
163
+ */
164
+#define tile_vslice_offset(byteoff) ((byteoff) * sizeof(ARMVectorReg))
165
+
166
+
167
+/*
168
+ * Move Zreg vector to ZArray column.
169
+ */
170
+#define DO_MOVA_C(NAME, TYPE, H) \
171
+void HELPER(NAME)(void *za, void *vn, void *vg, uint32_t desc) \
172
+{ \
173
+ int i, oprsz = simd_oprsz(desc); \
174
+ for (i = 0; i < oprsz; ) { \
175
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
176
+ do { \
177
+ if (pg & 1) { \
178
+ *(TYPE *)(za + tile_vslice_offset(i)) = *(TYPE *)(vn + H(i)); \
179
+ } \
180
+ i += sizeof(TYPE); \
181
+ pg >>= sizeof(TYPE); \
182
+ } while (i & 15); \
183
+ } \
184
+}
185
+
186
+DO_MOVA_C(sme_mova_cz_b, uint8_t, H1)
187
+DO_MOVA_C(sme_mova_cz_h, uint16_t, H1_2)
188
+DO_MOVA_C(sme_mova_cz_s, uint32_t, H1_4)
189
+
190
+void HELPER(sme_mova_cz_d)(void *za, void *vn, void *vg, uint32_t desc)
191
+{
192
+ int i, oprsz = simd_oprsz(desc) / 8;
193
+ uint8_t *pg = vg;
194
+ uint64_t *n = vn;
195
+ uint64_t *a = za;
196
+
197
+ for (i = 0; i < oprsz; i++) {
198
+ if (pg[H1(i)] & 1) {
199
+ a[tile_vslice_index(i)] = n[i];
200
+ }
201
+ }
202
+}
203
+
204
+void HELPER(sme_mova_cz_q)(void *za, void *vn, void *vg, uint32_t desc)
205
+{
206
+ int i, oprsz = simd_oprsz(desc) / 16;
207
+ uint16_t *pg = vg;
208
+ Int128 *n = vn;
209
+ Int128 *a = za;
67
+
210
+
68
+ /*
211
+ /*
69
+ * ARMv5 does not have the ID_ISAR registers, but we can still
212
+ * Int128 is used here simply to copy 16 bytes, and to simplify
70
+ * set the field to indicate Jazelle support within QEMU.
213
+ * the address arithmetic.
71
+ */
214
+ */
72
+ cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
215
+ for (i = 0; i < oprsz; i++) {
216
+ if (pg[H2(i)] & 1) {
217
+ a[tile_vslice_index(i)] = n[i];
218
+ }
219
+ }
220
+}
221
+
222
+#undef DO_MOVA_C
223
+
224
+/*
225
+ * Move ZArray column to Zreg vector.
226
+ */
227
+#define DO_MOVA_Z(NAME, TYPE, H) \
228
+void HELPER(NAME)(void *vd, void *za, void *vg, uint32_t desc) \
229
+{ \
230
+ int i, oprsz = simd_oprsz(desc); \
231
+ for (i = 0; i < oprsz; ) { \
232
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
233
+ do { \
234
+ if (pg & 1) { \
235
+ *(TYPE *)(vd + H(i)) = *(TYPE *)(za + tile_vslice_offset(i)); \
236
+ } \
237
+ i += sizeof(TYPE); \
238
+ pg >>= sizeof(TYPE); \
239
+ } while (i & 15); \
240
+ } \
241
+}
242
+
243
+DO_MOVA_Z(sme_mova_zc_b, uint8_t, H1)
244
+DO_MOVA_Z(sme_mova_zc_h, uint16_t, H1_2)
245
+DO_MOVA_Z(sme_mova_zc_s, uint32_t, H1_4)
246
+
247
+void HELPER(sme_mova_zc_d)(void *vd, void *za, void *vg, uint32_t desc)
248
+{
249
+ int i, oprsz = simd_oprsz(desc) / 8;
250
+ uint8_t *pg = vg;
251
+ uint64_t *d = vd;
252
+ uint64_t *a = za;
253
+
254
+ for (i = 0; i < oprsz; i++) {
255
+ if (pg[H1(i)] & 1) {
256
+ d[i] = a[tile_vslice_index(i)];
257
+ }
258
+ }
259
+}
260
+
261
+void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc)
262
+{
263
+ int i, oprsz = simd_oprsz(desc) / 16;
264
+ uint16_t *pg = vg;
265
+ Int128 *d = vd;
266
+ Int128 *a = za;
267
+
268
+ /*
269
+ * Int128 is used here simply to copy 16 bytes, and to simplify
270
+ * the address arithmetic.
271
+ */
272
+ for (i = 0; i < oprsz; i++) {
273
+ if (pg[H2(i)] & 1) {
274
+ d[i] = a[tile_vslice_index(i)];
275
+ }
276
+ }
277
+}
278
+
279
+#undef DO_MOVA_Z
280
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
281
index XXXXXXX..XXXXXXX 100644
282
--- a/target/arm/sve_helper.c
283
+++ b/target/arm/sve_helper.c
284
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm,
285
}
73
}
286
}
74
287
75
static void arm946_initfn(Object *obj)
288
+void HELPER(sve_sel_zpzz_q)(void *vd, void *vn, void *vm,
76
@@ -XXX,XX +XXX,XX @@ static void arm1026_initfn(Object *obj)
289
+ void *vg, uint32_t desc)
77
set_feature(&cpu->env, ARM_FEATURE_AUXCR);
290
+{
78
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
291
+ intptr_t i, opr_sz = simd_oprsz(desc) / 16;
79
set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
292
+ Int128 *d = vd, *n = vn, *m = vm;
80
- set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
293
+ uint16_t *pg = vg;
81
cpu->midr = 0x4106a262;
294
+
82
cpu->reset_fpsid = 0x410110a0;
295
+ for (i = 0; i < opr_sz; i += 1) {
83
cpu->ctr = 0x1dd20d2;
296
+ d[i] = (pg[H2(i)] & 1 ? n : m)[i];
84
cpu->reset_sctlr = 0x00090078;
297
+ }
85
cpu->reset_auxcr = 1;
298
+}
86
+
299
+
87
+ /*
300
/* Two operand comparison controlled by a predicate.
88
+ * ARMv5 does not have the ID_ISAR registers, but we can still
301
* ??? It is very tempting to want to be able to expand this inline
89
+ * set the field to indicate Jazelle support within QEMU.
302
* with x86 instructions, e.g.
90
+ */
303
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
91
+ cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
304
index XXXXXXX..XXXXXXX 100644
92
+
305
--- a/target/arm/translate-sme.c
93
{
306
+++ b/target/arm/translate-sme.c
94
/* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
95
ARMCPRegInfo ifar = {
96
diff --git a/target/arm/translate.c b/target/arm/translate.c
97
index XXXXXXX..XXXXXXX 100644
98
--- a/target/arm/translate.c
99
+++ b/target/arm/translate.c
100
@@ -XXX,XX +XXX,XX @@
307
@@ -XXX,XX +XXX,XX @@
101
#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
308
#include "decode-sme.c.inc"
102
/* currently all emulated v5 cores are also v5TE, so don't bother */
309
103
#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
310
104
-#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
311
+/*
105
+#define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
312
+ * Resolve tile.size[index] to a host pointer, where tile and index
106
#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
313
+ * are always decoded together, dependent on the element size.
107
#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
314
+ */
108
#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
315
+static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
316
+ int tile_index, bool vertical)
317
+{
318
+ int tile = tile_index >> (4 - esz);
319
+ int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz);
320
+ int pos, len, offset;
321
+ TCGv_i32 tmp;
322
+ TCGv_ptr addr;
323
+
324
+ /* Compute the final index, which is Rs+imm. */
325
+ tmp = tcg_temp_new_i32();
326
+ tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs));
327
+ tcg_gen_addi_i32(tmp, tmp, index);
328
+
329
+ /* Prepare a power-of-two modulo via extraction of @len bits. */
330
+ len = ctz32(streaming_vec_reg_size(s)) - esz;
331
+
332
+ if (vertical) {
333
+ /*
334
+ * Compute the byte offset of the index within the tile:
335
+ * (index % (svl / size)) * size
336
+ * = (index % (svl >> esz)) << esz
337
+ * Perform the power-of-two modulo via extraction of the low @len bits.
338
+ * Perform the multiply by shifting left by @pos bits.
339
+ * Perform these operations simultaneously via deposit into zero.
340
+ */
341
+ pos = esz;
342
+ tcg_gen_deposit_z_i32(tmp, tmp, pos, len);
343
+
344
+ /*
345
+ * For big-endian, adjust the indexed column byte offset within
346
+ * the uint64_t host words that make up env->zarray[].
347
+ */
348
+ if (HOST_BIG_ENDIAN && esz < MO_64) {
349
+ tcg_gen_xori_i32(tmp, tmp, 8 - (1 << esz));
350
+ }
351
+ } else {
352
+ /*
353
+ * Compute the byte offset of the index within the tile:
354
+ * (index % (svl / size)) * (size * sizeof(row))
355
+ * = (index % (svl >> esz)) << (esz + log2(sizeof(row)))
356
+ */
357
+ pos = esz + ctz32(sizeof(ARMVectorReg));
358
+ tcg_gen_deposit_z_i32(tmp, tmp, pos, len);
359
+
360
+ /* Row slices are always aligned and need no endian adjustment. */
361
+ }
362
+
363
+ /* The tile byte offset within env->zarray is the row. */
364
+ offset = tile * sizeof(ARMVectorReg);
365
+
366
+ /* Include the byte offset of zarray to make this relative to env. */
367
+ offset += offsetof(CPUARMState, zarray);
368
+ tcg_gen_addi_i32(tmp, tmp, offset);
369
+
370
+ /* Add the byte offset to env to produce the final pointer. */
371
+ addr = tcg_temp_new_ptr();
372
+ tcg_gen_ext_i32_ptr(addr, tmp);
373
+ tcg_temp_free_i32(tmp);
374
+ tcg_gen_add_ptr(addr, addr, cpu_env);
375
+
376
+ return addr;
377
+}
378
+
379
static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
380
{
381
if (!dc_isar_feature(aa64_sme, s)) {
382
@@ -XXX,XX +XXX,XX @@ static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
383
}
384
return true;
385
}
386
+
387
+static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
388
+{
389
+ static gen_helper_gvec_4 * const h_fns[5] = {
390
+ gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
391
+ gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d,
392
+ gen_helper_sve_sel_zpzz_q
393
+ };
394
+ static gen_helper_gvec_3 * const cz_fns[5] = {
395
+ gen_helper_sme_mova_cz_b, gen_helper_sme_mova_cz_h,
396
+ gen_helper_sme_mova_cz_s, gen_helper_sme_mova_cz_d,
397
+ gen_helper_sme_mova_cz_q,
398
+ };
399
+ static gen_helper_gvec_3 * const zc_fns[5] = {
400
+ gen_helper_sme_mova_zc_b, gen_helper_sme_mova_zc_h,
401
+ gen_helper_sme_mova_zc_s, gen_helper_sme_mova_zc_d,
402
+ gen_helper_sme_mova_zc_q,
403
+ };
404
+
405
+ TCGv_ptr t_za, t_zr, t_pg;
406
+ TCGv_i32 t_desc;
407
+ int svl;
408
+
409
+ if (!dc_isar_feature(aa64_sme, s)) {
410
+ return false;
411
+ }
412
+ if (!sme_smza_enabled_check(s)) {
413
+ return true;
414
+ }
415
+
416
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
417
+ t_zr = vec_full_reg_ptr(s, a->zr);
418
+ t_pg = pred_full_reg_ptr(s, a->pg);
419
+
420
+ svl = streaming_vec_reg_size(s);
421
+ t_desc = tcg_constant_i32(simd_desc(svl, svl, 0));
422
+
423
+ if (a->v) {
424
+ /* Vertical slice -- use sme mova helpers. */
425
+ if (a->to_vec) {
426
+ zc_fns[a->esz](t_zr, t_za, t_pg, t_desc);
427
+ } else {
428
+ cz_fns[a->esz](t_za, t_zr, t_pg, t_desc);
429
+ }
430
+ } else {
431
+ /* Horizontal slice -- reuse sve sel helpers. */
432
+ if (a->to_vec) {
433
+ h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc);
434
+ } else {
435
+ h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc);
436
+ }
437
+ }
438
+
439
+ tcg_temp_free_ptr(t_za);
440
+ tcg_temp_free_ptr(t_zr);
441
+ tcg_temp_free_ptr(t_pg);
442
+
443
+ return true;
444
+}
109
--
2.19.1
--
2.25.1
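
A note on the tile arithmetic used throughout the MOVA patch: with N-byte elements there are N interleaved tiles, and row m of tile n is ZA row n + N*m. Viewed as an array of the element type, consecutive elements of a vertical slice are therefore sizeof(row) array slots apart regardless of N, which is the cancellation tile_vslice_index() exploits. A standalone check of that cancellation (ROW_BYTES stands in for sizeof(ARMVectorReg); values are illustrative):

#include <assert.h>
#include <stddef.h>

enum { ROW_BYTES = 256 };   /* stands in for sizeof(ARMVectorReg) */

/* ZA row holding horizontal slice m of tile n, for N-byte elements. */
static size_t za_row(size_t n, size_t N, size_t m)
{
    return n + N * m;
}

int main(void)
{
    for (size_t N = 1; N <= 16; N <<= 1) {
        for (size_t i = 0; i < 4; i++) {
            /* Byte offset of element i in a vertical slice of tile 0,
             * column 0, computed the long way... */
            size_t byteoff = za_row(0, N, i) * ROW_BYTES;
            /* ...and via the cancelled form: an index into an array
             * of N-byte elements, as tile_vslice_index() uses. */
            size_t index = i * ROW_BYTES;
            assert(byteoff == index * N);
        }
    }
    return 0;
}
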
1
From: Markus Armbruster <armbru@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Device models aren't supposed to go on fishing expeditions for
3
We cannot reuse the SVE functions for LD[1-4] and ST[1-4],
4
backends. They should expose suitable properties for the user to set.
4
because those functions accept only a Zreg register number.
5
For onboard devices, board code sets them.
5
For SME, we want to pass a pointer into ZA storage.
6
6
7
Device ssi-sd picks up its block backend in its init() method with
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
drive_get_next() instead. This mistake is already marked FIXME since
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
commit af9e40a.
9
Message-id: 20220708151540.18136-21-richard.henderson@linaro.org
10
11
Unset user_creatable to remove the mistake from our external
12
interface. Since the SSI bus doesn't support hotplug, only -device
13
can be affected. Only certain ARM machines have ssi-sd and provide an
14
SSI bus for it; this patch breaks -device ssi-sd for these machines.
15
No actual use of -device ssi-sd is known.
16
17
Signed-off-by: Markus Armbruster <armbru@redhat.com>
18
Acked-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
19
Acked-by: Thomas Huth <thuth@redhat.com>
20
Message-id: 20181009060835.4608-1-armbru@redhat.com
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
22
---
11
---
23
hw/sd/ssi-sd.c | 2 ++
12
target/arm/helper-sme.h | 82 +++++
24
1 file changed, 2 insertions(+)
13
target/arm/sme.decode | 9 +
14
target/arm/sme_helper.c | 595 +++++++++++++++++++++++++++++++++++++
15
target/arm/translate-sme.c | 70 +++++
16
4 files changed, 756 insertions(+)
25
17
26
diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c
18
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
27
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
28
--- a/hw/sd/ssi-sd.c
20
--- a/target/arm/helper-sme.h
29
+++ b/hw/sd/ssi-sd.c
21
+++ b/target/arm/helper-sme.h
30
@@ -XXX,XX +XXX,XX @@ static void ssi_sd_class_init(ObjectClass *klass, void *data)
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
31
k->cs_polarity = SSI_CS_LOW;
23
DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
32
dc->vmsd = &vmstate_ssi_sd;
24
DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
33
dc->reset = ssi_sd_reset;
25
DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
34
+ /* Reason: init() method uses drive_get_next() */
26
+
35
+ dc->user_creatable = false;
27
+DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
28
+DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
29
+DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
30
+DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
31
+
32
+DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
33
+DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
34
+DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
35
+DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
36
+DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
37
+DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
38
+DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
39
+DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
40
+
41
+DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
42
+DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
43
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
44
+DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
45
+DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
46
+DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
47
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
48
+DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
49
+
50
+DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
51
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
52
+DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
53
+DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
54
+DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
55
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
56
+DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
57
+DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
58
+
59
+DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
60
+DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
61
+DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
62
+DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
63
+DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
64
+DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
65
+DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
66
+DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
67
+
68
+DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
69
+DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
70
+DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
71
+DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
72
+
73
+DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
74
+DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
75
+DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
76
+DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
77
+DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
78
+DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
79
+DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
80
+DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
81
+
82
+DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
83
+DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
84
+DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
85
+DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
86
+DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
87
+DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
88
+DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
89
+DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
90
+
91
+DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
92
+DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
93
+DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
94
+DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
95
+DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
96
+DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
97
+DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
98
+DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
99
+
100
+DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
101
+DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
102
+DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
103
+DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
104
+DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
105
+DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
106
+DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
107
+DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
108
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
109
index XXXXXXX..XXXXXXX 100644
110
--- a/target/arm/sme.decode
111
+++ b/target/arm/sme.decode
112
@@ -XXX,XX +XXX,XX @@ MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \
113
&mova to_vec=1 rs=%mova_rs
114
MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \
115
&mova to_vec=1 rs=%mova_rs esz=4
116
+
117
+### SME Memory
118
+
119
+&ldst esz rs pg rn rm za_imm v:bool st:bool
120
+
121
+LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
122
+ &ldst rs=%mova_rs
123
+LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
124
+ &ldst esz=4 rs=%mova_rs
125
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
126
index XXXXXXX..XXXXXXX 100644
127
--- a/target/arm/sme_helper.c
128
+++ b/target/arm/sme_helper.c
129
@@ -XXX,XX +XXX,XX @@
130
131
#include "qemu/osdep.h"
132
#include "cpu.h"
133
+#include "internals.h"
134
#include "tcg/tcg-gvec-desc.h"
135
#include "exec/helper-proto.h"
136
+#include "exec/cpu_ldst.h"
137
+#include "exec/exec-all.h"
138
#include "qemu/int128.h"
139
#include "vec_internal.h"
140
+#include "sve_ldst_internal.h"
141
142
/* ResetSVEState */
143
void arm_reset_sve_state(CPUARMState *env)
144
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc)
36
}
145
}
37
146
38
static const TypeInfo ssi_sd_info = {
147
#undef DO_MOVA_Z
148
+
149
+/*
150
+ * Clear elements in a tile slice comprising len bytes.
151
+ */
152
+
153
+typedef void ClearFn(void *ptr, size_t off, size_t len);
154
+
155
+static void clear_horizontal(void *ptr, size_t off, size_t len)
156
+{
157
+ memset(ptr + off, 0, len);
158
+}
159
+
160
+static void clear_vertical_b(void *vptr, size_t off, size_t len)
161
+{
162
+ for (size_t i = 0; i < len; ++i) {
163
+ *(uint8_t *)(vptr + tile_vslice_offset(i + off)) = 0;
164
+ }
165
+}
166
+
167
+static void clear_vertical_h(void *vptr, size_t off, size_t len)
168
+{
169
+ for (size_t i = 0; i < len; i += 2) {
170
+ *(uint16_t *)(vptr + tile_vslice_offset(i + off)) = 0;
171
+ }
172
+}
173
+
174
+static void clear_vertical_s(void *vptr, size_t off, size_t len)
175
+{
176
+ for (size_t i = 0; i < len; i += 4) {
177
+ *(uint32_t *)(vptr + tile_vslice_offset(i + off)) = 0;
178
+ }
179
+}
180
+
181
+static void clear_vertical_d(void *vptr, size_t off, size_t len)
182
+{
183
+ for (size_t i = 0; i < len; i += 8) {
184
+ *(uint64_t *)(vptr + tile_vslice_offset(i + off)) = 0;
185
+ }
186
+}
187
+
188
+static void clear_vertical_q(void *vptr, size_t off, size_t len)
189
+{
190
+ for (size_t i = 0; i < len; i += 16) {
191
+ memset(vptr + tile_vslice_offset(i + off), 0, 16);
192
+ }
193
+}
194
+
195
+/*
196
+ * Copy elements from an array into a tile slice comprising len bytes.
197
+ */
198
+
199
+typedef void CopyFn(void *dst, const void *src, size_t len);
200
+
201
+static void copy_horizontal(void *dst, const void *src, size_t len)
202
+{
203
+ memcpy(dst, src, len);
204
+}
205
+
206
+static void copy_vertical_b(void *vdst, const void *vsrc, size_t len)
207
+{
208
+ const uint8_t *src = vsrc;
209
+ uint8_t *dst = vdst;
210
+ size_t i;
211
+
212
+ for (i = 0; i < len; ++i) {
213
+ dst[tile_vslice_index(i)] = src[i];
214
+ }
215
+}
216
+
217
+static void copy_vertical_h(void *vdst, const void *vsrc, size_t len)
218
+{
219
+ const uint16_t *src = vsrc;
220
+ uint16_t *dst = vdst;
221
+ size_t i;
222
+
223
+ for (i = 0; i < len / 2; ++i) {
224
+ dst[tile_vslice_index(i)] = src[i];
225
+ }
226
+}
227
+
228
+static void copy_vertical_s(void *vdst, const void *vsrc, size_t len)
229
+{
230
+ const uint32_t *src = vsrc;
231
+ uint32_t *dst = vdst;
232
+ size_t i;
233
+
234
+ for (i = 0; i < len / 4; ++i) {
235
+ dst[tile_vslice_index(i)] = src[i];
236
+ }
237
+}
238
+
239
+static void copy_vertical_d(void *vdst, const void *vsrc, size_t len)
240
+{
241
+ const uint64_t *src = vsrc;
242
+ uint64_t *dst = vdst;
243
+ size_t i;
244
+
245
+ for (i = 0; i < len / 8; ++i) {
246
+ dst[tile_vslice_index(i)] = src[i];
247
+ }
248
+}
249
+
250
+static void copy_vertical_q(void *vdst, const void *vsrc, size_t len)
251
+{
252
+ for (size_t i = 0; i < len; i += 16) {
253
+ memcpy(vdst + tile_vslice_offset(i), vsrc + i, 16);
254
+ }
255
+}
256
+
257
+/*
258
+ * Host and TLB primitives for vertical tile slice addressing.
259
+ */
260
+
261
+#define DO_LD(NAME, TYPE, HOST, TLB) \
262
+static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \
263
+{ \
264
+ TYPE val = HOST(host); \
265
+ *(TYPE *)(za + tile_vslice_offset(off)) = val; \
266
+} \
267
+static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \
268
+ intptr_t off, target_ulong addr, uintptr_t ra) \
269
+{ \
270
+ TYPE val = TLB(env, useronly_clean_ptr(addr), ra); \
271
+ *(TYPE *)(za + tile_vslice_offset(off)) = val; \
272
+}
273
+
274
+#define DO_ST(NAME, TYPE, HOST, TLB) \
275
+static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \
276
+{ \
277
+ TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \
278
+ HOST(host, val); \
279
+} \
280
+static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \
281
+ intptr_t off, target_ulong addr, uintptr_t ra) \
282
+{ \
283
+ TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \
284
+ TLB(env, useronly_clean_ptr(addr), val, ra); \
285
+}
286
+
287
+/*
288
+ * The ARMVectorReg elements are stored in host-endian 64-bit units.
289
+ * For 128-bit quantities, the sequence defined by the Elem[] pseudocode
290
+ * corresponds to storing the two 64-bit pieces in little-endian order.
291
+ */
292
+#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB) \
293
+static inline void HNAME##_host(void *za, intptr_t off, void *host) \
294
+{ \
295
+ uint64_t val0 = HOST(host), val1 = HOST(host + 8); \
296
+ uint64_t *ptr = za + off; \
297
+ ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \
298
+} \
299
+static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
300
+{ \
301
+ HNAME##_host(za, tile_vslice_offset(off), host); \
302
+} \
303
+static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \
304
+ target_ulong addr, uintptr_t ra) \
305
+{ \
306
+ uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra); \
307
+ uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra); \
308
+ uint64_t *ptr = za + off; \
309
+ ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \
310
+} \
311
+static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \
312
+ target_ulong addr, uintptr_t ra) \
313
+{ \
314
+ HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \
315
+}
316
+
317
+#define DO_STQ(HNAME, VNAME, BE, HOST, TLB) \
318
+static inline void HNAME##_host(void *za, intptr_t off, void *host) \
319
+{ \
320
+ uint64_t *ptr = za + off; \
321
+ HOST(host, ptr[BE]); \
322
+ HOST(host + 8, ptr[!BE]); \
323
+} \
324
+static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
325
+{ \
326
+ HNAME##_host(za, tile_vslice_offset(off), host); \
327
+} \
328
+static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \
329
+ target_ulong addr, uintptr_t ra) \
330
+{ \
331
+ uint64_t *ptr = za + off; \
332
+ TLB(env, useronly_clean_ptr(addr), ptr[BE], ra); \
333
+ TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra); \
334
+} \
335
+static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \
336
+ target_ulong addr, uintptr_t ra) \
337
+{ \
338
+ HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \
339
+}
340
+
341
+DO_LD(ld1b, uint8_t, ldub_p, cpu_ldub_data_ra)
342
+DO_LD(ld1h_be, uint16_t, lduw_be_p, cpu_lduw_be_data_ra)
343
+DO_LD(ld1h_le, uint16_t, lduw_le_p, cpu_lduw_le_data_ra)
344
+DO_LD(ld1s_be, uint32_t, ldl_be_p, cpu_ldl_be_data_ra)
345
+DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra)
346
+DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra)
347
+DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra)
348
+
349
+DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra)
350
+DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra)
351
+
352
+DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra)
353
+DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra)
354
+DO_ST(st1h_le, uint16_t, stw_le_p, cpu_stw_le_data_ra)
355
+DO_ST(st1s_be, uint32_t, stl_be_p, cpu_stl_be_data_ra)
356
+DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra)
357
+DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra)
358
+DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra)
359
+
360
+DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra)
361
+DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra)
362
+
363
+#undef DO_LD
364
+#undef DO_ST
365
+#undef DO_LDQ
366
+#undef DO_STQ
367
+
368
+/*
369
+ * Common helper for all contiguous predicated loads.
370
+ */
371
+
372
+static inline QEMU_ALWAYS_INLINE
373
+void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
374
+ const target_ulong addr, uint32_t desc, const uintptr_t ra,
375
+ const int esz, uint32_t mtedesc, bool vertical,
376
+ sve_ldst1_host_fn *host_fn,
377
+ sve_ldst1_tlb_fn *tlb_fn,
378
+ ClearFn *clr_fn,
379
+ CopyFn *cpy_fn)
380
+{
381
+ const intptr_t reg_max = simd_oprsz(desc);
382
+ const intptr_t esize = 1 << esz;
383
+ intptr_t reg_off, reg_last;
384
+ SVEContLdSt info;
385
+ void *host;
386
+ int flags;
387
+
388
+ /* Find the active elements. */
389
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) {
390
+ /* The entire predicate was false; no load occurs. */
391
+ clr_fn(za, 0, reg_max);
392
+ return;
393
+ }
394
+
395
+ /* Probe the page(s). Exit with exception for any invalid page. */
396
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra);
397
+
398
+ /* Handle watchpoints for all active elements. */
399
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize,
400
+ BP_MEM_READ, ra);
401
+
402
+ /*
403
+ * Handle mte checks for all active elements.
404
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
405
+ */
406
+ if (mtedesc) {
407
+ sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize,
408
+ mtedesc, ra);
409
+ }
410
+
411
+ flags = info.page[0].flags | info.page[1].flags;
412
+ if (unlikely(flags != 0)) {
413
+#ifdef CONFIG_USER_ONLY
414
+ g_assert_not_reached();
415
+#else
416
+ /*
417
+ * At least one page includes MMIO.
418
+ * Any bus operation can fail with cpu_transaction_failed,
419
+ * which for ARM will raise SyncExternal. Perform the load
420
+ * into scratch memory to preserve register state until the end.
421
+ */
422
+ ARMVectorReg scratch = { };
423
+
424
+ reg_off = info.reg_off_first[0];
425
+ reg_last = info.reg_off_last[1];
426
+ if (reg_last < 0) {
427
+ reg_last = info.reg_off_split;
428
+ if (reg_last < 0) {
429
+ reg_last = info.reg_off_last[0];
430
+ }
431
+ }
432
+
433
+ do {
434
+ uint64_t pg = vg[reg_off >> 6];
435
+ do {
436
+ if ((pg >> (reg_off & 63)) & 1) {
437
+ tlb_fn(env, &scratch, reg_off, addr + reg_off, ra);
438
+ }
439
+ reg_off += esize;
440
+ } while (reg_off & 63);
441
+ } while (reg_off <= reg_last);
442
+
443
+ cpy_fn(za, &scratch, reg_max);
444
+ return;
445
+#endif
446
+ }
447
+
448
+ /* The entire operation is in RAM, on valid pages. */
449
+
450
+ reg_off = info.reg_off_first[0];
451
+ reg_last = info.reg_off_last[0];
452
+ host = info.page[0].host;
453
+
454
+ if (!vertical) {
455
+ memset(za, 0, reg_max);
456
+ } else if (reg_off) {
457
+ clr_fn(za, 0, reg_off);
458
+ }
459
+
460
+ while (reg_off <= reg_last) {
461
+ uint64_t pg = vg[reg_off >> 6];
462
+ do {
463
+ if ((pg >> (reg_off & 63)) & 1) {
464
+ host_fn(za, reg_off, host + reg_off);
465
+ } else if (vertical) {
466
+ clr_fn(za, reg_off, esize);
467
+ }
468
+ reg_off += esize;
469
+ } while (reg_off <= reg_last && (reg_off & 63));
470
+ }
471
+
472
+ /*
473
+ * Use the slow path to manage the cross-page misalignment.
474
+ * But we know this is RAM and cannot trap.
475
+ */
476
+ reg_off = info.reg_off_split;
477
+ if (unlikely(reg_off >= 0)) {
478
+ tlb_fn(env, za, reg_off, addr + reg_off, ra);
479
+ }
480
+
481
+ reg_off = info.reg_off_first[1];
482
+ if (unlikely(reg_off >= 0)) {
483
+ reg_last = info.reg_off_last[1];
484
+ host = info.page[1].host;
485
+
486
+ do {
487
+ uint64_t pg = vg[reg_off >> 6];
488
+ do {
489
+ if ((pg >> (reg_off & 63)) & 1) {
490
+ host_fn(za, reg_off, host + reg_off);
491
+ } else if (vertical) {
492
+ clr_fn(za, reg_off, esize);
493
+ }
494
+ reg_off += esize;
495
+ } while (reg_off & 63);
496
+ } while (reg_off <= reg_last);
497
+ }
498
+}
499
+
500
+static inline QEMU_ALWAYS_INLINE
501
+void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg,
502
+ target_ulong addr, uint32_t desc, uintptr_t ra,
503
+ const int esz, bool vertical,
504
+ sve_ldst1_host_fn *host_fn,
505
+ sve_ldst1_tlb_fn *tlb_fn,
506
+ ClearFn *clr_fn,
507
+ CopyFn *cpy_fn)
508
+{
509
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
510
+ int bit55 = extract64(addr, 55, 1);
511
+
512
+ /* Remove mtedesc from the normal sve descriptor. */
513
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
514
+
515
+ /* Perform gross MTE suppression early. */
516
+ if (!tbi_check(desc, bit55) ||
517
+ tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
518
+ mtedesc = 0;
519
+ }
520
+
521
+ sme_ld1(env, za, vg, addr, desc, ra, esz, mtedesc, vertical,
522
+ host_fn, tlb_fn, clr_fn, cpy_fn);
523
+}
524
+
525
+#define DO_LD(L, END, ESZ) \
526
+void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg, \
527
+ target_ulong addr, uint32_t desc) \
528
+{ \
529
+ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \
530
+ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \
531
+ clear_horizontal, copy_horizontal); \
532
+} \
533
+void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg, \
534
+ target_ulong addr, uint32_t desc) \
535
+{ \
536
+ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \
537
+ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \
538
+ clear_vertical_##L, copy_vertical_##L); \
539
+} \
540
+void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \
541
+ target_ulong addr, uint32_t desc) \
542
+{ \
543
+ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \
544
+ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \
545
+ clear_horizontal, copy_horizontal); \
546
+} \
547
+void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \
548
+ target_ulong addr, uint32_t desc) \
549
+{ \
550
+ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \
551
+ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \
552
+ clear_vertical_##L, copy_vertical_##L); \
553
+}
554
+
555
+DO_LD(b, , MO_8)
556
+DO_LD(h, _be, MO_16)
557
+DO_LD(h, _le, MO_16)
558
+DO_LD(s, _be, MO_32)
559
+DO_LD(s, _le, MO_32)
560
+DO_LD(d, _be, MO_64)
561
+DO_LD(d, _le, MO_64)
562
+DO_LD(q, _be, MO_128)
563
+DO_LD(q, _le, MO_128)
564
+
565
+#undef DO_LD
566
+
567
+/*
568
+ * Common helper for all contiguous predicated stores.
569
+ */
570
+
571
+static inline QEMU_ALWAYS_INLINE
572
+void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
573
+ const target_ulong addr, uint32_t desc, const uintptr_t ra,
574
+ const int esz, uint32_t mtedesc, bool vertical,
575
+ sve_ldst1_host_fn *host_fn,
576
+ sve_ldst1_tlb_fn *tlb_fn)
577
+{
578
+ const intptr_t reg_max = simd_oprsz(desc);
579
+ const intptr_t esize = 1 << esz;
580
+ intptr_t reg_off, reg_last;
581
+ SVEContLdSt info;
582
+ void *host;
583
+ int flags;
584
+
585
+ /* Find the active elements. */
586
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) {
587
+ /* The entire predicate was false; no store occurs. */
588
+ return;
589
+ }
590
+
591
+ /* Probe the page(s). Exit with exception for any invalid page. */
592
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra);
593
+
594
+ /* Handle watchpoints for all active elements. */
595
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize,
596
+ BP_MEM_WRITE, ra);
597
+
598
+ /*
599
+ * Handle mte checks for all active elements.
600
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
601
+ */
602
+ if (mtedesc) {
603
+ sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize,
604
+ mtedesc, ra);
605
+ }
606
+
607
+ flags = info.page[0].flags | info.page[1].flags;
608
+ if (unlikely(flags != 0)) {
609
+#ifdef CONFIG_USER_ONLY
610
+ g_assert_not_reached();
611
+#else
612
+ /*
613
+ * At least one page includes MMIO.
614
+ * Any bus operation can fail with cpu_transaction_failed,
615
+ * which for ARM will raise SyncExternal. We cannot avoid
616
+ * this fault and will leave with the store incomplete.
617
+ */
618
+ reg_off = info.reg_off_first[0];
619
+ reg_last = info.reg_off_last[1];
620
+ if (reg_last < 0) {
621
+ reg_last = info.reg_off_split;
622
+ if (reg_last < 0) {
623
+ reg_last = info.reg_off_last[0];
624
+ }
625
+ }
626
+
627
+ do {
628
+ uint64_t pg = vg[reg_off >> 6];
629
+ do {
630
+ if ((pg >> (reg_off & 63)) & 1) {
631
+                    tlb_fn(env, za, reg_off, addr + reg_off, ra);
+                }
+                reg_off += esize;
+            } while (reg_off & 63);
+        } while (reg_off <= reg_last);
+        return;
+#endif
+    }
+
+    reg_off = info.reg_off_first[0];
+    reg_last = info.reg_off_last[0];
+    host = info.page[0].host;
+
+    while (reg_off <= reg_last) {
+        uint64_t pg = vg[reg_off >> 6];
+        do {
+            if ((pg >> (reg_off & 63)) & 1) {
+                host_fn(za, reg_off, host + reg_off);
+            }
+            reg_off += 1 << esz;
+        } while (reg_off <= reg_last && (reg_off & 63));
+    }
+
+    /*
+     * Use the slow path to manage the cross-page misalignment.
+     * But we know this is RAM and cannot trap.
+     */
+    reg_off = info.reg_off_split;
+    if (unlikely(reg_off >= 0)) {
+        tlb_fn(env, za, reg_off, addr + reg_off, ra);
+    }
+
+    reg_off = info.reg_off_first[1];
+    if (unlikely(reg_off >= 0)) {
+        reg_last = info.reg_off_last[1];
+        host = info.page[1].host;
+
+        do {
+            uint64_t pg = vg[reg_off >> 6];
+            do {
+                if ((pg >> (reg_off & 63)) & 1) {
+                    host_fn(za, reg_off, host + reg_off);
+                }
+                reg_off += 1 << esz;
+            } while (reg_off & 63);
+        } while (reg_off <= reg_last);
+    }
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr,
+                 uint32_t desc, uintptr_t ra, int esz, bool vertical,
+                 sve_ldst1_host_fn *host_fn,
+                 sve_ldst1_tlb_fn *tlb_fn)
+{
+    uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+    int bit55 = extract64(addr, 55, 1);
+
+    /* Remove mtedesc from the normal sve descriptor. */
+    desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+    /* Perform gross MTE suppression early. */
+    if (!tbi_check(desc, bit55) ||
+        tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+        mtedesc = 0;
+    }
+
+    sme_st1(env, za, vg, addr, desc, ra, esz, mtedesc,
+            vertical, host_fn, tlb_fn);
+}
+
+#define DO_ST(L, END, ESZ) \
+void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg, \
+                                 target_ulong addr, uint32_t desc) \
+{ \
+    sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \
+            sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \
+} \
+void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg, \
+                                 target_ulong addr, uint32_t desc) \
+{ \
+    sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \
+            sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \
+} \
+void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \
+                                     target_ulong addr, uint32_t desc) \
+{ \
+    sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \
+                sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \
+} \
+void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \
+                                     target_ulong addr, uint32_t desc) \
+{ \
+    sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \
+                sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \
+}
+
+DO_ST(b, , MO_8)
+DO_ST(h, _be, MO_16)
+DO_ST(h, _le, MO_16)
+DO_ST(s, _be, MO_32)
+DO_ST(s, _le, MO_32)
+DO_ST(d, _be, MO_64)
+DO_ST(d, _le, MO_64)
+DO_ST(q, _be, MO_128)
+DO_ST(q, _le, MO_128)
+
+#undef DO_ST
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
 
     return true;
 }
+
+static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
+{
+    typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32);
+
+    /*
+     * Indexed by [esz][be][v][mte][st], which is (except for load/store)
+     * also the order in which the elements appear in the function names,
+     * and so how we must concatenate the pieces.
+     */
+
+#define FN_LS(F)     { gen_helper_sme_ld1##F, gen_helper_sme_st1##F }
+#define FN_MTE(F)    { FN_LS(F), FN_LS(F##_mte) }
+#define FN_HV(F)     { FN_MTE(F##_h), FN_MTE(F##_v) }
+#define FN_END(L, B) { FN_HV(L), FN_HV(B) }
+
+    static GenLdSt1 * const fns[5][2][2][2][2] = {
+        FN_END(b, b),
+        FN_END(h_le, h_be),
+        FN_END(s_le, s_be),
+        FN_END(d_le, d_be),
+        FN_END(q_le, q_be),
+    };
+
+#undef FN_LS
+#undef FN_MTE
+#undef FN_HV
+#undef FN_END
+
+    TCGv_ptr t_za, t_pg;
+    TCGv_i64 addr;
+    int svl, desc = 0;
+    bool be = s->be_data == MO_BE;
+    bool mte = s->mte_active[0];
+
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (!sme_smza_enabled_check(s)) {
+        return true;
+    }
+
+    t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
+    t_pg = pred_full_reg_ptr(s, a->pg);
+    addr = tcg_temp_new_i64();
+
+    tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz);
+    tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
+
+    if (mte) {
+        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+        desc = FIELD_DP32(desc, MTEDESC, WRITE, a->st);
+        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << a->esz) - 1);
+        desc <<= SVE_MTEDESC_SHIFT;
+    } else {
+        addr = clean_data_tbi(s, addr);
+    }
+    svl = streaming_vec_reg_size(s);
+    desc = simd_desc(svl, svl, desc);
+
+    fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr,
+                                      tcg_constant_i32(desc));
+
+    tcg_temp_free_ptr(t_za);
+    tcg_temp_free_ptr(t_pg);
+    tcg_temp_free_i64(addr);
+    return true;
+}
--
2.25.1
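
A side note for readers working through the trans_LDST1 table above: the five
indices concatenate, in order, into the generated helper names. The sketch
below is not part of any patch in this series; it is a standalone illustration
(the esz_name array and the printf driver are invented for the example) of how
an index tuple resolves to a helper name such as gen_helper_sme_st1d_le_v_mte.

#include <stdio.h>

/* Element-size suffixes, in the same order as the fns[5][2][2][2][2] table. */
static const char *esz_name[5] = { "b", "h", "s", "d", "q" };

int main(void)
{
    int esz = 3, be = 0, v = 1, mte = 1, st = 1;

    /* fns[3][0][1][1][1] selects gen_helper_sme_st1d_le_v_mte */
    printf("gen_helper_sme_%s1%s%s_%s%s\n",
           st ? "st" : "ld", esz_name[esz],
           esz == 0 ? "" : (be ? "_be" : "_le"),
           v ? "v" : "h", mte ? "_mte" : "");
    return 0;
}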
From: Richard Henderson <richard.henderson@linaro.org>

Add a TCGv_ptr base argument, which will be cpu_env for SVE.
We will reuse this for SME save and restore array insns.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-22-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h |  3 +++
 target/arm/translate-sve.c | 48 ++++++++++++++++++++++++++++----------
 2 files changed, 39 insertions(+), 12 deletions(-)

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, int64_t shift,
                   uint32_t opr_sz, uint32_t max_sz);
 
+void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
+void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
+
 #endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
  * The load should begin at the address Rn + IMM.
  */
 
-static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
+void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
+                 int len, int rn, int imm)
 {
     int len_align = QEMU_ALIGN_DOWN(len, 8);
     int len_remain = len % 8;
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
         t0 = tcg_temp_new_i64();
         for (i = 0; i < len_align; i += 8) {
             tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
-            tcg_gen_st_i64(t0, cpu_env, vofs + i);
+            tcg_gen_st_i64(t0, base, vofs + i);
             tcg_gen_addi_i64(clean_addr, clean_addr, 8);
         }
         tcg_temp_free_i64(t0);
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
         clean_addr = new_tmp_a64_local(s);
         tcg_gen_mov_i64(clean_addr, t0);
 
+        if (base != cpu_env) {
+            TCGv_ptr b = tcg_temp_local_new_ptr();
+            tcg_gen_mov_ptr(b, base);
+            base = b;
+        }
+
         gen_set_label(loop);
 
         t0 = tcg_temp_new_i64();
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
         tcg_gen_addi_i64(clean_addr, clean_addr, 8);
 
         tp = tcg_temp_new_ptr();
-        tcg_gen_add_ptr(tp, cpu_env, i);
+        tcg_gen_add_ptr(tp, base, i);
         tcg_gen_addi_ptr(i, i, 8);
         tcg_gen_st_i64(t0, tp, vofs);
         tcg_temp_free_ptr(tp);
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
 
         tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
         tcg_temp_free_ptr(i);
+
+        if (base != cpu_env) {
+            tcg_temp_free_ptr(base);
+            assert(len_remain == 0);
+        }
     }
 
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
         default:
             g_assert_not_reached();
         }
-        tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
+        tcg_gen_st_i64(t0, base, vofs + len_align);
         tcg_temp_free_i64(t0);
     }
 }
 
 /* Similarly for stores. */
-static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
+void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
+                 int len, int rn, int imm)
 {
     int len_align = QEMU_ALIGN_DOWN(len, 8);
     int len_remain = len % 8;
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
 
     t0 = tcg_temp_new_i64();
     for (i = 0; i < len_align; i += 8) {
-        tcg_gen_ld_i64(t0, cpu_env, vofs + i);
+        tcg_gen_ld_i64(t0, base, vofs + i);
         tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
         tcg_gen_addi_i64(clean_addr, clean_addr, 8);
     }
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
         clean_addr = new_tmp_a64_local(s);
         tcg_gen_mov_i64(clean_addr, t0);
 
+        if (base != cpu_env) {
+            TCGv_ptr b = tcg_temp_local_new_ptr();
+            tcg_gen_mov_ptr(b, base);
+            base = b;
+        }
+
         gen_set_label(loop);
 
         t0 = tcg_temp_new_i64();
         tp = tcg_temp_new_ptr();
-        tcg_gen_add_ptr(tp, cpu_env, i);
+        tcg_gen_add_ptr(tp, base, i);
         tcg_gen_ld_i64(t0, tp, vofs);
         tcg_gen_addi_ptr(i, i, 8);
         tcg_temp_free_ptr(tp);
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
 
         tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
         tcg_temp_free_ptr(i);
+
+        if (base != cpu_env) {
+            tcg_temp_free_ptr(base);
+            assert(len_remain == 0);
+        }
     }
 
     /* Predicate register stores can be any multiple of 2. */
     if (len_remain) {
         t0 = tcg_temp_new_i64();
-        tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
+        tcg_gen_ld_i64(t0, base, vofs + len_align);
 
         switch (len_remain) {
         case 2:
@@ -XXX,XX +XXX,XX @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = vec_full_reg_size(s);
         int off = vec_full_reg_offset(s, a->rd);
-        do_ldr(s, off, size, a->rn, a->imm * size);
+        gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = pred_full_reg_size(s);
         int off = pred_full_reg_offset(s, a->rd);
-        do_ldr(s, off, size, a->rn, a->imm * size);
+        gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = vec_full_reg_size(s);
         int off = vec_full_reg_offset(s, a->rd);
-        do_str(s, off, size, a->rn, a->imm * size);
+        gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = pred_full_reg_size(s);
         int off = pred_full_reg_offset(s, a->rd);
-        do_str(s, off, size, a->rn, a->imm * size);
+        gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
--
2.25.1
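
One detail of the LDST1 patch earlier in this series that is easy to miss is
how a single 32-bit descriptor carries both the ordinary simd descriptor and
the MTE control bits: trans_LDST1 shifts the MTE bits up by SVE_MTEDESC_SHIFT
before calling simd_desc(), and sme_st1_mte() peels them off again. The
standalone model below is illustrative only; the two shift constants are
stand-ins, not the real QEMU values.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SIMD_DATA_SHIFT   10   /* stand-in value */
#define SVE_MTEDESC_SHIFT 12   /* stand-in value */

int main(void)
{
    uint32_t sve_desc = 0x2a5;  /* low bits: the ordinary simd descriptor */
    uint32_t mtedesc = 0x33;    /* MTE control bits */

    /* translate side: pack the MTE bits above the simd descriptor */
    uint32_t desc = (mtedesc << (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT)) | sve_desc;

    /* helper side: split them apart again */
    uint32_t mte_out = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
    uint32_t sve_out = desc & ((1u << (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT)) - 1);

    assert(mte_out == mtedesc && sve_out == sve_desc);
    printf("packed=0x%08x mte=0x%x sve=0x%x\n", desc, mte_out, sve_out);
    return 0;
}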
From: Richard Henderson <richard.henderson@linaro.org>

We can reuse the SVE functions for LDR and STR, passing in the
base of the ZA vector and a zero offset.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-23-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme.decode      |  7 +++++++
 target/arm/translate-sme.c | 24 ++++++++++++++++++++++++
 2 files changed, 31 insertions(+)

diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
        &ldst rs=%mova_rs
 LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
        &ldst esz=4 rs=%mova_rs
+
+&ldstr rv rn imm
+@ldstr ....... ... . ...... .. ... rn:5 . imm:4 \
+       &ldstr rv=%mova_rs
+
+LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
+STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
     tcg_temp_free_i64(addr);
     return true;
 }
+
+typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int);
+
+static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)
+{
+    int svl = streaming_vec_reg_size(s);
+    int imm = a->imm;
+    TCGv_ptr base;
+
+    if (!sme_za_enabled_check(s)) {
+        return true;
+    }
+
+    /* ZA[n] equates to ZA0H.B[n]. */
+    base = get_tile_rowcol(s, MO_8, a->rv, imm, false);
+
+    fn(s, base, 0, svl, a->rn, imm * svl);
+
+    tcg_temp_free_ptr(base);
+    return true;
+}
+
+TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
+TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
--
2.25.1
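
To make the do_ldst_r() arithmetic above concrete: each ZA row holds svl
bytes, the row is selected by Wv plus the immediate (via get_tile_rowcol),
and the memory address is Rn plus imm * svl. A worked example, with an
invented vector length and register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int svl = 32;          /* streaming_vec_reg_size(s): 32 bytes, illustrative */
    int imm = 3;           /* a->imm: selects the ZA row and scales the offset */
    uint64_t rn = 0x1000;  /* base register contents, illustrative */

    printf("bytes per row = %d\n", svl);                     /* 32 */
    printf("memory offset = %d\n", imm * svl);               /* 96 */
    printf("address = 0x%llx\n",
           (unsigned long long)(rn + (uint64_t)imm * svl));  /* 0x1060 */
    return 0;
}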
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-24-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h    |  5 +++
 target/arm/sme.decode      | 11 +++++
 target/arm/sme_helper.c    | 90 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sme.c | 31 +++++++++++++
 4 files changed, 137 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
 DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
 DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
 DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
 
 LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
 STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
+
+### SME Add Vector to Array
+
+&adda zad zn pm pn
+@adda_32 ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda
+@adda_64 ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda
+
+ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32
+ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32
+ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64
+ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ST(q, _be, MO_128)
 DO_ST(q, _le, MO_128)
 
 #undef DO_ST
+
+void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
+    uint64_t *pn = vpn, *pm = vpm;
+    uint32_t *zda = vzda, *zn = vzn;
+
+    for (row = 0; row < oprsz; ) {
+        uint64_t pa = pn[row >> 4];
+        do {
+            if (pa & 1) {
+                for (col = 0; col < oprsz; ) {
+                    uint64_t pb = pm[col >> 4];
+                    do {
+                        if (pb & 1) {
+                            zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)];
+                        }
+                        pb >>= 4;
+                    } while (++col & 15);
+                }
+            }
+            pa >>= 4;
+        } while (++row & 15);
+    }
+}
+
+void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+    uint8_t *pn = vpn, *pm = vpm;
+    uint64_t *zda = vzda, *zn = vzn;
+
+    for (row = 0; row < oprsz; ++row) {
+        if (pn[H1(row)] & 1) {
+            for (col = 0; col < oprsz; ++col) {
+                if (pm[H1(col)] & 1) {
+                    zda[tile_vslice_index(row) + col] += zn[col];
+                }
+            }
+        }
+    }
+}
+
+void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
+    uint64_t *pn = vpn, *pm = vpm;
+    uint32_t *zda = vzda, *zn = vzn;
+
+    for (row = 0; row < oprsz; ) {
+        uint64_t pa = pn[row >> 4];
+        do {
+            if (pa & 1) {
+                uint32_t zn_row = zn[H4(row)];
+                for (col = 0; col < oprsz; ) {
+                    uint64_t pb = pm[col >> 4];
+                    do {
+                        if (pb & 1) {
+                            zda[tile_vslice_index(row) + H4(col)] += zn_row;
+                        }
+                        pb >>= 4;
+                    } while (++col & 15);
+                }
+            }
+            pa >>= 4;
+        } while (++row & 15);
+    }
+}
+
+void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+    uint8_t *pn = vpn, *pm = vpm;
+    uint64_t *zda = vzda, *zn = vzn;
+
+    for (row = 0; row < oprsz; ++row) {
+        if (pn[H1(row)] & 1) {
+            uint64_t zn_row = zn[row];
+            for (col = 0; col < oprsz; ++col) {
+                if (pm[H1(col)] & 1) {
+                    zda[tile_vslice_index(row) + col] += zn_row;
+                }
+            }
+        }
+    }
+}
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)
 
 TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
 TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
+
+static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz,
+                    gen_helper_gvec_4 *fn)
+{
+    int svl = streaming_vec_reg_size(s);
+    uint32_t desc = simd_desc(svl, svl, 0);
+    TCGv_ptr za, zn, pn, pm;
+
+    if (!sme_smza_enabled_check(s)) {
+        return true;
+    }
+
+    /* Sum XZR+zad to find ZAd. */
+    za = get_tile_rowcol(s, esz, 31, a->zad, false);
+    zn = vec_full_reg_ptr(s, a->zn);
+    pn = pred_full_reg_ptr(s, a->pn);
+    pm = pred_full_reg_ptr(s, a->pm);
+
+    fn(za, zn, pn, pm, tcg_constant_i32(desc));
+
+    tcg_temp_free_ptr(za);
+    tcg_temp_free_ptr(zn);
+    tcg_temp_free_ptr(pn);
+    tcg_temp_free_ptr(pm);
+    return true;
+}
+
+TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s)
+TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
+TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
+TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
--
2.25.1
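
The predicate indexing in sme_addha_s above follows the usual SVE layout: one
predicate bit per byte of vector, so for 32-bit elements only every fourth bit
matters, and one uint64_t of predicate covers sixteen elements. A minimal
sketch of that rule, with an invented predicate value:

#include <stdint.h>
#include <stdio.h>

/*
 * One predicate bit per byte: for 4-byte elements, element i within a
 * 64-bit predicate word is governed by bit 4 * (i % 16).
 */
static int pred_elt_active_s(uint64_t pred64, int elt)
{
    return (pred64 >> (4 * (elt & 15))) & 1;
}

int main(void)
{
    uint64_t pred = 0x11;  /* elements 0 and 1 active, illustrative */

    for (int i = 0; i < 4; i++) {
        printf("element %d: %s\n", i, pred_elt_active_s(pred, i) ? "on" : "off");
    }
    return 0;
}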
From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-25-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h    |  5 +++
 target/arm/sme.decode      |  9 +++++
 target/arm/sme_helper.c    | 69 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sme.c | 32 ++++++++++++++++++
 4 files changed, 115 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32
 ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32
 ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64
 ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
+
+### SME Outer Product
+
+&op zad zn zm pm pn sub:bool
+@op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op
+@op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op
+
+FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
+FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/cpu_ldst.h"
 #include "exec/exec-all.h"
 #include "qemu/int128.h"
+#include "fpu/softfloat.h"
 #include "vec_internal.h"
 #include "sve_ldst_internal.h"
 
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
         }
     }
 }
+
+void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
+                         void *vpm, void *vst, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_maxsz(desc);
+    uint32_t neg = simd_data(desc) << 31;
+    uint16_t *pn = vpn, *pm = vpm;
+    float_status fpst;
+
+    /*
+     * Make a copy of float_status because this operation does not
+     * update the cumulative fp exception status.  It also produces
+     * default nans.
+     */
+    fpst = *(float_status *)vst;
+    set_default_nan_mode(true, &fpst);
+
+    for (row = 0; row < oprsz; ) {
+        uint16_t pa = pn[H2(row >> 4)];
+        do {
+            if (pa & 1) {
+                void *vza_row = vza + tile_vslice_offset(row);
+                uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg;
+
+                for (col = 0; col < oprsz; ) {
+                    uint16_t pb = pm[H2(col >> 4)];
+                    do {
+                        if (pb & 1) {
+                            uint32_t *a = vza_row + H1_4(col);
+                            uint32_t *m = vzm + H1_4(col);
+                            *a = float32_muladd(n, *m, *a, 0, vst);
+                        }
+                        col += 4;
+                        pb >>= 4;
+                    } while (col & 15);
+                }
+            }
+            row += 4;
+            pa >>= 4;
+        } while (row & 15);
+    }
+}
+
+void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
+                         void *vpm, void *vst, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+    uint64_t neg = (uint64_t)simd_data(desc) << 63;
+    uint64_t *za = vza, *zn = vzn, *zm = vzm;
+    uint8_t *pn = vpn, *pm = vpm;
+    float_status fpst = *(float_status *)vst;
+
+    set_default_nan_mode(true, &fpst);
+
+    for (row = 0; row < oprsz; ++row) {
+        if (pn[H1(row)] & 1) {
+            uint64_t *za_row = &za[tile_vslice_index(row)];
+            uint64_t n = zn[row] ^ neg;
+
+            for (col = 0; col < oprsz; ++col) {
+                if (pm[H1(col)] & 1) {
+                    uint64_t *a = &za_row[col];
+                    *a = float64_muladd(n, zm[col], *a, 0, &fpst);
+                }
+            }
+        }
+    }
+}
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s)
 TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
 TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
 TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
+
+static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
+                            gen_helper_gvec_5_ptr *fn)
+{
+    int svl = streaming_vec_reg_size(s);
+    uint32_t desc = simd_desc(svl, svl, a->sub);
+    TCGv_ptr za, zn, zm, pn, pm, fpst;
+
+    if (!sme_smza_enabled_check(s)) {
+        return true;
+    }
+
+    /* Sum XZR+zad to find ZAd. */
+    za = get_tile_rowcol(s, esz, 31, a->zad, false);
+    zn = vec_full_reg_ptr(s, a->zn);
+    zm = vec_full_reg_ptr(s, a->zm);
+    pn = pred_full_reg_ptr(s, a->pn);
+    pm = pred_full_reg_ptr(s, a->pm);
+    fpst = fpstatus_ptr(FPST_FPCR);
+
+    fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc));
+
+    tcg_temp_free_ptr(za);
+    tcg_temp_free_ptr(zn);
+    tcg_temp_free_ptr(pn);
+    tcg_temp_free_ptr(pm);
+    tcg_temp_free_ptr(fpst);
+    return true;
+}
+
+TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
+TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
--
2.25.1
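
Stripped of the fp details and the packed predicate walk, the helpers above
compute a predicated outer product: za[row][col] += (+/-zn[row]) * zm[col]
wherever both governing predicate bits are set. The scalar model below is
illustrative only (plain doubles stand in for float32_muladd, and the ZA tile
is a plain 2-D array rather than tile_vslice_* offsets):

#include <stdio.h>

#define N 4

int main(void)
{
    double za[N][N] = {0}, zn[N] = {1, 2, 3, 4}, zm[N] = {10, 20, 30, 40};
    int pn[N] = {1, 1, 0, 1}, pm[N] = {1, 0, 1, 1};
    int sub = 0;  /* 0: FMOPA; 1 would negate zn[row], as FMOPS does */

    for (int row = 0; row < N; row++) {
        if (!pn[row]) {
            continue;
        }
        double n = sub ? -zn[row] : zn[row];
        for (int col = 0; col < N; col++) {
            if (pm[col]) {
                za[row][col] += n * zm[col];
            }
        }
    }
    printf("za[0][0]=%g za[3][3]=%g\n", za[0][0], za[3][3]);  /* 10 160 */
    return 0;
}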
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Move ssra_op and usra_op expanders from translate-a64.c.
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Message-id: 20220708151540.18136-26-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/helper-sme.h | 2 ++
9
target/arm/sme.decode | 2 ++
10
target/arm/sme_helper.c | 56 ++++++++++++++++++++++++++++++++++++++
11
target/arm/translate-sme.c | 30 ++++++++++++++++++++
12
4 files changed, 90 insertions(+)
4
13
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
6
Message-id: 20181011205206.3552-14-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
target/arm/translate.h | 2 +
11
target/arm/translate-a64.c | 106 ----------------------------
12
target/arm/translate.c | 139 ++++++++++++++++++++++++++++++++++---
13
3 files changed, 130 insertions(+), 117 deletions(-)
14
15
diff --git a/target/arm/translate.h b/target/arm/translate.h
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate.h
16
--- a/target/arm/helper-sme.h
18
+++ b/target/arm/translate.h
17
+++ b/target/arm/helper-sme.h
19
@@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 get_ahp_flag(void)
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
20
extern const GVecGen3 bsl_op;
19
void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
21
extern const GVecGen3 bit_op;
20
DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
22
extern const GVecGen3 bif_op;
21
void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
23
+extern const GVecGen2i ssra_op[4];
22
+DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG,
24
+extern const GVecGen2i usra_op[4];
23
+ void, ptr, ptr, ptr, ptr, ptr, i32)
25
24
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
26
/*
27
* Forward to the isar_feature_* tests given a DisasContext pointer.
28
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
29
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/translate-a64.c
26
--- a/target/arm/sme.decode
31
+++ b/target/arm/translate-a64.c
27
+++ b/target/arm/sme.decode
32
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
28
@@ -XXX,XX +XXX,XX @@ ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
29
30
FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
31
FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
32
+
33
+BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
34
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/sme_helper.c
37
+++ b/target/arm/sme_helper.c
38
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
39
}
33
}
40
}
34
}
41
}
35
42
+
36
-static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
43
+/*
37
-{
44
+ * Alter PAIR as needed for controlling predicates being false,
38
- tcg_gen_vec_sar8i_i64(a, a, shift);
45
+ * and for NEG on an enabled row element.
39
- tcg_gen_vec_add8_i64(d, d, a);
46
+ */
40
-}
47
+static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
41
-
42
-static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
43
-{
44
- tcg_gen_vec_sar16i_i64(a, a, shift);
45
- tcg_gen_vec_add16_i64(d, d, a);
46
-}
47
-
48
-static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
49
-{
50
- tcg_gen_sari_i32(a, a, shift);
51
- tcg_gen_add_i32(d, d, a);
52
-}
53
-
54
-static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
55
-{
56
- tcg_gen_sari_i64(a, a, shift);
57
- tcg_gen_add_i64(d, d, a);
58
-}
59
-
60
-static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
61
-{
62
- tcg_gen_sari_vec(vece, a, a, sh);
63
- tcg_gen_add_vec(vece, d, d, a);
64
-}
65
-
66
-static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
67
-{
68
- tcg_gen_vec_shr8i_i64(a, a, shift);
69
- tcg_gen_vec_add8_i64(d, d, a);
70
-}
71
-
72
-static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
73
-{
74
- tcg_gen_vec_shr16i_i64(a, a, shift);
75
- tcg_gen_vec_add16_i64(d, d, a);
76
-}
77
-
78
-static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
79
-{
80
- tcg_gen_shri_i32(a, a, shift);
81
- tcg_gen_add_i32(d, d, a);
82
-}
83
-
84
-static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
85
-{
86
- tcg_gen_shri_i64(a, a, shift);
87
- tcg_gen_add_i64(d, d, a);
88
-}
89
-
90
-static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
91
-{
92
- tcg_gen_shri_vec(vece, a, a, sh);
93
- tcg_gen_add_vec(vece, d, d, a);
94
-}
95
-
96
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
97
{
98
uint64_t mask = dup_const(MO_8, 0xff >> shift);
99
@@ -XXX,XX +XXX,XX @@ static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
100
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
101
int immh, int immb, int opcode, int rn, int rd)
102
{
103
- static const GVecGen2i ssra_op[4] = {
104
- { .fni8 = gen_ssra8_i64,
105
- .fniv = gen_ssra_vec,
106
- .load_dest = true,
107
- .opc = INDEX_op_sari_vec,
108
- .vece = MO_8 },
109
- { .fni8 = gen_ssra16_i64,
110
- .fniv = gen_ssra_vec,
111
- .load_dest = true,
112
- .opc = INDEX_op_sari_vec,
113
- .vece = MO_16 },
114
- { .fni4 = gen_ssra32_i32,
115
- .fniv = gen_ssra_vec,
116
- .load_dest = true,
117
- .opc = INDEX_op_sari_vec,
118
- .vece = MO_32 },
119
- { .fni8 = gen_ssra64_i64,
120
- .fniv = gen_ssra_vec,
121
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
122
- .load_dest = true,
123
- .opc = INDEX_op_sari_vec,
124
- .vece = MO_64 },
125
- };
126
- static const GVecGen2i usra_op[4] = {
127
- { .fni8 = gen_usra8_i64,
128
- .fniv = gen_usra_vec,
129
- .load_dest = true,
130
- .opc = INDEX_op_shri_vec,
131
- .vece = MO_8, },
132
- { .fni8 = gen_usra16_i64,
133
- .fniv = gen_usra_vec,
134
- .load_dest = true,
135
- .opc = INDEX_op_shri_vec,
136
- .vece = MO_16, },
137
- { .fni4 = gen_usra32_i32,
138
- .fniv = gen_usra_vec,
139
- .load_dest = true,
140
- .opc = INDEX_op_shri_vec,
141
- .vece = MO_32, },
142
- { .fni8 = gen_usra64_i64,
143
- .fniv = gen_usra_vec,
144
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
145
- .load_dest = true,
146
- .opc = INDEX_op_shri_vec,
147
- .vece = MO_64, },
148
- };
149
static const GVecGen2i sri_op[4] = {
150
{ .fni8 = gen_shr8_ins_i64,
151
.fniv = gen_shr_ins_vec,
152
diff --git a/target/arm/translate.c b/target/arm/translate.c
153
index XXXXXXX..XXXXXXX 100644
154
--- a/target/arm/translate.c
155
+++ b/target/arm/translate.c
156
@@ -XXX,XX +XXX,XX @@ const GVecGen3 bif_op = {
157
.load_dest = true
158
};
159
160
+static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
161
+{
48
+{
162
+ tcg_gen_vec_sar8i_i64(a, a, shift);
49
+ /*
163
+ tcg_gen_vec_add8_i64(d, d, a);
50
+ * The pseudocode uses a conditional negate after the conditional zero.
51
+ * It is simpler here to unconditionally negate before conditional zero.
52
+ */
53
+ pair ^= neg;
54
+ if (!(pg & 1)) {
55
+ pair &= 0xffff0000u;
56
+ }
57
+ if (!(pg & 4)) {
58
+ pair &= 0x0000ffffu;
59
+ }
60
+ return pair;
164
+}
61
+}
165
+
62
+
166
+static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
63
+void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
64
+ void *vpm, uint32_t desc)
167
+{
65
+{
168
+ tcg_gen_vec_sar16i_i64(a, a, shift);
66
+ intptr_t row, col, oprsz = simd_maxsz(desc);
169
+ tcg_gen_vec_add16_i64(d, d, a);
67
+ uint32_t neg = simd_data(desc) * 0x80008000u;
68
+ uint16_t *pn = vpn, *pm = vpm;
69
+
70
+ for (row = 0; row < oprsz; ) {
71
+ uint16_t prow = pn[H2(row >> 4)];
72
+ do {
73
+ void *vza_row = vza + tile_vslice_offset(row);
74
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row));
75
+
76
+ n = f16mop_adj_pair(n, prow, neg);
77
+
78
+ for (col = 0; col < oprsz; ) {
79
+ uint16_t pcol = pm[H2(col >> 4)];
80
+ do {
81
+ if (prow & pcol & 0b0101) {
82
+ uint32_t *a = vza_row + H1_4(col);
83
+ uint32_t m = *(uint32_t *)(vzm + H1_4(col));
84
+
85
+ m = f16mop_adj_pair(m, pcol, 0);
86
+ *a = bfdotadd(*a, n, m);
87
+
88
+ col += 4;
89
+ pcol >>= 4;
90
+ }
91
+ } while (col & 15);
92
+ }
93
+ row += 4;
94
+ prow >>= 4;
95
+ } while (row & 15);
96
+ }
97
+}
98
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/arm/translate-sme.c
101
+++ b/target/arm/translate-sme.c
102
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
103
TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
104
TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
105
106
+static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz,
107
+ gen_helper_gvec_5 *fn)
108
+{
109
+ int svl = streaming_vec_reg_size(s);
110
+ uint32_t desc = simd_desc(svl, svl, a->sub);
111
+ TCGv_ptr za, zn, zm, pn, pm;
112
+
113
+ if (!sme_smza_enabled_check(s)) {
114
+ return true;
115
+ }
116
+
117
+ /* Sum XZR+zad to find ZAd. */
118
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
119
+ zn = vec_full_reg_ptr(s, a->zn);
120
+ zm = vec_full_reg_ptr(s, a->zm);
121
+ pn = pred_full_reg_ptr(s, a->pn);
122
+ pm = pred_full_reg_ptr(s, a->pm);
123
+
124
+ fn(za, zn, zm, pn, pm, tcg_constant_i32(desc));
125
+
126
+ tcg_temp_free_ptr(za);
127
+ tcg_temp_free_ptr(zn);
128
+ tcg_temp_free_ptr(pn);
129
+ tcg_temp_free_ptr(pm);
130
+ return true;
170
+}
131
+}
171
+
132
+
172
+static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
133
static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
173
+{
134
gen_helper_gvec_5_ptr *fn)
174
+ tcg_gen_sari_i32(a, a, shift);
135
{
175
+ tcg_gen_add_i32(d, d, a);
136
@@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
176
+}
137
138
TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
139
TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
177
+
140
+
178
+static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
141
+/* TODO: FEAT_EBF16 */
179
+{
142
+TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa)
180
+ tcg_gen_sari_i64(a, a, shift);
181
+ tcg_gen_add_i64(d, d, a);
182
+}
183
+
184
+static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
185
+{
186
+ tcg_gen_sari_vec(vece, a, a, sh);
187
+ tcg_gen_add_vec(vece, d, d, a);
188
+}
189
+
190
+const GVecGen2i ssra_op[4] = {
191
+ { .fni8 = gen_ssra8_i64,
192
+ .fniv = gen_ssra_vec,
193
+ .load_dest = true,
194
+ .opc = INDEX_op_sari_vec,
195
+ .vece = MO_8 },
196
+ { .fni8 = gen_ssra16_i64,
197
+ .fniv = gen_ssra_vec,
198
+ .load_dest = true,
199
+ .opc = INDEX_op_sari_vec,
200
+ .vece = MO_16 },
201
+    { .fni4 = gen_ssra32_i32,
+      .fniv = gen_ssra_vec,
+      .load_dest = true,
+      .opc = INDEX_op_sari_vec,
+      .vece = MO_32 },
+    { .fni8 = gen_ssra64_i64,
+      .fniv = gen_ssra_vec,
+      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+      .load_dest = true,
+      .opc = INDEX_op_sari_vec,
+      .vece = MO_64 },
+};
+
+static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    tcg_gen_vec_shr8i_i64(a, a, shift);
+    tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    tcg_gen_vec_shr16i_i64(a, a, shift);
+    tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+    tcg_gen_shri_i32(a, a, shift);
+    tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    tcg_gen_shri_i64(a, a, shift);
+    tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+    tcg_gen_shri_vec(vece, a, a, sh);
+    tcg_gen_add_vec(vece, d, d, a);
+}
+
+const GVecGen2i usra_op[4] = {
+    { .fni8 = gen_usra8_i64,
+      .fniv = gen_usra_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_8, },
+    { .fni8 = gen_usra16_i64,
+      .fniv = gen_usra_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_16, },
+    { .fni4 = gen_usra32_i32,
+      .fniv = gen_usra_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_32, },
+    { .fni8 = gen_usra64_i64,
+      .fniv = gen_usra_vec,
+      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_64, },
+};

 /* Translate a NEON data processing instruction.  Return nonzero if the
    instruction is invalid.
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                         }
                         return 0;

+                    case 1: /* VSRA */
+                        /* Right shift comes here negative.  */
+                        shift = -shift;
+                        /* Shifts larger than the element size are architecturally
+                         * valid.  Unsigned results in all zeros; signed results
+                         * in all sign bits.
+                         */
+                        if (!u) {
+                            tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+                                            MIN(shift, (8 << size) - 1),
+                                            &ssra_op[size]);
+                        } else if (shift >= 8 << size) {
+                            /* rd += 0 */
+                        } else {
+                            tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+                                            shift, &usra_op[size]);
+                        }
+                        return 0;
+
                     case 5: /* VSHL, VSLI */
                         if (!u) { /* VSHL */
                             /* Shifts larger than the element size are
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 neon_load_reg64(cpu_V0, rm + pass);
                 tcg_gen_movi_i64(cpu_V1, imm);
                 switch (op) {
-                case 1: /* VSRA */
-                    if (u)
-                        gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
-                    else
-                        gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
-                    break;
                 case 2: /* VRSHR */
                 case 3: /* VRSRA */
                     if (u)
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 default:
                     g_assert_not_reached();
                 }
-                if (op == 1 || op == 3) {
+                if (op == 3) {
                     /* Accumulate.  */
                     neon_load_reg64(cpu_V1, rd + pass);
                     tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 tmp2 = tcg_temp_new_i32();
                 tcg_gen_movi_i32(tmp2, imm);
                 switch (op) {
-                case 1: /* VSRA */
-                    GEN_NEON_INTEGER_OP(shl);
-                    break;
                 case 2: /* VRSHR */
                 case 3: /* VRSRA */
                     GEN_NEON_INTEGER_OP(rshl);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 }
                 tcg_temp_free_i32(tmp2);

-                if (op == 1 || op == 3) {
+                if (op == 3) {
                     /* Accumulate.  */
                     tmp2 = neon_load_reg(rd, pass);
                     gen_neon_add(size, tmp, tmp2);
--
2.19.1

--
2.25.1
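[Editor's illustration, not part of the patch above: a minimal C sketch of
the per-lane semantics that the ssra_op/usra_op expanders vectorize.  The
function names and test values here are made up; signed >> is assumed to be
arithmetic, as on all mainstream compilers.]

#include <stdint.h>
#include <stdio.h>

/* One 8-bit lane of SSRA (signed) vs USRA (unsigned):
 * shift the source right, then accumulate into the destination.
 */
static uint8_t ssra8(uint8_t d, uint8_t a, int shift)
{
    return d + (uint8_t)((int8_t)a >> shift);   /* sign bits shift in */
}

static uint8_t usra8(uint8_t d, uint8_t a, int shift)
{
    return d + (uint8_t)(a >> shift);           /* zeros shift in */
}

int main(void)
{
    /* 0x80 >> 1: signed gives 0xc0, unsigned gives 0x40 -- which is why
     * the translator clamps the signed shift to (8 << size) - 1 instead
     * of skipping it, while an out-of-range unsigned shift adds zero. */
    printf("ssra: %02x\n", ssra8(0x10, 0x80, 1)); /* 0xd0 */
    printf("usra: %02x\n", usra8(0x10, 0x80, 1)); /* 0x50 */
    return 0;
}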
The switch_mode() function is defined in target/arm/helper.c and used
only in that file and nowhere else, so we can make it file-local
rather than global.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181012144235.19646-3-peter.maydell@linaro.org
---
 target/arm/internals.h | 1 -
 target/arm/helper.c    | 6 ++++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline int bank_number(int mode)
     g_assert_not_reached();
 }

-void switch_mode(CPUARMState *, int);
 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
 void arm_translate_init(void);

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                 V8M_SAttributes *sattrs);
 #endif

+static void switch_mode(CPUARMState *env, int mode);
+
 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
 {
     int nregs;
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
     return 0;
 }

-void switch_mode(CPUARMState *env, int mode)
+static void switch_mode(CPUARMState *env, int mode)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);

@@ -XXX,XX +XXX,XX @@ void aarch64_sync_64_to_32(CPUARMState *env)

 #else

-void switch_mode(CPUARMState *env, int mode)
+static void switch_mode(CPUARMState *env, int mode)
 {
     int old_mode;
     int i;
--
2.19.1

From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-27-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h    |  2 ++
 target/arm/sme.decode      |  1 +
 target/arm/sme_helper.c    | 74 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sme.c |  1 +
 4 files changed, 78 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
 FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64

 BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
+FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@ static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
     return pair;
 }

+static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2,
+                          float_status *s_std, float_status *s_odd)
+{
+    float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std);
+    float64 e1c = float16_to_float64(e1 >> 16, true, s_std);
+    float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std);
+    float64 e2c = float16_to_float64(e2 >> 16, true, s_std);
+    float64 t64;
+    float32 t32;
+
+    /*
+     * The ARM pseudocode function FPDot performs both multiplies
+     * and the add with a single rounding operation.  Emulate this
+     * by performing the first multiply in round-to-odd, then doing
+     * the second multiply as fused multiply-add, and rounding to
+     * float32 all in one step.
+     */
+    t64 = float64_mul(e1r, e2r, s_odd);
+    t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std);
+
+    /* This conversion is exact, because we've already rounded. */
+    t32 = float64_to_float32(t64, s_std);
+
+    /* The final accumulation step is not fused. */
+    return float32_add(sum, t32, s_std);
+}
+
+void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
+                         void *vpm, void *vst, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_maxsz(desc);
+    uint32_t neg = simd_data(desc) * 0x80008000u;
+    uint16_t *pn = vpn, *pm = vpm;
+    float_status fpst_odd, fpst_std;
+
+    /*
+     * Make a copy of float_status because this operation does not
+     * update the cumulative fp exception status.  It also produces
+     * default nans.  Make a second copy with round-to-odd -- see above.
+     */
+    fpst_std = *(float_status *)vst;
+    set_default_nan_mode(true, &fpst_std);
+    fpst_odd = fpst_std;
+    set_float_rounding_mode(float_round_to_odd, &fpst_odd);
+
+    for (row = 0; row < oprsz; ) {
+        uint16_t prow = pn[H2(row >> 4)];
+        do {
+            void *vza_row = vza + tile_vslice_offset(row);
+            uint32_t n = *(uint32_t *)(vzn + H1_4(row));
+
+            n = f16mop_adj_pair(n, prow, neg);
+
+            for (col = 0; col < oprsz; ) {
+                uint16_t pcol = pm[H2(col >> 4)];
+                do {
+                    if (prow & pcol & 0b0101) {
+                        uint32_t *a = vza_row + H1_4(col);
+                        uint32_t m = *(uint32_t *)(vzm + H1_4(col));
+
+                        m = f16mop_adj_pair(m, pcol, 0);
+                        *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd);
+
+                        col += 4;
+                        pcol >>= 4;
+                    }
+                } while (col & 15);
+            }
+            row += 4;
+            prow >>= 4;
+        } while (row & 15);
+    }
+}
+
 void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
                         void *vpm, uint32_t desc)
 {
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
     return true;
 }

+TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_h)
 TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
 TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
--
2.25.1
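[Editor's illustration, not part of the patch above: the round-to-odd trick
that f16_dotadd leans on, shown on integers where it is easiest to see.
This is a hypothetical sketch, not QEMU code.  Round-to-odd ("von Neumann
rounding") forces the low result bit to 1 whenever any discarded bit was
nonzero, so the intermediate result still records whether the exact value
was above, below, or equal -- which is what lets a later correctly-rounded
step behave as if only one rounding had happened.]

#include <stdint.h>
#include <stdio.h>

/* Right shift with round-to-odd: the sticky OR preserves "inexact". */
static uint64_t shr_round_to_odd(uint64_t x, unsigned sh)
{
    uint64_t discarded = x & ((UINT64_C(1) << sh) - 1);
    return (x >> sh) | (discarded != 0);
}

int main(void)
{
    printf("%llx\n", (unsigned long long)shr_round_to_odd(0x28, 4)); /* 3 */
    printf("%llx\n", (unsigned long long)shr_round_to_odd(0x20, 4)); /* 2 */
    return 0;
}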
From: Richard Henderson <richard.henderson@linaro.org>

Move shi_op and sli_op expanders from translate-a64.c.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h     |   2 +
 target/arm/translate-a64.c | 152 +----------------------
 target/arm/translate.c     | 244 ++++++++++++++++++++++++++-----------
 3 files changed, 179 insertions(+), 219 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ extern const GVecGen3 bit_op;
 extern const GVecGen3 bif_op;
 extern const GVecGen2i ssra_op[4];
 extern const GVecGen2i usra_op[4];
+extern const GVecGen2i sri_op[4];
+extern const GVecGen2i sli_op[4];

 /*
  * Forward to the isar_feature_* tests given a DisasContext pointer.
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
     }
 }

-static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    uint64_t mask = dup_const(MO_8, 0xff >> shift);
-    TCGv_i64 t = tcg_temp_new_i64();
-
-    tcg_gen_shri_i64(t, a, shift);
-    tcg_gen_andi_i64(t, t, mask);
-    tcg_gen_andi_i64(d, d, ~mask);
-    tcg_gen_or_i64(d, d, t);
-    tcg_temp_free_i64(t);
-}
-
-static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
-    TCGv_i64 t = tcg_temp_new_i64();
-
-    tcg_gen_shri_i64(t, a, shift);
-    tcg_gen_andi_i64(t, t, mask);
-    tcg_gen_andi_i64(d, d, ~mask);
-    tcg_gen_or_i64(d, d, t);
-    tcg_temp_free_i64(t);
-}
-
-static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
-    tcg_gen_shri_i32(a, a, shift);
-    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
-}
-
-static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    tcg_gen_shri_i64(a, a, shift);
-    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
-}
-
-static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
-    uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
-    TCGv_vec t = tcg_temp_new_vec_matching(d);
-    TCGv_vec m = tcg_temp_new_vec_matching(d);
-
-    tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
-    tcg_gen_shri_vec(vece, t, a, sh);
-    tcg_gen_and_vec(vece, d, d, m);
-    tcg_gen_or_vec(vece, d, d, t);
-
-    tcg_temp_free_vec(t);
-    tcg_temp_free_vec(m);
-}
-
 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                  int immh, int immb, int opcode, int rn, int rd)
 {
-    static const GVecGen2i sri_op[4] = {
-        { .fni8 = gen_shr8_ins_i64,
-          .fniv = gen_shr_ins_vec,
-          .load_dest = true,
-          .opc = INDEX_op_shri_vec,
-          .vece = MO_8 },
-        { .fni8 = gen_shr16_ins_i64,
-          .fniv = gen_shr_ins_vec,
-          .load_dest = true,
-          .opc = INDEX_op_shri_vec,
-          .vece = MO_16 },
-        { .fni4 = gen_shr32_ins_i32,
-          .fniv = gen_shr_ins_vec,
-          .load_dest = true,
-          .opc = INDEX_op_shri_vec,
-          .vece = MO_32 },
-        { .fni8 = gen_shr64_ins_i64,
-          .fniv = gen_shr_ins_vec,
-          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-          .load_dest = true,
-          .opc = INDEX_op_shri_vec,
-          .vece = MO_64 },
-    };
-
     int size = 32 - clz32(immh) - 1;
     int immhb = immh << 3 | immb;
     int shift = 2 * (8 << size) - immhb;
@@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
     clear_vec_high(s, is_q, rd);
 }

-static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    uint64_t mask = dup_const(MO_8, 0xff << shift);
-    TCGv_i64 t = tcg_temp_new_i64();
-
-    tcg_gen_shli_i64(t, a, shift);
-    tcg_gen_andi_i64(t, t, mask);
-    tcg_gen_andi_i64(d, d, ~mask);
-    tcg_gen_or_i64(d, d, t);
-    tcg_temp_free_i64(t);
-}
-
-static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    uint64_t mask = dup_const(MO_16, 0xffff << shift);
-    TCGv_i64 t = tcg_temp_new_i64();
-
-    tcg_gen_shli_i64(t, a, shift);
-    tcg_gen_andi_i64(t, t, mask);
-    tcg_gen_andi_i64(d, d, ~mask);
-    tcg_gen_or_i64(d, d, t);
-    tcg_temp_free_i64(t);
-}
-
-static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
-    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
-}
-
-static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
-    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
-}
-
-static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
-    uint64_t mask = (1ull << sh) - 1;
-    TCGv_vec t = tcg_temp_new_vec_matching(d);
-    TCGv_vec m = tcg_temp_new_vec_matching(d);
-
-    tcg_gen_dupi_vec(vece, m, mask);
-    tcg_gen_shli_vec(vece, t, a, sh);
-    tcg_gen_and_vec(vece, d, d, m);
-    tcg_gen_or_vec(vece, d, d, t);
-
-    tcg_temp_free_vec(t);
-    tcg_temp_free_vec(m);
-}
-
 /* SHL/SLI - Vector shift left */
 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                  int immh, int immb, int opcode, int rn, int rd)
 {
-    static const GVecGen2i shi_op[4] = {
-        { .fni8 = gen_shl8_ins_i64,
-          .fniv = gen_shl_ins_vec,
-          .opc = INDEX_op_shli_vec,
-          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-          .load_dest = true,
-          .vece = MO_8 },
-        { .fni8 = gen_shl16_ins_i64,
-          .fniv = gen_shl_ins_vec,
-          .opc = INDEX_op_shli_vec,
-          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-          .load_dest = true,
-          .vece = MO_16 },
-        { .fni4 = gen_shl32_ins_i32,
-          .fniv = gen_shl_ins_vec,
-          .opc = INDEX_op_shli_vec,
-          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-          .load_dest = true,
-          .vece = MO_32 },
-        { .fni8 = gen_shl64_ins_i64,
-          .fniv = gen_shl_ins_vec,
-          .opc = INDEX_op_shli_vec,
-          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-          .load_dest = true,
-          .vece = MO_64 },
-    };
     int size = 32 - clz32(immh) - 1;
     int immhb = immh << 3 | immb;
     int shift = immhb - (8 << size);
@@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
     }

     if (insert) {
-        gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]);
+        gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]);
     } else {
         gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
     }
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ const GVecGen2i usra_op[4] = {
       .vece = MO_64, },
 };

+static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    uint64_t mask = dup_const(MO_8, 0xff >> shift);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shri_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shri_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+    tcg_gen_shri_i32(a, a, shift);
+    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
+}
+
+static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    tcg_gen_shri_i64(a, a, shift);
+    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
+}
+
+static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+    if (sh == 0) {
+        tcg_gen_mov_vec(d, a);
+    } else {
+        TCGv_vec t = tcg_temp_new_vec_matching(d);
+        TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
+        tcg_gen_shri_vec(vece, t, a, sh);
+        tcg_gen_and_vec(vece, d, d, m);
+        tcg_gen_or_vec(vece, d, d, t);
+
+        tcg_temp_free_vec(t);
+        tcg_temp_free_vec(m);
+    }
+}
+
+const GVecGen2i sri_op[4] = {
+    { .fni8 = gen_shr8_ins_i64,
+      .fniv = gen_shr_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_8 },
+    { .fni8 = gen_shr16_ins_i64,
+      .fniv = gen_shr_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_16 },
+    { .fni4 = gen_shr32_ins_i32,
+      .fniv = gen_shr_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_32 },
+    { .fni8 = gen_shr64_ins_i64,
+      .fniv = gen_shr_ins_vec,
+      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+      .load_dest = true,
+      .opc = INDEX_op_shri_vec,
+      .vece = MO_64 },
+};
+
+static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    uint64_t mask = dup_const(MO_8, 0xff << shift);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shli_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    uint64_t mask = dup_const(MO_16, 0xffff << shift);
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_shli_i64(t, a, shift);
+    tcg_gen_andi_i64(t, t, mask);
+    tcg_gen_andi_i64(d, d, ~mask);
+    tcg_gen_or_i64(d, d, t);
+    tcg_temp_free_i64(t);
+}
+
+static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
+}
+
+static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
+}
+
+static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+    if (sh == 0) {
+        tcg_gen_mov_vec(d, a);
+    } else {
+        TCGv_vec t = tcg_temp_new_vec_matching(d);
+        TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
+        tcg_gen_shli_vec(vece, t, a, sh);
+        tcg_gen_and_vec(vece, d, d, m);
+        tcg_gen_or_vec(vece, d, d, t);
+
+        tcg_temp_free_vec(t);
+        tcg_temp_free_vec(m);
+    }
+}
+
+const GVecGen2i sli_op[4] = {
+    { .fni8 = gen_shl8_ins_i64,
+      .fniv = gen_shl_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shli_vec,
+      .vece = MO_8 },
+    { .fni8 = gen_shl16_ins_i64,
+      .fniv = gen_shl_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shli_vec,
+      .vece = MO_16 },
+    { .fni4 = gen_shl32_ins_i32,
+      .fniv = gen_shl_ins_vec,
+      .load_dest = true,
+      .opc = INDEX_op_shli_vec,
+      .vece = MO_32 },
+    { .fni8 = gen_shl64_ins_i64,
+      .fniv = gen_shl_ins_vec,
+      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+      .load_dest = true,
+      .opc = INDEX_op_shli_vec,
+      .vece = MO_64 },
+};
+
 /* Translate a NEON data processing instruction.  Return nonzero if the
    instruction is invalid.
    We process data in a mixture of 32-bit and 64-bit chunks.
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     int pairwise;
     int u;
     int vec_size;
-    uint32_t imm, mask;
+    uint32_t imm;
     TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
     TCGv_ptr ptr1, ptr2, ptr3;
     TCGv_i64 tmp64;
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                         }
                         return 0;

+                    case 4: /* VSRI */
+                        if (!u) {
+                            return 1;
+                        }
+                        /* Right shift comes here negative.  */
+                        shift = -shift;
+                        /* Shift out of range leaves destination unchanged.  */
+                        if (shift < 8 << size) {
+                            tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+                                            shift, &sri_op[size]);
+                        }
+                        return 0;
+
                     case 5: /* VSHL, VSLI */
-                        if (!u) { /* VSHL */
+                        if (u) { /* VSLI */
+                            /* Shift out of range leaves destination unchanged.  */
+                            if (shift < 8 << size) {
+                                tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
+                                                vec_size, shift, &sli_op[size]);
+                            }
+                        } else { /* VSHL */
                             /* Shifts larger than the element size are
                              * architecturally valid and results in zero.
                              */
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                                 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
                                                   vec_size, vec_size);
                             }
-                            return 0;
                         }
-                        break;
+                        return 0;
                     }

                     if (size == 3) {
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     else
                         gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                     break;
-                case 4: /* VSRI */
-                case 5: /* VSHL, VSLI */
-                    gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
-                    break;
                 case 6: /* VQSHLU */
                     gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                               cpu_V0, cpu_V1);
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     /* Accumulate.  */
                     neon_load_reg64(cpu_V1, rd + pass);
                     tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
-                } else if (op == 4 || (op == 5 && u)) {
-                    /* Insert */
-                    neon_load_reg64(cpu_V1, rd + pass);
-                    uint64_t mask;
-                    if (shift < -63 || shift > 63) {
-                        mask = 0;
-                    } else {
-                        if (op == 4) {
-                            mask = 0xffffffffffffffffull >> -shift;
-                        } else {
-                            mask = 0xffffffffffffffffull << shift;
-                        }
-                    }
-                    tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
-                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                 }
                 neon_store_reg64(cpu_V0, rd + pass);
             } else { /* size < 3 */
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 case 3: /* VRSRA */
                     GEN_NEON_INTEGER_OP(rshl);
                     break;
-                case 4: /* VSRI */
-                case 5: /* VSHL, VSLI */
-                    switch (size) {
-                    case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
-                    case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
-                    case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
-                    default: abort();
-                    }
-                    break;
                 case 6: /* VQSHLU */
                     switch (size) {
                     case 0:
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     tmp2 = neon_load_reg(rd, pass);
                     gen_neon_add(size, tmp, tmp2);
                     tcg_temp_free_i32(tmp2);
-                } else if (op == 4 || (op == 5 && u)) {
-                    /* Insert */
-                    switch (size) {
-                    case 0:
-                        if (op == 4)
-                            mask = 0xff >> -shift;
-                        else
-                            mask = (uint8_t)(0xff << shift);
-                        mask |= mask << 8;
-                        mask |= mask << 16;
-                        break;
-                    case 1:
-                        if (op == 4)
-                            mask = 0xffff >> -shift;
-                        else
-                            mask = (uint16_t)(0xffff << shift);
-                        mask |= mask << 16;
-                        break;
-                    case 2:
-                        if (shift < -31 || shift > 31) {
-                            mask = 0;
-                        } else {
-                            if (op == 4)
-                                mask = 0xffffffffu >> -shift;
-                            else
-                                mask = 0xffffffffu << shift;
-                        }
-                        break;
-                    default:
-                        abort();
-                    }
-                    tmp2 = neon_load_reg(rd, pass);
-                    tcg_gen_andi_i32(tmp, tmp, mask);
-                    tcg_gen_andi_i32(tmp2, tmp2, ~mask);
-                    tcg_gen_or_i32(tmp, tmp, tmp2);
-                    tcg_temp_free_i32(tmp2);
                 }
                 neon_store_reg(rd, pass, tmp);
             }
--
2.19.1

From: Richard Henderson <richard.henderson@linaro.org>

This is SMOPA, SUMOPA, USMOPA_s, UMOPA, for both Int8 and Int16.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-28-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h    | 16 ++++++++
 target/arm/sme.decode      | 10 +++++
 target/arm/sme_helper.c    | 82 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sme.c | 10 +++++
 4 files changed, 118 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_sumopa_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_usmopa_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_smopa_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_umopa_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64

 BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
 FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
+
+SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32
+SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32
+USMOPA_s 1010000 1 10 0 ..... ... ... ..... . 00 .. @op_32
+UMOPA_s 1010000 1 10 1 ..... ... ... ..... . 00 .. @op_32
+
+SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64
+SUMOPA_d 1010000 0 11 1 ..... ... ... ..... . 0 ... @op_64
+USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64
+UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
         } while (row & 15);
     }
 }
+
+typedef uint64_t IMOPFn(uint64_t, uint64_t, uint64_t, uint8_t, bool);
+
+static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm,
+                            uint8_t *pn, uint8_t *pm,
+                            uint32_t desc, IMOPFn *fn)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+    bool neg = simd_data(desc);
+
+    for (row = 0; row < oprsz; ++row) {
+        uint8_t pa = pn[H1(row)];
+        uint64_t *za_row = &za[tile_vslice_index(row)];
+        uint64_t n = zn[row];
+
+        for (col = 0; col < oprsz; ++col) {
+            uint8_t pb = pm[H1(col)];
+            uint64_t *a = &za_row[col];
+
+            *a = fn(n, zm[col], *a, pa & pb, neg);
+        }
+    }
+}
+
+#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \
+static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
+{                                                                        \
+    uint32_t sum0 = 0, sum1 = 0;                                         \
+    /* Apply P to N as a mask, making the inactive elements 0. */        \
+    n &= expand_pred_b(p);                                               \
+    sum0 += (NTYPE)(n >> 0) * (MTYPE)(m >> 0);                           \
+    sum0 += (NTYPE)(n >> 8) * (MTYPE)(m >> 8);                           \
+    sum0 += (NTYPE)(n >> 16) * (MTYPE)(m >> 16);                         \
+    sum0 += (NTYPE)(n >> 24) * (MTYPE)(m >> 24);                         \
+    sum1 += (NTYPE)(n >> 32) * (MTYPE)(m >> 32);                         \
+    sum1 += (NTYPE)(n >> 40) * (MTYPE)(m >> 40);                         \
+    sum1 += (NTYPE)(n >> 48) * (MTYPE)(m >> 48);                         \
+    sum1 += (NTYPE)(n >> 56) * (MTYPE)(m >> 56);                         \
+    if (neg) {                                                           \
+        sum0 = (uint32_t)a - sum0, sum1 = (uint32_t)(a >> 32) - sum1;    \
+    } else {                                                             \
+        sum0 = (uint32_t)a + sum0, sum1 = (uint32_t)(a >> 32) + sum1;    \
+    }                                                                    \
+    return ((uint64_t)sum1 << 32) | sum0;                                \
+}
+
+#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \
+static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
+{                                                                        \
+    uint64_t sum = 0;                                                    \
+    /* Apply P to N as a mask, making the inactive elements 0. */        \
+    n &= expand_pred_h(p);                                               \
+    sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0);                            \
+    sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16);                          \
+    sum += (NTYPE)(n >> 32) * (MTYPE)(m >> 32);                          \
+    sum += (NTYPE)(n >> 48) * (MTYPE)(m >> 48);                          \
+    return neg ? a - sum : a + sum;                                      \
+}
+
+DEF_IMOP_32(smopa_s, int8_t, int8_t)
+DEF_IMOP_32(umopa_s, uint8_t, uint8_t)
+DEF_IMOP_32(sumopa_s, int8_t, uint8_t)
+DEF_IMOP_32(usmopa_s, uint8_t, int8_t)
+
+DEF_IMOP_64(smopa_d, int16_t, int16_t)
+DEF_IMOP_64(umopa_d, uint16_t, uint16_t)
+DEF_IMOP_64(sumopa_d, int16_t, uint16_t)
+DEF_IMOP_64(usmopa_d, uint16_t, int16_t)
+
+#define DEF_IMOPH(NAME) \
+    void HELPER(sme_##NAME)(void *vza, void *vzn, void *vzm, void *vpn, \
+                            void *vpm, uint32_t desc)                   \
+    { do_imopa(vza, vzn, vzm, vpn, vpm, desc, NAME); }
+
+DEF_IMOPH(smopa_s)
+DEF_IMOPH(umopa_s)
+DEF_IMOPH(sumopa_s)
+DEF_IMOPH(usmopa_s)
+DEF_IMOPH(smopa_d)
+DEF_IMOPH(umopa_d)
+DEF_IMOPH(sumopa_d)
+DEF_IMOPH(usmopa_d)
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_f

 /* TODO: FEAT_EBF16 */
 TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa)
+
+TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s)
+TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s)
+TRANS_FEAT(SUMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_sumopa_s)
+TRANS_FEAT(USMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_usmopa_s)
+
+TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_d)
+TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d)
+TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d)
+TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d)
--
2.25.1
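[Editor's illustration, not part of the patches above: the scalar
"shift and insert" semantics that the sri_op/sli_op expanders implement,
using the same masks as gen_shr8_ins_i64/gen_shl8_ins_i64, reduced to one
8-bit lane.  The function names and test values are invented for this
sketch.]

#include <stdint.h>
#include <stdio.h>

/* Destination bits not covered by the shifted source stay unchanged. */
static uint8_t sri8(uint8_t d, uint8_t a, int shift)
{
    uint8_t mask = 0xff >> shift;             /* bits written by a >> shift */
    return (d & ~mask) | ((uint8_t)(a >> shift) & mask);
}

static uint8_t sli8(uint8_t d, uint8_t a, int shift)
{
    uint8_t mask = (uint8_t)(0xff << shift);  /* bits written by a << shift */
    return (d & ~mask) | ((uint8_t)(a << shift) & mask);
}

int main(void)
{
    printf("sri: %02x\n", sri8(0xf0, 0xff, 4)); /* 0xff: low nibble inserted */
    printf("sli: %02x\n", sli8(0x0f, 0xff, 4)); /* 0xff: high nibble inserted */
    return 0;
}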
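[Editor's illustration, not part of the patch above: the per-chunk
arithmetic of DEF_IMOP_32 for SMOPA_s, with the predicate already applied
(all elements active).  Each 32-bit accumulator half sums four signed
8x8->32 products.  A hypothetical stand-alone sketch, not QEMU code.]

#include <stdint.h>
#include <stdio.h>

static uint64_t smopa_chunk(uint64_t n, uint64_t m, uint64_t a, int neg)
{
    uint32_t sum0 = 0, sum1 = 0;
    for (int i = 0; i < 4; i++) {
        sum0 += (int8_t)(n >> (8 * i)) * (int8_t)(m >> (8 * i));
        sum1 += (int8_t)(n >> (8 * i + 32)) * (int8_t)(m >> (8 * i + 32));
    }
    sum0 = neg ? (uint32_t)a - sum0 : (uint32_t)a + sum0;
    sum1 = neg ? (uint32_t)(a >> 32) - sum1 : (uint32_t)(a >> 32) + sum1;
    return ((uint64_t)sum1 << 32) | sum0;
}

int main(void)
{
    /* every byte lane is 2 * 3 = 6; four lanes per half -> 0x18 each */
    uint64_t n = 0x0202020202020202ull, m = 0x0303030303030303ull;
    printf("%016llx\n", (unsigned long long)smopa_chunk(n, m, 0, 0));
    return 0;
}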
From: Dongjiu Geng <gengdongjiu@huawei.com>

This patch extends the qemu-kvm state sync logic with support for
KVM_GET/SET_VCPU_EVENTS, giving access to the yet-missing SError
exception.  It also supports migration of the exception state.

The SError exception state includes the SError pending state and the
ESR value; kvm_put/get_vcpu_events() will be called when system
registers are set or read.  On migration, if the source machine has an
SError pending, QEMU performs the migration regardless of whether the
target machine supports specifying a guest ESR value, because if the
target machine does not support that, it can still inject the SError
with a zero ESR value.

Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1538067351-23931-3-git-send-email-gengdongjiu@huawei.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h     |  7 ++++++
 target/arm/kvm_arm.h | 24 ++++++++++++++++++
 target/arm/kvm.c     | 60 ++++++++++++++++++++++++++++++++++++++++++++
 target/arm/kvm32.c   | 13 ++++++++++
 target/arm/kvm64.c   | 13 ++++++++++
 target/arm/machine.c | 22 ++++++++++++++++
 6 files changed, 139 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
          */
     } exception;

+    /* Information associated with an SError */
+    struct {
+        uint8_t pending;
+        uint8_t has_esr;
+        uint64_t esr;
+    } serror;
+
     /* Thumb-2 EE state.  */
     uint32_t teecr;
     uint32_t teehbr;
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -XXX,XX +XXX,XX @@ bool write_kvmstate_to_list(ARMCPU *cpu);
  */
 void kvm_arm_reset_vcpu(ARMCPU *cpu);

+/**
+ * kvm_arm_init_serror_injection:
+ * @cs: CPUState
+ *
+ * Check whether KVM can set guest SError syndrome.
+ */
+void kvm_arm_init_serror_injection(CPUState *cs);
+
+/**
+ * kvm_get_vcpu_events:
+ * @cpu: ARMCPU
+ *
+ * Get VCPU related state from kvm.
+ */
+int kvm_get_vcpu_events(ARMCPU *cpu);
+
+/**
+ * kvm_put_vcpu_events:
+ * @cpu: ARMCPU
+ *
+ * Put VCPU related state to kvm.
+ */
+int kvm_put_vcpu_events(ARMCPU *cpu);
+
 #ifdef CONFIG_KVM
 /**
  * kvm_arm_create_scratch_host_vcpu:
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -XXX,XX +XXX,XX @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 };

 static bool cap_has_mp_state;
+static bool cap_has_inject_serror_esr;

 static ARMHostCPUFeatures arm_host_cpu_features;

@@ -XXX,XX +XXX,XX @@ int kvm_arm_vcpu_init(CPUState *cs)
     return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
 }

+void kvm_arm_init_serror_injection(CPUState *cs)
+{
+    cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
+                                    KVM_CAP_ARM_INJECT_SERROR_ESR);
+}
+
 bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                       int *fdarray,
                                       struct kvm_vcpu_init *init)
@@ -XXX,XX +XXX,XX @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
     return 0;
 }

+int kvm_put_vcpu_events(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    struct kvm_vcpu_events events;
+    int ret;
+
+    if (!kvm_has_vcpu_events()) {
+        return 0;
+    }
+
+    memset(&events, 0, sizeof(events));
+    events.exception.serror_pending = env->serror.pending;
+
+    /* Inject SError to guest with specified syndrome if host kernel
+     * supports it, otherwise inject SError without syndrome.
+     */
+    if (cap_has_inject_serror_esr) {
+        events.exception.serror_has_esr = env->serror.has_esr;
+        events.exception.serror_esr = env->serror.esr;
+    }
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+    if (ret) {
+        error_report("failed to put vcpu events");
+    }
+
+    return ret;
+}
+
+int kvm_get_vcpu_events(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    struct kvm_vcpu_events events;
+    int ret;
+
+    if (!kvm_has_vcpu_events()) {
+        return 0;
+    }
+
+    memset(&events, 0, sizeof(events));
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
+    if (ret) {
+        error_report("failed to get vcpu events");
+        return ret;
+    }
+
+    env->serror.pending = events.exception.serror_pending;
+    env->serror.has_esr = events.exception.serror_has_esr;
+    env->serror.esr = events.exception.serror_esr;
+
+    return 0;
+}
+
 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
 {
 }
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm32.c
+++ b/target/arm/kvm32.c
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
     }
     cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;

+    /* Check whether userspace can specify guest syndrome value */
+    kvm_arm_init_serror_injection(cs);
+
     return kvm_arm_init_cpreg_list(cpu);
 }

@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
         return ret;
     }

+    ret = kvm_put_vcpu_events(cpu);
+    if (ret) {
+        return ret;
+    }
+
     /* Note that we do not call write_cpustate_to_list()
      * here, so we are only writing the tuple list back to
      * KVM. This is safe because nothing can change the
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
     }
     vfp_set_fpscr(env, fpscr);

+    ret = kvm_get_vcpu_events(cpu);
+    if (ret) {
+        return ret;
+    }
+
     if (!write_kvmstate_to_list(cpu)) {
         return EINVAL;
     }
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)

     kvm_arm_init_debug(cs);

+    /* Check whether user space can specify guest syndrome value */
+    kvm_arm_init_serror_injection(cs);
+
     return kvm_arm_init_cpreg_list(cpu);
 }

@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
         return ret;
     }

+    ret = kvm_put_vcpu_events(cpu);
+    if (ret) {
+        return ret;
+    }
+
     if (!write_list_to_kvmstate(cpu, level)) {
         return EINVAL;
     }
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
     }
     vfp_set_fpcr(env, fpr);

+    ret = kvm_get_vcpu_events(cpu);
+    if (ret) {
+        return ret;
+    }
+
     if (!write_kvmstate_to_list(cpu)) {
         return EINVAL;
     }
diff --git a/target/arm/machine.c b/target/arm/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_sve = {
 };
 #endif /* AARCH64 */

+static bool serror_needed(void *opaque)
+{
+    ARMCPU *cpu = opaque;
+    CPUARMState *env = &cpu->env;
+
+    return env->serror.pending != 0;
+}
+
+static const VMStateDescription vmstate_serror = {
+    .name = "cpu/serror",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = serror_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8(env.serror.pending, ARMCPU),
+        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
+        VMSTATE_UINT64(env.serror.esr, ARMCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static bool m_needed(void *opaque)
 {
     ARMCPU *cpu = opaque;
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_arm_cpu = {
 #ifdef TARGET_AARCH64
     &vmstate_sve,
 #endif
+    &vmstate_serror,
     NULL
 }
 };
--
2.19.1

From: Richard Henderson <richard.henderson@linaro.org>

This is an SVE instruction that operates using the SVE vector
length but is present only if SME is implemented.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-29-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sve.decode      | 20 +++++++++++++
 target/arm/translate-sve.c | 57 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)

diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2

 ### SVE2 floating-point bfloat16 dot-product (indexed)
 BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2
+
+### SVE broadcast predicate element
+
+&psel esz pd pn pm rv imm
+%psel_rv 16:2 !function=plus_12
+%psel_imm_b 22:2 19:2
+%psel_imm_h 22:2 20:1
+%psel_imm_s 22:2
+%psel_imm_d 23:1
+@psel ........ .. . ... .. .. pn:4 . pm:4 . pd:4 \
+      &psel rv=%psel_rv
+
+PSEL 00100101 .. 1 ..1 .. 01 .... 0 .... 0 .... \
+     @psel esz=0 imm=%psel_imm_b
+PSEL 00100101 .. 1 .10 .. 01 .... 0 .... 0 .... \
+     @psel esz=1 imm=%psel_imm_h
+PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \
+     @psel esz=2 imm=%psel_imm_s
+PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \
+     @psel esz=3 imm=%psel_imm_d
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)

 TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
 TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true)
+
+static bool trans_PSEL(DisasContext *s, arg_psel *a)
+{
+    int vl = vec_full_reg_size(s);
+    int pl = pred_gvec_reg_size(s);
+    int elements = vl >> a->esz;
+    TCGv_i64 tmp, didx, dbit;
+    TCGv_ptr ptr;
+
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (!sve_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i64();
+    dbit = tcg_temp_new_i64();
+    didx = tcg_temp_new_i64();
+    ptr = tcg_temp_new_ptr();
+
+    /* Compute the predicate element. */
+    tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm);
+    if (is_power_of_2(elements)) {
+        tcg_gen_andi_i64(tmp, tmp, elements - 1);
+    } else {
+        tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements));
+    }
+
+    /* Extract the predicate byte and bit indices. */
+    tcg_gen_shli_i64(tmp, tmp, a->esz);
+    tcg_gen_andi_i64(dbit, tmp, 7);
+    tcg_gen_shri_i64(didx, tmp, 3);
+    if (HOST_BIG_ENDIAN) {
+        tcg_gen_xori_i64(didx, didx, 7);
+    }
+
+    /* Load the predicate word. */
+    tcg_gen_trunc_i64_ptr(ptr, didx);
+    tcg_gen_add_ptr(ptr, ptr, cpu_env);
+    tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));
+
+    /* Extract the predicate bit and replicate to MO_64. */
+    tcg_gen_shr_i64(tmp, tmp, dbit);
+    tcg_gen_andi_i64(tmp, tmp, 1);
+    tcg_gen_neg_i64(tmp, tmp);
+
+    /* Apply to either copy the source, or write zeros. */
+    tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd),
+                      pred_full_reg_offset(s, a->pn), tmp, pl, pl);
+
+    tcg_temp_free_i64(tmp);
+    tcg_temp_free_i64(dbit);
+    tcg_temp_free_i64(didx);
+    tcg_temp_free_ptr(ptr);
+    return true;
+}
--
2.25.1
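[Editor's illustration, not part of the PSEL patch above: a scalar C sketch
of what trans_PSEL computes, for esz = MO_8 only (one predicate bit per
byte).  The function and test values are invented; wider element sizes
consult bit (element << esz) instead.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* PSEL: pick element (rv + imm) MOD elements of Pm; if it is active,
 * Pd = Pn, otherwise Pd = all zeros.
 */
static void psel_b(uint8_t *pd, const uint8_t *pn, const uint8_t *pm,
                   unsigned pl, unsigned rv, unsigned imm)
{
    unsigned elements = pl * 8;          /* MO_8: one bit per byte lane */
    unsigned e = (rv + imm) % elements;
    int active = (pm[e / 8] >> (e % 8)) & 1;

    if (active) {
        memcpy(pd, pn, pl);
    } else {
        memset(pd, 0, pl);
    }
}

int main(void)
{
    uint8_t pn[2] = { 0xff, 0x01 }, pm[2] = { 0x02, 0x00 }, pd[2];
    psel_b(pd, pn, pm, 2, 0, 1);         /* element 1 of pm is active */
    printf("%02x %02x\n", pd[0], pd[1]); /* ff 01 */
    return 0;
}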
From: Richard Henderson <rth@twiddle.net>

This can reduce the number of opcodes required for certain
complex forms of load-multiple (e.g. ld4.16b).

Signed-off-by: Richard Henderson <rth@twiddle.net>
Message-id: 20181011205206.3552-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     bool is_store = !extract32(insn, 22, 1);
     bool is_postidx = extract32(insn, 23, 1);
     bool is_q = extract32(insn, 30, 1);
-    TCGv_i64 tcg_addr, tcg_rn;
+    TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;

     int ebytes = 1 << size;
     int elements = (is_q ? 128 : 64) / (8 << size);
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     tcg_rn = cpu_reg_sp(s, rn);
     tcg_addr = tcg_temp_new_i64();
     tcg_gen_mov_i64(tcg_addr, tcg_rn);
+    tcg_ebytes = tcg_const_i64(ebytes);

     for (r = 0; r < rpt; r++) {
         int e;
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
                 clear_vec_high(s, is_q, tt);
             }
         }
-        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
+        tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
         tt = (tt + 1) % 32;
     }
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
         tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
     }
 }
+    tcg_temp_free_i64(tcg_ebytes);
     tcg_temp_free_i64(tcg_addr);
 }

@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     bool replicate = false;
     int index = is_q << 3 | S << 2 | size;
     int ebytes, xs;
-    TCGv_i64 tcg_addr, tcg_rn;
+    TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;

     switch (scale) {
     case 3:
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     tcg_rn = cpu_reg_sp(s, rn);
     tcg_addr = tcg_temp_new_i64();
     tcg_gen_mov_i64(tcg_addr, tcg_rn);
+    tcg_ebytes = tcg_const_i64(ebytes);

     for (xs = 0; xs < selem; xs++) {
         if (replicate) {
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
             do_vec_st(s, rt, index, tcg_addr, scale);
         }
     }
-        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
+        tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
         rt = (rt + 1) % 32;
     }

@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
         tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
     }
 }
+    tcg_temp_free_i64(tcg_ebytes);
     tcg_temp_free_i64(tcg_addr);
 }

--
2.19.1

From: Richard Henderson <richard.henderson@linaro.org>

This is an SVE instruction that operates using the SVE vector
length but is present only if SME is implemented.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-30-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |  2 ++
 target/arm/sve.decode      |  1 +
 target/arm/sve_helper.c    | 16 ++++++++++++++++
 target/arm/translate-sve.c |  2 ++
 4 files changed, 21 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

 DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(sme_revd_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn
 REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn
 REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn
 RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn
+REVD 00000101 00 1011 10 100 ... ..... ..... @rd_pg_rn_e0

 # SVE vector splice (predicated, destructive)
 SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZPZ_D(sve_revh_d, uint64_t, hswap64)

 DO_ZPZ_D(sve_revw_d, uint64_t, wswap64)

+void HELPER(sme_revd_q)(void *vd, void *vn, void *vg, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn;
+    uint8_t *pg = vg;
+
+    for (i = 0; i < opr_sz; i += 2) {
+        if (pg[H1(i)] & 1) {
+            uint64_t n0 = n[i + 0];
+            uint64_t n1 = n[i + 1];
+            d[i + 0] = n1;
+            d[i + 1] = n0;
+        }
+    }
+}
+
 DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8)
 DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16)
 DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0)
 TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz,
            a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0)

+TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0)
+
 TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz,
            gen_helper_sve_splice, a, a->esz)
--
2.25.1
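[Editor's illustration, not part of the REVD patch above: the helper's
effect in plain C -- swap the two 64-bit halves of each 128-bit element
when the element's governing predicate bit is set, else leave the
destination alone.  A hypothetical stand-alone sketch.]

#include <stdint.h>
#include <stdio.h>

static void revd_q(uint64_t *d, const uint64_t *n,
                   const uint8_t *pg, unsigned opr_sz_u64)
{
    /* One predicate byte per 64-bit chunk; bit 0 of the even chunk's
     * byte governs the whole 128-bit element, mirroring sme_revd_q. */
    for (unsigned i = 0; i < opr_sz_u64; i += 2) {
        if (pg[i] & 1) {
            uint64_t n0 = n[i], n1 = n[i + 1];
            d[i] = n1;
            d[i + 1] = n0;
        }
    }
}

int main(void)
{
    uint64_t n[4] = { 1, 2, 3, 4 }, d[4] = { 0, 0, 0, 0 };
    uint8_t pg[4] = { 1, 0, 0, 0 };   /* only the first quadword active */
    revd_q(d, n, pg, 4);
    printf("%llu %llu %llu %llu\n",   /* 2 1 0 0 */
           (unsigned long long)d[0], (unsigned long long)d[1],
           (unsigned long long)d[2], (unsigned long long)d[3]);
    return 0;
}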
From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181011205206.3552-10-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.c | 29 ++++++++++-------------------
 1 file changed, 10 insertions(+), 19 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             break;
         }
         return 0;
+
+    case NEON_3R_VADD_VSUB:
+        if (u) {
+            tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
+                             vec_size, vec_size);
+        } else {
+            tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
+                             vec_size, vec_size);
+        }
+        return 0;
     }
     if (size == 3) {
         /* 64-bit element instructions. */
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                               cpu_V1, cpu_V0);
             }
             break;
-        case NEON_3R_VADD_VSUB:
-            if (u) {
-                tcg_gen_sub_i64(CPU_V001);
-            } else {
-                tcg_gen_add_i64(CPU_V001);
-            }
-            break;
         default:
             abort();
         }
@@ -XXX,XX +XXX,XX @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         tmp2 = neon_load_reg(rd, pass);
         gen_neon_add(size, tmp, tmp2);
         break;
-    case NEON_3R_VADD_VSUB:
-        if (!u) { /* VADD */
-            gen_neon_add(size, tmp, tmp2);
-        } else { /* VSUB */
-            switch (size) {
-            case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
-            case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
-            case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
-            default: abort();
-            }
-        }
-        break;
     case NEON_3R_VTST_VCEQ:
         if (!u) { /* VTST */
             switch (size) {
--
2.19.1

From: Richard Henderson <richard.henderson@linaro.org>

This is an SVE instruction that operates using the SVE vector
length but is present only if SME is implemented.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-31-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.h        |  18 +++++++
 target/arm/sve.decode      |   5 ++
 target/arm/translate-sve.c | 102 +++++++++++++++++++++++++++++++++++++
 target/arm/vec_helper.c    |  24 +++++++++
 4 files changed, 149 insertions(+)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
 #ifdef TARGET_AARCH64
 #include "helper-a64.h"
 #include "helper-sve.h"
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \
     @psel esz=2 imm=%psel_imm_s
 PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \
     @psel esz=3 imm=%psel_imm_d
+
+### SVE clamp
+
+SCLAMP 01000100 .. 0 ..... 110000 ..... ..... @rda_rn_rm
+UCLAMP 01000100 .. 0 ..... 110001 ..... ..... @rda_rn_rm
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_PSEL(DisasContext *s, arg_psel *a)
     tcg_temp_free_ptr(ptr);
     return true;
 }
+
+static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
+{
+    tcg_gen_smax_i32(d, a, n);
+    tcg_gen_smin_i32(d, d, m);
+}
+
+static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
+{
+    tcg_gen_smax_i64(d, a, n);
+    tcg_gen_smin_i64(d, d, m);
+}
+
+static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                           TCGv_vec m, TCGv_vec a)
+{
+    tcg_gen_smax_vec(vece, d, a, n);
+    tcg_gen_smin_vec(vece, d, d, m);
+}
+
+static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                       uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop[] = {
+        INDEX_op_smin_vec, INDEX_op_smax_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_sclamp_vec,
+          .fno  = gen_helper_gvec_sclamp_b,
+          .opt_opc = vecop,
+          .vece = MO_8 },
+        { .fniv = gen_sclamp_vec,
+          .fno  = gen_helper_gvec_sclamp_h,
+          .opt_opc = vecop,
+          .vece = MO_16 },
+        { .fni4 = gen_sclamp_i32,
+          .fniv = gen_sclamp_vec,
+          .fno  = gen_helper_gvec_sclamp_s,
+          .opt_opc = vecop,
+          .vece = MO_32 },
+        { .fni8 = gen_sclamp_i64,
+          .fniv = gen_sclamp_vec,
+          .fno  = gen_helper_gvec_sclamp_d,
+          .opt_opc = vecop,
+          .vece = MO_64,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
+}
+
+TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a)
+
+static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
+{
+    tcg_gen_umax_i32(d, a, n);
+    tcg_gen_umin_i32(d, d, m);
+}
+
+static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
+{
+    tcg_gen_umax_i64(d, a, n);
+    tcg_gen_umin_i64(d, d, m);
+}
+
+static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                           TCGv_vec m, TCGv_vec a)
+{
+    tcg_gen_umax_vec(vece, d, a, n);
+    tcg_gen_umin_vec(vece, d, d, m);
+}
+
+static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                       uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop[] = {
+        INDEX_op_umin_vec, INDEX_op_umax_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_uclamp_vec,
+          .fno  = gen_helper_gvec_uclamp_b,
+          .opt_opc = vecop,
+          .vece = MO_8 },
+        { .fniv = gen_uclamp_vec,
+          .fno  = gen_helper_gvec_uclamp_h,
+          .opt_opc = vecop,
+          .vece = MO_16 },
+        { .fni4 = gen_uclamp_i32,
+          .fniv = gen_uclamp_vec,
+          .fno  = gen_helper_gvec_uclamp_s,
+          .opt_opc = vecop,
+          .vece = MO_32 },
+        { .fni8 = gen_uclamp_i64,
+          .fniv = gen_uclamp_vec,
+          .fno  = gen_helper_gvec_uclamp_d,
+          .opt_opc = vecop,
+          .vece = MO_64,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
+}
+
+TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a)
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm,
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
+
+#define DO_CLAMP(NAME, TYPE) \
+void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc) \
+{                                                                    \
+    intptr_t i, opr_sz = simd_oprsz(desc);                           \
+    for (i = 0; i < opr_sz; i += sizeof(TYPE)) {                     \
+        TYPE aa = *(TYPE *)(a + i);                                  \
+        TYPE nn = *(TYPE *)(n + i);                                  \
+        TYPE mm = *(TYPE *)(m + i);                                  \
+        TYPE dd = MIN(MAX(aa, nn), mm);                              \
+        *(TYPE *)(d + i) = dd;                                       \
+    }                                                                \
+    clear_tail(d, opr_sz, simd_maxsz(desc));                         \
+}
+
+DO_CLAMP(gvec_sclamp_b, int8_t)
+DO_CLAMP(gvec_sclamp_h, int16_t)
+DO_CLAMP(gvec_sclamp_s, int32_t)
+DO_CLAMP(gvec_sclamp_d, int64_t)
+
+DO_CLAMP(gvec_uclamp_b, uint8_t)
+DO_CLAMP(gvec_uclamp_h, uint16_t)
+DO_CLAMP(gvec_uclamp_s, uint32_t)
+DO_CLAMP(gvec_uclamp_d, uint64_t)
--
2.25.1
diff view generated by jsdifflib
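[The translator and helper above both reduce SCLAMP/UCLAMP to an element-wise
dd = MIN(MAX(aa, nn), mm), where n supplies the per-element lower bound and m
the upper bound. As an illustration only -- standalone C with made-up buffers,
not QEMU's gvec interface -- the signed byte case behaves like this:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Element-wise SCLAMP: d[i] = MIN(MAX(a[i], n[i]), m[i]). */
    void sclamp_b(int8_t *d, const int8_t *n, const int8_t *m,
                  const int8_t *a, size_t oprsz)
    {
        for (size_t i = 0; i < oprsz; i++) {
            d[i] = MIN(MAX(a[i], n[i]), m[i]);
        }
    }

    int main(void)
    {
        int8_t n[4] = { -10, -10, -10, -10 };  /* lower bounds */
        int8_t m[4] = {  10,  10,  10,  10 };  /* upper bounds */
        int8_t a[4] = { -20,   5,  30,  10 };  /* inputs */
        int8_t d[4];

        sclamp_b(d, n, m, a, 4);
        for (int i = 0; i < 4; i++) {
            printf("%d ", d[i]);               /* prints: -10 5 10 10 */
        }
        printf("\n");
        return 0;
    }
]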
From: Richard Henderson <richard.henderson@linaro.org>

We can handle both exception entry and exception return by
hooking into aarch64_sve_change_el.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-32-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
         return;
     }

+    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
+    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
+
+    /*
+     * Both AArch64.TakeException and AArch64.ExceptionReturn
+     * invoke ResetSVEState when taking an exception from, or
+     * returning to, AArch32 state when PSTATE.SM is enabled.
+     */
+    if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) {
+        arm_reset_sve_state(env);
+        return;
+    }
+
     /*
      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
      * at ELx, or not available because the EL is in AArch32 state, then
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
      * we already have the correct register contents when encountering the
      * vq0->vq0 transition between EL0->EL1.
      */
-    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
     old_len = (old_a64 && !sve_exception_el(env, old_el)
                ? sve_vqm1_for_el(env, old_el) : 0);
-    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
     new_len = (new_a64 && !sve_exception_el(env, new_el)
                ? sve_vqm1_for_el(env, new_el) : 0);
--
2.25.1
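[The heart of this change is the trigger condition: ResetSVEState must run
exactly when the exception entry or return switches execution state between
AArch64 and AArch32 while PSTATE.SM is set. A simplified sketch of just that
predicate -- the struct and field names here are invented stand-ins, not the
CPUARMState API:

    #include <stdbool.h>

    /* Invented, simplified view of the state consulted above. */
    struct cpu_state {
        bool el_is_aa64[4];  /* execution state per exception level */
        bool pstate_sm;      /* SVCR.SM: Streaming SVE mode enabled */
    };

    /* True when the EL change crosses AArch64<->AArch32 with SM set,
     * i.e. when ResetSVEState is architecturally required. */
    bool need_reset_sve_state(const struct cpu_state *s,
                              int old_el, int new_el)
    {
        bool old_a64 = s->el_is_aa64[old_el];
        bool new_a64 = s->el_is_aa64[new_el];
        return old_a64 != new_a64 && s->pstate_sm;
    }
]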
From: Richard Henderson <richard.henderson@linaro.org>

Note that SME remains effectively disabled for user-only,
because we do not yet set CPACR_EL1.SMEN.  This needs to
wait until the kernel ABI is implemented.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-33-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/emulation.rst |  4 ++++
 target/arm/cpu64.c            | 11 +++++++++++
 2 files changed, 15 insertions(+)

diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
 - FEAT_SHA512 (Advanced SIMD SHA512 instructions)
 - FEAT_SM3 (Advanced SIMD SM3 instructions)
 - FEAT_SM4 (Advanced SIMD SM4 instructions)
+- FEAT_SME (Scalable Matrix Extension)
+- FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode)
+- FEAT_SME_F64F64 (Double-precision floating-point outer product instructions)
+- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions)
 - FEAT_SPECRES (Speculation restriction instructions)
 - FEAT_SSBS (Speculative Store Bypass Safe)
 - FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
      */
     t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);       /* FEAT_MTE3 */
     t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0);  /* FEAT_RASv1p1 + FEAT_DoubleFault */
+    t = FIELD_DP64(t, ID_AA64PFR1, SME, 1);       /* FEAT_SME */
     t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */
     cpu->isar.id_aa64pfr1 = t;

@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
     t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5);    /* FEAT_PMUv3p4 */
     cpu->isar.id_aa64dfr0 = t;

+    t = cpu->isar.id_aa64smfr0;
+    t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1);   /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1);   /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1);   /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf);  /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1);   /* FEAT_SME_F64F64 */
+    t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */
+    t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1);     /* FEAT_SME_FA64 */
+    cpu->isar.id_aa64smfr0 = t;
+
     /* Replicate the same data to the 32-bit id registers.  */
     aa32_max_features(cpu);
--
2.25.1
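[All of the enablement above is ID-register plumbing: FEAT_SME is advertised
by a nonzero SME field in ID_AA64PFR1, and the ID_AA64SMFR0 fields describe
which SME sub-features exist. A sketch of the guest-visible check, using plain
shifts instead of QEMU's FIELD_EX64 macro; the bit position follows the Arm
ARM, where ID_AA64PFR1.SME occupies bits [27:24]:

    #include <stdint.h>
    #include <stdbool.h>

    /* Nonzero ID_AA64PFR1.SME (bits [27:24]) means FEAT_SME is present. */
    bool has_feat_sme(uint64_t id_aa64pfr1)
    {
        return ((id_aa64pfr1 >> 24) & 0xf) != 0;
    }
]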
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-34-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/target_cpu.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/linux-user/aarch64/target_cpu.h b/linux-user/aarch64/target_cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/target_cpu.h
+++ b/linux-user/aarch64/target_cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_clone_regs_parent(CPUARMState *env, unsigned flags)

 static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
 {
-    /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
+    /*
+     * Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
      * different from AArch32 Linux, which uses TPIDRRO.
      */
     env->cp15.tpidr_el[0] = newtls;
+    /* TPIDR2_EL0 is cleared with CLONE_SETTLS. */
+    env->cp15.tpidr2_el0 = 0;
 }

 static inline abi_ulong get_sp_from_cpustate(CPUARMState *state)
--
2.25.1
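[For context: TPIDR2_EL0 is the EL0-writable register the SME ABI reserves
for userspace lazy-save bookkeeping, and the Linux ABI zeroes it in the child
when CLONE_SETTLS is passed, which is what the hunk above emulates. A guest-side
sketch of reading it back (AArch64 only; the S3_3_C13_C0_5 system-register
encoding is quoted from memory and worth double-checking against the Arm ARM):

    #include <stdint.h>

    /* Read TPIDR2_EL0; after clone(CLONE_SETTLS, ...) the child
     * should observe zero under the ABI implemented above. */
    static inline uint64_t read_tpidr2_el0(void)
    {
        uint64_t v;
        __asm__ volatile("mrs %0, S3_3_C13_C0_5" : "=r"(v));
        return v;
    }
]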
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-35-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/cpu_loop.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/cpu_loop.c
+++ b/linux-user/aarch64/cpu_loop.c
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)

         switch (trapnr) {
         case EXCP_SWI:
+            /*
+             * On syscall, PSTATE.ZA is preserved, along with the ZA matrix.
+             * PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState.
+             */
+            if (FIELD_EX64(env->svcr, SVCR, SM)) {
+                env->svcr = FIELD_DP64(env->svcr, SVCR, SM, 0);
+                arm_rebuild_hflags(env);
+                arm_reset_sve_state(env);
+            }
             ret = do_syscall(env,
                              env->xregs[8],
                              env->xregs[0],
--
2.25.1
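[SVCR packs the two SME state bits: SM (bit 0, Streaming SVE mode) and ZA
(bit 1, ZA storage enabled). The syscall path above clears SM and leaves ZA
alone, as SMSTOP SM would. A standalone sketch of that update -- the helper
name is invented for illustration:

    #include <stdint.h>
    #include <stdbool.h>

    #define SVCR_SM (1ull << 0)  /* PSTATE.SM: Streaming SVE mode */
    #define SVCR_ZA (1ull << 1)  /* PSTATE.ZA: ZA storage enabled */

    /* On syscall: preserve ZA, clear SM; report whether the SVE
     * register state must also be reset (it must when SM was set). */
    uint64_t svcr_on_syscall(uint64_t svcr, bool *need_sve_reset)
    {
        *need_sve_reset = (svcr & SVCR_SM) != 0;
        return svcr & ~SVCR_SM;
    }
]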
From: Richard Henderson <richard.henderson@linaro.org>

Make sure to zero the currently reserved fields.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-36-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/signal.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -XXX,XX +XXX,XX @@ struct target_extra_context {
 struct target_sve_context {
     struct target_aarch64_ctx head;
     uint16_t vl;
-    uint16_t reserved[3];
+    uint16_t flags;
+    uint16_t reserved[2];

     /* The actual SVE data immediately follows.  It is laid out
      * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
      * the original struct pointer.
@@ -XXX,XX +XXX,XX @@ struct target_sve_context {
 #define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
     (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))

+#define TARGET_SVE_SIG_FLAG_SM  1
+
 struct target_rt_sigframe {
     struct target_siginfo info;
     struct target_ucontext uc;
@@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve,
 {
     int i, j;

+    memset(sve, 0, sizeof(*sve));
     __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
     __put_user(size, &sve->head.size);
     __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);
+    if (FIELD_EX64(env->svcr, SVCR, SM)) {
+        __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags);
+    }

     /* Note that SVE regs are stored as a byte stream, with each byte element
      * at a subsequent address.  This corresponds to a little-endian store
--
2.25.1
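[From the guest's side, a signal handler can now tell whether the interrupted
code was in streaming mode by testing the flags word of the SVE record. A
sketch of that test; the header layout mirrors struct target_sve_context above,
and locating the record within the signal frame is elided:

    #include <stdint.h>
    #include <stdbool.h>

    #define SVE_SIG_FLAG_SM 1  /* mirrors TARGET_SVE_SIG_FLAG_SM */

    struct sve_record_hdr {
        uint32_t magic;
        uint32_t size;
        uint16_t vl;
        uint16_t flags;
        uint16_t reserved[2];
    };

    /* True if the interrupted context was in Streaming SVE mode. */
    bool context_was_streaming(const struct sve_record_hdr *sve)
    {
        return (sve->flags & SVE_SIG_FLAG_SM) != 0;
    }
]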
From: Richard Henderson <richard.henderson@linaro.org>

Fold the return value setting into the goto, so each
point of failure need not do both.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-37-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/signal.c | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
     struct target_sve_context *sve = NULL;
     uint64_t extra_datap = 0;
     bool used_extra = false;
-    bool err = false;
     int vq = 0, sve_size = 0;

     target_restore_general_frame(env, sf);
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
         switch (magic) {
         case 0:
             if (size != 0) {
-                err = true;
-                goto exit;
+                goto err;
             }
             if (used_extra) {
                 ctx = NULL;
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,

         case TARGET_FPSIMD_MAGIC:
             if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
-                err = true;
-                goto exit;
+                goto err;
             }
             fpsimd = (struct target_fpsimd_context *)ctx;
             break;
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
                     break;
                 }
             }
-            err = true;
-            goto exit;
+            goto err;

         case TARGET_EXTRA_MAGIC:
             if (extra || size != sizeof(struct target_extra_context)) {
-                err = true;
-                goto exit;
+                goto err;
             }
             __get_user(extra_datap,
                        &((struct target_extra_context *)ctx)->datap);
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
             /* Unknown record -- we certainly didn't generate it.
              * Did we in fact get out of sync?
              */
-            err = true;
-            goto exit;
+            goto err;
         }
         ctx = (void *)ctx + size;
     }
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
     if (fpsimd) {
         target_restore_fpsimd_record(env, fpsimd);
     } else {
-        err = true;
+        goto err;
     }

     /* SVE data, if present, overwrites FPSIMD data.  */
     if (sve) {
         target_restore_sve_record(env, sve, vq);
     }
-
- exit:
     unlock_user(extra, extra_datap, 0);
-    return err;
+    return 0;
+
+ err:
+    unlock_user(extra, extra_datap, 0);
+    return 1;
 }

 static abi_ulong get_sigframe(struct target_sigaction *ka,
--
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

In parse_user_sigframe, the kernel rejects duplicate sve records,
or records that are smaller than the header.  We were silently
allowing these cases to pass, dropping the record.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-38-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/signal.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
             break;

         case TARGET_SVE_MAGIC:
+            if (sve || size < sizeof(struct target_sve_context)) {
+                goto err;
+            }
             if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
                 vq = sve_vq(env);
                 sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
-                if (!sve && size == sve_size) {
+                if (size == sve_size) {
                     sve = (struct target_sve_context *)ctx;
                     break;
                 }
--
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20220708151540.18136-39-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/signal.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
             __get_user(extra_size,
                        &((struct target_extra_context *)ctx)->size);
             extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
+            if (!extra) {
+                return 1;
+            }
             break;

         default:
--
2.25.1
1
For AArch32, exception return happens through certain kinds
1
From: Richard Henderson <richard.henderson@linaro.org>
2
of CPSR write. We don't currently have any CPU_LOG_INT logging
3
of these events (unlike AArch64, where we log in the ERET
4
instruction). Add some suitable logging.
5
2
6
This will log exception returns like this:
3
Move the checks out of the parsing loop and into the
7
Exception return from AArch32 hyp to usr PC 0x80100374
4
restore function. This more closely mirrors the code
5
structure in the kernel, and is slightly clearer.
8
6
9
paralleling the existing logging in the exception_return
7
Reject rather than silently skip incorrect VL and SVE record sizes,
10
helper for AArch64 exception returns:
8
bringing our checks in to line with those the kernel does.
11
Exception return from AArch64 EL2 to AArch64 EL0 PC 0x8003045c
12
Exception return from AArch64 EL2 to AArch32 EL0 PC 0x8003045c
13
9
14
(Note that an AArch32 exception return can only be
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
AArch32->AArch32, never to AArch64.)
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20220708151540.18136-40-richard.henderson@linaro.org
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
15
linux-user/aarch64/signal.c | 51 +++++++++++++++++++++++++------------
16
1 file changed, 35 insertions(+), 16 deletions(-)
16
17
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
Message-id: 20181012144235.19646-2-peter.maydell@linaro.org
20
---
21
target/arm/internals.h | 18 ++++++++++++++++++
22
target/arm/helper.c | 10 ++++++++++
23
target/arm/translate.c | 7 +------
24
3 files changed, 29 insertions(+), 6 deletions(-)
25
26
diff --git a/target/arm/internals.h b/target/arm/internals.h
27
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/internals.h
20
--- a/linux-user/aarch64/signal.c
29
+++ b/target/arm/internals.h
21
+++ b/linux-user/aarch64/signal.c
30
@@ -XXX,XX +XXX,XX @@ static inline uint32_t v7m_sp_limit(CPUARMState *env)
22
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,
31
}
23
}
32
}
24
}
33
25
34
+/**
26
-static void target_restore_sve_record(CPUARMState *env,
35
+ * aarch32_mode_name(): Return name of the AArch32 CPU mode
27
- struct target_sve_context *sve, int vq)
36
+ * @psr: Program Status Register indicating CPU mode
28
+static bool target_restore_sve_record(CPUARMState *env,
37
+ *
29
+ struct target_sve_context *sve,
38
+ * Returns, for debug logging purposes, a printable representation
30
+ int size)
39
+ * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
31
{
40
+ * the low bits of the specified PSR.
32
- int i, j;
41
+ */
33
+ int i, j, vl, vq;
42
+static inline const char *aarch32_mode_name(uint32_t psr)
34
43
+{
35
- /* Note that SVE regs are stored as a byte stream, with each byte element
44
+ static const char cpu_mode_names[16][4] = {
36
+ if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
45
+ "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
37
+ return false;
46
+ "???", "???", "hyp", "und", "???", "???", "???", "sys"
38
+ }
47
+ };
48
+
39
+
49
+ return cpu_mode_names[psr & 0xf];
40
+ __get_user(vl, &sve->vl);
50
+}
41
+ vq = sve_vq(env);
51
+
42
+
52
#endif
43
+ /* Reject mismatched VL. */
53
diff --git a/target/arm/helper.c b/target/arm/helper.c
44
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
54
index XXXXXXX..XXXXXXX 100644
45
+ return false;
55
--- a/target/arm/helper.c
46
+ }
56
+++ b/target/arm/helper.c
47
+
57
@@ -XXX,XX +XXX,XX @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
48
+ /* Accept empty record -- used to clear PSTATE.SM. */
58
mask |= CPSR_IL;
49
+ if (size <= sizeof(*sve)) {
59
val |= CPSR_IL;
50
+ return true;
51
+ }
52
+
53
+ /* Reject non-empty but incomplete record. */
54
+ if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) {
55
+ return false;
56
+ }
57
+
58
+ /*
59
+ * Note that SVE regs are stored as a byte stream, with each byte element
60
* at a subsequent address. This corresponds to a little-endian load
61
* of our 64-bit hunks.
62
*/
63
@@ -XXX,XX +XXX,XX @@ static void target_restore_sve_record(CPUARMState *env,
60
}
64
}
61
+ qemu_log_mask(LOG_GUEST_ERROR,
62
+ "Illegal AArch32 mode switch attempt from %s to %s\n",
63
+ aarch32_mode_name(env->uncached_cpsr),
64
+ aarch32_mode_name(val));
65
} else {
66
+ qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
67
+ write_type == CPSRWriteExceptionReturn ?
68
+ "Exception return from AArch32" :
69
+ "AArch32 mode switch from",
70
+ aarch32_mode_name(env->uncached_cpsr),
71
+ aarch32_mode_name(val), env->regs[15]);
72
switch_mode(env, val & CPSR_M);
73
}
65
}
74
}
66
}
75
diff --git a/target/arm/translate.c b/target/arm/translate.c
67
+ return true;
76
index XXXXXXX..XXXXXXX 100644
77
--- a/target/arm/translate.c
78
+++ b/target/arm/translate.c
79
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
80
translator_loop(ops, &dc.base, cpu, tb);
81
}
68
}
82
69
83
-static const char *cpu_mode_names[16] = {
70
static int target_restore_sigframe(CPUARMState *env,
84
- "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
71
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
85
- "???", "???", "hyp", "und", "???", "???", "???", "sys"
72
struct target_sve_context *sve = NULL;
86
-};
73
uint64_t extra_datap = 0;
87
-
74
bool used_extra = false;
88
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
75
- int vq = 0, sve_size = 0;
89
int flags)
76
+ int sve_size = 0;
90
{
77
91
@@ -XXX,XX +XXX,XX @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
78
target_restore_general_frame(env, sf);
92
psr & CPSR_V ? 'V' : '-',
79
93
psr & CPSR_T ? 'T' : 'A',
80
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
94
ns_status,
81
if (sve || size < sizeof(struct target_sve_context)) {
95
- cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
82
goto err;
96
+ aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
83
}
84
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
85
- vq = sve_vq(env);
86
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
87
- if (size == sve_size) {
88
- sve = (struct target_sve_context *)ctx;
89
- break;
90
- }
91
- }
92
- goto err;
93
+ sve = (struct target_sve_context *)ctx;
94
+ sve_size = size;
95
+ break;
96
97
case TARGET_EXTRA_MAGIC:
98
if (extra || size != sizeof(struct target_extra_context)) {
99
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
97
}
100
}
98
101
99
if (flags & CPU_DUMP_FPU) {
102
/* SVE data, if present, overwrites FPSIMD data. */
103
- if (sve) {
104
- target_restore_sve_record(env, sve, vq);
105
+ if (sve && !target_restore_sve_record(env, sve, sve_size)) {
106
+ goto err;
107
}
108
unlock_user(extra, extra_datap, 0);
109
return 0;
100
--
110
--
101
2.19.1
111
2.25.1
102
103
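(A standalone sketch of the signal-record size checks introduced above, for
readers following along: the struct layout and payload arithmetic here are
simplified stand-ins for the QEMU/kernel definitions, not the real ones.)

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VQ_BYTES 16                      /* bytes per SVE quadword */

    struct sve_hdr {                         /* simplified target_sve_context */
        uint32_t magic, size;
        uint16_t vl, flags, reserved[2];
    };                                       /* 16 bytes */

    /* Header plus Z/P/FFR payload for a given vq (layout simplified). */
    static uint32_t sve_record_size(int vq)
    {
        return sizeof(struct sve_hdr)
               + 32 * vq * VQ_BYTES          /* 32 Z registers */
               + 17 * vq * VQ_BYTES / 8;     /* 16 P registers + FFR */
    }

    /* The three checks added above: a mismatched VL is rejected, an empty
     * record (header only) is accepted in order to clear state, and a
     * non-empty but truncated record is rejected. */
    static bool record_ok(uint16_t vl, uint32_t size, int vq)
    {
        if (vl != vq * VQ_BYTES) {
            return false;
        }
        if (size <= sizeof(struct sve_hdr)) {
            return true;
        }
        return size >= sve_record_size(vq);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               record_ok(32, 16, 2),                 /* empty: accepted */
               record_ok(32, sve_record_size(2), 2), /* full: accepted */
               record_ok(32, 100, 2));               /* truncated: rejected */
        return 0;
    }
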
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Instead of shifts and masks, use direct loads and stores from the neon
3
Set the SM bit in the SVE record on signal delivery, create the ZA record.
4
register file. Mirror the iteration structure of the ARM pseudocode
4
Restore SM and ZA state according to the records present on return.
5
more closely. Correct the parameters of the VLD2 A2 insn.
6
5
7
Note that this includes a bugfix for handling of the insn
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
"VLD2 (multiple 2-element structures)" -- we were using an
9
incorrect stride value.
10
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20181011205206.3552-19-richard.henderson@linaro.org
8
Message-id: 20220708151540.18136-41-richard.henderson@linaro.org
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
10
---
16
target/arm/translate.c | 170 ++++++++++++++++++-----------------------
11
linux-user/aarch64/signal.c | 167 +++++++++++++++++++++++++++++++++---
17
1 file changed, 74 insertions(+), 96 deletions(-)
12
1 file changed, 154 insertions(+), 13 deletions(-)
18
13
19
diff --git a/target/arm/translate.c b/target/arm/translate.c
14
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
20
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/translate.c
16
--- a/linux-user/aarch64/signal.c
22
+++ b/target/arm/translate.c
17
+++ b/linux-user/aarch64/signal.c
23
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 neon_load_reg(int reg, int pass)
18
@@ -XXX,XX +XXX,XX @@ struct target_sve_context {
24
return tmp;
19
20
#define TARGET_SVE_SIG_FLAG_SM 1
21
22
+#define TARGET_ZA_MAGIC 0x54366345
23
+
24
+struct target_za_context {
25
+ struct target_aarch64_ctx head;
26
+ uint16_t vl;
27
+ uint16_t reserved[3];
28
+ /* The actual ZA data immediately follows. */
29
+};
30
+
31
+#define TARGET_ZA_SIG_REGS_OFFSET \
32
+ QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
33
+#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
34
+ (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
35
+#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
36
+ TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
37
+
38
struct target_rt_sigframe {
39
struct target_siginfo info;
40
struct target_ucontext uc;
41
@@ -XXX,XX +XXX,XX @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
25
}
42
}
26
43
27
+static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
44
static void target_setup_sve_record(struct target_sve_context *sve,
45
- CPUARMState *env, int vq, int size)
46
+ CPUARMState *env, int size)
47
{
48
- int i, j;
49
+ int i, j, vq = sve_vq(env);
50
51
memset(sve, 0, sizeof(*sve));
52
__put_user(TARGET_SVE_MAGIC, &sve->head.magic);
53
@@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve,
54
}
55
}
56
57
+static void target_setup_za_record(struct target_za_context *za,
58
+ CPUARMState *env, int size)
28
+{
59
+{
29
+ long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
60
+ int vq = sme_vq(env);
30
+
61
+ int vl = vq * TARGET_SVE_VQ_BYTES;
31
+ switch (mop) {
62
+ int i, j;
32
+ case MO_UB:
63
+
33
+ tcg_gen_ld8u_i64(var, cpu_env, offset);
64
+ memset(za, 0, sizeof(*za));
34
+ break;
65
+ __put_user(TARGET_ZA_MAGIC, &za->head.magic);
35
+ case MO_UW:
66
+ __put_user(size, &za->head.size);
36
+ tcg_gen_ld16u_i64(var, cpu_env, offset);
67
+ __put_user(vl, &za->vl);
37
+ break;
68
+
38
+ case MO_UL:
69
+ if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
39
+ tcg_gen_ld32u_i64(var, cpu_env, offset);
70
+ return;
40
+ break;
71
+ }
41
+ case MO_Q:
72
+ assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq));
42
+ tcg_gen_ld_i64(var, cpu_env, offset);
73
+
43
+ break;
74
+ /*
44
+ default:
75
+ * Note that ZA vectors are stored as a byte stream,
45
+ g_assert_not_reached();
76
+ * with each byte element at a subsequent address.
77
+ */
78
+ for (i = 0; i < vl; ++i) {
79
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
80
+ for (j = 0; j < vq * 2; ++j) {
81
+ __put_user_e(env->zarray[i].d[j], z + j, le);
82
+ }
46
+ }
83
+ }
47
+}
84
+}
48
+
85
+
49
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
86
static void target_restore_general_frame(CPUARMState *env,
50
{
87
struct target_rt_sigframe *sf)
51
tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
88
{
52
tcg_temp_free_i32(var);
89
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,
90
91
static bool target_restore_sve_record(CPUARMState *env,
92
struct target_sve_context *sve,
93
- int size)
94
+ int size, int *svcr)
95
{
96
- int i, j, vl, vq;
97
+ int i, j, vl, vq, flags;
98
+ bool sm;
99
100
- if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
101
+ __get_user(vl, &sve->vl);
102
+ __get_user(flags, &sve->flags);
103
+
104
+ sm = flags & TARGET_SVE_SIG_FLAG_SM;
105
+
106
+ /* The cpu must support Streaming or Non-streaming SVE. */
107
+ if (sm
108
+ ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
109
+ : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
110
return false;
111
}
112
113
- __get_user(vl, &sve->vl);
114
- vq = sve_vq(env);
115
+ /*
116
+ * Note that we cannot use sve_vq() because that depends on the
117
+ * current setting of PSTATE.SM, not the state to be restored.
118
+ */
119
+ vq = sve_vqm1_for_el_sm(env, 0, sm) + 1;
120
121
/* Reject mismatched VL. */
122
if (vl != vq * TARGET_SVE_VQ_BYTES) {
123
@@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env,
124
return false;
125
}
126
127
+ *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);
128
+
129
/*
130
* Note that SVE regs are stored as a byte stream, with each byte element
131
* at a subsequent address. This corresponds to a little-endian load
132
@@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env,
133
return true;
53
}
134
}
54
135
55
+static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
136
+static bool target_restore_za_record(CPUARMState *env,
137
+ struct target_za_context *za,
138
+ int size, int *svcr)
56
+{
139
+{
57
+ long offset = neon_element_offset(reg, ele, size);
140
+ int i, j, vl, vq;
58
+
141
+
59
+ switch (size) {
142
+ if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
60
+ case MO_8:
143
+ return false;
61
+ tcg_gen_st8_i64(var, cpu_env, offset);
144
+ }
62
+ break;
145
+
63
+ case MO_16:
146
+ __get_user(vl, &za->vl);
64
+ tcg_gen_st16_i64(var, cpu_env, offset);
147
+ vq = sme_vq(env);
65
+ break;
148
+
66
+ case MO_32:
149
+ /* Reject mismatched VL. */
67
+ tcg_gen_st32_i64(var, cpu_env, offset);
150
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
68
+ break;
151
+ return false;
69
+ case MO_64:
152
+ }
70
+ tcg_gen_st_i64(var, cpu_env, offset);
153
+
71
+ break;
154
+ /* Accept empty record -- used to clear PSTATE.ZA. */
72
+ default:
155
+ if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
73
+ g_assert_not_reached();
156
+ return true;
74
+ }
157
+ }
158
+
159
+ /* Reject non-empty but incomplete record. */
160
+ if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
161
+ return false;
162
+ }
163
+
164
+ *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);
165
+
166
+ for (i = 0; i < vl; ++i) {
167
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
168
+ for (j = 0; j < vq * 2; ++j) {
169
+ __get_user_e(env->zarray[i].d[j], z + j, le);
170
+ }
171
+ }
172
+ return true;
75
+}
173
+}
76
+
174
+
77
static inline void neon_load_reg64(TCGv_i64 var, int reg)
175
static int target_restore_sigframe(CPUARMState *env,
78
{
176
struct target_rt_sigframe *sf)
79
tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
177
{
80
@@ -XXX,XX +XXX,XX @@ static struct {
178
struct target_aarch64_ctx *ctx, *extra = NULL;
81
int interleave;
179
struct target_fpsimd_context *fpsimd = NULL;
82
int spacing;
180
struct target_sve_context *sve = NULL;
83
} const neon_ls_element_type[11] = {
181
+ struct target_za_context *za = NULL;
84
- {4, 4, 1},
182
uint64_t extra_datap = 0;
85
- {4, 4, 2},
183
bool used_extra = false;
86
+ {1, 4, 1},
184
int sve_size = 0;
87
+ {1, 4, 2},
185
+ int za_size = 0;
88
{4, 1, 1},
186
+ int svcr = 0;
89
- {4, 2, 1},
187
90
- {3, 3, 1},
188
target_restore_general_frame(env, sf);
91
- {3, 3, 2},
189
92
+ {2, 2, 2},
190
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
93
+ {1, 3, 1},
191
sve_size = size;
94
+ {1, 3, 2},
192
break;
95
{3, 1, 1},
193
96
{1, 1, 1},
194
+ case TARGET_ZA_MAGIC:
97
- {2, 2, 1},
195
+ if (za || size < sizeof(struct target_za_context)) {
98
- {2, 2, 2},
196
+ goto err;
99
+ {1, 2, 1},
197
+ }
100
+ {1, 2, 2},
198
+ za = (struct target_za_context *)ctx;
101
{2, 1, 1}
199
+ za_size = size;
102
};
200
+ break;
103
201
+
104
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
202
case TARGET_EXTRA_MAGIC:
105
int shift;
203
if (extra || size != sizeof(struct target_extra_context)) {
106
int n;
204
goto err;
107
int vec_size;
205
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
108
+ int mmu_idx;
206
}
109
+ TCGMemOp endian;
207
110
TCGv_i32 addr;
208
/* SVE data, if present, overwrites FPSIMD data. */
111
TCGv_i32 tmp;
209
- if (sve && !target_restore_sve_record(env, sve, sve_size)) {
112
TCGv_i32 tmp2;
210
+ if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
113
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
211
goto err;
114
rn = (insn >> 16) & 0xf;
212
}
115
rm = insn & 0xf;
213
+ if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
116
load = (insn & (1 << 21)) != 0;
214
+ goto err;
117
+ endian = s->be_data;
215
+ }
118
+ mmu_idx = get_mem_index(s);
216
+ if (env->svcr != svcr) {
119
if ((insn & (1 << 23)) == 0) {
217
+ env->svcr = svcr;
120
/* Load store all elements. */
218
+ arm_rebuild_hflags(env);
121
op = (insn >> 8) & 0xf;
219
+ }
122
@@ -XXX,XX +XXX,XX @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
220
unlock_user(extra, extra_datap, 0);
123
nregs = neon_ls_element_type[op].nregs;
221
return 0;
124
interleave = neon_ls_element_type[op].interleave;
222
125
spacing = neon_ls_element_type[op].spacing;
223
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
126
- if (size == 3 && (interleave | spacing) != 1)
224
.total_size = offsetof(struct target_rt_sigframe,
127
+ if (size == 3 && (interleave | spacing) != 1) {
225
uc.tuc_mcontext.__reserved),
128
return 1;
226
};
227
- int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
228
+ int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
229
+ int sve_size = 0, za_size = 0;
230
struct target_rt_sigframe *frame;
231
struct target_rt_frame_record *fr;
232
abi_ulong frame_addr, return_addr;
233
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
234
&layout);
235
236
/* SVE state needs saving only if it exists. */
237
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
238
- vq = sve_vq(env);
239
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
240
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
241
+ cpu_isar_feature(aa64_sme, env_archcpu(env))) {
242
+ sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
243
sve_ofs = alloc_sigframe_space(sve_size, &layout);
244
}
245
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
246
+ /* ZA state needs saving only if it is enabled. */
247
+ if (FIELD_EX64(env->svcr, SVCR, ZA)) {
248
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
249
+ } else {
250
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
129
+ }
251
+ }
130
+ tmp64 = tcg_temp_new_i64();
252
+ za_ofs = alloc_sigframe_space(za_size, &layout);
131
addr = tcg_temp_new_i32();
253
+ }
132
+ tmp2 = tcg_const_i32(1 << size);
254
133
load_reg_var(s, addr, rn);
255
if (layout.extra_ofs) {
134
- stride = (1 << size) * interleave;
256
/* Reserve space for the extra end marker. The standard end marker
135
for (reg = 0; reg < nregs; reg++) {
257
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
136
- if (interleave > 2 || (interleave == 2 && nregs == 2)) {
258
target_setup_end_record((void *)frame + layout.extra_end_ofs);
137
- load_reg_var(s, addr, rn);
259
}
138
- tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
260
if (sve_ofs) {
139
- } else if (interleave == 2 && nregs == 4 && reg == 2) {
261
- target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
140
- load_reg_var(s, addr, rn);
262
+ target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
141
- tcg_gen_addi_i32(addr, addr, 1 << size);
263
+ }
142
- }
264
+ if (za_ofs) {
143
- if (size == 3) {
265
+ target_setup_za_record((void *)frame + za_ofs, env, za_size);
144
- tmp64 = tcg_temp_new_i64();
266
}
145
- if (load) {
267
146
- gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
268
/* Set up the stack frame for unwinding. */
147
- neon_store_reg64(tmp64, rd);
269
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
148
- } else {
270
env->btype = 2;
149
- neon_load_reg64(tmp64, rd);
271
}
150
- gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
272
151
- }
273
+ /*
152
- tcg_temp_free_i64(tmp64);
274
+ * Invoke the signal handler with both SM and ZA disabled.
153
- tcg_gen_addi_i32(addr, addr, stride);
275
+ * When clearing SM, ResetSVEState, per SMSTOP.
154
- } else {
276
+ */
155
- for (pass = 0; pass < 2; pass++) {
277
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
156
- if (size == 2) {
278
+ arm_reset_sve_state(env);
157
- if (load) {
279
+ }
158
- tmp = tcg_temp_new_i32();
280
+ if (env->svcr) {
159
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
281
+ env->svcr = 0;
160
- neon_store_reg(rd, pass, tmp);
282
+ arm_rebuild_hflags(env);
161
- } else {
283
+ }
162
- tmp = neon_load_reg(rd, pass);
284
+
163
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
285
if (info) {
164
- tcg_temp_free_i32(tmp);
286
tswap_siginfo(&frame->info, info);
165
- }
287
env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
166
- tcg_gen_addi_i32(addr, addr, stride);
167
- } else if (size == 1) {
168
- if (load) {
169
- tmp = tcg_temp_new_i32();
170
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
171
- tcg_gen_addi_i32(addr, addr, stride);
172
- tmp2 = tcg_temp_new_i32();
173
- gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
174
- tcg_gen_addi_i32(addr, addr, stride);
175
- tcg_gen_shli_i32(tmp2, tmp2, 16);
176
- tcg_gen_or_i32(tmp, tmp, tmp2);
177
- tcg_temp_free_i32(tmp2);
178
- neon_store_reg(rd, pass, tmp);
179
- } else {
180
- tmp = neon_load_reg(rd, pass);
181
- tmp2 = tcg_temp_new_i32();
182
- tcg_gen_shri_i32(tmp2, tmp, 16);
183
- gen_aa32_st16(s, tmp, addr, get_mem_index(s));
184
- tcg_temp_free_i32(tmp);
185
- tcg_gen_addi_i32(addr, addr, stride);
186
- gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
187
- tcg_temp_free_i32(tmp2);
188
- tcg_gen_addi_i32(addr, addr, stride);
189
- }
190
- } else /* size == 0 */ {
191
- if (load) {
192
- tmp2 = NULL;
193
- for (n = 0; n < 4; n++) {
194
- tmp = tcg_temp_new_i32();
195
- gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
196
- tcg_gen_addi_i32(addr, addr, stride);
197
- if (n == 0) {
198
- tmp2 = tmp;
199
- } else {
200
- tcg_gen_shli_i32(tmp, tmp, n * 8);
201
- tcg_gen_or_i32(tmp2, tmp2, tmp);
202
- tcg_temp_free_i32(tmp);
203
- }
204
- }
205
- neon_store_reg(rd, pass, tmp2);
206
- } else {
207
- tmp2 = neon_load_reg(rd, pass);
208
- for (n = 0; n < 4; n++) {
209
- tmp = tcg_temp_new_i32();
210
- if (n == 0) {
211
- tcg_gen_mov_i32(tmp, tmp2);
212
- } else {
213
- tcg_gen_shri_i32(tmp, tmp2, n * 8);
214
- }
215
- gen_aa32_st8(s, tmp, addr, get_mem_index(s));
216
- tcg_temp_free_i32(tmp);
217
- tcg_gen_addi_i32(addr, addr, stride);
218
- }
219
- tcg_temp_free_i32(tmp2);
220
- }
221
+ for (n = 0; n < 8 >> size; n++) {
222
+ int xs;
223
+ for (xs = 0; xs < interleave; xs++) {
224
+ int tt = rd + reg + spacing * xs;
225
+
226
+ if (load) {
227
+ gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
228
+ neon_store_element64(tt, n, size, tmp64);
229
+ } else {
230
+ neon_load_element64(tmp64, tt, n, size);
231
+ gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
232
}
233
+ tcg_gen_add_i32(addr, addr, tmp2);
234
}
235
}
236
- rd += spacing;
237
}
238
tcg_temp_free_i32(addr);
239
- stride = nregs * 8;
240
+ tcg_temp_free_i32(tmp2);
241
+ tcg_temp_free_i64(tmp64);
242
+ stride = nregs * interleave * 8;
243
} else {
244
size = (insn >> 10) & 3;
245
if (size == 3) {
246
--
288
--
247
2.19.1
289
2.25.1
248
249
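(The ZA record sizing above follows directly from SVL: the payload is an
SVL x SVL byte matrix after a 16-byte header, and a header-only record is
the "clear PSTATE.ZA" case. A compilable sketch of that arithmetic, where
VQ_BYTES and ZA_HDR are stand-ins for TARGET_SVE_VQ_BYTES and the aligned
header size in the patch:)

    #include <stdint.h>
    #include <stdio.h>

    #define VQ_BYTES 16          /* stand-in for TARGET_SVE_VQ_BYTES */
    #define ZA_HDR   16          /* header, already VQ_BYTES-aligned */

    /* Mirrors TARGET_ZA_SIG_CONTEXT_SIZE(): header + SVL x SVL bytes. */
    static uint32_t za_context_size(int vq)
    {
        uint32_t svl = vq * VQ_BYTES;
        return ZA_HDR + svl * svl;
    }

    int main(void)
    {
        for (int vq = 1; vq <= 4; vq++) {
            printf("vq=%d: SVL=%d bytes, ZA record=%u bytes\n",
                   vq, vq * VQ_BYTES, za_context_size(vq));
        }
        /* vq=0: header-only record, used to clear PSTATE.ZA */
        printf("empty record=%u bytes\n", za_context_size(0));
        return 0;
    }
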
1
From: Stewart Hildebrand <Stewart.Hildebrand@dornerworks.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
"The Image must be placed text_offset bytes from a 2MB aligned base
3
Add "sve" to the sve prctl functions, to distinguish
4
address anywhere in usable system RAM and called there."
4
them from the coming "sme" prctls with similar names.
5
5
6
For the virt board, we write our startup bootloader at the very
7
bottom of RAM, so that bit can't be used for the image. To avoid
8
overlap in case the image requests to be loaded at an offset
9
smaller than our bootloader, we increment the load offset to the
10
next 2MB.
11
12
This fixes a boot failure for Xen AArch64.
13
14
Signed-off-by: Stewart Hildebrand <stewart.hildebrand@dornerworks.com>
15
Tested-by: Andre Przywara <andre.przywara@arm.com>
16
Message-id: b8a89518794b4436af0c151ed10de4fa@dornerworks.com
17
[PMM: Rephrased a comment a bit]
18
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-42-richard.henderson@linaro.org
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
---
10
---
21
hw/arm/boot.c | 18 ++++++++++++++++++
11
linux-user/aarch64/target_prctl.h | 8 ++++----
22
1 file changed, 18 insertions(+)
12
linux-user/syscall.c | 12 ++++++------
13
2 files changed, 10 insertions(+), 10 deletions(-)
23
14
24
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
15
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
25
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
26
--- a/hw/arm/boot.c
17
--- a/linux-user/aarch64/target_prctl.h
27
+++ b/hw/arm/boot.c
18
+++ b/linux-user/aarch64/target_prctl.h
28
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@
29
#include "qemu/config-file.h"
20
#ifndef AARCH64_TARGET_PRCTL_H
30
#include "qemu/option.h"
21
#define AARCH64_TARGET_PRCTL_H
31
#include "exec/address-spaces.h"
22
32
+#include "qemu/units.h"
23
-static abi_long do_prctl_get_vl(CPUArchState *env)
33
24
+static abi_long do_prctl_sve_get_vl(CPUArchState *env)
34
/* Kernel boot protocol is specified in the kernel docs
35
* Documentation/arm/Booting and Documentation/arm64/booting.txt
36
@@ -XXX,XX +XXX,XX @@
37
#define ARM64_TEXT_OFFSET_OFFSET 8
38
#define ARM64_MAGIC_OFFSET 56
39
40
+#define BOOTLOADER_MAX_SIZE (4 * KiB)
41
+
42
AddressSpace *arm_boot_address_space(ARMCPU *cpu,
43
const struct arm_boot_info *info)
44
{
25
{
45
@@ -XXX,XX +XXX,XX @@ static void write_bootloader(const char *name, hwaddr addr,
26
ARMCPU *cpu = env_archcpu(env);
46
code[i] = tswap32(insn);
27
if (cpu_isar_feature(aa64_sve, cpu)) {
28
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_get_vl(CPUArchState *env)
47
}
29
}
48
30
return -TARGET_EINVAL;
49
+ assert((len * sizeof(uint32_t)) < BOOTLOADER_MAX_SIZE);
31
}
50
+
32
-#define do_prctl_get_vl do_prctl_get_vl
51
rom_add_blob_fixed_as(name, code, len * sizeof(uint32_t), addr, as);
33
+#define do_prctl_sve_get_vl do_prctl_sve_get_vl
52
34
53
g_free(code);
35
-static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
54
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
36
+static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
55
memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals));
37
{
56
if (hdrvals[1] != 0) {
38
/*
57
kernel_load_offset = le64_to_cpu(hdrvals[0]);
39
* We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
58
+
40
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
59
+ /*
60
+ * We write our startup "bootloader" at the very bottom of RAM,
61
+ * so that bit can't be used for the image. Luckily the Image
62
+ * format specification is that the image requests only an offset
63
+ * from a 2MB boundary, not an absolute load address. So if the
64
+ * image requests an offset that might mean it overlaps with the
65
+ * bootloader, we can just load it starting at 2MB+offset rather
66
+ * than 0MB + offset.
67
+ */
68
+ if (kernel_load_offset < BOOTLOADER_MAX_SIZE) {
69
+ kernel_load_offset += 2 * MiB;
70
+ }
71
}
72
}
41
}
73
42
return -TARGET_EINVAL;
43
}
44
-#define do_prctl_set_vl do_prctl_set_vl
45
+#define do_prctl_sve_set_vl do_prctl_sve_set_vl
46
47
static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
48
{
49
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/linux-user/syscall.c
52
+++ b/linux-user/syscall.c
53
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
54
#ifndef do_prctl_set_fp_mode
55
#define do_prctl_set_fp_mode do_prctl_inval1
56
#endif
57
-#ifndef do_prctl_get_vl
58
-#define do_prctl_get_vl do_prctl_inval0
59
+#ifndef do_prctl_sve_get_vl
60
+#define do_prctl_sve_get_vl do_prctl_inval0
61
#endif
62
-#ifndef do_prctl_set_vl
63
-#define do_prctl_set_vl do_prctl_inval1
64
+#ifndef do_prctl_sve_set_vl
65
+#define do_prctl_sve_set_vl do_prctl_inval1
66
#endif
67
#ifndef do_prctl_reset_keys
68
#define do_prctl_reset_keys do_prctl_inval1
69
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
70
case PR_SET_FP_MODE:
71
return do_prctl_set_fp_mode(env, arg2);
72
case PR_SVE_GET_VL:
73
- return do_prctl_get_vl(env);
74
+ return do_prctl_sve_get_vl(env);
75
case PR_SVE_SET_VL:
76
- return do_prctl_set_vl(env, arg2);
77
+ return do_prctl_sve_set_vl(env, arg2);
78
case PR_PAC_RESET_KEYS:
79
if (arg3 || arg4 || arg5) {
80
return -TARGET_EINVAL;
74
--
81
--
75
2.19.1
82
2.25.1
76
77
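(The boot.c change amounts to one comparison: if the Image's requested
text_offset could land on our bootloader at the bottom of RAM, shift the
load point up by one 2MB step. A minimal sketch, with BOOTLOADER_MAX_SIZE
taken from the patch; the wrapper function name is illustrative:)

    #include <stdint.h>
    #include <stdio.h>

    #define KiB 1024
    #define MiB (1024 * 1024)
    #define BOOTLOADER_MAX_SIZE (4 * KiB)   /* from the patch */

    /* The Image header only fixes an offset from a 2MB-aligned base, so
     * an offset that would overlap the bootloader at 0 can be honoured
     * at the next 2MB boundary instead. */
    static uint64_t adjust_kernel_load_offset(uint64_t text_offset)
    {
        if (text_offset < BOOTLOADER_MAX_SIZE) {
            text_offset += 2 * MiB;
        }
        return text_offset;
    }

    int main(void)
    {
        /* Typical Linux text_offset: unchanged.  Xen-style 0: bumped. */
        printf("0x80000 -> 0x%jx\n",
               (uintmax_t)adjust_kernel_load_offset(0x80000));
        printf("0x0     -> 0x%jx\n",
               (uintmax_t)adjust_kernel_load_offset(0));
        return 0;
    }
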
1
If the HCR_EL2 PTW virtualization configuration register bit
1
From: Richard Henderson <richard.henderson@linaro.org>
2
is set, then this means that a stage 2 Permission fault must
3
be generated if a stage 1 translation table access is made
4
to an address that is mapped as Device memory in stage 2.
5
Implement this.
6
2
3
These prctls set the Streaming SVE vector length, which may
4
be completely different from the Normal SVE vector length.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-43-richard.henderson@linaro.org
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20181012144235.19646-8-peter.maydell@linaro.org
10
---
10
---
11
target/arm/helper.c | 21 ++++++++++++++++++++-
11
linux-user/aarch64/target_prctl.h | 54 +++++++++++++++++++++++++++++++
12
1 file changed, 20 insertions(+), 1 deletion(-)
12
linux-user/syscall.c | 16 +++++++++
13
2 files changed, 70 insertions(+)
13
14
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
17
--- a/linux-user/aarch64/target_prctl.h
17
+++ b/target/arm/helper.c
18
+++ b/linux-user/aarch64/target_prctl.h
18
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
19
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_get_vl(CPUArchState *env)
19
hwaddr s2pa;
20
{
20
int s2prot;
21
ARMCPU *cpu = env_archcpu(env);
21
int ret;
22
if (cpu_isar_feature(aa64_sve, cpu)) {
22
+ ARMCacheAttrs cacheattrs = {};
23
+ /* PSTATE.SM is always unset on syscall entry. */
23
+ ARMCacheAttrs *pcacheattrs = NULL;
24
return sve_vq(env) * 16;
25
}
26
return -TARGET_EINVAL;
27
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
28
&& arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
29
uint32_t vq, old_vq;
30
31
+ /* PSTATE.SM is always unset on syscall entry. */
32
old_vq = sve_vq(env);
33
34
/*
35
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
36
}
37
#define do_prctl_sve_set_vl do_prctl_sve_set_vl
38
39
+static abi_long do_prctl_sme_get_vl(CPUArchState *env)
40
+{
41
+ ARMCPU *cpu = env_archcpu(env);
42
+ if (cpu_isar_feature(aa64_sme, cpu)) {
43
+ return sme_vq(env) * 16;
44
+ }
45
+ return -TARGET_EINVAL;
46
+}
47
+#define do_prctl_sme_get_vl do_prctl_sme_get_vl
24
+
48
+
25
+ if (env->cp15.hcr_el2 & HCR_PTW) {
49
+static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
50
+{
51
+ /*
52
+ * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
53
+ * Note the kernel definition of sve_vl_valid allows for VQ=512,
54
+ * i.e. VL=8192, even though the architectural maximum is VQ=16.
55
+ */
56
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))
57
+ && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
58
+ int vq, old_vq;
59
+
60
+ old_vq = sme_vq(env);
61
+
62
+ /*
63
+ * Bound the value of vq, so that we know that it fits into
64
+ * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared
65
+ * on syscall entry, we are not modifying the current SVE
66
+ * vector length.
67
+ */
68
+ vq = MAX(arg2 / 16, 1);
69
+ vq = MIN(vq, 16);
70
+ env->vfp.smcr_el[1] =
71
+ FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);
72
+
73
+ /* Delay rebuilding hflags until we know if ZA must change. */
74
+ vq = sve_vqm1_for_el_sm(env, 0, true) + 1;
75
+
76
+ if (vq != old_vq) {
26
+ /*
77
+ /*
27
+ * PTW means we must fault if this S1 walk touches S2 Device
78
+ * PSTATE.ZA state is cleared on any change to SVL.
28
+ * memory; otherwise we don't care about the attributes and can
79
+ * We need not call arm_rebuild_hflags because PSTATE.SM was
29
+ * save the S2 translation the effort of computing them.
80
+ * cleared on syscall entry, so this hasn't changed VL.
30
+ */
81
+ */
31
+ pcacheattrs = &cacheattrs;
82
+ env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
83
+ arm_rebuild_hflags(env);
32
+ }
84
+ }
33
85
+ return vq * 16;
34
ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
86
+ }
35
- &txattrs, &s2prot, &s2size, fi, NULL);
87
+ return -TARGET_EINVAL;
36
+ &txattrs, &s2prot, &s2size, fi, pcacheattrs);
88
+}
37
if (ret) {
89
+#define do_prctl_sme_set_vl do_prctl_sme_set_vl
38
assert(fi->type != ARMFault_None);
90
+
39
fi->s2addr = addr;
91
static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
40
@@ -XXX,XX +XXX,XX @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
92
{
41
fi->s1ptw = true;
93
ARMCPU *cpu = env_archcpu(env);
42
return ~0;
94
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
43
}
95
index XXXXXXX..XXXXXXX 100644
44
+ if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
96
--- a/linux-user/syscall.c
45
+ /* Access was to Device memory: generate Permission fault */
97
+++ b/linux-user/syscall.c
46
+ fi->type = ARMFault_Permission;
98
@@ -XXX,XX +XXX,XX @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
47
+ fi->s2addr = addr;
99
#ifndef PR_SET_SYSCALL_USER_DISPATCH
48
+ fi->stage2 = true;
100
# define PR_SET_SYSCALL_USER_DISPATCH 59
49
+ fi->s1ptw = true;
101
#endif
50
+ return ~0;
102
+#ifndef PR_SME_SET_VL
51
+ }
103
+# define PR_SME_SET_VL 63
52
addr = s2pa;
104
+# define PR_SME_GET_VL 64
53
}
105
+# define PR_SME_VL_LEN_MASK 0xffff
54
return addr;
106
+# define PR_SME_VL_INHERIT (1 << 17)
107
+#endif
108
109
#include "target_prctl.h"
110
111
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
112
#ifndef do_prctl_set_unalign
113
#define do_prctl_set_unalign do_prctl_inval1
114
#endif
115
+#ifndef do_prctl_sme_get_vl
116
+#define do_prctl_sme_get_vl do_prctl_inval0
117
+#endif
118
+#ifndef do_prctl_sme_set_vl
119
+#define do_prctl_sme_set_vl do_prctl_inval1
120
+#endif
121
122
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
123
abi_long arg3, abi_long arg4, abi_long arg5)
124
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
125
return do_prctl_sve_get_vl(env);
126
case PR_SVE_SET_VL:
127
return do_prctl_sve_set_vl(env, arg2);
128
+ case PR_SME_GET_VL:
129
+ return do_prctl_sme_get_vl(env);
130
+ case PR_SME_SET_VL:
131
+ return do_prctl_sme_set_vl(env, arg2);
132
case PR_PAC_RESET_KEYS:
133
if (arg3 || arg4 || arg5) {
134
return -TARGET_EINVAL;
55
--
135
--
56
2.19.1
136
2.25.1
57
58
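(The interesting part of do_prctl_sme_set_vl() above is the clamp of the
requested length into SMCR_EL1.LEN's 4-bit range. The sketch below
reproduces just that arithmetic; the real code then writes the field and
re-derives the effective vq against the vector lengths the CPU actually
supports:)

    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* The requested VL is in bytes; SMCR_EL1.LEN holds vq - 1 in 4 bits,
     * so vq is forced into [1, 16] before the field is written. */
    static int clamp_sme_vq(long vl_bytes)
    {
        int vq = MAX(vl_bytes / 16, 1);
        return MIN(vq, 16);
    }

    int main(void)
    {
        printf("vl=16   -> vq=%d\n", clamp_sme_vq(16));    /* 1 */
        printf("vl=256  -> vq=%d\n", clamp_sme_vq(256));   /* 16 */
        printf("vl=8192 -> vq=%d\n", clamp_sme_vq(8192));  /* 16, the
                                          kernel's maximum valid request */
        return 0;
    }
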
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Create struct ARMISARegisters, to be accessed during translation.
3
There's no reason to set CPACR_EL1.ZEN if SVE is disabled.
4
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20181016223115.24100-2-richard.henderson@linaro.org
7
Message-id: 20220708151540.18136-44-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
9
---
10
target/arm/cpu.h | 32 ++++----
10
target/arm/cpu.c | 7 +++----
11
hw/intc/armv7m_nvic.c | 12 +--
11
1 file changed, 3 insertions(+), 4 deletions(-)
12
target/arm/cpu.c | 178 +++++++++++++++++++++---------------------
13
target/arm/cpu64.c | 70 ++++++++---------
14
target/arm/helper.c | 28 +++----
15
5 files changed, 162 insertions(+), 158 deletions(-)
16
12
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
20
+++ b/target/arm/cpu.h
21
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
22
* ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
23
* is used for reset values of non-constant registers; no reset_
24
* prefix means a constant register.
25
+ * Some of these registers are split out into a substructure that
26
+ * is shared with the translators to control the ISA.
27
*/
28
+ struct ARMISARegisters {
29
+ uint32_t id_isar0;
30
+ uint32_t id_isar1;
31
+ uint32_t id_isar2;
32
+ uint32_t id_isar3;
33
+ uint32_t id_isar4;
34
+ uint32_t id_isar5;
35
+ uint32_t id_isar6;
36
+ uint32_t mvfr0;
37
+ uint32_t mvfr1;
38
+ uint32_t mvfr2;
39
+ uint64_t id_aa64isar0;
40
+ uint64_t id_aa64isar1;
41
+ uint64_t id_aa64pfr0;
42
+ uint64_t id_aa64pfr1;
43
+ } isar;
44
uint32_t midr;
45
uint32_t revidr;
46
uint32_t reset_fpsid;
47
- uint32_t mvfr0;
48
- uint32_t mvfr1;
49
- uint32_t mvfr2;
50
uint32_t ctr;
51
uint32_t reset_sctlr;
52
uint32_t id_pfr0;
53
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
54
uint32_t id_mmfr2;
55
uint32_t id_mmfr3;
56
uint32_t id_mmfr4;
57
- uint32_t id_isar0;
58
- uint32_t id_isar1;
59
- uint32_t id_isar2;
60
- uint32_t id_isar3;
61
- uint32_t id_isar4;
62
- uint32_t id_isar5;
63
- uint32_t id_isar6;
64
- uint64_t id_aa64pfr0;
65
- uint64_t id_aa64pfr1;
66
uint64_t id_aa64dfr0;
67
uint64_t id_aa64dfr1;
68
uint64_t id_aa64afr0;
69
uint64_t id_aa64afr1;
70
- uint64_t id_aa64isar0;
71
- uint64_t id_aa64isar1;
72
uint64_t id_aa64mmfr0;
73
uint64_t id_aa64mmfr1;
74
uint32_t dbgdidr;
75
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
76
index XXXXXXX..XXXXXXX 100644
77
--- a/hw/intc/armv7m_nvic.c
78
+++ b/hw/intc/armv7m_nvic.c
79
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
80
case 0xd5c: /* MMFR3. */
81
return cpu->id_mmfr3;
82
case 0xd60: /* ISAR0. */
83
- return cpu->id_isar0;
84
+ return cpu->isar.id_isar0;
85
case 0xd64: /* ISAR1. */
86
- return cpu->id_isar1;
87
+ return cpu->isar.id_isar1;
88
case 0xd68: /* ISAR2. */
89
- return cpu->id_isar2;
90
+ return cpu->isar.id_isar2;
91
case 0xd6c: /* ISAR3. */
92
- return cpu->id_isar3;
93
+ return cpu->isar.id_isar3;
94
case 0xd70: /* ISAR4. */
95
- return cpu->id_isar4;
96
+ return cpu->isar.id_isar4;
97
case 0xd74: /* ISAR5. */
98
- return cpu->id_isar5;
99
+ return cpu->isar.id_isar5;
100
case 0xd78: /* CLIDR */
101
return cpu->clidr;
102
case 0xd7c: /* CTR */
103
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
13
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
104
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
105
--- a/target/arm/cpu.c
15
--- a/target/arm/cpu.c
106
+++ b/target/arm/cpu.c
16
+++ b/target/arm/cpu.c
107
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
17
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
108
g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
18
/* and to the FP/Neon instructions */
109
19
env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
110
env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
20
CPACR_EL1, FPEN, 3);
111
- env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
21
- /* and to the SVE instructions */
112
- env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
22
- env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
113
- env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
23
- CPACR_EL1, ZEN, 3);
114
+ env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
24
- /* with reasonable vector length */
115
+ env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
25
+ /* and to the SVE instructions, with default vector length */
116
+ env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;
26
if (cpu_isar_feature(aa64_sve, cpu)) {
117
27
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
118
cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON;
28
+ CPACR_EL1, ZEN, 3);
119
s->halted = cpu->start_powered_off;
29
env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
120
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
30
}
121
* registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
31
/*
122
*/
123
cpu->id_pfr1 &= ~0xf0;
124
- cpu->id_aa64pfr0 &= ~0xf000;
125
+ cpu->isar.id_aa64pfr0 &= ~0xf000;
126
}
127
128
if (!cpu->has_el2) {
129
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
130
* registers if we don't have EL2. These are id_pfr1[15:12] and
131
* id_aa64pfr0_el1[11:8].
132
*/
133
- cpu->id_aa64pfr0 &= ~0xf00;
134
+ cpu->isar.id_aa64pfr0 &= ~0xf00;
135
cpu->id_pfr1 &= ~0xf000;
136
}
137
138
@@ -XXX,XX +XXX,XX @@ static void arm1136_r2_initfn(Object *obj)
139
set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
140
cpu->midr = 0x4107b362;
141
cpu->reset_fpsid = 0x410120b4;
142
- cpu->mvfr0 = 0x11111111;
143
- cpu->mvfr1 = 0x00000000;
144
+ cpu->isar.mvfr0 = 0x11111111;
145
+ cpu->isar.mvfr1 = 0x00000000;
146
cpu->ctr = 0x1dd20d2;
147
cpu->reset_sctlr = 0x00050078;
148
cpu->id_pfr0 = 0x111;
149
@@ -XXX,XX +XXX,XX @@ static void arm1136_r2_initfn(Object *obj)
150
cpu->id_mmfr0 = 0x01130003;
151
cpu->id_mmfr1 = 0x10030302;
152
cpu->id_mmfr2 = 0x01222110;
153
- cpu->id_isar0 = 0x00140011;
154
- cpu->id_isar1 = 0x12002111;
155
- cpu->id_isar2 = 0x11231111;
156
- cpu->id_isar3 = 0x01102131;
157
- cpu->id_isar4 = 0x141;
158
+ cpu->isar.id_isar0 = 0x00140011;
159
+ cpu->isar.id_isar1 = 0x12002111;
160
+ cpu->isar.id_isar2 = 0x11231111;
161
+ cpu->isar.id_isar3 = 0x01102131;
162
+ cpu->isar.id_isar4 = 0x141;
163
cpu->reset_auxcr = 7;
164
}
165
166
@@ -XXX,XX +XXX,XX @@ static void arm1136_initfn(Object *obj)
167
set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
168
cpu->midr = 0x4117b363;
169
cpu->reset_fpsid = 0x410120b4;
170
- cpu->mvfr0 = 0x11111111;
171
- cpu->mvfr1 = 0x00000000;
172
+ cpu->isar.mvfr0 = 0x11111111;
173
+ cpu->isar.mvfr1 = 0x00000000;
174
cpu->ctr = 0x1dd20d2;
175
cpu->reset_sctlr = 0x00050078;
176
cpu->id_pfr0 = 0x111;
177
@@ -XXX,XX +XXX,XX @@ static void arm1136_initfn(Object *obj)
178
cpu->id_mmfr0 = 0x01130003;
179
cpu->id_mmfr1 = 0x10030302;
180
cpu->id_mmfr2 = 0x01222110;
181
- cpu->id_isar0 = 0x00140011;
182
- cpu->id_isar1 = 0x12002111;
183
- cpu->id_isar2 = 0x11231111;
184
- cpu->id_isar3 = 0x01102131;
185
- cpu->id_isar4 = 0x141;
186
+ cpu->isar.id_isar0 = 0x00140011;
187
+ cpu->isar.id_isar1 = 0x12002111;
188
+ cpu->isar.id_isar2 = 0x11231111;
189
+ cpu->isar.id_isar3 = 0x01102131;
190
+ cpu->isar.id_isar4 = 0x141;
191
cpu->reset_auxcr = 7;
192
}
193
194
@@ -XXX,XX +XXX,XX @@ static void arm1176_initfn(Object *obj)
195
set_feature(&cpu->env, ARM_FEATURE_EL3);
196
cpu->midr = 0x410fb767;
197
cpu->reset_fpsid = 0x410120b5;
198
- cpu->mvfr0 = 0x11111111;
199
- cpu->mvfr1 = 0x00000000;
200
+ cpu->isar.mvfr0 = 0x11111111;
201
+ cpu->isar.mvfr1 = 0x00000000;
202
cpu->ctr = 0x1dd20d2;
203
cpu->reset_sctlr = 0x00050078;
204
cpu->id_pfr0 = 0x111;
205
@@ -XXX,XX +XXX,XX @@ static void arm1176_initfn(Object *obj)
206
cpu->id_mmfr0 = 0x01130003;
207
cpu->id_mmfr1 = 0x10030302;
208
cpu->id_mmfr2 = 0x01222100;
209
- cpu->id_isar0 = 0x0140011;
210
- cpu->id_isar1 = 0x12002111;
211
- cpu->id_isar2 = 0x11231121;
212
- cpu->id_isar3 = 0x01102131;
213
- cpu->id_isar4 = 0x01141;
214
+ cpu->isar.id_isar0 = 0x0140011;
215
+ cpu->isar.id_isar1 = 0x12002111;
216
+ cpu->isar.id_isar2 = 0x11231121;
217
+ cpu->isar.id_isar3 = 0x01102131;
218
+ cpu->isar.id_isar4 = 0x01141;
219
cpu->reset_auxcr = 7;
220
}
221
222
@@ -XXX,XX +XXX,XX @@ static void arm11mpcore_initfn(Object *obj)
223
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
224
cpu->midr = 0x410fb022;
225
cpu->reset_fpsid = 0x410120b4;
226
- cpu->mvfr0 = 0x11111111;
227
- cpu->mvfr1 = 0x00000000;
228
+ cpu->isar.mvfr0 = 0x11111111;
229
+ cpu->isar.mvfr1 = 0x00000000;
230
cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
231
cpu->id_pfr0 = 0x111;
232
cpu->id_pfr1 = 0x1;
233
@@ -XXX,XX +XXX,XX @@ static void arm11mpcore_initfn(Object *obj)
234
cpu->id_mmfr0 = 0x01100103;
235
cpu->id_mmfr1 = 0x10020302;
236
cpu->id_mmfr2 = 0x01222000;
237
- cpu->id_isar0 = 0x00100011;
238
- cpu->id_isar1 = 0x12002111;
239
- cpu->id_isar2 = 0x11221011;
240
- cpu->id_isar3 = 0x01102131;
241
- cpu->id_isar4 = 0x141;
242
+ cpu->isar.id_isar0 = 0x00100011;
243
+ cpu->isar.id_isar1 = 0x12002111;
244
+ cpu->isar.id_isar2 = 0x11221011;
245
+ cpu->isar.id_isar3 = 0x01102131;
246
+ cpu->isar.id_isar4 = 0x141;
247
cpu->reset_auxcr = 1;
248
}
249
250
@@ -XXX,XX +XXX,XX @@ static void cortex_m3_initfn(Object *obj)
251
cpu->id_mmfr1 = 0x00000000;
252
cpu->id_mmfr2 = 0x00000000;
253
cpu->id_mmfr3 = 0x00000000;
254
- cpu->id_isar0 = 0x01141110;
255
- cpu->id_isar1 = 0x02111000;
256
- cpu->id_isar2 = 0x21112231;
257
- cpu->id_isar3 = 0x01111110;
258
- cpu->id_isar4 = 0x01310102;
259
- cpu->id_isar5 = 0x00000000;
260
- cpu->id_isar6 = 0x00000000;
261
+ cpu->isar.id_isar0 = 0x01141110;
262
+ cpu->isar.id_isar1 = 0x02111000;
263
+ cpu->isar.id_isar2 = 0x21112231;
264
+ cpu->isar.id_isar3 = 0x01111110;
265
+ cpu->isar.id_isar4 = 0x01310102;
266
+ cpu->isar.id_isar5 = 0x00000000;
267
+ cpu->isar.id_isar6 = 0x00000000;
268
}
269
270
static void cortex_m4_initfn(Object *obj)
271
@@ -XXX,XX +XXX,XX @@ static void cortex_m4_initfn(Object *obj)
272
cpu->id_mmfr1 = 0x00000000;
273
cpu->id_mmfr2 = 0x00000000;
274
cpu->id_mmfr3 = 0x00000000;
275
- cpu->id_isar0 = 0x01141110;
276
- cpu->id_isar1 = 0x02111000;
277
- cpu->id_isar2 = 0x21112231;
278
- cpu->id_isar3 = 0x01111110;
279
- cpu->id_isar4 = 0x01310102;
280
- cpu->id_isar5 = 0x00000000;
281
- cpu->id_isar6 = 0x00000000;
282
+ cpu->isar.id_isar0 = 0x01141110;
283
+ cpu->isar.id_isar1 = 0x02111000;
284
+ cpu->isar.id_isar2 = 0x21112231;
285
+ cpu->isar.id_isar3 = 0x01111110;
286
+ cpu->isar.id_isar4 = 0x01310102;
287
+ cpu->isar.id_isar5 = 0x00000000;
288
+ cpu->isar.id_isar6 = 0x00000000;
289
}
290
291
static void cortex_m33_initfn(Object *obj)
292
@@ -XXX,XX +XXX,XX @@ static void cortex_m33_initfn(Object *obj)
293
cpu->id_mmfr1 = 0x00000000;
294
cpu->id_mmfr2 = 0x01000000;
295
cpu->id_mmfr3 = 0x00000000;
296
- cpu->id_isar0 = 0x01101110;
297
- cpu->id_isar1 = 0x02212000;
298
- cpu->id_isar2 = 0x20232232;
299
- cpu->id_isar3 = 0x01111131;
300
- cpu->id_isar4 = 0x01310132;
301
- cpu->id_isar5 = 0x00000000;
302
- cpu->id_isar6 = 0x00000000;
303
+ cpu->isar.id_isar0 = 0x01101110;
304
+ cpu->isar.id_isar1 = 0x02212000;
305
+ cpu->isar.id_isar2 = 0x20232232;
306
+ cpu->isar.id_isar3 = 0x01111131;
307
+ cpu->isar.id_isar4 = 0x01310132;
308
+ cpu->isar.id_isar5 = 0x00000000;
309
+ cpu->isar.id_isar6 = 0x00000000;
310
cpu->clidr = 0x00000000;
311
cpu->ctr = 0x8000c000;
312
}
313
@@ -XXX,XX +XXX,XX @@ static void cortex_r5_initfn(Object *obj)
314
cpu->id_mmfr1 = 0x00000000;
315
cpu->id_mmfr2 = 0x01200000;
316
cpu->id_mmfr3 = 0x0211;
317
- cpu->id_isar0 = 0x02101111;
318
- cpu->id_isar1 = 0x13112111;
319
- cpu->id_isar2 = 0x21232141;
320
- cpu->id_isar3 = 0x01112131;
321
- cpu->id_isar4 = 0x0010142;
322
- cpu->id_isar5 = 0x0;
323
- cpu->id_isar6 = 0x0;
324
+ cpu->isar.id_isar0 = 0x02101111;
325
+ cpu->isar.id_isar1 = 0x13112111;
326
+ cpu->isar.id_isar2 = 0x21232141;
327
+ cpu->isar.id_isar3 = 0x01112131;
328
+ cpu->isar.id_isar4 = 0x0010142;
329
+ cpu->isar.id_isar5 = 0x0;
330
+ cpu->isar.id_isar6 = 0x0;
331
cpu->mp_is_up = true;
332
cpu->pmsav7_dregion = 16;
333
define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
334
@@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj)
335
set_feature(&cpu->env, ARM_FEATURE_EL3);
336
cpu->midr = 0x410fc080;
337
cpu->reset_fpsid = 0x410330c0;
338
- cpu->mvfr0 = 0x11110222;
339
- cpu->mvfr1 = 0x00011111;
340
+ cpu->isar.mvfr0 = 0x11110222;
341
+ cpu->isar.mvfr1 = 0x00011111;
342
cpu->ctr = 0x82048004;
343
cpu->reset_sctlr = 0x00c50078;
344
cpu->id_pfr0 = 0x1031;
345
@@ -XXX,XX +XXX,XX @@ static void cortex_a8_initfn(Object *obj)
346
cpu->id_mmfr1 = 0x20000000;
347
cpu->id_mmfr2 = 0x01202000;
348
cpu->id_mmfr3 = 0x11;
349
- cpu->id_isar0 = 0x00101111;
350
- cpu->id_isar1 = 0x12112111;
351
- cpu->id_isar2 = 0x21232031;
352
- cpu->id_isar3 = 0x11112131;
353
- cpu->id_isar4 = 0x00111142;
354
+ cpu->isar.id_isar0 = 0x00101111;
355
+ cpu->isar.id_isar1 = 0x12112111;
356
+ cpu->isar.id_isar2 = 0x21232031;
357
+ cpu->isar.id_isar3 = 0x11112131;
358
+ cpu->isar.id_isar4 = 0x00111142;
359
cpu->dbgdidr = 0x15141000;
360
cpu->clidr = (1 << 27) | (2 << 24) | 3;
361
cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
362
@@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj)
363
set_feature(&cpu->env, ARM_FEATURE_CBAR);
364
cpu->midr = 0x410fc090;
365
cpu->reset_fpsid = 0x41033090;
366
- cpu->mvfr0 = 0x11110222;
367
- cpu->mvfr1 = 0x01111111;
368
+ cpu->isar.mvfr0 = 0x11110222;
369
+ cpu->isar.mvfr1 = 0x01111111;
370
cpu->ctr = 0x80038003;
371
cpu->reset_sctlr = 0x00c50078;
372
cpu->id_pfr0 = 0x1031;
373
@@ -XXX,XX +XXX,XX @@ static void cortex_a9_initfn(Object *obj)
374
cpu->id_mmfr1 = 0x20000000;
375
cpu->id_mmfr2 = 0x01230000;
376
cpu->id_mmfr3 = 0x00002111;
377
- cpu->id_isar0 = 0x00101111;
378
- cpu->id_isar1 = 0x13112111;
379
- cpu->id_isar2 = 0x21232041;
380
- cpu->id_isar3 = 0x11112131;
381
- cpu->id_isar4 = 0x00111142;
382
+ cpu->isar.id_isar0 = 0x00101111;
383
+ cpu->isar.id_isar1 = 0x13112111;
384
+ cpu->isar.id_isar2 = 0x21232041;
385
+ cpu->isar.id_isar3 = 0x11112131;
386
+ cpu->isar.id_isar4 = 0x00111142;
387
cpu->dbgdidr = 0x35141000;
388
cpu->clidr = (1 << 27) | (1 << 24) | 3;
389
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
390
@@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj)
391
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
392
cpu->midr = 0x410fc075;
393
cpu->reset_fpsid = 0x41023075;
394
- cpu->mvfr0 = 0x10110222;
395
- cpu->mvfr1 = 0x11111111;
396
+ cpu->isar.mvfr0 = 0x10110222;
397
+ cpu->isar.mvfr1 = 0x11111111;
398
cpu->ctr = 0x84448003;
399
cpu->reset_sctlr = 0x00c50078;
400
cpu->id_pfr0 = 0x00001131;
401
@@ -XXX,XX +XXX,XX @@ static void cortex_a7_initfn(Object *obj)
402
/* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
403
* table 4-41 gives 0x02101110, which includes the arm div insns.
404
*/
405
- cpu->id_isar0 = 0x02101110;
406
- cpu->id_isar1 = 0x13112111;
407
- cpu->id_isar2 = 0x21232041;
408
- cpu->id_isar3 = 0x11112131;
409
- cpu->id_isar4 = 0x10011142;
410
+ cpu->isar.id_isar0 = 0x02101110;
411
+ cpu->isar.id_isar1 = 0x13112111;
412
+ cpu->isar.id_isar2 = 0x21232041;
413
+ cpu->isar.id_isar3 = 0x11112131;
414
+ cpu->isar.id_isar4 = 0x10011142;
415
cpu->dbgdidr = 0x3515f005;
416
cpu->clidr = 0x0a200023;
417
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
418
@@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj)
419
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
420
cpu->midr = 0x412fc0f1;
421
cpu->reset_fpsid = 0x410430f0;
422
- cpu->mvfr0 = 0x10110222;
423
- cpu->mvfr1 = 0x11111111;
424
+ cpu->isar.mvfr0 = 0x10110222;
425
+ cpu->isar.mvfr1 = 0x11111111;
426
cpu->ctr = 0x8444c004;
427
cpu->reset_sctlr = 0x00c50078;
428
cpu->id_pfr0 = 0x00001131;
429
@@ -XXX,XX +XXX,XX @@ static void cortex_a15_initfn(Object *obj)
430
cpu->id_mmfr1 = 0x20000000;
431
cpu->id_mmfr2 = 0x01240000;
432
cpu->id_mmfr3 = 0x02102211;
433
- cpu->id_isar0 = 0x02101110;
434
- cpu->id_isar1 = 0x13112111;
435
- cpu->id_isar2 = 0x21232041;
436
- cpu->id_isar3 = 0x11112131;
437
- cpu->id_isar4 = 0x10011142;
438
+ cpu->isar.id_isar0 = 0x02101110;
439
+ cpu->isar.id_isar1 = 0x13112111;
440
+ cpu->isar.id_isar2 = 0x21232041;
441
+ cpu->isar.id_isar3 = 0x11112131;
442
+ cpu->isar.id_isar4 = 0x10011142;
443
cpu->dbgdidr = 0x3515f021;
444
cpu->clidr = 0x0a200023;
445
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
446
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
447
index XXXXXXX..XXXXXXX 100644
448
--- a/target/arm/cpu64.c
449
+++ b/target/arm/cpu64.c
450
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
451
cpu->midr = 0x411fd070;
452
cpu->revidr = 0x00000000;
453
cpu->reset_fpsid = 0x41034070;
454
- cpu->mvfr0 = 0x10110222;
455
- cpu->mvfr1 = 0x12111111;
456
- cpu->mvfr2 = 0x00000043;
457
+ cpu->isar.mvfr0 = 0x10110222;
458
+ cpu->isar.mvfr1 = 0x12111111;
459
+ cpu->isar.mvfr2 = 0x00000043;
460
cpu->ctr = 0x8444c004;
461
cpu->reset_sctlr = 0x00c50838;
462
cpu->id_pfr0 = 0x00000131;
463
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
464
cpu->id_mmfr1 = 0x40000000;
465
cpu->id_mmfr2 = 0x01260000;
466
cpu->id_mmfr3 = 0x02102211;
467
- cpu->id_isar0 = 0x02101110;
468
- cpu->id_isar1 = 0x13112111;
469
- cpu->id_isar2 = 0x21232042;
470
- cpu->id_isar3 = 0x01112131;
471
- cpu->id_isar4 = 0x00011142;
472
- cpu->id_isar5 = 0x00011121;
473
- cpu->id_isar6 = 0;
474
- cpu->id_aa64pfr0 = 0x00002222;
475
+ cpu->isar.id_isar0 = 0x02101110;
476
+ cpu->isar.id_isar1 = 0x13112111;
477
+ cpu->isar.id_isar2 = 0x21232042;
478
+ cpu->isar.id_isar3 = 0x01112131;
479
+ cpu->isar.id_isar4 = 0x00011142;
480
+ cpu->isar.id_isar5 = 0x00011121;
481
+ cpu->isar.id_isar6 = 0;
482
+ cpu->isar.id_aa64pfr0 = 0x00002222;
483
cpu->id_aa64dfr0 = 0x10305106;
484
cpu->pmceid0 = 0x00000000;
485
cpu->pmceid1 = 0x00000000;
486
- cpu->id_aa64isar0 = 0x00011120;
487
+ cpu->isar.id_aa64isar0 = 0x00011120;
488
cpu->id_aa64mmfr0 = 0x00001124;
489
cpu->dbgdidr = 0x3516d000;
490
cpu->clidr = 0x0a200023;
491
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
492
cpu->midr = 0x410fd034;
493
cpu->revidr = 0x00000000;
494
cpu->reset_fpsid = 0x41034070;
495
- cpu->mvfr0 = 0x10110222;
496
- cpu->mvfr1 = 0x12111111;
497
- cpu->mvfr2 = 0x00000043;
498
+ cpu->isar.mvfr0 = 0x10110222;
499
+ cpu->isar.mvfr1 = 0x12111111;
500
+ cpu->isar.mvfr2 = 0x00000043;
501
cpu->ctr = 0x84448004; /* L1Ip = VIPT */
502
cpu->reset_sctlr = 0x00c50838;
503
cpu->id_pfr0 = 0x00000131;
504
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
505
cpu->id_mmfr1 = 0x40000000;
506
cpu->id_mmfr2 = 0x01260000;
507
cpu->id_mmfr3 = 0x02102211;
508
- cpu->id_isar0 = 0x02101110;
509
- cpu->id_isar1 = 0x13112111;
510
- cpu->id_isar2 = 0x21232042;
511
- cpu->id_isar3 = 0x01112131;
512
- cpu->id_isar4 = 0x00011142;
513
- cpu->id_isar5 = 0x00011121;
514
- cpu->id_isar6 = 0;
515
- cpu->id_aa64pfr0 = 0x00002222;
516
+ cpu->isar.id_isar0 = 0x02101110;
517
+ cpu->isar.id_isar1 = 0x13112111;
518
+ cpu->isar.id_isar2 = 0x21232042;
519
+ cpu->isar.id_isar3 = 0x01112131;
520
+ cpu->isar.id_isar4 = 0x00011142;
521
+ cpu->isar.id_isar5 = 0x00011121;
522
+ cpu->isar.id_isar6 = 0;
523
+ cpu->isar.id_aa64pfr0 = 0x00002222;
524
cpu->id_aa64dfr0 = 0x10305106;
525
- cpu->id_aa64isar0 = 0x00011120;
526
+ cpu->isar.id_aa64isar0 = 0x00011120;
527
cpu->id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
528
cpu->dbgdidr = 0x3516d000;
529
cpu->clidr = 0x0a200023;
530
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
531
cpu->midr = 0x410fd083;
532
cpu->revidr = 0x00000000;
533
cpu->reset_fpsid = 0x41034080;
534
- cpu->mvfr0 = 0x10110222;
535
- cpu->mvfr1 = 0x12111111;
536
- cpu->mvfr2 = 0x00000043;
537
+ cpu->isar.mvfr0 = 0x10110222;
538
+ cpu->isar.mvfr1 = 0x12111111;
539
+ cpu->isar.mvfr2 = 0x00000043;
540
cpu->ctr = 0x8444c004;
541
cpu->reset_sctlr = 0x00c50838;
542
cpu->id_pfr0 = 0x00000131;
543
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
544
cpu->id_mmfr1 = 0x40000000;
545
cpu->id_mmfr2 = 0x01260000;
546
cpu->id_mmfr3 = 0x02102211;
547
- cpu->id_isar0 = 0x02101110;
548
- cpu->id_isar1 = 0x13112111;
549
- cpu->id_isar2 = 0x21232042;
550
- cpu->id_isar3 = 0x01112131;
551
- cpu->id_isar4 = 0x00011142;
552
- cpu->id_isar5 = 0x00011121;
553
- cpu->id_aa64pfr0 = 0x00002222;
554
+ cpu->isar.id_isar0 = 0x02101110;
555
+ cpu->isar.id_isar1 = 0x13112111;
556
+ cpu->isar.id_isar2 = 0x21232042;
557
+ cpu->isar.id_isar3 = 0x01112131;
558
+ cpu->isar.id_isar4 = 0x00011142;
559
+ cpu->isar.id_isar5 = 0x00011121;
560
+ cpu->isar.id_aa64pfr0 = 0x00002222;
561
cpu->id_aa64dfr0 = 0x10305106;
562
cpu->pmceid0 = 0x00000000;
563
cpu->pmceid1 = 0x00000000;
564
- cpu->id_aa64isar0 = 0x00011120;
565
+ cpu->isar.id_aa64isar0 = 0x00011120;
566
cpu->id_aa64mmfr0 = 0x00001124;
567
cpu->dbgdidr = 0x3516d000;
568
cpu->clidr = 0x0a200023;
569
diff --git a/target/arm/helper.c b/target/arm/helper.c
570
index XXXXXXX..XXXXXXX 100644
571
--- a/target/arm/helper.c
572
+++ b/target/arm/helper.c
573
@@ -XXX,XX +XXX,XX @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
574
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
575
{
576
ARMCPU *cpu = arm_env_get_cpu(env);
577
- uint64_t pfr0 = cpu->id_aa64pfr0;
578
+ uint64_t pfr0 = cpu->isar.id_aa64pfr0;
579
580
if (env->gicv3state) {
581
pfr0 |= 1 << 24;
582
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
583
{ .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
584
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
585
.access = PL1_R, .type = ARM_CP_CONST,
586
- .resetvalue = cpu->id_isar0 },
587
+ .resetvalue = cpu->isar.id_isar0 },
588
{ .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
589
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
590
.access = PL1_R, .type = ARM_CP_CONST,
591
- .resetvalue = cpu->id_isar1 },
592
+ .resetvalue = cpu->isar.id_isar1 },
593
{ .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
594
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->id_isar2 },
+      .resetvalue = cpu->isar.id_isar2 },
     { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->id_isar3 },
+      .resetvalue = cpu->isar.id_isar3 },
     { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->id_isar4 },
+      .resetvalue = cpu->isar.id_isar4 },
     { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->id_isar5 },
+      .resetvalue = cpu->isar.id_isar5 },
     { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
       .access = PL1_R, .type = ARM_CP_CONST,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
     { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->id_isar6 },
+      .resetvalue = cpu->isar.id_isar6 },
     REGINFO_SENTINEL
 };
 define_arm_cp_regs(cpu, v6_idregs);
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
     { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->id_aa64pfr1},
+      .resetvalue = cpu->isar.id_aa64pfr1},
     { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
       .access = PL1_R, .type = ARM_CP_CONST,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
     { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->id_aa64isar0 },
+      .resetvalue = cpu->isar.id_aa64isar0 },
     { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->id_aa64isar1 },
+      .resetvalue = cpu->isar.id_aa64isar1 },
     { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
       .access = PL1_R, .type = ARM_CP_CONST,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
     { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->mvfr0 },
+      .resetvalue = cpu->isar.mvfr0 },
     { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->mvfr1 },
+      .resetvalue = cpu->isar.mvfr1 },
     { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
       .access = PL1_R, .type = ARM_CP_CONST,
-      .resetvalue = cpu->mvfr2 },
+      .resetvalue = cpu->isar.mvfr2 },
     { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
       .access = PL1_R, .type = ARM_CP_CONST,
--
2.19.1
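The hunks above are mechanical, but the pattern behind them is worth a
standalone illustration: the ID register values move into the cpu->isar
struct so there is exactly one copy, and the guest-visible ARM_CP_CONST
registers take their reset values from it. The following is a minimal
sketch of that idea in plain C -- not QEMU code; the struct, names, and
register values are invented for illustration:

    /* Sketch only: one authoritative struct of ID register values,
     * from which the "const register" table is populated. */
    #include <stdint.h>
    #include <stdio.h>

    struct isar_regs {
        uint32_t id_isar2;
        uint32_t id_isar5;
    };

    struct const_reg {
        const char *name;
        uint32_t resetvalue;    /* reads of a const reg return this */
    };

    int main(void)
    {
        /* Hypothetical values, not a real CPU's ID registers. */
        struct isar_regs isar = { .id_isar2 = 0x21232141,
                                  .id_isar5 = 0x00011121 };

        /* Entries are built from the single copy, so feature tests
         * and guest-visible registers cannot diverge. */
        struct const_reg regs[] = {
            { "ID_ISAR2", isar.id_isar2 },
            { "ID_ISAR5", isar.id_isar5 },
        };

        for (unsigned i = 0; i < 2; i++) {
            printf("%s = 0x%08x\n", regs[i].name, regs[i].resetvalue);
        }
        return 0;
    }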
From: Richard Henderson <richard.henderson@linaro.org>

Instantiating mps2-an505 (cortex-m33) will fail make check when
V7VE asserts that ID_ISAR0.Divide includes ARM division.  It is
also wrong to include ARM_FEATURE_LPAE.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)

     /* Some features automatically imply others: */
     if (arm_feature(env, ARM_FEATURE_V8)) {
-        set_feature(env, ARM_FEATURE_V7VE);
+        if (arm_feature(env, ARM_FEATURE_M)) {
+            set_feature(env, ARM_FEATURE_V7);
+        } else {
+            set_feature(env, ARM_FEATURE_V7VE);
+        }
     }
     if (arm_feature(env, ARM_FEATURE_V7VE)) {
         /* v7 Virtualization Extensions. In real hardware this implies
--
2.19.1

From: Richard Henderson <richard.henderson@linaro.org>

Enable SME, TPIDR2_EL0, and FA64 if supported by the cpu.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-45-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
                                          CPACR_EL1, ZEN, 3);
         env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
     }
+    /* and for SME instructions, with default vector length, and TPIDR2 */
+    if (cpu_isar_feature(aa64_sme, cpu)) {
+        env->cp15.sctlr_el[1] |= SCTLR_EnTP2;
+        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
+                                         CPACR_EL1, SMEN, 3);
+        env->vfp.smcr_el[1] = cpu->sme_default_vq - 1;
+        if (cpu_isar_feature(aa64_sme_fa64, cpu)) {
+            env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1],
+                                             SMCR, FA64, 1);
+        }
+    }
 }
 /*
  * Enable 48-bit address space (TODO: take reserved_va into account).
  * Enable TBI0 but not TBI1.
--
2.25.1
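For readers who have not met QEMU's FIELD_DP64 before, the SME reset
hunk above is ordinary read-modify-write on architectural bitfields:
CPACR_EL1.SMEN (bits [25:24]) enables SME at EL0/EL1, SMCR_EL1.LEN
(bits [3:0]) holds the default vector quanta minus one, and
SMCR_EL1.FA64 (bit 31) allows the full A64 instruction set while in
streaming mode. Below is a self-contained sketch with a hand-rolled
deposit helper standing in for FIELD_DP64; the vector-quanta value is
an invented example, not QEMU's default:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Deposit val into bits [start, start+len) of reg,
     * in the spirit of QEMU's FIELD_DP64. */
    static uint64_t deposit64(uint64_t reg, unsigned start, unsigned len,
                              uint64_t val)
    {
        uint64_t mask = (~0ULL >> (64 - len)) << start;
        return (reg & ~mask) | ((val << start) & mask);
    }

    int main(void)
    {
        uint64_t cpacr_el1 = 0, smcr_el1 = 0;
        unsigned sme_default_vq = 2;  /* example: 2 quanta = 256-bit VL */

        cpacr_el1 = deposit64(cpacr_el1, 24, 2, 3);  /* SMEN = 0b11 */
        smcr_el1 = deposit64(smcr_el1, 0, 4, sme_default_vq - 1); /* LEN */
        smcr_el1 = deposit64(smcr_el1, 31, 1, 1);    /* FA64 = 1 */

        printf("CPACR_EL1 = 0x%016" PRIx64 "\n", cpacr_el1);
        printf("SMCR_EL1  = 0x%016" PRIx64 "\n", smcr_el1);
        return 0;
    }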
From: Richard Henderson <richard.henderson@linaro.org>

Both arm and thumb2 division are controlled by the same ISAR field,
which takes care of the arm implies thumb case.  Having M imply
thumb2 division was wrong for cortex-m0, which is v6m and does not
have thumb2 at all, much less thumb2 division.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181016223115.24100-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h       | 12 ++++++++++--
 linux-user/elfload.c   |  4 ++--
 target/arm/cpu.c       | 10 +---------
 target/arm/translate.c |  4 ++--
 4 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ enum arm_features {
     ARM_FEATURE_VFP3,
     ARM_FEATURE_VFP_FP16,
     ARM_FEATURE_NEON,
-    ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
     ARM_FEATURE_M, /* Microcontroller profile. */
     ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
     ARM_FEATURE_THUMB2EE,
@@ -XXX,XX +XXX,XX @@ enum arm_features {
     ARM_FEATURE_V5,
     ARM_FEATURE_STRONGARM,
     ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
-    ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
     ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
     ARM_FEATURE_GENERIC_TIMER,
     ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
@@ -XXX,XX +XXX,XX @@ extern const uint64_t pred_esz_masks[4];
 /*
  * 32-bit feature tests via id registers.
  */
+static inline bool isar_feature_thumb_div(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
+}
+
+static inline bool isar_feature_arm_div(const ARMISARegisters *id)
+{
+    return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
+}
+
 static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
 {
     return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
     GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
     GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
     GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
-    GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA);
-    GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT);
+    GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
+    GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
     /* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
      * Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
      * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
      * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
      * Security Extensions is ARM_FEATURE_EL3.
      */
-    set_feature(env, ARM_FEATURE_ARM_DIV);
+    assert(cpu_isar_feature(arm_div, cpu));
     set_feature(env, ARM_FEATURE_LPAE);
     set_feature(env, ARM_FEATURE_V7);
 }
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
     if (arm_feature(env, ARM_FEATURE_V5)) {
         set_feature(env, ARM_FEATURE_V4T);
     }
-    if (arm_feature(env, ARM_FEATURE_M)) {
-        set_feature(env, ARM_FEATURE_THUMB_DIV);
-    }
-    if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
-        set_feature(env, ARM_FEATURE_THUMB_DIV);
-    }
     if (arm_feature(env, ARM_FEATURE_VFP4)) {
         set_feature(env, ARM_FEATURE_VFP3);
         set_feature(env, ARM_FEATURE_VFP_FP16);
@@ -XXX,XX +XXX,XX @@ static void cortex_r5_initfn(Object *obj)
     ARMCPU *cpu = ARM_CPU(obj);

     set_feature(&cpu->env, ARM_FEATURE_V7);
-    set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV);
-    set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
     set_feature(&cpu->env, ARM_FEATURE_V7MP);
     set_feature(&cpu->env, ARM_FEATURE_PMSA);
     cpu->midr = 0x411fc153; /* r1p3 */
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
         case 1:
         case 3:
             /* SDIV, UDIV */
-            if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
+            if (!dc_isar_feature(arm_div, s)) {
                 goto illegal_op;
             }
             if (((insn >> 5) & 7) || (rd != 15)) {
@@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
     tmp2 = load_reg(s, rm);
     if ((op & 0x50) == 0x10) {
         /* sdiv, udiv */
-        if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
+        if (!dc_isar_feature(thumb_div, s)) {
             goto illegal_op;
         }
         if (op & 0x20)
--
2.19.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-46-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/elfload.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ enum {
     ARM_HWCAP2_A64_RNG = 1 << 16,
     ARM_HWCAP2_A64_BTI = 1 << 17,
     ARM_HWCAP2_A64_MTE = 1 << 18,
+    ARM_HWCAP2_A64_ECV = 1 << 19,
+    ARM_HWCAP2_A64_AFP = 1 << 20,
+    ARM_HWCAP2_A64_RPRES = 1 << 21,
+    ARM_HWCAP2_A64_MTE3 = 1 << 22,
+    ARM_HWCAP2_A64_SME = 1 << 23,
+    ARM_HWCAP2_A64_SME_I16I64 = 1 << 24,
+    ARM_HWCAP2_A64_SME_F64F64 = 1 << 25,
+    ARM_HWCAP2_A64_SME_I8I32 = 1 << 26,
+    ARM_HWCAP2_A64_SME_F16F32 = 1 << 27,
+    ARM_HWCAP2_A64_SME_B16F32 = 1 << 28,
+    ARM_HWCAP2_A64_SME_F32F32 = 1 << 29,
+    ARM_HWCAP2_A64_SME_FA64 = 1 << 30,
 };

 #define ELF_HWCAP get_elf_hwcap()
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap2(void)
     GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
     GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
     GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
+    GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
+                              ARM_HWCAP2_A64_SME_F32F32 |
+                              ARM_HWCAP2_A64_SME_B16F32 |
+                              ARM_HWCAP2_A64_SME_F16F32 |
+                              ARM_HWCAP2_A64_SME_I8I32));
+    GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
+    GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
+    GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);

     return hwcaps;
 }
--
2.25.1
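Two details in the final pair of patches deserve a worked example. In
the 32-bit patch, ID_ISAR0.Divide (bits [27:24]) is a ladder -- 0 means
no divide, 1 means Thumb SDIV/UDIV only, 2 adds the Arm encodings -- so
"!= 0" and "> 1" tests on one field replace the two old feature bits.
In the hwcap patch, reporting amounts to "if the ID-register test
passes, OR the bits into AT_HWCAP2". The sketch below shows both
patterns in plain C; the hwcap constants mirror Linux's arm hwcap
numbering, but everything here is illustrative, not the QEMU
implementation:

    #include <stdint.h>
    #include <stdio.h>

    #define ID_ISAR0_DIVIDE_SHIFT 24
    #define ID_ISAR0_DIVIDE_MASK  0xf

    static unsigned divide_field(uint32_t id_isar0)
    {
        return (id_isar0 >> ID_ISAR0_DIVIDE_SHIFT) & ID_ISAR0_DIVIDE_MASK;
    }

    static int isar_feature_thumb_div(uint32_t id_isar0)
    {
        return divide_field(id_isar0) != 0;  /* >= 1: Thumb SDIV/UDIV */
    }

    static int isar_feature_arm_div(uint32_t id_isar0)
    {
        return divide_field(id_isar0) > 1;   /* >= 2: Arm SDIV/UDIV too */
    }

    enum {
        HWCAP_IDIVA = 1u << 17,   /* as in Linux's arm hwcap list */
        HWCAP_IDIVT = 1u << 18,
    };

    int main(void)
    {
        uint32_t id_isar0 = 2u << ID_ISAR0_DIVIDE_SHIFT; /* Arm + Thumb */
        uint32_t hwcaps = 0;

        /* The GET_FEATURE_ID pattern: a feature test gates hwcap bits. */
        if (isar_feature_arm_div(id_isar0)) {
            hwcaps |= HWCAP_IDIVA;
        }
        if (isar_feature_thumb_div(id_isar0)) {
            hwcaps |= HWCAP_IDIVT;
        }
        printf("hwcaps = 0x%08x\n", hwcaps);
        return 0;
    }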