I don't have anything else queued up at the moment, so this is just
Richard's SME patches.

-- PMM

The following changes since commit 63b38f6c85acd312c2cab68554abf33adf4ee2b3:

  Merge tag 'pull-target-arm-20220707' of https://git.linaro.org/people/pmaydell/qemu-arm into staging (2022-07-08 06:17:11 +0530)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220711

for you to fetch changes up to f9982ceaf26df27d15547a3a7990a95019e9e3a8:

  linux-user/aarch64: Add SME related hwcap entries (2022-07-11 13:43:52 +0100)

----------------------------------------------------------------
target-arm:
 * Implement SME emulation, for both system and linux-user

----------------------------------------------------------------
Richard Henderson (45):
      target/arm: Handle SME in aarch64_cpu_dump_state
      target/arm: Add infrastructure for disas_sme
      target/arm: Trap non-streaming usage when Streaming SVE is active
      target/arm: Mark ADR as non-streaming
      target/arm: Mark RDFFR, WRFFR, SETFFR as non-streaming
      target/arm: Mark BDEP, BEXT, BGRP, COMPACT, FEXPA, FTSSEL as non-streaming
      target/arm: Mark PMULL, FMMLA as non-streaming
      target/arm: Mark FTSMUL, FTMAD, FADDA as non-streaming
      target/arm: Mark SMMLA, UMMLA, USMMLA as non-streaming
      target/arm: Mark string/histo/crypto as non-streaming
      target/arm: Mark gather/scatter load/store as non-streaming
      target/arm: Mark gather prefetch as non-streaming
      target/arm: Mark LDFF1 and LDNF1 as non-streaming
      target/arm: Mark LD1RO as non-streaming
      target/arm: Add SME enablement checks
      target/arm: Handle SME in sve_access_check
      target/arm: Implement SME RDSVL, ADDSVL, ADDSPL
      target/arm: Implement SME ZERO
      target/arm: Implement SME MOVA
      target/arm: Implement SME LD1, ST1
      target/arm: Export unpredicated ld/st from translate-sve.c
      target/arm: Implement SME LDR, STR
      target/arm: Implement SME ADDHA, ADDVA
      target/arm: Implement FMOPA, FMOPS (non-widening)
      target/arm: Implement BFMOPA, BFMOPS
      target/arm: Implement FMOPA, FMOPS (widening)
      target/arm: Implement SME integer outer product
      target/arm: Implement PSEL
      target/arm: Implement REVD
      target/arm: Implement SCLAMP, UCLAMP
      target/arm: Reset streaming sve state on exception boundaries
      target/arm: Enable SME for -cpu max
      linux-user/aarch64: Clear tpidr2_el0 if CLONE_SETTLS
      linux-user/aarch64: Reset PSTATE.SM on syscalls
      linux-user/aarch64: Add SM bit to SVE signal context
      linux-user/aarch64: Tidy target_restore_sigframe error return
      linux-user/aarch64: Do not allow duplicate or short sve records
      linux-user/aarch64: Verify extra record lock succeeded
      linux-user/aarch64: Move sve record checks into restore
      linux-user/aarch64: Implement SME signal handling
      linux-user: Rename sve prctls
      linux-user/aarch64: Implement PR_SME_GET_VL, PR_SME_SET_VL
      target/arm: Only set ZEN in reset if SVE present
      target/arm: Enable SME for user-only
      linux-user/aarch64: Add SME related hwcap entries

 docs/system/arm/emulation.rst     |    4 +
 linux-user/aarch64/target_cpu.h   |    5 +-
 linux-user/aarch64/target_prctl.h |   62 +-
 target/arm/cpu.h                  |    7 +
 target/arm/helper-sme.h           |  126 ++++
 target/arm/helper-sve.h           |    4 +
 target/arm/helper.h               |   18 +
 target/arm/translate-a64.h        |   45 ++
 target/arm/translate.h            |   16 +
 target/arm/sme-fa64.decode        |   60 ++
 target/arm/sme.decode             |   88 +++
 target/arm/sve.decode             |   41 +-
 linux-user/aarch64/cpu_loop.c     |    9 +
 linux-user/aarch64/signal.c       |  243 ++++++--
 linux-user/elfload.c              |   20 +
 linux-user/syscall.c              |   28 +-
 target/arm/cpu.c                  |   35 +-
 target/arm/cpu64.c                |   11 +
 target/arm/helper.c               |   56 +-
 target/arm/sme_helper.c           | 1140 +++++++++++++++++++++++++++++++++++++
 target/arm/sve_helper.c           |   28 +
 target/arm/translate-a64.c        |  103 +++-
 target/arm/translate-sme.c        |  373 ++++++++++++
 target/arm/translate-sve.c        |  393 ++++++++++---
 target/arm/translate-vfp.c        |   12 +
 target/arm/translate.c            |    2 +
 target/arm/vec_helper.c           |   24 +
 target/arm/meson.build            |    3 +
 28 files changed, 2821 insertions(+), 135 deletions(-)
 create mode 100644 target/arm/sme-fa64.decode
 create mode 100644 target/arm/sme.decode
 create mode 100644 target/arm/translate-sme.c
From: Richard Henderson <richard.henderson@linaro.org>

Dump SVCR, plus use the correct access check for Streaming Mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     int i;
     int el = arm_current_el(env);
     const char *ns_status;
+    bool sve;
 
     qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
     for (i = 0; i < 32; i++) {
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
                  el,
                  psr & PSTATE_SP ? 'h' : 't');
 
+    if (cpu_isar_feature(aa64_sme, cpu)) {
+        qemu_fprintf(f, "  SVCR=%08" PRIx64 " %c%c",
+                     env->svcr,
+                     (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'),
+                     (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
+    }
     if (cpu_isar_feature(aa64_bti, cpu)) {
         qemu_fprintf(f, "  BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
     }
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     qemu_fprintf(f, "     FPCR=%08x FPSR=%08x\n",
                  vfp_get_fpcr(env), vfp_get_fpsr(env));
 
-    if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
+    if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) {
+        sve = sme_exception_el(env, el) == 0;
+    } else if (cpu_isar_feature(aa64_sve, cpu)) {
+        sve = sve_exception_el(env, el) == 0;
+    } else {
+        sve = false;
+    }
+
+    if (sve) {
        int j, zcr_len = sve_vqm1_for_el(env, el);
 
        for (i = 0; i <= FFR_PRED_NUM; i++) {
--
2.25.1
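(For readers following along: with SME enabled, the extra field in the
register dump looks roughly like "SVCR=00000001 -S" when only PSTATE.SM
is set, or "SVCR=00000003 ZS" when ZA storage is also enabled.  The
value is just the raw SVCR register, so those sample values are only
illustrative.)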
From: Richard Henderson <richard.henderson@linaro.org>

This includes the build rules for the decoder, and the
new file for translation, but excludes any instructions.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h |  1 +
 target/arm/sme.decode      | 20 ++++++++++++++++++++
 target/arm/translate-a64.c |  7 ++++++-
 target/arm/translate-sme.c | 35 +++++++++++++++++++++++++++++++++++
 target/arm/meson.build     |  2 ++
 5 files changed, 64 insertions(+), 1 deletion(-)
 create mode 100644 target/arm/sme.decode
 create mode 100644 target/arm/translate-sme.c

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s)
 }
 
 bool disas_sve(DisasContext *, uint32_t);
+bool disas_sme(DisasContext *, uint32_t);
 
 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@
+# AArch64 SME instruction descriptions
+#
+#  Copyright (c) 2022 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     }
 
     switch (extract32(insn, 25, 4)) {
-    case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
+    case 0x0:
+        if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
+            unallocated_encoding(s);
+        }
+        break;
+    case 0x1: case 0x3: /* UNALLOCATED */
         unallocated_encoding(s);
         break;
     case 0x2:
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * AArch64 SME translation
+ *
+ * Copyright (c) 2022 Linaro, Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "translate.h"
+#include "exec/helper-gen.h"
+#include "translate-a64.h"
+#include "fpu/softfloat.h"
+
+
+/*
+ * Include the generated decoder.
+ */
+
+#include "decode-sme.c.inc"
diff --git a/target/arm/meson.build b/target/arm/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -XXX,XX +XXX,XX @@
 gen = [
   decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
+  decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
   decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
   decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
   decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
@@ -XXX,XX +XXX,XX @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
   'sme_helper.c',
   'translate-a64.c',
   'translate-sve.c',
+  'translate-sme.c',
 ))
 
 arm_softmmu_ss = ss.source_set()
--
2.25.1
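(For anyone not familiar with decodetree: the generated decode-sme.c.inc
that this patch wires up is essentially a chain of mask/value tests that
dispatch to per-pattern trans_*() hooks supplied by translate-sme.c.  A
very rough hand-written sketch of the shape of the generated code -- the
mask, value and pattern name below are made up for illustration and are
not a real SME encoding:

    static bool disas_sme(DisasContext *s, uint32_t insn)
    {
        /* Illustrative only: match one hypothetical pattern, then dispatch. */
        if ((insn & 0xff000000) == 0xc0000000) {
            arg_ZERO a = { .imm = extract32(insn, 0, 8) };
            return trans_ZERO(s, &a);   /* trans_* comes from translate-sme.c */
        }
        return false;   /* no match: caller raises an unallocated encoding */
    }

The real file is machine-generated from sme.decode by scripts/decodetree.py.)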
From: Richard Henderson <richard.henderson@linaro.org>

This new behaviour is in the ARM pseudocode function
AArch64.CheckFPAdvSIMDEnabled, which applies to AArch32
via AArch32.CheckAdvSIMDOrFPEnabled when the EL to which
the trap would be delivered is in AArch64 mode.

Given that ARMv9 drops support for AArch32 outside EL0, the trap EL
detection ought to be trivially true, but the pseudocode still contains
a number of conditions, and QEMU has not yet committed to dropping A32
support for EL[12] when v9 features are present.

Since the computation of SME_TRAP_NONSTREAMING is necessarily different
for the two modes, we might as well preserve bits within TBFLAG_ANY and
allocate separate bits within TBFLAG_A32 and TBFLAG_A64 instead.

Note that DDI0616A.a has typos for bits [22:21] of LD1RO in the table
of instructions illegal in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h           |  7 +++
 target/arm/translate.h     |  4 ++
 target/arm/sme-fa64.decode | 90 ++++++++++++++++++++++++++++++++++++++
 target/arm/helper.c        | 41 +++++++++++++++++
 target/arm/translate-a64.c | 40 ++++++++++++++++-
 target/arm/translate-vfp.c | 12 +++++
 target/arm/translate.c     |  2 +
 target/arm/meson.build     |  1 +
 8 files changed, 195 insertions(+), 2 deletions(-)
 create mode 100644 target/arm/sme-fa64.decode

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
  * the same thing as the current security state of the processor!
  */
 FIELD(TBFLAG_A32, NS, 10, 1)
+/*
+ * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not.
+ * This requires an SME trap from AArch32 mode when using NEON.
+ */
+FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1)
 
 /*
  * Bit usage when in AArch32 state, for M-profile only.
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2)
 FIELD(TBFLAG_A64, PSTATE_SM, 22, 1)
 FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1)
 FIELD(TBFLAG_A64, SVL, 24, 4)
+/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */
+FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1)
 
 /*
  * Helpers for using the above.
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     bool pstate_sm;
     /* True if PSTATE.ZA is set. */
     bool pstate_za;
+    /* True if non-streaming insns should raise an SME Streaming exception. */
+    bool sme_trap_nonstreaming;
+    /* True if the current instruction is non-streaming. */
+    bool is_nonstreaming;
     /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
     bool mve_no_pred;
     /*
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@
+# AArch64 SME allowed instruction decoding
+#
+#  Copyright (c) 2022 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+# These patterns are taken from Appendix E1.1 of DDI0616 A.a,
+# Arm Architecture Reference Manual Supplement,
+# The Scalable Matrix Extension (SME), for Armv9-A
+
+{
+  [
+    OK  0-00 1110 0000 0001 0010 11-- ---- ----   # SMOV W|Xd,Vn.B[0]
+    OK  0-00 1110 0000 0010 0010 11-- ---- ----   # SMOV W|Xd,Vn.H[0]
+    OK  0100 1110 0000 0100 0010 11-- ---- ----   # SMOV Xd,Vn.S[0]
+    OK  0000 1110 0000 0001 0011 11-- ---- ----   # UMOV Wd,Vn.B[0]
+    OK  0000 1110 0000 0010 0011 11-- ---- ----   # UMOV Wd,Vn.H[0]
+    OK  0000 1110 0000 0100 0011 11-- ---- ----   # UMOV Wd,Vn.S[0]
+    OK  0100 1110 0000 1000 0011 11-- ---- ----   # UMOV Xd,Vn.D[0]
+  ]
+  FAIL  0--0 111- ---- ---- ---- ---- ---- ----   # Advanced SIMD vector operations
+}
+
+{
+  [
+    OK  0101 1110 --1- ---- 11-1 11-- ---- ----   # FMULX/FRECPS/FRSQRTS (scalar)
+    OK  0101 1110 -10- ---- 00-1 11-- ---- ----   # FMULX/FRECPS/FRSQRTS (scalar, FP16)
+    OK  01-1 1110 1-10 0001 11-1 10-- ---- ----   # FRECPE/FRSQRTE/FRECPX (scalar)
+    OK  01-1 1110 1111 1001 11-1 10-- ---- ----   # FRECPE/FRSQRTE/FRECPX (scalar, FP16)
+  ]
+  FAIL  01-1 111- ---- ---- ---- ---- ---- ----   # Advanced SIMD single-element operations
+}
+
+FAIL    0-00 110- ---- ---- ---- ---- ---- ----   # Advanced SIMD structure load/store
+FAIL    1100 1110 ---- ---- ---- ---- ---- ----   # Advanced SIMD cryptography extensions
+FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
+
+# These are the "avoidance of doubt" final table of Illegal Advanced SIMD instructions
+# We don't actually need to include these, as the default is OK.
+#       -001 111- ---- ---- ---- ---- ---- ----   # Scalar floating-point operations
+#       --10 110- ---- ---- ---- ---- ---- ----   # Load/store pair of FP registers
+#       --01 1100 ---- ---- ---- ---- ---- ----   # Load FP register (PC-relative literal)
+#       --11 1100 --0- ---- ---- ---- ---- ----   # Load/store FP register (unscaled imm)
+#       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
+#       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
+
+FAIL    0000 0100 --1- ---- 1010 ---- ---- ----   # ADR
+FAIL    0000 0100 --1- ---- 1011 -0-- ---- ----   # FTSSEL, FEXPA
+FAIL    0000 0101 --10 0001 100- ---- ---- ----   # COMPACT
+FAIL    0010 0101 --01 100- 1111 000- ---0 ----   # RDFFR, RDFFRS
+FAIL    0010 0101 --10 1--- 1001 ---- ---- ----   # WRFFR, SETFFR
+FAIL    0100 0101 --0- ---- 1011 ---- ---- ----   # BDEP, BEXT, BGRP
+FAIL    0100 0101 000- ---- 0110 1--- ---- ----   # PMULLB, PMULLT (128b result)
+FAIL    0110 0100 --1- ---- 1110 01-- ---- ----   # FMMLA, BFMMLA
+FAIL    0110 0101 --0- ---- 0000 11-- ---- ----   # FTSMUL
+FAIL    0110 0101 --01 0--- 100- ---- ---- ----   # FTMAD
+FAIL    0110 0101 --01 1--- 001- ---- ---- ----   # FADDA
+FAIL    0100 0101 --0- ---- 1001 10-- ---- ----   # SMMLA, UMMLA, USMMLA
+FAIL    0100 0101 --1- ---- 1--- ---- ---- ----   # SVE2 string/histo/crypto instructions
+FAIL    1000 010- -00- ---- 10-- ---- ---- ----   # SVE2 32-bit gather NT load (vector+scalar)
+FAIL    1000 010- -00- ---- 111- ---- ---- ----   # SVE 32-bit gather prefetch (vector+imm)
+FAIL    1000 0100 0-1- ---- 0--- ---- ---- ----   # SVE 32-bit gather prefetch (scalar+vector)
+FAIL    1000 010- -01- ---- 1--- ---- ---- ----   # SVE 32-bit gather load (vector+imm)
+FAIL    1000 0100 0-0- ---- 0--- ---- ---- ----   # SVE 32-bit gather load byte (scalar+vector)
+FAIL    1000 0100 1--- ---- 0--- ---- ---- ----   # SVE 32-bit gather load half (scalar+vector)
+FAIL    1000 0101 0--- ---- 0--- ---- ---- ----   # SVE 32-bit gather load word (scalar+vector)
+FAIL    1010 010- ---- ---- 011- ---- ---- ----   # SVE contiguous FF load (scalar+scalar)
+FAIL    1010 010- ---1 ---- 101- ---- ---- ----   # SVE contiguous NF load (scalar+imm)
+FAIL    1010 010- -01- ---- 000- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+scalar)
+FAIL    1010 010- -010 ---- 001- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+imm)
+FAIL    1100 010- ---- ---- ---- ---- ---- ----   # SVE 64-bit gather load/prefetch
+FAIL    1110 010- -00- ---- 001- ---- ---- ----   # SVE2 64-bit scatter NT store (vector+scalar)
+FAIL    1110 010- -10- ---- 001- ---- ---- ----   # SVE2 32-bit scatter NT store (vector+scalar)
+FAIL    1110 010- ---- ---- 1-0- ---- ---- ----   # SVE scatter store (scalar+32-bit vector)
+FAIL    1110 010- ---- ---- 101- ---- ---- ----   # SVE scatter store (misc)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ int sme_exception_el(CPUARMState *env, int el)
     return 0;
 }
 
+/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
+static bool sme_fa64(CPUARMState *env, int el)
+{
+    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
+        return false;
+    }
+
+    if (el <= 1 && !el_is_in_host(env, el)) {
+        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
+            return false;
+        }
+    }
+    if (el <= 2 && arm_is_el2_enabled(env)) {
+        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
+            return false;
+        }
+    }
+    if (arm_feature(env, ARM_FEATURE_EL3)) {
+        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
 /*
  * Given that SVE is enabled, return the vector length for EL.
  */
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
         DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
     }
 
+    /*
+     * The SME exception we are testing for is raised via
+     * AArch64.CheckFPAdvSIMDEnabled(), as called from
+     * AArch32.CheckAdvSIMDOrFPEnabled().
+     */
+    if (el == 0
+        && FIELD_EX64(env->svcr, SVCR, SM)
+        && (!arm_is_el2_enabled(env)
+            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
+        && arm_el_is_aa64(env, 1)
+        && !sme_fa64(env, el)) {
+        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
+    }
+
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }
 
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     }
     if (FIELD_EX64(env->svcr, SVCR, SM)) {
         DP_TBFLAG_A64(flags, PSTATE_SM, 1);
+        DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
     }
     DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
 }
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void do_vec_ld(DisasContext *s, int destidx, int element,
  * unallocated-encoding checks (otherwise the syndrome information
  * for the resulting exception will be incorrect).
  */
-static bool fp_access_check(DisasContext *s)
+static bool fp_access_check_only(DisasContext *s)
 {
     if (s->fp_excp_el) {
         assert(!s->fp_access_checked);
@@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s)
     return true;
 }
 
+static bool fp_access_check(DisasContext *s)
+{
+    if (!fp_access_check_only(s)) {
+        return false;
+    }
+    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_smetrap(SME_ET_Streaming, false));
+        return false;
+    }
+    return true;
+}
+
 /* Check that SVE access is enabled.  If it is, return true.
  * If not, emit code to generate an appropriate exception and return false.
  */
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
     default:
         g_assert_not_reached();
     }
-    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
+    if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
         return;
     } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
         return;
@@ -XXX,XX +XXX,XX @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
     }
 }
 
+/*
+ * Include the generated SME FA64 decoder.
+ */
+
+#include "decode-sme-fa64.c.inc"
+
+static bool trans_OK(DisasContext *s, arg_OK *a)
+{
+    return true;
+}
+
+static bool trans_FAIL(DisasContext *s, arg_OK *a)
+{
+    s->is_nonstreaming = true;
+    return true;
+}
+
 /**
  * is_guarded_page:
  * @env: The cpu environment
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
     dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
     dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
+    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
     dc->vec_len = 0;
     dc->vec_stride = 0;
     dc->cp_regs = arm_cpu->cp_regs;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         }
     }
 
+    s->is_nonstreaming = false;
+    if (s->sme_trap_nonstreaming) {
+        disas_sme_fa64(s, insn);
+    }
+
     switch (extract32(insn, 25, 4)) {
     case 0x0:
         if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
         return false;
     }
 
+    /*
+     * Note that rebuild_hflags_a32 has already accounted for being in EL0
+     * and the higher EL in A64 mode, etc.  Unlike A64 mode, there do not
+     * appear to be any insns which touch VFP which are allowed.
+     */
+    if (s->sme_trap_nonstreaming) {
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_smetrap(SME_ET_Streaming,
+                                       s->base.pc_next - s->pc_curr == 2));
+        return false;
+    }
+
     if (!s->vfp_enabled && !ignore_vfp_enabled) {
         assert(!arm_dc_feature(s, ARM_FEATURE_M));
         unallocated_encoding(s);
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
             dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
             dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
         }
+        dc->sme_trap_nonstreaming =
+            EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
     }
     dc->cp_regs = cpu->cp_regs;
     dc->features = env->features;
diff --git a/target/arm/meson.build b/target/arm/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -XXX,XX +XXX,XX @@
 gen = [
   decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
   decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
+  decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'),
   decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
   decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
   decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
--
2.25.1
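(The intended usage pattern for the two new DisasContext flags, as the
following patches apply it, is: a trans_* function for an instruction
that is illegal in Streaming SVE mode sets is_nonstreaming before its
access check, and fp_access_check()/sve_access_check() then raise the
SME Streaming-mode trap when FA64 is not enabled.  A condensed sketch,
with a made-up instruction name purely for illustration:

    static bool trans_FOO(DisasContext *s, arg_FOO *a)
    {
        /* FOO is a non-streaming instruction. */
        s->is_nonstreaming = true;
        if (!sve_access_check(s)) {
            /* The SME trap (or SVE/FP trap) has been raised. */
            return true;
        }
        /* ... normal code generation for FOO ... */
        return true;
    }

The real conversions mostly use the TRANS_FEAT_NONSTREAMING macro added
in the next patch rather than open-coding this.)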
From: Richard Henderson <richard.henderson@linaro.org>

Mark ADR as a non-streaming instruction, which should trap
if full a64 support is not enabled in streaming mode.

Removing entries from sme-fa64.decode is an easy way to see
what remains to be done.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h     | 7 +++++++
 target/arm/sme-fa64.decode | 1 -
 target/arm/translate-sve.c | 8 ++++----
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
     static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
     { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }
 
+#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...)            \
+    static bool trans_##NAME(DisasContext *s, arg_##NAME *a)     \
+    {                                                             \
+        s->is_nonstreaming = true;                                \
+        return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__);  \
+    }
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    0000 0100 --1- ---- 1010 ---- ---- ----   # ADR
 FAIL    0000 0100 --1- ---- 1011 -0-- ---- ----   # FTSSEL, FEXPA
 FAIL    0000 0101 --10 0001 100- ---- ---- ----   # COMPACT
 FAIL    0010 0101 --01 100- 1111 000- ---0 ----   # RDFFR, RDFFRS
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
     return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
 }
 
-TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
-TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
-TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
-TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
+TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
+TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
+TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
+TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
 
 /*
  *** SVE Integer Misc - Unpredicated Group
--
2.25.1
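(Expanding the new macro by hand for one of the converted patterns makes
the effect clear -- the only difference from TRANS_FEAT is the extra
flag assignment.  Hand-expanded here for illustration only; the real
code keeps the macro:

    /* TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32) */
    static bool trans_ADR_p32(DisasContext *s, arg_ADR_p32 *a)
    {
        s->is_nonstreaming = true;   /* consulted later by fp_access_check() */
        return dc_isar_feature(aa64_sve, s) && do_adr(s, a, gen_helper_sve_adr_p32);
    }
)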
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 2 --
 target/arm/translate-sve.c | 9 ++++++---
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 
 FAIL    0000 0100 --1- ---- 1011 -0-- ---- ----   # FTSSEL, FEXPA
 FAIL    0000 0101 --10 0001 100- ---- ---- ----   # COMPACT
-FAIL    0010 0101 --01 100- 1111 000- ---0 ----   # RDFFR, RDFFRS
-FAIL    0010 0101 --10 1--- 1001 ---- ---- ----   # WRFFR, SETFFR
 FAIL    0100 0101 --0- ---- 1011 ---- ---- ----   # BDEP, BEXT, BGRP
 FAIL    0100 0101 000- ---- 0110 1--- ---- ----   # PMULLB, PMULLT (128b result)
 FAIL    0110 0100 --1- ---- 1110 01-- ---- ----   # FMMLA, BFMMLA
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
 TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)
 
 /* Note pat == 31 is #all, to set all elements. */
-TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false)
+TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve,
+                        do_predset, 0, FFR_PRED_NUM, 31, false)
 
 /* Note pat == 32 is #unimp, to set no elements. */
 TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false)
@@ -XXX,XX +XXX,XX @@ static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
         .rd = a->rd, .pg = a->pg, .s = a->s,
         .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
     };
+
+    s->is_nonstreaming = true;
     return trans_AND_pppp(s, &alt_a);
 }
 
-TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
-TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
+TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
+TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
 
 static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
                             void (*gen_fn)(TCGv_i32, TCGv_ptr,
--
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode |  3 ---
 target/arm/translate-sve.c | 22 ++++++++++++----------
 2 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    0000 0100 --1- ---- 1011 -0-- ---- ----   # FTSSEL, FEXPA
-FAIL    0000 0101 --10 0001 100- ---- ---- ----   # COMPACT
-FAIL    0100 0101 --0- ---- 1011 ---- ---- ----   # BDEP, BEXT, BGRP
 FAIL    0100 0101 000- ---- 0110 1--- ---- ----   # PMULLB, PMULLT (128b result)
 FAIL    0110 0100 --1- ---- 1110 01-- ---- ----   # FMMLA, BFMMLA
 FAIL    0110 0101 --0- ---- 0000 11-- ---- ----   # FTSMUL
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_2 * const fexpa_fns[4] = {
     NULL,                   gen_helper_sve_fexpa_h,
     gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
 };
-TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz,
-           fexpa_fns[a->esz], a->rd, a->rn, 0)
+TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz,
+                        fexpa_fns[a->esz], a->rd, a->rn, 0)
 
 static gen_helper_gvec_3 * const ftssel_fns[4] = {
     NULL,                    gen_helper_sve_ftssel_h,
     gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
 };
-TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz,
+                        ftssel_fns[a->esz], a, 0)
 
 /*
  *** SVE Predicate Logical Operations Group
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
 static gen_helper_gvec_3 * const compact_fns[4] = {
     NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
 };
-TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz,
+                        compact_fns[a->esz], a, 0)
 
 /* Call the helper that computes the ARM LastActiveElement pseudocode
  * function, scaled by the element size.  This includes the not found
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3 * const bext_fns[4] = {
     gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
     gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
 };
-TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
-           bext_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
+                        bext_fns[a->esz], a, 0)
 
 static gen_helper_gvec_3 * const bdep_fns[4] = {
     gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
     gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
 };
-TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
-           bdep_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
+                        bdep_fns[a->esz], a, 0)
 
 static gen_helper_gvec_3 * const bgrp_fns[4] = {
     gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
     gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
 };
-TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
-           bgrp_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
+                        bgrp_fns[a->esz], a, 0)
 
 static gen_helper_gvec_3 * const cadd_fns[4] = {
     gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
--
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode |  2 --
 target/arm/translate-sve.c | 24 +++++++++++++++---------
 2 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    0100 0101 000- ---- 0110 1--- ---- ----   # PMULLB, PMULLT (128b result)
-FAIL    0110 0100 --1- ---- 1110 01-- ---- ----   # FMMLA, BFMMLA
 FAIL    0110 0101 --0- ---- 0000 11-- ---- ----   # FTSMUL
 FAIL    0110 0101 --01 0--- 100- ---- ---- ----   # FTMAD
 FAIL    0110 0101 --01 1--- 001- ---- ---- ----   # FADDA
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
         gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
         NULL,                    gen_helper_sve2_pmull_d,
     };
-    if (a->esz == 0
-        ? !dc_isar_feature(aa64_sve2_pmull128, s)
-        : !dc_isar_feature(aa64_sve, s)) {
+
+    if (a->esz == 0) {
+        if (!dc_isar_feature(aa64_sve2_pmull128, s)) {
+            return false;
+        }
+        s->is_nonstreaming = true;
+    } else if (!dc_isar_feature(aa64_sve, s)) {
         return false;
     }
     return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
@@ -XXX,XX +XXX,XX @@ DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz)
  * SVE Integer Multiply-Add (unpredicated)
  */
 
-TRANS_FEAT(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_s,
-           a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
-TRANS_FEAT(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_d,
-           a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
+TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz,
+                        gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra,
+                        0, FPST_FPCR)
+TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz,
+                        gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra,
+                        0, FPST_FPCR)
 
 static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
     NULL,                           gen_helper_sve2_sqdmlal_zzzw_h,
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
 TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
            gen_helper_gvec_bfdot_idx, a)
 
-TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
-           gen_helper_gvec_bfmmla, a, 0)
+TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
+                        gen_helper_gvec_bfmmla, a, 0)
 
 static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
 {
--
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as a non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-9-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 3 ---
12
target/arm/translate-sve.c | 15 +++++++++++----
13
2 files changed, 11 insertions(+), 7 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL
24
-FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD
25
-FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA
26
FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA
27
FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
28
FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
29
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/translate-sve.c
32
+++ b/target/arm/translate-sve.c
33
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = {
34
NULL, gen_helper_sve_ftmad_h,
35
gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d,
36
};
37
-TRANS_FEAT(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
38
- ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
39
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
40
+TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
41
+ ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
42
+ a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
43
44
/*
45
*** SVE Floating Point Accumulating Reduction Group
46
@@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
47
if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
48
return false;
49
}
50
+ s->is_nonstreaming = true;
51
if (!sve_access_check(s)) {
52
return true;
53
}
54
@@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
55
DO_FP3(FADD_zzz, fadd)
56
DO_FP3(FSUB_zzz, fsub)
57
DO_FP3(FMUL_zzz, fmul)
58
-DO_FP3(FTSMUL, ftsmul)
59
DO_FP3(FRECPS, recps)
60
DO_FP3(FRSQRTS, rsqrts)
61
62
#undef DO_FP3
63
64
+static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = {
65
+ NULL, gen_helper_gvec_ftsmul_h,
66
+ gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d
67
+};
68
+TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz,
69
+ ftsmul_fns[a->esz], a, 0)
70
+
71
/*
72
*** SVE Floating Point Arithmetic - Predicated Group
73
*/
74
--
75
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-10-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 1 -
 target/arm/translate-sve.c | 12 ++++++------
 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA
24
FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
25
FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
26
FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-sve.c
30
+++ b/target/arm/translate-sve.c
31
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true)
32
TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false)
33
TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true)
34
35
-TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
36
- gen_helper_gvec_smmla_b, a, 0)
37
-TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
38
- gen_helper_gvec_usmmla_b, a, 0)
39
-TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
40
- gen_helper_gvec_ummla_b, a, 0)
41
+TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
42
+ gen_helper_gvec_smmla_b, a, 0)
43
+TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
44
+ gen_helper_gvec_usmmla_b, a, 0)
45
+TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
46
+ gen_helper_gvec_ummla_b, a, 0)
47
48
TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
49
gen_helper_gvec_bfdot, a, 0)
50
--
51
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-11-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 1 -
 target/arm/translate-sve.c | 35 ++++++++++++++++++-----------------
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
24
FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
25
FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
26
FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector)
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-sve.c
30
+++ b/target/arm/translate-sve.c
31
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
32
static gen_helper_gvec_flags_4 * const match_fns[4] = {
33
gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL
34
};
35
-TRANS_FEAT(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
36
+TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
37
38
static gen_helper_gvec_flags_4 * const nmatch_fns[4] = {
39
gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL
40
};
41
-TRANS_FEAT(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
42
+TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
43
44
static gen_helper_gvec_4 * const histcnt_fns[4] = {
45
NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
46
};
47
-TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
48
- histcnt_fns[a->esz], a, 0)
49
+TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
50
+ histcnt_fns[a->esz], a, 0)
51
52
-TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
53
- a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
54
+TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
55
+ a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
56
57
DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz)
58
DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz)
59
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
60
TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
61
a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)
62
63
-TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
64
- gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
65
+TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
66
+ gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
67
68
-TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
69
- gen_helper_crypto_aese, a, false)
70
-TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
71
- gen_helper_crypto_aese, a, true)
72
+TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
73
+ gen_helper_crypto_aese, a, false)
74
+TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
75
+ gen_helper_crypto_aese, a, true)
76
77
-TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
78
- gen_helper_crypto_sm4e, a, 0)
79
-TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
80
- gen_helper_crypto_sm4ekey, a, 0)
81
+TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
82
+ gen_helper_crypto_sm4e, a, 0)
83
+TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
84
+ gen_helper_crypto_sm4ekey, a, 0)
85
86
-TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a)
87
+TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz,
88
+ gen_gvec_rax1, a)
89
90
TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz,
91
gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR)
92
--
93
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 9 ---------
 target/arm/translate-sve.c | 6 ++++++
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
24
FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
25
FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector)
26
-FAIL 1000 010- -01- ---- 1--- ---- ---- ---- # SVE 32-bit gather load (vector+imm)
27
-FAIL 1000 0100 0-0- ---- 0--- ---- ---- ---- # SVE 32-bit gather load byte (scalar+vector)
28
-FAIL 1000 0100 1--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load half (scalar+vector)
29
-FAIL 1000 0101 0--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load word (scalar+vector)
30
FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar)
31
FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm)
32
FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
33
FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
34
FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch
35
-FAIL 1110 010- -00- ---- 001- ---- ---- ---- # SVE2 64-bit scatter NT store (vector+scalar)
36
-FAIL 1110 010- -10- ---- 001- ---- ---- ---- # SVE2 32-bit scatter NT store (vector+scalar)
37
-FAIL 1110 010- ---- ---- 1-0- ---- ---- ---- # SVE scatter store (scalar+32-bit vector)
38
-FAIL 1110 010- ---- ---- 101- ---- ---- ---- # SVE scatter store (misc)
39
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/target/arm/translate-sve.c
42
+++ b/target/arm/translate-sve.c
43
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
44
if (!dc_isar_feature(aa64_sve, s)) {
45
return false;
46
}
47
+ s->is_nonstreaming = true;
48
if (!sve_access_check(s)) {
49
return true;
50
}
51
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
52
if (!dc_isar_feature(aa64_sve, s)) {
53
return false;
54
}
55
+ s->is_nonstreaming = true;
56
if (!sve_access_check(s)) {
57
return true;
58
}
59
@@ -XXX,XX +XXX,XX @@ static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
60
if (!dc_isar_feature(aa64_sve2, s)) {
61
return false;
62
}
63
+ s->is_nonstreaming = true;
64
if (!sve_access_check(s)) {
65
return true;
66
}
67
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
68
if (!dc_isar_feature(aa64_sve, s)) {
69
return false;
70
}
71
+ s->is_nonstreaming = true;
72
if (!sve_access_check(s)) {
73
return true;
74
}
75
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
76
if (!dc_isar_feature(aa64_sve, s)) {
77
return false;
78
}
79
+ s->is_nonstreaming = true;
80
if (!sve_access_check(s)) {
81
return true;
82
}
83
@@ -XXX,XX +XXX,XX @@ static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
84
if (!dc_isar_feature(aa64_sve2, s)) {
85
return false;
86
}
87
+ s->is_nonstreaming = true;
88
if (!sve_access_check(s)) {
89
return true;
90
}
91
--
92
2.25.1
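
For the open-coded trans_* functions above, setting s->is_nonstreaming by hand
has the same effect as the TRANS_FEAT_NONSTREAMING conversions in the earlier
patches: the flag is consumed by sve_access_check(). In the form added earlier
in this series (not shown in this excerpt), that check contains something along
these lines, where the field name sme_trap_nonstreaming is assumed from that
patch:

    /* Sketch of the consumer of is_nonstreaming inside sve_access_check(). */
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }

so the gather/scatter forms only trap when the CPU is in streaming mode and
FEAT_SME_FA64 is not implemented.
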
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap if full
a64 support is not enabled in streaming mode. In this case, introduce
PRF_ns (prefetch non-streaming) to handle the checks.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-13-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 3 ---
 target/arm/sve.decode | 10 +++++-----
 target/arm/translate-sve.c | 11 +++++++++++
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/sme-fa64.decode
20
+++ b/target/arm/sme-fa64.decode
21
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
22
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
23
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
24
25
-FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
26
-FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector)
27
FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar)
28
FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm)
29
FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
30
FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
31
-FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch
32
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/sve.decode
35
+++ b/target/arm/sve.decode
36
@@ -XXX,XX +XXX,XX @@ LD1RO_zpri 1010010 .. 01 0.... 001 ... ..... ..... \
37
@rpri_load_msz nreg=0
38
39
# SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets)
40
-PRF 1000010 00 -1 ----- 0-- --- ----- 0 ----
41
+PRF_ns 1000010 00 -1 ----- 0-- --- ----- 0 ----
42
43
# SVE 32-bit gather prefetch (vector plus immediate)
44
-PRF 1000010 -- 00 ----- 111 --- ----- 0 ----
45
+PRF_ns 1000010 -- 00 ----- 111 --- ----- 0 ----
46
47
# SVE contiguous prefetch (scalar plus immediate)
48
PRF 1000010 11 1- ----- 0-- --- ----- 0 ----
49
@@ -XXX,XX +XXX,XX @@ LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \
50
@rpri_g_load esz=3
51
52
# SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets)
53
-PRF 1100010 00 11 ----- 1-- --- ----- 0 ----
54
+PRF_ns 1100010 00 11 ----- 1-- --- ----- 0 ----
55
56
# SVE 64-bit gather prefetch (scalar plus unpacked 32-bit scaled offsets)
57
-PRF 1100010 00 -1 ----- 0-- --- ----- 0 ----
58
+PRF_ns 1100010 00 -1 ----- 0-- --- ----- 0 ----
59
60
# SVE 64-bit gather prefetch (vector plus immediate)
61
-PRF 1100010 -- 00 ----- 111 --- ----- 0 ----
62
+PRF_ns 1100010 -- 00 ----- 111 --- ----- 0 ----
63
64
### SVE Memory Store Group
65
66
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/arm/translate-sve.c
69
+++ b/target/arm/translate-sve.c
70
@@ -XXX,XX +XXX,XX @@ static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
71
return true;
72
}
73
74
+static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a)
75
+{
76
+ if (!dc_isar_feature(aa64_sve, s)) {
77
+ return false;
78
+ }
79
+ /* Prefetch is a nop within QEMU. */
80
+ s->is_nonstreaming = true;
81
+ (void)sve_access_check(s);
82
+ return true;
83
+}
84
+
85
/*
86
* Move Prefix
87
*
88
--
89
2.25.1
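
For comparison, the pre-existing trans_PRF handler (untouched by this patch,
so not visible in the diff) is essentially:

    static bool trans_PRF(DisasContext *s, arg_PRF *a)
    {
        if (!dc_isar_feature(aa64_sve, s)) {
            return false;
        }
        /* Prefetch is a nop within QEMU.  */
        (void)sve_access_check(s);
        return true;
    }

trans_PRF_ns above has the same shape, with s->is_nonstreaming set before the
access check, so that only the gather prefetch forms trap in streaming mode.
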
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 2 --
 target/arm/translate-sve.c | 2 ++
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar)
24
-FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm)
25
FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
26
FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-sve.c
30
+++ b/target/arm/translate-sve.c
31
@@ -XXX,XX +XXX,XX @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
32
if (!dc_isar_feature(aa64_sve, s)) {
33
return false;
34
}
35
+ s->is_nonstreaming = true;
36
if (sve_access_check(s)) {
37
TCGv_i64 addr = new_tmp_a64(s);
38
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
39
@@ -XXX,XX +XXX,XX @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
40
if (!dc_isar_feature(aa64_sve, s)) {
41
return false;
42
}
43
+ s->is_nonstreaming = true;
44
if (sve_access_check(s)) {
45
int vsz = vec_full_reg_size(s);
46
int elements = vsz >> dtype_esz[a->dtype];
47
--
48
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 3 ---
 target/arm/translate-sve.c | 2 ++
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm)
21
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
22
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
23
-
24
-FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
25
-FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
26
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/translate-sve.c
29
+++ b/target/arm/translate-sve.c
30
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
31
if (a->rm == 31) {
32
return false;
33
}
34
+ s->is_nonstreaming = true;
35
if (sve_access_check(s)) {
36
TCGv_i64 addr = new_tmp_a64(s);
37
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
38
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
39
if (!dc_isar_feature(aa64_sve_f64mm, s)) {
40
return false;
41
}
42
+ s->is_nonstreaming = true;
43
if (sve_access_check(s)) {
44
TCGv_i64 addr = new_tmp_a64(s);
45
tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
46
--
47
2.25.1
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
3
These functions will be used to verify that the cpu
4
is in the correct state for a given instruction.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20190301200501.16533-9-richard.henderson@linaro.org
8
Message-id: 20220708151540.18136-16-richard.henderson@linaro.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
---
10
---
9
target/arm/cpu.h | 5 ++++
11
target/arm/translate-a64.h | 21 +++++++++++++++++++++
10
target/arm/cpu64.c | 2 +-
12
target/arm/translate-a64.c | 34 ++++++++++++++++++++++++++++++++++
11
target/arm/translate-a64.c | 58 ++++++++++++++++++++++++++++++++++++++
13
2 files changed, 55 insertions(+)
12
3 files changed, 64 insertions(+), 1 deletion(-)
13
14
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
15
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpu.h
17
--- a/target/arm/translate-a64.h
17
+++ b/target/arm/cpu.h
18
+++ b/target/arm/translate-a64.h
18
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id)
19
@@ -XXX,XX +XXX,XX @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
19
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0;
20
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
20
}
21
unsigned int imms, unsigned int immr);
21
22
bool sve_access_check(DisasContext *s);
22
+static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id)
23
+bool sme_enabled_check(DisasContext *s);
24
+bool sme_enabled_check_with_svcr(DisasContext *s, unsigned);
25
+
26
+/* This function corresponds to CheckStreamingSVEEnabled. */
27
+static inline bool sme_sm_enabled_check(DisasContext *s)
23
+{
28
+{
24
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2;
29
+ return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK);
25
+}
30
+}
26
+
31
+
27
static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id)
32
+/* This function corresponds to CheckSMEAndZAEnabled. */
28
{
33
+static inline bool sme_za_enabled_check(DisasContext *s)
29
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0;
34
+{
30
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
35
+ return sme_enabled_check_with_svcr(s, R_SVCR_ZA_MASK);
31
index XXXXXXX..XXXXXXX 100644
36
+}
32
--- a/target/arm/cpu64.c
37
+
33
+++ b/target/arm/cpu64.c
38
+/* Note that this function corresponds to CheckStreamingSVEAndZAEnabled. */
34
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
39
+static inline bool sme_smza_enabled_check(DisasContext *s)
35
t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
40
+{
36
t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
41
+ return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
37
t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
42
+}
38
- t = FIELD_DP64(t, ID_AA64ISAR0, TS, 1);
43
+
39
+ t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
44
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
40
cpu->isar.id_aa64isar0 = t;
45
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
41
46
bool tag_checked, int log2_size);
42
t = cpu->isar.id_aa64isar1;
43
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
47
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
44
index XXXXXXX..XXXXXXX 100644
48
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/translate-a64.c
49
--- a/target/arm/translate-a64.c
46
+++ b/target/arm/translate-a64.c
50
+++ b/target/arm/translate-a64.c
47
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
51
@@ -XXX,XX +XXX,XX @@ static bool sme_access_check(DisasContext *s)
48
}
52
return true;
49
}
53
}
50
54
51
+static void gen_xaflag(void)
55
+/* This function corresponds to CheckSMEEnabled. */
56
+bool sme_enabled_check(DisasContext *s)
52
+{
57
+{
53
+ TCGv_i32 z = tcg_temp_new_i32();
54
+
55
+ tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);
56
+
57
+ /*
58
+ /*
58
+ * (!C & !Z) << 31
59
+ * Note that unlike sve_excp_el, we have not constrained sme_excp_el
59
+ * (!(C | Z)) << 31
60
+ * to be zero when fp_excp_el has priority. This is because we need
60
+ * ~((C | Z) << 31)
61
+ * sme_excp_el by itself for cpregs access checks.
61
+ * ~-(C | Z)
62
+ * (C | Z) - 1
63
+ */
62
+ */
64
+ tcg_gen_or_i32(cpu_NF, cpu_CF, z);
63
+ if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
65
+ tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);
64
+ s->fp_access_checked = true;
66
+
65
+ return sme_access_check(s);
67
+ /* !(Z & C) */
66
+ }
68
+ tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
67
+ return fp_access_check_only(s);
69
+ tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);
70
+
71
+ /* (!C & Z) << 31 -> -(Z & ~C) */
72
+ tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
73
+ tcg_gen_neg_i32(cpu_VF, cpu_VF);
74
+
75
+ /* C | Z */
76
+ tcg_gen_or_i32(cpu_CF, cpu_CF, z);
77
+
78
+ tcg_temp_free_i32(z);
79
+}
68
+}
80
+
69
+
81
+static void gen_axflag(void)
70
+/* Common subroutine for CheckSMEAnd*Enabled. */
71
+bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
82
+{
72
+{
83
+ tcg_gen_sari_i32(cpu_VF, cpu_VF, 31); /* V ? -1 : 0 */
73
+ if (!sme_enabled_check(s)) {
84
+ tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF); /* C & !V */
74
+ return false;
85
+
75
+ }
86
+ /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
76
+ if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
87
+ tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);
77
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
88
+
78
+ syn_smetrap(SME_ET_NotStreaming, false));
89
+ tcg_gen_movi_i32(cpu_NF, 0);
79
+ return false;
90
+ tcg_gen_movi_i32(cpu_VF, 0);
80
+ }
81
+ if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
82
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
83
+ syn_smetrap(SME_ET_InactiveZA, false));
84
+ return false;
85
+ }
86
+ return true;
91
+}
87
+}
92
+
88
+
93
/* MSR (immediate) - move immediate to processor state field */
89
/*
94
static void handle_msr_i(DisasContext *s, uint32_t insn,
90
* This utility function is for doing register extension with an
95
unsigned int op1, unsigned int op2, unsigned int crm)
91
* optional shift. You will likely want to pass a temporary for the
96
@@ -XXX,XX +XXX,XX @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
97
s->base.is_jmp = DISAS_NEXT;
98
break;
99
100
+ case 0x01: /* XAFlag */
101
+ if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
102
+ goto do_unallocated;
103
+ }
104
+ gen_xaflag();
105
+ s->base.is_jmp = DISAS_NEXT;
106
+ break;
107
+
108
+ case 0x02: /* AXFlag */
109
+ if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
110
+ goto do_unallocated;
111
+ }
112
+ gen_axflag();
113
+ s->base.is_jmp = DISAS_NEXT;
114
+ break;
115
+
116
case 0x05: /* SPSel */
117
if (s->current_el == 0) {
118
goto do_unallocated;
119
--
92
--
120
2.20.1
93
2.25.1
121
122
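
The expected call pattern for these new checks can be seen in the later patches
of this series (trans_ZERO and the MOVA handling below): an SME instruction
first gates on the ID-register feature bit and then uses the check matching its
pseudocode. As a rough sketch:

    if (!dc_isar_feature(aa64_sme, s)) {
        return false;
    }
    if (sme_smza_enabled_check(s)) {   /* CheckStreamingSVEAndZAEnabled */
        /* ... generate code for the instruction ... */
    }
    return true;

Note that the translator returns true even when the check fails, because the
check itself has already raised the appropriate exception.
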
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This decoding more closely matches the ARMv8.4 Table C4-6,
3
The pseudocode for CheckSVEEnabled gains a check for Streaming
4
Encoding table for Data Processing - Register Group.
4
SVE mode, and for SME present but SVE absent.
5
5
6
In particular, op2 == 0 is now more than just Add/sub (with carry).
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20190301200501.16533-7-richard.henderson@linaro.org
8
Message-id: 20220708151540.18136-17-richard.henderson@linaro.org
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
10
---
13
target/arm/translate-a64.c | 98 ++++++++++++++++++++++----------------
11
target/arm/translate-a64.c | 22 ++++++++++++++++------
14
1 file changed, 57 insertions(+), 41 deletions(-)
12
1 file changed, 16 insertions(+), 6 deletions(-)
15
13
16
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
14
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
17
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate-a64.c
16
--- a/target/arm/translate-a64.c
19
+++ b/target/arm/translate-a64.c
17
+++ b/target/arm/translate-a64.c
20
@@ -XXX,XX +XXX,XX @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
18
@@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s)
19
return true;
21
}
20
}
22
21
23
/* Add/subtract (with carry)
22
-/* Check that SVE access is enabled. If it is, return true.
24
- * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
23
+/*
25
- * +--+--+--+------------------------+------+---------+------+-----+
24
+ * Check that SVE access is enabled. If it is, return true.
26
- * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd |
25
* If not, emit code to generate an appropriate exception and return false.
27
- * +--+--+--+------------------------+------+---------+------+-----+
26
+ * This function corresponds to CheckSVEEnabled().
28
- * [000000]
29
+ * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
30
+ * +--+--+--+------------------------+------+-------------+------+-----+
31
+ * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | 0 0 0 0 0 0 | Rn | Rd |
32
+ * +--+--+--+------------------------+------+-------------+------+-----+
33
*/
27
*/
34
28
bool sve_access_check(DisasContext *s)
35
static void disas_adc_sbc(DisasContext *s, uint32_t insn)
29
{
36
@@ -XXX,XX +XXX,XX @@ static void disas_adc_sbc(DisasContext *s, uint32_t insn)
30
- if (s->sve_excp_el) {
37
unsigned int sf, op, setflags, rm, rn, rd;
31
- assert(!s->sve_access_checked);
38
TCGv_i64 tcg_y, tcg_rn, tcg_rd;
32
- s->sve_access_checked = true;
39
40
- if (extract32(insn, 10, 6) != 0) {
41
- unallocated_encoding(s);
42
- return;
43
- }
44
-
33
-
45
sf = extract32(insn, 31, 1);
34
+ if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
46
op = extract32(insn, 30, 1);
35
+ assert(dc_isar_feature(aa64_sme, s));
47
setflags = extract32(insn, 29, 1);
36
+ if (!sme_sm_enabled_check(s)) {
48
@@ -XXX,XX +XXX,XX @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
37
+ goto fail_exit;
38
+ }
39
+ } else if (s->sve_excp_el) {
40
gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
41
syn_sve_access_trap(), s->sve_excp_el);
42
- return false;
43
+ goto fail_exit;
49
}
44
}
45
s->sve_access_checked = true;
46
return fp_access_check(s);
47
+
48
+ fail_exit:
49
+ /* Assert that we only raise one exception per instruction. */
50
+ assert(!s->sve_access_checked);
51
+ s->sve_access_checked = true;
52
+ return false;
50
}
53
}
51
54
52
-/* Data processing - register */
55
/*
53
+/*
54
+ * Data processing - register
55
+ * 31 30 29 28 25 21 20 16 10 0
56
+ * +--+---+--+---+-------+-----+-------+-------+---------+
57
+ * | |op0| |op1| 1 0 1 | op2 | | op3 | |
58
+ * +--+---+--+---+-------+-----+-------+-------+---------+
59
+ */
60
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
61
{
62
- switch (extract32(insn, 24, 5)) {
63
- case 0x0a: /* Logical (shifted register) */
64
- disas_logic_reg(s, insn);
65
- break;
66
- case 0x0b: /* Add/subtract */
67
- if (insn & (1 << 21)) { /* (extended register) */
68
- disas_add_sub_ext_reg(s, insn);
69
+ int op0 = extract32(insn, 30, 1);
70
+ int op1 = extract32(insn, 28, 1);
71
+ int op2 = extract32(insn, 21, 4);
72
+ int op3 = extract32(insn, 10, 6);
73
+
74
+ if (!op1) {
75
+ if (op2 & 8) {
76
+ if (op2 & 1) {
77
+ /* Add/sub (extended register) */
78
+ disas_add_sub_ext_reg(s, insn);
79
+ } else {
80
+ /* Add/sub (shifted register) */
81
+ disas_add_sub_reg(s, insn);
82
+ }
83
} else {
84
- disas_add_sub_reg(s, insn);
85
+ /* Logical (shifted register) */
86
+ disas_logic_reg(s, insn);
87
}
88
- break;
89
- case 0x1b: /* Data-processing (3 source) */
90
- disas_data_proc_3src(s, insn);
91
- break;
92
- case 0x1a:
93
- switch (extract32(insn, 21, 3)) {
94
- case 0x0: /* Add/subtract (with carry) */
95
+ return;
96
+ }
97
+
98
+ switch (op2) {
99
+ case 0x0:
100
+ switch (op3) {
101
+ case 0x00: /* Add/subtract (with carry) */
102
disas_adc_sbc(s, insn);
103
break;
104
- case 0x2: /* Conditional compare */
105
- disas_cc(s, insn); /* both imm and reg forms */
106
- break;
107
- case 0x4: /* Conditional select */
108
- disas_cond_select(s, insn);
109
- break;
110
- case 0x6: /* Data-processing */
111
- if (insn & (1 << 30)) { /* (1 source) */
112
- disas_data_proc_1src(s, insn);
113
- } else { /* (2 source) */
114
- disas_data_proc_2src(s, insn);
115
- }
116
- break;
117
+
118
default:
119
- unallocated_encoding(s);
120
- break;
121
+ goto do_unallocated;
122
}
123
break;
124
+
125
+ case 0x2: /* Conditional compare */
126
+ disas_cc(s, insn); /* both imm and reg forms */
127
+ break;
128
+
129
+ case 0x4: /* Conditional select */
130
+ disas_cond_select(s, insn);
131
+ break;
132
+
133
+ case 0x6: /* Data-processing */
134
+ if (op0) { /* (1 source) */
135
+ disas_data_proc_1src(s, insn);
136
+ } else { /* (2 source) */
137
+ disas_data_proc_2src(s, insn);
138
+ }
139
+ break;
140
+ case 0x8 ... 0xf: /* (3 source) */
141
+ disas_data_proc_3src(s, insn);
142
+ break;
143
+
144
default:
145
+ do_unallocated:
146
unallocated_encoding(s);
147
break;
148
}
149
--
56
--
150
2.20.1
57
2.25.1
151
152
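
Concretely, with the reworked sve_access_check() above: on a CPU where SME is
implemented, an SVE instruction reached with PSTATE.SM set (or with SVE itself
absent) is validated via sme_sm_enabled_check(), i.e. CheckStreamingSVEEnabled,
rather than via sve_excp_el; the plain non-streaming SVE path is unchanged.
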
1
From: Eric Auger <eric.auger@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Add the kvm_arm_get_max_vm_ipa_size() helper that returns the
3
These SME instructions are nominally within the SVE decode space,
4
number of bits in the IPA address space supported by KVM.
4
so we add them to sve.decode and translate-sve.c.
5
5
6
This capability needs to be known to create the VM with a
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
specific IPA max size (kvm_type passed along KVM_CREATE_VM ioctl.
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
8
Message-id: 20220708151540.18136-18-richard.henderson@linaro.org
9
Signed-off-by: Eric Auger <eric.auger@redhat.com>
10
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
11
Message-id: 20190304101339.25970-6-eric.auger@redhat.com
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
10
---
14
target/arm/kvm_arm.h | 13 +++++++++++++
11
target/arm/translate-a64.h | 12 ++++++++++++
15
target/arm/kvm.c | 10 ++++++++++
12
target/arm/sve.decode | 5 ++++-
16
2 files changed, 23 insertions(+)
13
target/arm/translate-sve.c | 38 ++++++++++++++++++++++++++++++++++++++
14
3 files changed, 54 insertions(+), 1 deletion(-)
17
15
18
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
16
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
19
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/kvm_arm.h
18
--- a/target/arm/translate-a64.h
21
+++ b/target/arm/kvm_arm.h
19
+++ b/target/arm/translate-a64.h
22
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
20
@@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_size(DisasContext *s)
23
*/
21
return s->vl;
24
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);
25
26
+/**
27
+ * kvm_arm_get_max_vm_ipa_size - Returns the number of bits in the
28
+ * IPA address space supported by KVM
29
+ *
30
+ * @ms: Machine state handle
31
+ */
32
+int kvm_arm_get_max_vm_ipa_size(MachineState *ms);
33
+
34
/**
35
* kvm_arm_sync_mpstate_to_kvm
36
* @cpu: ARMCPU
37
@@ -XXX,XX +XXX,XX @@ static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
38
cpu->host_cpu_probe_failed = true;
39
}
22
}
40
23
41
+static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
24
+/* Return the byte size of the vector register, SVL / 8. */
25
+static inline int streaming_vec_reg_size(DisasContext *s)
42
+{
26
+{
43
+ return -ENOENT;
27
+ return s->svl;
44
+}
28
+}
45
+
29
+
46
static inline int kvm_arm_vgic_probe(void)
30
/*
47
{
31
* Return the offset info CPUARMState of the predicate vector register Pn.
48
return 0;
32
* Note for this purpose, FFR is P16.
49
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
33
@@ -XXX,XX +XXX,XX @@ static inline int pred_full_reg_size(DisasContext *s)
50
index XXXXXXX..XXXXXXX 100644
34
return s->vl >> 3;
51
--- a/target/arm/kvm.c
52
+++ b/target/arm/kvm.c
53
@@ -XXX,XX +XXX,XX @@
54
#include "qemu/error-report.h"
55
#include "sysemu/sysemu.h"
56
#include "sysemu/kvm.h"
57
+#include "sysemu/kvm_int.h"
58
#include "kvm_arm.h"
59
#include "cpu.h"
60
#include "trace.h"
61
@@ -XXX,XX +XXX,XX @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
62
env->features = arm_host_cpu_features.features;
63
}
35
}
64
36
65
+int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
37
+/* Return the byte size of the predicate register, SVL / 64. */
38
+static inline int streaming_pred_reg_size(DisasContext *s)
66
+{
39
+{
67
+ KVMState *s = KVM_STATE(ms->accelerator);
40
+ return s->svl >> 3;
68
+ int ret;
69
+
70
+ ret = kvm_check_extension(s, KVM_CAP_ARM_VM_IPA_SIZE);
71
+ return ret > 0 ? ret : 40;
72
+}
41
+}
73
+
42
+
74
int kvm_arch_init(MachineState *ms, KVMState *s)
43
/*
44
* Round up the size of a register to a size allowed by
45
* the tcg vector infrastructure. Any operation which uses this
46
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/sve.decode
49
+++ b/target/arm/sve.decode
50
@@ -XXX,XX +XXX,XX @@ INDEX_ri 00000100 esz:2 1 imm:s5 010001 rn:5 rd:5
51
# SVE index generation (register start, register increment)
52
INDEX_rr 00000100 .. 1 ..... 010011 ..... ..... @rd_rn_rm
53
54
-### SVE Stack Allocation Group
55
+### SVE / Streaming SVE Stack Allocation Group
56
57
# SVE stack frame adjustment
58
ADDVL 00000100 001 ..... 01010 ...... ..... @rd_rn_i6
59
+ADDSVL 00000100 001 ..... 01011 ...... ..... @rd_rn_i6
60
ADDPL 00000100 011 ..... 01010 ...... ..... @rd_rn_i6
61
+ADDSPL 00000100 011 ..... 01011 ...... ..... @rd_rn_i6
62
63
# SVE stack frame size
64
RDVL 00000100 101 11111 01010 imm:s6 rd:5
65
+RDSVL 00000100 101 11111 01011 imm:s6 rd:5
66
67
### SVE Bitwise Shift - Unpredicated Group
68
69
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/translate-sve.c
72
+++ b/target/arm/translate-sve.c
73
@@ -XXX,XX +XXX,XX @@ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
74
return true;
75
}
76
77
+static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a)
78
+{
79
+ if (!dc_isar_feature(aa64_sme, s)) {
80
+ return false;
81
+ }
82
+ if (sme_enabled_check(s)) {
83
+ TCGv_i64 rd = cpu_reg_sp(s, a->rd);
84
+ TCGv_i64 rn = cpu_reg_sp(s, a->rn);
85
+ tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s));
86
+ }
87
+ return true;
88
+}
89
+
90
static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
75
{
91
{
76
/* For ARM interrupt delivery is always asynchronous,
92
if (!dc_isar_feature(aa64_sve, s)) {
93
@@ -XXX,XX +XXX,XX @@ static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
94
return true;
95
}
96
97
+static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a)
98
+{
99
+ if (!dc_isar_feature(aa64_sme, s)) {
100
+ return false;
101
+ }
102
+ if (sme_enabled_check(s)) {
103
+ TCGv_i64 rd = cpu_reg_sp(s, a->rd);
104
+ TCGv_i64 rn = cpu_reg_sp(s, a->rn);
105
+ tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s));
106
+ }
107
+ return true;
108
+}
109
+
110
static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
111
{
112
if (!dc_isar_feature(aa64_sve, s)) {
113
@@ -XXX,XX +XXX,XX @@ static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
114
return true;
115
}
116
117
+static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a)
118
+{
119
+ if (!dc_isar_feature(aa64_sme, s)) {
120
+ return false;
121
+ }
122
+ if (sme_enabled_check(s)) {
123
+ TCGv_i64 reg = cpu_reg(s, a->rd);
124
+ tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s));
125
+ }
126
+ return true;
127
+}
128
+
129
/*
130
*** SVE Compute Vector Address Group
131
*/
77
--
132
--
78
2.20.1
133
2.25.1
79
80
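
As a worked example of the new instructions (assuming a streaming vector length
of 256 bits, so streaming_vec_reg_size(s) == 32): RDSVL X0, #2 sets X0 to 64,
ADDSVL X0, X1, #2 adds 64 to X1, and ADDSPL X0, X1, #2 adds 8, since
streaming_pred_reg_size(s) is SVL / 64 bytes, here 4.
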
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h | 2 ++
 target/arm/sme.decode | 4 ++++
 target/arm/sme_helper.c | 25 +++++++++++++++++++++++++
 target/arm/translate-sme.c | 13 +++++++++++++
 4 files changed, 44 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-sme.h
17
+++ b/target/arm/helper-sme.h
18
@@ -XXX,XX +XXX,XX @@
19
20
DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
21
DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
22
+
23
+DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32)
24
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/sme.decode
27
+++ b/target/arm/sme.decode
28
@@ -XXX,XX +XXX,XX @@
29
#
30
# This file is processed by scripts/decodetree.py
31
#
32
+
33
+### SME Misc
34
+
35
+ZERO 11000000 00 001 00000000000 imm:8
36
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/sme_helper.c
39
+++ b/target/arm/sme_helper.c
40
@@ -XXX,XX +XXX,XX @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i)
41
memset(env->zarray, 0, sizeof(env->zarray));
42
}
43
}
44
+
45
+void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
46
+{
47
+ uint32_t i;
48
+
49
+ /*
50
+ * Special case clearing the entire ZA space.
51
+ * This falls into the CONSTRAINED UNPREDICTABLE zeroing of any
52
+ * parts of the ZA storage outside of SVL.
53
+ */
54
+ if (imm == 0xff) {
55
+ memset(env->zarray, 0, sizeof(env->zarray));
56
+ return;
57
+ }
58
+
59
+ /*
60
+ * Recall that ZAnH.D[m] is spread across ZA[n+8*m],
61
+ * so each row is discontiguous within ZA[].
62
+ */
63
+ for (i = 0; i < svl; i++) {
64
+ if (imm & (1 << (i % 8))) {
65
+ memset(&env->zarray[i], 0, svl);
66
+ }
67
+ }
68
+}
69
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/translate-sme.c
72
+++ b/target/arm/translate-sme.c
73
@@ -XXX,XX +XXX,XX @@
74
*/
75
76
#include "decode-sme.c.inc"
77
+
78
+
79
+static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
80
+{
81
+ if (!dc_isar_feature(aa64_sme, s)) {
82
+ return false;
83
+ }
84
+ if (sme_za_enabled_check(s)) {
85
+ gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm),
86
+ tcg_constant_i32(streaming_vec_reg_size(s)));
87
+ }
88
+ return true;
89
+}
90
--
91
2.25.1
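
As a worked example of the row mapping described in the helper's comment
(assuming SVL = 256 bits, so svl == 32 and ZA has 32 rows): ZERO {ZA0.D}
encodes imm == 0x01, so helper_sme_zero() clears the rows with i % 8 == 0,
i.e. rows 0, 8, 16 and 24, which together make up the ZA0.D tile; ZERO {ZA0.B}
encodes imm == 0xff and takes the whole-array memset fast path.
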
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
We do not need an out-of-line helper for manipulating bits in pstate.
3
We can reuse the SVE functions for implementing moves to/from
4
While changing things, share the implementation of gen_ss_advance.
4
horizontal tile slices, but we need new ones for moves to/from
5
vertical tile slices.
5
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20190301200501.16533-6-richard.henderson@linaro.org
9
Message-id: 20220708151540.18136-20-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
---
11
target/arm/helper.h | 2 --
12
target/arm/helper-sme.h | 12 +++
12
target/arm/translate.h | 34 ++++++++++++++++++++++++++++++++++
13
target/arm/helper-sve.h | 2 +
13
target/arm/op_helper.c | 5 -----
14
target/arm/translate-a64.h | 8 ++
14
target/arm/translate-a64.c | 11 -----------
15
target/arm/translate.h | 5 ++
15
target/arm/translate.c | 11 -----------
16
target/arm/sme.decode | 15 ++++
16
5 files changed, 34 insertions(+), 29 deletions(-)
17
target/arm/sme_helper.c | 151 ++++++++++++++++++++++++++++++++++++-
18
target/arm/sve_helper.c | 12 +++
19
target/arm/translate-sme.c | 127 +++++++++++++++++++++++++++++++
20
8 files changed, 331 insertions(+), 1 deletion(-)
17
21
18
diff --git a/target/arm/helper.h b/target/arm/helper.h
22
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
19
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper.h
24
--- a/target/arm/helper-sme.h
21
+++ b/target/arm/helper.h
25
+++ b/target/arm/helper-sme.h
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(get_cp_reg, i32, env, ptr)
26
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
23
DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64)
27
DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
24
DEF_HELPER_2(get_cp_reg64, i64, env, ptr)
28
25
29
DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32)
26
-DEF_HELPER_1(clear_pstate_ss, void, env)
30
+
27
-
31
+/* Move to/from vertical array slices, i.e. columns, so 'c'. */
28
DEF_HELPER_2(get_r13_banked, i32, env, i32)
32
+DEF_HELPER_FLAGS_4(sme_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
29
DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
33
+DEF_HELPER_FLAGS_4(sme_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
34
+DEF_HELPER_FLAGS_4(sme_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
35
+DEF_HELPER_FLAGS_4(sme_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
36
+DEF_HELPER_FLAGS_4(sme_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
37
+DEF_HELPER_FLAGS_4(sme_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
38
+DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
39
+DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
40
+DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
41
+DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
42
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/helper-sve.h
45
+++ b/target/arm/helper-sve.h
46
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG,
47
void, ptr, ptr, ptr, ptr, i32)
48
DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG,
49
void, ptr, ptr, ptr, ptr, i32)
50
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_q, TCG_CALL_NO_RWG,
51
+ void, ptr, ptr, ptr, ptr, i32)
52
53
DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG,
54
void, ptr, ptr, ptr, ptr, i32)
55
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/translate-a64.h
58
+++ b/target/arm/translate-a64.h
59
@@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s)
60
return size_for_gvec(pred_full_reg_size(s));
61
}
62
63
+/* Return a newly allocated pointer to the predicate register. */
64
+static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno)
65
+{
66
+ TCGv_ptr ret = tcg_temp_new_ptr();
67
+ tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno));
68
+ return ret;
69
+}
70
+
71
bool disas_sve(DisasContext *, uint32_t);
72
bool disas_sme(DisasContext *, uint32_t);
30
73
31
diff --git a/target/arm/translate.h b/target/arm/translate.h
74
diff --git a/target/arm/translate.h b/target/arm/translate.h
32
index XXXXXXX..XXXXXXX 100644
75
index XXXXXXX..XXXXXXX 100644
33
--- a/target/arm/translate.h
76
--- a/target/arm/translate.h
34
+++ b/target/arm/translate.h
77
+++ b/target/arm/translate.h
35
@@ -XXX,XX +XXX,XX @@ static inline TCGv_i32 get_ahp_flag(void)
78
@@ -XXX,XX +XXX,XX @@ static inline int plus_2(DisasContext *s, int x)
36
return ret;
79
return x + 2;
37
}
80
}
38
81
39
+/* Set bits within PSTATE. */
82
+static inline int plus_12(DisasContext *s, int x)
40
+static inline void set_pstate_bits(uint32_t bits)
83
+{
41
+{
84
+ return x + 12;
42
+ TCGv_i32 p = tcg_temp_new_i32();
85
+}
43
+
86
+
44
+ tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
87
static inline int times_2(DisasContext *s, int x)
45
+
88
{
46
+ tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
89
return x * 2;
47
+ tcg_gen_ori_i32(p, p, bits);
90
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
48
+ tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
91
index XXXXXXX..XXXXXXX 100644
49
+ tcg_temp_free_i32(p);
92
--- a/target/arm/sme.decode
50
+}
93
+++ b/target/arm/sme.decode
51
+
94
@@ -XXX,XX +XXX,XX @@
52
+/* Clear bits within PSTATE. */
95
### SME Misc
53
+static inline void clear_pstate_bits(uint32_t bits)
96
54
+{
97
ZERO 11000000 00 001 00000000000 imm:8
55
+ TCGv_i32 p = tcg_temp_new_i32();
98
+
56
+
99
+### SME Move into/from Array
57
+ tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
100
+
58
+
101
+%mova_rs 13:2 !function=plus_12
59
+ tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
102
+&mova esz rs pg zr za_imm v:bool to_vec:bool
60
+ tcg_gen_andi_i32(p, p, ~bits);
103
+
61
+ tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
104
+MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \
62
+ tcg_temp_free_i32(p);
105
+ &mova to_vec=0 rs=%mova_rs
63
+}
106
+MOVA 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \
64
+
107
+ &mova to_vec=0 rs=%mova_rs esz=4
65
+/* If the singlestep state is Active-not-pending, advance to Active-pending. */
108
+
66
+static inline void gen_ss_advance(DisasContext *s)
109
+MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \
67
+{
110
+ &mova to_vec=1 rs=%mova_rs
68
+ if (s->ss_active) {
111
+MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \
69
+ s->pstate_ss = 0;
112
+ &mova to_vec=1 rs=%mova_rs esz=4
70
+ clear_pstate_bits(PSTATE_SS);
113
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
71
+ }
114
index XXXXXXX..XXXXXXX 100644
72
+}
115
--- a/target/arm/sme_helper.c
73
116
+++ b/target/arm/sme_helper.c
74
/* Vector operations shared between ARM and AArch64. */
117
@@ -XXX,XX +XXX,XX @@
75
extern const GVecGen3 bsl_op;
118
76
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
119
#include "qemu/osdep.h"
77
index XXXXXXX..XXXXXXX 100644
120
#include "cpu.h"
78
--- a/target/arm/op_helper.c
121
-#include "internals.h"
79
+++ b/target/arm/op_helper.c
122
+#include "tcg/tcg-gvec-desc.h"
80
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
123
#include "exec/helper-proto.h"
81
return res;
124
+#include "qemu/int128.h"
125
+#include "vec_internal.h"
126
127
/* ResetSVEState */
128
void arm_reset_sve_state(CPUARMState *env)
129
@@ -XXX,XX +XXX,XX @@ void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
130
}
131
}
82
}
132
}
83
133
+
84
-void HELPER(clear_pstate_ss)(CPUARMState *env)
134
+
85
-{
135
+/*
86
- env->pstate &= ~PSTATE_SS;
136
+ * When considering the ZA storage as an array of elements of
87
-}
137
+ * type T, the index within that array of the Nth element of
88
-
138
+ * a vertical slice of a tile can be calculated like this,
89
void HELPER(pre_hvc)(CPUARMState *env)
139
+ * regardless of the size of type T. This is because the tiles
140
+ * are interleaved, so if type T is size N bytes then row 1 of
141
+ * the tile is N rows away from row 0. The division by N to
142
+ * convert a byte offset into an array index and the multiplication
143
+ * by N to convert from vslice-index-within-the-tile to
144
+ * the index within the ZA storage cancel out.
145
+ */
146
+#define tile_vslice_index(i) ((i) * sizeof(ARMVectorReg))
147
+
148
+/*
149
+ * When doing byte arithmetic on the ZA storage, the element
150
+ * byteoff bytes away in a tile vertical slice is always this
151
+ * many bytes away in the ZA storage, regardless of the
152
+ * size of the tile element, assuming that byteoff is a multiple
153
+ * of the element size. Again this is because of the interleaving
154
+ * of the tiles. For instance if we have 1 byte per element then
155
+ * each row of the ZA storage has one byte of the vslice data,
156
+ * and (counting from 0) byte 8 goes in row 8 of the storage
157
+ * at offset (8 * row-size-in-bytes).
158
+ * If we have 8 bytes per element then each row of the ZA storage
159
+ * has 8 bytes of the data, but there are 8 interleaved tiles and
160
+ * so byte 8 of the data goes into row 1 of the tile,
161
+ * which is again row 8 of the storage, so the offset is still
162
+ * (8 * row-size-in-bytes). Similarly for other element sizes.
163
+ */
164
+#define tile_vslice_offset(byteoff) ((byteoff) * sizeof(ARMVectorReg))
165
+
166
+
167
+/*
168
+ * Move Zreg vector to ZArray column.
169
+ */
170
+#define DO_MOVA_C(NAME, TYPE, H) \
171
+void HELPER(NAME)(void *za, void *vn, void *vg, uint32_t desc) \
172
+{ \
173
+ int i, oprsz = simd_oprsz(desc); \
174
+ for (i = 0; i < oprsz; ) { \
175
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
176
+ do { \
177
+ if (pg & 1) { \
178
+ *(TYPE *)(za + tile_vslice_offset(i)) = *(TYPE *)(vn + H(i)); \
179
+ } \
180
+ i += sizeof(TYPE); \
181
+ pg >>= sizeof(TYPE); \
182
+ } while (i & 15); \
183
+ } \
184
+}
185
+
186
+DO_MOVA_C(sme_mova_cz_b, uint8_t, H1)
187
+DO_MOVA_C(sme_mova_cz_h, uint16_t, H1_2)
188
+DO_MOVA_C(sme_mova_cz_s, uint32_t, H1_4)
189
+
190
+void HELPER(sme_mova_cz_d)(void *za, void *vn, void *vg, uint32_t desc)
191
+{
192
+ int i, oprsz = simd_oprsz(desc) / 8;
193
+ uint8_t *pg = vg;
194
+ uint64_t *n = vn;
195
+ uint64_t *a = za;
196
+
197
+ for (i = 0; i < oprsz; i++) {
198
+ if (pg[H1(i)] & 1) {
199
+ a[tile_vslice_index(i)] = n[i];
200
+ }
201
+ }
202
+}
203
+
204
+void HELPER(sme_mova_cz_q)(void *za, void *vn, void *vg, uint32_t desc)
205
+{
206
+ int i, oprsz = simd_oprsz(desc) / 16;
207
+ uint16_t *pg = vg;
208
+ Int128 *n = vn;
209
+ Int128 *a = za;
210
+
211
+ /*
212
+ * Int128 is used here simply to copy 16 bytes, and to simplify
213
+ * the address arithmetic.
214
+ */
215
+ for (i = 0; i < oprsz; i++) {
216
+ if (pg[H2(i)] & 1) {
217
+ a[tile_vslice_index(i)] = n[i];
218
+ }
219
+ }
220
+}
221
+
222
+#undef DO_MOVA_C
223
+
224
+/*
225
+ * Move ZArray column to Zreg vector.
226
+ */
227
+#define DO_MOVA_Z(NAME, TYPE, H) \
228
+void HELPER(NAME)(void *vd, void *za, void *vg, uint32_t desc) \
229
+{ \
230
+ int i, oprsz = simd_oprsz(desc); \
231
+ for (i = 0; i < oprsz; ) { \
232
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
233
+ do { \
234
+ if (pg & 1) { \
235
+ *(TYPE *)(vd + H(i)) = *(TYPE *)(za + tile_vslice_offset(i)); \
236
+ } \
237
+ i += sizeof(TYPE); \
238
+ pg >>= sizeof(TYPE); \
239
+ } while (i & 15); \
240
+ } \
241
+}
242
+
243
+DO_MOVA_Z(sme_mova_zc_b, uint8_t, H1)
244
+DO_MOVA_Z(sme_mova_zc_h, uint16_t, H1_2)
245
+DO_MOVA_Z(sme_mova_zc_s, uint32_t, H1_4)
246
+
247
+void HELPER(sme_mova_zc_d)(void *vd, void *za, void *vg, uint32_t desc)
248
+{
249
+ int i, oprsz = simd_oprsz(desc) / 8;
250
+ uint8_t *pg = vg;
251
+ uint64_t *d = vd;
252
+ uint64_t *a = za;
253
+
254
+ for (i = 0; i < oprsz; i++) {
255
+ if (pg[H1(i)] & 1) {
256
+ d[i] = a[tile_vslice_index(i)];
257
+ }
258
+ }
259
+}
260
+
261
+void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc)
262
+{
263
+ int i, oprsz = simd_oprsz(desc) / 16;
264
+ uint16_t *pg = vg;
265
+ Int128 *d = vd;
266
+ Int128 *a = za;
267
+
268
+ /*
269
+ * Int128 is used here simply to copy 16 bytes, and to simplify
270
+ * the address arithmetic.
271
+ */
272
+ for (i = 0; i < oprsz; i++, za += sizeof(ARMVectorReg)) {
273
+ if (pg[H2(i)] & 1) {
274
+ d[i] = a[tile_vslice_index(i)];
275
+ }
276
+ }
277
+}
278
+
279
+#undef DO_MOVA_Z
280
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
281
index XXXXXXX..XXXXXXX 100644
282
--- a/target/arm/sve_helper.c
283
+++ b/target/arm/sve_helper.c
284
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm,
285
}
286
}
287
288
+void HELPER(sve_sel_zpzz_q)(void *vd, void *vn, void *vm,
289
+ void *vg, uint32_t desc)
290
+{
291
+ intptr_t i, opr_sz = simd_oprsz(desc) / 16;
292
+ Int128 *d = vd, *n = vn, *m = vm;
293
+ uint16_t *pg = vg;
294
+
295
+ for (i = 0; i < opr_sz; i += 1) {
296
+ d[i] = (pg[H2(i)] & 1 ? n : m)[i];
297
+ }
298
+}
299
+
300
/* Two operand comparison controlled by a predicate.
301
* ??? It is very tempting to want to be able to expand this inline
302
* with x86 instructions, e.g.
303
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
304
index XXXXXXX..XXXXXXX 100644
305
--- a/target/arm/translate-sme.c
306
+++ b/target/arm/translate-sme.c
307
@@ -XXX,XX +XXX,XX @@
308
#include "decode-sme.c.inc"
309
310
311
+/*
312
+ * Resolve tile.size[index] to a host pointer, where tile and index
313
+ * are always decoded together, dependent on the element size.
314
+ */
315
+static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
316
+ int tile_index, bool vertical)
317
+{
318
+ int tile = tile_index >> (4 - esz);
319
+ int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz);
320
+ int pos, len, offset;
321
+ TCGv_i32 tmp;
322
+ TCGv_ptr addr;
323
+
324
+ /* Compute the final index, which is Rs+imm. */
325
+ tmp = tcg_temp_new_i32();
326
+ tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs));
327
+ tcg_gen_addi_i32(tmp, tmp, index);
328
+
329
+ /* Prepare a power-of-two modulo via extraction of @len bits. */
330
+ len = ctz32(streaming_vec_reg_size(s)) - esz;
331
+
332
+ if (vertical) {
333
+ /*
334
+ * Compute the byte offset of the index within the tile:
335
+ * (index % (svl / size)) * size
336
+ * = (index % (svl >> esz)) << esz
337
+ * Perform the power-of-two modulo via extraction of the low @len bits.
338
+ * Perform the multiply by shifting left by @pos bits.
339
+ * Perform these operations simultaneously via deposit into zero.
340
+ */
341
+ pos = esz;
342
+ tcg_gen_deposit_z_i32(tmp, tmp, pos, len);
343
+
344
+ /*
345
+ * For big-endian, adjust the indexed column byte offset within
346
+ * the uint64_t host words that make up env->zarray[].
347
+ */
348
+ if (HOST_BIG_ENDIAN && esz < MO_64) {
349
+ tcg_gen_xori_i32(tmp, tmp, 8 - (1 << esz));
350
+ }
351
+ } else {
352
+ /*
353
+ * Compute the byte offset of the index within the tile:
354
+ * (index % (svl / size)) * (size * sizeof(row))
355
+ * = (index % (svl >> esz)) << (esz + log2(sizeof(row)))
356
+ */
357
+ pos = esz + ctz32(sizeof(ARMVectorReg));
358
+ tcg_gen_deposit_z_i32(tmp, tmp, pos, len);
359
+
360
+ /* Row slices are always aligned and need no endian adjustment. */
361
+ }
362
+
363
+ /* The tile byte offset within env->zarray is the row. */
364
+ offset = tile * sizeof(ARMVectorReg);
365
+
366
+ /* Include the byte offset of zarray to make this relative to env. */
367
+ offset += offsetof(CPUARMState, zarray);
368
+ tcg_gen_addi_i32(tmp, tmp, offset);
369
+
370
+ /* Add the byte offset to env to produce the final pointer. */
371
+ addr = tcg_temp_new_ptr();
372
+ tcg_gen_ext_i32_ptr(addr, tmp);
373
+ tcg_temp_free_i32(tmp);
374
+ tcg_gen_add_ptr(addr, addr, cpu_env);
375
+
376
+ return addr;
377
+}
378
+
379
static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
90
{
380
{
91
ARMCPU *cpu = arm_env_get_cpu(env);
381
if (!dc_isar_feature(aa64_sme, s)) {
92
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
382
@@ -XXX,XX +XXX,XX @@ static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
93
index XXXXXXX..XXXXXXX 100644
383
}
94
--- a/target/arm/translate-a64.c
384
return true;
95
+++ b/target/arm/translate-a64.c
96
@@ -XXX,XX +XXX,XX @@ static void gen_exception_bkpt_insn(DisasContext *s, int offset,
97
s->base.is_jmp = DISAS_NORETURN;
98
}
385
}
99
386
+
100
-static void gen_ss_advance(DisasContext *s)
387
+static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
101
-{
388
+{
102
- /* If the singlestep state is Active-not-pending, advance to
389
+ static gen_helper_gvec_4 * const h_fns[5] = {
103
- * Active-pending.
390
+ gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
104
- */
391
+ gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d,
105
- if (s->ss_active) {
392
+ gen_helper_sve_sel_zpzz_q
106
- s->pstate_ss = 0;
393
+ };
107
- gen_helper_clear_pstate_ss(cpu_env);
394
+ static gen_helper_gvec_3 * const cz_fns[5] = {
108
- }
395
+ gen_helper_sme_mova_cz_b, gen_helper_sme_mova_cz_h,
109
-}
396
+ gen_helper_sme_mova_cz_s, gen_helper_sme_mova_cz_d,
110
-
397
+ gen_helper_sme_mova_cz_q,
111
static void gen_step_complete_exception(DisasContext *s)
398
+ };
112
{
399
+ static gen_helper_gvec_3 * const zc_fns[5] = {
113
/* We just completed step of an insn. Move from Active-not-pending
400
+ gen_helper_sme_mova_zc_b, gen_helper_sme_mova_zc_h,
114
diff --git a/target/arm/translate.c b/target/arm/translate.c
401
+ gen_helper_sme_mova_zc_s, gen_helper_sme_mova_zc_d,
115
index XXXXXXX..XXXXXXX 100644
402
+ gen_helper_sme_mova_zc_q,
116
--- a/target/arm/translate.c
403
+ };
117
+++ b/target/arm/translate.c
404
+
118
@@ -XXX,XX +XXX,XX @@ static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
405
+ TCGv_ptr t_za, t_zr, t_pg;
119
tcg_temp_free_i32(tcg_excp);
406
+ TCGv_i32 t_desc;
120
}
407
+ int svl;
121
408
+
122
-static void gen_ss_advance(DisasContext *s)
409
+ if (!dc_isar_feature(aa64_sme, s)) {
123
-{
410
+ return false;
124
- /* If the singlestep state is Active-not-pending, advance to
411
+ }
125
- * Active-pending.
412
+ if (!sme_smza_enabled_check(s)) {
126
- */
413
+ return true;
127
- if (s->ss_active) {
414
+ }
128
- s->pstate_ss = 0;
415
+
129
- gen_helper_clear_pstate_ss(cpu_env);
416
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
130
- }
417
+ t_zr = vec_full_reg_ptr(s, a->zr);
131
-}
418
+ t_pg = pred_full_reg_ptr(s, a->pg);
132
-
419
+
133
static void gen_step_complete_exception(DisasContext *s)
420
+ svl = streaming_vec_reg_size(s);
134
{
421
+ t_desc = tcg_constant_i32(simd_desc(svl, svl, 0));
135
/* We just completed step of an insn. Move from Active-not-pending
422
+
423
+ if (a->v) {
424
+ /* Vertical slice -- use sme mova helpers. */
425
+ if (a->to_vec) {
426
+ zc_fns[a->esz](t_zr, t_za, t_pg, t_desc);
427
+ } else {
428
+ cz_fns[a->esz](t_za, t_zr, t_pg, t_desc);
429
+ }
430
+ } else {
431
+ /* Horizontal slice -- reuse sve sel helpers. */
432
+ if (a->to_vec) {
433
+ h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc);
434
+ } else {
435
+ h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc);
436
+ }
437
+ }
438
+
439
+ tcg_temp_free_ptr(t_za);
440
+ tcg_temp_free_ptr(t_zr);
441
+ tcg_temp_free_ptr(t_pg);
442
+
443
+ return true;
444
+}
136
--
445
--
137
2.20.1
446
2.25.1
138
139
1
From: Eric Auger <eric.auger@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Now that we have the extended memory map (high IO regions beyond the
3
We cannot reuse the SVE functions for LD[1-4] and ST[1-4],
4
scalable RAM) and dynamic IPA range support at KVM/ARM level,
4
because those functions accept only a Zreg register number.
5
we can bump the legacy 255GB initial RAM limit. The actual maximum
5
For SME, we want to pass a pointer into ZA storage.
6
RAM size now depends on the physical CPU and host kernel, in
7
accelerated mode. In TCG mode, it depends on the VCPU
8
AA64MMFR0.PARANGE.
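As a rough sketch of the helper-signature split described for the SME
LD1/ST1 change (the sve_ld1bb_r prototype is quoted from memory and is
only indicative; the sme_ld1b_h prototype matches the declarations added
below):

    /* SVE contiguous load: the destination Zreg number travels in 'desc'. */
    void HELPER(sve_ld1bb_r)(CPUARMState *env, void *vg,
                             target_ulong addr, uint32_t desc);

    /* SME load: 'za' is a host pointer into the ZA tile slice, resolved
     * by get_tile_rowcol() in the translator, so the same helper serves
     * any tile and slice without needing a register number. */
    void HELPER(sme_ld1b_h)(CPUARMState *env, void *za, void *vg,
                            target_ulong addr, uint32_t desc);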
9
6
10
Signed-off-by: Eric Auger <eric.auger@redhat.com>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20190304101339.25970-11-eric.auger@redhat.com
9
Message-id: 20220708151540.18136-21-richard.henderson@linaro.org
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
11
---
15
hw/arm/virt.c | 21 +--------------------
12
target/arm/helper-sme.h | 82 +++++
16
1 file changed, 1 insertion(+), 20 deletions(-)
13
target/arm/sme.decode | 9 +
14
target/arm/sme_helper.c | 595 +++++++++++++++++++++++++++++++++++++
15
target/arm/translate-sme.c | 70 +++++
16
4 files changed, 756 insertions(+)
17
17
18
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
18
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
19
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/arm/virt.c
20
--- a/target/arm/helper-sme.h
21
+++ b/hw/arm/virt.c
21
+++ b/target/arm/helper-sme.h
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
23
DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
24
DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
25
DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
26
+
27
+DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
28
+DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
29
+DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
30
+DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
31
+
32
+DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
33
+DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
34
+DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
35
+DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
36
+DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
37
+DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
38
+DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
39
+DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
40
+
41
+DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
42
+DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
43
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
44
+DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
45
+DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
46
+DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
47
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
48
+DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
49
+
50
+DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
51
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
52
+DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
53
+DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
54
+DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
55
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
56
+DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
57
+DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
58
+
59
+DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
60
+DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
61
+DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
62
+DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
63
+DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
64
+DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
65
+DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
66
+DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
67
+
68
+DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
69
+DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
70
+DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
71
+DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
72
+
73
+DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
74
+DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
75
+DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
76
+DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
77
+DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
78
+DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
79
+DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
80
+DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
81
+
82
+DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
83
+DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
84
+DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
85
+DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
86
+DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
87
+DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
88
+DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
89
+DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
90
+
91
+DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
92
+DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
93
+DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
94
+DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
95
+DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
96
+DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
97
+DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
98
+DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
99
+
100
+DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
101
+DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
102
+DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
103
+DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
104
+DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
105
+DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
106
+DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
107
+DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
108
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
109
index XXXXXXX..XXXXXXX 100644
110
--- a/target/arm/sme.decode
111
+++ b/target/arm/sme.decode
112
@@ -XXX,XX +XXX,XX @@ MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \
113
&mova to_vec=1 rs=%mova_rs
114
MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \
115
&mova to_vec=1 rs=%mova_rs esz=4
116
+
117
+### SME Memory
118
+
119
+&ldst esz rs pg rn rm za_imm v:bool st:bool
120
+
121
+LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
122
+ &ldst rs=%mova_rs
123
+LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
124
+ &ldst esz=4 rs=%mova_rs
125
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
126
index XXXXXXX..XXXXXXX 100644
127
--- a/target/arm/sme_helper.c
128
+++ b/target/arm/sme_helper.c
22
@@ -XXX,XX +XXX,XX @@
129
@@ -XXX,XX +XXX,XX @@
23
130
24
#define PLATFORM_BUS_NUM_IRQS 64
131
#include "qemu/osdep.h"
25
132
#include "cpu.h"
26
-/* RAM limit in GB. Since VIRT_MEM starts at the 1GB mark, this means
133
+#include "internals.h"
27
- * RAM can go up to the 256GB mark, leaving 256GB of the physical
134
#include "tcg/tcg-gvec-desc.h"
28
- * address space unallocated and free for future use between 256G and 512G.
135
#include "exec/helper-proto.h"
29
- * If we need to provide more RAM to VMs in the future then we need to:
136
+#include "exec/cpu_ldst.h"
30
- * * allocate a second bank of RAM starting at 2TB and working up
137
+#include "exec/exec-all.h"
31
- * * fix the DT and ACPI table generation code in QEMU to correctly
138
#include "qemu/int128.h"
32
- * report two split lumps of RAM to the guest
139
#include "vec_internal.h"
33
- * * fix KVM in the host kernel to allow guests with >40 bit address spaces
140
+#include "sve_ldst_internal.h"
34
- * (We don't want to fill all the way up to 512GB with RAM because
141
35
- * we might want it for non-RAM purposes later. Conversely it seems
142
/* ResetSVEState */
36
- * reasonable to assume that anybody configuring a VM with a quarter
143
void arm_reset_sve_state(CPUARMState *env)
37
- * of a terabyte of RAM will be doing it on a host with more than a
144
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc)
38
- * terabyte of physical address space.)
145
}
39
- */
146
40
+/* Legacy RAM limit in GB (< version 4.0) */
147
#undef DO_MOVA_Z
41
#define LEGACY_RAMLIMIT_GB 255
148
+
42
#define LEGACY_RAMLIMIT_BYTES (LEGACY_RAMLIMIT_GB * GiB)
149
+/*
43
150
+ * Clear elements in a tile slice comprising len bytes.
44
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
151
+ */
45
152
+
46
vms->smp_cpus = smp_cpus;
153
+typedef void ClearFn(void *ptr, size_t off, size_t len);
47
154
+
48
- if (machine->ram_size > vms->memmap[VIRT_MEM].size) {
155
+static void clear_horizontal(void *ptr, size_t off, size_t len)
49
- error_report("mach-virt: cannot model more than %dGB RAM",
156
+{
50
- LEGACY_RAMLIMIT_GB);
157
+ memset(ptr + off, 0, len);
51
- exit(1);
158
+}
52
- }
159
+
53
-
160
+static void clear_vertical_b(void *vptr, size_t off, size_t len)
54
if (vms->virt && kvm_enabled()) {
161
+{
55
error_report("mach-virt: KVM does not support providing "
162
+ for (size_t i = 0; i < len; ++i) {
56
"Virtualization extensions to the guest CPU");
163
+ *(uint8_t *)(vptr + tile_vslice_offset(i + off)) = 0;
164
+ }
165
+}
166
+
167
+static void clear_vertical_h(void *vptr, size_t off, size_t len)
168
+{
169
+ for (size_t i = 0; i < len; i += 2) {
170
+ *(uint16_t *)(vptr + tile_vslice_offset(i + off)) = 0;
171
+ }
172
+}
173
+
174
+static void clear_vertical_s(void *vptr, size_t off, size_t len)
175
+{
176
+ for (size_t i = 0; i < len; i += 4) {
177
+ *(uint32_t *)(vptr + tile_vslice_offset(i + off)) = 0;
178
+ }
179
+}
180
+
181
+static void clear_vertical_d(void *vptr, size_t off, size_t len)
182
+{
183
+ for (size_t i = 0; i < len; i += 8) {
184
+ *(uint64_t *)(vptr + tile_vslice_offset(i + off)) = 0;
185
+ }
186
+}
187
+
188
+static void clear_vertical_q(void *vptr, size_t off, size_t len)
189
+{
190
+ for (size_t i = 0; i < len; i += 16) {
191
+ memset(vptr + tile_vslice_offset(i + off), 0, 16);
192
+ }
193
+}
194
+
195
+/*
196
+ * Copy elements from an array into a tile slice comprising len bytes.
197
+ */
198
+
199
+typedef void CopyFn(void *dst, const void *src, size_t len);
200
+
201
+static void copy_horizontal(void *dst, const void *src, size_t len)
202
+{
203
+ memcpy(dst, src, len);
204
+}
205
+
206
+static void copy_vertical_b(void *vdst, const void *vsrc, size_t len)
207
+{
208
+ const uint8_t *src = vsrc;
209
+ uint8_t *dst = vdst;
210
+ size_t i;
211
+
212
+ for (i = 0; i < len; ++i) {
213
+ dst[tile_vslice_index(i)] = src[i];
214
+ }
215
+}
216
+
217
+static void copy_vertical_h(void *vdst, const void *vsrc, size_t len)
218
+{
219
+ const uint16_t *src = vsrc;
220
+ uint16_t *dst = vdst;
221
+ size_t i;
222
+
223
+ for (i = 0; i < len / 2; ++i) {
224
+ dst[tile_vslice_index(i)] = src[i];
225
+ }
226
+}
227
+
228
+static void copy_vertical_s(void *vdst, const void *vsrc, size_t len)
229
+{
230
+ const uint32_t *src = vsrc;
231
+ uint32_t *dst = vdst;
232
+ size_t i;
233
+
234
+ for (i = 0; i < len / 4; ++i) {
235
+ dst[tile_vslice_index(i)] = src[i];
236
+ }
237
+}
238
+
239
+static void copy_vertical_d(void *vdst, const void *vsrc, size_t len)
240
+{
241
+ const uint64_t *src = vsrc;
242
+ uint64_t *dst = vdst;
243
+ size_t i;
244
+
245
+ for (i = 0; i < len / 8; ++i) {
246
+ dst[tile_vslice_index(i)] = src[i];
247
+ }
248
+}
249
+
250
+static void copy_vertical_q(void *vdst, const void *vsrc, size_t len)
251
+{
252
+ for (size_t i = 0; i < len; i += 16) {
253
+ memcpy(vdst + tile_vslice_offset(i), vsrc + i, 16);
254
+ }
255
+}
256
+
257
+/*
258
+ * Host and TLB primitives for vertical tile slice addressing.
259
+ */
260
+
261
+#define DO_LD(NAME, TYPE, HOST, TLB) \
262
+static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \
263
+{ \
264
+ TYPE val = HOST(host); \
265
+ *(TYPE *)(za + tile_vslice_offset(off)) = val; \
266
+} \
267
+static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \
268
+ intptr_t off, target_ulong addr, uintptr_t ra) \
269
+{ \
270
+ TYPE val = TLB(env, useronly_clean_ptr(addr), ra); \
271
+ *(TYPE *)(za + tile_vslice_offset(off)) = val; \
272
+}
273
+
274
+#define DO_ST(NAME, TYPE, HOST, TLB) \
275
+static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \
276
+{ \
277
+ TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \
278
+ HOST(host, val); \
279
+} \
280
+static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \
281
+ intptr_t off, target_ulong addr, uintptr_t ra) \
282
+{ \
283
+ TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \
284
+ TLB(env, useronly_clean_ptr(addr), val, ra); \
285
+}
286
+
287
+/*
288
+ * The ARMVectorReg elements are stored in host-endian 64-bit units.
289
+ * For 128-bit quantities, the sequence defined by the Elem[] pseudocode
290
+ * corresponds to storing the two 64-bit pieces in little-endian order.
291
+ */
292
+#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB) \
293
+static inline void HNAME##_host(void *za, intptr_t off, void *host) \
294
+{ \
295
+ uint64_t val0 = HOST(host), val1 = HOST(host + 8); \
296
+ uint64_t *ptr = za + off; \
297
+ ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \
298
+} \
299
+static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
300
+{ \
301
+ HNAME##_host(za, tile_vslice_offset(off), host); \
302
+} \
303
+static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \
304
+ target_ulong addr, uintptr_t ra) \
305
+{ \
306
+ uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra); \
307
+ uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra); \
308
+ uint64_t *ptr = za + off; \
309
+ ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \
310
+} \
311
+static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \
312
+ target_ulong addr, uintptr_t ra) \
313
+{ \
314
+ HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \
315
+}
316
+
317
+#define DO_STQ(HNAME, VNAME, BE, HOST, TLB) \
318
+static inline void HNAME##_host(void *za, intptr_t off, void *host) \
319
+{ \
320
+ uint64_t *ptr = za + off; \
321
+ HOST(host, ptr[BE]); \
322
+ HOST(host + 8, ptr[!BE]); \
323
+} \
324
+static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
325
+{ \
326
+ HNAME##_host(za, tile_vslice_offset(off), host); \
327
+} \
328
+static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \
329
+ target_ulong addr, uintptr_t ra) \
330
+{ \
331
+ uint64_t *ptr = za + off; \
332
+ TLB(env, useronly_clean_ptr(addr), ptr[BE], ra); \
333
+ TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra); \
334
+} \
335
+static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \
336
+ target_ulong addr, uintptr_t ra) \
337
+{ \
338
+ HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \
339
+}
340
+
341
+DO_LD(ld1b, uint8_t, ldub_p, cpu_ldub_data_ra)
342
+DO_LD(ld1h_be, uint16_t, lduw_be_p, cpu_lduw_be_data_ra)
343
+DO_LD(ld1h_le, uint16_t, lduw_le_p, cpu_lduw_le_data_ra)
344
+DO_LD(ld1s_be, uint32_t, ldl_be_p, cpu_ldl_be_data_ra)
345
+DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra)
346
+DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra)
347
+DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra)
348
+
349
+DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra)
350
+DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra)
351
+
352
+DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra)
353
+DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra)
354
+DO_ST(st1h_le, uint16_t, stw_le_p, cpu_stw_le_data_ra)
355
+DO_ST(st1s_be, uint32_t, stl_be_p, cpu_stl_be_data_ra)
356
+DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra)
357
+DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra)
358
+DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra)
359
+
360
+DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra)
361
+DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra)
362
+
363
+#undef DO_LD
364
+#undef DO_ST
365
+#undef DO_LDQ
366
+#undef DO_STQ
367
+
368
+/*
369
+ * Common helper for all contiguous predicated loads.
370
+ */
371
+
372
+static inline QEMU_ALWAYS_INLINE
373
+void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
374
+ const target_ulong addr, uint32_t desc, const uintptr_t ra,
375
+ const int esz, uint32_t mtedesc, bool vertical,
376
+ sve_ldst1_host_fn *host_fn,
377
+ sve_ldst1_tlb_fn *tlb_fn,
378
+ ClearFn *clr_fn,
379
+ CopyFn *cpy_fn)
380
+{
381
+ const intptr_t reg_max = simd_oprsz(desc);
382
+ const intptr_t esize = 1 << esz;
383
+ intptr_t reg_off, reg_last;
384
+ SVEContLdSt info;
385
+ void *host;
386
+ int flags;
387
+
388
+ /* Find the active elements. */
389
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) {
390
+ /* The entire predicate was false; no load occurs. */
391
+ clr_fn(za, 0, reg_max);
392
+ return;
393
+ }
394
+
395
+ /* Probe the page(s). Exit with exception for any invalid page. */
396
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra);
397
+
398
+ /* Handle watchpoints for all active elements. */
399
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize,
400
+ BP_MEM_READ, ra);
401
+
402
+ /*
403
+ * Handle mte checks for all active elements.
404
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
405
+ */
406
+ if (mtedesc) {
407
+ sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize,
408
+ mtedesc, ra);
409
+ }
410
+
411
+ flags = info.page[0].flags | info.page[1].flags;
412
+ if (unlikely(flags != 0)) {
413
+#ifdef CONFIG_USER_ONLY
414
+ g_assert_not_reached();
415
+#else
416
+ /*
417
+ * At least one page includes MMIO.
418
+ * Any bus operation can fail with cpu_transaction_failed,
419
+ * which for ARM will raise SyncExternal. Perform the load
420
+ * into scratch memory to preserve register state until the end.
421
+ */
422
+ ARMVectorReg scratch = { };
423
+
424
+ reg_off = info.reg_off_first[0];
425
+ reg_last = info.reg_off_last[1];
426
+ if (reg_last < 0) {
427
+ reg_last = info.reg_off_split;
428
+ if (reg_last < 0) {
429
+ reg_last = info.reg_off_last[0];
430
+ }
431
+ }
432
+
433
+ do {
434
+ uint64_t pg = vg[reg_off >> 6];
435
+ do {
436
+ if ((pg >> (reg_off & 63)) & 1) {
437
+ tlb_fn(env, &scratch, reg_off, addr + reg_off, ra);
438
+ }
439
+ reg_off += esize;
440
+ } while (reg_off & 63);
441
+ } while (reg_off <= reg_last);
442
+
443
+ cpy_fn(za, &scratch, reg_max);
444
+ return;
445
+#endif
446
+ }
447
+
448
+ /* The entire operation is in RAM, on valid pages. */
449
+
450
+ reg_off = info.reg_off_first[0];
451
+ reg_last = info.reg_off_last[0];
452
+ host = info.page[0].host;
453
+
454
+ if (!vertical) {
455
+ memset(za, 0, reg_max);
456
+ } else if (reg_off) {
457
+ clr_fn(za, 0, reg_off);
458
+ }
459
+
460
+ while (reg_off <= reg_last) {
461
+ uint64_t pg = vg[reg_off >> 6];
462
+ do {
463
+ if ((pg >> (reg_off & 63)) & 1) {
464
+ host_fn(za, reg_off, host + reg_off);
465
+ } else if (vertical) {
466
+ clr_fn(za, reg_off, esize);
467
+ }
468
+ reg_off += esize;
469
+ } while (reg_off <= reg_last && (reg_off & 63));
470
+ }
471
+
472
+ /*
473
+ * Use the slow path to manage the cross-page misalignment.
474
+ * But we know this is RAM and cannot trap.
475
+ */
476
+ reg_off = info.reg_off_split;
477
+ if (unlikely(reg_off >= 0)) {
478
+ tlb_fn(env, za, reg_off, addr + reg_off, ra);
479
+ }
480
+
481
+ reg_off = info.reg_off_first[1];
482
+ if (unlikely(reg_off >= 0)) {
483
+ reg_last = info.reg_off_last[1];
484
+ host = info.page[1].host;
485
+
486
+ do {
487
+ uint64_t pg = vg[reg_off >> 6];
488
+ do {
489
+ if ((pg >> (reg_off & 63)) & 1) {
490
+ host_fn(za, reg_off, host + reg_off);
491
+ } else if (vertical) {
492
+ clr_fn(za, reg_off, esize);
493
+ }
494
+ reg_off += esize;
495
+ } while (reg_off & 63);
496
+ } while (reg_off <= reg_last);
497
+ }
498
+}
499
+
500
+static inline QEMU_ALWAYS_INLINE
501
+void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg,
502
+ target_ulong addr, uint32_t desc, uintptr_t ra,
503
+ const int esz, bool vertical,
504
+ sve_ldst1_host_fn *host_fn,
505
+ sve_ldst1_tlb_fn *tlb_fn,
506
+ ClearFn *clr_fn,
507
+ CopyFn *cpy_fn)
508
+{
509
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
510
+ int bit55 = extract64(addr, 55, 1);
511
+
512
+ /* Remove mtedesc from the normal sve descriptor. */
513
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
514
+
515
+ /* Perform gross MTE suppression early. */
516
+ if (!tbi_check(desc, bit55) ||
517
+ tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
518
+ mtedesc = 0;
519
+ }
520
+
521
+ sme_ld1(env, za, vg, addr, desc, ra, esz, mtedesc, vertical,
522
+ host_fn, tlb_fn, clr_fn, cpy_fn);
523
+}
524
+
525
+#define DO_LD(L, END, ESZ) \
526
+void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg, \
527
+ target_ulong addr, uint32_t desc) \
528
+{ \
529
+ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \
530
+ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \
531
+ clear_horizontal, copy_horizontal); \
532
+} \
533
+void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg, \
534
+ target_ulong addr, uint32_t desc) \
535
+{ \
536
+ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \
537
+ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \
538
+ clear_vertical_##L, copy_vertical_##L); \
539
+} \
540
+void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \
541
+ target_ulong addr, uint32_t desc) \
542
+{ \
543
+ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \
544
+ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \
545
+ clear_horizontal, copy_horizontal); \
546
+} \
547
+void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \
548
+ target_ulong addr, uint32_t desc) \
549
+{ \
550
+ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \
551
+ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \
552
+ clear_vertical_##L, copy_vertical_##L); \
553
+}
554
+
555
+DO_LD(b, , MO_8)
556
+DO_LD(h, _be, MO_16)
557
+DO_LD(h, _le, MO_16)
558
+DO_LD(s, _be, MO_32)
559
+DO_LD(s, _le, MO_32)
560
+DO_LD(d, _be, MO_64)
561
+DO_LD(d, _le, MO_64)
562
+DO_LD(q, _be, MO_128)
563
+DO_LD(q, _le, MO_128)
564
+
565
+#undef DO_LD
566
+
567
+/*
568
+ * Common helper for all contiguous predicated stores.
569
+ */
570
+
571
+static inline QEMU_ALWAYS_INLINE
572
+void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
573
+ const target_ulong addr, uint32_t desc, const uintptr_t ra,
574
+ const int esz, uint32_t mtedesc, bool vertical,
575
+ sve_ldst1_host_fn *host_fn,
576
+ sve_ldst1_tlb_fn *tlb_fn)
577
+{
578
+ const intptr_t reg_max = simd_oprsz(desc);
579
+ const intptr_t esize = 1 << esz;
580
+ intptr_t reg_off, reg_last;
581
+ SVEContLdSt info;
582
+ void *host;
583
+ int flags;
584
+
585
+ /* Find the active elements. */
586
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) {
587
+ /* The entire predicate was false; no store occurs. */
588
+ return;
589
+ }
590
+
591
+ /* Probe the page(s). Exit with exception for any invalid page. */
592
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra);
593
+
594
+ /* Handle watchpoints for all active elements. */
595
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize,
596
+ BP_MEM_WRITE, ra);
597
+
598
+ /*
599
+ * Handle mte checks for all active elements.
600
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
601
+ */
602
+ if (mtedesc) {
603
+ sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize,
604
+ mtedesc, ra);
605
+ }
606
+
607
+ flags = info.page[0].flags | info.page[1].flags;
608
+ if (unlikely(flags != 0)) {
609
+#ifdef CONFIG_USER_ONLY
610
+ g_assert_not_reached();
611
+#else
612
+ /*
613
+ * At least one page includes MMIO.
614
+ * Any bus operation can fail with cpu_transaction_failed,
615
+ * which for ARM will raise SyncExternal. We cannot avoid
616
+ * this fault and will leave with the store incomplete.
617
+ */
618
+ reg_off = info.reg_off_first[0];
619
+ reg_last = info.reg_off_last[1];
620
+ if (reg_last < 0) {
621
+ reg_last = info.reg_off_split;
622
+ if (reg_last < 0) {
623
+ reg_last = info.reg_off_last[0];
624
+ }
625
+ }
626
+
627
+ do {
628
+ uint64_t pg = vg[reg_off >> 6];
629
+ do {
630
+ if ((pg >> (reg_off & 63)) & 1) {
631
+ tlb_fn(env, za, reg_off, addr + reg_off, ra);
632
+ }
633
+ reg_off += esize;
634
+ } while (reg_off & 63);
635
+ } while (reg_off <= reg_last);
636
+ return;
637
+#endif
638
+ }
639
+
640
+ reg_off = info.reg_off_first[0];
641
+ reg_last = info.reg_off_last[0];
642
+ host = info.page[0].host;
643
+
644
+ while (reg_off <= reg_last) {
645
+ uint64_t pg = vg[reg_off >> 6];
646
+ do {
647
+ if ((pg >> (reg_off & 63)) & 1) {
648
+ host_fn(za, reg_off, host + reg_off);
649
+ }
650
+ reg_off += 1 << esz;
651
+ } while (reg_off <= reg_last && (reg_off & 63));
652
+ }
653
+
654
+ /*
655
+ * Use the slow path to manage the cross-page misalignment.
656
+ * But we know this is RAM and cannot trap.
657
+ */
658
+ reg_off = info.reg_off_split;
659
+ if (unlikely(reg_off >= 0)) {
660
+ tlb_fn(env, za, reg_off, addr + reg_off, ra);
661
+ }
662
+
663
+ reg_off = info.reg_off_first[1];
664
+ if (unlikely(reg_off >= 0)) {
665
+ reg_last = info.reg_off_last[1];
666
+ host = info.page[1].host;
667
+
668
+ do {
669
+ uint64_t pg = vg[reg_off >> 6];
670
+ do {
671
+ if ((pg >> (reg_off & 63)) & 1) {
672
+ host_fn(za, reg_off, host + reg_off);
673
+ }
674
+ reg_off += 1 << esz;
675
+ } while (reg_off & 63);
676
+ } while (reg_off <= reg_last);
677
+ }
678
+}
679
+
680
+static inline QEMU_ALWAYS_INLINE
681
+void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr,
682
+ uint32_t desc, uintptr_t ra, int esz, bool vertical,
683
+ sve_ldst1_host_fn *host_fn,
684
+ sve_ldst1_tlb_fn *tlb_fn)
685
+{
686
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
687
+ int bit55 = extract64(addr, 55, 1);
688
+
689
+ /* Remove mtedesc from the normal sve descriptor. */
690
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
691
+
692
+ /* Perform gross MTE suppression early. */
693
+ if (!tbi_check(desc, bit55) ||
694
+ tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
695
+ mtedesc = 0;
696
+ }
697
+
698
+ sme_st1(env, za, vg, addr, desc, ra, esz, mtedesc,
699
+ vertical, host_fn, tlb_fn);
700
+}
701
+
702
+#define DO_ST(L, END, ESZ) \
703
+void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg, \
704
+ target_ulong addr, uint32_t desc) \
705
+{ \
706
+ sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \
707
+ sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \
708
+} \
709
+void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg, \
710
+ target_ulong addr, uint32_t desc) \
711
+{ \
712
+ sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \
713
+ sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \
714
+} \
715
+void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \
716
+ target_ulong addr, uint32_t desc) \
717
+{ \
718
+ sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \
719
+ sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \
720
+} \
721
+void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \
722
+ target_ulong addr, uint32_t desc) \
723
+{ \
724
+ sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \
725
+ sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \
726
+}
727
+
728
+DO_ST(b, , MO_8)
729
+DO_ST(h, _be, MO_16)
730
+DO_ST(h, _le, MO_16)
731
+DO_ST(s, _be, MO_32)
732
+DO_ST(s, _le, MO_32)
733
+DO_ST(d, _be, MO_64)
734
+DO_ST(d, _le, MO_64)
735
+DO_ST(q, _be, MO_128)
736
+DO_ST(q, _le, MO_128)
737
+
738
+#undef DO_ST
739
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
740
index XXXXXXX..XXXXXXX 100644
741
--- a/target/arm/translate-sme.c
742
+++ b/target/arm/translate-sme.c
743
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
744
745
return true;
746
}
747
+
748
+static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
749
+{
750
+ typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32);
751
+
752
+ /*
753
+ * Indexed by [esz][be][v][mte][st], which is (except for load/store)
754
+ * also the order in which the elements appear in the function names,
755
+ * and so how we must concatenate the pieces.
756
+ */
757
+
758
+#define FN_LS(F) { gen_helper_sme_ld1##F, gen_helper_sme_st1##F }
759
+#define FN_MTE(F) { FN_LS(F), FN_LS(F##_mte) }
760
+#define FN_HV(F) { FN_MTE(F##_h), FN_MTE(F##_v) }
761
+#define FN_END(L, B) { FN_HV(L), FN_HV(B) }
762
+
763
+ static GenLdSt1 * const fns[5][2][2][2][2] = {
764
+ FN_END(b, b),
765
+ FN_END(h_le, h_be),
766
+ FN_END(s_le, s_be),
767
+ FN_END(d_le, d_be),
768
+ FN_END(q_le, q_be),
769
+ };
770
+
771
+#undef FN_LS
772
+#undef FN_MTE
773
+#undef FN_HV
774
+#undef FN_END
775
+
776
+ TCGv_ptr t_za, t_pg;
777
+ TCGv_i64 addr;
778
+ int svl, desc = 0;
779
+ bool be = s->be_data == MO_BE;
780
+ bool mte = s->mte_active[0];
781
+
782
+ if (!dc_isar_feature(aa64_sme, s)) {
783
+ return false;
784
+ }
785
+ if (!sme_smza_enabled_check(s)) {
786
+ return true;
787
+ }
788
+
789
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
790
+ t_pg = pred_full_reg_ptr(s, a->pg);
791
+ addr = tcg_temp_new_i64();
792
+
793
+ tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz);
794
+ tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
795
+
796
+ if (mte) {
797
+ desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
798
+ desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
799
+ desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
800
+ desc = FIELD_DP32(desc, MTEDESC, WRITE, a->st);
801
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << a->esz) - 1);
802
+ desc <<= SVE_MTEDESC_SHIFT;
803
+ } else {
804
+ addr = clean_data_tbi(s, addr);
805
+ }
806
+ svl = streaming_vec_reg_size(s);
807
+ desc = simd_desc(svl, svl, desc);
808
+
809
+ fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr,
810
+ tcg_constant_i32(desc));
811
+
812
+ tcg_temp_free_ptr(t_za);
813
+ tcg_temp_free_ptr(t_pg);
814
+ tcg_temp_free_i64(addr);
815
+ return true;
816
+}
57
--
817
--
58
2.20.1
818
2.25.1
59
60
1
From: Eric Auger <eric.auger@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
On ARM, the kvm_type will be resolved by querying the KVMState.
3
Add a TCGv_ptr base argument, which will be cpu_env for SVE.
4
Let's add the MachineState handle to the callback so that we
4
We will reuse this for SME save and restore array insns.
5
can retrieve the KVMState handle. in kvm_init, when the callback
6
is called, the kvm_state variable is not yet set.
7
5
8
Signed-off-by: Eric Auger <eric.auger@redhat.com>
9
Acked-by: David Gibson <david@gibson.dropbear.id.au>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20190304101339.25970-5-eric.auger@redhat.com
8
Message-id: 20220708151540.18136-22-richard.henderson@linaro.org
13
[ppc parts]
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
10
---
18
include/hw/boards.h | 5 ++++-
11
target/arm/translate-a64.h | 3 +++
19
accel/kvm/kvm-all.c | 2 +-
12
target/arm/translate-sve.c | 48 ++++++++++++++++++++++++++++----------
20
hw/ppc/mac_newworld.c | 3 +--
13
2 files changed, 39 insertions(+), 12 deletions(-)
21
hw/ppc/mac_oldworld.c | 2 +-
22
hw/ppc/spapr.c | 2 +-
23
5 files changed, 8 insertions(+), 6 deletions(-)
24
14
25
diff --git a/include/hw/boards.h b/include/hw/boards.h
15
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
26
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
27
--- a/include/hw/boards.h
17
--- a/target/arm/translate-a64.h
28
+++ b/include/hw/boards.h
18
+++ b/target/arm/translate-a64.h
29
@@ -XXX,XX +XXX,XX @@ typedef struct {
19
@@ -XXX,XX +XXX,XX @@ void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
30
* should instead use "unimplemented-device" for all memory ranges where
20
uint32_t rm_ofs, int64_t shift,
31
* the guest will attempt to probe for a device that QEMU doesn't
21
uint32_t opr_sz, uint32_t max_sz);
32
* implement and a stub device is required.
22
33
+ * @kvm_type:
23
+void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
34
+ * Return the type of KVM corresponding to the kvm-type string option or
24
+void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
35
+ * computed based on other criteria such as the host kernel capabilities.
25
+
26
#endif /* TARGET_ARM_TRANSLATE_A64_H */
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-sve.c
30
+++ b/target/arm/translate-sve.c
31
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
32
* The load should begin at the address Rn + IMM.
36
*/
33
*/
37
struct MachineClass {
34
38
/*< private >*/
35
-static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
39
@@ -XXX,XX +XXX,XX @@ struct MachineClass {
36
+void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
40
void (*init)(MachineState *state);
37
+ int len, int rn, int imm)
41
void (*reset)(void);
42
void (*hot_add_cpu)(const int64_t id, Error **errp);
43
- int (*kvm_type)(const char *arg);
44
+ int (*kvm_type)(MachineState *machine, const char *arg);
45
46
BlockInterfaceType block_default_type;
47
int units_per_default_bus;
48
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/accel/kvm/kvm-all.c
51
+++ b/accel/kvm/kvm-all.c
52
@@ -XXX,XX +XXX,XX @@ static int kvm_init(MachineState *ms)
53
54
kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
55
if (mc->kvm_type) {
56
- type = mc->kvm_type(kvm_type);
57
+ type = mc->kvm_type(ms, kvm_type);
58
} else if (kvm_type) {
59
ret = -EINVAL;
60
fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
61
diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c
62
index XXXXXXX..XXXXXXX 100644
63
--- a/hw/ppc/mac_newworld.c
64
+++ b/hw/ppc/mac_newworld.c
65
@@ -XXX,XX +XXX,XX @@ static char *core99_fw_dev_path(FWPathProvider *p, BusState *bus,
66
67
return NULL;
68
}
69
-
70
-static int core99_kvm_type(const char *arg)
71
+static int core99_kvm_type(MachineState *machine, const char *arg)
72
{
38
{
73
/* Always force PR KVM */
39
int len_align = QEMU_ALIGN_DOWN(len, 8);
74
return 2;
40
int len_remain = len % 8;
75
diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c
41
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
76
index XXXXXXX..XXXXXXX 100644
42
t0 = tcg_temp_new_i64();
77
--- a/hw/ppc/mac_oldworld.c
43
for (i = 0; i < len_align; i += 8) {
78
+++ b/hw/ppc/mac_oldworld.c
44
tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
79
@@ -XXX,XX +XXX,XX @@ static char *heathrow_fw_dev_path(FWPathProvider *p, BusState *bus,
45
- tcg_gen_st_i64(t0, cpu_env, vofs + i);
80
return NULL;
46
+ tcg_gen_st_i64(t0, base, vofs + i);
81
}
47
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
82
48
}
83
-static int heathrow_kvm_type(const char *arg)
49
tcg_temp_free_i64(t0);
84
+static int heathrow_kvm_type(MachineState *machine, const char *arg)
50
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
85
{
51
clean_addr = new_tmp_a64_local(s);
86
/* Always force PR KVM */
52
tcg_gen_mov_i64(clean_addr, t0);
87
return 2;
53
88
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
54
+ if (base != cpu_env) {
89
index XXXXXXX..XXXXXXX 100644
55
+ TCGv_ptr b = tcg_temp_local_new_ptr();
90
--- a/hw/ppc/spapr.c
56
+ tcg_gen_mov_ptr(b, base);
91
+++ b/hw/ppc/spapr.c
57
+ base = b;
92
@@ -XXX,XX +XXX,XX @@ static void spapr_machine_init(MachineState *machine)
58
+ }
59
+
60
gen_set_label(loop);
61
62
t0 = tcg_temp_new_i64();
63
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
64
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
65
66
tp = tcg_temp_new_ptr();
67
- tcg_gen_add_ptr(tp, cpu_env, i);
68
+ tcg_gen_add_ptr(tp, base, i);
69
tcg_gen_addi_ptr(i, i, 8);
70
tcg_gen_st_i64(t0, tp, vofs);
71
tcg_temp_free_ptr(tp);
72
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
73
74
tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
75
tcg_temp_free_ptr(i);
76
+
77
+ if (base != cpu_env) {
78
+ tcg_temp_free_ptr(base);
79
+ assert(len_remain == 0);
80
+ }
81
}
82
83
/*
84
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
85
default:
86
g_assert_not_reached();
87
}
88
- tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
89
+ tcg_gen_st_i64(t0, base, vofs + len_align);
90
tcg_temp_free_i64(t0);
93
}
91
}
94
}
92
}
95
93
96
-static int spapr_kvm_type(const char *vm_type)
94
/* Similarly for stores. */
97
+static int spapr_kvm_type(MachineState *machine, const char *vm_type)
95
-static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
96
+void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
97
+ int len, int rn, int imm)
98
{
98
{
99
if (!vm_type) {
99
int len_align = QEMU_ALIGN_DOWN(len, 8);
100
return 0;
100
int len_remain = len % 8;
101
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
102
103
t0 = tcg_temp_new_i64();
104
for (i = 0; i < len_align; i += 8) {
105
- tcg_gen_ld_i64(t0, cpu_env, vofs + i);
106
+ tcg_gen_ld_i64(t0, base, vofs + i);
107
tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
108
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
109
}
110
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
111
clean_addr = new_tmp_a64_local(s);
112
tcg_gen_mov_i64(clean_addr, t0);
113
114
+ if (base != cpu_env) {
115
+ TCGv_ptr b = tcg_temp_local_new_ptr();
116
+ tcg_gen_mov_ptr(b, base);
117
+ base = b;
118
+ }
119
+
120
gen_set_label(loop);
121
122
t0 = tcg_temp_new_i64();
123
tp = tcg_temp_new_ptr();
124
- tcg_gen_add_ptr(tp, cpu_env, i);
125
+ tcg_gen_add_ptr(tp, base, i);
126
tcg_gen_ld_i64(t0, tp, vofs);
127
tcg_gen_addi_ptr(i, i, 8);
128
tcg_temp_free_ptr(tp);
129
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
130
131
tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
132
tcg_temp_free_ptr(i);
133
+
134
+ if (base != cpu_env) {
135
+ tcg_temp_free_ptr(base);
136
+ assert(len_remain == 0);
137
+ }
138
}
139
140
/* Predicate register stores can be any multiple of 2. */
141
if (len_remain) {
142
t0 = tcg_temp_new_i64();
143
- tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
144
+ tcg_gen_ld_i64(t0, base, vofs + len_align);
145
146
switch (len_remain) {
147
case 2:
148
@@ -XXX,XX +XXX,XX @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
149
if (sve_access_check(s)) {
150
int size = vec_full_reg_size(s);
151
int off = vec_full_reg_offset(s, a->rd);
152
- do_ldr(s, off, size, a->rn, a->imm * size);
153
+ gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
154
}
155
return true;
156
}
157
@@ -XXX,XX +XXX,XX @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
158
if (sve_access_check(s)) {
159
int size = pred_full_reg_size(s);
160
int off = pred_full_reg_offset(s, a->rd);
161
- do_ldr(s, off, size, a->rn, a->imm * size);
162
+ gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
163
}
164
return true;
165
}
166
@@ -XXX,XX +XXX,XX @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a)
167
if (sve_access_check(s)) {
168
int size = vec_full_reg_size(s);
169
int off = vec_full_reg_offset(s, a->rd);
170
- do_str(s, off, size, a->rn, a->imm * size);
171
+ gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
172
}
173
return true;
174
}
175
@@ -XXX,XX +XXX,XX @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
176
if (sve_access_check(s)) {
177
int size = pred_full_reg_size(s);
178
int off = pred_full_reg_offset(s, a->rd);
179
- do_str(s, off, size, a->rn, a->imm * size);
180
+ gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
181
}
182
return true;
183
}
101
--
184
--
102
2.20.1
185
2.25.1
103
104
1
From: Michel Heily <michelheily@gmail.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Implement the watchdog timer for the Stellaris boards.
3
We can reuse the SVE functions for LDR and STR, passing in the
4
This device is a close variant of the CMSDK APB watchdog
4
base of the ZA vector and a zero offset.
5
device, so we can model it by subclassing that device and
6
tweaking the behaviour of some of its registers.
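A minimal sketch of the LDR/STR reuse, using the call shapes from this
and the preceding patch (illustrative only, not a complete function):

    /* SVE LDR_zri: the vector lives in env at its usual offset. */
    gen_sve_ldr(s, cpu_env, vec_full_reg_offset(s, a->rd),
                size, a->rn, a->imm * size);

    /* SME LDR: ZA[n] equates to ZA0H.B[n], so point 'base' at row 0 of
     * the byte tile and load starting from offset 0 within it. */
    base = get_tile_rowcol(s, MO_8, a->rv, a->imm, false);
    gen_sve_ldr(s, base, 0, svl, a->rn, a->imm * svl);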
7
5
8
Signed-off-by: Michel Heily <michelheily@gmail.com>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
[PMM: rewrote commit message, fixed a few checkpatch nits,
8
Message-id: 20220708151540.18136-23-richard.henderson@linaro.org
11
added comment giving the URL of the spec for the Stellaris
12
variant of the watchdog device]
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
10
---
15
include/hw/watchdog/cmsdk-apb-watchdog.h | 8 +++
11
target/arm/sme.decode | 7 +++++++
16
hw/arm/stellaris.c | 22 ++++++-
12
target/arm/translate-sme.c | 24 ++++++++++++++++++++++++
17
hw/watchdog/cmsdk-apb-watchdog.c | 74 +++++++++++++++++++++++-
13
2 files changed, 31 insertions(+)
18
3 files changed, 100 insertions(+), 4 deletions(-)
19
14
20
diff --git a/include/hw/watchdog/cmsdk-apb-watchdog.h b/include/hw/watchdog/cmsdk-apb-watchdog.h
15
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
21
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
22
--- a/include/hw/watchdog/cmsdk-apb-watchdog.h
17
--- a/target/arm/sme.decode
23
+++ b/include/hw/watchdog/cmsdk-apb-watchdog.h
18
+++ b/target/arm/sme.decode
24
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
25
#define CMSDK_APB_WATCHDOG(obj) OBJECT_CHECK(CMSDKAPBWatchdog, (obj), \
20
&ldst rs=%mova_rs
26
TYPE_CMSDK_APB_WATCHDOG)
21
LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
27
22
&ldst esz=4 rs=%mova_rs
28
+/*
29
+ * This shares the same struct (and cast macro) as the base
30
+ * cmsdk-apb-watchdog device.
31
+ */
32
+#define TYPE_LUMINARY_WATCHDOG "luminary-watchdog"
33
+
23
+
34
typedef struct CMSDKAPBWatchdog {
24
+&ldstr rv rn imm
35
/*< private >*/
25
+@ldstr ....... ... . ...... .. ... rn:5 . imm:4 \
36
SysBusDevice parent_obj;
26
+ &ldstr rv=%mova_rs
37
@@ -XXX,XX +XXX,XX @@ typedef struct CMSDKAPBWatchdog {
27
+
38
MemoryRegion iomem;
28
+LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
39
qemu_irq wdogint;
29
+STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
40
uint32_t wdogclk_frq;
30
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
41
+ bool is_luminary;
42
struct ptimer_state *timer;
43
44
uint32_t control;
45
@@ -XXX,XX +XXX,XX @@ typedef struct CMSDKAPBWatchdog {
46
uint32_t itcr;
47
uint32_t itop;
48
uint32_t resetstatus;
49
+ const uint32_t *id;
50
} CMSDKAPBWatchdog;
51
52
#endif
53
diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c
54
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
55
--- a/hw/arm/stellaris.c
32
--- a/target/arm/translate-sme.c
56
+++ b/hw/arm/stellaris.c
33
+++ b/target/arm/translate-sme.c
57
@@ -XXX,XX +XXX,XX @@
34
@@ -XXX,XX +XXX,XX @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
58
#include "sysemu/sysemu.h"
35
tcg_temp_free_i64(addr);
59
#include "hw/arm/armv7m.h"
36
return true;
60
#include "hw/char/pl011.h"
37
}
61
+#include "hw/watchdog/cmsdk-apb-watchdog.h"
62
#include "hw/misc/unimp.h"
63
#include "cpu.h"
64
65
@@ -XXX,XX +XXX,XX @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
66
* Stellaris LM3S6965 Microcontroller Data Sheet (rev I)
67
* http://www.ti.com/lit/ds/symlink/lm3s6965.pdf
68
*
69
- * 40000000 wdtimer (unimplemented)
70
+ * 40000000 wdtimer
71
* 40002000 i2c (unimplemented)
72
* 40004000 GPIO
73
* 40005000 GPIO
74
@@ -XXX,XX +XXX,XX @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
75
stellaris_sys_init(0x400fe000, qdev_get_gpio_in(nvic, 28),
76
board, nd_table[0].macaddr.a);
77
78
+
38
+
79
+ if (board->dc1 & (1 << 3)) { /* watchdog present */
39
+typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int);
80
+ dev = qdev_create(NULL, TYPE_LUMINARY_WATCHDOG);
81
+
40
+
82
+ /* system_clock_scale is valid now */
41
+static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)
83
+ uint32_t mainclk = NANOSECONDS_PER_SECOND / system_clock_scale;
42
+{
84
+ qdev_prop_set_uint32(dev, "wdogclk-frq", mainclk);
43
+ int svl = streaming_vec_reg_size(s);
44
+ int imm = a->imm;
45
+ TCGv_ptr base;
85
+
46
+
86
+ qdev_init_nofail(dev);
47
+ if (!sme_za_enabled_check(s)) {
87
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev),
48
+ return true;
88
+ 0,
89
+ 0x40000000u);
90
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev),
91
+ 0,
92
+ qdev_get_gpio_in(nvic, 18));
93
+ }
49
+ }
94
+
50
+
51
+ /* ZA[n] equates to ZA0H.B[n]. */
52
+ base = get_tile_rowcol(s, MO_8, a->rv, imm, false);
95
+
53
+
96
for (i = 0; i < 7; i++) {
54
+ fn(s, base, 0, svl, a->rn, imm * svl);
97
if (board->dc4 & (1 << i)) {
98
gpio_dev[i] = sysbus_create_simple("pl061_luminary", gpio_addr[i],
99
@@ -XXX,XX +XXX,XX @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
100
/* Add dummy regions for the devices we don't implement yet,
101
* so guest accesses don't cause unlogged crashes.
102
*/
103
- create_unimplemented_device("wdtimer", 0x40000000, 0x1000);
104
create_unimplemented_device("i2c-0", 0x40002000, 0x1000);
105
create_unimplemented_device("i2c-2", 0x40021000, 0x1000);
106
create_unimplemented_device("PWM", 0x40028000, 0x1000);
107
diff --git a/hw/watchdog/cmsdk-apb-watchdog.c b/hw/watchdog/cmsdk-apb-watchdog.c
108
index XXXXXXX..XXXXXXX 100644
109
--- a/hw/watchdog/cmsdk-apb-watchdog.c
110
+++ b/hw/watchdog/cmsdk-apb-watchdog.c
111
@@ -XXX,XX +XXX,XX @@
112
* System Design Kit (CMSDK) and documented in the Cortex-M System
113
* Design Kit Technical Reference Manual (ARM DDI0479C):
114
* https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit
115
+ *
116
+ * We also support the variant of this device found in the TI
117
+ * Stellaris/Luminary boards and documented in:
118
+ * http://www.ti.com/lit/ds/symlink/lm3s6965.pdf
119
*/
120
121
#include "qemu/osdep.h"
122
@@ -XXX,XX +XXX,XX @@ REG32(WDOGINTCLR, 0xc)
123
REG32(WDOGRIS, 0x10)
124
FIELD(WDOGRIS, INT, 0, 1)
125
REG32(WDOGMIS, 0x14)
126
+REG32(WDOGTEST, 0x418) /* only in Stellaris/Luminary version of the device */
127
REG32(WDOGLOCK, 0xc00)
128
#define WDOG_UNLOCK_VALUE 0x1ACCE551
129
REG32(WDOGITCR, 0xf00)
130
@@ -XXX,XX +XXX,XX @@ REG32(CID2, 0xff8)
131
REG32(CID3, 0xffc)
132
133
/* PID/CID values */
134
-static const int watchdog_id[] = {
135
+static const uint32_t cmsdk_apb_watchdog_id[] = {
136
0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */
137
0x24, 0xb8, 0x1b, 0x00, /* PID0..PID3 */
138
0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
139
};
140
141
+static const uint32_t luminary_watchdog_id[] = {
142
+ 0x00, 0x00, 0x00, 0x00, /* PID4..PID7 */
143
+ 0x05, 0x18, 0x18, 0x01, /* PID0..PID3 */
144
+ 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */
145
+};
146
+
55
+
147
static bool cmsdk_apb_watchdog_intstatus(CMSDKAPBWatchdog *s)
56
+ tcg_temp_free_ptr(base);
148
{
57
+ return true;
149
/* Return masked interrupt status */
150
@@ -XXX,XX +XXX,XX @@ static void cmsdk_apb_watchdog_update(CMSDKAPBWatchdog *s)
151
bool wdogres;
152
153
if (s->itcr) {
154
+ /*
155
+ * Not checking that !s->is_luminary since s->itcr can't be written
156
+ * when s->is_luminary in the first place.
157
+ */
158
wdogint = s->itop & R_WDOGITOP_WDOGINT_MASK;
159
wdogres = s->itop & R_WDOGITOP_WDOGRES_MASK;
160
} else {
161
@@ -XXX,XX +XXX,XX @@ static uint64_t cmsdk_apb_watchdog_read(void *opaque, hwaddr offset,
162
r = s->lock;
163
break;
164
case A_WDOGITCR:
165
+ if (s->is_luminary) {
166
+ goto bad_offset;
167
+ }
168
r = s->itcr;
169
break;
170
case A_PID4 ... A_CID3:
171
- r = watchdog_id[(offset - A_PID4) / 4];
172
+ r = s->id[(offset - A_PID4) / 4];
173
break;
174
case A_WDOGINTCLR:
175
case A_WDOGITOP:
176
+ if (s->is_luminary) {
177
+ goto bad_offset;
178
+ }
179
qemu_log_mask(LOG_GUEST_ERROR,
180
"CMSDK APB watchdog read: read of WO offset %x\n",
181
(int)offset);
182
r = 0;
183
break;
184
+ case A_WDOGTEST:
185
+ if (!s->is_luminary) {
186
+ goto bad_offset;
187
+ }
188
+ qemu_log_mask(LOG_UNIMP,
189
+ "Luminary watchdog read: stall not implemented\n");
190
+ r = 0;
191
+ break;
192
default:
193
+bad_offset:
194
qemu_log_mask(LOG_GUEST_ERROR,
195
"CMSDK APB watchdog read: bad offset %x\n", (int)offset);
196
r = 0;
197
@@ -XXX,XX +XXX,XX @@ static void cmsdk_apb_watchdog_write(void *opaque, hwaddr offset,
198
ptimer_run(s->timer, 0);
199
break;
200
case A_WDOGCONTROL:
201
+ if (s->is_luminary && 0 != (R_WDOGCONTROL_INTEN_MASK & s->control)) {
202
+ /*
203
+ * The Luminary version of this device ignores writes to
204
+ * this register after the guest has enabled interrupts
205
+ * (so they can only be disabled again via reset).
206
+ */
207
+ break;
208
+ }
209
s->control = value & R_WDOGCONTROL_VALID_MASK;
210
cmsdk_apb_watchdog_update(s);
211
break;
212
@@ -XXX,XX +XXX,XX @@ static void cmsdk_apb_watchdog_write(void *opaque, hwaddr offset,
213
s->lock = (value != WDOG_UNLOCK_VALUE);
214
break;
215
case A_WDOGITCR:
216
+ if (s->is_luminary) {
217
+ goto bad_offset;
218
+ }
219
s->itcr = value & R_WDOGITCR_VALID_MASK;
220
cmsdk_apb_watchdog_update(s);
221
break;
222
case A_WDOGITOP:
223
+ if (s->is_luminary) {
224
+ goto bad_offset;
225
+ }
226
s->itop = value & R_WDOGITOP_VALID_MASK;
227
cmsdk_apb_watchdog_update(s);
228
break;
229
@@ -XXX,XX +XXX,XX @@ static void cmsdk_apb_watchdog_write(void *opaque, hwaddr offset,
230
"CMSDK APB watchdog write: write to RO offset 0x%x\n",
231
(int)offset);
232
break;
233
+ case A_WDOGTEST:
234
+ if (!s->is_luminary) {
235
+ goto bad_offset;
236
+ }
237
+ qemu_log_mask(LOG_UNIMP,
238
+ "Luminary watchdog write: stall not implemented\n");
239
+ break;
240
default:
241
+bad_offset:
242
qemu_log_mask(LOG_GUEST_ERROR,
243
"CMSDK APB watchdog write: bad offset 0x%x\n",
244
(int)offset);
245
@@ -XXX,XX +XXX,XX @@ static void cmsdk_apb_watchdog_init(Object *obj)
246
s, "cmsdk-apb-watchdog", 0x1000);
247
sysbus_init_mmio(sbd, &s->iomem);
248
sysbus_init_irq(sbd, &s->wdogint);
249
+
250
+ s->is_luminary = false;
251
+ s->id = cmsdk_apb_watchdog_id;
252
}
253
254
static void cmsdk_apb_watchdog_realize(DeviceState *dev, Error **errp)
255
@@ -XXX,XX +XXX,XX @@ static const TypeInfo cmsdk_apb_watchdog_info = {
256
.class_init = cmsdk_apb_watchdog_class_init,
257
};
258
259
+static void luminary_watchdog_init(Object *obj)
260
+{
261
+ CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(obj);
262
+
263
+ s->is_luminary = true;
264
+ s->id = luminary_watchdog_id;
265
+}
58
+}
266
+
59
+
267
+static const TypeInfo luminary_watchdog_info = {
60
+TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
268
+ .name = TYPE_LUMINARY_WATCHDOG,
61
+TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
269
+ .parent = TYPE_CMSDK_APB_WATCHDOG,
270
+ .instance_init = luminary_watchdog_init
271
+};
272
+
273
static void cmsdk_apb_watchdog_register_types(void)
274
{
275
type_register_static(&cmsdk_apb_watchdog_info);
276
+ type_register_static(&luminary_watchdog_info);
277
}
278
279
type_init(cmsdk_apb_watchdog_register_types);
280
--
2.20.1

--
2.25.1
1
From: Eric Auger <eric.auger@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
We are about to allow the memory map to grow beyond 1TB and
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
potentially overshoot the VCPU AA64MMFR0.PARANGE.
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
5
Message-id: 20220708151540.18136-24-richard.henderson@linaro.org
6
In aarch64 mode and when highmem is set, let's check the VCPU
7
PA range is sufficient to address the highest GPA of the memory
8
map.
9
10
Signed-off-by: Eric Auger <eric.auger@redhat.com>
11
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
12
Message-id: 20190304101339.25970-10-eric.auger@redhat.com
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
7
---
15
hw/arm/virt.c | 17 +++++++++++++++++
8
target/arm/helper-sme.h | 5 +++
16
1 file changed, 17 insertions(+)
9
target/arm/sme.decode | 11 +++++
10
target/arm/sme_helper.c | 90 ++++++++++++++++++++++++++++++++++++++
11
target/arm/translate-sme.c | 31 +++++++++++++
12
4 files changed, 137 insertions(+)
17
13
18
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
14
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
19
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/arm/virt.c
16
--- a/target/arm/helper-sme.h
21
+++ b/hw/arm/virt.c
17
+++ b/target/arm/helper-sme.h
22
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i
23
#include "standard-headers/linux/input.h"
19
DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
24
#include "hw/arm/smmuv3.h"
20
DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
25
#include "hw/acpi/acpi.h"
21
DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
26
+#include "target/arm/internals.h"
27
28
#define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \
29
static void virt_##major##_##minor##_class_init(ObjectClass *oc, \
30
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
31
fdt_add_timer_nodes(vms);
32
fdt_add_cpu_nodes(vms);
33
34
+ if (!kvm_enabled()) {
35
+ ARMCPU *cpu = ARM_CPU(first_cpu);
36
+ bool aarch64 = object_property_get_bool(OBJECT(cpu), "aarch64", NULL);
37
+
22
+
38
+ if (aarch64 && vms->highmem) {
23
+DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
39
+ int requested_pa_size, pamax = arm_pamax(cpu);
24
+DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
25
+DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
26
+DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
27
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/sme.decode
30
+++ b/target/arm/sme.decode
31
@@ -XXX,XX +XXX,XX @@ LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
32
33
LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
34
STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
40
+
35
+
41
+ requested_pa_size = 64 - clz64(vms->highest_gpa);
36
+### SME Add Vector to Array
42
+ if (pamax < requested_pa_size) {
37
+
43
+ error_report("VCPU supports less PA bits (%d) than requested "
38
+&adda zad zn pm pn
44
+ "by the memory map (%d)", pamax, requested_pa_size);
39
+@adda_32 ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda
45
+ exit(1);
40
+@adda_64 ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda
41
+
42
+ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32
43
+ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32
44
+ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64
45
+ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
46
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/sme_helper.c
49
+++ b/target/arm/sme_helper.c
50
@@ -XXX,XX +XXX,XX @@ DO_ST(q, _be, MO_128)
51
DO_ST(q, _le, MO_128)
52
53
#undef DO_ST
54
+
55
+void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn,
56
+ void *vpm, uint32_t desc)
57
+{
58
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
59
+ uint64_t *pn = vpn, *pm = vpm;
60
+ uint32_t *zda = vzda, *zn = vzn;
61
+
62
+ for (row = 0; row < oprsz; ) {
63
+ uint64_t pa = pn[row >> 4];
64
+ do {
65
+ if (pa & 1) {
66
+ for (col = 0; col < oprsz; ) {
67
+ uint64_t pb = pm[col >> 4];
68
+ do {
69
+ if (pb & 1) {
70
+ zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)];
71
+ }
72
+ pb >>= 4;
73
+ } while (++col & 15);
74
+ }
75
+ }
76
+ pa >>= 4;
77
+ } while (++row & 15);
78
+ }
79
+}
80
+
81
+void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn,
82
+ void *vpm, uint32_t desc)
83
+{
84
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
85
+ uint8_t *pn = vpn, *pm = vpm;
86
+ uint64_t *zda = vzda, *zn = vzn;
87
+
88
+ for (row = 0; row < oprsz; ++row) {
89
+ if (pn[H1(row)] & 1) {
90
+ for (col = 0; col < oprsz; ++col) {
91
+ if (pm[H1(col)] & 1) {
92
+ zda[tile_vslice_index(row) + col] += zn[col];
93
+ }
46
+ }
94
+ }
47
+ }
95
+ }
48
+ }
96
+ }
97
+}
49
+
98
+
50
memory_region_allocate_system_memory(ram, NULL, "mach-virt.ram",
99
+void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn,
51
machine->ram_size);
100
+ void *vpm, uint32_t desc)
52
memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base, ram);
101
+{
102
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
103
+ uint64_t *pn = vpn, *pm = vpm;
104
+ uint32_t *zda = vzda, *zn = vzn;
105
+
106
+ for (row = 0; row < oprsz; ) {
107
+ uint64_t pa = pn[row >> 4];
108
+ do {
109
+ if (pa & 1) {
110
+ uint32_t zn_row = zn[H4(row)];
111
+ for (col = 0; col < oprsz; ) {
112
+ uint64_t pb = pm[col >> 4];
113
+ do {
114
+ if (pb & 1) {
115
+ zda[tile_vslice_index(row) + H4(col)] += zn_row;
116
+ }
117
+ pb >>= 4;
118
+ } while (++col & 15);
119
+ }
120
+ }
121
+ pa >>= 4;
122
+ } while (++row & 15);
123
+ }
124
+}
125
+
126
+void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
127
+ void *vpm, uint32_t desc)
128
+{
129
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
130
+ uint8_t *pn = vpn, *pm = vpm;
131
+ uint64_t *zda = vzda, *zn = vzn;
132
+
133
+ for (row = 0; row < oprsz; ++row) {
134
+ if (pn[H1(row)] & 1) {
135
+ uint64_t zn_row = zn[row];
136
+ for (col = 0; col < oprsz; ++col) {
137
+ if (pm[H1(col)] & 1) {
138
+ zda[tile_vslice_index(row) + col] += zn_row;
139
+ }
140
+ }
141
+ }
142
+ }
143
+}
144
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/target/arm/translate-sme.c
147
+++ b/target/arm/translate-sme.c
148
@@ -XXX,XX +XXX,XX @@ static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)
149
150
TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
151
TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
152
+
153
+static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz,
154
+ gen_helper_gvec_4 *fn)
155
+{
156
+ int svl = streaming_vec_reg_size(s);
157
+ uint32_t desc = simd_desc(svl, svl, 0);
158
+ TCGv_ptr za, zn, pn, pm;
159
+
160
+ if (!sme_smza_enabled_check(s)) {
161
+ return true;
162
+ }
163
+
164
+ /* Sum XZR+zad to find ZAd. */
165
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
166
+ zn = vec_full_reg_ptr(s, a->zn);
167
+ pn = pred_full_reg_ptr(s, a->pn);
168
+ pm = pred_full_reg_ptr(s, a->pm);
169
+
170
+ fn(za, zn, pn, pm, tcg_constant_i32(desc));
171
+
172
+ tcg_temp_free_ptr(za);
173
+ tcg_temp_free_ptr(zn);
174
+ tcg_temp_free_ptr(pn);
175
+ tcg_temp_free_ptr(pm);
176
+ return true;
177
+}
178
+
179
+TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s)
180
+TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
181
+TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
182
+TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
--
2.20.1

--
2.25.1
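As a quick reference while reviewing, here is a minimal, self-contained C sketch of what ADDHA and ADDVA compute on a tiny 4x4 tile. The names (addha_s, addva_s), the ELEMS size and the plain 2-D array layout are illustrative assumptions, not the QEMU implementation above, which walks the packed predicate bits directly.

/* Hypothetical reference model for ADDHA/ADDVA (32-bit elements). */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define ELEMS 4   /* pretend SVL gives 4 x 32-bit elements */

static void addha_s(uint32_t za[ELEMS][ELEMS], const uint32_t zn[ELEMS],
                    const bool pn[ELEMS], const bool pm[ELEMS])
{
    /* ADDHA: add zn[col] into each active column of each active row. */
    for (int row = 0; row < ELEMS; row++) {
        if (!pn[row]) {
            continue;
        }
        for (int col = 0; col < ELEMS; col++) {
            if (pm[col]) {
                za[row][col] += zn[col];
            }
        }
    }
}

static void addva_s(uint32_t za[ELEMS][ELEMS], const uint32_t zn[ELEMS],
                    const bool pn[ELEMS], const bool pm[ELEMS])
{
    /* ADDVA: same loop structure, but the addend is indexed by row. */
    for (int row = 0; row < ELEMS; row++) {
        if (!pn[row]) {
            continue;
        }
        for (int col = 0; col < ELEMS; col++) {
            if (pm[col]) {
                za[row][col] += zn[row];
            }
        }
    }
}

int main(void)
{
    uint32_t za[ELEMS][ELEMS] = {{0}};
    uint32_t zn[ELEMS] = {1, 2, 3, 4};
    bool pn[ELEMS] = {true, true, false, true};
    bool pm[ELEMS] = {true, false, true, true};

    addha_s(za, zn, pn, pm);
    addva_s(za, zn, pn, pm);
    printf("za[0][0] = %u\n", za[0][0]); /* 1 (ADDHA) + 1 (ADDVA) = 2 */
    return 0;
}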
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Message-id: 20220708151540.18136-25-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/helper-sme.h | 5 +++
9
target/arm/sme.decode | 9 +++++
10
target/arm/sme_helper.c | 69 ++++++++++++++++++++++++++++++++++++++
11
target/arm/translate-sme.c | 32 ++++++++++++++++++
12
4 files changed, 115 insertions(+)
13
14
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-sme.h
17
+++ b/target/arm/helper-sme.h
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
19
DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
20
DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
21
DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
22
+
23
+DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
24
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
25
+DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
26
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
27
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/sme.decode
30
+++ b/target/arm/sme.decode
31
@@ -XXX,XX +XXX,XX @@ ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32
32
ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32
33
ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64
34
ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
35
+
36
+### SME Outer Product
37
+
38
+&op zad zn zm pm pn sub:bool
39
+@op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op
40
+@op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op
41
+
42
+FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
43
+FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
44
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/target/arm/sme_helper.c
47
+++ b/target/arm/sme_helper.c
48
@@ -XXX,XX +XXX,XX @@
49
#include "exec/cpu_ldst.h"
50
#include "exec/exec-all.h"
51
#include "qemu/int128.h"
52
+#include "fpu/softfloat.h"
53
#include "vec_internal.h"
54
#include "sve_ldst_internal.h"
55
56
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
57
}
58
}
59
}
60
+
61
+void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
62
+ void *vpm, void *vst, uint32_t desc)
63
+{
64
+ intptr_t row, col, oprsz = simd_maxsz(desc);
65
+ uint32_t neg = simd_data(desc) << 31;
66
+ uint16_t *pn = vpn, *pm = vpm;
67
+ float_status fpst;
68
+
69
+ /*
70
+ * Make a copy of float_status because this operation does not
71
+ * update the cumulative fp exception status. It also produces
72
+ * default nans.
73
+ */
74
+ fpst = *(float_status *)vst;
75
+ set_default_nan_mode(true, &fpst);
76
+
77
+ for (row = 0; row < oprsz; ) {
78
+ uint16_t pa = pn[H2(row >> 4)];
79
+ do {
80
+ if (pa & 1) {
81
+ void *vza_row = vza + tile_vslice_offset(row);
82
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg;
83
+
84
+ for (col = 0; col < oprsz; ) {
85
+ uint16_t pb = pm[H2(col >> 4)];
86
+ do {
87
+ if (pb & 1) {
88
+ uint32_t *a = vza_row + H1_4(col);
89
+ uint32_t *m = vzm + H1_4(col);
90
+ *a = float32_muladd(n, *m, *a, 0, vst);
91
+ }
92
+ col += 4;
93
+ pb >>= 4;
94
+ } while (col & 15);
95
+ }
96
+ }
97
+ row += 4;
98
+ pa >>= 4;
99
+ } while (row & 15);
100
+ }
101
+}
102
+
103
+void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
104
+ void *vpm, void *vst, uint32_t desc)
105
+{
106
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
107
+ uint64_t neg = (uint64_t)simd_data(desc) << 63;
108
+ uint64_t *za = vza, *zn = vzn, *zm = vzm;
109
+ uint8_t *pn = vpn, *pm = vpm;
110
+ float_status fpst = *(float_status *)vst;
111
+
112
+ set_default_nan_mode(true, &fpst);
113
+
114
+ for (row = 0; row < oprsz; ++row) {
115
+ if (pn[H1(row)] & 1) {
116
+ uint64_t *za_row = &za[tile_vslice_index(row)];
117
+ uint64_t n = zn[row] ^ neg;
118
+
119
+ for (col = 0; col < oprsz; ++col) {
120
+ if (pm[H1(col)] & 1) {
121
+ uint64_t *a = &za_row[col];
122
+ *a = float64_muladd(n, zm[col], *a, 0, &fpst);
123
+ }
124
+ }
125
+ }
126
+ }
127
+}
128
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
129
index XXXXXXX..XXXXXXX 100644
130
--- a/target/arm/translate-sme.c
131
+++ b/target/arm/translate-sme.c
132
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s)
133
TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
134
TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
135
TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
136
+
137
+static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
138
+ gen_helper_gvec_5_ptr *fn)
139
+{
140
+ int svl = streaming_vec_reg_size(s);
141
+ uint32_t desc = simd_desc(svl, svl, a->sub);
142
+ TCGv_ptr za, zn, zm, pn, pm, fpst;
143
+
144
+ if (!sme_smza_enabled_check(s)) {
145
+ return true;
146
+ }
147
+
148
+ /* Sum XZR+zad to find ZAd. */
149
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
150
+ zn = vec_full_reg_ptr(s, a->zn);
151
+ zm = vec_full_reg_ptr(s, a->zm);
152
+ pn = pred_full_reg_ptr(s, a->pn);
153
+ pm = pred_full_reg_ptr(s, a->pm);
154
+ fpst = fpstatus_ptr(FPST_FPCR);
155
+
156
+ fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc));
157
+
158
+ tcg_temp_free_ptr(za);
159
+ tcg_temp_free_ptr(zn);
160
+ tcg_temp_free_ptr(pn);
161
+ tcg_temp_free_ptr(pm);
162
+ tcg_temp_free_ptr(fpst);
163
+ return true;
164
+}
165
+
166
+TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
167
+TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
--
2.25.1
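For orientation, a minimal sketch of the FMOPA/FMOPS outer-product shape in plain C. The function name fmopa_ref, the row-major float array and the `sub` flag are assumptions for illustration only; the real helper above negates zn[row] by flipping its sign bit, uses softfloat fused multiply-add with default-NaN semantics, and leaves inactive ZA elements untouched exactly as this loop does.

#include <stdbool.h>
#include <stddef.h>

/* Sketch: za is an n x n tile in row-major order (merging predication). */
static void fmopa_ref(float *za, const float *zn, const float *zm,
                      const bool *pn, const bool *pm,
                      size_t n, bool sub)
{
    for (size_t row = 0; row < n; row++) {
        if (!pn[row]) {
            continue;
        }
        float x = sub ? -zn[row] : zn[row];
        for (size_t col = 0; col < n; col++) {
            if (pm[col]) {
                za[row * n + col] += x * zm[col];
            }
        }
    }
}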
1
From: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
We introduce a helper to create a memory node.
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Message-id: 20220708151540.18136-26-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/helper-sme.h | 2 ++
9
target/arm/sme.decode | 2 ++
10
target/arm/sme_helper.c | 56 ++++++++++++++++++++++++++++++++++++++
11
target/arm/translate-sme.c | 30 ++++++++++++++++++++
12
4 files changed, 90 insertions(+)
4
13
5
Signed-off-by: Eric Auger <eric.auger@redhat.com>
14
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
6
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
7
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20190304101339.25970-2-eric.auger@redhat.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
hw/arm/boot.c | 54 ++++++++++++++++++++++++++++++++-------------------
13
1 file changed, 34 insertions(+), 20 deletions(-)
14
15
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/arm/boot.c
16
--- a/target/arm/helper-sme.h
18
+++ b/hw/arm/boot.c
17
+++ b/target/arm/helper-sme.h
19
@@ -XXX,XX +XXX,XX @@ static void set_kernel_args_old(const struct arm_boot_info *info,
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
19
void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
20
DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
21
void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
22
+DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG,
23
+ void, ptr, ptr, ptr, ptr, ptr, i32)
24
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/sme.decode
27
+++ b/target/arm/sme.decode
28
@@ -XXX,XX +XXX,XX @@ ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
29
30
FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
31
FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
32
+
33
+BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
34
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/sme_helper.c
37
+++ b/target/arm/sme_helper.c
38
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
39
}
20
}
40
}
21
}
41
}
22
42
+
23
+static int fdt_add_memory_node(void *fdt, uint32_t acells, hwaddr mem_base,
43
+/*
24
+ uint32_t scells, hwaddr mem_len,
44
+ * Alter PAIR as needed for controlling predicates being false,
25
+ int numa_node_id)
45
+ * and for NEG on an enabled row element.
46
+ */
47
+static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
26
+{
48
+{
27
+ char *nodename;
49
+ /*
28
+ int ret;
50
+ * The pseudocode uses a conditional negate after the conditional zero.
51
+ * It is simpler here to unconditionally negate before conditional zero.
52
+ */
53
+ pair ^= neg;
54
+ if (!(pg & 1)) {
55
+ pair &= 0xffff0000u;
56
+ }
57
+ if (!(pg & 4)) {
58
+ pair &= 0x0000ffffu;
59
+ }
60
+ return pair;
61
+}
29
+
62
+
30
+ nodename = g_strdup_printf("/memory@%" PRIx64, mem_base);
63
+void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
31
+ qemu_fdt_add_subnode(fdt, nodename);
64
+ void *vpm, uint32_t desc)
32
+ qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
65
+{
33
+ ret = qemu_fdt_setprop_sized_cells(fdt, nodename, "reg", acells, mem_base,
66
+ intptr_t row, col, oprsz = simd_maxsz(desc);
34
+ scells, mem_len);
67
+ uint32_t neg = simd_data(desc) * 0x80008000u;
35
+ if (ret < 0) {
68
+ uint16_t *pn = vpn, *pm = vpm;
36
+ goto out;
69
+
70
+ for (row = 0; row < oprsz; ) {
71
+ uint16_t prow = pn[H2(row >> 4)];
72
+ do {
73
+ void *vza_row = vza + tile_vslice_offset(row);
74
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row));
75
+
76
+ n = f16mop_adj_pair(n, prow, neg);
77
+
78
+ for (col = 0; col < oprsz; ) {
79
+ uint16_t pcol = pm[H2(col >> 4)];
80
+ do {
81
+ if (prow & pcol & 0b0101) {
82
+ uint32_t *a = vza_row + H1_4(col);
83
+ uint32_t m = *(uint32_t *)(vzm + H1_4(col));
84
+
85
+ m = f16mop_adj_pair(m, pcol, 0);
86
+ *a = bfdotadd(*a, n, m);
87
+
88
+ col += 4;
89
+ pcol >>= 4;
90
+ }
91
+ } while (col & 15);
92
+ }
93
+ row += 4;
94
+ prow >>= 4;
95
+ } while (row & 15);
96
+ }
97
+}
98
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/arm/translate-sme.c
101
+++ b/target/arm/translate-sme.c
102
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
103
TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
104
TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
105
106
+static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz,
107
+ gen_helper_gvec_5 *fn)
108
+{
109
+ int svl = streaming_vec_reg_size(s);
110
+ uint32_t desc = simd_desc(svl, svl, a->sub);
111
+ TCGv_ptr za, zn, zm, pn, pm;
112
+
113
+ if (!sme_smza_enabled_check(s)) {
114
+ return true;
37
+ }
115
+ }
38
+
116
+
39
+ /* only set the NUMA ID if it is specified */
117
+ /* Sum XZR+zad to find ZAd. */
40
+ if (numa_node_id >= 0) {
118
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
41
+ ret = qemu_fdt_setprop_cell(fdt, nodename,
119
+ zn = vec_full_reg_ptr(s, a->zn);
42
+ "numa-node-id", numa_node_id);
120
+ zm = vec_full_reg_ptr(s, a->zm);
43
+ }
121
+ pn = pred_full_reg_ptr(s, a->pn);
44
+out:
122
+ pm = pred_full_reg_ptr(s, a->pm);
45
+ g_free(nodename);
123
+
46
+ return ret;
124
+ fn(za, zn, zm, pn, pm, tcg_constant_i32(desc));
125
+
126
+ tcg_temp_free_ptr(za);
127
+ tcg_temp_free_ptr(zn);
128
+ tcg_temp_free_ptr(pn);
129
+ tcg_temp_free_ptr(pm);
130
+ return true;
47
+}
131
+}
48
+
132
+
49
static void fdt_add_psci_node(void *fdt)
133
static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
134
gen_helper_gvec_5_ptr *fn)
50
{
135
{
51
uint32_t cpu_suspend_fn;
136
@@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
52
@@ -XXX,XX +XXX,XX @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
137
53
void *fdt = NULL;
138
TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
54
int size, rc, n = 0;
139
TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
55
uint32_t acells, scells;
140
+
56
- char *nodename;
141
+/* TODO: FEAT_EBF16 */
57
unsigned int i;
142
+TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa)
58
hwaddr mem_base, mem_len;
59
char **node_path;
60
@@ -XXX,XX +XXX,XX @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
61
mem_base = binfo->loader_start;
62
for (i = 0; i < nb_numa_nodes; i++) {
63
mem_len = numa_info[i].node_mem;
64
- nodename = g_strdup_printf("/memory@%" PRIx64, mem_base);
65
- qemu_fdt_add_subnode(fdt, nodename);
66
- qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
67
- rc = qemu_fdt_setprop_sized_cells(fdt, nodename, "reg",
68
- acells, mem_base,
69
- scells, mem_len);
70
+ rc = fdt_add_memory_node(fdt, acells, mem_base,
71
+ scells, mem_len, i);
72
if (rc < 0) {
73
- fprintf(stderr, "couldn't set %s/reg for node %d\n", nodename,
74
- i);
75
+ fprintf(stderr, "couldn't add /memory@%"PRIx64" node\n",
76
+ mem_base);
77
goto fail;
78
}
79
80
- qemu_fdt_setprop_cell(fdt, nodename, "numa-node-id", i);
81
mem_base += mem_len;
82
- g_free(nodename);
83
}
84
} else {
85
- nodename = g_strdup_printf("/memory@%" PRIx64, binfo->loader_start);
86
- qemu_fdt_add_subnode(fdt, nodename);
87
- qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
88
-
89
- rc = qemu_fdt_setprop_sized_cells(fdt, nodename, "reg",
90
- acells, binfo->loader_start,
91
- scells, binfo->ram_size);
92
+ rc = fdt_add_memory_node(fdt, acells, binfo->loader_start,
93
+ scells, binfo->ram_size, -1);
94
if (rc < 0) {
95
- fprintf(stderr, "couldn't set %s reg\n", nodename);
96
+ fprintf(stderr, "couldn't add /memory@%"PRIx64" node\n",
97
+ binfo->loader_start);
98
goto fail;
99
}
100
- g_free(nodename);
101
}
102
103
rc = fdt_path_offset(fdt, "/chosen");
104
--
2.20.1

--
2.25.1
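The f16mop_adj_pair() helper added above is small enough to exercise standalone; the copy below with a worked example may help when reviewing the bit layout (bit 0 of PG governs the low fp16 element of the pair, bit 2 the high one). The test values are assumptions: 0x3c00 is 1.0 and 0xbc00 is -1.0 in IEEE fp16.

#include <stdint.h>
#include <assert.h>

static uint32_t adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
{
    pair ^= neg;                 /* unconditional negate first ... */
    if (!(pg & 1)) {
        pair &= 0xffff0000u;     /* ... then zero the inactive low half */
    }
    if (!(pg & 4)) {
        pair &= 0x0000ffffu;     /* and the inactive high half */
    }
    return pair;
}

int main(void)
{
    /* Only the low element active, NEG set on both halves (0x80008000):
     * the high half is cleared, the low half has its sign bit flipped. */
    uint32_t r = adj_pair(0x3c003c00u /* 1.0, 1.0 */, 0x1, 0x80008000u);
    assert(r == 0x0000bc00u);    /* -1.0 in the low half, 0 above */
    return 0;
}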
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Minimize the number of places that will need updating when
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
the virtual host extensions are added.
4
Message-id: 20220708151540.18136-27-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/helper-sme.h | 2 ++
9
target/arm/sme.decode | 1 +
10
target/arm/sme_helper.c | 74 ++++++++++++++++++++++++++++++++++++++
11
target/arm/translate-sme.c | 1 +
12
4 files changed, 78 insertions(+)
5
13
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
7
Message-id: 20190301200501.16533-2-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/cpu.h | 26 ++++++++++++++++----------
12
target/arm/helper.c | 8 ++------
13
2 files changed, 18 insertions(+), 16 deletions(-)
14
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
16
--- a/target/arm/helper-sme.h
18
+++ b/target/arm/cpu.h
17
+++ b/target/arm/helper-sme.h
19
@@ -XXX,XX +XXX,XX @@ static inline bool arm_sctlr_b(CPUARMState *env)
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
20
(env->cp15.sctlr_el[1] & SCTLR_B) != 0;
19
DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
20
DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
21
22
+DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG,
23
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
24
DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
25
void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
26
DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
27
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/sme.decode
30
+++ b/target/arm/sme.decode
31
@@ -XXX,XX +XXX,XX @@ FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
32
FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
33
34
BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
35
+FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
36
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/sme_helper.c
39
+++ b/target/arm/sme_helper.c
40
@@ -XXX,XX +XXX,XX @@ static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
41
return pair;
21
}
42
}
22
43
23
+static inline uint64_t arm_sctlr(CPUARMState *env, int el)
44
+static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2,
45
+ float_status *s_std, float_status *s_odd)
24
+{
46
+{
25
+ if (el == 0) {
47
+ float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std);
26
+ /* FIXME: ARMv8.1-VHE S2 translation regime. */
48
+ float64 e1c = float16_to_float64(e1 >> 16, true, s_std);
27
+ return env->cp15.sctlr_el[1];
49
+ float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std);
28
+ } else {
50
+ float64 e2c = float16_to_float64(e2 >> 16, true, s_std);
29
+ return env->cp15.sctlr_el[el];
51
+ float64 t64;
52
+ float32 t32;
53
+
54
+ /*
55
+ * The ARM pseudocode function FPDot performs both multiplies
56
+ * and the add with a single rounding operation. Emulate this
57
+ * by performing the first multiply in round-to-odd, then doing
58
+ * the second multiply as fused multiply-add, and rounding to
59
+ * float32 all in one step.
60
+ */
61
+ t64 = float64_mul(e1r, e2r, s_odd);
62
+ t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std);
63
+
64
+ /* This conversion is exact, because we've already rounded. */
65
+ t32 = float64_to_float32(t64, s_std);
66
+
67
+ /* The final accumulation step is not fused. */
68
+ return float32_add(sum, t32, s_std);
69
+}
70
+
71
+void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
72
+ void *vpm, void *vst, uint32_t desc)
73
+{
74
+ intptr_t row, col, oprsz = simd_maxsz(desc);
75
+ uint32_t neg = simd_data(desc) * 0x80008000u;
76
+ uint16_t *pn = vpn, *pm = vpm;
77
+ float_status fpst_odd, fpst_std;
78
+
79
+ /*
80
+ * Make a copy of float_status because this operation does not
81
+ * update the cumulative fp exception status. It also produces
82
+ * default nans. Make a second copy with round-to-odd -- see above.
83
+ */
84
+ fpst_std = *(float_status *)vst;
85
+ set_default_nan_mode(true, &fpst_std);
86
+ fpst_odd = fpst_std;
87
+ set_float_rounding_mode(float_round_to_odd, &fpst_odd);
88
+
89
+ for (row = 0; row < oprsz; ) {
90
+ uint16_t prow = pn[H2(row >> 4)];
91
+ do {
92
+ void *vza_row = vza + tile_vslice_offset(row);
93
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row));
94
+
95
+ n = f16mop_adj_pair(n, prow, neg);
96
+
97
+ for (col = 0; col < oprsz; ) {
98
+ uint16_t pcol = pm[H2(col >> 4)];
99
+ do {
100
+ if (prow & pcol & 0b0101) {
101
+ uint32_t *a = vza_row + H1_4(col);
102
+ uint32_t m = *(uint32_t *)(vzm + H1_4(col));
103
+
104
+ m = f16mop_adj_pair(m, pcol, 0);
105
+ *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd);
106
+
107
+ col += 4;
108
+ pcol >>= 4;
109
+ }
110
+ } while (col & 15);
111
+ }
112
+ row += 4;
113
+ prow >>= 4;
114
+ } while (row & 15);
30
+ }
115
+ }
31
+}
116
+}
32
+
117
+
33
+
118
void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
34
/* Return true if the processor is in big-endian mode. */
119
void *vpm, uint32_t desc)
35
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
36
{
120
{
37
- int cur_el;
121
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
38
-
122
index XXXXXXX..XXXXXXX 100644
39
/* In 32bit endianness is determined by looking at CPSR's E bit */
123
--- a/target/arm/translate-sme.c
40
if (!is_a64(env)) {
124
+++ b/target/arm/translate-sme.c
41
return
125
@@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
42
@@ -XXX,XX +XXX,XX @@ static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
126
return true;
43
arm_sctlr_b(env) ||
44
#endif
45
((env->uncached_cpsr & CPSR_E) ? 1 : 0);
46
+ } else {
47
+ int cur_el = arm_current_el(env);
48
+ uint64_t sctlr = arm_sctlr(env, cur_el);
49
+
50
+ return (sctlr & (cur_el ? SCTLR_EE : SCTLR_E0E)) != 0;
51
}
52
-
53
- cur_el = arm_current_el(env);
54
-
55
- if (cur_el == 0) {
56
- return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
57
- }
58
-
59
- return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
60
}
127
}
61
128
62
#include "exec/cpu-all.h"
129
+TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_h)
63
diff --git a/target/arm/helper.c b/target/arm/helper.c
130
TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
64
index XXXXXXX..XXXXXXX 100644
131
TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
65
--- a/target/arm/helper.c
132
66
+++ b/target/arm/helper.c
67
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
68
flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
69
}
70
71
- if (current_el == 0) {
72
- /* FIXME: ARMv8.1-VHE S2 translation regime. */
73
- sctlr = env->cp15.sctlr_el[1];
74
- } else {
75
- sctlr = env->cp15.sctlr_el[current_el];
76
- }
77
+ sctlr = arm_sctlr(env, current_el);
78
+
79
if (cpu_isar_feature(aa64_pauth, cpu)) {
80
/*
81
* In order to save space in flags, we record only whether
82
--
2.20.1

--
2.25.1
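A structural sketch of the FPDot data flow used by f16_dotadd() above, in plain C. This does NOT reproduce the round-to-odd trick (standard C cannot portably select that rounding mode), and it uses float inputs where the real code takes fp16; it only shows the shape: two products accumulated through a wide intermediate, one conversion back down, then the final unfused add.

#include <math.h>
#include <stdio.h>

static float fpdot_sketch(float sum, float e1r, float e1c,
                          float e2r, float e2c)
{
    double t = (double)e1r * e2r;          /* first product (round-to-odd in QEMU) */
    t = fma((double)e1c, (double)e2c, t);  /* second product fused with the add */
    return sum + (float)t;                 /* one narrowing, then the final add */
}

int main(void)
{
    printf("%f\n", fpdot_sketch(0.0f, 1.5f, 2.0f, 0.5f, 4.0f)); /* 5.0 */
    return 0;
}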
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The EL0+UMA check is unique to DAIF. While SPSel had avoided the
3
This is SMOPA, SUMOPA, USMOPA_s, UMOPA, for both Int8 and Int16.
4
check by nature of already checking EL >= 1, the other post v8.0
5
extensions to MSR (imm) allow EL0 and do not require UMA. Avoid
6
the unconditional write to pc and use raise_exception_ra to unwind.
7
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20190301200501.16533-5-richard.henderson@linaro.org
7
Message-id: 20220708151540.18136-28-richard.henderson@linaro.org
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
9
---
13
target/arm/helper-a64.h | 3 +++
10
target/arm/helper-sme.h | 16 ++++++++
14
target/arm/helper.h | 1 -
11
target/arm/sme.decode | 10 +++++
15
target/arm/internals.h | 15 ++++++++++++++
12
target/arm/sme_helper.c | 82 ++++++++++++++++++++++++++++++++++++++
16
target/arm/helper-a64.c | 30 +++++++++++++++++++++++++++
13
target/arm/translate-sme.c | 10 +++++
17
target/arm/op_helper.c | 42 --------------------------------------
14
4 files changed, 118 insertions(+)
18
target/arm/translate-a64.c | 41 ++++++++++++++++++++++---------------
19
6 files changed, 73 insertions(+), 59 deletions(-)
20
15
21
diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
16
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
22
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
23
--- a/target/arm/helper-a64.h
18
--- a/target/arm/helper-sme.h
24
+++ b/target/arm/helper-a64.h
19
+++ b/target/arm/helper-sme.h
25
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
26
DEF_HELPER_FLAGS_2(udiv64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
21
void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
27
DEF_HELPER_FLAGS_2(sdiv64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
22
DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG,
28
DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64)
23
void, ptr, ptr, ptr, ptr, ptr, i32)
29
+DEF_HELPER_2(msr_i_spsel, void, env, i32)
24
+DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG,
30
+DEF_HELPER_2(msr_i_daifset, void, env, i32)
25
+ void, ptr, ptr, ptr, ptr, ptr, i32)
31
+DEF_HELPER_2(msr_i_daifclear, void, env, i32)
26
+DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG,
32
DEF_HELPER_3(vfp_cmph_a64, i64, f16, f16, ptr)
27
+ void, ptr, ptr, ptr, ptr, ptr, i32)
33
DEF_HELPER_3(vfp_cmpeh_a64, i64, f16, f16, ptr)
28
+DEF_HELPER_FLAGS_6(sme_sumopa_s, TCG_CALL_NO_RWG,
34
DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr)
29
+ void, ptr, ptr, ptr, ptr, ptr, i32)
35
diff --git a/target/arm/helper.h b/target/arm/helper.h
30
+DEF_HELPER_FLAGS_6(sme_usmopa_s, TCG_CALL_NO_RWG,
31
+ void, ptr, ptr, ptr, ptr, ptr, i32)
32
+DEF_HELPER_FLAGS_6(sme_smopa_d, TCG_CALL_NO_RWG,
33
+ void, ptr, ptr, ptr, ptr, ptr, i32)
34
+DEF_HELPER_FLAGS_6(sme_umopa_d, TCG_CALL_NO_RWG,
35
+ void, ptr, ptr, ptr, ptr, ptr, i32)
36
+DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG,
37
+ void, ptr, ptr, ptr, ptr, ptr, i32)
38
+DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG,
39
+ void, ptr, ptr, ptr, ptr, ptr, i32)
40
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
36
index XXXXXXX..XXXXXXX 100644
41
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/helper.h
42
--- a/target/arm/sme.decode
38
+++ b/target/arm/helper.h
43
+++ b/target/arm/sme.decode
39
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(get_cp_reg, i32, env, ptr)
44
@@ -XXX,XX +XXX,XX @@ FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
40
DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64)
45
41
DEF_HELPER_2(get_cp_reg64, i64, env, ptr)
46
BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
42
47
FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
43
-DEF_HELPER_3(msr_i_pstate, void, env, i32, i32)
48
+
44
DEF_HELPER_1(clear_pstate_ss, void, env)
49
+SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32
45
50
+SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32
46
DEF_HELPER_2(get_r13_banked, i32, env, i32)
51
+USMOPA_s 1010000 1 10 0 ..... ... ... ..... . 00 .. @op_32
47
diff --git a/target/arm/internals.h b/target/arm/internals.h
52
+UMOPA_s 1010000 1 10 1 ..... ... ... ..... . 00 .. @op_32
53
+
54
+SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64
55
+SUMOPA_d 1010000 0 11 1 ..... ... ... ..... . 0 ... @op_64
56
+USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64
57
+UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64
58
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
48
index XXXXXXX..XXXXXXX 100644
59
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/internals.h
60
--- a/target/arm/sme_helper.c
50
+++ b/target/arm/internals.h
61
+++ b/target/arm/sme_helper.c
51
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
62
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
52
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
63
} while (row & 15);
53
ARMMMUIdx mmu_idx, bool data);
64
}
54
65
}
55
+static inline int exception_target_el(CPUARMState *env)
66
+
67
+typedef uint64_t IMOPFn(uint64_t, uint64_t, uint64_t, uint8_t, bool);
68
+
69
+static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm,
70
+ uint8_t *pn, uint8_t *pm,
71
+ uint32_t desc, IMOPFn *fn)
56
+{
72
+{
57
+ int target_el = MAX(1, arm_current_el(env));
73
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
74
+ bool neg = simd_data(desc);
58
+
75
+
59
+ /*
76
+ for (row = 0; row < oprsz; ++row) {
60
+ * No such thing as secure EL1 if EL3 is aarch32,
77
+ uint8_t pa = pn[H1(row)];
61
+ * so update the target EL to EL3 in this case.
78
+ uint64_t *za_row = &za[tile_vslice_index(row)];
62
+ */
79
+ uint64_t n = zn[row];
63
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
64
+ target_el = 3;
65
+ }
66
+
80
+
67
+ return target_el;
81
+ for (col = 0; col < oprsz; ++col) {
68
+}
82
+ uint8_t pb = pm[H1(col)];
83
+ uint64_t *a = &za_row[col];
69
+
84
+
70
#endif
85
+ *a = fn(n, zm[col], *a, pa & pb, neg);
71
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
86
+ }
72
index XXXXXXX..XXXXXXX 100644
73
--- a/target/arm/helper-a64.c
74
+++ b/target/arm/helper-a64.c
75
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(rbit64)(uint64_t x)
76
return revbit64(x);
77
}
78
79
+void HELPER(msr_i_spsel)(CPUARMState *env, uint32_t imm)
80
+{
81
+ update_spsel(env, imm);
82
+}
83
+
84
+static void daif_check(CPUARMState *env, uint32_t op,
85
+ uint32_t imm, uintptr_t ra)
86
+{
87
+ /* DAIF update to PSTATE. This is OK from EL0 only if UMA is set. */
88
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
89
+ raise_exception_ra(env, EXCP_UDEF,
90
+ syn_aa64_sysregtrap(0, extract32(op, 0, 3),
91
+ extract32(op, 3, 3), 4,
92
+ imm, 0x1f, 0),
93
+ exception_target_el(env), ra);
94
+ }
87
+ }
95
+}
88
+}
96
+
89
+
97
+void HELPER(msr_i_daifset)(CPUARMState *env, uint32_t imm)
90
+#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \
98
+{
91
+static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
99
+ daif_check(env, 0x1e, imm, GETPC());
92
+{ \
100
+ env->daif |= (imm << 6) & PSTATE_DAIF;
93
+ uint32_t sum0 = 0, sum1 = 0; \
94
+ /* Apply P to N as a mask, making the inactive elements 0. */ \
95
+ n &= expand_pred_b(p); \
96
+ sum0 += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
97
+ sum0 += (NTYPE)(n >> 8) * (MTYPE)(m >> 8); \
98
+ sum0 += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
99
+ sum0 += (NTYPE)(n >> 24) * (MTYPE)(m >> 24); \
100
+ sum1 += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \
101
+ sum1 += (NTYPE)(n >> 40) * (MTYPE)(m >> 40); \
102
+ sum1 += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \
103
+ sum1 += (NTYPE)(n >> 56) * (MTYPE)(m >> 56); \
104
+ if (neg) { \
105
+ sum0 = (uint32_t)a - sum0, sum1 = (uint32_t)(a >> 32) - sum1; \
106
+ } else { \
107
+ sum0 = (uint32_t)a + sum0, sum1 = (uint32_t)(a >> 32) + sum1; \
108
+ } \
109
+ return ((uint64_t)sum1 << 32) | sum0; \
101
+}
110
+}
102
+
111
+
103
+void HELPER(msr_i_daifclear)(CPUARMState *env, uint32_t imm)
112
+#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \
104
+{
113
+static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
105
+ daif_check(env, 0x1f, imm, GETPC());
114
+{ \
106
+ env->daif &= ~((imm << 6) & PSTATE_DAIF);
115
+ uint64_t sum = 0; \
116
+ /* Apply P to N as a mask, making the inactive elements 0. */ \
117
+ n &= expand_pred_h(p); \
118
+ sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
119
+ sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
120
+ sum += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \
121
+ sum += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \
122
+ return neg ? a - sum : a + sum; \
107
+}
123
+}
108
+
124
+
109
/* Convert a softfloat float_relation_ (as returned by
125
+DEF_IMOP_32(smopa_s, int8_t, int8_t)
110
* the float*_compare functions) to the correct ARM
126
+DEF_IMOP_32(umopa_s, uint8_t, uint8_t)
111
* NZCV flag state.
127
+DEF_IMOP_32(sumopa_s, int8_t, uint8_t)
112
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
128
+DEF_IMOP_32(usmopa_s, uint8_t, int8_t)
129
+
130
+DEF_IMOP_64(smopa_d, int16_t, int16_t)
131
+DEF_IMOP_64(umopa_d, uint16_t, uint16_t)
132
+DEF_IMOP_64(sumopa_d, int16_t, uint16_t)
133
+DEF_IMOP_64(usmopa_d, uint16_t, int16_t)
134
+
135
+#define DEF_IMOPH(NAME) \
136
+ void HELPER(sme_##NAME)(void *vza, void *vzn, void *vzm, void *vpn, \
137
+ void *vpm, uint32_t desc) \
138
+ { do_imopa(vza, vzn, vzm, vpn, vpm, desc, NAME); }
139
+
140
+DEF_IMOPH(smopa_s)
141
+DEF_IMOPH(umopa_s)
142
+DEF_IMOPH(sumopa_s)
143
+DEF_IMOPH(usmopa_s)
144
+DEF_IMOPH(smopa_d)
145
+DEF_IMOPH(umopa_d)
146
+DEF_IMOPH(sumopa_d)
147
+DEF_IMOPH(usmopa_d)
148
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
113
index XXXXXXX..XXXXXXX 100644
149
index XXXXXXX..XXXXXXX 100644
114
--- a/target/arm/op_helper.c
150
--- a/target/arm/translate-sme.c
115
+++ b/target/arm/op_helper.c
151
+++ b/target/arm/translate-sme.c
116
@@ -XXX,XX +XXX,XX @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
152
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_f
117
cpu_loop_exit_restore(cs, ra);
153
118
}
154
/* TODO: FEAT_EBF16 */
119
155
TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa)
120
-static int exception_target_el(CPUARMState *env)
121
-{
122
- int target_el = MAX(1, arm_current_el(env));
123
-
124
- /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
125
- * to EL3 in this case.
126
- */
127
- if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
128
- target_el = 3;
129
- }
130
-
131
- return target_el;
132
-}
133
-
134
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
135
uint32_t maxindex)
136
{
137
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
138
return res;
139
}
140
141
-void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
142
-{
143
- /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
144
- * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
145
- * to catch that case at translate time.
146
- */
147
- if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
148
- uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
149
- extract32(op, 3, 3), 4,
150
- imm, 0x1f, 0);
151
- raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
152
- }
153
-
154
- switch (op) {
155
- case 0x05: /* SPSel */
156
- update_spsel(env, imm);
157
- break;
158
- case 0x1e: /* DAIFSet */
159
- env->daif |= (imm << 6) & PSTATE_DAIF;
160
- break;
161
- case 0x1f: /* DAIFClear */
162
- env->daif &= ~((imm << 6) & PSTATE_DAIF);
163
- break;
164
- default:
165
- g_assert_not_reached();
166
- }
167
-}
168
-
169
void HELPER(clear_pstate_ss)(CPUARMState *env)
170
{
171
env->pstate &= ~PSTATE_SS;
172
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
173
index XXXXXXX..XXXXXXX 100644
174
--- a/target/arm/translate-a64.c
175
+++ b/target/arm/translate-a64.c
176
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
177
static void handle_msr_i(DisasContext *s, uint32_t insn,
178
unsigned int op1, unsigned int op2, unsigned int crm)
179
{
180
+ TCGv_i32 t1;
181
int op = op1 << 3 | op2;
182
+
156
+
183
+ /* End the TB by default, chaining is ok. */
157
+TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s)
184
+ s->base.is_jmp = DISAS_TOO_MANY;
158
+TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s)
159
+TRANS_FEAT(SUMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_sumopa_s)
160
+TRANS_FEAT(USMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_usmopa_s)
185
+
161
+
186
switch (op) {
162
+TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_d)
187
case 0x05: /* SPSel */
163
+TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d)
188
if (s->current_el == 0) {
164
+TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d)
189
- unallocated_encoding(s);
165
+TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d)
190
- return;
191
+ goto do_unallocated;
192
}
193
- /* fall through */
194
- case 0x1e: /* DAIFSet */
195
- case 0x1f: /* DAIFClear */
196
- {
197
- TCGv_i32 tcg_imm = tcg_const_i32(crm);
198
- TCGv_i32 tcg_op = tcg_const_i32(op);
199
- gen_a64_set_pc_im(s->pc - 4);
200
- gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
201
- tcg_temp_free_i32(tcg_imm);
202
- tcg_temp_free_i32(tcg_op);
203
- /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
204
- gen_a64_set_pc_im(s->pc);
205
- s->base.is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
206
+ t1 = tcg_const_i32(crm & PSTATE_SP);
207
+ gen_helper_msr_i_spsel(cpu_env, t1);
208
+ tcg_temp_free_i32(t1);
209
break;
210
- }
211
+
212
+ case 0x1e: /* DAIFSet */
213
+ t1 = tcg_const_i32(crm);
214
+ gen_helper_msr_i_daifset(cpu_env, t1);
215
+ tcg_temp_free_i32(t1);
216
+ break;
217
+
218
+ case 0x1f: /* DAIFClear */
219
+ t1 = tcg_const_i32(crm);
220
+ gen_helper_msr_i_daifclear(cpu_env, t1);
221
+ tcg_temp_free_i32(t1);
222
+ /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
223
+ s->base.is_jmp = DISAS_UPDATE;
224
+ break;
225
+
226
default:
227
+ do_unallocated:
228
unallocated_encoding(s);
229
return;
230
}
231
--
2.20.1

--
2.25.1
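For the integer outer products, each 32-bit ZA accumulator takes a 4-way dot product of 8-bit elements (see DEF_IMOP_32 above). A minimal sketch of one such lane, with made-up names (smopa_lane) and without the per-byte predication that expand_pred_b applies in the real helper:

#include <stdint.h>
#include <stdio.h>

static uint32_t smopa_lane(uint32_t acc, const int8_t n[4],
                           const int8_t m[4], int sub)
{
    int32_t sum = 0;
    for (int i = 0; i < 4; i++) {
        sum += (int32_t)n[i] * m[i];      /* widening signed products */
    }
    return sub ? acc - (uint32_t)sum : acc + (uint32_t)sum;
}

int main(void)
{
    int8_t n[4] = {1, -2, 3, -4};
    int8_t m[4] = {5, 6, 7, 8};
    /* 5 - 12 + 21 - 32 = -18 */
    printf("%d\n", (int32_t)smopa_lane(0, n, m, 0));
    return 0;
}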
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
This is an SVE instruction that operates using the SVE vector
4
length but is present only if SME is implemented.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-29-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sve.decode | 20 +++++++++++++
12
target/arm/translate-sve.c | 57 ++++++++++++++++++++++++++++++++++++++
13
2 files changed, 77 insertions(+)
14
15
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sve.decode
18
+++ b/target/arm/sve.decode
19
@@ -XXX,XX +XXX,XX @@ BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
20
21
### SVE2 floating-point bfloat16 dot-product (indexed)
22
BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2
23
+
24
+### SVE broadcast predicate element
25
+
26
+&psel esz pd pn pm rv imm
27
+%psel_rv 16:2 !function=plus_12
28
+%psel_imm_b 22:2 19:2
29
+%psel_imm_h 22:2 20:1
30
+%psel_imm_s 22:2
31
+%psel_imm_d 23:1
32
+@psel ........ .. . ... .. .. pn:4 . pm:4 . pd:4 \
33
+ &psel rv=%psel_rv
34
+
35
+PSEL 00100101 .. 1 ..1 .. 01 .... 0 .... 0 .... \
36
+ @psel esz=0 imm=%psel_imm_b
37
+PSEL 00100101 .. 1 .10 .. 01 .... 0 .... 0 .... \
38
+ @psel esz=1 imm=%psel_imm_h
39
+PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \
40
+ @psel esz=2 imm=%psel_imm_s
41
+PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \
42
+ @psel esz=3 imm=%psel_imm_d
43
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/translate-sve.c
46
+++ b/target/arm/translate-sve.c
47
@@ -XXX,XX +XXX,XX @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
48
49
TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
50
TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true)
51
+
52
+static bool trans_PSEL(DisasContext *s, arg_psel *a)
53
+{
54
+ int vl = vec_full_reg_size(s);
55
+ int pl = pred_gvec_reg_size(s);
56
+ int elements = vl >> a->esz;
57
+ TCGv_i64 tmp, didx, dbit;
58
+ TCGv_ptr ptr;
59
+
60
+ if (!dc_isar_feature(aa64_sme, s)) {
61
+ return false;
62
+ }
63
+ if (!sve_access_check(s)) {
64
+ return true;
65
+ }
66
+
67
+ tmp = tcg_temp_new_i64();
68
+ dbit = tcg_temp_new_i64();
69
+ didx = tcg_temp_new_i64();
70
+ ptr = tcg_temp_new_ptr();
71
+
72
+ /* Compute the predicate element. */
73
+ tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm);
74
+ if (is_power_of_2(elements)) {
75
+ tcg_gen_andi_i64(tmp, tmp, elements - 1);
76
+ } else {
77
+ tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements));
78
+ }
79
+
80
+ /* Extract the predicate byte and bit indices. */
81
+ tcg_gen_shli_i64(tmp, tmp, a->esz);
82
+ tcg_gen_andi_i64(dbit, tmp, 7);
83
+ tcg_gen_shri_i64(didx, tmp, 3);
84
+ if (HOST_BIG_ENDIAN) {
85
+ tcg_gen_xori_i64(didx, didx, 7);
86
+ }
87
+
88
+ /* Load the predicate word. */
89
+ tcg_gen_trunc_i64_ptr(ptr, didx);
90
+ tcg_gen_add_ptr(ptr, ptr, cpu_env);
91
+ tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));
92
+
93
+ /* Extract the predicate bit and replicate to MO_64. */
94
+ tcg_gen_shr_i64(tmp, tmp, dbit);
95
+ tcg_gen_andi_i64(tmp, tmp, 1);
96
+ tcg_gen_neg_i64(tmp, tmp);
97
+
98
+ /* Apply to either copy the source, or write zeros. */
99
+ tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd),
100
+ pred_full_reg_offset(s, a->pn), tmp, pl, pl);
101
+
102
+ tcg_temp_free_i64(tmp);
103
+ tcg_temp_free_i64(dbit);
104
+ tcg_temp_free_i64(didx);
105
+ tcg_temp_free_ptr(ptr);
106
+ return true;
107
+}
--
2.25.1
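The PSEL semantics implemented above reduce to: select one element of Pm by (Wv + imm) modulo the element count; if it is active, Pd is a copy of Pn, otherwise Pd is all zeros. A minimal reference sketch with a byte-per-element predicate representation (psel_ref and the bool arrays are illustrative, not the packed-bit form the translator operates on):

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

static void psel_ref(bool *pd, const bool *pn, const bool *pm,
                     unsigned rv, unsigned imm, unsigned elems)
{
    bool bit = pm[(rv + imm) % elems];
    if (bit) {
        memcpy(pd, pn, elems * sizeof(bool));
    } else {
        memset(pd, 0, elems * sizeof(bool));
    }
}

int main(void)
{
    bool pn[4] = {true, false, true, true};
    bool pm[4] = {false, true, false, false};
    bool pd[4];

    psel_ref(pd, pn, pm, 0, 1, 4);   /* pm[1] is set -> pd == pn */
    printf("%d %d %d %d\n", pd[0], pd[1], pd[2], pd[3]);
    return 0;
}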
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
This is an SVE instruction that operates using the SVE vector
4
length but is present only if SME is implemented.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-30-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/helper-sve.h | 2 ++
12
target/arm/sve.decode | 1 +
13
target/arm/sve_helper.c | 16 ++++++++++++++++
14
target/arm/translate-sve.c | 2 ++
15
4 files changed, 21 insertions(+)
16
17
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper-sve.h
20
+++ b/target/arm/helper-sve.h
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
22
23
DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
24
25
+DEF_HELPER_FLAGS_4(sme_revd_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
26
+
27
DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
28
DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
29
DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
30
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/sve.decode
33
+++ b/target/arm/sve.decode
34
@@ -XXX,XX +XXX,XX @@ REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn
35
REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn
36
REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn
37
RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn
38
+REVD 00000101 00 1011 10 100 ... ..... ..... @rd_pg_rn_e0
39
40
# SVE vector splice (predicated, destructive)
41
SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm
42
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/sve_helper.c
45
+++ b/target/arm/sve_helper.c
46
@@ -XXX,XX +XXX,XX @@ DO_ZPZ_D(sve_revh_d, uint64_t, hswap64)
47
48
DO_ZPZ_D(sve_revw_d, uint64_t, wswap64)
49
50
+void HELPER(sme_revd_q)(void *vd, void *vn, void *vg, uint32_t desc)
51
+{
52
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
53
+ uint64_t *d = vd, *n = vn;
54
+ uint8_t *pg = vg;
55
+
56
+ for (i = 0; i < opr_sz; i += 2) {
57
+ if (pg[H1(i)] & 1) {
58
+ uint64_t n0 = n[i + 0];
59
+ uint64_t n1 = n[i + 1];
60
+ d[i + 0] = n1;
61
+ d[i + 1] = n0;
62
+ }
63
+ }
64
+}
65
+
66
DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8)
67
DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16)
68
DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32)
69
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/translate-sve.c
72
+++ b/target/arm/translate-sve.c
73
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0)
74
TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz,
75
a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0)
76
77
+TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0)
78
+
79
TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz,
80
gen_helper_sve_splice, a, a->esz)
--
2.25.1
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

This is an SVE instruction that operates using the SVE vector
length but that is present only if SME is implemented.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-31-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.h        |  18 +++++++
 target/arm/sve.decode      |   5 ++
 target/arm/translate-sve.c | 102 +++++++++++++++++++++++++++++++++++++
 target/arm/vec_helper.c    |  24 +++++++++
 4 files changed, 149 insertions(+)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
 #ifdef TARGET_AARCH64
 #include "helper-a64.h"
 #include "helper-sve.h"
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ PSEL            00100101 .. 1 100 .. 01 .... 0 .... 0 ....  \
                 @psel esz=2 imm=%psel_imm_s
 PSEL            00100101 .1 1 000 .. 01 .... 0 .... 0 ....  \
                 @psel esz=3 imm=%psel_imm_d
+
+### SVE clamp
+
+SCLAMP          01000100 .. 0 ..... 110000 ..... .....      @rda_rn_rm
+UCLAMP          01000100 .. 0 ..... 110001 ..... .....      @rda_rn_rm
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_PSEL(DisasContext *s, arg_psel *a)
     tcg_temp_free_ptr(ptr);
     return true;
 }
+
+static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
+{
+    tcg_gen_smax_i32(d, a, n);
+    tcg_gen_smin_i32(d, d, m);
+}
+
+static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
+{
+    tcg_gen_smax_i64(d, a, n);
+    tcg_gen_smin_i64(d, d, m);
+}
+
+static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                           TCGv_vec m, TCGv_vec a)
+{
+    tcg_gen_smax_vec(vece, d, a, n);
+    tcg_gen_smin_vec(vece, d, d, m);
+}
+
+static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                       uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop[] = {
+        INDEX_op_smin_vec, INDEX_op_smax_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_sclamp_vec,
+          .fno = gen_helper_gvec_sclamp_b,
+          .opt_opc = vecop,
+          .vece = MO_8 },
+        { .fniv = gen_sclamp_vec,
+          .fno = gen_helper_gvec_sclamp_h,
+          .opt_opc = vecop,
+          .vece = MO_16 },
+        { .fni4 = gen_sclamp_i32,
+          .fniv = gen_sclamp_vec,
+          .fno = gen_helper_gvec_sclamp_s,
+          .opt_opc = vecop,
+          .vece = MO_32 },
+        { .fni8 = gen_sclamp_i64,
+          .fniv = gen_sclamp_vec,
+          .fno = gen_helper_gvec_sclamp_d,
+          .opt_opc = vecop,
+          .vece = MO_64,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
+}
+
+TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a)
+
+static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
+{
+    tcg_gen_umax_i32(d, a, n);
+    tcg_gen_umin_i32(d, d, m);
+}
+
+static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
+{
+    tcg_gen_umax_i64(d, a, n);
+    tcg_gen_umin_i64(d, d, m);
+}
+
+static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                           TCGv_vec m, TCGv_vec a)
+{
+    tcg_gen_umax_vec(vece, d, a, n);
+    tcg_gen_umin_vec(vece, d, d, m);
+}
+
+static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                       uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop[] = {
+        INDEX_op_umin_vec, INDEX_op_umax_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_uclamp_vec,
+          .fno = gen_helper_gvec_uclamp_b,
+          .opt_opc = vecop,
+          .vece = MO_8 },
+        { .fniv = gen_uclamp_vec,
+          .fno = gen_helper_gvec_uclamp_h,
+          .opt_opc = vecop,
+          .vece = MO_16 },
+        { .fni4 = gen_uclamp_i32,
+          .fniv = gen_uclamp_vec,
+          .fno = gen_helper_gvec_uclamp_s,
+          .opt_opc = vecop,
+          .vece = MO_32 },
+        { .fni8 = gen_uclamp_i64,
+          .fniv = gen_uclamp_vec,
+          .fno = gen_helper_gvec_uclamp_d,
+          .opt_opc = vecop,
+          .vece = MO_64,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
+}
+
+TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a)
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm,
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
+
+#define DO_CLAMP(NAME, TYPE) \
+void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc)   \
+{                                                                       \
+    intptr_t i, opr_sz = simd_oprsz(desc);                              \
+    for (i = 0; i < opr_sz; i += sizeof(TYPE)) {                        \
+        TYPE aa = *(TYPE *)(a + i);                                     \
+        TYPE nn = *(TYPE *)(n + i);                                     \
+        TYPE mm = *(TYPE *)(m + i);                                     \
+        TYPE dd = MIN(MAX(aa, nn), mm);                                 \
+        *(TYPE *)(d + i) = dd;                                          \
+    }                                                                   \
+    clear_tail(d, opr_sz, simd_maxsz(desc));                            \
+}
+
+DO_CLAMP(gvec_sclamp_b, int8_t)
+DO_CLAMP(gvec_sclamp_h, int16_t)
+DO_CLAMP(gvec_sclamp_s, int32_t)
+DO_CLAMP(gvec_sclamp_d, int64_t)
+
+DO_CLAMP(gvec_uclamp_b, uint8_t)
+DO_CLAMP(gvec_uclamp_h, uint16_t)
+DO_CLAMP(gvec_uclamp_s, uint32_t)
+DO_CLAMP(gvec_uclamp_d, uint64_t)
--
2.25.1
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

We can handle both exception entry and exception return by
hooking into aarch64_sve_change_el.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-32-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
         return;
     }
 
+    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
+    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
+
+    /*
+     * Both AArch64.TakeException and AArch64.ExceptionReturn
+     * invoke ResetSVEState when taking an exception from, or
+     * returning to, AArch32 state when PSTATE.SM is enabled.
+     */
+    if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) {
+        arm_reset_sve_state(env);
+        return;
+    }
+
     /*
      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
      * at ELx, or not available because the EL is in AArch32 state, then
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
      * we already have the correct register contents when encountering the
      * vq0->vq0 transition between EL0->EL1.
      */
-    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
     old_len = (old_a64 && !sve_exception_el(env, old_el)
                ? sve_vqm1_for_el(env, old_el) : 0);
-    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
     new_len = (new_a64 && !sve_exception_el(env, new_el)
                ? sve_vqm1_for_el(env, new_el) : 0);
 
--
2.25.1
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

Note that SME remains effectively disabled for user-only,
because we do not yet set CPACR_EL1.SMEN.  This needs to
wait until the kernel ABI is implemented.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-33-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/emulation.rst |  4 ++++
 target/arm/cpu64.c            | 11 +++++++++++
 2 files changed, 15 insertions(+)

diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
 - FEAT_SHA512 (Advanced SIMD SHA512 instructions)
 - FEAT_SM3 (Advanced SIMD SM3 instructions)
 - FEAT_SM4 (Advanced SIMD SM4 instructions)
+- FEAT_SME (Scalable Matrix Extension)
+- FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode)
+- FEAT_SME_F64F64 (Double-precision floating-point outer product instructions)
+- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions)
 - FEAT_SPECRES (Speculation restriction instructions)
 - FEAT_SSBS (Speculative Store Bypass Safe)
 - FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
      */
     t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);       /* FEAT_MTE3 */
     t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0);  /* FEAT_RASv1p1 + FEAT_DoubleFault */
+    t = FIELD_DP64(t, ID_AA64PFR1, SME, 1);       /* FEAT_SME */
     t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */
     cpu->isar.id_aa64pfr1 = t;
 
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
     t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5);    /* FEAT_PMUv3p4 */
     cpu->isar.id_aa64dfr0 = t;
 
+    t = cpu->isar.id_aa64smfr0;
+    t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1);   /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1);   /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1);   /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf);  /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1);   /* FEAT_SME_F64F64 */
+    t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */
+    t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1);     /* FEAT_SME_FA64 */
+    cpu->isar.id_aa64smfr0 = t;
+
     /* Replicate the same data to the 32-bit id registers.  */
     aa32_max_features(cpu);
 
--
2.25.1
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-34-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/target_cpu.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/linux-user/aarch64/target_cpu.h b/linux-user/aarch64/target_cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/target_cpu.h
+++ b/linux-user/aarch64/target_cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_clone_regs_parent(CPUARMState *env, unsigned flags)
 
 static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
 {
-    /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
+    /*
+     * Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
      * different from AArch32 Linux, which uses TPIDRRO.
      */
     env->cp15.tpidr_el[0] = newtls;
+    /* TPIDR2_EL0 is cleared with CLONE_SETTLS. */
+    env->cp15.tpidr2_el0 = 0;
 }
 
 static inline abi_ulong get_sp_from_cpustate(CPUARMState *state)
--
2.25.1
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-35-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/cpu_loop.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/cpu_loop.c
+++ b/linux-user/aarch64/cpu_loop.c
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)
 
         switch (trapnr) {
         case EXCP_SWI:
+            /*
+             * On syscall, PSTATE.ZA is preserved, along with the ZA matrix.
+             * PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState.
+             */
+            if (FIELD_EX64(env->svcr, SVCR, SM)) {
+                env->svcr = FIELD_DP64(env->svcr, SVCR, SM, 0);
+                arm_rebuild_hflags(env);
+                arm_reset_sve_state(env);
+            }
             ret = do_syscall(env,
                              env->xregs[8],
                              env->xregs[0],
--
2.25.1
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

Make sure to zero the currently reserved fields.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-36-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/signal.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -XXX,XX +XXX,XX @@ struct target_extra_context {
 struct target_sve_context {
     struct target_aarch64_ctx head;
     uint16_t vl;
-    uint16_t reserved[3];
+    uint16_t flags;
+    uint16_t reserved[2];
     /* The actual SVE data immediately follows.  It is laid out
      * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
      * the original struct pointer.
@@ -XXX,XX +XXX,XX @@ struct target_sve_context {
 #define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
     (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))
 
+#define TARGET_SVE_SIG_FLAG_SM  1
+
 struct target_rt_sigframe {
     struct target_siginfo info;
     struct target_ucontext uc;
@@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve,
 {
     int i, j;
 
+    memset(sve, 0, sizeof(*sve));
     __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
     __put_user(size, &sve->head.size);
     __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);
+    if (FIELD_EX64(env->svcr, SVCR, SM)) {
+        __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags);
+    }
 
     /* Note that SVE regs are stored as a byte stream, with each byte element
      * at a subsequent address.  This corresponds to a little-endian store
--
2.25.1
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

Fold the return value setting into the goto, so each
point of failure need not do both.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-37-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/signal.c | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
     struct target_sve_context *sve = NULL;
     uint64_t extra_datap = 0;
     bool used_extra = false;
-    bool err = false;
     int vq = 0, sve_size = 0;
 
     target_restore_general_frame(env, sf);
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
         switch (magic) {
         case 0:
             if (size != 0) {
-                err = true;
-                goto exit;
+                goto err;
             }
             if (used_extra) {
                 ctx = NULL;
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
 
         case TARGET_FPSIMD_MAGIC:
             if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
-                err = true;
-                goto exit;
+                goto err;
             }
             fpsimd = (struct target_fpsimd_context *)ctx;
             break;
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
                     break;
                 }
             }
-            err = true;
-            goto exit;
+            goto err;
 
         case TARGET_EXTRA_MAGIC:
             if (extra || size != sizeof(struct target_extra_context)) {
-                err = true;
-                goto exit;
+                goto err;
             }
             __get_user(extra_datap,
                        &((struct target_extra_context *)ctx)->datap);
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
             /* Unknown record -- we certainly didn't generate it.
              * Did we in fact get out of sync?
              */
-            err = true;
-            goto exit;
+            goto err;
         }
         ctx = (void *)ctx + size;
     }
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
     if (fpsimd) {
         target_restore_fpsimd_record(env, fpsimd);
     } else {
-        err = true;
+        goto err;
     }
 
     /* SVE data, if present, overwrites FPSIMD data. */
     if (sve) {
         target_restore_sve_record(env, sve, vq);
     }
-
- exit:
     unlock_user(extra, extra_datap, 0);
-    return err;
+    return 0;
+
+ err:
+    unlock_user(extra, extra_datap, 0);
+    return 1;
 }
 
 static abi_ulong get_sigframe(struct target_sigaction *ka,
--
2.25.1
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

In parse_user_sigframe, the kernel rejects duplicate sve records,
or records that are smaller than the header.  We were silently
allowing these cases to pass, dropping the record.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-38-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/signal.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
             break;
 
         case TARGET_SVE_MAGIC:
+            if (sve || size < sizeof(struct target_sve_context)) {
+                goto err;
+            }
             if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
                 vq = sve_vq(env);
                 sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
-                if (!sve && size == sve_size) {
+                if (size == sve_size) {
                     sve = (struct target_sve_context *)ctx;
                     break;
                 }
--
2.25.1
diff view generated by jsdifflib
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-39-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/signal.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
             __get_user(extra_size,
                        &((struct target_extra_context *)ctx)->size);
             extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
+            if (!extra) {
+                return 1;
+            }
             break;
 
         default:
--
2.25.1
diff view generated by jsdifflib
1
From: Eric Auger <eric.auger@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Up to now the memory map has been static and the high IO region
3
Move the checks out of the parsing loop and into the
4
base has always been 256GiB.
4
restore function. This more closely mirrors the code
5
structure in the kernel, and is slightly clearer.
5
6
6
This patch modifies the virt_set_memmap() function, which freezes
7
Reject rather than silently skip incorrect VL and SVE record sizes,
7
the memory map, so that the high IO range base becomes floating,
8
bringing our checks in to line with those the kernel does.
8
located after the initial RAM and the device memory.
9
9
10
The function computes
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
- the base of the device memory,
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
- the size of the device memory,
12
Message-id: 20220708151540.18136-40-richard.henderson@linaro.org
13
- the high IO region base
14
- the highest GPA used in the memory map.
15
16
Entries of the high IO region are assigned a base address. The
17
device memory is initialized.
18
19
The highest GPA used in the memory map will be used at VM creation
20
to choose the requested IPA size.
21
22
Setting all the existing highmem IO regions beyond the RAM
23
allows to have a single contiguous RAM region (initial RAM and
24
possible hotpluggable device memory). That way we do not need
25
to do invasive changes in the EDK2 FW to support a dynamic
26
RAM base.
27
28
Still the user cannot request an initial RAM size greater than 255GB.
29
30
Signed-off-by: Eric Auger <eric.auger@redhat.com>
31
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
32
Message-id: 20190304101339.25970-8-eric.auger@redhat.com
33
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
34
---
14
---
35
include/hw/arm/virt.h | 1 +
15
linux-user/aarch64/signal.c | 51 +++++++++++++++++++++++++------------
36
hw/arm/virt.c | 52 ++++++++++++++++++++++++++++++++++++++-----
16
1 file changed, 35 insertions(+), 16 deletions(-)
37
2 files changed, 47 insertions(+), 6 deletions(-)
38
17
39
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
18
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
40
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
41
--- a/include/hw/arm/virt.h
20
--- a/linux-user/aarch64/signal.c
42
+++ b/include/hw/arm/virt.h
21
+++ b/linux-user/aarch64/signal.c
43
@@ -XXX,XX +XXX,XX @@ typedef struct {
22
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,
44
uint32_t msi_phandle;
23
}
45
uint32_t iommu_phandle;
24
}
46
int psci_conduit;
25
47
+ hwaddr highest_gpa;
26
-static void target_restore_sve_record(CPUARMState *env,
48
} VirtMachineState;
27
- struct target_sve_context *sve, int vq)
49
28
+static bool target_restore_sve_record(CPUARMState *env,
50
#define VIRT_ECAM_ID(high) (high ? VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM)
29
+ struct target_sve_context *sve,
51
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
30
+ int size)
52
index XXXXXXX..XXXXXXX 100644
53
--- a/hw/arm/virt.c
54
+++ b/hw/arm/virt.c
55
@@ -XXX,XX +XXX,XX @@
56
#include "qapi/visitor.h"
57
#include "standard-headers/linux/input.h"
58
#include "hw/arm/smmuv3.h"
59
+#include "hw/acpi/acpi.h"
60
61
#define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \
62
static void virt_##major##_##minor##_class_init(ObjectClass *oc, \
63
@@ -XXX,XX +XXX,XX @@
64
* of a terabyte of RAM will be doing it on a host with more than a
65
* terabyte of physical address space.)
66
*/
67
-#define RAMLIMIT_GB 255
68
-#define RAMLIMIT_BYTES (RAMLIMIT_GB * 1024ULL * 1024 * 1024)
69
+#define LEGACY_RAMLIMIT_GB 255
70
+#define LEGACY_RAMLIMIT_BYTES (LEGACY_RAMLIMIT_GB * GiB)
71
72
/* Addresses and sizes of our components.
73
* 0..128MB is space for a flash device so we can run bootrom code such as UEFI.
74
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry base_memmap[] = {
75
[VIRT_PCIE_MMIO] = { 0x10000000, 0x2eff0000 },
76
[VIRT_PCIE_PIO] = { 0x3eff0000, 0x00010000 },
77
[VIRT_PCIE_ECAM] = { 0x3f000000, 0x01000000 },
78
- [VIRT_MEM] = { 0x40000000, RAMLIMIT_BYTES },
79
+ /* Actual RAM size depends on initial RAM and device memory settings */
80
+ [VIRT_MEM] = { GiB, LEGACY_RAMLIMIT_BYTES },
81
};
82
83
/*
84
@@ -XXX,XX +XXX,XX @@ static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx)
85
86
static void virt_set_memmap(VirtMachineState *vms)
87
{
31
{
88
- hwaddr base;
32
- int i, j;
89
+ MachineState *ms = MACHINE(vms);
33
+ int i, j, vl, vq;
90
+ hwaddr base, device_memory_base, device_memory_size;
34
91
int i;
35
- /* Note that SVE regs are stored as a byte stream, with each byte element
92
36
+ if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
93
vms->memmap = extended_memmap;
37
+ return false;
94
@@ -XXX,XX +XXX,XX @@ static void virt_set_memmap(VirtMachineState *vms)
38
+ }
95
vms->memmap[i] = base_memmap[i];
39
+
96
}
40
+ __get_user(vl, &sve->vl);
97
41
+ vq = sve_vq(env);
98
- base = 256 * GiB; /* Top of the legacy initial RAM region */
42
+
99
+ if (ms->ram_slots > ACPI_MAX_RAM_SLOTS) {
43
+ /* Reject mismatched VL. */
100
+ error_report("unsupported number of memory slots: %"PRIu64,
44
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
101
+ ms->ram_slots);
45
+ return false;
102
+ exit(EXIT_FAILURE);
46
+ }
47
+
48
+ /* Accept empty record -- used to clear PSTATE.SM. */
49
+ if (size <= sizeof(*sve)) {
50
+ return true;
51
+ }
52
+
53
+ /* Reject non-empty but incomplete record. */
54
+ if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) {
55
+ return false;
103
+ }
56
+ }
104
+
57
+
105
+ /*
58
+ /*
106
+ * We compute the base of the high IO region depending on the
59
+ * Note that SVE regs are stored as a byte stream, with each byte element
107
+ * amount of initial and device memory. The device memory start/size
60
* at a subsequent address. This corresponds to a little-endian load
108
+ * is aligned on 1GiB. We never put the high IO region below 256GiB
61
* of our 64-bit hunks.
109
+ * so that if maxram_size is < 255GiB we keep the legacy memory map.
62
*/
110
+ * The device region size assumes 1GiB page max alignment per slot.
63
@@ -XXX,XX +XXX,XX @@ static void target_restore_sve_record(CPUARMState *env,
111
+ */
64
}
112
+ device_memory_base =
65
}
113
+ ROUND_UP(vms->memmap[VIRT_MEM].base + ms->ram_size, GiB);
114
+ device_memory_size = ms->maxram_size - ms->ram_size + ms->ram_slots * GiB;
115
+
116
+ /* Base address of the high IO region */
117
+ base = device_memory_base + ROUND_UP(device_memory_size, GiB);
118
+ if (base < device_memory_base) {
119
+ error_report("maxmem/slots too huge");
120
+ exit(EXIT_FAILURE);
121
+ }
122
+ if (base < vms->memmap[VIRT_MEM].base + LEGACY_RAMLIMIT_BYTES) {
123
+ base = vms->memmap[VIRT_MEM].base + LEGACY_RAMLIMIT_BYTES;
124
+ }
125
126
for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) {
127
hwaddr size = extended_memmap[i].size;
128
@@ -XXX,XX +XXX,XX @@ static void virt_set_memmap(VirtMachineState *vms)
129
vms->memmap[i].size = size;
130
base += size;
131
}
66
}
132
+ vms->highest_gpa = base - 1;
67
+ return true;
133
+ if (device_memory_size > 0) {
134
+ ms->device_memory = g_malloc0(sizeof(*ms->device_memory));
135
+ ms->device_memory->base = device_memory_base;
136
+ memory_region_init(&ms->device_memory->mr, OBJECT(vms),
137
+ "device-memory", device_memory_size);
138
+ }
139
}
68
}
140
69
141
static void machvirt_init(MachineState *machine)
70
static int target_restore_sigframe(CPUARMState *env,
142
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
71
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
143
vms->smp_cpus = smp_cpus;
72
struct target_sve_context *sve = NULL;
144
73
uint64_t extra_datap = 0;
145
if (machine->ram_size > vms->memmap[VIRT_MEM].size) {
74
bool used_extra = false;
146
- error_report("mach-virt: cannot model more than %dGB RAM", RAMLIMIT_GB);
75
- int vq = 0, sve_size = 0;
147
+ error_report("mach-virt: cannot model more than %dGB RAM",
76
+ int sve_size = 0;
148
+ LEGACY_RAMLIMIT_GB);
77
149
exit(1);
78
target_restore_general_frame(env, sf);
79
80
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
81
if (sve || size < sizeof(struct target_sve_context)) {
82
goto err;
83
}
84
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
85
- vq = sve_vq(env);
86
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
87
- if (size == sve_size) {
88
- sve = (struct target_sve_context *)ctx;
89
- break;
90
- }
91
- }
92
- goto err;
93
+ sve = (struct target_sve_context *)ctx;
94
+ sve_size = size;
95
+ break;
96
97
case TARGET_EXTRA_MAGIC:
98
if (extra || size != sizeof(struct target_extra_context)) {
99
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
150
}
100
}
151
101
152
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
102
/* SVE data, if present, overwrites FPSIMD data. */
153
memory_region_allocate_system_memory(ram, NULL, "mach-virt.ram",
103
- if (sve) {
154
machine->ram_size);
104
- target_restore_sve_record(env, sve, vq);
155
memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base, ram);
105
+ if (sve && !target_restore_sve_record(env, sve, sve_size)) {
156
+ if (machine->device_memory) {
106
+ goto err;
157
+ memory_region_add_subregion(sysmem, machine->device_memory->base,
107
}
158
+ &machine->device_memory->mr);
108
unlock_user(extra, extra_datap, 0);
159
+ }
109
return 0;
160
161
create_flash(vms, sysmem, secure_sysmem ? secure_sysmem : sysmem);
162
163
--
110
--
164
2.20.1
111
2.25.1
165
166
diff view generated by jsdifflib
1
From: Eric Auger <eric.auger@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
In the prospect to introduce an extended memory map supporting more
3
Set the SM bit in the SVE record on signal delivery, create the ZA record.
4
RAM, let's split the memory map array into two parts:
4
Restore SM and ZA state according to the records present on return.
5
5
6
- the former a15memmap, renamed base_memmap, contains regions below
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
and including the RAM. MemMapEntries initialized in this array
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
have a static size and base address.
8
Message-id: 20220708151540.18136-41-richard.henderson@linaro.org
9
- extended_memmap, only initialized with entries located after the
10
RAM. MemMapEntries initialized in this array only get their size
11
initialized. Their base address is dynamically computed depending
12
on the the top of the RAM, with same alignment as their size.
13
14
Eventually base_memmap entries are copied into the extended_memmap
15
array. Using two separate arrays however clarifies which entries
16
are statically allocated and those which are dynamically allocated.
17
18
This new split will allow to grow the RAM size without changing the
19
description of the high IO entries.
20
21
We introduce a new virt_set_memmap() helper function which
22
"freezes" the memory map. We call it in machvirt_init as
23
memory attributes of the machine are not yet set when
24
virt_instance_init() gets called.
25
26
The memory map is unchanged (the top of the initial RAM still is
27
256GiB). Then come the high IO regions with same layout as before.
28
29
Signed-off-by: Eric Auger <eric.auger@redhat.com>
30
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
31
Message-id: 20190304101339.25970-4-eric.auger@redhat.com
32
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
33
---
10
---
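As a rough, self-contained sketch (not part of the patch) of how the new
TARGET_ZA_SIG_* macros size the ZA record, assuming the 16-byte
TARGET_SVE_VQ_BYTES granule used by this file:

    /* Hypothetical stand-alone illustration of the ZA payload sizing. */
    #include <stdio.h>

    #define VQ_BYTES 16                  /* bytes per vector quadword */

    /* ZA holds SVL rows of SVL bytes each, where SVL = vq * VQ_BYTES. */
    static unsigned za_payload_bytes(unsigned vq)
    {
        unsigned svl = vq * VQ_BYTES;
        return svl * svl;
    }

    int main(void)
    {
        for (unsigned vq = 1; vq <= 16; vq <<= 1) {
            printf("vq=%-2u svl=%3u bytes  ZA payload=%5u bytes\n",
                   vq, vq * VQ_BYTES, za_payload_bytes(vq));
        }
        return 0;
    }

The full record adds the header, aligned up to the same granule, which is
what TARGET_ZA_SIG_CONTEXT_SIZE(VQ) expresses; an "empty" record of
TARGET_ZA_SIG_CONTEXT_SIZE(0) is used when PSTATE.ZA is disabled.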
34
include/hw/arm/virt.h | 13 +++++++----
11
linux-user/aarch64/signal.c | 167 +++++++++++++++++++++++++++++++++---
35
hw/arm/virt.c | 50 +++++++++++++++++++++++++++++++++++++------
12
1 file changed, 154 insertions(+), 13 deletions(-)
36
2 files changed, 53 insertions(+), 10 deletions(-)
37
13
38
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
14
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
39
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
40
--- a/include/hw/arm/virt.h
16
--- a/linux-user/aarch64/signal.c
41
+++ b/include/hw/arm/virt.h
17
+++ b/linux-user/aarch64/signal.c
42
@@ -XXX,XX +XXX,XX @@ enum {
18
@@ -XXX,XX +XXX,XX @@ struct target_sve_context {
43
VIRT_GIC_VCPU,
19
44
VIRT_GIC_ITS,
20
#define TARGET_SVE_SIG_FLAG_SM 1
45
VIRT_GIC_REDIST,
21
46
- VIRT_HIGH_GIC_REDIST2,
22
+#define TARGET_ZA_MAGIC 0x54366345
47
VIRT_SMMU,
23
+
48
VIRT_UART,
24
+struct target_za_context {
49
VIRT_MMIO,
25
+ struct target_aarch64_ctx head;
50
@@ -XXX,XX +XXX,XX @@ enum {
26
+ uint16_t vl;
51
VIRT_PCIE_MMIO,
27
+ uint16_t reserved[3];
52
VIRT_PCIE_PIO,
28
+ /* The actual ZA data immediately follows. */
53
VIRT_PCIE_ECAM,
54
- VIRT_HIGH_PCIE_ECAM,
55
VIRT_PLATFORM_BUS,
56
- VIRT_HIGH_PCIE_MMIO,
57
VIRT_GPIO,
58
VIRT_SECURE_UART,
59
VIRT_SECURE_MEM,
60
+ VIRT_LOWMEMMAP_LAST,
61
+};
29
+};
62
+
30
+
63
+/* indices of IO regions located after the RAM */
31
+#define TARGET_ZA_SIG_REGS_OFFSET \
64
+enum {
32
+ QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
65
+ VIRT_HIGH_GIC_REDIST2 = VIRT_LOWMEMMAP_LAST,
33
+#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
66
+ VIRT_HIGH_PCIE_ECAM,
34
+ (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
67
+ VIRT_HIGH_PCIE_MMIO,
35
+#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
68
};
36
+ TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
69
37
+
70
typedef enum VirtIOMMUType {
38
struct target_rt_sigframe {
71
@@ -XXX,XX +XXX,XX @@ typedef struct {
39
struct target_siginfo info;
72
int32_t gic_version;
40
struct target_ucontext uc;
73
VirtIOMMUType iommu;
41
@@ -XXX,XX +XXX,XX @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
74
struct arm_boot_info bootinfo;
75
- const MemMapEntry *memmap;
76
+ MemMapEntry *memmap;
77
const int *irqmap;
78
int smp_cpus;
79
void *fdt;
80
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/hw/arm/virt.c
83
+++ b/hw/arm/virt.c
84
@@ -XXX,XX +XXX,XX @@
85
*/
86
87
#include "qemu/osdep.h"
88
+#include "qemu/units.h"
89
#include "qapi/error.h"
90
#include "hw/sysbus.h"
91
#include "hw/arm/arm.h"
92
@@ -XXX,XX +XXX,XX @@
93
* Note that devices should generally be placed at multiples of 0x10000,
94
* to accommodate guests using 64K pages.
95
*/
96
-static const MemMapEntry a15memmap[] = {
97
+static const MemMapEntry base_memmap[] = {
98
/* Space up to 0x8000000 is reserved for a boot ROM */
99
[VIRT_FLASH] = { 0, 0x08000000 },
100
[VIRT_CPUPERIPHS] = { 0x08000000, 0x00020000 },
101
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry a15memmap[] = {
102
[VIRT_PCIE_PIO] = { 0x3eff0000, 0x00010000 },
103
[VIRT_PCIE_ECAM] = { 0x3f000000, 0x01000000 },
104
[VIRT_MEM] = { 0x40000000, RAMLIMIT_BYTES },
105
+};
106
+
107
+/*
108
+ * Highmem IO Regions: This memory map is floating, located after the RAM.
109
+ * Each MemMapEntry base (GPA) will be dynamically computed, depending on the
110
+ * top of the RAM, so that its base gets the same alignment as the size,
111
+ * i.e. a 512GiB entry will be aligned on a 512GiB boundary. If there is
112
+ * less than 256GiB of RAM, the floating area starts at the 256GiB mark.
113
+ * Note the extended_memmap is sized so that it eventually also includes the
114
+ * base_memmap entries (VIRT_HIGH_GIC_REDIST2 index is greater than the last
115
+ * index of base_memmap).
116
+ */
117
+static MemMapEntry extended_memmap[] = {
118
/* Additional 64 MB redist region (can contain up to 512 redistributors) */
119
- [VIRT_HIGH_GIC_REDIST2] = { 0x4000000000ULL, 0x4000000 },
120
- [VIRT_HIGH_PCIE_ECAM] = { 0x4010000000ULL, 0x10000000 },
121
- /* Second PCIe window, 512GB wide at the 512GB boundary */
122
- [VIRT_HIGH_PCIE_MMIO] = { 0x8000000000ULL, 0x8000000000ULL },
123
+ [VIRT_HIGH_GIC_REDIST2] = { 0x0, 64 * MiB },
124
+ [VIRT_HIGH_PCIE_ECAM] = { 0x0, 256 * MiB },
125
+ /* Second PCIe window */
126
+ [VIRT_HIGH_PCIE_MMIO] = { 0x0, 512 * GiB },
127
};
128
129
static const int a15irqmap[] = {
130
@@ -XXX,XX +XXX,XX @@ static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx)
131
return arm_cpu_mp_affinity(idx, clustersz);
132
}
42
}
133
43
134
+static void virt_set_memmap(VirtMachineState *vms)
44
static void target_setup_sve_record(struct target_sve_context *sve,
45
- CPUARMState *env, int vq, int size)
46
+ CPUARMState *env, int size)
47
{
48
- int i, j;
49
+ int i, j, vq = sve_vq(env);
50
51
memset(sve, 0, sizeof(*sve));
52
__put_user(TARGET_SVE_MAGIC, &sve->head.magic);
53
@@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve,
54
}
55
}
56
57
+static void target_setup_za_record(struct target_za_context *za,
58
+ CPUARMState *env, int size)
135
+{
59
+{
136
+ hwaddr base;
60
+ int vq = sme_vq(env);
137
+ int i;
61
+ int vl = vq * TARGET_SVE_VQ_BYTES;
138
+
62
+ int i, j;
139
+ vms->memmap = extended_memmap;
63
+
140
+
64
+ memset(za, 0, sizeof(*za));
141
+ for (i = 0; i < ARRAY_SIZE(base_memmap); i++) {
65
+ __put_user(TARGET_ZA_MAGIC, &za->head.magic);
142
+ vms->memmap[i] = base_memmap[i];
66
+ __put_user(size, &za->head.size);
143
+ }
67
+ __put_user(vl, &za->vl);
144
+
68
+
145
+ base = 256 * GiB; /* Top of the legacy initial RAM region */
69
+ if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
146
+
70
+ return;
147
+ for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) {
71
+ }
148
+ hwaddr size = extended_memmap[i].size;
72
+ assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq));
149
+
73
+
150
+ base = ROUND_UP(base, size);
74
+ /*
151
+ vms->memmap[i].base = base;
75
+ * Note that ZA vectors are stored as a byte stream,
152
+ vms->memmap[i].size = size;
76
+ * with each byte element at a subsequent address.
153
+ base += size;
77
+ */
78
+ for (i = 0; i < vl; ++i) {
79
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
80
+ for (j = 0; j < vq * 2; ++j) {
81
+ __put_user_e(env->zarray[i].d[j], z + j, le);
82
+ }
154
+ }
83
+ }
155
+}
84
+}
156
+
85
+
157
static void machvirt_init(MachineState *machine)
86
static void target_restore_general_frame(CPUARMState *env,
158
{
87
struct target_rt_sigframe *sf)
159
VirtMachineState *vms = VIRT_MACHINE(machine);
88
{
160
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
89
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,
161
bool firmware_loaded = bios_name || drive_get(IF_PFLASH, 0, 0);
90
162
bool aarch64 = true;
91
static bool target_restore_sve_record(CPUARMState *env,
163
92
struct target_sve_context *sve,
164
+ virt_set_memmap(vms);
93
- int size)
165
+
94
+ int size, int *svcr)
166
/* We can probe only here because during property set
95
{
167
* KVM is not available yet
96
- int i, j, vl, vq;
168
*/
97
+ int i, j, vl, vq, flags;
169
@@ -XXX,XX +XXX,XX @@ static void virt_instance_init(Object *obj)
98
+ bool sm;
170
"Valid values are none and smmuv3",
99
171
NULL);
100
- if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
172
101
+ __get_user(vl, &sve->vl);
173
- vms->memmap = a15memmap;
102
+ __get_user(flags, &sve->flags);
174
vms->irqmap = a15irqmap;
103
+
104
+ sm = flags & TARGET_SVE_SIG_FLAG_SM;
105
+
106
+ /* The cpu must support Streaming or Non-streaming SVE. */
107
+ if (sm
108
+ ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
109
+ : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
110
return false;
111
}
112
113
- __get_user(vl, &sve->vl);
114
- vq = sve_vq(env);
115
+ /*
116
+ * Note that we cannot use sve_vq() because that depends on the
117
+ * current setting of PSTATE.SM, not the state to be restored.
118
+ */
119
+ vq = sve_vqm1_for_el_sm(env, 0, sm) + 1;
120
121
/* Reject mismatched VL. */
122
if (vl != vq * TARGET_SVE_VQ_BYTES) {
123
@@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env,
124
return false;
125
}
126
127
+ *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);
128
+
129
/*
130
* Note that SVE regs are stored as a byte stream, with each byte element
131
* at a subsequent address. This corresponds to a little-endian load
132
@@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env,
133
return true;
175
}
134
}
176
135
136
+static bool target_restore_za_record(CPUARMState *env,
137
+ struct target_za_context *za,
138
+ int size, int *svcr)
139
+{
140
+ int i, j, vl, vq;
141
+
142
+ if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
143
+ return false;
144
+ }
145
+
146
+ __get_user(vl, &za->vl);
147
+ vq = sme_vq(env);
148
+
149
+ /* Reject mismatched VL. */
150
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
151
+ return false;
152
+ }
153
+
154
+ /* Accept empty record -- used to clear PSTATE.ZA. */
155
+ if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
156
+ return true;
157
+ }
158
+
159
+ /* Reject non-empty but incomplete record. */
160
+ if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
161
+ return false;
162
+ }
163
+
164
+ *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);
165
+
166
+ for (i = 0; i < vl; ++i) {
167
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
168
+ for (j = 0; j < vq * 2; ++j) {
169
+ __get_user_e(env->zarray[i].d[j], z + j, le);
170
+ }
171
+ }
172
+ return true;
173
+}
174
+
175
static int target_restore_sigframe(CPUARMState *env,
176
struct target_rt_sigframe *sf)
177
{
178
struct target_aarch64_ctx *ctx, *extra = NULL;
179
struct target_fpsimd_context *fpsimd = NULL;
180
struct target_sve_context *sve = NULL;
181
+ struct target_za_context *za = NULL;
182
uint64_t extra_datap = 0;
183
bool used_extra = false;
184
int sve_size = 0;
185
+ int za_size = 0;
186
+ int svcr = 0;
187
188
target_restore_general_frame(env, sf);
189
190
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
191
sve_size = size;
192
break;
193
194
+ case TARGET_ZA_MAGIC:
195
+ if (za || size < sizeof(struct target_za_context)) {
196
+ goto err;
197
+ }
198
+ za = (struct target_za_context *)ctx;
199
+ za_size = size;
200
+ break;
201
+
202
case TARGET_EXTRA_MAGIC:
203
if (extra || size != sizeof(struct target_extra_context)) {
204
goto err;
205
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
206
}
207
208
/* SVE data, if present, overwrites FPSIMD data. */
209
- if (sve && !target_restore_sve_record(env, sve, sve_size)) {
210
+ if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
211
goto err;
212
}
213
+ if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
214
+ goto err;
215
+ }
216
+ if (env->svcr != svcr) {
217
+ env->svcr = svcr;
218
+ arm_rebuild_hflags(env);
219
+ }
220
unlock_user(extra, extra_datap, 0);
221
return 0;
222
223
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
224
.total_size = offsetof(struct target_rt_sigframe,
225
uc.tuc_mcontext.__reserved),
226
};
227
- int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
228
+ int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
229
+ int sve_size = 0, za_size = 0;
230
struct target_rt_sigframe *frame;
231
struct target_rt_frame_record *fr;
232
abi_ulong frame_addr, return_addr;
233
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
234
&layout);
235
236
/* SVE state needs saving only if it exists. */
237
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
238
- vq = sve_vq(env);
239
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
240
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
241
+ cpu_isar_feature(aa64_sme, env_archcpu(env))) {
242
+ sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
243
sve_ofs = alloc_sigframe_space(sve_size, &layout);
244
}
245
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
246
+ /* ZA state needs saving only if it is enabled. */
247
+ if (FIELD_EX64(env->svcr, SVCR, ZA)) {
248
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
249
+ } else {
250
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
251
+ }
252
+ za_ofs = alloc_sigframe_space(za_size, &layout);
253
+ }
254
255
if (layout.extra_ofs) {
256
/* Reserve space for the extra end marker. The standard end marker
257
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
258
target_setup_end_record((void *)frame + layout.extra_end_ofs);
259
}
260
if (sve_ofs) {
261
- target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
262
+ target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
263
+ }
264
+ if (za_ofs) {
265
+ target_setup_za_record((void *)frame + za_ofs, env, za_size);
266
}
267
268
/* Set up the stack frame for unwinding. */
269
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
270
env->btype = 2;
271
}
272
273
+ /*
274
+ * Invoke the signal handler with both SM and ZA disabled.
275
+ * When clearing SM, ResetSVEState, per SMSTOP.
276
+ */
277
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
278
+ arm_reset_sve_state(env);
279
+ }
280
+ if (env->svcr) {
281
+ env->svcr = 0;
282
+ arm_rebuild_hflags(env);
283
+ }
284
+
285
if (info) {
286
tswap_siginfo(&frame->info, info);
287
env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
177
--
288
--
178
2.20.1
289
2.25.1
179
180
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Add "sve" to the sve prctl functions, to distinguish
4
them from the coming "sme" prctls with similar names.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-42-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
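For context, a minimal guest-side probe of the prctl these handlers back
(a hypothetical test program, not part of the patch; the fallback constants
match the Linux uapi values):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SVE_GET_VL
    #define PR_SVE_GET_VL      51
    #define PR_SVE_VL_LEN_MASK 0xffff
    #endif

    int main(void)
    {
        int ret = prctl(PR_SVE_GET_VL);

        if (ret < 0) {
            perror("PR_SVE_GET_VL");      /* e.g. EINVAL without SVE */
            return 1;
        }
        printf("SVE vector length: %d bytes\n", ret & PR_SVE_VL_LEN_MASK);
        return 0;
    }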
11
linux-user/aarch64/target_prctl.h | 8 ++++----
12
linux-user/syscall.c | 12 ++++++------
13
2 files changed, 10 insertions(+), 10 deletions(-)
14
15
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/linux-user/aarch64/target_prctl.h
18
+++ b/linux-user/aarch64/target_prctl.h
19
@@ -XXX,XX +XXX,XX @@
20
#ifndef AARCH64_TARGET_PRCTL_H
21
#define AARCH64_TARGET_PRCTL_H
22
23
-static abi_long do_prctl_get_vl(CPUArchState *env)
24
+static abi_long do_prctl_sve_get_vl(CPUArchState *env)
25
{
26
ARMCPU *cpu = env_archcpu(env);
27
if (cpu_isar_feature(aa64_sve, cpu)) {
28
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_get_vl(CPUArchState *env)
29
}
30
return -TARGET_EINVAL;
31
}
32
-#define do_prctl_get_vl do_prctl_get_vl
33
+#define do_prctl_sve_get_vl do_prctl_sve_get_vl
34
35
-static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
36
+static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
37
{
38
/*
39
* We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
40
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
41
}
42
return -TARGET_EINVAL;
43
}
44
-#define do_prctl_set_vl do_prctl_set_vl
45
+#define do_prctl_sve_set_vl do_prctl_sve_set_vl
46
47
static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
48
{
49
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/linux-user/syscall.c
52
+++ b/linux-user/syscall.c
53
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
54
#ifndef do_prctl_set_fp_mode
55
#define do_prctl_set_fp_mode do_prctl_inval1
56
#endif
57
-#ifndef do_prctl_get_vl
58
-#define do_prctl_get_vl do_prctl_inval0
59
+#ifndef do_prctl_sve_get_vl
60
+#define do_prctl_sve_get_vl do_prctl_inval0
61
#endif
62
-#ifndef do_prctl_set_vl
63
-#define do_prctl_set_vl do_prctl_inval1
64
+#ifndef do_prctl_sve_set_vl
65
+#define do_prctl_sve_set_vl do_prctl_inval1
66
#endif
67
#ifndef do_prctl_reset_keys
68
#define do_prctl_reset_keys do_prctl_inval1
69
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
70
case PR_SET_FP_MODE:
71
return do_prctl_set_fp_mode(env, arg2);
72
case PR_SVE_GET_VL:
73
- return do_prctl_get_vl(env);
74
+ return do_prctl_sve_get_vl(env);
75
case PR_SVE_SET_VL:
76
- return do_prctl_set_vl(env, arg2);
77
+ return do_prctl_sve_set_vl(env, arg2);
78
case PR_PAC_RESET_KEYS:
79
if (arg3 || arg4 || arg5) {
80
return -TARGET_EINVAL;
81
--
82
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
These prctls set the Streaming SVE vector length, which may
4
be completely different from the Normal SVE vector length.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-43-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
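A minimal guest-side usage sketch (hypothetical, not part of the patch; the
PR_SME_* numbers are the same fallback values added to syscall.c below):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SME_SET_VL
    #define PR_SME_SET_VL      63
    #define PR_SME_GET_VL      64
    #define PR_SME_VL_LEN_MASK 0xffff
    #endif

    int main(void)
    {
        /* Request a streaming vector length of 32 bytes (VQ = 2). */
        if (prctl(PR_SME_SET_VL, 32) < 0) {
            perror("PR_SME_SET_VL");
            return 1;
        }
        int granted = prctl(PR_SME_GET_VL);
        if (granted < 0) {
            perror("PR_SME_GET_VL");
            return 1;
        }
        printf("streaming VL: %d bytes\n", granted & PR_SME_VL_LEN_MASK);
        return 0;
    }

As the handler added below implements, any change of SVL clears PSTATE.ZA,
so a caller must not expect live ZA contents to survive the SET_VL call.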
11
linux-user/aarch64/target_prctl.h | 54 +++++++++++++++++++++++++++++++
12
linux-user/syscall.c | 16 +++++++++
13
2 files changed, 70 insertions(+)
14
15
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/linux-user/aarch64/target_prctl.h
18
+++ b/linux-user/aarch64/target_prctl.h
19
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_get_vl(CPUArchState *env)
20
{
21
ARMCPU *cpu = env_archcpu(env);
22
if (cpu_isar_feature(aa64_sve, cpu)) {
23
+ /* PSTATE.SM is always unset on syscall entry. */
24
return sve_vq(env) * 16;
25
}
26
return -TARGET_EINVAL;
27
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
28
&& arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
29
uint32_t vq, old_vq;
30
31
+ /* PSTATE.SM is always unset on syscall entry. */
32
old_vq = sve_vq(env);
33
34
/*
35
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
36
}
37
#define do_prctl_sve_set_vl do_prctl_sve_set_vl
38
39
+static abi_long do_prctl_sme_get_vl(CPUArchState *env)
40
+{
41
+ ARMCPU *cpu = env_archcpu(env);
42
+ if (cpu_isar_feature(aa64_sme, cpu)) {
43
+ return sme_vq(env) * 16;
44
+ }
45
+ return -TARGET_EINVAL;
46
+}
47
+#define do_prctl_sme_get_vl do_prctl_sme_get_vl
48
+
49
+static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
50
+{
51
+ /*
52
+ * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
53
+ * Note the kernel definition of sve_vl_valid allows for VQ=512,
54
+ * i.e. VL=8192, even though the architectural maximum is VQ=16.
55
+ */
56
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))
57
+ && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
58
+ int vq, old_vq;
59
+
60
+ old_vq = sme_vq(env);
61
+
62
+ /*
63
+ * Bound the value of vq, so that we know that it fits into
64
+ * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared
65
+ * on syscall entry, we are not modifying the current SVE
66
+ * vector length.
67
+ */
68
+ vq = MAX(arg2 / 16, 1);
69
+ vq = MIN(vq, 16);
70
+ env->vfp.smcr_el[1] =
71
+ FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);
72
+
73
+ /* Delay rebuilding hflags until we know if ZA must change. */
74
+ vq = sve_vqm1_for_el_sm(env, 0, true) + 1;
75
+
76
+ if (vq != old_vq) {
77
+ /*
78
+ * PSTATE.ZA state is cleared on any change to SVL.
79
+ * We need not call arm_rebuild_hflags because PSTATE.SM was
80
+ * cleared on syscall entry, so this hasn't changed VL.
81
+ */
82
+ env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
83
+ arm_rebuild_hflags(env);
84
+ }
85
+ return vq * 16;
86
+ }
87
+ return -TARGET_EINVAL;
88
+}
89
+#define do_prctl_sme_set_vl do_prctl_sme_set_vl
90
+
91
static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
92
{
93
ARMCPU *cpu = env_archcpu(env);
94
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/linux-user/syscall.c
97
+++ b/linux-user/syscall.c
98
@@ -XXX,XX +XXX,XX @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
99
#ifndef PR_SET_SYSCALL_USER_DISPATCH
100
# define PR_SET_SYSCALL_USER_DISPATCH 59
101
#endif
102
+#ifndef PR_SME_SET_VL
103
+# define PR_SME_SET_VL 63
104
+# define PR_SME_GET_VL 64
105
+# define PR_SME_VL_LEN_MASK 0xffff
106
+# define PR_SME_VL_INHERIT (1 << 17)
107
+#endif
108
109
#include "target_prctl.h"
110
111
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
112
#ifndef do_prctl_set_unalign
113
#define do_prctl_set_unalign do_prctl_inval1
114
#endif
115
+#ifndef do_prctl_sme_get_vl
116
+#define do_prctl_sme_get_vl do_prctl_inval0
117
+#endif
118
+#ifndef do_prctl_sme_set_vl
119
+#define do_prctl_sme_set_vl do_prctl_inval1
120
+#endif
121
122
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
123
abi_long arg3, abi_long arg4, abi_long arg5)
124
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
125
return do_prctl_sve_get_vl(env);
126
case PR_SVE_SET_VL:
127
return do_prctl_sve_set_vl(env, arg2);
128
+ case PR_SME_GET_VL:
129
+ return do_prctl_sme_get_vl(env);
130
+ case PR_SME_SET_VL:
131
+ return do_prctl_sme_set_vl(env, arg2);
132
case PR_PAC_RESET_KEYS:
133
if (arg3 || arg4 || arg5) {
134
return -TARGET_EINVAL;
135
--
136
2.25.1
1
From: Eric Auger <eric.auger@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The machine RAM attributes will need to be analyzed during the
3
There's no reason to set CPACR_EL1.ZEN if SVE is disabled.
4
configure_accelerator() process. In particular, the arm64 kvm_type()
5
machine callback will use them to know how many IPA/GPA bits are
6
needed to model the whole RAM range. So let's assign those machine
7
state fields before calling configure_accelerator.
8
4
9
Signed-off-by: Eric Auger <eric.auger@redhat.com>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20190304101339.25970-7-eric.auger@redhat.com
7
Message-id: 20220708151540.18136-44-richard.henderson@linaro.org
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
9
---
15
vl.c | 6 +++---
10
target/arm/cpu.c | 7 +++----
16
1 file changed, 3 insertions(+), 3 deletions(-)
11
1 file changed, 3 insertions(+), 4 deletions(-)
17
12
18
diff --git a/vl.c b/vl.c
13
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
19
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
20
--- a/vl.c
15
--- a/target/arm/cpu.c
21
+++ b/vl.c
16
+++ b/target/arm/cpu.c
22
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
17
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
23
machine_opts = qemu_get_machine_opts();
18
/* and to the FP/Neon instructions */
24
qemu_opt_foreach(machine_opts, machine_set_property, current_machine,
19
env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
25
&error_fatal);
20
CPACR_EL1, FPEN, 3);
26
+ current_machine->ram_size = ram_size;
21
- /* and to the SVE instructions */
27
+ current_machine->maxram_size = maxram_size;
22
- env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
28
+ current_machine->ram_slots = ram_slots;
23
- CPACR_EL1, ZEN, 3);
29
24
- /* with reasonable vector length */
30
configure_accelerator(current_machine, argv[0]);
25
+ /* and to the SVE instructions, with default vector length */
31
26
if (cpu_isar_feature(aa64_sve, cpu)) {
32
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
27
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
33
replay_checkpoint(CHECKPOINT_INIT);
28
+ CPACR_EL1, ZEN, 3);
34
qdev_machine_init();
29
env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
35
30
}
36
- current_machine->ram_size = ram_size;
31
/*
37
- current_machine->maxram_size = maxram_size;
38
- current_machine->ram_slots = ram_slots;
39
current_machine->boot_order = boot_order;
40
41
/* parse features once if machine provides default cpu_type */
42
--
32
--
43
2.20.1
33
2.25.1
44
45
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Found by inspection: Rn is the base register against which the
3
Enable SME, TPIDR2_EL0, and FA64 if supported by the cpu.
4
load began; I is the register within the mask being processed.
5
The exception return should of course be processed from the loaded PC.
6
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Message-id: 20220708151540.18136-45-richard.henderson@linaro.org
9
Message-id: 20190301202921.21209-1-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
9
---
12
target/arm/translate.c | 2 +-
10
target/arm/cpu.c | 11 +++++++++++
13
1 file changed, 1 insertion(+), 1 deletion(-)
11
1 file changed, 11 insertions(+)
14
12
15
diff --git a/target/arm/translate.c b/target/arm/translate.c
13
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
16
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate.c
15
--- a/target/arm/cpu.c
18
+++ b/target/arm/translate.c
16
+++ b/target/arm/cpu.c
19
@@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
17
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
20
} else if (i == rn) {
18
CPACR_EL1, ZEN, 3);
21
loaded_var = tmp;
19
env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
22
loaded_base = 1;
20
}
23
- } else if (rn == 15 && exc_return) {
21
+ /* and for SME instructions, with default vector length, and TPIDR2 */
24
+ } else if (i == 15 && exc_return) {
22
+ if (cpu_isar_feature(aa64_sme, cpu)) {
25
store_pc_exc_ret(s, tmp);
23
+ env->cp15.sctlr_el[1] |= SCTLR_EnTP2;
26
} else {
24
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
27
store_reg_from_load(s, i, tmp);
25
+ CPACR_EL1, SMEN, 3);
26
+ env->vfp.smcr_el[1] = cpu->sme_default_vq - 1;
27
+ if (cpu_isar_feature(aa64_sme_fa64, cpu)) {
28
+ env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1],
29
+ SMCR, FA64, 1);
30
+ }
31
+ }
32
/*
33
* Enable 48-bit address space (TODO: take reserved_va into account).
34
* Enable TBI0 but not TBI1.
28
--
35
--
29
2.20.1
36
2.25.1
30
31
1
From: Eric Auger <eric.auger@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
In preparation for a split of the memory map into a static
4
part and a dynamic part floating after the RAM, let's rename the
5
regions located after the RAM.
6
7
Signed-off-by: Eric Auger <eric.auger@redhat.com>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20190304101339.25970-3-eric.auger@redhat.com
5
Message-id: 20220708151540.18136-46-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
7
---
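For reference, a guest binary can check the new bits through the auxiliary
vector; a minimal hypothetical probe (not part of the patch, with a fallback
definition matching the bit added here):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP2_SME
    #define HWCAP2_SME (1 << 23)
    #endif

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("SME: %s\n", (hwcap2 & HWCAP2_SME) ? "present" : "absent");
        return 0;
    }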
13
include/hw/arm/virt.h | 8 ++++----
8
linux-user/elfload.c | 20 ++++++++++++++++++++
14
hw/arm/virt-acpi-build.c | 10 ++++++----
9
1 file changed, 20 insertions(+)
15
hw/arm/virt.c | 33 ++++++++++++++++++---------------
16
3 files changed, 28 insertions(+), 23 deletions(-)
17
10
18
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
11
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
19
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
20
--- a/include/hw/arm/virt.h
13
--- a/linux-user/elfload.c
21
+++ b/include/hw/arm/virt.h
14
+++ b/linux-user/elfload.c
22
@@ -XXX,XX +XXX,XX @@ enum {
15
@@ -XXX,XX +XXX,XX @@ enum {
23
VIRT_GIC_VCPU,
16
ARM_HWCAP2_A64_RNG = 1 << 16,
24
VIRT_GIC_ITS,
17
ARM_HWCAP2_A64_BTI = 1 << 17,
25
VIRT_GIC_REDIST,
18
ARM_HWCAP2_A64_MTE = 1 << 18,
26
- VIRT_GIC_REDIST2,
19
+ ARM_HWCAP2_A64_ECV = 1 << 19,
27
+ VIRT_HIGH_GIC_REDIST2,
20
+ ARM_HWCAP2_A64_AFP = 1 << 20,
28
VIRT_SMMU,
21
+ ARM_HWCAP2_A64_RPRES = 1 << 21,
29
VIRT_UART,
22
+ ARM_HWCAP2_A64_MTE3 = 1 << 22,
30
VIRT_MMIO,
23
+ ARM_HWCAP2_A64_SME = 1 << 23,
31
@@ -XXX,XX +XXX,XX @@ enum {
24
+ ARM_HWCAP2_A64_SME_I16I64 = 1 << 24,
32
VIRT_PCIE_MMIO,
25
+ ARM_HWCAP2_A64_SME_F64F64 = 1 << 25,
33
VIRT_PCIE_PIO,
26
+ ARM_HWCAP2_A64_SME_I8I32 = 1 << 26,
34
VIRT_PCIE_ECAM,
27
+ ARM_HWCAP2_A64_SME_F16F32 = 1 << 27,
35
- VIRT_PCIE_ECAM_HIGH,
28
+ ARM_HWCAP2_A64_SME_B16F32 = 1 << 28,
36
+ VIRT_HIGH_PCIE_ECAM,
29
+ ARM_HWCAP2_A64_SME_F32F32 = 1 << 29,
37
VIRT_PLATFORM_BUS,
30
+ ARM_HWCAP2_A64_SME_FA64 = 1 << 30,
38
- VIRT_PCIE_MMIO_HIGH,
39
+ VIRT_HIGH_PCIE_MMIO,
40
VIRT_GPIO,
41
VIRT_SECURE_UART,
42
VIRT_SECURE_MEM,
43
@@ -XXX,XX +XXX,XX @@ typedef struct {
44
int psci_conduit;
45
} VirtMachineState;
46
47
-#define VIRT_ECAM_ID(high) (high ? VIRT_PCIE_ECAM_HIGH : VIRT_PCIE_ECAM)
48
+#define VIRT_ECAM_ID(high) (high ? VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM)
49
50
#define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
51
#define VIRT_MACHINE(obj) \
52
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/hw/arm/virt-acpi-build.c
55
+++ b/hw/arm/virt-acpi-build.c
56
@@ -XXX,XX +XXX,XX @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
57
size_pio));
58
59
if (use_highmem) {
60
- hwaddr base_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].base;
61
- hwaddr size_mmio_high = memmap[VIRT_PCIE_MMIO_HIGH].size;
62
+ hwaddr base_mmio_high = memmap[VIRT_HIGH_PCIE_MMIO].base;
63
+ hwaddr size_mmio_high = memmap[VIRT_HIGH_PCIE_MMIO].size;
64
65
aml_append(rbuf,
66
aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
67
@@ -XXX,XX +XXX,XX @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
68
gicr = acpi_data_push(table_data, sizeof(*gicr));
69
gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR;
70
gicr->length = sizeof(*gicr);
71
- gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST2].base);
72
- gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST2].size);
73
+ gicr->base_address =
74
+ cpu_to_le64(memmap[VIRT_HIGH_GIC_REDIST2].base);
75
+ gicr->range_length =
76
+ cpu_to_le32(memmap[VIRT_HIGH_GIC_REDIST2].size);
77
}
78
79
if (its_class_name() && !vmc->no_its) {
80
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/hw/arm/virt.c
83
+++ b/hw/arm/virt.c
84
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry a15memmap[] = {
85
[VIRT_PCIE_ECAM] = { 0x3f000000, 0x01000000 },
86
[VIRT_MEM] = { 0x40000000, RAMLIMIT_BYTES },
87
/* Additional 64 MB redist region (can contain up to 512 redistributors) */
88
- [VIRT_GIC_REDIST2] = { 0x4000000000ULL, 0x4000000 },
89
- [VIRT_PCIE_ECAM_HIGH] = { 0x4010000000ULL, 0x10000000 },
90
+ [VIRT_HIGH_GIC_REDIST2] = { 0x4000000000ULL, 0x4000000 },
91
+ [VIRT_HIGH_PCIE_ECAM] = { 0x4010000000ULL, 0x10000000 },
92
/* Second PCIe window, 512GB wide at the 512GB boundary */
93
- [VIRT_PCIE_MMIO_HIGH] = { 0x8000000000ULL, 0x8000000000ULL },
94
+ [VIRT_HIGH_PCIE_MMIO] = { 0x8000000000ULL, 0x8000000000ULL },
95
};
31
};
96
32
97
static const int a15irqmap[] = {
33
#define ELF_HWCAP get_elf_hwcap()
98
@@ -XXX,XX +XXX,XX @@ static void fdt_add_gic_node(VirtMachineState *vms)
34
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap2(void)
99
2, vms->memmap[VIRT_GIC_REDIST].size);
35
GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
100
} else {
36
GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
101
qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
37
GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
102
- 2, vms->memmap[VIRT_GIC_DIST].base,
38
+ GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
103
- 2, vms->memmap[VIRT_GIC_DIST].size,
39
+ ARM_HWCAP2_A64_SME_F32F32 |
104
- 2, vms->memmap[VIRT_GIC_REDIST].base,
40
+ ARM_HWCAP2_A64_SME_B16F32 |
105
- 2, vms->memmap[VIRT_GIC_REDIST].size,
41
+ ARM_HWCAP2_A64_SME_F16F32 |
106
- 2, vms->memmap[VIRT_GIC_REDIST2].base,
42
+ ARM_HWCAP2_A64_SME_I8I32));
107
- 2, vms->memmap[VIRT_GIC_REDIST2].size);
43
+ GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
108
+ 2, vms->memmap[VIRT_GIC_DIST].base,
44
+ GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
109
+ 2, vms->memmap[VIRT_GIC_DIST].size,
45
+ GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
110
+ 2, vms->memmap[VIRT_GIC_REDIST].base,
46
111
+ 2, vms->memmap[VIRT_GIC_REDIST].size,
47
return hwcaps;
112
+ 2, vms->memmap[VIRT_HIGH_GIC_REDIST2].base,
48
}
113
+ 2, vms->memmap[VIRT_HIGH_GIC_REDIST2].size);
114
}
115
116
if (vms->virt) {
117
@@ -XXX,XX +XXX,XX @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
118
119
if (nb_redist_regions == 2) {
120
uint32_t redist1_capacity =
121
- vms->memmap[VIRT_GIC_REDIST2].size / GICV3_REDIST_SIZE;
122
+ vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE;
123
124
qdev_prop_set_uint32(gicdev, "redist-region-count[1]",
125
MIN(smp_cpus - redist0_count, redist1_capacity));
126
@@ -XXX,XX +XXX,XX @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
127
if (type == 3) {
128
sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_REDIST].base);
129
if (nb_redist_regions == 2) {
130
- sysbus_mmio_map(gicbusdev, 2, vms->memmap[VIRT_GIC_REDIST2].base);
131
+ sysbus_mmio_map(gicbusdev, 2,
132
+ vms->memmap[VIRT_HIGH_GIC_REDIST2].base);
133
}
134
} else {
135
sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
136
@@ -XXX,XX +XXX,XX @@ static void create_pcie(VirtMachineState *vms, qemu_irq *pic)
137
{
138
hwaddr base_mmio = vms->memmap[VIRT_PCIE_MMIO].base;
139
hwaddr size_mmio = vms->memmap[VIRT_PCIE_MMIO].size;
140
- hwaddr base_mmio_high = vms->memmap[VIRT_PCIE_MMIO_HIGH].base;
141
- hwaddr size_mmio_high = vms->memmap[VIRT_PCIE_MMIO_HIGH].size;
142
+ hwaddr base_mmio_high = vms->memmap[VIRT_HIGH_PCIE_MMIO].base;
143
+ hwaddr size_mmio_high = vms->memmap[VIRT_HIGH_PCIE_MMIO].size;
144
hwaddr base_pio = vms->memmap[VIRT_PCIE_PIO].base;
145
hwaddr size_pio = vms->memmap[VIRT_PCIE_PIO].size;
146
hwaddr base_ecam, size_ecam;
147
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
148
* many redistributors we can fit into the memory map.
149
*/
150
if (vms->gic_version == 3) {
151
- virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
152
- virt_max_cpus += vms->memmap[VIRT_GIC_REDIST2].size / GICV3_REDIST_SIZE;
153
+ virt_max_cpus =
154
+ vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
155
+ virt_max_cpus +=
156
+ vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE;
157
} else {
158
virt_max_cpus = GIC_NCPU;
159
}
160
--
49
--
161
2.20.1
50
2.25.1
162
163