I don't have anything else queued up at the moment, so this is just
Richard's SME patches.

-- PMM

The following changes since commit 63b38f6c85acd312c2cab68554abf33adf4ee2b3:

  Merge tag 'pull-target-arm-20220707' of https://git.linaro.org/people/pmaydell/qemu-arm into staging (2022-07-08 06:17:11 +0530)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220711

for you to fetch changes up to f9982ceaf26df27d15547a3a7990a95019e9e3a8:

  linux-user/aarch64: Add SME related hwcap entries (2022-07-11 13:43:52 +0100)

----------------------------------------------------------------
target-arm:
 * Implement SME emulation, for both system and linux-user

----------------------------------------------------------------
Richard Henderson (45):
      target/arm: Handle SME in aarch64_cpu_dump_state
      target/arm: Add infrastructure for disas_sme
      target/arm: Trap non-streaming usage when Streaming SVE is active
      target/arm: Mark ADR as non-streaming
      target/arm: Mark RDFFR, WRFFR, SETFFR as non-streaming
      target/arm: Mark BDEP, BEXT, BGRP, COMPACT, FEXPA, FTSSEL as non-streaming
      target/arm: Mark PMULL, FMMLA as non-streaming
      target/arm: Mark FTSMUL, FTMAD, FADDA as non-streaming
      target/arm: Mark SMMLA, UMMLA, USMMLA as non-streaming
      target/arm: Mark string/histo/crypto as non-streaming
      target/arm: Mark gather/scatter load/store as non-streaming
      target/arm: Mark gather prefetch as non-streaming
      target/arm: Mark LDFF1 and LDNF1 as non-streaming
      target/arm: Mark LD1RO as non-streaming
      target/arm: Add SME enablement checks
      target/arm: Handle SME in sve_access_check
      target/arm: Implement SME RDSVL, ADDSVL, ADDSPL
      target/arm: Implement SME ZERO
      target/arm: Implement SME MOVA
      target/arm: Implement SME LD1, ST1
      target/arm: Export unpredicated ld/st from translate-sve.c
      target/arm: Implement SME LDR, STR
      target/arm: Implement SME ADDHA, ADDVA
      target/arm: Implement FMOPA, FMOPS (non-widening)
      target/arm: Implement BFMOPA, BFMOPS
      target/arm: Implement FMOPA, FMOPS (widening)
      target/arm: Implement SME integer outer product
      target/arm: Implement PSEL
      target/arm: Implement REVD
      target/arm: Implement SCLAMP, UCLAMP
      target/arm: Reset streaming sve state on exception boundaries
      target/arm: Enable SME for -cpu max
      linux-user/aarch64: Clear tpidr2_el0 if CLONE_SETTLS
      linux-user/aarch64: Reset PSTATE.SM on syscalls
      linux-user/aarch64: Add SM bit to SVE signal context
      linux-user/aarch64: Tidy target_restore_sigframe error return
      linux-user/aarch64: Do not allow duplicate or short sve records
      linux-user/aarch64: Verify extra record lock succeeded
      linux-user/aarch64: Move sve record checks into restore
      linux-user/aarch64: Implement SME signal handling
      linux-user: Rename sve prctls
      linux-user/aarch64: Implement PR_SME_GET_VL, PR_SME_SET_VL
      target/arm: Only set ZEN in reset if SVE present
      target/arm: Enable SME for user-only
      linux-user/aarch64: Add SME related hwcap entries

 docs/system/arm/emulation.rst     |    4 +
 linux-user/aarch64/target_cpu.h   |    5 +-
 linux-user/aarch64/target_prctl.h |   62 +-
 target/arm/cpu.h                  |    7 +
 target/arm/helper-sme.h           |  126 ++++
 target/arm/helper-sve.h           |    4 +
 target/arm/helper.h               |   18 +
 target/arm/translate-a64.h        |   45 ++
 target/arm/translate.h            |   16 +
 target/arm/sme-fa64.decode        |   60 ++
 target/arm/sme.decode             |   88 +++
 target/arm/sve.decode             |   41 +-
 linux-user/aarch64/cpu_loop.c     |    9 +
 linux-user/aarch64/signal.c       |  243 ++++--
 linux-user/elfload.c              |   20 +
 linux-user/syscall.c              |   28 +-
 target/arm/cpu.c                  |   35 +-
 target/arm/cpu64.c                |   11 +
 target/arm/helper.c               |   56 +-
 target/arm/sme_helper.c           | 1140 +++++++++++++++++++++++++++++++++++++
 target/arm/sve_helper.c           |   28 +
 target/arm/translate-a64.c        |  103 +++-
 target/arm/translate-sme.c        |  373 ++++++++++++
 target/arm/translate-sve.c        |  393 ++++++++++---
 target/arm/translate-vfp.c        |   12 +
 target/arm/translate.c            |    2 +
 target/arm/vec_helper.c           |   24 +
 target/arm/meson.build            |    3 +
 28 files changed, 2821 insertions(+), 135 deletions(-)
 create mode 100644 target/arm/sme-fa64.decode
 create mode 100644 target/arm/sme.decode
 create mode 100644 target/arm/translate-sme.c

From: Richard Henderson <richard.henderson@linaro.org>

Dump SVCR, plus use the correct access check for Streaming Mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     int i;
     int el = arm_current_el(env);
     const char *ns_status;
+    bool sve;
 
     qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
     for (i = 0; i < 32; i++) {
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
                  el,
                  psr & PSTATE_SP ? 'h' : 't');
 
+    if (cpu_isar_feature(aa64_sme, cpu)) {
+        qemu_fprintf(f, "  SVCR=%08" PRIx64 " %c%c",
+                     env->svcr,
+                     (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'),
+                     (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
+    }
     if (cpu_isar_feature(aa64_bti, cpu)) {
         qemu_fprintf(f, "  BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
     }
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     qemu_fprintf(f, "     FPCR=%08x FPSR=%08x\n",
                  vfp_get_fpcr(env), vfp_get_fpsr(env));
 
-    if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
+    if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) {
+        sve = sme_exception_el(env, el) == 0;
+    } else if (cpu_isar_feature(aa64_sve, cpu)) {
+        sve = sve_exception_el(env, el) == 0;
+    } else {
+        sve = false;
+    }
+
+    if (sve) {
         int j, zcr_len = sve_vqm1_for_el(env, el);
 
         for (i = 0; i <= FFR_PRED_NUM; i++) {
--
2.25.1
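
Read on its own, the access-check selection this patch introduces amounts to the following standalone sketch. The helper name should_dump_sve is invented for illustration; the types and calls are the QEMU-internal ones used in the hunk above.

    /*
     * Illustrative sketch, not part of the patch: decide whether the
     * register dump may read the SVE state.  In Streaming SVE mode the
     * SME exception level is the relevant gate, not the SVE one.
     */
    static bool should_dump_sve(CPUARMState *env, ARMCPU *cpu, int el)
    {
        if (cpu_isar_feature(aa64_sme, cpu) &&
            FIELD_EX64(env->svcr, SVCR, SM)) {
            return sme_exception_el(env, el) == 0;  /* streaming mode */
        } else if (cpu_isar_feature(aa64_sve, cpu)) {
            return sve_exception_el(env, el) == 0;  /* plain SVE */
        }
        return false;  /* neither extension present */
    }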

From: Richard Henderson <richard.henderson@linaro.org>

This includes the build rules for the decoder, and the
new file for translation, but excludes any instructions.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h |  1 +
 target/arm/sme.decode      | 20 ++++++++++++++++++++
 target/arm/translate-a64.c |  7 ++++++-
 target/arm/translate-sme.c | 35 +++++++++++++++++++++++++++++++++++
 target/arm/meson.build     |  2 ++
 5 files changed, 64 insertions(+), 1 deletion(-)
 create mode 100644 target/arm/sme.decode
 create mode 100644 target/arm/translate-sme.c

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s)
 }
 
 bool disas_sve(DisasContext *, uint32_t);
+bool disas_sme(DisasContext *, uint32_t);
 
 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@
+# AArch64 SME instruction descriptions
+#
+# Copyright (c) 2022 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     }
 
     switch (extract32(insn, 25, 4)) {
-    case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
+    case 0x0:
+        if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
+            unallocated_encoding(s);
+        }
+        break;
+    case 0x1: case 0x3: /* UNALLOCATED */
         unallocated_encoding(s);
         break;
     case 0x2:
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * AArch64 SME translation
+ *
+ * Copyright (c) 2022 Linaro, Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "translate.h"
+#include "exec/helper-gen.h"
+#include "translate-a64.h"
+#include "fpu/softfloat.h"
+
+
+/*
+ * Include the generated decoder.
+ */
+
+#include "decode-sme.c.inc"
diff --git a/target/arm/meson.build b/target/arm/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -XXX,XX +XXX,XX @@
 gen = [
   decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
+  decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
   decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
   decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
   decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
@@ -XXX,XX +XXX,XX @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
   'sme_helper.c',
   'translate-a64.c',
   'translate-sve.c',
+  'translate-sme.c',
 ))
 
 arm_softmmu_ss = ss.source_set()
--
2.25.1
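
For readers unfamiliar with decodetree: once sme.decode gains patterns, the generated decode-sme.c.inc dispatches each match to a trans_<NAME> callback that translate-sme.c must define. A hypothetical sketch of that shape follows; the pattern name FOO is invented and does not appear in the tree.

    /*
     * Hypothetical illustration only.  disas_sme() returns false when
     * no pattern matches, which the caller above turns into
     * unallocated_encoding().
     */
    static bool trans_FOO(DisasContext *s, arg_FOO *a)
    {
        /* TCG code for FOO would be emitted here. */
        return true;   /* true = insn decoded and translated */
    }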

From: Richard Henderson <richard.henderson@linaro.org>

This new behaviour is in the ARM pseudocode function
AArch64.CheckFPAdvSIMDEnabled, which applies to AArch32
via AArch32.CheckAdvSIMDOrFPEnabled when the EL to which
the trap would be delivered is in AArch64 mode.

Given that ARMv9 drops support for AArch32 outside EL0, the trap EL
detection ought to be trivially true, but the pseudocode still contains
a number of conditions, and QEMU has not yet committed to dropping A32
support for EL[12] when v9 features are present.

Since the computation of SME_TRAP_NONSTREAMING is necessarily different
for the two modes, we might as well preserve bits within TBFLAG_ANY and
allocate separate bits within TBFLAG_A32 and TBFLAG_A64 instead.

Note that DDI0616A.a has typos for bits [22:21] of LD1RO in the table
of instructions illegal in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h           |  7 +++
 target/arm/translate.h     |  4 ++
 target/arm/sme-fa64.decode | 90 ++++++++++++++++++++++++++++++++++++++
 target/arm/helper.c        | 41 +++++++++++++++++
 target/arm/translate-a64.c | 40 ++++++++++++++++-
 target/arm/translate-vfp.c | 12 +++++
 target/arm/translate.c     |  2 +
 target/arm/meson.build     |  1 +
 8 files changed, 195 insertions(+), 2 deletions(-)
 create mode 100644 target/arm/sme-fa64.decode

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
  * the same thing as the current security state of the processor!
  */
 FIELD(TBFLAG_A32, NS, 10, 1)
+/*
+ * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not.
+ * This requires an SME trap from AArch32 mode when using NEON.
+ */
+FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1)
 
 /*
  * Bit usage when in AArch32 state, for M-profile only.
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2)
 FIELD(TBFLAG_A64, PSTATE_SM, 22, 1)
 FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1)
 FIELD(TBFLAG_A64, SVL, 24, 4)
+/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */
+FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1)
 
 /*
  * Helpers for using the above.
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     bool pstate_sm;
     /* True if PSTATE.ZA is set. */
     bool pstate_za;
+    /* True if non-streaming insns should raise an SME Streaming exception. */
+    bool sme_trap_nonstreaming;
+    /* True if the current instruction is non-streaming. */
+    bool is_nonstreaming;
     /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
     bool mve_no_pred;
     /*
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@
+# AArch64 SME allowed instruction decoding
+#
+# Copyright (c) 2022 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+# These patterns are taken from Appendix E1.1 of DDI0616 A.a,
+# Arm Architecture Reference Manual Supplement,
+# The Scalable Matrix Extension (SME), for Armv9-A
+
+{
+  [
+    OK  0-00 1110 0000 0001 0010 11-- ---- ----   # SMOV W|Xd,Vn.B[0]
+    OK  0-00 1110 0000 0010 0010 11-- ---- ----   # SMOV W|Xd,Vn.H[0]
+    OK  0100 1110 0000 0100 0010 11-- ---- ----   # SMOV Xd,Vn.S[0]
+    OK  0000 1110 0000 0001 0011 11-- ---- ----   # UMOV Wd,Vn.B[0]
+    OK  0000 1110 0000 0010 0011 11-- ---- ----   # UMOV Wd,Vn.H[0]
+    OK  0000 1110 0000 0100 0011 11-- ---- ----   # UMOV Wd,Vn.S[0]
+    OK  0100 1110 0000 1000 0011 11-- ---- ----   # UMOV Xd,Vn.D[0]
+  ]
+  FAIL  0--0 111- ---- ---- ---- ---- ---- ----   # Advanced SIMD vector operations
+}
+
+{
+  [
+    OK  0101 1110 --1- ---- 11-1 11-- ---- ----   # FMULX/FRECPS/FRSQRTS (scalar)
+    OK  0101 1110 -10- ---- 00-1 11-- ---- ----   # FMULX/FRECPS/FRSQRTS (scalar, FP16)
+    OK  01-1 1110 1-10 0001 11-1 10-- ---- ----   # FRECPE/FRSQRTE/FRECPX (scalar)
+    OK  01-1 1110 1111 1001 11-1 10-- ---- ----   # FRECPE/FRSQRTE/FRECPX (scalar, FP16)
+  ]
+  FAIL  01-1 111- ---- ---- ---- ---- ---- ----   # Advanced SIMD single-element operations
+}
+
+FAIL    0-00 110- ---- ---- ---- ---- ---- ----   # Advanced SIMD structure load/store
+FAIL    1100 1110 ---- ---- ---- ---- ---- ----   # Advanced SIMD cryptography extensions
+FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
+
+# These are the "avoidance of doubt" final table of Illegal Advanced SIMD instructions
+# We don't actually need to include these, as the default is OK.
+#       -001 111- ---- ---- ---- ---- ---- ----   # Scalar floating-point operations
+#       --10 110- ---- ---- ---- ---- ---- ----   # Load/store pair of FP registers
+#       --01 1100 ---- ---- ---- ---- ---- ----   # Load FP register (PC-relative literal)
+#       --11 1100 --0- ---- ---- ---- ---- ----   # Load/store FP register (unscaled imm)
+#       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
+#       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
+
+FAIL    0000 0100 --1- ---- 1010 ---- ---- ----   # ADR
+FAIL    0000 0100 --1- ---- 1011 -0-- ---- ----   # FTSSEL, FEXPA
+FAIL    0000 0101 --10 0001 100- ---- ---- ----   # COMPACT
+FAIL    0010 0101 --01 100- 1111 000- ---0 ----   # RDFFR, RDFFRS
+FAIL    0010 0101 --10 1--- 1001 ---- ---- ----   # WRFFR, SETFFR
+FAIL    0100 0101 --0- ---- 1011 ---- ---- ----   # BDEP, BEXT, BGRP
+FAIL    0100 0101 000- ---- 0110 1--- ---- ----   # PMULLB, PMULLT (128b result)
+FAIL    0110 0100 --1- ---- 1110 01-- ---- ----   # FMMLA, BFMMLA
+FAIL    0110 0101 --0- ---- 0000 11-- ---- ----   # FTSMUL
+FAIL    0110 0101 --01 0--- 100- ---- ---- ----   # FTMAD
+FAIL    0110 0101 --01 1--- 001- ---- ---- ----   # FADDA
+FAIL    0100 0101 --0- ---- 1001 10-- ---- ----   # SMMLA, UMMLA, USMMLA
+FAIL    0100 0101 --1- ---- 1--- ---- ---- ----   # SVE2 string/histo/crypto instructions
+FAIL    1000 010- -00- ---- 10-- ---- ---- ----   # SVE2 32-bit gather NT load (vector+scalar)
+FAIL    1000 010- -00- ---- 111- ---- ---- ----   # SVE 32-bit gather prefetch (vector+imm)
+FAIL    1000 0100 0-1- ---- 0--- ---- ---- ----   # SVE 32-bit gather prefetch (scalar+vector)
+FAIL    1000 010- -01- ---- 1--- ---- ---- ----   # SVE 32-bit gather load (vector+imm)
+FAIL    1000 0100 0-0- ---- 0--- ---- ---- ----   # SVE 32-bit gather load byte (scalar+vector)
+FAIL    1000 0100 1--- ---- 0--- ---- ---- ----   # SVE 32-bit gather load half (scalar+vector)
+FAIL    1000 0101 0--- ---- 0--- ---- ---- ----   # SVE 32-bit gather load word (scalar+vector)
+FAIL    1010 010- ---- ---- 011- ---- ---- ----   # SVE contiguous FF load (scalar+scalar)
+FAIL    1010 010- ---1 ---- 101- ---- ---- ----   # SVE contiguous NF load (scalar+imm)
+FAIL    1010 010- -01- ---- 000- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+scalar)
+FAIL    1010 010- -010 ---- 001- ---- ---- ----   # SVE load & replicate 32 bytes (scalar+imm)
+FAIL    1100 010- ---- ---- ---- ---- ---- ----   # SVE 64-bit gather load/prefetch
+FAIL    1110 010- -00- ---- 001- ---- ---- ----   # SVE2 64-bit scatter NT store (vector+scalar)
+FAIL    1110 010- -10- ---- 001- ---- ---- ----   # SVE2 32-bit scatter NT store (vector+scalar)
+FAIL    1110 010- ---- ---- 1-0- ---- ---- ----   # SVE scatter store (scalar+32-bit vector)
+FAIL    1110 010- ---- ---- 101- ---- ---- ----   # SVE scatter store (misc)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ int sme_exception_el(CPUARMState *env, int el)
     return 0;
 }
 
+/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
+static bool sme_fa64(CPUARMState *env, int el)
+{
+    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
+        return false;
+    }
+
+    if (el <= 1 && !el_is_in_host(env, el)) {
+        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
+            return false;
+        }
+    }
+    if (el <= 2 && arm_is_el2_enabled(env)) {
+        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
+            return false;
+        }
+    }
+    if (arm_feature(env, ARM_FEATURE_EL3)) {
+        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
 /*
  * Given that SVE is enabled, return the vector length for EL.
  */
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
         DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
     }
 
+    /*
+     * The SME exception we are testing for is raised via
+     * AArch64.CheckFPAdvSIMDEnabled(), as called from
+     * AArch32.CheckAdvSIMDOrFPEnabled().
+     */
+    if (el == 0
+        && FIELD_EX64(env->svcr, SVCR, SM)
+        && (!arm_is_el2_enabled(env)
+            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
+        && arm_el_is_aa64(env, 1)
+        && !sme_fa64(env, el)) {
+        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
+    }
+
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }
 
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
         }
         if (FIELD_EX64(env->svcr, SVCR, SM)) {
             DP_TBFLAG_A64(flags, PSTATE_SM, 1);
+            DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
         }
         DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
     }
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void do_vec_ld(DisasContext *s, int destidx, int element,
  * unallocated-encoding checks (otherwise the syndrome information
  * for the resulting exception will be incorrect).
  */
-static bool fp_access_check(DisasContext *s)
+static bool fp_access_check_only(DisasContext *s)
 {
     if (s->fp_excp_el) {
         assert(!s->fp_access_checked);
@@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s)
     return true;
 }
 
+static bool fp_access_check(DisasContext *s)
+{
+    if (!fp_access_check_only(s)) {
+        return false;
+    }
+    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_smetrap(SME_ET_Streaming, false));
+        return false;
+    }
+    return true;
+}
+
 /* Check that SVE access is enabled.  If it is, return true.
  * If not, emit code to generate an appropriate exception and return false.
  */
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
     default:
         g_assert_not_reached();
     }
-    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
+    if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
         return;
     } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
         return;
@@ -XXX,XX +XXX,XX @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
     }
 }
 
+/*
+ * Include the generated SME FA64 decoder.
+ */
+
+#include "decode-sme-fa64.c.inc"
+
+static bool trans_OK(DisasContext *s, arg_OK *a)
+{
+    return true;
+}
+
+static bool trans_FAIL(DisasContext *s, arg_OK *a)
+{
+    s->is_nonstreaming = true;
+    return true;
+}
+
 /**
  * is_guarded_page:
  * @env: The cpu environment
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
     dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
     dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
     dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
+    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
     dc->vec_len = 0;
     dc->vec_stride = 0;
     dc->cp_regs = arm_cpu->cp_regs;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         }
     }
 
+    s->is_nonstreaming = false;
+    if (s->sme_trap_nonstreaming) {
+        disas_sme_fa64(s, insn);
+    }
+
     switch (extract32(insn, 25, 4)) {
     case 0x0:
         if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
         return false;
     }
 
+    /*
+     * Note that rebuild_hflags_a32 has already accounted for being in EL0
+     * and the higher EL in A64 mode, etc.  Unlike A64 mode, there do not
+     * appear to be any insns which touch VFP which are allowed.
+     */
+    if (s->sme_trap_nonstreaming) {
+        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+                           syn_smetrap(SME_ET_Streaming,
+                                       s->base.pc_next - s->pc_curr == 2));
+        return false;
+    }
+
     if (!s->vfp_enabled && !ignore_vfp_enabled) {
         assert(!arm_dc_feature(s, ARM_FEATURE_M));
         unallocated_encoding(s);
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
             dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
             dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
         }
+        dc->sme_trap_nonstreaming =
+            EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
     }
     dc->cp_regs = cpu->cp_regs;
     dc->features = env->features;
diff --git a/target/arm/meson.build b/target/arm/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -XXX,XX +XXX,XX @@
 gen = [
   decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
   decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
+  decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'),
   decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
   decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
   decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
--
2.25.1
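
Condensed from the hunks above, the per-instruction flow in AArch64 mode is two-stage. This is a sketch stitched together from this patch's own code, not a separate implementation:

    /* In aarch64_tr_translate_insn(): classify the insn up front. */
    s->is_nonstreaming = false;
    if (s->sme_trap_nonstreaming) {
        disas_sme_fa64(s, insn);   /* trans_FAIL marks non-streaming insns */
    }

    /* Later, in fp_access_check(): turn the mark into a trap. */
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
    }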

From: Richard Henderson <richard.henderson@linaro.org>

Mark ADR as a non-streaming instruction, which should trap
if full a64 support is not enabled in streaming mode.

Removing entries from sme-fa64.decode is an easy way to see
what remains to be done.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate.h     | 7 +++++++
 target/arm/sme-fa64.decode | 1 -
 target/arm/translate-sve.c | 8 ++++----
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
     static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
     { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }
 
+#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) \
+    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
+    { \
+        s->is_nonstreaming = true; \
+        return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \
+    }
+
 #endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    0000 0100 --1- ---- 1010 ---- ---- ----   # ADR
 FAIL    0000 0100 --1- ---- 1011 -0-- ---- ----   # FTSSEL, FEXPA
 FAIL    0000 0101 --10 0001 100- ---- ---- ----   # COMPACT
 FAIL    0010 0101 --01 100- 1111 000- ---0 ----   # RDFFR, RDFFRS
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
     return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
 }
 
-TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
-TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
-TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
-TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
+TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
+TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
+TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
+TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
 
 /*
 *** SVE Integer Misc - Unpredicated Group
--
2.25.1
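
To make the new macro concrete, here is what the first of those lines expands to, derived mechanically from the TRANS_FEAT_NONSTREAMING definition added above (an illustration, not code that appears verbatim in the tree):

    /*
     * TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a,
     *                         gen_helper_sve_adr_p32)
     * expands to the function below.  Note that is_nonstreaming is set
     * before the feature test, so fp_access_check() always sees it.
     */
    static bool trans_ADR_p32(DisasContext *s, arg_ADR_p32 *a)
    {
        s->is_nonstreaming = true;
        return dc_isar_feature(aa64_sve, s)
            && do_adr(s, a, gen_helper_sve_adr_p32);
    }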

From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-6-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode | 2 --
 target/arm/translate-sve.c | 9 ++++++---
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 
 FAIL    0000 0100 --1- ---- 1011 -0-- ---- ----   # FTSSEL, FEXPA
 FAIL    0000 0101 --10 0001 100- ---- ---- ----   # COMPACT
-FAIL    0010 0101 --01 100- 1111 000- ---0 ----   # RDFFR, RDFFRS
-FAIL    0010 0101 --10 1--- 1001 ---- ---- ----   # WRFFR, SETFFR
 FAIL    0100 0101 --0- ---- 1011 ---- ---- ----   # BDEP, BEXT, BGRP
 FAIL    0100 0101 000- ---- 0110 1--- ---- ----   # PMULLB, PMULLT (128b result)
 FAIL    0110 0100 --1- ---- 1110 01-- ---- ----   # FMMLA, BFMMLA
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
 TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)
 
 /* Note pat == 31 is #all, to set all elements. */
-TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false)
+TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve,
+                        do_predset, 0, FFR_PRED_NUM, 31, false)
 
 /* Note pat == 32 is #unimp, to set no elements. */
 TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false)
@@ -XXX,XX +XXX,XX @@ static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
         .rd = a->rd, .pg = a->pg, .s = a->s,
         .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
     };
+
+    s->is_nonstreaming = true;
     return trans_AND_pppp(s, &alt_a);
 }
 
-TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
-TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
+TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
+TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
 
 static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
                             void (*gen_fn)(TCGv_i32, TCGv_ptr,
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-7-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode |  3 ---
 target/arm/translate-sve.c | 22 ++++++++++++----------
 2 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    0000 0100 --1- ---- 1011 -0-- ---- ----   # FTSSEL, FEXPA
-FAIL    0000 0101 --10 0001 100- ---- ---- ----   # COMPACT
-FAIL    0100 0101 --0- ---- 1011 ---- ---- ----   # BDEP, BEXT, BGRP
 FAIL    0100 0101 000- ---- 0110 1--- ---- ----   # PMULLB, PMULLT (128b result)
 FAIL    0110 0100 --1- ---- 1110 01-- ---- ----   # FMMLA, BFMMLA
 FAIL    0110 0101 --0- ---- 0000 11-- ---- ----   # FTSMUL
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_2 * const fexpa_fns[4] = {
     NULL,                   gen_helper_sve_fexpa_h,
     gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
 };
-TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz,
-           fexpa_fns[a->esz], a->rd, a->rn, 0)
+TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz,
+                        fexpa_fns[a->esz], a->rd, a->rn, 0)
 
 static gen_helper_gvec_3 * const ftssel_fns[4] = {
     NULL,                    gen_helper_sve_ftssel_h,
     gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
 };
-TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz,
+                        ftssel_fns[a->esz], a, 0)
 
 /*
 *** SVE Predicate Logical Operations Group
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
 static gen_helper_gvec_3 * const compact_fns[4] = {
     NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
 };
-TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz,
+                        compact_fns[a->esz], a, 0)
 
 /* Call the helper that computes the ARM LastActiveElement pseudocode
  * function, scaled by the element size.  This includes the not found
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3 * const bext_fns[4] = {
     gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
     gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
 };
-TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
-           bext_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
+                        bext_fns[a->esz], a, 0)
 
 static gen_helper_gvec_3 * const bdep_fns[4] = {
     gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
     gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
 };
-TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
-           bdep_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
+                        bdep_fns[a->esz], a, 0)
 
 static gen_helper_gvec_3 * const bgrp_fns[4] = {
     gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
     gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
 };
-TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
-           bgrp_fns[a->esz], a, 0)
+TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
+                        bgrp_fns[a->esz], a, 0)
 
 static gen_helper_gvec_3 * const cadd_fns[4] = {
     gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-8-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode |  2 --
 target/arm/translate-sve.c | 24 +++++++++++++++---------
 2 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    0100 0101 000- ---- 0110 1--- ---- ----   # PMULLB, PMULLT (128b result)
-FAIL    0110 0100 --1- ---- 1110 01-- ---- ----   # FMMLA, BFMMLA
 FAIL    0110 0101 --0- ---- 0000 11-- ---- ----   # FTSMUL
 FAIL    0110 0101 --01 0--- 100- ---- ---- ----   # FTMAD
 FAIL    0110 0101 --01 1--- 001- ---- ---- ----   # FADDA
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
         gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
         NULL,                    gen_helper_sve2_pmull_d,
     };
-    if (a->esz == 0
-        ? !dc_isar_feature(aa64_sve2_pmull128, s)
-        : !dc_isar_feature(aa64_sve, s)) {
+
+    if (a->esz == 0) {
+        if (!dc_isar_feature(aa64_sve2_pmull128, s)) {
+            return false;
+        }
+        s->is_nonstreaming = true;
+    } else if (!dc_isar_feature(aa64_sve, s)) {
         return false;
     }
     return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
@@ -XXX,XX +XXX,XX @@ DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz)
  * SVE Integer Multiply-Add (unpredicated)
  */
 
-TRANS_FEAT(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_s,
-           a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
-TRANS_FEAT(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_d,
-           a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
+TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz,
+                        gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra,
+                        0, FPST_FPCR)
+TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz,
+                        gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra,
+                        0, FPST_FPCR)
 
 static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
     NULL, gen_helper_sve2_sqdmlal_zzzw_h,
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
 TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
            gen_helper_gvec_bfdot_idx, a)
 
-TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
-           gen_helper_gvec_bfmmla, a, 0)
+TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
+                        gen_helper_gvec_bfmmla, a, 0)
 
 static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
 {
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Mark these as non-streaming instructions, which should trap
if full a64 support is not enabled in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-9-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme-fa64.decode |  3 ---
 target/arm/translate-sve.c | 15 +++++++++++----
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme-fa64.decode
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@ FAIL    0001 1110 0111 1110 0000 00-- ---- ----   # FJCVTZS
 #       --11 1100 --1- ---- ---- ---- ---- --10   # Load/store FP register (register offset)
 #       --11 1101 ---- ---- ---- ---- ---- ----   # Load/store FP register (scaled imm)
 
-FAIL    0110 0101 --0- ---- 0000 11-- ---- ----   # FTSMUL
-FAIL    0110 0101 --01 0--- 100- ---- ---- ----   # FTMAD
-FAIL    0110 0101 --01 1--- 001- ---- ---- ----   # FADDA
 FAIL    0100 0101 --0- ---- 1001 10-- ---- ----   # SMMLA, UMMLA, USMMLA
 FAIL    0100 0101 --1- ---- 1--- ---- ---- ----   # SVE2 string/histo/crypto instructions
 FAIL    1000 010- -00- ---- 10-- ---- ---- ----   # SVE2 32-bit gather NT load (vector+scalar)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = {
     NULL,                   gen_helper_sve_ftmad_h,
     gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d,
 };
-TRANS_FEAT(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
-           ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
-           a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
+TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
+                        ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
+                        a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
 
 /*
  *** SVE Floating Point Accumulating Reduction Group
@@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
     if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
         return false;
     }
+    s->is_nonstreaming = true;
     if (!sve_access_check(s)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
 DO_FP3(FADD_zzz, fadd)
 DO_FP3(FSUB_zzz, fsub)
 DO_FP3(FMUL_zzz, fmul)
-DO_FP3(FTSMUL, ftsmul)
 DO_FP3(FRECPS, recps)
 DO_FP3(FRSQRTS, rsqrts)
 
 #undef DO_FP3
 
+static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = {
+    NULL,                     gen_helper_gvec_ftsmul_h,
+    gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d
+};
+TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz,
+                        ftsmul_fns[a->esz], a, 0)
+
 /*
  *** SVE Floating Point Arithmetic - Predicated Group
  */
--
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full A64 support is not enabled in streaming mode.
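
For context, the FAIL patterns being deleted from sme-fa64.decode are a
blanket decode-level marker; in this series they funnel into a tiny
translator that only flags the insn (sketched from memory, so treat the
exact shape as an assumption):

static bool trans_FAIL(DisasContext *s, arg_OK *a)
{
    /* The actual trap is raised later, in the access-check path. */
    s->is_nonstreaming = true;
    return true;
}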
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-10-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 1 -
12
target/arm/translate-sve.c | 12 ++++++------
13
2 files changed, 6 insertions(+), 7 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA
24
FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
25
FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
26
FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-sve.c
30
+++ b/target/arm/translate-sve.c
31
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true)
32
TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false)
33
TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true)
34
35
-TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
36
- gen_helper_gvec_smmla_b, a, 0)
37
-TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
38
- gen_helper_gvec_usmmla_b, a, 0)
39
-TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
40
- gen_helper_gvec_ummla_b, a, 0)
41
+TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
42
+ gen_helper_gvec_smmla_b, a, 0)
43
+TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
44
+ gen_helper_gvec_usmmla_b, a, 0)
45
+TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
46
+ gen_helper_gvec_ummla_b, a, 0)
47
48
TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
49
gen_helper_gvec_bfdot, a, 0)
50
--
51
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full A64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-11-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 1 -
12
target/arm/translate-sve.c | 35 ++++++++++++++++++-----------------
13
2 files changed, 18 insertions(+), 18 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
24
FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
25
FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
26
FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector)
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-sve.c
30
+++ b/target/arm/translate-sve.c
31
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
32
static gen_helper_gvec_flags_4 * const match_fns[4] = {
33
gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL
34
};
35
-TRANS_FEAT(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
36
+TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
37
38
static gen_helper_gvec_flags_4 * const nmatch_fns[4] = {
39
gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL
40
};
41
-TRANS_FEAT(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
42
+TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
43
44
static gen_helper_gvec_4 * const histcnt_fns[4] = {
45
NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
46
};
47
-TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
48
- histcnt_fns[a->esz], a, 0)
49
+TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
50
+ histcnt_fns[a->esz], a, 0)
51
52
-TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
53
- a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
54
+TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
55
+ a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
56
57
DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz)
58
DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz)
59
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
60
TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
61
a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)
62
63
-TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
64
- gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
65
+TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
66
+ gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
67
68
-TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
69
- gen_helper_crypto_aese, a, false)
70
-TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
71
- gen_helper_crypto_aese, a, true)
72
+TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
73
+ gen_helper_crypto_aese, a, false)
74
+TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
75
+ gen_helper_crypto_aese, a, true)
76
77
-TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
78
- gen_helper_crypto_sm4e, a, 0)
79
-TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
80
- gen_helper_crypto_sm4ekey, a, 0)
81
+TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
82
+ gen_helper_crypto_sm4e, a, 0)
83
+TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
84
+ gen_helper_crypto_sm4ekey, a, 0)
85
86
-TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a)
87
+TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz,
88
+ gen_gvec_rax1, a)
89
90
TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz,
91
gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR)
92
--
93
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full A64 support is not enabled in streaming mode.
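
The hand-written trans_* functions below set s->is_nonstreaming
directly before calling sve_access_check(); the trap itself is emitted
centrally. Approximately (a sketch, not quoted from this patch):

    /* In the access-check path, added earlier in the series: */
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }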
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-12-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 9 ---------
12
target/arm/translate-sve.c | 6 ++++++
13
2 files changed, 6 insertions(+), 9 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
24
FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
25
FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector)
26
-FAIL 1000 010- -01- ---- 1--- ---- ---- ---- # SVE 32-bit gather load (vector+imm)
27
-FAIL 1000 0100 0-0- ---- 0--- ---- ---- ---- # SVE 32-bit gather load byte (scalar+vector)
28
-FAIL 1000 0100 1--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load half (scalar+vector)
29
-FAIL 1000 0101 0--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load word (scalar+vector)
30
FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar)
31
FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm)
32
FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
33
FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
34
FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch
35
-FAIL 1110 010- -00- ---- 001- ---- ---- ---- # SVE2 64-bit scatter NT store (vector+scalar)
36
-FAIL 1110 010- -10- ---- 001- ---- ---- ---- # SVE2 32-bit scatter NT store (vector+scalar)
37
-FAIL 1110 010- ---- ---- 1-0- ---- ---- ---- # SVE scatter store (scalar+32-bit vector)
38
-FAIL 1110 010- ---- ---- 101- ---- ---- ---- # SVE scatter store (misc)
39
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/target/arm/translate-sve.c
42
+++ b/target/arm/translate-sve.c
43
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
44
if (!dc_isar_feature(aa64_sve, s)) {
45
return false;
46
}
47
+ s->is_nonstreaming = true;
48
if (!sve_access_check(s)) {
49
return true;
50
}
51
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
52
if (!dc_isar_feature(aa64_sve, s)) {
53
return false;
54
}
55
+ s->is_nonstreaming = true;
56
if (!sve_access_check(s)) {
57
return true;
58
}
59
@@ -XXX,XX +XXX,XX @@ static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
60
if (!dc_isar_feature(aa64_sve2, s)) {
61
return false;
62
}
63
+ s->is_nonstreaming = true;
64
if (!sve_access_check(s)) {
65
return true;
66
}
67
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
68
if (!dc_isar_feature(aa64_sve, s)) {
69
return false;
70
}
71
+ s->is_nonstreaming = true;
72
if (!sve_access_check(s)) {
73
return true;
74
}
75
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
76
if (!dc_isar_feature(aa64_sve, s)) {
77
return false;
78
}
79
+ s->is_nonstreaming = true;
80
if (!sve_access_check(s)) {
81
return true;
82
}
83
@@ -XXX,XX +XXX,XX @@ static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
84
if (!dc_isar_feature(aa64_sve2, s)) {
85
return false;
86
}
87
+ s->is_nonstreaming = true;
88
if (!sve_access_check(s)) {
89
return true;
90
}
91
--
92
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap if full
4
A64 support is not enabled in streaming mode. In this case, introduce
5
PRF_ns (prefetch non-streaming) to handle the checks.
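
A note on the mechanics, since the diff mostly shows a rename:
decodetree generates a separate trans_PRF_ns() entry point for the
renamed patterns, so the streaming check applies to exactly those
prefetch forms while plain PRF is untouched. The pre-existing PRF
handler, paraphrased for comparison (an approximation, not a quote):

static bool trans_PRF(DisasContext *s, arg_PRF *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    /* Prefetch is a nop within QEMU. */
    (void)sve_access_check(s);
    return true;
}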
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220708151540.18136-13-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/sme-fa64.decode | 3 ---
13
target/arm/sve.decode | 10 +++++-----
14
target/arm/translate-sve.c | 11 +++++++++++
15
3 files changed, 16 insertions(+), 8 deletions(-)
16
17
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/sme-fa64.decode
20
+++ b/target/arm/sme-fa64.decode
21
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
22
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
23
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
24
25
-FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
26
-FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector)
27
FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar)
28
FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm)
29
FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
30
FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
31
-FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch
32
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/sve.decode
35
+++ b/target/arm/sve.decode
36
@@ -XXX,XX +XXX,XX @@ LD1RO_zpri 1010010 .. 01 0.... 001 ... ..... ..... \
37
@rpri_load_msz nreg=0
38
39
# SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets)
40
-PRF 1000010 00 -1 ----- 0-- --- ----- 0 ----
41
+PRF_ns 1000010 00 -1 ----- 0-- --- ----- 0 ----
42
43
# SVE 32-bit gather prefetch (vector plus immediate)
44
-PRF 1000010 -- 00 ----- 111 --- ----- 0 ----
45
+PRF_ns 1000010 -- 00 ----- 111 --- ----- 0 ----
46
47
# SVE contiguous prefetch (scalar plus immediate)
48
PRF 1000010 11 1- ----- 0-- --- ----- 0 ----
49
@@ -XXX,XX +XXX,XX @@ LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \
50
@rpri_g_load esz=3
51
52
# SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets)
53
-PRF 1100010 00 11 ----- 1-- --- ----- 0 ----
54
+PRF_ns 1100010 00 11 ----- 1-- --- ----- 0 ----
55
56
# SVE 64-bit gather prefetch (scalar plus unpacked 32-bit scaled offsets)
57
-PRF 1100010 00 -1 ----- 0-- --- ----- 0 ----
58
+PRF_ns 1100010 00 -1 ----- 0-- --- ----- 0 ----
59
60
# SVE 64-bit gather prefetch (vector plus immediate)
61
-PRF 1100010 -- 00 ----- 111 --- ----- 0 ----
62
+PRF_ns 1100010 -- 00 ----- 111 --- ----- 0 ----
63
64
### SVE Memory Store Group
65
66
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/arm/translate-sve.c
69
+++ b/target/arm/translate-sve.c
70
@@ -XXX,XX +XXX,XX @@ static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
71
return true;
72
}
73
74
+static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a)
75
+{
76
+ if (!dc_isar_feature(aa64_sve, s)) {
77
+ return false;
78
+ }
79
+ /* Prefetch is a nop within QEMU. */
80
+ s->is_nonstreaming = true;
81
+ (void)sve_access_check(s);
82
+ return true;
83
+}
84
+
85
/*
86
* Move Prefix
87
*
88
--
89
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full A64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-14-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 2 --
12
target/arm/translate-sve.c | 2 ++
13
2 files changed, 2 insertions(+), 2 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar)
24
-FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm)
25
FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
26
FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-sve.c
30
+++ b/target/arm/translate-sve.c
31
@@ -XXX,XX +XXX,XX @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
32
if (!dc_isar_feature(aa64_sve, s)) {
33
return false;
34
}
35
+ s->is_nonstreaming = true;
36
if (sve_access_check(s)) {
37
TCGv_i64 addr = new_tmp_a64(s);
38
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
39
@@ -XXX,XX +XXX,XX @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
40
if (!dc_isar_feature(aa64_sve, s)) {
41
return false;
42
}
43
+ s->is_nonstreaming = true;
44
if (sve_access_check(s)) {
45
int vsz = vec_full_reg_size(s);
46
int elements = vsz >> dtype_esz[a->dtype];
47
--
48
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full A64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-15-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 3 ---
12
target/arm/translate-sve.c | 2 ++
13
2 files changed, 2 insertions(+), 3 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm)
21
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
22
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
23
-
24
-FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
25
-FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
26
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/translate-sve.c
29
+++ b/target/arm/translate-sve.c
30
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
31
if (a->rm == 31) {
32
return false;
33
}
34
+ s->is_nonstreaming = true;
35
if (sve_access_check(s)) {
36
TCGv_i64 addr = new_tmp_a64(s);
37
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
38
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
39
if (!dc_isar_feature(aa64_sve_f64mm, s)) {
40
return false;
41
}
42
+ s->is_nonstreaming = true;
43
if (sve_access_check(s)) {
44
TCGv_i64 addr = new_tmp_a64(s);
45
tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
46
--
47
2.25.1
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
These functions will be used to verify that the CPU
4
is in the correct state for a given instruction.
2
5
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20180508151437.4232-10-richard.henderson@linaro.org
8
Message-id: 20220708151540.18136-16-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
10
---
8
target/arm/helper-a64.h | 2 +
11
target/arm/translate-a64.h | 21 +++++++++++++++++++++
9
target/arm/helper-a64.c | 43 ++++++++++++++
12
target/arm/translate-a64.c | 34 ++++++++++++++++++++++++++++++++++
10
target/arm/translate-a64.c | 119 ++++++++++++++++++++++++++++++++++++-
13
2 files changed, 55 insertions(+)
11
3 files changed, 161 insertions(+), 3 deletions(-)
12
14
13
diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
15
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/helper-a64.h
17
--- a/target/arm/translate-a64.h
16
+++ b/target/arm/helper-a64.h
18
+++ b/target/arm/translate-a64.h
17
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(paired_cmpxchg64_le_parallel, TCG_CALL_NO_WG,
19
@@ -XXX,XX +XXX,XX @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
18
DEF_HELPER_FLAGS_4(paired_cmpxchg64_be, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
20
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
19
DEF_HELPER_FLAGS_4(paired_cmpxchg64_be_parallel, TCG_CALL_NO_WG,
21
unsigned int imms, unsigned int immr);
20
i64, env, i64, i64, i64)
22
bool sve_access_check(DisasContext *s);
21
+DEF_HELPER_5(casp_le_parallel, void, env, i32, i64, i64, i64)
23
+bool sme_enabled_check(DisasContext *s);
22
+DEF_HELPER_5(casp_be_parallel, void, env, i32, i64, i64, i64)
24
+bool sme_enabled_check_with_svcr(DisasContext *s, unsigned);
23
DEF_HELPER_FLAGS_3(advsimd_maxh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
25
+
24
DEF_HELPER_FLAGS_3(advsimd_minh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
26
+/* This function corresponds to CheckStreamingSVEEnabled. */
25
DEF_HELPER_FLAGS_3(advsimd_maxnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
27
+static inline bool sme_sm_enabled_check(DisasContext *s)
26
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/helper-a64.c
29
+++ b/target/arm/helper-a64.c
30
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
31
return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, true, GETPC());
32
}
33
34
+/* Writes back the old data into Rs. */
35
+void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
36
+ uint64_t new_lo, uint64_t new_hi)
37
+{
28
+{
38
+ uintptr_t ra = GETPC();
29
+ return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK);
39
+#ifndef CONFIG_ATOMIC128
40
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
41
+#else
42
+ Int128 oldv, cmpv, newv;
43
+
44
+ cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
45
+ newv = int128_make128(new_lo, new_hi);
46
+
47
+ int mem_idx = cpu_mmu_index(env, false);
48
+ TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
49
+ oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
50
+
51
+ env->xregs[rs] = int128_getlo(oldv);
52
+ env->xregs[rs + 1] = int128_gethi(oldv);
53
+#endif
54
+}
30
+}
55
+
31
+
56
+void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
32
+/* This function corresponds to CheckSMEAndZAEnabled. */
57
+ uint64_t new_hi, uint64_t new_lo)
33
+static inline bool sme_za_enabled_check(DisasContext *s)
58
+{
34
+{
59
+ uintptr_t ra = GETPC();
35
+ return sme_enabled_check_with_svcr(s, R_SVCR_ZA_MASK);
60
+#ifndef CONFIG_ATOMIC128
61
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
62
+#else
63
+ Int128 oldv, cmpv, newv;
64
+
65
+ cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
66
+ newv = int128_make128(new_lo, new_hi);
67
+
68
+ int mem_idx = cpu_mmu_index(env, false);
69
+ TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
70
+ oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
71
+
72
+ env->xregs[rs + 1] = int128_getlo(oldv);
73
+ env->xregs[rs] = int128_gethi(oldv);
74
+#endif
75
+}
36
+}
76
+
37
+
77
/*
38
+/* Note that this function corresponds to CheckStreamingSVEAndZAEnabled. */
78
* AdvSIMD half-precision
39
+static inline bool sme_smza_enabled_check(DisasContext *s)
79
*/
40
+{
41
+ return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
42
+}
43
+
44
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
45
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
46
bool tag_checked, int log2_size);
80
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
47
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
81
index XXXXXXX..XXXXXXX 100644
48
index XXXXXXX..XXXXXXX 100644
82
--- a/target/arm/translate-a64.c
49
--- a/target/arm/translate-a64.c
83
+++ b/target/arm/translate-a64.c
50
+++ b/target/arm/translate-a64.c
84
@@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
51
@@ -XXX,XX +XXX,XX @@ static bool sme_access_check(DisasContext *s)
85
tcg_gen_movi_i64(cpu_exclusive_addr, -1);
52
return true;
86
}
53
}
87
54
88
+static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
55
+/* This function corresponds to CheckSMEEnabled. */
89
+ int rn, int size)
56
+bool sme_enabled_check(DisasContext *s)
90
+{
57
+{
91
+ TCGv_i64 tcg_rs = cpu_reg(s, rs);
58
+ /*
92
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
59
+ * Note that unlike sve_excp_el, we have not constrained sme_excp_el
93
+ int memidx = get_mem_index(s);
60
+ * to be zero when fp_excp_el has priority. This is because we need
94
+ TCGv_i64 addr = cpu_reg_sp(s, rn);
61
+ * sme_excp_el by itself for cpregs access checks.
95
+
62
+ */
96
+ if (rn == 31) {
63
+ if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
97
+ gen_check_sp_alignment(s);
64
+ s->fp_access_checked = true;
65
+ return sme_access_check(s);
98
+ }
66
+ }
99
+ tcg_gen_atomic_cmpxchg_i64(tcg_rs, addr, tcg_rs, tcg_rt, memidx,
67
+ return fp_access_check_only(s);
100
+ size | MO_ALIGN | s->be_data);
101
+}
68
+}
102
+
69
+
103
+static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
70
+/* Common subroutine for CheckSMEAnd*Enabled. */
104
+ int rn, int size)
71
+bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
105
+{
72
+{
106
+ TCGv_i64 s1 = cpu_reg(s, rs);
73
+ if (!sme_enabled_check(s)) {
107
+ TCGv_i64 s2 = cpu_reg(s, rs + 1);
74
+ return false;
108
+ TCGv_i64 t1 = cpu_reg(s, rt);
109
+ TCGv_i64 t2 = cpu_reg(s, rt + 1);
110
+ TCGv_i64 addr = cpu_reg_sp(s, rn);
111
+ int memidx = get_mem_index(s);
112
+
113
+ if (rn == 31) {
114
+ gen_check_sp_alignment(s);
115
+ }
75
+ }
116
+
76
+ if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
117
+ if (size == 2) {
77
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
118
+ TCGv_i64 cmp = tcg_temp_new_i64();
78
+ syn_smetrap(SME_ET_NotStreaming, false));
119
+ TCGv_i64 val = tcg_temp_new_i64();
79
+ return false;
120
+
121
+ if (s->be_data == MO_LE) {
122
+ tcg_gen_concat32_i64(val, t1, t2);
123
+ tcg_gen_concat32_i64(cmp, s1, s2);
124
+ } else {
125
+ tcg_gen_concat32_i64(val, t2, t1);
126
+ tcg_gen_concat32_i64(cmp, s2, s1);
127
+ }
128
+
129
+ tcg_gen_atomic_cmpxchg_i64(cmp, addr, cmp, val, memidx,
130
+ MO_64 | MO_ALIGN | s->be_data);
131
+ tcg_temp_free_i64(val);
132
+
133
+ if (s->be_data == MO_LE) {
134
+ tcg_gen_extr32_i64(s1, s2, cmp);
135
+ } else {
136
+ tcg_gen_extr32_i64(s2, s1, cmp);
137
+ }
138
+ tcg_temp_free_i64(cmp);
139
+ } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
140
+ TCGv_i32 tcg_rs = tcg_const_i32(rs);
141
+
142
+ if (s->be_data == MO_LE) {
143
+ gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
144
+ } else {
145
+ gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
146
+ }
147
+ tcg_temp_free_i32(tcg_rs);
148
+ } else {
149
+ TCGv_i64 d1 = tcg_temp_new_i64();
150
+ TCGv_i64 d2 = tcg_temp_new_i64();
151
+ TCGv_i64 a2 = tcg_temp_new_i64();
152
+ TCGv_i64 c1 = tcg_temp_new_i64();
153
+ TCGv_i64 c2 = tcg_temp_new_i64();
154
+ TCGv_i64 zero = tcg_const_i64(0);
155
+
156
+ /* Load the two words, in memory order. */
157
+ tcg_gen_qemu_ld_i64(d1, addr, memidx,
158
+ MO_64 | MO_ALIGN_16 | s->be_data);
159
+ tcg_gen_addi_i64(a2, addr, 8);
160
+ tcg_gen_qemu_ld_i64(d2, addr, memidx, MO_64 | s->be_data);
161
+
162
+ /* Compare the two words, also in memory order. */
163
+ tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
164
+ tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
165
+ tcg_gen_and_i64(c2, c2, c1);
166
+
167
+ /* If compare equal, write back new data, else write back old data. */
168
+ tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
169
+ tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
170
+ tcg_gen_qemu_st_i64(c1, addr, memidx, MO_64 | s->be_data);
171
+ tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
172
+ tcg_temp_free_i64(a2);
173
+ tcg_temp_free_i64(c1);
174
+ tcg_temp_free_i64(c2);
175
+ tcg_temp_free_i64(zero);
176
+
177
+ /* Write back the data from memory to Rs. */
178
+ tcg_gen_mov_i64(s1, d1);
179
+ tcg_gen_mov_i64(s2, d2);
180
+ tcg_temp_free_i64(d1);
181
+ tcg_temp_free_i64(d2);
182
+ }
80
+ }
81
+ if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
82
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
83
+ syn_smetrap(SME_ET_InactiveZA, false));
84
+ return false;
85
+ }
86
+ return true;
183
+}
87
+}
184
+
88
+
185
/* Update the Sixty-Four bit (SF) registersize. This logic is derived
89
/*
186
* from the ARMv8 specs for LDR (Shared decode for all encodings).
90
* This utility function is for doing register extension with an
187
*/
91
* optional shift. You will likely want to pass a temporary for the
188
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
189
gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, true);
190
return;
191
}
192
- /* CASP / CASPL */
193
+ if (rt2 == 31
194
+ && ((rt | rs) & 1) == 0
195
+ && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
196
+ /* CASP / CASPL */
197
+ gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
198
+ return;
199
+ }
200
break;
201
202
- case 0x6: case 0x7: /* CASP / LDXP */
203
+ case 0x6: case 0x7: /* CASPA / LDXP */
204
if (size & 2) { /* LDXP / LDAXP */
205
if (rn == 31) {
206
gen_check_sp_alignment(s);
207
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
208
}
209
return;
210
}
211
- /* CASPA / CASPAL */
212
+ if (rt2 == 31
213
+ && ((rt | rs) & 1) == 0
214
+ && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
215
+ /* CASPA / CASPAL */
216
+ gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
217
+ return;
218
+ }
219
break;
220
221
case 0xa: /* CAS */
222
case 0xb: /* CASL */
223
case 0xe: /* CASA */
224
case 0xf: /* CASAL */
225
+ if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
226
+ gen_compare_and_swap(s, rs, rt, rn, size);
227
+ return;
228
+ }
229
break;
230
}
231
unallocated_encoding(s);
232
--
92
--
233
2.17.0
93
2.25.1
234
235
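On the new (right-hand) side of the diff above, sme_enabled_check()
and the sme_*_enabled_check() wrappers become the standard gate for
SME translators. As a usage sketch -- trans_FOO is hypothetical, but
the shape matches the SME patches later in the series:

static bool trans_FOO(DisasContext *s, arg_FOO *a)
{
    if (!dc_isar_feature(aa64_sme, s)) {
        return false;
    }
    if (sme_smza_enabled_check(s)) {  /* needs PSTATE.SM and PSTATE.ZA */
        /* emit code for FOO */
    }
    return true;  /* if the check failed, an exception was raised */
}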
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
While we have some of the scalar paths for *CVF for fp16,
3
The pseudocode for CheckSVEEnabled gains a check for Streaming
4
we failed to decode the fp16 version of these instructions.
4
SVE mode, and for SME present but SVE absent.
5
5
6
Cc: qemu-stable@nongnu.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20180502221552.3873-2-richard.henderson@linaro.org
8
Message-id: 20220708151540.18136-17-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
10
---
12
target/arm/translate-a64.c | 33 ++++++++++++++++++++-------------
11
target/arm/translate-a64.c | 22 ++++++++++++++++------
13
1 file changed, 20 insertions(+), 13 deletions(-)
12
1 file changed, 16 insertions(+), 6 deletions(-)
14
13
15
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
14
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-a64.c
16
--- a/target/arm/translate-a64.c
18
+++ b/target/arm/translate-a64.c
17
+++ b/target/arm/translate-a64.c
19
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
18
@@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s)
20
int immh, int immb, int opcode,
19
return true;
21
int rn, int rd)
20
}
21
22
-/* Check that SVE access is enabled. If it is, return true.
23
+/*
24
+ * Check that SVE access is enabled. If it is, return true.
25
* If not, emit code to generate an appropriate exception and return false.
26
+ * This function corresponds to CheckSVEEnabled().
27
*/
28
bool sve_access_check(DisasContext *s)
22
{
29
{
23
- bool is_double = extract32(immh, 3, 1);
30
- if (s->sve_excp_el) {
24
- int size = is_double ? MO_64 : MO_32;
31
- assert(!s->sve_access_checked);
25
- int elements;
32
- s->sve_access_checked = true;
26
+ int size, elements, fracbits;
33
-
27
int immhb = immh << 3 | immb;
34
+ if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
28
- int fracbits = (is_double ? 128 : 64) - immhb;
35
+ assert(dc_isar_feature(aa64_sme, s));
29
36
+ if (!sme_sm_enabled_check(s)) {
30
- if (!extract32(immh, 2, 2)) {
37
+ goto fail_exit;
31
+ if (immh & 8) {
32
+ size = MO_64;
33
+ if (!is_scalar && !is_q) {
34
+ unallocated_encoding(s);
35
+ return;
36
+ }
38
+ }
37
+ } else if (immh & 4) {
39
+ } else if (s->sve_excp_el) {
38
+ size = MO_32;
40
gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
39
+ } else if (immh & 2) {
41
syn_sve_access_trap(), s->sve_excp_el);
40
+ size = MO_16;
42
- return false;
41
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
43
+ goto fail_exit;
42
+ unallocated_encoding(s);
43
+ return;
44
+ }
45
+ } else {
46
+ /* immh == 0 would be a failure of the decode logic */
47
+ g_assert(immh == 1);
48
unallocated_encoding(s);
49
return;
50
}
44
}
51
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
45
s->sve_access_checked = true;
52
if (is_scalar) {
46
return fp_access_check(s);
53
elements = 1;
47
+
54
} else {
48
+ fail_exit:
55
- elements = is_double ? 2 : is_q ? 4 : 2;
49
+ /* Assert that we only raise one exception per instruction. */
56
- if (is_double && !is_q) {
50
+ assert(!s->sve_access_checked);
57
- unallocated_encoding(s);
51
+ s->sve_access_checked = true;
58
- return;
52
+ return false;
59
- }
60
+ elements = (8 << is_q) >> size;
61
}
62
+ fracbits = (16 << size) - immhb;
63
64
if (!fp_access_check(s)) {
65
return;
66
}
67
68
- /* immh == 0 would be a failure of the decode logic */
69
- g_assert(immh);
70
-
71
handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
72
}
53
}
73
54
55
/*
74
--
56
--
75
2.17.0
57
2.25.1
76
77
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
These operations have been re-invented by several targets so far.
3
These SME instructions are nominally within the SVE decode space,
4
Several supported hosts have insns for these, so place the
4
so we add them to sve.decode and translate-sve.c.
5
expanders out-of-line for a future introduction of tcg opcodes.
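
As a worked example of the new-side semantics (an editorial sketch,
assuming a streaming vector length of 256 bits, i.e. svl = 32 bytes):

    int vec_bytes  = 32;             /* streaming_vec_reg_size(): SVL/8   */
    int pred_bytes = vec_bytes / 8;  /* streaming_pred_reg_size(): SVL/64 */

    /* RDSVL  x0, #2       =>  x0 = 2 * 32 = 64  */
    /* ADDSVL sp, sp, #-4  =>  sp = sp - 4 * 32  */
    /* ADDSPL x1, x2, #3   =>  x1 = x2 + 3 * 4   */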
6
5
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20180508151437.4232-2-richard.henderson@linaro.org
8
Message-id: 20220708151540.18136-18-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
10
---
12
tcg/tcg-op.h | 16 ++++++++++++++++
11
target/arm/translate-a64.h | 12 ++++++++++++
13
tcg/tcg-op.c | 40 ++++++++++++++++++++++++++++++++++++++++
12
target/arm/sve.decode | 5 ++++-
14
2 files changed, 56 insertions(+)
13
target/arm/translate-sve.c | 38 ++++++++++++++++++++++++++++++++++++++
14
3 files changed, 54 insertions(+), 1 deletion(-)
15
15
16
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
16
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
17
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/tcg-op.h
18
--- a/target/arm/translate-a64.h
19
+++ b/tcg/tcg-op.h
19
+++ b/target/arm/translate-a64.h
20
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
20
@@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_size(DisasContext *s)
21
void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg);
21
return s->vl;
22
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg);
23
void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg);
24
+void tcg_gen_smin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
25
+void tcg_gen_smax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
26
+void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
27
+void tcg_gen_umax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
28
29
static inline void tcg_gen_discard_i32(TCGv_i32 arg)
30
{
31
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg);
32
void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg);
33
void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg);
34
void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg);
35
+void tcg_gen_smin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
36
+void tcg_gen_smax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
37
+void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
38
+void tcg_gen_umax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
39
40
#if TCG_TARGET_REG_BITS == 64
41
static inline void tcg_gen_discard_i64(TCGv_i64 arg)
42
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
43
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i64
44
#define tcg_gen_muls2_tl tcg_gen_muls2_i64
45
#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i64
46
+#define tcg_gen_smin_tl tcg_gen_smin_i64
47
+#define tcg_gen_umin_tl tcg_gen_umin_i64
48
+#define tcg_gen_smax_tl tcg_gen_smax_i64
49
+#define tcg_gen_umax_tl tcg_gen_umax_i64
50
#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i64
51
#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i64
52
#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i64
53
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
54
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i32
55
#define tcg_gen_muls2_tl tcg_gen_muls2_i32
56
#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i32
57
+#define tcg_gen_smin_tl tcg_gen_smin_i32
58
+#define tcg_gen_umin_tl tcg_gen_umin_i32
59
+#define tcg_gen_smax_tl tcg_gen_smax_i32
60
+#define tcg_gen_umax_tl tcg_gen_umax_i32
61
#define tcg_gen_atomic_cmpxchg_tl tcg_gen_atomic_cmpxchg_i32
62
#define tcg_gen_atomic_xchg_tl tcg_gen_atomic_xchg_i32
63
#define tcg_gen_atomic_fetch_add_tl tcg_gen_atomic_fetch_add_i32
64
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/tcg/tcg-op.c
67
+++ b/tcg/tcg-op.c
68
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
69
}
70
}
22
}
71
23
72
+void tcg_gen_smin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
24
+/* Return the byte size of the vector register, SVL / 8. */
25
+static inline int streaming_vec_reg_size(DisasContext *s)
73
+{
26
+{
74
+ tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, a, b);
27
+ return s->svl;
75
+}
28
+}
76
+
29
+
77
+void tcg_gen_umin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
30
/*
31
* Return the offset info CPUARMState of the predicate vector register Pn.
32
* Note for this purpose, FFR is P16.
33
@@ -XXX,XX +XXX,XX @@ static inline int pred_full_reg_size(DisasContext *s)
34
return s->vl >> 3;
35
}
36
37
+/* Return the byte size of the predicate register, SVL / 64. */
38
+static inline int streaming_pred_reg_size(DisasContext *s)
78
+{
39
+{
79
+ tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, a, b);
40
+ return s->svl >> 3;
80
+}
41
+}
81
+
42
+
82
+void tcg_gen_smax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
43
/*
44
* Round up the size of a register to a size allowed by
45
* the tcg vector infrastructure. Any operation which uses this
46
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/sve.decode
49
+++ b/target/arm/sve.decode
50
@@ -XXX,XX +XXX,XX @@ INDEX_ri 00000100 esz:2 1 imm:s5 010001 rn:5 rd:5
51
# SVE index generation (register start, register increment)
52
INDEX_rr 00000100 .. 1 ..... 010011 ..... ..... @rd_rn_rm
53
54
-### SVE Stack Allocation Group
55
+### SVE / Streaming SVE Stack Allocation Group
56
57
# SVE stack frame adjustment
58
ADDVL 00000100 001 ..... 01010 ...... ..... @rd_rn_i6
59
+ADDSVL 00000100 001 ..... 01011 ...... ..... @rd_rn_i6
60
ADDPL 00000100 011 ..... 01010 ...... ..... @rd_rn_i6
61
+ADDSPL 00000100 011 ..... 01011 ...... ..... @rd_rn_i6
62
63
# SVE stack frame size
64
RDVL 00000100 101 11111 01010 imm:s6 rd:5
65
+RDSVL 00000100 101 11111 01011 imm:s6 rd:5
66
67
### SVE Bitwise Shift - Unpredicated Group
68
69
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/translate-sve.c
72
+++ b/target/arm/translate-sve.c
73
@@ -XXX,XX +XXX,XX @@ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
74
return true;
75
}
76
77
+static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a)
83
+{
78
+{
84
+ tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, b, a);
79
+ if (!dc_isar_feature(aa64_sme, s)) {
80
+ return false;
81
+ }
82
+ if (sme_enabled_check(s)) {
83
+ TCGv_i64 rd = cpu_reg_sp(s, a->rd);
84
+ TCGv_i64 rn = cpu_reg_sp(s, a->rn);
85
+ tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s));
86
+ }
87
+ return true;
85
+}
88
+}
86
+
89
+
87
+void tcg_gen_umax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b)
90
static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
91
{
92
if (!dc_isar_feature(aa64_sve, s)) {
93
@@ -XXX,XX +XXX,XX @@ static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
94
return true;
95
}
96
97
+static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a)
88
+{
98
+{
89
+ tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, b, a);
99
+ if (!dc_isar_feature(aa64_sme, s)) {
100
+ return false;
101
+ }
102
+ if (sme_enabled_check(s)) {
103
+ TCGv_i64 rd = cpu_reg_sp(s, a->rd);
104
+ TCGv_i64 rn = cpu_reg_sp(s, a->rn);
105
+ tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s));
106
+ }
107
+ return true;
90
+}
108
+}
91
+
109
+
92
/* 64-bit ops */
110
static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
93
111
{
94
#if TCG_TARGET_REG_BITS == 32
112
if (!dc_isar_feature(aa64_sve, s)) {
95
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
113
@@ -XXX,XX +XXX,XX @@ static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
96
tcg_temp_free_i64(t2);
114
return true;
97
}
115
}
98
116
99
+void tcg_gen_smin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
117
+static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a)
100
+{
118
+{
101
+ tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, a, b);
119
+ if (!dc_isar_feature(aa64_sme, s)) {
120
+ return false;
121
+ }
122
+ if (sme_enabled_check(s)) {
123
+ TCGv_i64 reg = cpu_reg(s, a->rd);
124
+ tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s));
125
+ }
126
+ return true;
102
+}
127
+}
103
+
128
+
104
+void tcg_gen_umin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
129
/*
105
+{
130
*** SVE Compute Vector Address Group
106
+ tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, a, b);
131
*/
107
+}
108
+
109
+void tcg_gen_smax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
110
+{
111
+ tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, b, a);
112
+}
113
+
114
+void tcg_gen_umax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
115
+{
116
+ tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, b, a);
117
+}
118
+
119
/* Size changing operations. */
120
121
void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
122
--
132
--
123
2.17.0
133
2.25.1
124
125
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220708151540.18136-19-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
target/arm/helper-sme.h | 2 ++
9
target/arm/sme.decode | 4 ++++
10
target/arm/sme_helper.c | 25 +++++++++++++++++++++++++
11
target/arm/translate-sme.c | 13 +++++++++++++
12
4 files changed, 44 insertions(+)
13
14
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-sme.h
17
+++ b/target/arm/helper-sme.h
18
@@ -XXX,XX +XXX,XX @@
19
20
DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
21
DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
22
+
23
+DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32)
24
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/sme.decode
27
+++ b/target/arm/sme.decode
28
@@ -XXX,XX +XXX,XX @@
29
#
30
# This file is processed by scripts/decodetree.py
31
#
32
+
33
+### SME Misc
34
+
35
+ZERO 11000000 00 001 00000000000 imm:8
36
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/sme_helper.c
39
+++ b/target/arm/sme_helper.c
40
@@ -XXX,XX +XXX,XX @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i)
41
memset(env->zarray, 0, sizeof(env->zarray));
42
}
43
}
44
+
45
+void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
46
+{
47
+ uint32_t i;
48
+
49
+ /*
50
+ * Special case clearing the entire ZA space.
51
+ * This falls into the CONSTRAINED UNPREDICTABLE zeroing of any
52
+ * parts of the ZA storage outside of SVL.
53
+ */
54
+ if (imm == 0xff) {
55
+ memset(env->zarray, 0, sizeof(env->zarray));
56
+ return;
57
+ }
58
+
59
+ /*
60
+ * Recall that ZAnH.D[m] is spread across ZA[n+8*m],
61
+ * so each row is discontiguous within ZA[].
62
+ */
63
+ for (i = 0; i < svl; i++) {
64
+ if (imm & (1 << (i % 8))) {
65
+ memset(&env->zarray[i], 0, svl);
66
+ }
67
+ }
68
+}
69
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/translate-sme.c
72
+++ b/target/arm/translate-sme.c
73
@@ -XXX,XX +XXX,XX @@
74
*/
75
76
#include "decode-sme.c.inc"
77
+
78
+
79
+static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
80
+{
81
+ if (!dc_isar_feature(aa64_sme, s)) {
82
+ return false;
83
+ }
84
+ if (sme_za_enabled_check(s)) {
85
+ gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm),
86
+ tcg_constant_i32(streaming_vec_reg_size(s)));
87
+ }
88
+ return true;
89
+}
90
--
91
2.25.1
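A quick worked example of the ZERO mask handling above (an editorial
sketch, assuming SVL = 256 bits, so 32 rows of ZA are in use): imm bit
k selects the 64-bit tile ZAk.D, and because the eight .D tiles are
interleaved row by row, helper_sme_zero()'s loop for imm = 0x01 clears
rows 0, 8, 16 and 24, i.e. exactly tile ZA0.D:

    for (int i = 0; i < 32; i++) {
        if (0x01 & (1 << (i % 8))) {        /* row belongs to ZA0.D */
            memset(&env->zarray[i], 0, 32); /* svl bytes per row */
        }
    }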
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
While we have some of the scalar paths for FCVT for fp16,
3
We can reuse the SVE functions for implementing moves to/from
4
we failed to decode the fp16 version of these instructions.
4
horizontal tile slices, but we need new ones for moves to/from
5
vertical tile slices.
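
One concrete illustration of the tile interleaving the new helpers
rely on (an editorial sketch; it assumes sizeof(ARMVectorReg) == 256,
i.e. ARM_MAX_VQ == 16, and the helper name is invented):

static inline uint64_t *za_d_vslice_elem(uint64_t *za, int i)
{
    /* Eight .D tiles are interleaved, so element i of a vertical
     * slice of ZA0.D sits 8*i rows into the storage.  As a uint64_t
     * index that is 8*i * (256 / 8) = i * sizeof(ARMVectorReg),
     * matching tile_vslice_index(i) for every element size.
     */
    return &za[i * 256];
}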
5
6
6
Cc: qemu-stable@nongnu.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20180502221552.3873-3-richard.henderson@linaro.org
9
Message-id: 20220708151540.18136-20-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
11
---
12
target/arm/translate-a64.c | 65 +++++++++++++++++++++++++++-----------
12
target/arm/helper-sme.h | 12 +++
13
1 file changed, 46 insertions(+), 19 deletions(-)
13
target/arm/helper-sve.h | 2 +
14
target/arm/translate-a64.h | 8 ++
15
target/arm/translate.h | 5 ++
16
target/arm/sme.decode | 15 ++++
17
target/arm/sme_helper.c | 151 ++++++++++++++++++++++++++++++++++++-
18
target/arm/sve_helper.c | 12 +++
19
target/arm/translate-sme.c | 127 +++++++++++++++++++++++++++++++
20
8 files changed, 331 insertions(+), 1 deletion(-)
14
21
15
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
22
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
16
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/translate-a64.c
24
--- a/target/arm/helper-sme.h
18
+++ b/target/arm/translate-a64.c
25
+++ b/target/arm/helper-sme.h
19
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
26
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
20
bool is_q, bool is_u,
27
DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
21
int immh, int immb, int rn, int rd)
28
29
DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32)
30
+
31
+/* Move to/from vertical array slices, i.e. columns, so 'c'. */
32
+DEF_HELPER_FLAGS_4(sme_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
33
+DEF_HELPER_FLAGS_4(sme_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
34
+DEF_HELPER_FLAGS_4(sme_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
35
+DEF_HELPER_FLAGS_4(sme_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
36
+DEF_HELPER_FLAGS_4(sme_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
37
+DEF_HELPER_FLAGS_4(sme_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
38
+DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
39
+DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
40
+DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
41
+DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
42
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/helper-sve.h
45
+++ b/target/arm/helper-sve.h
46
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG,
47
void, ptr, ptr, ptr, ptr, i32)
48
DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG,
49
void, ptr, ptr, ptr, ptr, i32)
50
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_q, TCG_CALL_NO_RWG,
51
+ void, ptr, ptr, ptr, ptr, i32)
52
53
DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG,
54
void, ptr, ptr, ptr, ptr, i32)
55
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/translate-a64.h
58
+++ b/target/arm/translate-a64.h
59
@@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s)
60
return size_for_gvec(pred_full_reg_size(s));
61
}
62
63
+/* Return a newly allocated pointer to the predicate register. */
64
+static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno)
65
+{
66
+ TCGv_ptr ret = tcg_temp_new_ptr();
67
+ tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno));
68
+ return ret;
69
+}
70
+
71
bool disas_sve(DisasContext *, uint32_t);
72
bool disas_sme(DisasContext *, uint32_t);
73
74
diff --git a/target/arm/translate.h b/target/arm/translate.h
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/translate.h
77
+++ b/target/arm/translate.h
78
@@ -XXX,XX +XXX,XX @@ static inline int plus_2(DisasContext *s, int x)
79
return x + 2;
80
}
81
82
+static inline int plus_12(DisasContext *s, int x)
83
+{
84
+ return x + 12;
85
+}
86
+
87
static inline int times_2(DisasContext *s, int x)
22
{
88
{
23
- bool is_double = extract32(immh, 3, 1);
89
return x * 2;
24
int immhb = immh << 3 | immb;
90
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
25
- int fracbits = (is_double ? 128 : 64) - immhb;
91
index XXXXXXX..XXXXXXX 100644
26
- int pass;
92
--- a/target/arm/sme.decode
27
+ int pass, size, fracbits;
93
+++ b/target/arm/sme.decode
28
TCGv_ptr tcg_fpstatus;
94
@@ -XXX,XX +XXX,XX @@
29
TCGv_i32 tcg_rmode, tcg_shift;
95
### SME Misc
30
96
31
- if (!extract32(immh, 2, 2)) {
97
ZERO 11000000 00 001 00000000000 imm:8
32
- unallocated_encoding(s);
98
+
33
- return;
99
+### SME Move into/from Array
34
- }
100
+
35
-
101
+%mova_rs 13:2 !function=plus_12
36
- if (!is_scalar && !is_q && is_double) {
102
+&mova esz rs pg zr za_imm v:bool to_vec:bool
37
+ if (immh & 0x8) {
103
+
38
+ size = MO_64;
104
+MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \
39
+ if (!is_scalar && !is_q) {
105
+ &mova to_vec=0 rs=%mova_rs
40
+ unallocated_encoding(s);
106
+MOVA        11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \
+            &mova to_vec=0 rs=%mova_rs esz=4
+
+MOVA        11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \
+            &mova to_vec=1 rs=%mova_rs
+MOVA        11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \
+            &mova to_vec=1 rs=%mova_rs esz=4
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@
 
 #include "qemu/osdep.h"
 #include "cpu.h"
-#include "internals.h"
+#include "tcg/tcg-gvec-desc.h"
 #include "exec/helper-proto.h"
+#include "qemu/int128.h"
+#include "vec_internal.h"
 
 /* ResetSVEState */
 void arm_reset_sve_state(CPUARMState *env)
@@ -XXX,XX +XXX,XX @@ void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
         }
     }
 }
+
+
+/*
+ * When considering the ZA storage as an array of elements of
+ * type T, the index within that array of the Nth element of
+ * a vertical slice of a tile can be calculated like this,
+ * regardless of the size of type T. This is because the tiles
+ * are interleaved, so if type T is size N bytes then row 1 of
+ * the tile is N rows away from row 0. The division by N to
+ * convert a byte offset into an array index and the multiplication
+ * by N to convert from vslice-index-within-the-tile to
+ * the index within the ZA storage cancel out.
+ */
+#define tile_vslice_index(i) ((i) * sizeof(ARMVectorReg))
+
+/*
+ * When doing byte arithmetic on the ZA storage, the element
+ * byteoff bytes away in a tile vertical slice is always this
+ * many bytes away in the ZA storage, regardless of the
+ * size of the tile element, assuming that byteoff is a multiple
+ * of the element size. Again this is because of the interleaving
+ * of the tiles. For instance if we have 1 byte per element then
+ * each row of the ZA storage has one byte of the vslice data,
+ * and (counting from 0) byte 8 goes in row 8 of the storage
+ * at offset (8 * row-size-in-bytes).
+ * If we have 8 bytes per element then each row of the ZA storage
+ * has 8 bytes of the data, but there are 8 interleaved tiles and
+ * so byte 8 of the data goes into row 1 of the tile,
+ * which is again row 8 of the storage, so the offset is still
+ * (8 * row-size-in-bytes). Similarly for other element sizes.
+ */
+#define tile_vslice_offset(byteoff) ((byteoff) * sizeof(ARMVectorReg))
+
+
+/*
+ * Move Zreg vector to ZArray column.
+ */
+#define DO_MOVA_C(NAME, TYPE, H)                                        \
+void HELPER(NAME)(void *za, void *vn, void *vg, uint32_t desc)          \
+{                                                                       \
+    int i, oprsz = simd_oprsz(desc);                                    \
+    for (i = 0; i < oprsz; ) {                                          \
+        uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));                 \
+        do {                                                            \
+            if (pg & 1) {                                               \
+                *(TYPE *)(za + tile_vslice_offset(i)) = *(TYPE *)(vn + H(i)); \
+            }                                                           \
+            i += sizeof(TYPE);                                          \
+            pg >>= sizeof(TYPE);                                        \
+        } while (i & 15);                                               \
+    }                                                                   \
+}
+
+DO_MOVA_C(sme_mova_cz_b, uint8_t, H1)
+DO_MOVA_C(sme_mova_cz_h, uint16_t, H1_2)
+DO_MOVA_C(sme_mova_cz_s, uint32_t, H1_4)
+
+void HELPER(sme_mova_cz_d)(void *za, void *vn, void *vg, uint32_t desc)
+{
+    int i, oprsz = simd_oprsz(desc) / 8;
+    uint8_t *pg = vg;
+    uint64_t *n = vn;
+    uint64_t *a = za;
+
+    for (i = 0; i < oprsz; i++) {
+        if (pg[H1(i)] & 1) {
+            a[tile_vslice_index(i)] = n[i];
+        }
+    }
+}
+
+void HELPER(sme_mova_cz_q)(void *za, void *vn, void *vg, uint32_t desc)
+{
+    int i, oprsz = simd_oprsz(desc) / 16;
+    uint16_t *pg = vg;
+    Int128 *n = vn;
+    Int128 *a = za;
+
+    /*
+     * Int128 is used here simply to copy 16 bytes, and to simplify
+     * the address arithmetic.
+     */
+    for (i = 0; i < oprsz; i++) {
+        if (pg[H2(i)] & 1) {
+            a[tile_vslice_index(i)] = n[i];
+        }
+    }
+}
+
+#undef DO_MOVA_C
+
+/*
+ * Move ZArray column to Zreg vector.
+ */
+#define DO_MOVA_Z(NAME, TYPE, H)                                        \
+void HELPER(NAME)(void *vd, void *za, void *vg, uint32_t desc)          \
+{                                                                       \
+    int i, oprsz = simd_oprsz(desc);                                    \
+    for (i = 0; i < oprsz; ) {                                          \
+        uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));                 \
+        do {                                                            \
+            if (pg & 1) {                                               \
+                *(TYPE *)(vd + H(i)) = *(TYPE *)(za + tile_vslice_offset(i)); \
+            }                                                           \
+            i += sizeof(TYPE);                                          \
+            pg >>= sizeof(TYPE);                                        \
+        } while (i & 15);                                               \
+    }                                                                   \
+}
+
+DO_MOVA_Z(sme_mova_zc_b, uint8_t, H1)
+DO_MOVA_Z(sme_mova_zc_h, uint16_t, H1_2)
+DO_MOVA_Z(sme_mova_zc_s, uint32_t, H1_4)
+
+void HELPER(sme_mova_zc_d)(void *vd, void *za, void *vg, uint32_t desc)
+{
+    int i, oprsz = simd_oprsz(desc) / 8;
+    uint8_t *pg = vg;
+    uint64_t *d = vd;
+    uint64_t *a = za;
+
+    for (i = 0; i < oprsz; i++) {
+        if (pg[H1(i)] & 1) {
+            d[i] = a[tile_vslice_index(i)];
+        }
+    }
+}
+
+void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc)
+{
+    int i, oprsz = simd_oprsz(desc) / 16;
+    uint16_t *pg = vg;
+    Int128 *d = vd;
+    Int128 *a = za;
+
+    /*
+     * Int128 is used here simply to copy 16 bytes, and to simplify
+     * the address arithmetic.
+     */
+    for (i = 0; i < oprsz; i++) {
+        if (pg[H2(i)] & 1) {
+            d[i] = a[tile_vslice_index(i)];
+        }
+    }
+}
+
+#undef DO_MOVA_Z
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm,
     }
 }
 
+void HELPER(sve_sel_zpzz_q)(void *vd, void *vn, void *vm,
+                            void *vg, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 16;
+    Int128 *d = vd, *n = vn, *m = vm;
+    uint16_t *pg = vg;
+
+    for (i = 0; i < opr_sz; i += 1) {
+        d[i] = (pg[H2(i)] & 1 ? n : m)[i];
+    }
+}
+
 /* Two operand comparison controlled by a predicate.
  * ??? It is very tempting to want to be able to expand this inline
  * with x86 instructions, e.g.
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@
 #include "decode-sme.c.inc"
 
 
+/*
+ * Resolve tile.size[index] to a host pointer, where tile and index
+ * are always decoded together, dependent on the element size.
+ */
+static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
+                                int tile_index, bool vertical)
+{
+    int tile = tile_index >> (4 - esz);
+    int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz);
+    int pos, len, offset;
+    TCGv_i32 tmp;
+    TCGv_ptr addr;
+
+    /* Compute the final index, which is Rs+imm. */
+    tmp = tcg_temp_new_i32();
+    tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs));
+    tcg_gen_addi_i32(tmp, tmp, index);
+
+    /* Prepare a power-of-two modulo via extraction of @len bits. */
+    len = ctz32(streaming_vec_reg_size(s)) - esz;
+
+    if (vertical) {
+        /*
+         * Compute the byte offset of the index within the tile:
+         *     (index % (svl / size)) * size
+         *   = (index % (svl >> esz)) << esz
+         * Perform the power-of-two modulo via extraction of the low @len bits.
+         * Perform the multiply by shifting left by @pos bits.
+         * Perform these operations simultaneously via deposit into zero.
+         */
+        pos = esz;
+        tcg_gen_deposit_z_i32(tmp, tmp, pos, len);
+
+        /*
+         * For big-endian, adjust the indexed column byte offset within
+         * the uint64_t host words that make up env->zarray[].
+         */
+        if (HOST_BIG_ENDIAN && esz < MO_64) {
+            tcg_gen_xori_i32(tmp, tmp, 8 - (1 << esz));
+        }
+    } else {
+        /*
+         * Compute the byte offset of the index within the tile:
+         *     (index % (svl / size)) * (size * sizeof(row))
+         *   = (index % (svl >> esz)) << (esz + log2(sizeof(row)))
+         */
+        pos = esz + ctz32(sizeof(ARMVectorReg));
+        tcg_gen_deposit_z_i32(tmp, tmp, pos, len);
+
+        /* Row slices are always aligned and need no endian adjustment. */
+    }
+
+    /* The tile byte offset within env->zarray is the row. */
+    offset = tile * sizeof(ARMVectorReg);
+
+    /* Include the byte offset of zarray to make this relative to env. */
+    offset += offsetof(CPUARMState, zarray);
+    tcg_gen_addi_i32(tmp, tmp, offset);
+
+    /* Add the byte offset to env to produce the final pointer. */
+    addr = tcg_temp_new_ptr();
+    tcg_gen_ext_i32_ptr(addr, tmp);
+    tcg_temp_free_i32(tmp);
+    tcg_gen_add_ptr(addr, addr, cpu_env);
+
+    return addr;
+}
+
 static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
 {
     if (!dc_isar_feature(aa64_sme, s)) {
@@ -XXX,XX +XXX,XX @@ static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
     }
     return true;
 }
 
+static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
+{
+    static gen_helper_gvec_4 * const h_fns[5] = {
+        gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
+        gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d,
+        gen_helper_sve_sel_zpzz_q
+    };
+    static gen_helper_gvec_3 * const cz_fns[5] = {
+        gen_helper_sme_mova_cz_b, gen_helper_sme_mova_cz_h,
+        gen_helper_sme_mova_cz_s, gen_helper_sme_mova_cz_d,
+        gen_helper_sme_mova_cz_q,
+    };
+    static gen_helper_gvec_3 * const zc_fns[5] = {
+        gen_helper_sme_mova_zc_b, gen_helper_sme_mova_zc_h,
+        gen_helper_sme_mova_zc_s, gen_helper_sme_mova_zc_d,
+        gen_helper_sme_mova_zc_q,
+    };
+
+    TCGv_ptr t_za, t_zr, t_pg;
+    TCGv_i32 t_desc;
+    int svl;
+
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (!sme_smza_enabled_check(s)) {
+        return true;
+    }
+
+    t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
+    t_zr = vec_full_reg_ptr(s, a->zr);
+    t_pg = pred_full_reg_ptr(s, a->pg);
+
+    svl = streaming_vec_reg_size(s);
+    t_desc = tcg_constant_i32(simd_desc(svl, svl, 0));
+
+    if (a->v) {
+        /* Vertical slice -- use sme mova helpers. */
+        if (a->to_vec) {
+            zc_fns[a->esz](t_zr, t_za, t_pg, t_desc);
+        } else {
+            cz_fns[a->esz](t_za, t_zr, t_pg, t_desc);
+        }
+    } else {
+        /* Horizontal slice -- reuse sve sel helpers. */
+        if (a->to_vec) {
+            h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc);
+        } else {
+            h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc);
+        }
+    }
+
+    tcg_temp_free_ptr(t_za);
+    tcg_temp_free_ptr(t_zr);
+    tcg_temp_free_ptr(t_pg);
+
+    return true;
+}
--
2.25.1
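
The index cancellation that tile_vslice_index() relies on in the patch
above is easy to sanity-check outside of QEMU. The following is a
standalone sketch, not part of the series; ROW_BYTES is a made-up
stand-in for sizeof(ARMVectorReg).

#include <assert.h>
#include <stddef.h>

#define ROW_BYTES 32            /* hypothetical bytes per ZA storage row */

int main(void)
{
    /* esz: element size in bytes; i: index within a vertical slice. */
    for (size_t esz = 1; esz <= 8; esz *= 2) {
        for (size_t i = 0; i < 8; i++) {
            /*
             * Tiles with esz-byte elements interleave every esz storage
             * rows, so element i of a vertical slice sits in storage
             * row i * esz, i.e. at byte offset i * esz * ROW_BYTES.
             */
            size_t byte_off = i * esz * ROW_BYTES;
            /*
             * Viewed as an index into an array of esz-byte elements,
             * the element size cancels: the index is i * ROW_BYTES,
             * matching tile_vslice_index(i) == i * sizeof(ARMVectorReg).
             */
            assert(byte_off / esz == i * ROW_BYTES);
        }
    }
    return 0;
}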
From: Richard Henderson <richard.henderson@linaro.org>

We cannot reuse the SVE functions for LD[1-4] and ST[1-4],
because those functions accept only a Zreg register number.
For SME, we want to pass a pointer into ZA storage.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-21-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
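
As a rough picture of why these helpers need a raw pointer into ZA
storage rather than a Zreg number: a horizontal slice is contiguous in
the storage, while a vertical slice strides by one full storage row per
element. A minimal illustrative sketch of the two cases, with ROW_BYTES
again a hypothetical stand-in for sizeof(ARMVectorReg):

#include <stdint.h>
#include <string.h>

#define ROW_BYTES 32            /* hypothetical bytes per ZA storage row */

/* Horizontal slice: len bytes, contiguous in ZA storage. */
static void copy_h(uint8_t *za, const uint8_t *src, size_t len)
{
    memcpy(za, src, len);
}

/*
 * Vertical slice: the element at byte offset i within the slice lives
 * i * ROW_BYTES bytes into the storage, mirroring tile_vslice_offset().
 */
static void copy_v(uint8_t *za, const uint8_t *src, size_t len, size_t esz)
{
    for (size_t i = 0; i < len; i += esz) {
        memcpy(za + i * ROW_BYTES, src + i, esz);
    }
}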
 target/arm/helper-sme.h    |  82 +++++
 target/arm/sme.decode      |   9 +
 target/arm/sme_helper.c    | 595 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-sme.c |  70 +++++
 4 files changed, 756 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ MOVA        11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \
             &mova to_vec=1 rs=%mova_rs
 MOVA        11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \
             &mova to_vec=1 rs=%mova_rs esz=4
+
+### SME Memory
+
+&ldst esz rs pg rn rm za_imm v:bool st:bool
+
+LDST1       1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
+            &ldst rs=%mova_rs
+LDST1       1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
+            &ldst esz=4 rs=%mova_rs
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@
 
 #include "qemu/osdep.h"
 #include "cpu.h"
+#include "internals.h"
 #include "tcg/tcg-gvec-desc.h"
 #include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
 #include "qemu/int128.h"
 #include "vec_internal.h"
+#include "sve_ldst_internal.h"
 
 /* ResetSVEState */
 void arm_reset_sve_state(CPUARMState *env)
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc)
 }
 
 #undef DO_MOVA_Z
+
+/*
+ * Clear elements in a tile slice comprising len bytes.
+ */
+
+typedef void ClearFn(void *ptr, size_t off, size_t len);
+
+static void clear_horizontal(void *ptr, size_t off, size_t len)
+{
+    memset(ptr + off, 0, len);
+}
+
+static void clear_vertical_b(void *vptr, size_t off, size_t len)
+{
+    for (size_t i = 0; i < len; ++i) {
+        *(uint8_t *)(vptr + tile_vslice_offset(i + off)) = 0;
+    }
+}
+
+static void clear_vertical_h(void *vptr, size_t off, size_t len)
+{
+    for (size_t i = 0; i < len; i += 2) {
+        *(uint16_t *)(vptr + tile_vslice_offset(i + off)) = 0;
+    }
+}
+
+static void clear_vertical_s(void *vptr, size_t off, size_t len)
+{
+    for (size_t i = 0; i < len; i += 4) {
+        *(uint32_t *)(vptr + tile_vslice_offset(i + off)) = 0;
+    }
+}
+
+static void clear_vertical_d(void *vptr, size_t off, size_t len)
+{
+    for (size_t i = 0; i < len; i += 8) {
+        *(uint64_t *)(vptr + tile_vslice_offset(i + off)) = 0;
+    }
+}
+
+static void clear_vertical_q(void *vptr, size_t off, size_t len)
+{
+    for (size_t i = 0; i < len; i += 16) {
+        memset(vptr + tile_vslice_offset(i + off), 0, 16);
+    }
+}
+
+/*
+ * Copy elements from an array into a tile slice comprising len bytes.
+ */
+
+typedef void CopyFn(void *dst, const void *src, size_t len);
+
+static void copy_horizontal(void *dst, const void *src, size_t len)
+{
+    memcpy(dst, src, len);
+}
+
+static void copy_vertical_b(void *vdst, const void *vsrc, size_t len)
+{
+    const uint8_t *src = vsrc;
+    uint8_t *dst = vdst;
+    size_t i;
+
+    for (i = 0; i < len; ++i) {
+        dst[tile_vslice_index(i)] = src[i];
+    }
+}
+
+static void copy_vertical_h(void *vdst, const void *vsrc, size_t len)
+{
+    const uint16_t *src = vsrc;
+    uint16_t *dst = vdst;
+    size_t i;
+
+    for (i = 0; i < len / 2; ++i) {
+        dst[tile_vslice_index(i)] = src[i];
+    }
+}
+
+static void copy_vertical_s(void *vdst, const void *vsrc, size_t len)
+{
+    const uint32_t *src = vsrc;
+    uint32_t *dst = vdst;
+    size_t i;
+
+    for (i = 0; i < len / 4; ++i) {
+        dst[tile_vslice_index(i)] = src[i];
+    }
+}
+
+static void copy_vertical_d(void *vdst, const void *vsrc, size_t len)
+{
+    const uint64_t *src = vsrc;
+    uint64_t *dst = vdst;
+    size_t i;
+
+    for (i = 0; i < len / 8; ++i) {
+        dst[tile_vslice_index(i)] = src[i];
+    }
+}
+
+static void copy_vertical_q(void *vdst, const void *vsrc, size_t len)
+{
+    for (size_t i = 0; i < len; i += 16) {
+        memcpy(vdst + tile_vslice_offset(i), vsrc + i, 16);
+    }
+}
+
+/*
+ * Host and TLB primitives for vertical tile slice addressing.
+ */
+
+#define DO_LD(NAME, TYPE, HOST, TLB)                                        \
+static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host)  \
+{                                                                           \
+    TYPE val = HOST(host);                                                  \
+    *(TYPE *)(za + tile_vslice_offset(off)) = val;                          \
+}                                                                           \
+static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za,           \
+                intptr_t off, target_ulong addr, uintptr_t ra)              \
+{                                                                           \
+    TYPE val = TLB(env, useronly_clean_ptr(addr), ra);                      \
+    *(TYPE *)(za + tile_vslice_offset(off)) = val;                          \
+}
+
+#define DO_ST(NAME, TYPE, HOST, TLB)                                        \
+static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host)  \
+{                                                                           \
+    TYPE val = *(TYPE *)(za + tile_vslice_offset(off));                     \
+    HOST(host, val);                                                        \
+}                                                                           \
+static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za,           \
+                intptr_t off, target_ulong addr, uintptr_t ra)              \
+{                                                                           \
+    TYPE val = *(TYPE *)(za + tile_vslice_offset(off));                     \
+    TLB(env, useronly_clean_ptr(addr), val, ra);                            \
+}
+
+/*
+ * The ARMVectorReg elements are stored in host-endian 64-bit units.
+ * For 128-bit quantities, the sequence defined by the Elem[] pseudocode
+ * corresponds to storing the two 64-bit pieces in little-endian order.
+ */
+#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB)                                 \
+static inline void HNAME##_host(void *za, intptr_t off, void *host)         \
+{                                                                           \
+    uint64_t val0 = HOST(host), val1 = HOST(host + 8);                      \
+    uint64_t *ptr = za + off;                                               \
+    ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1;                   \
+}                                                                           \
+static inline void VNAME##_v_host(void *za, intptr_t off, void *host)       \
+{                                                                           \
+    HNAME##_host(za, tile_vslice_offset(off), host);                        \
+}                                                                           \
+static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off,    \
+                               target_ulong addr, uintptr_t ra)             \
+{                                                                           \
+    uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra);                 \
+    uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra);             \
+    uint64_t *ptr = za + off;                                               \
+    ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1;                   \
+}                                                                           \
+static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off,  \
+                                 target_ulong addr, uintptr_t ra)           \
+{                                                                           \
+    HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra);                \
+}
+
+#define DO_STQ(HNAME, VNAME, BE, HOST, TLB)                                 \
+static inline void HNAME##_host(void *za, intptr_t off, void *host)         \
+{                                                                           \
+    uint64_t *ptr = za + off;                                               \
+    HOST(host, ptr[BE]);                                                    \
+    HOST(host + 8, ptr[!BE]);                                               \
+}                                                                           \
+static inline void VNAME##_v_host(void *za, intptr_t off, void *host)       \
+{                                                                           \
+    HNAME##_host(za, tile_vslice_offset(off), host);                        \
+}                                                                           \
+static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off,    \
+                               target_ulong addr, uintptr_t ra)             \
+{                                                                           \
+    uint64_t *ptr = za + off;                                               \
+    TLB(env, useronly_clean_ptr(addr), ptr[BE], ra);                        \
+    TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra);                   \
+}                                                                           \
+static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off,  \
+                                 target_ulong addr, uintptr_t ra)           \
+{                                                                           \
+    HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra);                \
+}
+
+DO_LD(ld1b, uint8_t, ldub_p, cpu_ldub_data_ra)
+DO_LD(ld1h_be, uint16_t, lduw_be_p, cpu_lduw_be_data_ra)
+DO_LD(ld1h_le, uint16_t, lduw_le_p, cpu_lduw_le_data_ra)
+DO_LD(ld1s_be, uint32_t, ldl_be_p, cpu_ldl_be_data_ra)
+DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra)
+DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra)
+DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra)
+
+DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra)
+DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra)
+
+DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra)
+DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra)
+DO_ST(st1h_le, uint16_t, stw_le_p, cpu_stw_le_data_ra)
+DO_ST(st1s_be, uint32_t, stl_be_p, cpu_stl_be_data_ra)
+DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra)
+DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra)
+DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra)
+
+DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra)
+DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra)
+
+#undef DO_LD
+#undef DO_ST
+#undef DO_LDQ
+#undef DO_STQ
+
+/*
+ * Common helper for all contiguous predicated loads.
+ */
+
+static inline QEMU_ALWAYS_INLINE
+void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
+             const target_ulong addr, uint32_t desc, const uintptr_t ra,
+             const int esz, uint32_t mtedesc, bool vertical,
+             sve_ldst1_host_fn *host_fn,
+             sve_ldst1_tlb_fn *tlb_fn,
+             ClearFn *clr_fn,
+             CopyFn *cpy_fn)
+{
+    const intptr_t reg_max = simd_oprsz(desc);
+    const intptr_t esize = 1 << esz;
+    intptr_t reg_off, reg_last;
+    SVEContLdSt info;
+    void *host;
+    int flags;
+
+    /* Find the active elements. */
+    if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) {
+        /* The entire predicate was false; no load occurs. */
+        clr_fn(za, 0, reg_max);
+        return;
+    }
+
+    /* Probe the page(s).  Exit with exception for any invalid page. */
+    sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra);
+
+    /* Handle watchpoints for all active elements. */
+    sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize,
+                              BP_MEM_READ, ra);
+
+    /*
+     * Handle mte checks for all active elements.
+     * Since TBI must be set for MTE, !mtedesc => !mte_active.
+     */
+    if (mtedesc) {
+        sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize,
+                                mtedesc, ra);
+    }
+
+    flags = info.page[0].flags | info.page[1].flags;
+    if (unlikely(flags != 0)) {
+#ifdef CONFIG_USER_ONLY
+        g_assert_not_reached();
+#else
+        /*
+         * At least one page includes MMIO.
+         * Any bus operation can fail with cpu_transaction_failed,
+         * which for ARM will raise SyncExternal.  Perform the load
+         * into scratch memory to preserve register state until the end.
+         */
+        ARMVectorReg scratch = { };
+
+        reg_off = info.reg_off_first[0];
+        reg_last = info.reg_off_last[1];
+        if (reg_last < 0) {
+            reg_last = info.reg_off_split;
+            if (reg_last < 0) {
+                reg_last = info.reg_off_last[0];
+            }
+        }
+
+        do {
+            uint64_t pg = vg[reg_off >> 6];
+            do {
+                if ((pg >> (reg_off & 63)) & 1) {
+                    tlb_fn(env, &scratch, reg_off, addr + reg_off, ra);
+                }
+                reg_off += esize;
+            } while (reg_off & 63);
+        } while (reg_off <= reg_last);
+
+        cpy_fn(za, &scratch, reg_max);
+        return;
+#endif
+    }
+
+    /* The entire operation is in RAM, on valid pages. */
+
+    reg_off = info.reg_off_first[0];
+    reg_last = info.reg_off_last[0];
+    host = info.page[0].host;
+
+    if (!vertical) {
+        memset(za, 0, reg_max);
+    } else if (reg_off) {
+        clr_fn(za, 0, reg_off);
+    }
+
+    while (reg_off <= reg_last) {
+        uint64_t pg = vg[reg_off >> 6];
+        do {
+            if ((pg >> (reg_off & 63)) & 1) {
+                host_fn(za, reg_off, host + reg_off);
+            } else if (vertical) {
+                clr_fn(za, reg_off, esize);
+            }
+            reg_off += esize;
+        } while (reg_off <= reg_last && (reg_off & 63));
+    }
+
+    /*
+     * Use the slow path to manage the cross-page misalignment.
+     * But we know this is RAM and cannot trap.
+     */
+    reg_off = info.reg_off_split;
+    if (unlikely(reg_off >= 0)) {
+        tlb_fn(env, za, reg_off, addr + reg_off, ra);
+    }
+
+    reg_off = info.reg_off_first[1];
+    if (unlikely(reg_off >= 0)) {
+        reg_last = info.reg_off_last[1];
+        host = info.page[1].host;
+
+        do {
+            uint64_t pg = vg[reg_off >> 6];
+            do {
+                if ((pg >> (reg_off & 63)) & 1) {
+                    host_fn(za, reg_off, host + reg_off);
+                } else if (vertical) {
+                    clr_fn(za, reg_off, esize);
+                }
+                reg_off += esize;
+            } while (reg_off & 63);
+        } while (reg_off <= reg_last);
+    }
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg,
+                 target_ulong addr, uint32_t desc, uintptr_t ra,
+                 const int esz, bool vertical,
+                 sve_ldst1_host_fn *host_fn,
+                 sve_ldst1_tlb_fn *tlb_fn,
+                 ClearFn *clr_fn,
+                 CopyFn *cpy_fn)
+{
+    uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+    int bit55 = extract64(addr, 55, 1);
+
+    /* Remove mtedesc from the normal sve descriptor. */
+    desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+    /* Perform gross MTE suppression early. */
+    if (!tbi_check(desc, bit55) ||
+        tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+        mtedesc = 0;
+    }
+
+    sme_ld1(env, za, vg, addr, desc, ra, esz, mtedesc, vertical,
+            host_fn, tlb_fn, clr_fn, cpy_fn);
+}
+
+#define DO_LD(L, END, ESZ)                                                  \
+void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg,      \
+                                 target_ulong addr, uint32_t desc)          \
+{                                                                           \
+    sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false,                \
+            sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb,            \
+            clear_horizontal, copy_horizontal);                             \
+}                                                                           \
+void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg,      \
+                                 target_ulong addr, uint32_t desc)          \
+{                                                                           \
+    sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true,                 \
+            sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb,              \
+            clear_vertical_##L, copy_vertical_##L);                         \
+}                                                                           \
+void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg,  \
+                                     target_ulong addr, uint32_t desc)      \
+{                                                                           \
+    sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false,               \
+                sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb,        \
+                clear_horizontal, copy_horizontal);                         \
+}                                                                           \
+void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg,  \
+                                     target_ulong addr, uint32_t desc)      \
+{                                                                           \
+    sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true,                \
+                sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb,          \
+                clear_vertical_##L, copy_vertical_##L);                     \
+}
+
+DO_LD(b, , MO_8)
+DO_LD(h, _be, MO_16)
+DO_LD(h, _le, MO_16)
+DO_LD(s, _be, MO_32)
+DO_LD(s, _le, MO_32)
+DO_LD(d, _be, MO_64)
+DO_LD(d, _le, MO_64)
+DO_LD(q, _be, MO_128)
+DO_LD(q, _le, MO_128)
+
+#undef DO_LD
+
+/*
+ * Common helper for all contiguous predicated stores.
+ */
+
+static inline QEMU_ALWAYS_INLINE
+void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
+             const target_ulong addr, uint32_t desc, const uintptr_t ra,
+             const int esz, uint32_t mtedesc, bool vertical,
+             sve_ldst1_host_fn *host_fn,
+             sve_ldst1_tlb_fn *tlb_fn)
+{
+    const intptr_t reg_max = simd_oprsz(desc);
+    const intptr_t esize = 1 << esz;
+    intptr_t reg_off, reg_last;
+    SVEContLdSt info;
+    void *host;
+    int flags;
+
+    /* Find the active elements. */
+    if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) {
+        /* The entire predicate was false; no store occurs. */
+        return;
+    }
+
+    /* Probe the page(s).  Exit with exception for any invalid page. */
+    sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra);
+
+    /* Handle watchpoints for all active elements. */
+    sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize,
+                              BP_MEM_WRITE, ra);
+
+    /*
+     * Handle mte checks for all active elements.
+     * Since TBI must be set for MTE, !mtedesc => !mte_active.
+     */
+    if (mtedesc) {
+        sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize,
+                                mtedesc, ra);
+    }
+
+    flags = info.page[0].flags | info.page[1].flags;
+    if (unlikely(flags != 0)) {
+#ifdef CONFIG_USER_ONLY
+        g_assert_not_reached();
+#else
+        /*
+         * At least one page includes MMIO.
+         * Any bus operation can fail with cpu_transaction_failed,
+         * which for ARM will raise SyncExternal.  We cannot avoid
+         * this fault and will leave with the store incomplete.
+         */
+        reg_off = info.reg_off_first[0];
+        reg_last = info.reg_off_last[1];
+        if (reg_last < 0) {
+            reg_last = info.reg_off_split;
+            if (reg_last < 0) {
+                reg_last = info.reg_off_last[0];
+            }
+        }
+
+        do {
+            uint64_t pg = vg[reg_off >> 6];
+            do {
+                if ((pg >> (reg_off & 63)) & 1) {
+                    tlb_fn(env, za, reg_off, addr + reg_off, ra);
+                }
+                reg_off += esize;
+            } while (reg_off & 63);
+        } while (reg_off <= reg_last);
+        return;
+#endif
+    }
+
+    reg_off = info.reg_off_first[0];
+    reg_last = info.reg_off_last[0];
+    host = info.page[0].host;
+
+    while (reg_off <= reg_last) {
+        uint64_t pg = vg[reg_off >> 6];
+        do {
+            if ((pg >> (reg_off & 63)) & 1) {
+                host_fn(za, reg_off, host + reg_off);
+            }
+            reg_off += 1 << esz;
+        } while (reg_off <= reg_last && (reg_off & 63));
+    }
+
+    /*
+     * Use the slow path to manage the cross-page misalignment.
+     * But we know this is RAM and cannot trap.
+     */
+    reg_off = info.reg_off_split;
+    if (unlikely(reg_off >= 0)) {
+        tlb_fn(env, za, reg_off, addr + reg_off, ra);
+    }
+
+    reg_off = info.reg_off_first[1];
+    if (unlikely(reg_off >= 0)) {
+        reg_last = info.reg_off_last[1];
+        host = info.page[1].host;
+
+        do {
+            uint64_t pg = vg[reg_off >> 6];
+            do {
+                if ((pg >> (reg_off & 63)) & 1) {
+                    host_fn(za, reg_off, host + reg_off);
+                }
+                reg_off += 1 << esz;
+            } while (reg_off & 63);
+        } while (reg_off <= reg_last);
+    }
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr,
+                 uint32_t desc, uintptr_t ra, int esz, bool vertical,
+                 sve_ldst1_host_fn *host_fn,
+                 sve_ldst1_tlb_fn *tlb_fn)
+{
+    uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+    int bit55 = extract64(addr, 55, 1);
+
+    /* Remove mtedesc from the normal sve descriptor. */
+    desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+    /* Perform gross MTE suppression early. */
+    if (!tbi_check(desc, bit55) ||
+        tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+        mtedesc = 0;
+    }
+
+    sme_st1(env, za, vg, addr, desc, ra, esz, mtedesc,
+            vertical, host_fn, tlb_fn);
+}
+
+#define DO_ST(L, END, ESZ)                                                  \
+void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg,      \
+                                 target_ulong addr, uint32_t desc)          \
+{                                                                           \
+    sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false,                \
+            sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb);           \
+}                                                                           \
+void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg,      \
+                                 target_ulong addr, uint32_t desc)          \
+{                                                                           \
+    sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true,                 \
+            sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb);             \
+}                                                                           \
+void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg,  \
+                                     target_ulong addr, uint32_t desc)      \
+{                                                                           \
+    sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false,               \
+                sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb);       \
+}                                                                           \
+void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg,  \
+                                     target_ulong addr, uint32_t desc)      \
+{                                                                           \
+    sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true,                \
+                sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb);         \
+}
+
+DO_ST(b, , MO_8)
+DO_ST(h, _be, MO_16)
+DO_ST(h, _le, MO_16)
+DO_ST(s, _be, MO_32)
+DO_ST(s, _le, MO_32)
+DO_ST(d, _be, MO_64)
+DO_ST(d, _le, MO_64)
+DO_ST(q, _be, MO_128)
+DO_ST(q, _le, MO_128)
+
+#undef DO_ST
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
 
     return true;
 }
+
+static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
+{
+    typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32);
+
+    /*
+     * Indexed by [esz][be][v][mte][st], which is (except for load/store)
+     * also the order in which the elements appear in the function names,
+     * and so how we must concatenate the pieces.
+     */
+
+#define FN_LS(F)     { gen_helper_sme_ld1##F, gen_helper_sme_st1##F }
+#define FN_MTE(F)    { FN_LS(F), FN_LS(F##_mte) }
+#define FN_HV(F)     { FN_MTE(F##_h), FN_MTE(F##_v) }
+#define FN_END(L, B) { FN_HV(L), FN_HV(B) }
+
+    static GenLdSt1 * const fns[5][2][2][2][2] = {
+        FN_END(b, b),
+        FN_END(h_le, h_be),
+        FN_END(s_le, s_be),
+        FN_END(d_le, d_be),
+        FN_END(q_le, q_be),
+    };
+
+#undef FN_LS
+#undef FN_MTE
+#undef FN_HV
+#undef FN_END
+
+    TCGv_ptr t_za, t_pg;
+    TCGv_i64 addr;
+    int svl, desc = 0;
+    bool be = s->be_data == MO_BE;
+    bool mte = s->mte_active[0];
+
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (!sme_smza_enabled_check(s)) {
+        return true;
+    }
+
+    t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
+    t_pg = pred_full_reg_ptr(s, a->pg);
+    addr = tcg_temp_new_i64();
+
+    tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz);
+    tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
+
+    if (mte) {
+        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+        desc = FIELD_DP32(desc, MTEDESC, WRITE, a->st);
+        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << a->esz) - 1);
+        desc <<= SVE_MTEDESC_SHIFT;
+    } else {
+        addr = clean_data_tbi(s, addr);
+    }
+    svl = streaming_vec_reg_size(s);
+    desc = simd_desc(svl, svl, desc);
+
+    fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr,
+                                      tcg_constant_i32(desc));
+
+    tcg_temp_free_ptr(t_za);
+    tcg_temp_free_ptr(t_pg);
+    tcg_temp_free_i64(addr);
+    return true;
+}
--
2.25.1
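
One detail from the patch above worth illustrating: trans_LDST1() packs
the MTE descriptor into the upper part of the simd_desc() data field,
and sme_ld1_mte()/sme_st1_mte() recover it with a single right shift.
A toy round-trip, with made-up shift widths standing in for QEMU's real
SIMD_DATA_SHIFT and SVE_MTEDESC_SHIFT values:

#include <assert.h>
#include <stdint.h>

#define SIMD_DATA_SHIFT   10    /* hypothetical value */
#define SVE_MTEDESC_SHIFT 5     /* hypothetical value */

int main(void)
{
    uint32_t mtedesc = 0x1234;
    /* Translator side: shift mtedesc above the low SVE data bits. */
    uint32_t data = mtedesc << SVE_MTEDESC_SHIFT;
    /* simd_desc() places data above its own oprsz/maxsz fields. */
    uint32_t desc = (data << SIMD_DATA_SHIFT) | 0x3ff;
    /* Helper side: one shift undoes both placements. */
    assert(desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT) == mtedesc);
    return 0;
}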
From: Richard Henderson <richard.henderson@linaro.org>

Add a TCGv_ptr base argument, which will be cpu_env for SVE.
We will reuse this for SME save and restore array insns.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-22-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h |  3 +++
 target/arm/translate-sve.c | 48 ++++++++++++++++++++++++++++----------
 2 files changed, 39 insertions(+), 12 deletions(-)

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, int64_t shift,
                   uint32_t opr_sz, uint32_t max_sz);
 
+void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
+void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
+
 #endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
  * The load should begin at the address Rn + IMM.
  */
 
-static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
+void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
+                 int len, int rn, int imm)
 {
     int len_align = QEMU_ALIGN_DOWN(len, 8);
     int len_remain = len % 8;
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
         t0 = tcg_temp_new_i64();
         for (i = 0; i < len_align; i += 8) {
             tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
-            tcg_gen_st_i64(t0, cpu_env, vofs + i);
+            tcg_gen_st_i64(t0, base, vofs + i);
             tcg_gen_addi_i64(clean_addr, clean_addr, 8);
         }
         tcg_temp_free_i64(t0);
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
         clean_addr = new_tmp_a64_local(s);
         tcg_gen_mov_i64(clean_addr, t0);
 
+        if (base != cpu_env) {
+            TCGv_ptr b = tcg_temp_local_new_ptr();
+            tcg_gen_mov_ptr(b, base);
+            base = b;
+        }
+
         gen_set_label(loop);
 
         t0 = tcg_temp_new_i64();
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
         tcg_gen_addi_i64(clean_addr, clean_addr, 8);
 
         tp = tcg_temp_new_ptr();
-        tcg_gen_add_ptr(tp, cpu_env, i);
+        tcg_gen_add_ptr(tp, base, i);
         tcg_gen_addi_ptr(i, i, 8);
         tcg_gen_st_i64(t0, tp, vofs);
         tcg_temp_free_ptr(tp);
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
 
         tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
         tcg_temp_free_ptr(i);
+
+        if (base != cpu_env) {
+            tcg_temp_free_ptr(base);
+            assert(len_remain == 0);
+        }
     }
 
     /*
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
         default:
             g_assert_not_reached();
         }
-        tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
+        tcg_gen_st_i64(t0, base, vofs + len_align);
         tcg_temp_free_i64(t0);
     }
 }
 
 /* Similarly for stores. */
-static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
+void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
+                 int len, int rn, int imm)
 {
     int len_align = QEMU_ALIGN_DOWN(len, 8);
     int len_remain = len % 8;
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
 
     t0 = tcg_temp_new_i64();
     for (i = 0; i < len_align; i += 8) {
-        tcg_gen_ld_i64(t0, cpu_env, vofs + i);
+        tcg_gen_ld_i64(t0, base, vofs + i);
         tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
         tcg_gen_addi_i64(clean_addr, clean_addr, 8);
     }
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
         clean_addr = new_tmp_a64_local(s);
         tcg_gen_mov_i64(clean_addr, t0);
 
+        if (base != cpu_env) {
+            TCGv_ptr b = tcg_temp_local_new_ptr();
+            tcg_gen_mov_ptr(b, base);
+            base = b;
+        }
+
         gen_set_label(loop);
 
         t0 = tcg_temp_new_i64();
         tp = tcg_temp_new_ptr();
-        tcg_gen_add_ptr(tp, cpu_env, i);
+        tcg_gen_add_ptr(tp, base, i);
         tcg_gen_ld_i64(t0, tp, vofs);
         tcg_gen_addi_ptr(i, i, 8);
         tcg_temp_free_ptr(tp);
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
 
         tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
         tcg_temp_free_ptr(i);
+
+        if (base != cpu_env) {
+            tcg_temp_free_ptr(base);
+            assert(len_remain == 0);
+        }
     }
 
     /* Predicate register stores can be any multiple of 2. */
     if (len_remain) {
         t0 = tcg_temp_new_i64();
-        tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
+        tcg_gen_ld_i64(t0, base, vofs + len_align);
 
         switch (len_remain) {
         case 2:
@@ -XXX,XX +XXX,XX @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = vec_full_reg_size(s);
         int off = vec_full_reg_offset(s, a->rd);
-        do_ldr(s, off, size, a->rn, a->imm * size);
+        gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = pred_full_reg_size(s);
         int off = pred_full_reg_offset(s, a->rd);
-        do_ldr(s, off, size, a->rn, a->imm * size);
+        gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = vec_full_reg_size(s);
         int off = vec_full_reg_offset(s, a->rd);
-        do_str(s, off, size, a->rn, a->imm * size);
+        gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
     if (sve_access_check(s)) {
         int size = pred_full_reg_size(s);
         int off = pred_full_reg_offset(s, a->rd);
-        do_str(s, off, size, a->rn, a->imm * size);
+        gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
     }
     return true;
 }
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

We can reuse the SVE functions for LDR and STR, passing in the
base of the ZA vector and a zero offset.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-23-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sme.decode      |  7 +++++++
 target/arm/translate-sme.c | 24 ++++++++++++++++++++++++
 2 files changed, 31 insertions(+)

diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ LDST1       1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
             &ldst rs=%mova_rs
 LDST1       1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
             &ldst esz=4 rs=%mova_rs
+
+&ldstr      rv rn imm
+@ldstr      ....... ... . ...... .. ... rn:5 . imm:4 \
+            &ldstr rv=%mova_rs
+
+LDR         1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
+STR         1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
     tcg_temp_free_i64(addr);
     return true;
 }
+
+typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int);
+
+static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)
+{
+    int svl = streaming_vec_reg_size(s);
+    int imm = a->imm;
+    TCGv_ptr base;
+
+    if (!sme_za_enabled_check(s)) {
+        return true;
+    }
+
+    /* ZA[n] equates to ZA0H.B[n]. */
+    base = get_tile_rowcol(s, MO_8, a->rv, imm, false);
+
+    fn(s, base, 0, svl, a->rn, imm * svl);
+
+    tcg_temp_free_ptr(base);
+    return true;
+}
+
+TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
+TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-24-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h    |  5 +++
 target/arm/sme.decode      | 11 +++++
 target/arm/sme_helper.c    | 90 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sme.c | 31 +++++++++++++
 4 files changed, 137 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
 DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
 DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
 DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ LDST1       1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
 
 LDR         1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
 STR         1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
+
+### SME Add Vector to Array
+
+&adda       zad zn pm pn
+@adda_32    ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda
+@adda_64    ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda
+
+ADDHA_s     11000000 10 01000 0 ... ... ..... 000 .. @adda_32
+ADDVA_s     11000000 10 01000 1 ... ... ..... 000 .. @adda_32
+ADDHA_d     11000000 11 01000 0 ... ... ..... 00 ... @adda_64
+ADDVA_d     11000000 11 01000 1 ... ... ..... 00 ... @adda_64
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ST(q, _be, MO_128)
 DO_ST(q, _le, MO_128)
 
 #undef DO_ST
+
+
+void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
+    uint64_t *pn = vpn, *pm = vpm;
+    uint32_t *zda = vzda, *zn = vzn;
+
+    for (row = 0; row < oprsz; ) {
+        uint64_t pa = pn[row >> 4];
+        do {
+            if (pa & 1) {
+                for (col = 0; col < oprsz; ) {
+                    uint64_t pb = pm[col >> 4];
+                    do {
+                        if (pb & 1) {
+                            zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)];
+                        }
+                        pb >>= 4;
+                    } while (++col & 15);
+                }
+            }
+            pa >>= 4;
+        } while (++row & 15);
+    }
+}
+
+void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+    uint8_t *pn = vpn, *pm = vpm;
+    uint64_t *zda = vzda, *zn = vzn;
+
+    for (row = 0; row < oprsz; ++row) {
+        if (pn[H1(row)] & 1) {
+            for (col = 0; col < oprsz; ++col) {
+                if (pm[H1(col)] & 1) {
+                    zda[tile_vslice_index(row) + col] += zn[col];
+                }
+            }
+        }
+    }
+}
+
+void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn,
+                         void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
diff --git a/hw/ppc/e500.h b/hw/ppc/e500.h
14
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
42
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
43
--- a/hw/ppc/e500.h
16
--- a/target/arm/helper-sme.h
44
+++ b/hw/ppc/e500.h
17
+++ b/target/arm/helper-sme.h
45
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i
46
#define PPCE500_H
19
DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
47
20
DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
48
#include "hw/boards.h"
21
DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
49
+#include "hw/platform-bus.h"
22
+
50
23
+DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
51
typedef struct PPCE500MachineState {
24
+DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
52
/*< private >*/
25
+DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
53
MachineState parent_obj;
26
+DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
54
27
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
55
+ /* points to instance of TYPE_PLATFORM_BUS_DEVICE if
56
+ * board supports dynamic sysbus devices
57
+ */
58
+ PlatformBusDevice *pbus_dev;
59
} PPCE500MachineState;
60
61
typedef struct PPCE500MachineClass {
62
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
63
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
64
--- a/include/hw/arm/virt.h
29
--- a/target/arm/sme.decode
65
+++ b/include/hw/arm/virt.h
30
+++ b/target/arm/sme.decode
66
@@ -XXX,XX +XXX,XX @@ typedef struct {
31
@@ -XXX,XX +XXX,XX @@ LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
67
typedef struct {
32
68
MachineState parent;
33
LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
69
Notifier machine_done;
34
STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
70
+ DeviceState *platform_bus_dev;
35
+
71
FWCfgState *fw_cfg;
36
+### SME Add Vector to Array
72
bool secure;
37
+
73
bool highmem;
38
+&adda zad zn pm pn
74
diff --git a/include/hw/platform-bus.h b/include/hw/platform-bus.h
39
+@adda_32 ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda
40
+@adda_64 ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda
41
+
42
+ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32
43
+ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32
44
+ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64
45
+ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
46
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
75
index XXXXXXX..XXXXXXX 100644
47
index XXXXXXX..XXXXXXX 100644
76
--- a/include/hw/platform-bus.h
48
--- a/target/arm/sme_helper.c
77
+++ b/include/hw/platform-bus.h
49
+++ b/target/arm/sme_helper.c
78
@@ -XXX,XX +XXX,XX @@ typedef struct PlatformBusDevice PlatformBusDevice;
50
@@ -XXX,XX +XXX,XX @@ DO_ST(q, _be, MO_128)
79
struct PlatformBusDevice {
51
DO_ST(q, _le, MO_128)
80
/*< private >*/
52
81
SysBusDevice parent_obj;
53
#undef DO_ST
82
- Notifier notifier;
83
- bool done_gathering;
84
85
/*< public >*/
86
uint32_t mmio_size;
87
@@ -XXX,XX +XXX,XX @@ int platform_bus_get_irqn(PlatformBusDevice *platform_bus, SysBusDevice *sbdev,
88
hwaddr platform_bus_get_mmio_addr(PlatformBusDevice *pbus, SysBusDevice *sbdev,
89
int n);
90
91
+void platform_bus_link_device(PlatformBusDevice *pbus, SysBusDevice *sbdev);
92
+
54
+
93
#endif /* HW_PLATFORM_BUS_H */
55
+void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn,
94
diff --git a/hw/arm/sysbus-fdt.c b/hw/arm/sysbus-fdt.c
56
+ void *vpm, uint32_t desc)
95
index XXXXXXX..XXXXXXX 100644
96
--- a/hw/arm/sysbus-fdt.c
97
+++ b/hw/arm/sysbus-fdt.c
98
@@ -XXX,XX +XXX,XX @@ static void add_all_platform_bus_fdt_nodes(ARMPlatformBusFDTParams *fdt_params)
99
dev = qdev_find_recursive(sysbus_get_default(), TYPE_PLATFORM_BUS_DEVICE);
100
pbus = PLATFORM_BUS_DEVICE(dev);
101
102
- /* We can only create dt nodes for dynamic devices when they're ready */
103
- assert(pbus->done_gathering);
104
-
105
PlatformBusFDTData data = {
106
.fdt = fdt,
107
.irq_start = irq_start,
108
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
109
index XXXXXXX..XXXXXXX 100644
110
--- a/hw/arm/virt.c
111
+++ b/hw/arm/virt.c
112
@@ -XXX,XX +XXX,XX @@ static void create_platform_bus(VirtMachineState *vms, qemu_irq *pic)
113
qdev_prop_set_uint32(dev, "mmio_size",
114
platform_bus_params.platform_bus_size);
115
qdev_init_nofail(dev);
116
+ vms->platform_bus_dev = dev;
117
s = SYS_BUS_DEVICE(dev);
118
119
for (i = 0; i < platform_bus_params.platform_bus_num_irqs; i++) {
120
@@ -XXX,XX +XXX,XX @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
121
return ms->possible_cpus;
122
}
123
124
+static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
125
+ DeviceState *dev, Error **errp)
126
+{
57
+{
127
+ VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);
58
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
59
+ uint64_t *pn = vpn, *pm = vpm;
60
+ uint32_t *zda = vzda, *zn = vzn;
128
+
61
+
129
+ if (vms->platform_bus_dev) {
62
+ for (row = 0; row < oprsz; ) {
130
+ if (object_dynamic_cast(OBJECT(dev), TYPE_SYS_BUS_DEVICE)) {
63
+ uint64_t pa = pn[row >> 4];
131
+ platform_bus_link_device(PLATFORM_BUS_DEVICE(vms->platform_bus_dev),
64
+ do {
132
+ SYS_BUS_DEVICE(dev));
65
+ if (pa & 1) {
66
+ for (col = 0; col < oprsz; ) {
67
+ uint64_t pb = pm[col >> 4];
68
+ do {
69
+ if (pb & 1) {
70
+ zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)];
71
+ }
72
+ pb >>= 4;
73
+ } while (++col & 15);
74
+ }
75
+ }
76
+ pa >>= 4;
77
+ } while (++row & 15);
78
+ }
79
+}
80
+
81
+void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn,
82
+ void *vpm, uint32_t desc)
83
+{
84
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
85
+ uint8_t *pn = vpn, *pm = vpm;
86
+ uint64_t *zda = vzda, *zn = vzn;
87
+
88
+ for (row = 0; row < oprsz; ++row) {
89
+ if (pn[H1(row)] & 1) {
90
+ for (col = 0; col < oprsz; ++col) {
91
+ if (pm[H1(col)] & 1) {
92
+ zda[tile_vslice_index(row) + col] += zn[col];
93
+ }
94
+ }
133
+ }
95
+ }
134
+ }
96
+ }
135
+}
97
+}
136
+
98
+
137
+static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
99
+void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn,
138
+ DeviceState *dev)
100
+ void *vpm, uint32_t desc)
139
+{
101
+{
140
+ if (object_dynamic_cast(OBJECT(dev), TYPE_SYS_BUS_DEVICE)) {
102
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
141
+ return HOTPLUG_HANDLER(machine);
103
+ uint64_t *pn = vpn, *pm = vpm;
104
+ uint32_t *zda = vzda, *zn = vzn;
105
+
106
+ for (row = 0; row < oprsz; ) {
107
+ uint64_t pa = pn[row >> 4];
108
+ do {
109
+ if (pa & 1) {
110
+ uint32_t zn_row = zn[H4(row)];
111
+ for (col = 0; col < oprsz; ) {
112
+ uint64_t pb = pm[col >> 4];
113
+ do {
114
+ if (pb & 1) {
115
+ zda[tile_vslice_index(row) + H4(col)] += zn_row;
116
+ }
117
+ pb >>= 4;
118
+ } while (++col & 15);
119
+ }
120
+ }
121
+ pa >>= 4;
122
+ } while (++row & 15);
142
+ }
123
+ }
143
+
144
+ return NULL;
145
+}
124
+}
146
+
125
+
147
static void virt_machine_class_init(ObjectClass *oc, void *data)
126
+void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
148
{
127
+ void *vpm, uint32_t desc)
149
MachineClass *mc = MACHINE_CLASS(oc);
150
+ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
151
152
mc->init = machvirt_init;
153
/* Start max_cpus at the maximum QEMU supports. We'll further restrict
154
@@ -XXX,XX +XXX,XX @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
155
mc->cpu_index_to_instance_props = virt_cpu_index_to_props;
156
mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a15");
157
mc->get_default_cpu_node_id = virt_get_default_cpu_node_id;
158
+ mc->get_hotplug_handler = virt_machine_get_hotplug_handler;
159
+ hc->plug = virt_machine_device_plug_cb;
160
}
161
162
static const TypeInfo virt_machine_info = {
163
@@ -XXX,XX +XXX,XX @@ static const TypeInfo virt_machine_info = {
164
.instance_size = sizeof(VirtMachineState),
165
.class_size = sizeof(VirtMachineClass),
166
.class_init = virt_machine_class_init,
167
+ .interfaces = (InterfaceInfo[]) {
168
+ { TYPE_HOTPLUG_HANDLER },
169
+ { }
170
+ },
171
};
172
173
static void machvirt_machine_init(void)
174
diff --git a/hw/core/platform-bus.c b/hw/core/platform-bus.c
175
index XXXXXXX..XXXXXXX 100644
176
--- a/hw/core/platform-bus.c
177
+++ b/hw/core/platform-bus.c
178
@@ -XXX,XX +XXX,XX @@ static void plaform_bus_refresh_irqs(PlatformBusDevice *pbus)
179
{
180
bitmap_zero(pbus->used_irqs, pbus->num_irqs);
181
foreach_dynamic_sysbus_device(platform_bus_count_irqs, pbus);
182
- pbus->done_gathering = true;
183
}
184
185
static void platform_bus_map_irq(PlatformBusDevice *pbus, SysBusDevice *sbdev,
186
@@ -XXX,XX +XXX,XX @@ static void platform_bus_map_mmio(PlatformBusDevice *pbus, SysBusDevice *sbdev,
187
}
188
189
/*
190
- * For each sysbus device, look for unassigned IRQ lines as well as
191
- * unassociated MMIO regions. Connect them to the platform bus if available.
192
+ * Look for unassigned IRQ lines as well as unassociated MMIO regions.
193
+ * Connect them to the platform bus if available.
194
*/
195
-static void link_sysbus_device(SysBusDevice *sbdev, void *opaque)
196
+void platform_bus_link_device(PlatformBusDevice *pbus, SysBusDevice *sbdev)
197
{
198
- PlatformBusDevice *pbus = opaque;
199
int i;
200
201
for (i = 0; sysbus_has_irq(sbdev, i); i++) {
202
@@ -XXX,XX +XXX,XX @@ static void link_sysbus_device(SysBusDevice *sbdev, void *opaque)
203
}
204
}
205
206
-static void platform_bus_init_notify(Notifier *notifier, void *data)
207
-{
208
- PlatformBusDevice *pb = container_of(notifier, PlatformBusDevice, notifier);
209
-
210
- /*
211
- * Generate a bitmap of used IRQ lines, as the user might have specified
212
- * them on the command line.
213
- */
214
- plaform_bus_refresh_irqs(pb);
215
-
216
- foreach_dynamic_sysbus_device(link_sysbus_device, pb);
217
-}
218
-
219
static void platform_bus_realize(DeviceState *dev, Error **errp)
220
{
221
PlatformBusDevice *pbus;
222
@@ -XXX,XX +XXX,XX @@ static void platform_bus_realize(DeviceState *dev, Error **errp)
223
sysbus_init_irq(d, &pbus->irqs[i]);
224
}
225
226
- /*
227
- * Register notifier that allows us to gather dangling devices once the
228
- * machine is completely assembled
229
- */
230
- pbus->notifier.notify = platform_bus_init_notify;
231
- qemu_add_machine_init_done_notifier(&pbus->notifier);
232
+ /* some devices might be initialized before so update used IRQs map */
233
+ plaform_bus_refresh_irqs(pbus);
234
}
235
236
static Property platform_bus_properties[] = {
237
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/hw/ppc/e500.c
240
+++ b/hw/ppc/e500.c
241
@@ -XXX,XX +XXX,XX @@ static void sysbus_device_create_devtree(SysBusDevice *sbdev, void *opaque)
242
}
243
}
244
245
-static void platform_bus_create_devtree(const PPCE500MachineClass *pmc,
246
+static void platform_bus_create_devtree(PPCE500MachineState *pms,
247
void *fdt, const char *mpic)
248
{
249
+ const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(pms);
250
gchar *node = g_strdup_printf("/platform@%"PRIx64, pmc->platform_bus_base);
251
const char platcomp[] = "qemu,platform\0simple-bus";
252
uint64_t addr = pmc->platform_bus_base;
253
uint64_t size = pmc->platform_bus_size;
254
int irq_start = pmc->platform_bus_first_irq;
255
- PlatformBusDevice *pbus;
256
- DeviceState *dev;
257
258
/* Create a /platform node that we can put all devices into */
259
260
@@ -XXX,XX +XXX,XX @@ static void platform_bus_create_devtree(const PPCE500MachineClass *pmc,
261
262
qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", mpic);
263
264
- dev = qdev_find_recursive(sysbus_get_default(), TYPE_PLATFORM_BUS_DEVICE);
265
- pbus = PLATFORM_BUS_DEVICE(dev);
266
+ /* Create dt nodes for dynamic devices */
267
+ PlatformDevtreeData data = {
268
+ .fdt = fdt,
269
+ .mpic = mpic,
270
+ .irq_start = irq_start,
271
+ .node = node,
272
+ .pbus = pms->pbus_dev,
273
+ };
274
275
- /* We can only create dt nodes for dynamic devices when they're ready */
276
- if (pbus->done_gathering) {
277
- PlatformDevtreeData data = {
278
- .fdt = fdt,
279
- .mpic = mpic,
280
- .irq_start = irq_start,
281
- .node = node,
282
- .pbus = pbus,
283
- };
284
-
285
- /* Loop through all dynamic sysbus devices and create nodes for them */
286
- foreach_dynamic_sysbus_device(sysbus_device_create_devtree, &data);
287
- }
288
+ /* Loop through all dynamic sysbus devices and create nodes for them */
289
+ foreach_dynamic_sysbus_device(sysbus_device_create_devtree, &data);
290
291
g_free(node);
292
}
293
@@ -XXX,XX +XXX,XX @@ static int ppce500_load_device_tree(PPCE500MachineState *pms,
294
}
295
g_free(soc);
296
297
- if (pmc->has_platform_bus) {
298
- platform_bus_create_devtree(pmc, fdt, mpic);
299
+ if (pms->pbus_dev) {
300
+ platform_bus_create_devtree(pms, fdt, mpic);
301
}
302
g_free(mpic);
303
304
@@ -XXX,XX +XXX,XX @@ void ppce500_init(MachineState *machine)
305
qdev_prop_set_uint32(dev, "num_irqs", pmc->platform_bus_num_irqs);
306
qdev_prop_set_uint32(dev, "mmio_size", pmc->platform_bus_size);
307
qdev_init_nofail(dev);
308
- s = SYS_BUS_DEVICE(dev);
309
+ pms->pbus_dev = PLATFORM_BUS_DEVICE(dev);
310
311
+ s = SYS_BUS_DEVICE(pms->pbus_dev);
312
for (i = 0; i < pmc->platform_bus_num_irqs; i++) {
313
int irqn = pmc->platform_bus_first_irq + i;
314
sysbus_connect_irq(s, i, qdev_get_gpio_in(mpicdev, irqn));
315
@@ -XXX,XX +XXX,XX @@ static const TypeInfo ppce500_info = {
316
.name = TYPE_PPCE500_MACHINE,
317
.parent = TYPE_MACHINE,
318
.abstract = true,
319
+ .instance_size = sizeof(PPCE500MachineState),
320
.class_size = sizeof(PPCE500MachineClass),
321
};
322
323
diff --git a/hw/ppc/e500plat.c b/hw/ppc/e500plat.c
324
index XXXXXXX..XXXXXXX 100644
325
--- a/hw/ppc/e500plat.c
326
+++ b/hw/ppc/e500plat.c
327
@@ -XXX,XX +XXX,XX @@ static void e500plat_init(MachineState *machine)
328
ppce500_init(machine);
329
}
330
331
+static void e500plat_machine_device_plug_cb(HotplugHandler *hotplug_dev,
332
+ DeviceState *dev, Error **errp)
333
+{
128
+{
334
+ PPCE500MachineState *pms = PPCE500_MACHINE(hotplug_dev);
129
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
130
+ uint8_t *pn = vpn, *pm = vpm;
131
+ uint64_t *zda = vzda, *zn = vzn;
335
+
132
+
336
+ if (pms->pbus_dev) {
133
+ for (row = 0; row < oprsz; ++row) {
337
+ if (object_dynamic_cast(OBJECT(dev), TYPE_SYS_BUS_DEVICE)) {
134
+ if (pn[H1(row)] & 1) {
338
+ platform_bus_link_device(pms->pbus_dev, SYS_BUS_DEVICE(dev));
135
+ uint64_t zn_row = zn[row];
136
+ for (col = 0; col < oprsz; ++col) {
137
+ if (pm[H1(col)] & 1) {
138
+ zda[tile_vslice_index(row) + col] += zn_row;
139
+ }
140
+ }
339
+ }
141
+ }
340
+ }
142
+ }
341
+}
143
+}
144
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/target/arm/translate-sme.c
147
+++ b/target/arm/translate-sme.c
148
@@ -XXX,XX +XXX,XX @@ static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)
149
150
TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
151
TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
342
+
152
+
343
+static
153
+static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz,
344
+HotplugHandler *e500plat_machine_get_hotpug_handler(MachineState *machine,
154
+ gen_helper_gvec_4 *fn)
345
+ DeviceState *dev)
346
+{
155
+{
347
+ if (object_dynamic_cast(OBJECT(dev), TYPE_SYS_BUS_DEVICE)) {
156
+ int svl = streaming_vec_reg_size(s);
348
+ return HOTPLUG_HANDLER(machine);
157
+ uint32_t desc = simd_desc(svl, svl, 0);
158
+ TCGv_ptr za, zn, pn, pm;
159
+
160
+ if (!sme_smza_enabled_check(s)) {
161
+ return true;
349
+ }
162
+ }
350
+
163
+
351
+ return NULL;
164
+ /* Sum XZR+zad to find ZAd. */
165
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
166
+ zn = vec_full_reg_ptr(s, a->zn);
167
+ pn = pred_full_reg_ptr(s, a->pn);
168
+ pm = pred_full_reg_ptr(s, a->pm);
169
+
170
+ fn(za, zn, pn, pm, tcg_constant_i32(desc));
171
+
172
+ tcg_temp_free_ptr(za);
173
+ tcg_temp_free_ptr(zn);
174
+ tcg_temp_free_ptr(pn);
175
+ tcg_temp_free_ptr(pm);
176
+ return true;
352
+}
177
+}
353
+
178
+
354
#define TYPE_E500PLAT_MACHINE MACHINE_TYPE_NAME("ppce500")
179
+TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s)
355
180
+TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
356
static void e500plat_machine_class_init(ObjectClass *oc, void *data)
181
+TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
357
{
182
+TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
358
PPCE500MachineClass *pmc = PPCE500_MACHINE_CLASS(oc);
359
+ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
360
MachineClass *mc = MACHINE_CLASS(oc);
361
362
+ mc->get_hotplug_handler = e500plat_machine_get_hotpug_handler;
363
+ hc->plug = e500plat_machine_device_plug_cb;
364
+
365
pmc->pci_first_slot = 0x1;
366
pmc->pci_nr_slots = PCI_SLOT_MAX - 1;
367
pmc->fixup_devtree = e500plat_fixup_devtree;
368
@@ -XXX,XX +XXX,XX @@ static const TypeInfo e500plat_info = {
369
.name = TYPE_E500PLAT_MACHINE,
370
.parent = TYPE_PPCE500_MACHINE,
371
.class_init = e500plat_machine_class_init,
372
+ .interfaces = (InterfaceInfo[]) {
373
+ { TYPE_HOTPLUG_HANDLER },
374
+ { }
375
+ }
376
};
377
378
static void e500plat_register_types(void)
379
--
183
--
380
2.17.0
184
2.25.1
381
382
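
[Note: the predicate bit-walking in the helpers above obscures fairly simple
semantics: ADDHA adds the source element selected by the *column* index to
every active tile element, while ADDVA uses the *row* index. A scalar
reference model, written against a hypothetical row-major za[dim][dim]
array and boolean predicates rather than QEMU's packed CPU state -- an
illustration only, not the code the patch adds:

#include <stdint.h>
#include <stdbool.h>

/* Reference semantics for ADDHA/ADDVA on a dim x dim tile of uint32_t.
 * pn/pm are per-row and per-column predicates; 'vertical' selects ADDVA. */
static void adda_ref(uint32_t *za, const uint32_t *zn,
                     const bool *pn, const bool *pm,
                     int dim, bool vertical)
{
    for (int row = 0; row < dim; ++row) {
        for (int col = 0; col < dim; ++col) {
            if (pn[row] && pm[col]) {
                /* ADDVA adds zn[row] along each row; ADDHA adds zn[col]. */
                za[row * dim + col] += zn[vertical ? row : col];
            }
        }
    }
}
]
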
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-25-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h    |  5 +++
 target/arm/sme.decode      |  9 +++++
 target/arm/sme_helper.c    | 69 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sme.c | 32 ++++++++++++++++++
 4 files changed, 115 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32
 ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32
 ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64
 ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
+
+### SME Outer Product
+
+&op zad zn zm pm pn sub:bool
+@op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op
+@op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op
+
+FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
+FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/cpu_ldst.h"
 #include "exec/exec-all.h"
 #include "qemu/int128.h"
+#include "fpu/softfloat.h"
 #include "vec_internal.h"
 #include "sve_ldst_internal.h"

@@ -XXX,XX +XXX,XX @@ void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
         }
     }
 }
+
+void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
+                         void *vpm, void *vst, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_maxsz(desc);
+    uint32_t neg = simd_data(desc) << 31;
+    uint16_t *pn = vpn, *pm = vpm;
+    float_status fpst;
+
+    /*
+     * Make a copy of float_status because this operation does not
+     * update the cumulative fp exception status.  It also produces
+     * default nans.
+     */
+    fpst = *(float_status *)vst;
+    set_default_nan_mode(true, &fpst);
+
+    for (row = 0; row < oprsz; ) {
+        uint16_t pa = pn[H2(row >> 4)];
+        do {
+            if (pa & 1) {
+                void *vza_row = vza + tile_vslice_offset(row);
+                uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg;
+
+                for (col = 0; col < oprsz; ) {
+                    uint16_t pb = pm[H2(col >> 4)];
+                    do {
+                        if (pb & 1) {
+                            uint32_t *a = vza_row + H1_4(col);
+                            uint32_t *m = vzm + H1_4(col);
+                            *a = float32_muladd(n, *m, *a, 0, &fpst);
+                        }
+                        col += 4;
+                        pb >>= 4;
+                    } while (col & 15);
+                }
+            }
+            row += 4;
+            pa >>= 4;
+        } while (row & 15);
+    }
+}
+
+void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
+                         void *vpm, void *vst, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+    uint64_t neg = (uint64_t)simd_data(desc) << 63;
+    uint64_t *za = vza, *zn = vzn, *zm = vzm;
+    uint8_t *pn = vpn, *pm = vpm;
+    float_status fpst = *(float_status *)vst;
+
+    set_default_nan_mode(true, &fpst);
+
+    for (row = 0; row < oprsz; ++row) {
+        if (pn[H1(row)] & 1) {
+            uint64_t *za_row = &za[tile_vslice_index(row)];
+            uint64_t n = zn[row] ^ neg;
+
+            for (col = 0; col < oprsz; ++col) {
+                if (pm[H1(col)] & 1) {
+                    uint64_t *a = &za_row[col];
+                    *a = float64_muladd(n, zm[col], *a, 0, &fpst);
+                }
+            }
+        }
+    }
+}
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s)
 TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
 TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
 TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
+
+static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
+                            gen_helper_gvec_5_ptr *fn)
+{
+    int svl = streaming_vec_reg_size(s);
+    uint32_t desc = simd_desc(svl, svl, a->sub);
+    TCGv_ptr za, zn, zm, pn, pm, fpst;
+
+    if (!sme_smza_enabled_check(s)) {
+        return true;
+    }
+
+    /* Sum XZR+zad to find ZAd. */
+    za = get_tile_rowcol(s, esz, 31, a->zad, false);
+    zn = vec_full_reg_ptr(s, a->zn);
+    zm = vec_full_reg_ptr(s, a->zm);
+    pn = pred_full_reg_ptr(s, a->pn);
+    pm = pred_full_reg_ptr(s, a->pm);
+    fpst = fpstatus_ptr(FPST_FPCR);
+
+    fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc));
+
+    tcg_temp_free_ptr(za);
+    tcg_temp_free_ptr(zn);
+    tcg_temp_free_ptr(pn);
+    tcg_temp_free_ptr(pm);
+    tcg_temp_free_ptr(fpst);
+    return true;
+}
+
+TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
+TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
-- 
2.25.1
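
[Note: stripped of the predicate bit-walking and softfloat plumbing, the
FMOPA/FMOPS helpers above perform a predicated rank-1 update of the ZA
tile. A scalar sketch of that semantics, using plain C floats and fmaf()
in place of float32_muladd -- illustrative only, not the helper itself:

#include <math.h>
#include <stdbool.h>

/* za[r][c] += (-1)^sub * zn[r] * zm[c] for each element whose row and
 * column predicates are both true, using a fused multiply-add as the
 * helper's float32_muladd call does. */
static void fmopa_ref(float *za, const float *zn, const float *zm,
                      const bool *pn, const bool *pm, int dim, int sub)
{
    for (int r = 0; r < dim; ++r) {
        for (int c = 0; c < dim; ++c) {
            if (pn[r] && pm[c]) {
                float n = sub ? -zn[r] : zn[r];
                za[r * dim + c] = fmaf(n, zm[c], za[r * dim + c]);
            }
        }
    }
}

The helper gets the same effect more cheaply by xor-ing the sign bit into
the row element once ('neg'), instead of negating per element.]
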
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-26-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h    |  2 ++
 target/arm/sme.decode      |  2 ++
 target/arm/sme_helper.c    | 56 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sme.c | 30 ++++++++++++++++++++
 4 files changed, 90 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64

 FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
 FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
+
+BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
         }
     }
 }
+
+/*
+ * Alter PAIR as needed for controlling predicates being false,
+ * and for NEG on an enabled row element.
+ */
+static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
+{
+    /*
+     * The pseudocode uses a conditional negate after the conditional zero.
+     * It is simpler here to unconditionally negate before conditional zero.
+     */
+    pair ^= neg;
+    if (!(pg & 1)) {
+        pair &= 0xffff0000u;
+    }
+    if (!(pg & 4)) {
+        pair &= 0x0000ffffu;
+    }
+    return pair;
+}
+
+void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
+                        void *vpm, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_maxsz(desc);
+    uint32_t neg = simd_data(desc) * 0x80008000u;
+    uint16_t *pn = vpn, *pm = vpm;
+
+    for (row = 0; row < oprsz; ) {
+        uint16_t prow = pn[H2(row >> 4)];
+        do {
+            void *vza_row = vza + tile_vslice_offset(row);
+            uint32_t n = *(uint32_t *)(vzn + H1_4(row));
+
+            n = f16mop_adj_pair(n, prow, neg);
+
+            for (col = 0; col < oprsz; ) {
+                uint16_t pcol = pm[H2(col >> 4)];
+                do {
+                    if (prow & pcol & 0b0101) {
+                        uint32_t *a = vza_row + H1_4(col);
+                        uint32_t m = *(uint32_t *)(vzm + H1_4(col));
+
+                        m = f16mop_adj_pair(m, pcol, 0);
+                        *a = bfdotadd(*a, n, m);
+                    }
+                    col += 4;
+                    pcol >>= 4;
+                } while (col & 15);
+            }
+            row += 4;
+            prow >>= 4;
+        } while (row & 15);
+    }
+}
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
 TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
 TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)

+static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz,
+                       gen_helper_gvec_5 *fn)
+{
+    int svl = streaming_vec_reg_size(s);
+    uint32_t desc = simd_desc(svl, svl, a->sub);
+    TCGv_ptr za, zn, zm, pn, pm;
+
+    if (!sme_smza_enabled_check(s)) {
+        return true;
+    }
+
+    /* Sum XZR+zad to find ZAd. */
+    za = get_tile_rowcol(s, esz, 31, a->zad, false);
+    zn = vec_full_reg_ptr(s, a->zn);
+    zm = vec_full_reg_ptr(s, a->zm);
+    pn = pred_full_reg_ptr(s, a->pn);
+    pm = pred_full_reg_ptr(s, a->pm);
+
+    fn(za, zn, zm, pn, pm, tcg_constant_i32(desc));
+
+    tcg_temp_free_ptr(za);
+    tcg_temp_free_ptr(zn);
+    tcg_temp_free_ptr(pn);
+    tcg_temp_free_ptr(pm);
+    return true;
+}
+
 static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
                             gen_helper_gvec_5_ptr *fn)
 {
@@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,

 TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
 TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
+
+/* TODO: FEAT_EBF16 */
+TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa)
-- 
2.25.1
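
[Note: the two predicate bits tested by f16mop_adj_pair (pg & 1 and pg & 4)
correspond to the low and high 16-bit elements of the 32-bit pair, since
SVE predicates carry one bit per byte. A standalone copy of the function
with two worked calls, for readers who want to see it in isolation (the
main() driver is illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* 'pair' holds two 16-bit elements; pg bit 0 keeps the low element,
 * pg bit 2 the high one; 'neg' optionally flips sign bits first. */
static uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
{
    pair ^= neg;
    if (!(pg & 1)) {
        pair &= 0xffff0000u;
    }
    if (!(pg & 4)) {
        pair &= 0x0000ffffu;
    }
    return pair;
}

int main(void)
{
    /* Keep only the low element of the pair 0xBBBBAAAA: prints 0000aaaa. */
    printf("%08x\n", f16mop_adj_pair(0xBBBBAAAAu, 0x1, 0));
    /* Negate both elements, keep both: prints 3bbb2aaa. */
    printf("%08x\n", f16mop_adj_pair(0xBBBBAAAAu, 0x5, 0x80008000u));
    return 0;
}
]
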
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-27-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h    |  2 ++
 target/arm/sme.decode      |  1 +
 target/arm/sme_helper.c    | 74 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sme.c |  1 +
 4 files changed, 78 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
 FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64

 BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
+FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@ static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
     return pair;
 }

+static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2,
+                          float_status *s_std, float_status *s_odd)
+{
+    float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std);
+    float64 e1c = float16_to_float64(e1 >> 16, true, s_std);
+    float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std);
+    float64 e2c = float16_to_float64(e2 >> 16, true, s_std);
+    float64 t64;
+    float32 t32;
+
+    /*
+     * The ARM pseudocode function FPDot performs both multiplies
+     * and the add with a single rounding operation.  Emulate this
+     * by performing the first multiply in round-to-odd, then doing
+     * the second multiply as fused multiply-add, and rounding to
+     * float32 all in one step.
+     */
+    t64 = float64_mul(e1r, e2r, s_odd);
+    t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std);
+
+    /* This conversion is exact, because we've already rounded. */
+    t32 = float64_to_float32(t64, s_std);
+
+    /* The final accumulation step is not fused. */
+    return float32_add(sum, t32, s_std);
+}
+
+void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
+                         void *vpm, void *vst, uint32_t desc)
+{
+    intptr_t row, col, oprsz = simd_maxsz(desc);
+    uint32_t neg = simd_data(desc) * 0x80008000u;
+    uint16_t *pn = vpn, *pm = vpm;
+    float_status fpst_odd, fpst_std;
+
+    /*
+     * Make a copy of float_status because this operation does not
+     * update the cumulative fp exception status.  It also produces
+     * default nans.  Make a second copy with round-to-odd -- see above.
+     */
+    fpst_std = *(float_status *)vst;
+    set_default_nan_mode(true, &fpst_std);
+    fpst_odd = fpst_std;
+    set_float_rounding_mode(float_round_to_odd, &fpst_odd);
+
+    for (row = 0; row < oprsz; ) {
+        uint16_t prow = pn[H2(row >> 4)];
+        do {
+            void *vza_row = vza + tile_vslice_offset(row);
+            uint32_t n = *(uint32_t *)(vzn + H1_4(row));
+
+            n = f16mop_adj_pair(n, prow, neg);
+
+            for (col = 0; col < oprsz; ) {
+                uint16_t pcol = pm[H2(col >> 4)];
+                do {
+                    if (prow & pcol & 0b0101) {
+                        uint32_t *a = vza_row + H1_4(col);
+                        uint32_t m = *(uint32_t *)(vzm + H1_4(col));
+
+                        m = f16mop_adj_pair(m, pcol, 0);
+                        *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd);
+                    }
+                    col += 4;
+                    pcol >>= 4;
+                } while (col & 15);
+            }
+            row += 4;
+            prow >>= 4;
+        } while (row & 15);
+    }
+}
+
 void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
                         void *vpm, uint32_t desc)
 {
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
     return true;
 }

+TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_h)
 TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
 TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
-- 
2.25.1

From: Igor Mammedov <imammedo@redhat.com>

load_dtb() depends on arm_load_kernel() to figure out the place
in RAM where it should be loaded, but it's not required for
arm_load_kernel() to work. Sometimes it's necessary for
devices added with -device/device_add to be enumerated in the
DTB as well, which led to [1] and surrounding commits adding
2 more machine_done notifiers with non-obvious ordering
to make dynamic sysbus device initialization happen in
the right order.

However, instead of moving the whole of arm_load_kernel() into
machine_done, it's sufficient to move only load_dtb() into the
virt_machine_done() notifier and remove the ArmLoadKernelNotifier/
PlatformBusFDTNotifierParams notifiers, which saves us ~90LOC
and simplifies the code flow quite a bit.
Later, this would allow consolidating DTB generation within one
function for the 'mach-virt' board and making it reentrant, so it
could generate an updated DTB in device hotplug scenarios.

While at it, rename load_dtb() to arm_load_dtb() since it's
public now.

Add an additional field skip_dtb_autoload to struct arm_boot_info
to allow manual DTB load later in mach-virt and to avoid touching
all other boards to explicitly call arm_load_dtb().

1) (ac9d32e hw/arm/boot: arm_load_kernel implemented as a machine init done notifier)

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Message-id: 1525691524-32265-4-git-send-email-imammedo@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/arm.h        | 45 +++++++++++++++++------
 include/hw/arm/sysbus-fdt.h | 37 ++++---------------
 hw/arm/boot.c               | 72 ++++++++++---------------------------
 hw/arm/sysbus-fdt.c         | 61 +++----------------------------
 hw/arm/virt.c               | 64 ++++++++++++++++-----------------
 5 files changed, 94 insertions(+), 185 deletions(-)

diff --git a/include/hw/arm/arm.h b/include/hw/arm/arm.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/arm.h
+++ b/include/hw/arm/arm.h
@@ -XXX,XX +XXX,XX @@ DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,
  */
 void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size);

-/*
- * struct used as a parameter of the arm_load_kernel machine init
- * done notifier
- */
-typedef struct {
-    Notifier notifier; /* actual notifier */
-    ARMCPU *cpu; /* handle to the first cpu object */
-} ArmLoadKernelNotifier;
-
 /* arm_boot.c */
 struct arm_boot_info {
     uint64_t ram_size;
@@ -XXX,XX +XXX,XX @@ struct arm_boot_info {
     const char *initrd_filename;
     const char *dtb_filename;
     hwaddr loader_start;
+    hwaddr dtb_start;
+    hwaddr dtb_limit;
+    /* If set to True, arm_load_kernel() will not load DTB.
+     * It allows board to load DTB manually later.
+     * (default: False)
+     */
+    bool skip_dtb_autoload;
     /* multicore boards that use the default secondary core boot functions
      * need to put the address of the secondary boot code, the boot reg,
      * and the GIC address in the next 3 values, respectively. boards that
@@ -XXX,XX +XXX,XX @@ struct arm_boot_info {
      * the user it should implement this hook.
      */
     void (*modify_dtb)(const struct arm_boot_info *info, void *fdt);
-    /* machine init done notifier executing arm_load_dtb */
-    ArmLoadKernelNotifier load_kernel_notifier;
     /* Used internally by arm_boot.c */
     int is_linux;
     hwaddr initrd_start;
@@ -XXX,XX +XXX,XX @@ struct arm_boot_info {
  */
 void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info);

+AddressSpace *arm_boot_address_space(ARMCPU *cpu,
+                                     const struct arm_boot_info *info);
+
+/**
+ * arm_load_dtb() - load a device tree binary image into memory
+ * @addr:       the address to load the image at
+ * @binfo:      struct describing the boot environment
+ * @addr_limit: upper limit of the available memory area at @addr
+ * @as:         address space to load image to
+ *
+ * Load a device tree supplied by the machine or by the user with the
+ * '-dtb' command line option, and put it at offset @addr in target
+ * memory.
+ *
+ * If @addr_limit contains a meaningful value (i.e., it is strictly greater
+ * than @addr), the device tree is only loaded if its size does not exceed
+ * the limit.
+ *
+ * Returns: the size of the device tree image on success,
+ *          0 if the image size exceeds the limit,
+ *          -1 on errors.
+ *
+ * Note: Must not be called unless have_dtb(binfo) is true.
+ */
+int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
+                 hwaddr addr_limit, AddressSpace *as);
+
 /* Write a secure board setup routine with a dummy handler for SMCs */
 void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu,
                                             const struct arm_boot_info *info,
diff --git a/include/hw/arm/sysbus-fdt.h b/include/hw/arm/sysbus-fdt.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/sysbus-fdt.h
+++ b/include/hw/arm/sysbus-fdt.h
@@ -XXX,XX +XXX,XX @@
 #ifndef HW_ARM_SYSBUS_FDT_H
 #define HW_ARM_SYSBUS_FDT_H

-#include "hw/arm/arm.h"
-#include "qemu-common.h"
-#include "hw/sysbus.h"
-
-/*
- * struct that contains dimensioning parameters of the platform bus
- */
-typedef struct {
-    hwaddr platform_bus_base; /* start address of the bus */
-    hwaddr platform_bus_size; /* size of the bus */
-    int platform_bus_first_irq; /* first hwirq assigned to the bus */
-    int platform_bus_num_irqs; /* number of hwirq assigned to the bus */
-} ARMPlatformBusSystemParams;
-
-/*
- * struct that contains all relevant info to build the fdt nodes of
- * platform bus and attached dynamic sysbus devices
- * in the future might be augmented with additional info
- * such as PHY, CLK handles ...
- */
-typedef struct {
-    const ARMPlatformBusSystemParams *system_params;
-    struct arm_boot_info *binfo;
-    const char *intc; /* parent interrupt controller name */
-} ARMPlatformBusFDTParams;
+#include "exec/hwaddr.h"

 /**
- * arm_register_platform_bus_fdt_creator - register a machine init done
- * notifier that creates the device tree nodes of the platform bus and
- * associated dynamic sysbus devices
+ * platform_bus_add_all_fdt_nodes - create all the platform bus nodes
+ *
+ * builds the parent platform bus node and all the nodes of dynamic
+ * sysbus devices attached to it.
  */
-void arm_register_platform_bus_fdt_creator(ARMPlatformBusFDTParams *fdt_params);
-
+void platform_bus_add_all_fdt_nodes(void *fdt, const char *intc, hwaddr addr,
+                                    hwaddr bus_size, int irq_start);
 #endif
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@
 #define ARM64_TEXT_OFFSET_OFFSET    8
 #define ARM64_MAGIC_OFFSET          56

-static AddressSpace *arm_boot_address_space(ARMCPU *cpu,
-                                            const struct arm_boot_info *info)
+AddressSpace *arm_boot_address_space(ARMCPU *cpu,
+                                     const struct arm_boot_info *info)
 {
     /* Return the address space to use for bootloader reads and writes.
      * We prefer the secure address space if the CPU has it and we're
@@ -XXX,XX +XXX,XX @@ static void fdt_add_psci_node(void *fdt)
     qemu_fdt_setprop_cell(fdt, "/psci", "migrate", migrate_fn);
 }

-/**
- * load_dtb() - load a device tree binary image into memory
- * @addr:       the address to load the image at
- * @binfo:      struct describing the boot environment
- * @addr_limit: upper limit of the available memory area at @addr
- * @as:         address space to load image to
- *
- * Load a device tree supplied by the machine or by the user with the
- * '-dtb' command line option, and put it at offset @addr in target
- * memory.
- *
- * If @addr_limit contains a meaningful value (i.e., it is strictly greater
- * than @addr), the device tree is only loaded if its size does not exceed
- * the limit.
- *
- * Returns: the size of the device tree image on success,
- *          0 if the image size exceeds the limit,
- *          -1 on errors.
- *
- * Note: Must not be called unless have_dtb(binfo) is true.
- */
-static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
-                    hwaddr addr_limit, AddressSpace *as)
+int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
+                 hwaddr addr_limit, AddressSpace *as)
 {
     void *fdt = NULL;
     int size, rc;
@@ -XXX,XX +XXX,XX @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
     return size;
 }

-static void arm_load_kernel_notify(Notifier *notifier, void *data)
+void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
 {
     CPUState *cs;
     int kernel_size;
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
     int elf_machine;
     hwaddr entry;
     static const ARMInsnFixup *primary_loader;
-    ArmLoadKernelNotifier *n = DO_UPCAST(ArmLoadKernelNotifier,
-                                         notifier, notifier);
-    ARMCPU *cpu = n->cpu;
-    struct arm_boot_info *info =
-        container_of(n, struct arm_boot_info, load_kernel_notifier);
     AddressSpace *as = arm_boot_address_space(cpu, info);

     /* The board code is not supposed to set secure_board_setup unless
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
     assert(!(info->secure_board_setup && kvm_enabled()));

     info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
+    info->dtb_limit = 0;

     /* Load the kernel.  */
     if (!info->kernel_filename || info->firmware_loaded) {
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
          * the kernel is supposed to be loaded by the bootloader), copy the
          * DTB to the base of RAM for the bootloader to pick up.
          */
-        if (load_dtb(info->loader_start, info, 0, as) < 0) {
-            exit(1);
-        }
+        info->dtb_start = info->loader_start;
     }

     if (info->kernel_filename) {
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
              */
             if (elf_low_addr > info->loader_start
                 || elf_high_addr < info->loader_start) {
-                /* Pass elf_low_addr as address limit to load_dtb if it may be
+                /* Set elf_low_addr as address limit for arm_load_dtb if it may be
                  * pointing into RAM, otherwise pass '0' (no limit)
                  */
                 if (elf_low_addr < info->loader_start) {
                     elf_low_addr = 0;
                 }
-                if (load_dtb(info->loader_start, info, elf_low_addr, as) < 0) {
-                    exit(1);
-                }
+                info->dtb_start = info->loader_start;
+                info->dtb_limit = elf_low_addr;
             }
         }
         entry = elf_entry;
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
          */
         if (have_dtb(info)) {
             hwaddr align;
-            hwaddr dtb_start;

             if (elf_machine == EM_AARCH64) {
                 /*
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
             }

             /* Place the DTB after the initrd in memory with alignment. */
-            dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size, align);
-            if (load_dtb(dtb_start, info, 0, as) < 0) {
-                exit(1);
-            }
-            fixupcontext[FIXUP_ARGPTR] = dtb_start;
+            info->dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size,
+                                            align);
+            fixupcontext[FIXUP_ARGPTR] = info->dtb_start;
         } else {
             fixupcontext[FIXUP_ARGPTR] = info->loader_start + KERNEL_ARGS_ADDR;
             if (info->ram_size >= (1ULL << 32)) {
@@ -XXX,XX +XXX,XX @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
     for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
         ARM_CPU(cs)->env.boot_info = info;
     }
-}
-
-void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
-{
-    CPUState *cs;
-
-    info->load_kernel_notifier.cpu = cpu;
-    info->load_kernel_notifier.notifier.notify = arm_load_kernel_notify;
-    qemu_add_machine_init_done_notifier(&info->load_kernel_notifier.notifier);

     /* CPU objects (unlike devices) are not automatically reset on system
      * reset, so we must always register a handler to do so. If we're
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
     for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
         qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
     }
+
+    if (!info->skip_dtb_autoload && have_dtb(info)) {
+        if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as) < 0) {
+            exit(1);
+        }
+    }
 }

 static const TypeInfo arm_linux_boot_if_info = {
diff --git a/hw/arm/sysbus-fdt.c b/hw/arm/sysbus-fdt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/sysbus-fdt.c
+++ b/hw/arm/sysbus-fdt.c
@@ -XXX,XX +XXX,XX @@ typedef struct PlatformBusFDTData {
     PlatformBusDevice *pbus;
 } PlatformBusFDTData;

-/*
- * struct used when calling the machine init done notifier
- * that constructs the fdt nodes of platform bus devices
- */
-typedef struct PlatformBusFDTNotifierParams {
-    Notifier notifier;
-    ARMPlatformBusFDTParams *fdt_params;
-} PlatformBusFDTNotifierParams;
-
 /* struct that associates a device type name and a node creation function */
 typedef struct NodeCreationPair {
     const char *typename;
@@ -XXX,XX +XXX,XX @@ static void add_fdt_node(SysBusDevice *sbdev, void *opaque)
     exit(1);
 }

-/**
- * add_all_platform_bus_fdt_nodes - create all the platform bus nodes
- *
- * builds the parent platform bus node and all the nodes of dynamic
- * sysbus devices attached to it.
- */
-static void add_all_platform_bus_fdt_nodes(ARMPlatformBusFDTParams *fdt_params)
+void platform_bus_add_all_fdt_nodes(void *fdt, const char *intc, hwaddr addr,
+                                    hwaddr bus_size, int irq_start)
 {
     const char platcomp[] = "qemu,platform\0simple-bus";
     PlatformBusDevice *pbus;
     DeviceState *dev;
     gchar *node;
-    uint64_t addr, size;
-    int irq_start, dtb_size;
-    struct arm_boot_info *info = fdt_params->binfo;
-    const ARMPlatformBusSystemParams *params = fdt_params->system_params;
-    const char *intc = fdt_params->intc;
-    void *fdt = info->get_dtb(info, &dtb_size);
-
-    /*
-     * If the user provided a dtb, we assume the dynamic sysbus nodes
-     * already are integrated there. This corresponds to a use case where
-     * the dynamic sysbus nodes are complex and their generation is not yet
-     * supported. In that case the user can take charge of the guest dt
-     * while qemu takes charge of the qom stuff.
-     */
-    if (info->dtb_filename) {
-        return;
-    }

     assert(fdt);

-    node = g_strdup_printf("/platform@%"PRIx64, params->platform_bus_base);
-    addr = params->platform_bus_base;
-    size = params->platform_bus_size;
-    irq_start = params->platform_bus_first_irq;
+    node = g_strdup_printf("/platform@%"PRIx64, addr);

     /* Create a /platform node that we can put all devices into */
     qemu_fdt_add_subnode(fdt, node);
@@ -XXX,XX +XXX,XX @@ static void add_all_platform_bus_fdt_nodes(ARMPlatformBusFDTParams *fdt_params)
      */
     qemu_fdt_setprop_cells(fdt, node, "#size-cells", 1);
     qemu_fdt_setprop_cells(fdt, node, "#address-cells", 1);
-    qemu_fdt_setprop_cells(fdt, node, "ranges", 0, addr >> 32, addr, size);
+    qemu_fdt_setprop_cells(fdt, node, "ranges", 0, addr >> 32, addr, bus_size);

     qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", intc);

@@ -XXX,XX +XXX,XX @@ static void add_all_platform_bus_fdt_nodes(ARMPlatformBusFDTParams *fdt_params)

     g_free(node);
 }
-
-static void platform_bus_fdt_notify(Notifier *notifier, void *data)
-{
-    PlatformBusFDTNotifierParams *p = DO_UPCAST(PlatformBusFDTNotifierParams,
-                                                notifier, notifier);
-
-    add_all_platform_bus_fdt_nodes(p->fdt_params);
-    g_free(p->fdt_params);
-    g_free(p);
-}
-
-void arm_register_platform_bus_fdt_creator(ARMPlatformBusFDTParams *fdt_params)
-{
-    PlatformBusFDTNotifierParams *p = g_new(PlatformBusFDTNotifierParams, 1);
-
-    p->fdt_params = fdt_params;
-    p->notifier.notify = platform_bus_fdt_notify;
-    qemu_add_machine_init_done_notifier(&p->notifier);
-}
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -XXX,XX +XXX,XX @@

 #define PLATFORM_BUS_NUM_IRQS 64

-static ARMPlatformBusSystemParams platform_bus_params;
-
 /* RAM limit in GB. Since VIRT_MEM starts at the 1GB mark, this means
  * RAM can go up to the 256GB mark, leaving 256GB of the physical
  * address space unallocated and free for future use between 256G and 512G.
@@ -XXX,XX +XXX,XX @@ static void create_platform_bus(VirtMachineState *vms, qemu_irq *pic)
     DeviceState *dev;
     SysBusDevice *s;
     int i;
-    ARMPlatformBusFDTParams *fdt_params = g_new(ARMPlatformBusFDTParams, 1);
     MemoryRegion *sysmem = get_system_memory();

-    platform_bus_params.platform_bus_base = vms->memmap[VIRT_PLATFORM_BUS].base;
-    platform_bus_params.platform_bus_size = vms->memmap[VIRT_PLATFORM_BUS].size;
-    platform_bus_params.platform_bus_first_irq = vms->irqmap[VIRT_PLATFORM_BUS];
-    platform_bus_params.platform_bus_num_irqs = PLATFORM_BUS_NUM_IRQS;
-
-    fdt_params->system_params = &platform_bus_params;
-    fdt_params->binfo = &vms->bootinfo;
-    fdt_params->intc = "/intc";
-    /*
-     * register a machine init done notifier that creates the device tree
-     * nodes of the platform bus and its children dynamic sysbus devices
-     */
-    arm_register_platform_bus_fdt_creator(fdt_params);
-
     dev = qdev_create(NULL, TYPE_PLATFORM_BUS_DEVICE);
     dev->id = TYPE_PLATFORM_BUS_DEVICE;
-    qdev_prop_set_uint32(dev, "num_irqs",
-                         platform_bus_params.platform_bus_num_irqs);
-    qdev_prop_set_uint32(dev, "mmio_size",
-                         platform_bus_params.platform_bus_size);
+    qdev_prop_set_uint32(dev, "num_irqs", PLATFORM_BUS_NUM_IRQS);
+    qdev_prop_set_uint32(dev, "mmio_size", vms->memmap[VIRT_PLATFORM_BUS].size);
     qdev_init_nofail(dev);
     vms->platform_bus_dev = dev;
-    s = SYS_BUS_DEVICE(dev);

-    for (i = 0; i < platform_bus_params.platform_bus_num_irqs; i++) {
-        int irqn = platform_bus_params.platform_bus_first_irq + i;
+    s = SYS_BUS_DEVICE(dev);
+    for (i = 0; i < PLATFORM_BUS_NUM_IRQS; i++) {
+        int irqn = vms->irqmap[VIRT_PLATFORM_BUS] + i;
         sysbus_connect_irq(s, i, pic[irqn]);
     }

     memory_region_add_subregion(sysmem,
-                                platform_bus_params.platform_bus_base,
+                                vms->memmap[VIRT_PLATFORM_BUS].base,
                                 sysbus_mmio_get_region(s, 0));
 }

@@ -XXX,XX +XXX,XX @@ void virt_machine_done(Notifier *notifier, void *data)
 {
     VirtMachineState *vms = container_of(notifier, VirtMachineState,
                                          machine_done);
+    ARMCPU *cpu = ARM_CPU(first_cpu);
+    struct arm_boot_info *info = &vms->bootinfo;
+    AddressSpace *as = arm_boot_address_space(cpu, info);
+
+    /*
+     * If the user provided a dtb, we assume the dynamic sysbus nodes
+     * already are integrated there. This corresponds to a use case where
+     * the dynamic sysbus nodes are complex and their generation is not yet
+     * supported. In that case the user can take charge of the guest dt
+     * while qemu takes charge of the qom stuff.
+     */
+    if (info->dtb_filename == NULL) {
+        platform_bus_add_all_fdt_nodes(vms->fdt, "/intc",
+                                       vms->memmap[VIRT_PLATFORM_BUS].base,
+                                       vms->memmap[VIRT_PLATFORM_BUS].size,
+                                       vms->irqmap[VIRT_PLATFORM_BUS]);
+    }
+    if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as) < 0) {
+        exit(1);
+    }

     virt_acpi_setup(vms);
     virt_build_smbios(vms);
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
     vms->fw_cfg = create_fw_cfg(vms, &address_space_memory);
     rom_set_fw(vms->fw_cfg);

-    vms->machine_done.notify = virt_machine_done;
-    qemu_add_machine_init_done_notifier(&vms->machine_done);
+    create_platform_bus(vms, pic);

     vms->bootinfo.ram_size = machine->ram_size;
     vms->bootinfo.kernel_filename = machine->kernel_filename;
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
     vms->bootinfo.board_id = -1;
     vms->bootinfo.loader_start = vms->memmap[VIRT_MEM].base;
     vms->bootinfo.get_dtb = machvirt_dtb;
+    vms->bootinfo.skip_dtb_autoload = true;
     vms->bootinfo.firmware_loaded = firmware_loaded;
     arm_load_kernel(ARM_CPU(first_cpu), &vms->bootinfo);

-    /*
-     * arm_load_kernel machine init done notifier registration must
-     * happen before the platform_bus_create call. In this latter,
-     * another notifier is registered which adds platform bus nodes.
-     * Notifiers are executed in registration reverse order.
-     */
-    create_platform_bus(vms, pic);
+    vms->machine_done.notify = virt_machine_done;
+    qemu_add_machine_init_done_notifier(&vms->machine_done);
 }

 static bool virt_get_secure(Object *obj, Error **errp)
-- 
2.17.0
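
[Note: the new contract between arm_load_kernel() and board code in the
patch above can be condensed to a few lines. A sketch distilled from the
virt.c hunks, with hypothetical example_* names -- not a drop-in board
implementation:

/* Machine-done notifier: every -device sysbus device exists by now, so
 * dynamic fdt nodes can be generated before the blob is placed in RAM. */
static void example_machine_done(Notifier *notifier, void *data)
{
    VirtMachineState *vms = container_of(notifier, VirtMachineState,
                                         machine_done);
    struct arm_boot_info *info = &vms->bootinfo;
    AddressSpace *as = arm_boot_address_space(ARM_CPU(first_cpu), info);

    if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as) < 0) {
        exit(1);
    }
}

static void example_machine_init(VirtMachineState *vms)
{
    /* Defer the DTB load that arm_load_kernel() would otherwise do;
     * arm_load_kernel() still computes dtb_start/dtb_limit for us. */
    vms->bootinfo.skip_dtb_autoload = true;
    arm_load_kernel(ARM_CPU(first_cpu), &vms->bootinfo);

    vms->machine_done.notify = example_machine_done;
    qemu_add_machine_init_done_notifier(&vms->machine_done);
}
]
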
From: Richard Henderson <richard.henderson@linaro.org>

Given that this atomic operation will be used by both risc-v
and aarch64, let's not duplicate code across the two targets.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180508151437.4232-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 accel/tcg/atomic_template.h | 71 +++++++++++++++++++++++++++++++++++++
 accel/tcg/tcg-runtime.h     |  8 +++++
 tcg/tcg-op.h                | 34 ++++++++++++++++++
 tcg/tcg.h                   |  8 +++++
 tcg/tcg-op.c                |  8 +++++
 5 files changed, 129 insertions(+)

diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -XXX,XX +XXX,XX @@
 #elif DATA_SIZE == 8
 # define SUFFIX     q
 # define DATA_TYPE  uint64_t
+# define SDATA_TYPE int64_t
 # define BSWAP      bswap64
 #elif DATA_SIZE == 4
 # define SUFFIX     l
 # define DATA_TYPE  uint32_t
+# define SDATA_TYPE int32_t
 # define BSWAP      bswap32
 #elif DATA_SIZE == 2
 # define SUFFIX     w
 # define DATA_TYPE  uint16_t
+# define SDATA_TYPE int16_t
 # define BSWAP      bswap16
 #elif DATA_SIZE == 1
 # define SUFFIX     b
 # define DATA_TYPE  uint8_t
+# define SDATA_TYPE int8_t
 # define BSWAP
 #else
 # error unsupported data size
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER(or_fetch)
 GEN_ATOMIC_HELPER(xor_fetch)

 #undef GEN_ATOMIC_HELPER
+
+/* These helpers are, as a whole, full barriers.  Within the helper,
+ * the leading barrier is explicit and the trailing barrier is within
+ * cmpxchg primitive.
+ */
+#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
+                        ABI_TYPE xval EXTRA_ARGS) \
+{ \
+    ATOMIC_MMU_DECLS; \
+    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
+    XDATA_TYPE cmp, old, new, val = xval; \
+    smp_mb(); \
+    cmp = atomic_read__nocheck(haddr); \
+    do { \
+        old = cmp; new = FN(old, val); \
+        cmp = atomic_cmpxchg__nocheck(haddr, old, new); \
+    } while (cmp != old); \
+    ATOMIC_MMU_CLEANUP; \
+    return RET; \
+}
+
+GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
+GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
+GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
+GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
+
+GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
+GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
+GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
+GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
+
+#undef GEN_ATOMIC_HELPER_FN
 #endif /* DATA SIZE >= 16 */

 #undef END
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr,
         ldo = ldn;
     }
 }
+
+/* These helpers are, as a whole, full barriers.  Within the helper,
+ * the leading barrier is explicit and the trailing barrier is within
+ * cmpxchg primitive.
+ */
+#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
+                        ABI_TYPE xval EXTRA_ARGS) \
+{ \
+    ATOMIC_MMU_DECLS; \
+    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
+    XDATA_TYPE ldo, ldn, old, new, val = xval; \
+    smp_mb(); \
+    ldn = atomic_read__nocheck(haddr); \
+    do { \
+        ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
+        ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
+    } while (ldo != ldn); \
+    ATOMIC_MMU_CLEANUP; \
+    return RET; \
+}
+
+GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
+GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
+GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
+GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)

From: Richard Henderson <richard.henderson@linaro.org>

This is SMOPA, SUMOPA, USMOPA_s, UMOPA, for both Int8 and Int16.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-28-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h    | 16 ++++++++
 target/arm/sme.decode      | 10 +++++
 target/arm/sme_helper.c    | 82 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sme.c | 10 +++++
 4 files changed, 118 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_sumopa_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_usmopa_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_smopa_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_umopa_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64

 BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
 FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
+
+SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32
+SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32
+USMOPA_s 1010000 1 10 0 ..... ... ... ..... . 00 .. @op_32
+UMOPA_s 1010000 1 10 1 ..... ... ... ..... . 00 .. @op_32
+
+SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64
+SUMOPA_d 1010000 0 11 1 ..... ... ... ..... . 0 ... @op_64
+USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64
+UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
         } while (row & 15);
     }
 }
+
+typedef uint64_t IMOPFn(uint64_t, uint64_t, uint64_t, uint8_t, bool);
+
+static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm,
+                            uint8_t *pn, uint8_t *pm,
+                            uint32_t desc, IMOPFn *fn)
+{
+    intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+    bool neg = simd_data(desc);
+
+    for (row = 0; row < oprsz; ++row) {
+        uint8_t pa = pn[H1(row)];
+        uint64_t *za_row = &za[tile_vslice_index(row)];
+        uint64_t n = zn[row];
+
+        for (col = 0; col < oprsz; ++col) {
+            uint8_t pb = pm[H1(col)];
+            uint64_t *a = &za_row[col];
+
+            *a = fn(n, zm[col], *a, pa & pb, neg);
+        }
+    }
+}
+
+#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \
+static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
+{ \
+    uint32_t sum0 = 0, sum1 = 0; \
+    /* Apply P to N as a mask, making the inactive elements 0. */ \
+    n &= expand_pred_b(p); \
+    sum0 += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
+    sum0 += (NTYPE)(n >> 8) * (MTYPE)(m >> 8); \
+    sum0 += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
+    sum0 += (NTYPE)(n >> 24) * (MTYPE)(m >> 24); \
+    sum1 += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \
+    sum1 += (NTYPE)(n >> 40) * (MTYPE)(m >> 40); \
+    sum1 += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \
+    sum1 += (NTYPE)(n >> 56) * (MTYPE)(m >> 56); \
104
+ if (neg) { \
105
+ sum0 = (uint32_t)a - sum0, sum1 = (uint32_t)(a >> 32) - sum1; \
106
+ } else { \
107
+ sum0 = (uint32_t)a + sum0, sum1 = (uint32_t)(a >> 32) + sum1; \
108
+ } \
109
+ return ((uint64_t)sum1 << 32) | sum0; \
110
+}
115
+
111
+
116
+GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
112
+#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \
117
+GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
113
+static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
118
+GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
114
+{ \
119
+GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
115
+ uint64_t sum = 0; \
116
+ /* Apply P to N as a mask, making the inactive elements 0. */ \
117
+ n &= expand_pred_h(p); \
118
+ sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
119
+ sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
120
+ sum += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \
121
+ sum += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \
122
+ return neg ? a - sum : a + sum; \
123
+}
120
+
124
+
121
+#undef GEN_ATOMIC_HELPER_FN
125
+DEF_IMOP_32(smopa_s, int8_t, int8_t)
122
#endif /* DATA_SIZE >= 16 */
126
+DEF_IMOP_32(umopa_s, uint8_t, uint8_t)
123
127
+DEF_IMOP_32(sumopa_s, int8_t, uint8_t)
124
#undef END
128
+DEF_IMOP_32(usmopa_s, uint8_t, int8_t)
125
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr,
129
+
126
#undef BSWAP
130
+DEF_IMOP_64(smopa_d, int16_t, int16_t)
127
#undef ABI_TYPE
131
+DEF_IMOP_64(umopa_d, uint16_t, uint16_t)
128
#undef DATA_TYPE
132
+DEF_IMOP_64(sumopa_d, int16_t, uint16_t)
129
+#undef SDATA_TYPE
133
+DEF_IMOP_64(usmopa_d, uint16_t, int16_t)
130
#undef SUFFIX
134
+
131
#undef DATA_SIZE
135
+#define DEF_IMOPH(NAME) \
132
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
136
+ void HELPER(sme_##NAME)(void *vza, void *vzn, void *vzm, void *vpn, \
137
+ void *vpm, uint32_t desc) \
138
+ { do_imopa(vza, vzn, vzm, vpn, vpm, desc, NAME); }
139
+
140
+DEF_IMOPH(smopa_s)
141
+DEF_IMOPH(umopa_s)
142
+DEF_IMOPH(sumopa_s)
143
+DEF_IMOPH(usmopa_s)
144
+DEF_IMOPH(smopa_d)
145
+DEF_IMOPH(umopa_d)
146
+DEF_IMOPH(sumopa_d)
147
+DEF_IMOPH(usmopa_d)
148
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
133
index XXXXXXX..XXXXXXX 100644
149
index XXXXXXX..XXXXXXX 100644
134
--- a/accel/tcg/tcg-runtime.h
150
--- a/target/arm/translate-sme.c
135
+++ b/accel/tcg/tcg-runtime.h
151
+++ b/target/arm/translate-sme.c
136
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPERS(fetch_add)
152
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_f
137
GEN_ATOMIC_HELPERS(fetch_and)
153
138
GEN_ATOMIC_HELPERS(fetch_or)
154
/* TODO: FEAT_EBF16 */
139
GEN_ATOMIC_HELPERS(fetch_xor)
155
TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa)
140
+GEN_ATOMIC_HELPERS(fetch_smin)
141
+GEN_ATOMIC_HELPERS(fetch_umin)
142
+GEN_ATOMIC_HELPERS(fetch_smax)
143
+GEN_ATOMIC_HELPERS(fetch_umax)
144
145
GEN_ATOMIC_HELPERS(add_fetch)
146
GEN_ATOMIC_HELPERS(and_fetch)
147
GEN_ATOMIC_HELPERS(or_fetch)
148
GEN_ATOMIC_HELPERS(xor_fetch)
149
+GEN_ATOMIC_HELPERS(smin_fetch)
150
+GEN_ATOMIC_HELPERS(umin_fetch)
151
+GEN_ATOMIC_HELPERS(smax_fetch)
152
+GEN_ATOMIC_HELPERS(umax_fetch)
153
154
GEN_ATOMIC_HELPERS(xchg)
155
156
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
157
index XXXXXXX..XXXXXXX 100644
158
--- a/tcg/tcg-op.h
159
+++ b/tcg/tcg-op.h
160
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
161
162
void tcg_gen_atomic_xchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
163
void tcg_gen_atomic_xchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
164
+
156
+
165
void tcg_gen_atomic_fetch_add_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
157
+TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s)
166
void tcg_gen_atomic_fetch_add_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
158
+TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s)
167
void tcg_gen_atomic_fetch_and_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
159
+TRANS_FEAT(SUMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_sumopa_s)
168
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_fetch_or_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
160
+TRANS_FEAT(USMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_usmopa_s)
169
void tcg_gen_atomic_fetch_or_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
170
void tcg_gen_atomic_fetch_xor_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
171
void tcg_gen_atomic_fetch_xor_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
172
+void tcg_gen_atomic_fetch_smin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
173
+void tcg_gen_atomic_fetch_smin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
174
+void tcg_gen_atomic_fetch_umin_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
175
+void tcg_gen_atomic_fetch_umin_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
176
+void tcg_gen_atomic_fetch_smax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
177
+void tcg_gen_atomic_fetch_smax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
178
+void tcg_gen_atomic_fetch_umax_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
179
+void tcg_gen_atomic_fetch_umax_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
180
+
161
+
181
void tcg_gen_atomic_add_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
162
+TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_d)
182
void tcg_gen_atomic_add_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
163
+TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d)
183
void tcg_gen_atomic_and_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
164
+TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d)
184
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_or_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
165
+TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d)
185
void tcg_gen_atomic_or_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
186
void tcg_gen_atomic_xor_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
187
void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
188
+void tcg_gen_atomic_smin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
189
+void tcg_gen_atomic_smin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
190
+void tcg_gen_atomic_umin_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
191
+void tcg_gen_atomic_umin_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
192
+void tcg_gen_atomic_smax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
193
+void tcg_gen_atomic_smax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
194
+void tcg_gen_atomic_umax_fetch_i32(TCGv_i32, TCGv, TCGv_i32, TCGArg, TCGMemOp);
195
+void tcg_gen_atomic_umax_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
196
197
void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
198
void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);
199
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
200
#define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i64
201
#define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i64
202
#define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i64
203
+#define tcg_gen_atomic_fetch_smin_tl tcg_gen_atomic_fetch_smin_i64
204
+#define tcg_gen_atomic_fetch_umin_tl tcg_gen_atomic_fetch_umin_i64
205
+#define tcg_gen_atomic_fetch_smax_tl tcg_gen_atomic_fetch_smax_i64
206
+#define tcg_gen_atomic_fetch_umax_tl tcg_gen_atomic_fetch_umax_i64
207
#define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i64
208
#define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i64
209
#define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i64
210
#define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i64
211
+#define tcg_gen_atomic_smin_fetch_tl tcg_gen_atomic_smin_fetch_i64
212
+#define tcg_gen_atomic_umin_fetch_tl tcg_gen_atomic_umin_fetch_i64
213
+#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i64
214
+#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i64
215
#define tcg_gen_dup_tl_vec tcg_gen_dup_i64_vec
216
#else
217
#define tcg_gen_movi_tl tcg_gen_movi_i32
218
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
219
#define tcg_gen_atomic_fetch_and_tl tcg_gen_atomic_fetch_and_i32
220
#define tcg_gen_atomic_fetch_or_tl tcg_gen_atomic_fetch_or_i32
221
#define tcg_gen_atomic_fetch_xor_tl tcg_gen_atomic_fetch_xor_i32
222
+#define tcg_gen_atomic_fetch_smin_tl tcg_gen_atomic_fetch_smin_i32
223
+#define tcg_gen_atomic_fetch_umin_tl tcg_gen_atomic_fetch_umin_i32
224
+#define tcg_gen_atomic_fetch_smax_tl tcg_gen_atomic_fetch_smax_i32
225
+#define tcg_gen_atomic_fetch_umax_tl tcg_gen_atomic_fetch_umax_i32
226
#define tcg_gen_atomic_add_fetch_tl tcg_gen_atomic_add_fetch_i32
227
#define tcg_gen_atomic_and_fetch_tl tcg_gen_atomic_and_fetch_i32
228
#define tcg_gen_atomic_or_fetch_tl tcg_gen_atomic_or_fetch_i32
229
#define tcg_gen_atomic_xor_fetch_tl tcg_gen_atomic_xor_fetch_i32
230
+#define tcg_gen_atomic_smin_fetch_tl tcg_gen_atomic_smin_fetch_i32
231
+#define tcg_gen_atomic_umin_fetch_tl tcg_gen_atomic_umin_fetch_i32
232
+#define tcg_gen_atomic_smax_fetch_tl tcg_gen_atomic_smax_fetch_i32
233
+#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i32
234
#define tcg_gen_dup_tl_vec tcg_gen_dup_i32_vec
235
#endif
236
237
diff --git a/tcg/tcg.h b/tcg/tcg.h
238
index XXXXXXX..XXXXXXX 100644
239
--- a/tcg/tcg.h
240
+++ b/tcg/tcg.h
241
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER_ALL(fetch_sub)
242
GEN_ATOMIC_HELPER_ALL(fetch_and)
243
GEN_ATOMIC_HELPER_ALL(fetch_or)
244
GEN_ATOMIC_HELPER_ALL(fetch_xor)
245
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
246
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
247
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
248
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
249
250
GEN_ATOMIC_HELPER_ALL(add_fetch)
251
GEN_ATOMIC_HELPER_ALL(sub_fetch)
252
GEN_ATOMIC_HELPER_ALL(and_fetch)
253
GEN_ATOMIC_HELPER_ALL(or_fetch)
254
GEN_ATOMIC_HELPER_ALL(xor_fetch)
255
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
256
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
257
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
258
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
259
260
GEN_ATOMIC_HELPER_ALL(xchg)
261
262
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
263
index XXXXXXX..XXXXXXX 100644
264
--- a/tcg/tcg-op.c
265
+++ b/tcg/tcg-op.c
266
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER(fetch_add, add, 0)
267
GEN_ATOMIC_HELPER(fetch_and, and, 0)
268
GEN_ATOMIC_HELPER(fetch_or, or, 0)
269
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
270
+GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
271
+GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
272
+GEN_ATOMIC_HELPER(fetch_smax, smax, 0)
273
+GEN_ATOMIC_HELPER(fetch_umax, umax, 0)
274
275
GEN_ATOMIC_HELPER(add_fetch, add, 1)
276
GEN_ATOMIC_HELPER(and_fetch, and, 1)
277
GEN_ATOMIC_HELPER(or_fetch, or, 1)
278
GEN_ATOMIC_HELPER(xor_fetch, xor, 1)
279
+GEN_ATOMIC_HELPER(smin_fetch, smin, 1)
280
+GEN_ATOMIC_HELPER(umin_fetch, umin, 1)
281
+GEN_ATOMIC_HELPER(smax_fetch, smax, 1)
282
+GEN_ATOMIC_HELPER(umax_fetch, umax, 1)
283
284
static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b)
285
{
286
--
166
--
287
2.17.0
167
2.25.1
288
289
From: Richard Henderson <richard.henderson@linaro.org>

This is an SVE instruction that operates using the SVE vector
length but that is present only if SME is implemented.
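For intuition, PSEL picks one element of Pm, indexed by a general
register plus an immediate (modulo the element count), and uses that
single predicate bit to either copy Pn into Pd or zero Pd.  A plain-C
sketch of that semantic, with illustrative names only (the real
implementation is the TCG code in the diff below):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative only: 'elements' is VL >> esz and 'pl' is the predicate
 * register size in bytes.  Mirrors trans_PSEL minus the TCG plumbing. */
static void psel_sketch(uint8_t *pd, const uint8_t *pn, const uint8_t *pm,
                        uint64_t wv, uint64_t imm, int esz,
                        uint64_t elements, size_t pl)
{
    uint64_t idx = (wv + imm) % elements;      /* select one element */
    uint64_t bit = idx << esz;                 /* its bit index within Pm */
    bool active = (pm[bit >> 3] >> (bit & 7)) & 1;

    for (size_t i = 0; i < pl; i++) {
        pd[i] = active ? pn[i] : 0;            /* copy Pn, or all-false */
    }
}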

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-29-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/sve.decode      | 20 +++++++++++++
 target/arm/translate-sve.c | 57 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)

diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ BFMLALT_zzxw    01100100 11 1 ..... 0100.1 ..... .....   @rrxr_3a esz=2

 ### SVE2 floating-point bfloat16 dot-product (indexed)
 BFDOT_zzxz      01100100 01 1 ..... 010000 ..... .....   @rrxr_2 esz=2
+
+### SVE broadcast predicate element
+
+&psel           esz pd pn pm rv imm
+%psel_rv        16:2 !function=plus_12
+%psel_imm_b     22:2 19:2
+%psel_imm_h     22:2 20:1
+%psel_imm_s     22:2
+%psel_imm_d     23:1
+@psel           ........ .. . ... .. .. pn:4 . pm:4 . pd:4 \
+                &psel rv=%psel_rv
+
+PSEL            00100101 .. 1 ..1 .. 01 .... 0 .... 0 ....  \
+                @psel esz=0 imm=%psel_imm_b
+PSEL            00100101 .. 1 .10 .. 01 .... 0 .... 0 ....  \
+                @psel esz=1 imm=%psel_imm_h
+PSEL            00100101 .. 1 100 .. 01 .... 0 .... 0 ....  \
+                @psel esz=2 imm=%psel_imm_s
+PSEL            00100101 .1 1 000 .. 01 .... 0 .... 0 ....  \
+                @psel esz=3 imm=%psel_imm_d
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)

 TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
 TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true)
+
+static bool trans_PSEL(DisasContext *s, arg_psel *a)
+{
+    int vl = vec_full_reg_size(s);
+    int pl = pred_gvec_reg_size(s);
+    int elements = vl >> a->esz;
+    TCGv_i64 tmp, didx, dbit;
+    TCGv_ptr ptr;
+
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (!sve_access_check(s)) {
+        return true;
+    }
+
+    tmp = tcg_temp_new_i64();
+    dbit = tcg_temp_new_i64();
+    didx = tcg_temp_new_i64();
+    ptr = tcg_temp_new_ptr();
+
+    /* Compute the predicate element. */
+    tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm);
+    if (is_power_of_2(elements)) {
+        tcg_gen_andi_i64(tmp, tmp, elements - 1);
+    } else {
+        tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements));
+    }
+
+    /* Extract the predicate byte and bit indices. */
+    tcg_gen_shli_i64(tmp, tmp, a->esz);
+    tcg_gen_andi_i64(dbit, tmp, 7);
+    tcg_gen_shri_i64(didx, tmp, 3);
+    if (HOST_BIG_ENDIAN) {
+        tcg_gen_xori_i64(didx, didx, 7);
+    }
+
+    /* Load the predicate word. */
+    tcg_gen_trunc_i64_ptr(ptr, didx);
+    tcg_gen_add_ptr(ptr, ptr, cpu_env);
+    tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));
+
+    /* Extract the predicate bit and replicate to MO_64. */
+    tcg_gen_shr_i64(tmp, tmp, dbit);
+    tcg_gen_andi_i64(tmp, tmp, 1);
+    tcg_gen_neg_i64(tmp, tmp);
+
+    /* Apply to either copy the source, or write zeros. */
+    tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd),
+                      pred_full_reg_offset(s, a->pn), tmp, pl, pl);
+
+    tcg_temp_free_i64(tmp);
+    tcg_temp_free_i64(dbit);
+    tcg_temp_free_i64(didx);
+    tcg_temp_free_ptr(ptr);
+    return true;
+}
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

This is an SVE instruction that operates using the SVE vector
length but that is present only if SME is implemented.
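Concretely, REVD swaps the two 64-bit doublewords within each 128-bit
quadword whose governing predicate element is active.  A tiny
self-contained illustration with invented values (not taken from the
patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* A 256-bit vector viewed as four 64-bit lanes; only the first
     * quadword's predicate bit is set, so only lanes 0 and 1 swap. */
    uint64_t z[4] = { 0x1111, 0x2222, 0x3333, 0x4444 };
    uint8_t pg[4] = { 1, 0, 0, 0 };

    for (int i = 0; i < 4; i += 2) {
        if (pg[i] & 1) {
            uint64_t t = z[i];
            z[i] = z[i + 1];
            z[i + 1] = t;
        }
    }
    /* Prints: 2222 1111 3333 4444 */
    printf("%llx %llx %llx %llx\n",
           (unsigned long long)z[0], (unsigned long long)z[1],
           (unsigned long long)z[2], (unsigned long long)z[3]);
    return 0;
}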

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-30-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sve.h    |  2 ++
 target/arm/sve.decode      |  1 +
 target/arm/sve_helper.c    | 16 ++++++++++++++++
 target/arm/translate-sve.c |  2 ++
 4 files changed, 21 insertions(+)

diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

 DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(sme_revd_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ REVB            00000101 .. 1001 00 100 ... ..... .....     @rd_pg_rn
 REVH            00000101 .. 1001 01 100 ... ..... .....     @rd_pg_rn
 REVW            00000101 .. 1001 10 100 ... ..... .....     @rd_pg_rn
 RBIT            00000101 .. 1001 11 100 ... ..... .....     @rd_pg_rn
+REVD            00000101 00 1011 10 100 ... ..... .....     @rd_pg_rn_e0

 # SVE vector splice (predicated, destructive)
 SPLICE          00000101 .. 101 100 100 ... ..... .....     @rdn_pg_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ZPZ_D(sve_revh_d, uint64_t, hswap64)

 DO_ZPZ_D(sve_revw_d, uint64_t, wswap64)

+void HELPER(sme_revd_q)(void *vd, void *vn, void *vg, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn;
+    uint8_t *pg = vg;
+
+    for (i = 0; i < opr_sz; i += 2) {
+        if (pg[H1(i)] & 1) {
+            uint64_t n0 = n[i + 0];
+            uint64_t n1 = n[i + 1];
+            d[i + 0] = n1;
+            d[i + 1] = n0;
+        }
+    }
+}
+
 DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8)
 DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16)
 DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0)
 TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz,
            a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0)

+TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0)
+
 TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz,
            gen_helper_sve_splice, a, a->esz)
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

This is an SVE instruction that operates using the SVE vector
length but that is present only if SME is implemented.
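Per element the operation is simply a max followed by a min: the
accumulator operand is raised to the lower bound and then capped at the
upper bound.  A scalar sketch of the signed form (illustrative only;
the patch expresses this with TCG min/max ops and vector helpers):

#include <stdint.h>

/* SCLAMP: Zd = MIN(MAX(Za, Zn), Zm), element-wise and signed;
 * UCLAMP is the same shape with unsigned comparisons. */
static int32_t sclamp_s(int32_t a, int32_t n, int32_t m)
{
    int32_t t = a > n ? a : n;   /* smax(a, n): apply lower bound n */
    return t < m ? t : m;        /* smin(t, m): apply upper bound m */
}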

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-31-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.h        |  18 +++++++
 target/arm/sve.decode      |   5 ++
 target/arm/translate-sve.c | 102 +++++++++++++++++++++++++++++++++++++
 target/arm/vec_helper.c    |  24 +++++++++
 4 files changed, 149 insertions(+)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
 DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
 #ifdef TARGET_AARCH64
 #include "helper-a64.h"
 #include "helper-sve.h"
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -XXX,XX +XXX,XX @@ PSEL            00100101 .. 1 100 .. 01 .... 0 .... 0 ....  \
                 @psel esz=2 imm=%psel_imm_s
 PSEL            00100101 .1 1 000 .. 01 .... 0 .... 0 ....  \
                 @psel esz=3 imm=%psel_imm_d
+
+### SVE clamp
+
+SCLAMP          01000100 .. 0 ..... 110000 ..... .....      @rda_rn_rm
+UCLAMP          01000100 .. 0 ..... 110001 ..... .....      @rda_rn_rm
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_PSEL(DisasContext *s, arg_psel *a)
     tcg_temp_free_ptr(ptr);
     return true;
 }
+
+static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
+{
+    tcg_gen_smax_i32(d, a, n);
+    tcg_gen_smin_i32(d, d, m);
+}
+
+static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
+{
+    tcg_gen_smax_i64(d, a, n);
+    tcg_gen_smin_i64(d, d, m);
+}
+
+static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                           TCGv_vec m, TCGv_vec a)
+{
+    tcg_gen_smax_vec(vece, d, a, n);
+    tcg_gen_smin_vec(vece, d, d, m);
+}
+
+static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                       uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop[] = {
+        INDEX_op_smin_vec, INDEX_op_smax_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_sclamp_vec,
+          .fno = gen_helper_gvec_sclamp_b,
+          .opt_opc = vecop,
+          .vece = MO_8 },
+        { .fniv = gen_sclamp_vec,
+          .fno = gen_helper_gvec_sclamp_h,
+          .opt_opc = vecop,
+          .vece = MO_16 },
+        { .fni4 = gen_sclamp_i32,
+          .fniv = gen_sclamp_vec,
+          .fno = gen_helper_gvec_sclamp_s,
+          .opt_opc = vecop,
+          .vece = MO_32 },
+        { .fni8 = gen_sclamp_i64,
+          .fniv = gen_sclamp_vec,
+          .fno = gen_helper_gvec_sclamp_d,
+          .opt_opc = vecop,
+          .vece = MO_64,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
+}
+
+TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a)
+
+static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
+{
+    tcg_gen_umax_i32(d, a, n);
+    tcg_gen_umin_i32(d, d, m);
+}
+
+static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
+{
+    tcg_gen_umax_i64(d, a, n);
+    tcg_gen_umin_i64(d, d, m);
+}
+
+static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+                           TCGv_vec m, TCGv_vec a)
+{
+    tcg_gen_umax_vec(vece, d, a, n);
+    tcg_gen_umin_vec(vece, d, d, m);
+}
+
+static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+                       uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop[] = {
+        INDEX_op_umin_vec, INDEX_op_umax_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_uclamp_vec,
+          .fno = gen_helper_gvec_uclamp_b,
+          .opt_opc = vecop,
+          .vece = MO_8 },
+        { .fniv = gen_uclamp_vec,
+          .fno = gen_helper_gvec_uclamp_h,
+          .opt_opc = vecop,
+          .vece = MO_16 },
+        { .fni4 = gen_uclamp_i32,
+          .fniv = gen_uclamp_vec,
+          .fno = gen_helper_gvec_uclamp_s,
+          .opt_opc = vecop,
+          .vece = MO_32 },
+        { .fni8 = gen_uclamp_i64,
+          .fniv = gen_uclamp_vec,
+          .fno = gen_helper_gvec_uclamp_d,
+          .opt_opc = vecop,
+          .vece = MO_64,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
+    };
+    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
+}
+
+TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a)
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm,
     }
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
+
+#define DO_CLAMP(NAME, TYPE)                                            \
+void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc)    \
+{                                                                       \
+    intptr_t i, opr_sz = simd_oprsz(desc);                              \
+    for (i = 0; i < opr_sz; i += sizeof(TYPE)) {                        \
+        TYPE aa = *(TYPE *)(a + i);                                     \
+        TYPE nn = *(TYPE *)(n + i);                                     \
+        TYPE mm = *(TYPE *)(m + i);                                     \
+        TYPE dd = MIN(MAX(aa, nn), mm);                                 \
+        *(TYPE *)(d + i) = dd;                                          \
+    }                                                                   \
+    clear_tail(d, opr_sz, simd_maxsz(desc));                            \
+}
+
+DO_CLAMP(gvec_sclamp_b, int8_t)
+DO_CLAMP(gvec_sclamp_h, int16_t)
+DO_CLAMP(gvec_sclamp_s, int32_t)
+DO_CLAMP(gvec_sclamp_d, int64_t)
+
+DO_CLAMP(gvec_uclamp_b, uint8_t)
+DO_CLAMP(gvec_uclamp_h, uint16_t)
+DO_CLAMP(gvec_uclamp_s, uint32_t)
+DO_CLAMP(gvec_uclamp_d, uint64_t)
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

We can handle both exception entry and exception return by
hooking into aarch64_sve_change_el.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-32-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
         return;
     }

+    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
+    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
+
+    /*
+     * Both AArch64.TakeException and AArch64.ExceptionReturn
+     * invoke ResetSVEState when taking an exception from, or
+     * returning to, AArch32 state when PSTATE.SM is enabled.
+     */
+    if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) {
+        arm_reset_sve_state(env);
+        return;
+    }
+
     /*
      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
      * at ELx, or not available because the EL is in AArch32 state, then
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
      * we already have the correct register contents when encountering the
      * vq0->vq0 transition between EL0->EL1.
      */
-    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
     old_len = (old_a64 && !sve_exception_el(env, old_el)
                ? sve_vqm1_for_el(env, old_el) : 0);
-    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
     new_len = (new_a64 && !sve_exception_el(env, new_el)
                ? sve_vqm1_for_el(env, new_el) : 0);

--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Note that SME remains effectively disabled for user-only,
because we do not yet set CPACR_EL1.SMEN.  This needs to
wait until the kernel ABI is implemented.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-33-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/emulation.rst |  4 ++++
 target/arm/cpu64.c            | 11 +++++++++++
 2 files changed, 15 insertions(+)

diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
 - FEAT_SHA512 (Advanced SIMD SHA512 instructions)
 - FEAT_SM3 (Advanced SIMD SM3 instructions)
 - FEAT_SM4 (Advanced SIMD SM4 instructions)
+- FEAT_SME (Scalable Matrix Extension)
+- FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode)
+- FEAT_SME_F64F64 (Double-precision floating-point outer product instructions)
+- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions)
 - FEAT_SPECRES (Speculation restriction instructions)
 - FEAT_SSBS (Speculative Store Bypass Safe)
 - FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
      */
     t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);       /* FEAT_MTE3 */
     t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0);  /* FEAT_RASv1p1 + FEAT_DoubleFault */
+    t = FIELD_DP64(t, ID_AA64PFR1, SME, 1);       /* FEAT_SME */
     t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */
     cpu->isar.id_aa64pfr1 = t;

@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
     t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5);    /* FEAT_PMUv3p4 */
     cpu->isar.id_aa64dfr0 = t;

+    t = cpu->isar.id_aa64smfr0;
+    t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1);   /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1);   /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1);   /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf);  /* FEAT_SME */
+    t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1);   /* FEAT_SME_F64F64 */
+    t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */
+    t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1);     /* FEAT_SME_FA64 */
+    cpu->isar.id_aa64smfr0 = t;
+
     /* Replicate the same data to the 32-bit id registers. */
     aa32_max_features(cpu);

--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-34-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/target_cpu.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/linux-user/aarch64/target_cpu.h b/linux-user/aarch64/target_cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/target_cpu.h
+++ b/linux-user/aarch64/target_cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_clone_regs_parent(CPUARMState *env, unsigned flags)

 static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
 {
-    /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
+    /*
+     * Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
      * different from AArch32 Linux, which uses TPIDRRO.
      */
     env->cp15.tpidr_el[0] = newtls;
+    /* TPIDR2_EL0 is cleared with CLONE_SETTLS. */
+    env->cp15.tpidr2_el0 = 0;
 }

 static inline abi_ulong get_sp_from_cpustate(CPUARMState *state)
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-35-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/cpu_loop.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/cpu_loop.c
+++ b/linux-user/aarch64/cpu_loop.c
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)

         switch (trapnr) {
         case EXCP_SWI:
+            /*
+             * On syscall, PSTATE.ZA is preserved, along with the ZA matrix.
+             * PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState.
+             */
+            if (FIELD_EX64(env->svcr, SVCR, SM)) {
+                env->svcr = FIELD_DP64(env->svcr, SVCR, SM, 0);
+                arm_rebuild_hflags(env);
+                arm_reset_sve_state(env);
+            }
             ret = do_syscall(env,
                              env->xregs[8],
                              env->xregs[0],
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Make sure to zero the currently reserved fields.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-36-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/signal.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -XXX,XX +XXX,XX @@ struct target_extra_context {
 struct target_sve_context {
     struct target_aarch64_ctx head;
     uint16_t vl;
-    uint16_t reserved[3];
+    uint16_t flags;
+    uint16_t reserved[2];
     /* The actual SVE data immediately follows.  It is laid out
      * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
      * the original struct pointer.
@@ -XXX,XX +XXX,XX @@ struct target_sve_context {
 #define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
     (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))

+#define TARGET_SVE_SIG_FLAG_SM  1
+
 struct target_rt_sigframe {
     struct target_siginfo info;
     struct target_ucontext uc;
@@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve,
 {
     int i, j;

+    memset(sve, 0, sizeof(*sve));
     __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
     __put_user(size, &sve->head.size);
     __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);
+    if (FIELD_EX64(env->svcr, SVCR, SM)) {
+        __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags);
+    }

     /* Note that SVE regs are stored as a byte stream, with each byte element
      * at a subsequent address.  This corresponds to a little-endian store
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

Fold the return value setting into the goto, so each
point of failure need not do both.
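Schematically, the change replaces the flag-plus-shared-exit idiom with
a dedicated error label; a minimal before/after sketch (illustrative C,
not the patch itself):

#include <stdbool.h>

static bool bad(void) { return false; }   /* stand-in failing check */

static int parse_before(void)   /* old shape: set a flag, then jump */
{
    bool err = false;
    if (bad()) {
        err = true;
        goto exit;
    }
 exit:
    return err;
}

static int parse_after(void)    /* new shape: the label owns the result */
{
    if (bad()) {
        goto err;
    }
    return 0;
 err:
    return 1;
}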
13
Cc: qemu-stable@nongnu.org
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-37-richard.henderson@linaro.org
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
16
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
17
Tested-by: Alex Bennée <alex.bennee@linaro.org>
18
Message-id: 20180504100547.14621-1-peter.maydell@linaro.org
19
---
10
---
20
fpu/softfloat.c | 52 ++++++++++++++++++++++++++++---------------------
11
linux-user/aarch64/signal.c | 26 +++++++++++---------------
21
1 file changed, 30 insertions(+), 22 deletions(-)
12
1 file changed, 11 insertions(+), 15 deletions(-)
22
13
23
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
14
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
24
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
25
--- a/fpu/softfloat.c
16
--- a/linux-user/aarch64/signal.c
26
+++ b/fpu/softfloat.c
17
+++ b/linux-user/aarch64/signal.c
27
@@ -XXX,XX +XXX,XX @@ static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s)
18
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
28
static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,
19
struct target_sve_context *sve = NULL;
29
bool inf_zero, float_status *s)
20
uint64_t extra_datap = 0;
30
{
21
bool used_extra = false;
31
+ int which;
22
- bool err = false;
23
int vq = 0, sve_size = 0;
24
25
target_restore_general_frame(env, sf);
26
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
27
switch (magic) {
28
case 0:
29
if (size != 0) {
30
- err = true;
31
- goto exit;
32
+ goto err;
33
}
34
if (used_extra) {
35
ctx = NULL;
36
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
37
38
case TARGET_FPSIMD_MAGIC:
39
if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
40
- err = true;
41
- goto exit;
42
+ goto err;
43
}
44
fpsimd = (struct target_fpsimd_context *)ctx;
45
break;
46
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
47
break;
48
}
49
}
50
- err = true;
51
- goto exit;
52
+ goto err;
53
54
case TARGET_EXTRA_MAGIC:
55
if (extra || size != sizeof(struct target_extra_context)) {
56
- err = true;
57
- goto exit;
58
+ goto err;
59
}
60
__get_user(extra_datap,
61
&((struct target_extra_context *)ctx)->datap);
62
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
63
/* Unknown record -- we certainly didn't generate it.
64
* Did we in fact get out of sync?
65
*/
66
- err = true;
67
- goto exit;
68
+ goto err;
69
}
70
ctx = (void *)ctx + size;
71
}
72
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
73
if (fpsimd) {
74
target_restore_fpsimd_record(env, fpsimd);
75
} else {
76
- err = true;
77
+ goto err;
78
}
79
80
/* SVE data, if present, overwrites FPSIMD data. */
81
if (sve) {
82
target_restore_sve_record(env, sve, vq);
83
}
84
-
85
- exit:
86
unlock_user(extra, extra_datap, 0);
87
- return err;
88
+ return 0;
32
+
89
+
33
if (is_snan(a.cls) || is_snan(b.cls) || is_snan(c.cls)) {
90
+ err:
34
s->float_exception_flags |= float_flag_invalid;
91
+ unlock_user(extra, extra_datap, 0);
35
}
92
+ return 1;
36
37
- if (s->default_nan_mode) {
38
- a.cls = float_class_dnan;
39
- } else {
40
- switch (pickNaNMulAdd(is_qnan(a.cls), is_snan(a.cls),
41
- is_qnan(b.cls), is_snan(b.cls),
42
- is_qnan(c.cls), is_snan(c.cls),
43
- inf_zero, s)) {
44
- case 0:
45
- break;
46
- case 1:
47
- a = b;
48
- break;
49
- case 2:
50
- a = c;
51
- break;
52
- case 3:
53
- a.cls = float_class_dnan;
54
- return a;
55
- default:
56
- g_assert_not_reached();
57
- }
58
+ which = pickNaNMulAdd(is_qnan(a.cls), is_snan(a.cls),
59
+ is_qnan(b.cls), is_snan(b.cls),
60
+ is_qnan(c.cls), is_snan(c.cls),
61
+ inf_zero, s);
62
63
- a.cls = float_class_msnan;
64
+ if (s->default_nan_mode) {
65
+ /* Note that this check is after pickNaNMulAdd so that function
66
+ * has an opportunity to set the Invalid flag.
67
+ */
68
+ a.cls = float_class_dnan;
69
+ return a;
70
}
71
+
72
+ switch (which) {
73
+ case 0:
74
+ break;
75
+ case 1:
76
+ a = b;
77
+ break;
78
+ case 2:
79
+ a = c;
80
+ break;
81
+ case 3:
82
+ a.cls = float_class_dnan;
83
+ return a;
84
+ default:
85
+ g_assert_not_reached();
86
+ }
87
+ a.cls = float_class_msnan;
88
+
89
return a;
90
}
93
}
91
94
95
static abi_ulong get_sigframe(struct target_sigaction *ka,
92
--
96
--
93
2.17.0
97
2.25.1
94
95
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
In parse_user_sigframe, the kernel rejects duplicate sve records,
4
or records that are smaller than the header. We were silently
5
allowing these cases to pass, dropping the record.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220708151540.18136-38-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
linux-user/aarch64/signal.c | 5 ++++-
13
1 file changed, 4 insertions(+), 1 deletion(-)
14
15
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/linux-user/aarch64/signal.c
18
+++ b/linux-user/aarch64/signal.c
19
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
20
break;
21
22
case TARGET_SVE_MAGIC:
23
+ if (sve || size < sizeof(struct target_sve_context)) {
24
+ goto err;
25
+ }
26
if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
27
vq = sve_vq(env);
28
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
29
- if (!sve && size == sve_size) {
30
+ if (size == sve_size) {
31
sve = (struct target_sve_context *)ctx;
32
break;
33
}
34
--
35
2.25.1
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220708151540.18136-39-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
linux-user/aarch64/signal.c | 3 +++
9
1 file changed, 3 insertions(+)
10
11
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/linux-user/aarch64/signal.c
14
+++ b/linux-user/aarch64/signal.c
15
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
16
__get_user(extra_size,
17
&((struct target_extra_context *)ctx)->size);
18
extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
19
+ if (!extra) {
20
+ return 1;
21
+ }
22
break;
23
24
default:
25
--
26
2.25.1
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The generic expanders replace nearly identical code in the translator.
3
Move the checks out of the parsing loop and into the
4
restore function. This more closely mirrors the code
5
structure in the kernel, and is slightly clearer.
6
7
Reject rather than silently skip incorrect VL and SVE record sizes,
8
bringing our checks in to line with those the kernel does.
4
9
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20180508151437.4232-3-richard.henderson@linaro.org
12
Message-id: 20220708151540.18136-40-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
14
---
10
target/arm/translate-a64.c | 46 ++++++++++++--------------------------
15
linux-user/aarch64/signal.c | 51 +++++++++++++++++++++++++------------
11
1 file changed, 14 insertions(+), 32 deletions(-)
16
1 file changed, 35 insertions(+), 16 deletions(-)
12
17
13
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
18
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
14
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/translate-a64.c
20
--- a/linux-user/aarch64/signal.c
16
+++ b/target/arm/translate-a64.c
21
+++ b/linux-user/aarch64/signal.c
17
@@ -XXX,XX +XXX,XX @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
22
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,
18
tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
19
break;
20
case 0x0a: /* SMAXV / UMAXV */
21
- tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
22
- tcg_res,
23
- tcg_res, tcg_elt, tcg_res, tcg_elt);
24
+ if (is_u) {
25
+ tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
26
+ } else {
27
+ tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
28
+ }
29
break;
30
case 0x1a: /* SMINV / UMINV */
31
- tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
32
- tcg_res,
33
- tcg_res, tcg_elt, tcg_res, tcg_elt);
34
- break;
35
+ if (is_u) {
36
+ tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
37
+ } else {
38
+ tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
39
+ }
40
break;
41
default:
42
g_assert_not_reached();
43
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
44
}
23
}
45
}
24
}
46
25
47
-/* Helper functions for 32 bit comparisons */
26
-static void target_restore_sve_record(CPUARMState *env,
48
-static void gen_max_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
27
- struct target_sve_context *sve, int vq)
49
-{
28
+static bool target_restore_sve_record(CPUARMState *env,
50
- tcg_gen_movcond_i32(TCG_COND_GE, res, op1, op2, op1, op2);
29
+ struct target_sve_context *sve,
51
-}
30
+ int size)
52
-
31
{
53
-static void gen_max_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
32
- int i, j;
54
-{
33
+ int i, j, vl, vq;
55
- tcg_gen_movcond_i32(TCG_COND_GEU, res, op1, op2, op1, op2);
34
56
-}
35
- /* Note that SVE regs are stored as a byte stream, with each byte element
57
-
36
+ if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
58
-static void gen_min_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
37
+ return false;
59
-{
38
+ }
60
- tcg_gen_movcond_i32(TCG_COND_LE, res, op1, op2, op1, op2);
39
+
61
-}
40
+ __get_user(vl, &sve->vl);
62
-
41
+ vq = sve_vq(env);
63
-static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
42
+
64
-{
43
+ /* Reject mismatched VL. */
65
- tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
44
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
66
-}
45
+ return false;
67
-
46
+ }
68
/* Pairwise op subgroup of C3.6.16.
47
+
69
*
48
+ /* Accept empty record -- used to clear PSTATE.SM. */
70
* This is called directly or via the handle_3same_float for float pairwise
49
+ if (size <= sizeof(*sve)) {
71
@@ -XXX,XX +XXX,XX @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
50
+ return true;
72
static NeonGenTwoOpFn * const fns[3][2] = {
51
+ }
73
{ gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
52
+
74
{ gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
53
+ /* Reject non-empty but incomplete record. */
75
- { gen_max_s32, gen_max_u32 },
54
+ if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) {
76
+ { tcg_gen_smax_i32, tcg_gen_umax_i32 },
55
+ return false;
77
};
56
+ }
78
genfn = fns[size][u];
57
+
79
break;
58
+ /*
80
@@ -XXX,XX +XXX,XX @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
59
+ * Note that SVE regs are stored as a byte stream, with each byte element
81
static NeonGenTwoOpFn * const fns[3][2] = {
60
* at a subsequent address. This corresponds to a little-endian load
82
{ gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
61
* of our 64-bit hunks.
83
{ gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
62
*/
84
- { gen_min_s32, gen_min_u32 },
63
@@ -XXX,XX +XXX,XX @@ static void target_restore_sve_record(CPUARMState *env,
85
+ { tcg_gen_smin_i32, tcg_gen_umin_i32 },
64
}
86
};
65
}
87
genfn = fns[size][u];
66
}
88
break;
67
+ return true;
89
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
68
}
90
static NeonGenTwoOpFn * const fns[3][2] = {
69
91
{ gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
70
static int target_restore_sigframe(CPUARMState *env,
92
{ gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
71
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
93
- { gen_max_s32, gen_max_u32 },
72
struct target_sve_context *sve = NULL;
94
+ { tcg_gen_smax_i32, tcg_gen_umax_i32 },
73
uint64_t extra_datap = 0;
95
};
74
bool used_extra = false;
96
genfn = fns[size][u];
75
- int vq = 0, sve_size = 0;
97
break;
76
+ int sve_size = 0;
98
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
77
99
static NeonGenTwoOpFn * const fns[3][2] = {
78
target_restore_general_frame(env, sf);
100
{ gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
79
101
{ gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
80
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
102
- { gen_min_s32, gen_min_u32 },
81
if (sve || size < sizeof(struct target_sve_context)) {
103
+ { tcg_gen_smin_i32, tcg_gen_umin_i32 },
82
goto err;
104
};
83
}
105
genfn = fns[size][u];
84
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
106
break;
85
- vq = sve_vq(env);
86
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
87
- if (size == sve_size) {
88
- sve = (struct target_sve_context *)ctx;
89
- break;
90
- }
91
- }
92
- goto err;
93
+ sve = (struct target_sve_context *)ctx;
94
+ sve_size = size;
95
+ break;
96
97
case TARGET_EXTRA_MAGIC:
98
if (extra || size != sizeof(struct target_extra_context)) {
99
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
100
}
101
102
/* SVE data, if present, overwrites FPSIMD data. */
103
- if (sve) {
104
- target_restore_sve_record(env, sve, vq);
105
+ if (sve && !target_restore_sve_record(env, sve, sve_size)) {
106
+ goto err;
107
}
108
unlock_user(extra, extra_datap, 0);
109
return 0;
107
--
110
--
108
2.17.0
111
2.25.1
109
110
diff view generated by jsdifflib
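Distilled, the restore function now imposes a three-way policy on the record
size.  A sketch under illustrative names -- header_size and full_size stand
in for sizeof(*sve) and TARGET_SVE_SIG_CONTEXT_SIZE(vq), and the literal 16
is TARGET_SVE_VQ_BYTES:

    enum sve_record_verdict { RECORD_ERR, RECORD_EMPTY, RECORD_FULL };

    /* Sketch of the size policy in target_restore_sve_record above:
     * reject a mismatched VL, accept a header-only record (nothing to
     * restore), reject a record that claims data but is too short. */
    static enum sve_record_verdict
    classify_sve_record(int vl, int vq, int size,
                        int header_size, int full_size)
    {
        if (vl != vq * 16) {
            return RECORD_ERR;      /* mismatched VL */
        }
        if (size <= header_size) {
            return RECORD_EMPTY;    /* header only: no payload */
        }
        if (size < full_size) {
            return RECORD_ERR;      /* non-empty but incomplete */
        }
        return RECORD_FULL;
    }

Returning bool from the restore function, rather than void, is what lets
target_restore_sigframe turn any of these rejections into delivery of
SIGSEGV via the existing err path.
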
From: Richard Henderson <richard.henderson@linaro.org>

Set the SM bit in the SVE record on signal delivery, create the ZA record.
Restore SM and ZA state according to the records present on return.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-41-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/signal.c | 167 +++++++++++++++++++++++++++++++++---
 1 file changed, 154 insertions(+), 13 deletions(-)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -XXX,XX +XXX,XX @@ struct target_sve_context {

 #define TARGET_SVE_SIG_FLAG_SM  1

+#define TARGET_ZA_MAGIC        0x54366345
+
+struct target_za_context {
+    struct target_aarch64_ctx head;
+    uint16_t vl;
+    uint16_t reserved[3];
+    /* The actual ZA data immediately follows. */
+};
+
+#define TARGET_ZA_SIG_REGS_OFFSET \
+    QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
+#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
+    (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
+#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
+    TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
+
 struct target_rt_sigframe {
     struct target_siginfo info;
     struct target_ucontext uc;
@@ -XXX,XX +XXX,XX @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
 }

 static void target_setup_sve_record(struct target_sve_context *sve,
-                                    CPUARMState *env, int vq, int size)
+                                    CPUARMState *env, int size)
 {
-    int i, j;
+    int i, j, vq = sve_vq(env);

     memset(sve, 0, sizeof(*sve));
     __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
@@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve,
     }
 }

+static void target_setup_za_record(struct target_za_context *za,
+                                   CPUARMState *env, int size)
+{
+    int vq = sme_vq(env);
+    int vl = vq * TARGET_SVE_VQ_BYTES;
+    int i, j;
+
+    memset(za, 0, sizeof(*za));
+    __put_user(TARGET_ZA_MAGIC, &za->head.magic);
+    __put_user(size, &za->head.size);
+    __put_user(vl, &za->vl);
+
+    if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+        return;
+    }
+    assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq));
+
+    /*
+     * Note that ZA vectors are stored as a byte stream,
+     * with each byte element at a subsequent address.
+     */
+    for (i = 0; i < vl; ++i) {
+        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+        for (j = 0; j < vq * 2; ++j) {
+            __put_user_e(env->zarray[i].d[j], z + j, le);
+        }
+    }
+}
+
 static void target_restore_general_frame(CPUARMState *env,
                                          struct target_rt_sigframe *sf)
 {
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,

 static bool target_restore_sve_record(CPUARMState *env,
                                       struct target_sve_context *sve,
-                                      int size)
+                                      int size, int *svcr)
 {
-    int i, j, vl, vq;
+    int i, j, vl, vq, flags;
+    bool sm;

-    if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
+    __get_user(vl, &sve->vl);
+    __get_user(flags, &sve->flags);
+
+    sm = flags & TARGET_SVE_SIG_FLAG_SM;
+
+    /* The cpu must support Streaming or Non-streaming SVE. */
+    if (sm
+        ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
+        : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
         return false;
     }

-    __get_user(vl, &sve->vl);
-    vq = sve_vq(env);
+    /*
+     * Note that we cannot use sve_vq() because that depends on the
+     * current setting of PSTATE.SM, not the state to be restored.
+     */
+    vq = sve_vqm1_for_el_sm(env, 0, sm) + 1;

     /* Reject mismatched VL. */
     if (vl != vq * TARGET_SVE_VQ_BYTES) {
@@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env,
         return false;
     }

+    *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);
+
     /*
      * Note that SVE regs are stored as a byte stream, with each byte element
      * at a subsequent address.  This corresponds to a little-endian load
@@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env,
     return true;
 }

+static bool target_restore_za_record(CPUARMState *env,
+                                     struct target_za_context *za,
+                                     int size, int *svcr)
+{
+    int i, j, vl, vq;
+
+    if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        return false;
+    }
+
+    __get_user(vl, &za->vl);
+    vq = sme_vq(env);
+
+    /* Reject mismatched VL. */
+    if (vl != vq * TARGET_SVE_VQ_BYTES) {
+        return false;
+    }
+
+    /* Accept empty record -- used to clear PSTATE.ZA. */
+    if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+        return true;
+    }
+
+    /* Reject non-empty but incomplete record. */
+    if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
+        return false;
+    }
+
+    *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);
+
+    for (i = 0; i < vl; ++i) {
+        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+        for (j = 0; j < vq * 2; ++j) {
+            __get_user_e(env->zarray[i].d[j], z + j, le);
+        }
+    }
+    return true;
+}
+
 static int target_restore_sigframe(CPUARMState *env,
                                    struct target_rt_sigframe *sf)
 {
     struct target_aarch64_ctx *ctx, *extra = NULL;
     struct target_fpsimd_context *fpsimd = NULL;
     struct target_sve_context *sve = NULL;
+    struct target_za_context *za = NULL;
     uint64_t extra_datap = 0;
     bool used_extra = false;
     int sve_size = 0;
+    int za_size = 0;
+    int svcr = 0;

     target_restore_general_frame(env, sf);

@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
             sve_size = size;
             break;

+        case TARGET_ZA_MAGIC:
+            if (za || size < sizeof(struct target_za_context)) {
+                goto err;
+            }
+            za = (struct target_za_context *)ctx;
+            za_size = size;
+            break;
+
         case TARGET_EXTRA_MAGIC:
             if (extra || size != sizeof(struct target_extra_context)) {
                 goto err;
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
     }

     /* SVE data, if present, overwrites FPSIMD data.  */
-    if (sve && !target_restore_sve_record(env, sve, sve_size)) {
+    if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
         goto err;
     }
+    if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
+        goto err;
+    }
+    if (env->svcr != svcr) {
+        env->svcr = svcr;
+        arm_rebuild_hflags(env);
+    }
     unlock_user(extra, extra_datap, 0);
     return 0;

@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         .total_size = offsetof(struct target_rt_sigframe,
                                uc.tuc_mcontext.__reserved),
     };
-    int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
+    int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
+    int sve_size = 0, za_size = 0;
     struct target_rt_sigframe *frame;
     struct target_rt_frame_record *fr;
     abi_ulong frame_addr, return_addr;
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
                               &layout);

     /* SVE state needs saving only if it exists. */
-    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
-        vq = sve_vq(env);
-        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
+    if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
+        cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
         sve_ofs = alloc_sigframe_space(sve_size, &layout);
     }
+    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        /* ZA state needs saving only if it is enabled.  */
+        if (FIELD_EX64(env->svcr, SVCR, ZA)) {
+            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
+        } else {
+            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
+        }
+        za_ofs = alloc_sigframe_space(za_size, &layout);
+    }

     if (layout.extra_ofs) {
         /* Reserve space for the extra end marker.  The standard end marker
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         target_setup_end_record((void *)frame + layout.extra_end_ofs);
     }
     if (sve_ofs) {
-        target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
+        target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
+    }
+    if (za_ofs) {
+        target_setup_za_record((void *)frame + za_ofs, env, za_size);
     }

     /* Set up the stack frame for unwinding. */
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         env->btype = 2;
     }

+    /*
+     * Invoke the signal handler with both SM and ZA disabled.
+     * When clearing SM, ResetSVEState, per SMSTOP.
+     */
+    if (FIELD_EX64(env->svcr, SVCR, SM)) {
+        arm_reset_sve_state(env);
+    }
+    if (env->svcr) {
+        env->svcr = 0;
+        arm_rebuild_hflags(env);
+    }
+
     if (info) {
         tswap_siginfo(&frame->info, info);
         env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
--
2.25.1

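The ZA payload is square in the streaming vector length: SVL rows of SVL
bytes, after a header padded to 16 bytes.  A quick standalone check of the
size macros above, with TARGET_SVE_VQ_BYTES == 16 and the header size taken
from the struct layout shown (values here are arithmetic from the patch,
the program itself is illustrative):

    #include <assert.h>
    #include <stdio.h>

    #define VQ_BYTES       16
    #define ZA_REGS_OFFSET 16  /* sizeof(struct target_za_context), aligned */

    /* Mirror of TARGET_ZA_SIG_CONTEXT_SIZE(vq): header plus SVL*SVL bytes. */
    static int za_context_size(int vq)
    {
        int svl = vq * VQ_BYTES;
        return ZA_REGS_OFFSET + svl * svl;
    }

    int main(void)
    {
        assert(za_context_size(1) == 272);     /* SVL=16:  16 + 256  */
        assert(za_context_size(4) == 4112);    /* SVL=64:  16 + 4096 */
        printf("max (vq=16, SVL=256): %d bytes\n", za_context_size(16));
        return 0;
    }

The quadratic growth is why the record is only written at full size when
PSTATE.ZA is set; otherwise the header-only TARGET_ZA_SIG_CONTEXT_SIZE(0)
form is used.
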
From: Richard Henderson <richard.henderson@linaro.org>

Add "sve" to the sve prctl functions, to distinguish
them from the coming "sme" prctls with similar names.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-42-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/target_prctl.h |  8 ++++----
 linux-user/syscall.c              | 12 ++++++------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/target_prctl.h
+++ b/linux-user/aarch64/target_prctl.h
@@ -XXX,XX +XXX,XX @@
 #ifndef AARCH64_TARGET_PRCTL_H
 #define AARCH64_TARGET_PRCTL_H

-static abi_long do_prctl_get_vl(CPUArchState *env)
+static abi_long do_prctl_sve_get_vl(CPUArchState *env)
 {
     ARMCPU *cpu = env_archcpu(env);
     if (cpu_isar_feature(aa64_sve, cpu)) {
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_get_vl(CPUArchState *env)
     }
     return -TARGET_EINVAL;
 }
-#define do_prctl_get_vl do_prctl_get_vl
+#define do_prctl_sve_get_vl do_prctl_sve_get_vl

-static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
+static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
 {
     /*
      * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
     }
     return -TARGET_EINVAL;
 }
-#define do_prctl_set_vl do_prctl_set_vl
+#define do_prctl_sve_set_vl do_prctl_sve_set_vl

 static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
 {
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
 #ifndef do_prctl_set_fp_mode
 #define do_prctl_set_fp_mode do_prctl_inval1
 #endif
-#ifndef do_prctl_get_vl
-#define do_prctl_get_vl do_prctl_inval0
+#ifndef do_prctl_sve_get_vl
+#define do_prctl_sve_get_vl do_prctl_inval0
 #endif
-#ifndef do_prctl_set_vl
-#define do_prctl_set_vl do_prctl_inval1
+#ifndef do_prctl_sve_set_vl
+#define do_prctl_sve_set_vl do_prctl_inval1
 #endif
 #ifndef do_prctl_reset_keys
 #define do_prctl_reset_keys do_prctl_inval1
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
     case PR_SET_FP_MODE:
         return do_prctl_set_fp_mode(env, arg2);
     case PR_SVE_GET_VL:
-        return do_prctl_get_vl(env);
+        return do_prctl_sve_get_vl(env);
     case PR_SVE_SET_VL:
-        return do_prctl_set_vl(env, arg2);
+        return do_prctl_sve_set_vl(env, arg2);
     case PR_PAC_RESET_KEYS:
         if (arg3 || arg4 || arg5) {
             return -TARGET_EINVAL;
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

These prctls set the Streaming SVE vector length, which may
be completely different from the Normal SVE vector length.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-43-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/aarch64/target_prctl.h | 54 +++++++++++++++++++++++++++++++
 linux-user/syscall.c              | 16 +++++++++
 2 files changed, 70 insertions(+)

diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/target_prctl.h
+++ b/linux-user/aarch64/target_prctl.h
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_get_vl(CPUArchState *env)
 {
     ARMCPU *cpu = env_archcpu(env);
     if (cpu_isar_feature(aa64_sve, cpu)) {
+        /* PSTATE.SM is always unset on syscall entry. */
         return sve_vq(env) * 16;
     }
     return -TARGET_EINVAL;
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
         && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
         uint32_t vq, old_vq;

+        /* PSTATE.SM is always unset on syscall entry. */
         old_vq = sve_vq(env);

         /*
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
 }
 #define do_prctl_sve_set_vl do_prctl_sve_set_vl

+static abi_long do_prctl_sme_get_vl(CPUArchState *env)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    if (cpu_isar_feature(aa64_sme, cpu)) {
+        return sme_vq(env) * 16;
+    }
+    return -TARGET_EINVAL;
+}
+#define do_prctl_sme_get_vl do_prctl_sme_get_vl
+
+static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
+{
+    /*
+     * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
+     * Note the kernel definition of sve_vl_valid allows for VQ=512,
+     * i.e. VL=8192, even though the architectural maximum is VQ=16.
+     */
+    if (cpu_isar_feature(aa64_sme, env_archcpu(env))
+        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
+        int vq, old_vq;
+
+        old_vq = sme_vq(env);
+
+        /*
+         * Bound the value of vq, so that we know that it fits into
+         * the 4-bit field in SMCR_EL1.  Because PSTATE.SM is cleared
+         * on syscall entry, we are not modifying the current SVE
+         * vector length.
+         */
+        vq = MAX(arg2 / 16, 1);
+        vq = MIN(vq, 16);
+        env->vfp.smcr_el[1] =
+            FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);
+
+        /* Delay rebuilding hflags until we know if ZA must change. */
+        vq = sve_vqm1_for_el_sm(env, 0, true) + 1;
+
+        if (vq != old_vq) {
+            /*
+             * PSTATE.ZA state is cleared on any change to SVL.
+             * We need not call arm_rebuild_hflags because PSTATE.SM was
+             * cleared on syscall entry, so this hasn't changed VL.
+             */
+            env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
+            arm_rebuild_hflags(env);
+        }
+        return vq * 16;
+    }
+    return -TARGET_EINVAL;
+}
+#define do_prctl_sme_set_vl do_prctl_sme_set_vl
+
 static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
 {
     ARMCPU *cpu = env_archcpu(env);
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
 #ifndef PR_SET_SYSCALL_USER_DISPATCH
 # define PR_SET_SYSCALL_USER_DISPATCH 59
 #endif
+#ifndef PR_SME_SET_VL
+# define PR_SME_SET_VL  63
+# define PR_SME_GET_VL  64
+# define PR_SME_VL_LEN_MASK  0xffff
+# define PR_SME_VL_INHERIT   (1 << 17)
+#endif

 #include "target_prctl.h"

@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
 #ifndef do_prctl_set_unalign
 #define do_prctl_set_unalign do_prctl_inval1
 #endif
+#ifndef do_prctl_sme_get_vl
+#define do_prctl_sme_get_vl do_prctl_inval0
+#endif
+#ifndef do_prctl_sme_set_vl
+#define do_prctl_sme_set_vl do_prctl_inval1
+#endif

 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                          abi_long arg3, abi_long arg4, abi_long arg5)
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
         return do_prctl_sve_get_vl(env);
     case PR_SVE_SET_VL:
         return do_prctl_sve_set_vl(env, arg2);
+    case PR_SME_GET_VL:
+        return do_prctl_sme_get_vl(env);
+    case PR_SME_SET_VL:
+        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
         if (arg3 || arg4 || arg5) {
             return -TARGET_EINVAL;
--
2.25.1

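From the guest side the new prctls can be exercised directly.  A minimal
test, assuming only the PR_SME_* values defined in the patch (in this
implementation a successful PR_SME_SET_VL returns the streaming VL actually
granted, possibly clamped to the architectural maximum):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SME_SET_VL
    #define PR_SME_SET_VL 63
    #define PR_SME_GET_VL 64
    #endif

    int main(void)
    {
        /* Request a 32-byte streaming VL (vq = 2). */
        long vl = prctl(PR_SME_SET_VL, 32);
        if (vl < 0) {
            perror("PR_SME_SET_VL");   /* EINVAL without SME */
            return 1;
        }
        long got = prctl(PR_SME_GET_VL);
        printf("streaming VL: set -> %ld, get -> %ld bytes\n", vl, got);
        return 0;
    }
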
From: Richard Henderson <richard.henderson@linaro.org>

There's no reason to set CPACR_EL1.ZEN if SVE is disabled.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-44-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
         /* and to the FP/Neon instructions */
         env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
                                          CPACR_EL1, FPEN, 3);
-        /* and to the SVE instructions */
-        env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
-                                         CPACR_EL1, ZEN, 3);
-        /* with reasonable vector length */
+        /* and to the SVE instructions, with default vector length */
        if (cpu_isar_feature(aa64_sve, cpu)) {
+            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
+                                             CPACR_EL1, ZEN, 3);
             env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
         }
         /*
--
2.25.1

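For reference, the CPACR_EL1 accesses above program two-bit per-extension
enable fields, where the value 3 (0b11) means no trapping at EL0 or EL1.
A sketch with the architectural bit positions (the positions are
assumptions from the architecture manual, not taken from this patch):

    #include <stdint.h>
    #include <stdio.h>

    enum {
        CPACR_ZEN_SHIFT  = 16,   /* SVE,     bits [17:16] */
        CPACR_FPEN_SHIFT = 20,   /* FP/Neon, bits [21:20] */
        CPACR_SMEN_SHIFT = 24,   /* SME,     bits [25:24] */
    };

    static uint64_t cpacr_enable(uint64_t cpacr, int shift)
    {
        return cpacr | (3ULL << shift);
    }

    int main(void)
    {
        uint64_t cpacr = 0;
        cpacr = cpacr_enable(cpacr, CPACR_FPEN_SHIFT);
        cpacr = cpacr_enable(cpacr, CPACR_ZEN_SHIFT);  /* only if SVE */
        printf("CPACR_EL1 = 0x%llx\n", (unsigned long long)cpacr);
        return 0;
    }

Leaving ZEN at 0 when SVE is absent keeps the trap behaviour consistent
with the ID registers: an SVE instruction traps rather than being enabled
for a feature the cpu does not report.
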
From: Richard Henderson <richard.henderson@linaro.org>

Enable SME, TPIDR2_EL0, and FA64 if supported by the cpu.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-45-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
                                              CPACR_EL1, ZEN, 3);
             env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
         }
+        /* and for SME instructions, with default vector length, and TPIDR2 */
+        if (cpu_isar_feature(aa64_sme, cpu)) {
+            env->cp15.sctlr_el[1] |= SCTLR_EnTP2;
+            env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
+                                             CPACR_EL1, SMEN, 3);
+            env->vfp.smcr_el[1] = cpu->sme_default_vq - 1;
+            if (cpu_isar_feature(aa64_sme_fa64, cpu)) {
+                env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1],
+                                                 SMCR, FA64, 1);
+            }
+        }
         /*
          * Enable 48-bit address space (TODO: take reserved_va into account).
          * Enable TBI0 but not TBI1.
--
2.25.1

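FIELD_DP64 is QEMU's named-field deposit from include/hw/registerfields.h.
As a rough model of the SMCR writes above, a standalone sketch; the field
positions (LEN in bits [3:0], FA64 in bit 31) are assumptions from the
architecture, not something this patch defines:

    #include <stdint.h>

    /* Generic deposit, equivalent in spirit to FIELD_DP64. */
    static uint64_t deposit64(uint64_t reg, unsigned start, unsigned len,
                              uint64_t val)
    {
        uint64_t mask = (~0ULL >> (64 - len)) << start;
        return (reg & ~mask) | ((val << start) & mask);
    }

    /* Illustrative reset value for SMCR_EL1: maximum streaming vector
     * length (LEN = vq - 1) plus FA64 when the cpu supports it. */
    static uint64_t smcr_reset(int vq, int have_fa64)
    {
        uint64_t smcr = 0;
        smcr = deposit64(smcr, 0, 4, vq - 1);   /* vq in 1..16 */
        if (have_fa64) {
            smcr = deposit64(smcr, 31, 1, 1);
        }
        return smcr;
    }
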
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-46-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-user/elfload.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ enum {
     ARM_HWCAP2_A64_RNG          = 1 << 16,
     ARM_HWCAP2_A64_BTI          = 1 << 17,
     ARM_HWCAP2_A64_MTE          = 1 << 18,
+    ARM_HWCAP2_A64_ECV          = 1 << 19,
+    ARM_HWCAP2_A64_AFP          = 1 << 20,
+    ARM_HWCAP2_A64_RPRES        = 1 << 21,
+    ARM_HWCAP2_A64_MTE3         = 1 << 22,
+    ARM_HWCAP2_A64_SME          = 1 << 23,
+    ARM_HWCAP2_A64_SME_I16I64   = 1 << 24,
+    ARM_HWCAP2_A64_SME_F64F64   = 1 << 25,
+    ARM_HWCAP2_A64_SME_I8I32    = 1 << 26,
+    ARM_HWCAP2_A64_SME_F16F32   = 1 << 27,
+    ARM_HWCAP2_A64_SME_B16F32   = 1 << 28,
+    ARM_HWCAP2_A64_SME_F32F32   = 1 << 29,
+    ARM_HWCAP2_A64_SME_FA64     = 1 << 30,
 };

 #define ELF_HWCAP get_elf_hwcap()
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap2(void)
     GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
     GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
     GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
+    GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
+                              ARM_HWCAP2_A64_SME_F32F32 |
+                              ARM_HWCAP2_A64_SME_B16F32 |
+                              ARM_HWCAP2_A64_SME_F16F32 |
+                              ARM_HWCAP2_A64_SME_I8I32));
+    GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
+    GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
+    GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);

     return hwcaps;
 }
--
2.25.1

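A guest binary observes these bits through the auxiliary vector.  A minimal
check, assuming the 1 << 23 value for HWCAP2_SME from the enum above (newer
libcs also export the constant in their hwcap headers):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP2_SME
    #define HWCAP2_SME (1 << 23)
    #endif

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);
        printf("SME %s\n", (hwcap2 & HWCAP2_SME) ? "present" : "absent");
        return 0;
    }
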