Nothing too exciting in this lot :-)
I don't have anything else queued up at the moment, so this is just
Richard's SME patches.

-- PMM

The following changes since commit 63b38f6c85acd312c2cab68554abf33adf4ee2b3:

  Merge tag 'pull-target-arm-20220707' of https://git.linaro.org/people/pmaydell/qemu-arm into staging (2022-07-08 06:17:11 +0530)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220711

for you to fetch changes up to f9982ceaf26df27d15547a3a7990a95019e9e3a8:

  linux-user/aarch64: Add SME related hwcap entries (2022-07-11 13:43:52 +0100)

----------------------------------------------------------------
target-arm:
 * Implement SME emulation, for both system and linux-user

----------------------------------------------------------------
Richard Henderson (45):
      target/arm: Handle SME in aarch64_cpu_dump_state
      target/arm: Add infrastructure for disas_sme
      target/arm: Trap non-streaming usage when Streaming SVE is active
      target/arm: Mark ADR as non-streaming
      target/arm: Mark RDFFR, WRFFR, SETFFR as non-streaming
      target/arm: Mark BDEP, BEXT, BGRP, COMPACT, FEXPA, FTSSEL as non-streaming
      target/arm: Mark PMULL, FMMLA as non-streaming
      target/arm: Mark FTSMUL, FTMAD, FADDA as non-streaming
      target/arm: Mark SMMLA, UMMLA, USMMLA as non-streaming
      target/arm: Mark string/histo/crypto as non-streaming
      target/arm: Mark gather/scatter load/store as non-streaming
      target/arm: Mark gather prefetch as non-streaming
      target/arm: Mark LDFF1 and LDNF1 as non-streaming
      target/arm: Mark LD1RO as non-streaming
      target/arm: Add SME enablement checks
      target/arm: Handle SME in sve_access_check
      target/arm: Implement SME RDSVL, ADDSVL, ADDSPL
      target/arm: Implement SME ZERO
      target/arm: Implement SME MOVA
      target/arm: Implement SME LD1, ST1
      target/arm: Export unpredicated ld/st from translate-sve.c
      target/arm: Implement SME LDR, STR
      target/arm: Implement SME ADDHA, ADDVA
      target/arm: Implement FMOPA, FMOPS (non-widening)
      target/arm: Implement BFMOPA, BFMOPS
      target/arm: Implement FMOPA, FMOPS (widening)
      target/arm: Implement SME integer outer product
      target/arm: Implement PSEL
      target/arm: Implement REVD
      target/arm: Implement SCLAMP, UCLAMP
      target/arm: Reset streaming sve state on exception boundaries
      target/arm: Enable SME for -cpu max
      linux-user/aarch64: Clear tpidr2_el0 if CLONE_SETTLS
      linux-user/aarch64: Reset PSTATE.SM on syscalls
      linux-user/aarch64: Add SM bit to SVE signal context
      linux-user/aarch64: Tidy target_restore_sigframe error return
      linux-user/aarch64: Do not allow duplicate or short sve records
      linux-user/aarch64: Verify extra record lock succeeded
      linux-user/aarch64: Move sve record checks into restore
      linux-user/aarch64: Implement SME signal handling
      linux-user: Rename sve prctls
      linux-user/aarch64: Implement PR_SME_GET_VL, PR_SME_SET_VL
      target/arm: Only set ZEN in reset if SVE present
      target/arm: Enable SME for user-only
      linux-user/aarch64: Add SME related hwcap entries

 docs/system/arm/emulation.rst | 4 +
 linux-user/aarch64/target_cpu.h | 5 +-
 linux-user/aarch64/target_prctl.h | 62 +-
 target/arm/cpu.h | 7 +
 target/arm/helper-sme.h | 126 ++++
 target/arm/helper-sve.h | 4 +
 target/arm/helper.h | 18 +
 target/arm/translate-a64.h | 45 ++
 target/arm/translate.h | 16 +
 target/arm/sme-fa64.decode | 60 ++
 target/arm/sme.decode | 88 +++
 target/arm/sve.decode | 41 +-
 linux-user/aarch64/cpu_loop.c | 9 +
 linux-user/aarch64/signal.c | 243 ++++--
 linux-user/elfload.c | 20 +
 linux-user/syscall.c | 28 +-
 target/arm/cpu.c | 35 +-
 target/arm/cpu64.c | 11 +
 target/arm/helper.c | 56 +-
 target/arm/sme_helper.c | 1140 +++++++++++++++++++++++++++++++++++++
 target/arm/sve_helper.c | 28 +
 target/arm/translate-a64.c | 103 +++-
 target/arm/translate-sme.c | 373 ++++++++++++
 target/arm/translate-sve.c | 393 ++++++++++---
 target/arm/translate-vfp.c | 12 +
 target/arm/translate.c | 2 +
 target/arm/vec_helper.c | 24 +
 target/arm/meson.build | 3 +
 28 files changed, 2821 insertions(+), 135 deletions(-)
 create mode 100644 target/arm/sme-fa64.decode
 create mode 100644 target/arm/sme.decode
 create mode 100644 target/arm/translate-sme.c

From: Richard Henderson <richard.henderson@linaro.org>

Dump SVCR, plus use the correct access check for Streaming Mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     int i;
     int el = arm_current_el(env);
     const char *ns_status;
+    bool sve;

     qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
     for (i = 0; i < 32; i++) {
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
              el,
              psr & PSTATE_SP ? 'h' : 't');

+    if (cpu_isar_feature(aa64_sme, cpu)) {
+        qemu_fprintf(f, " SVCR=%08" PRIx64 " %c%c",
+                     env->svcr,
+                     (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'),
+                     (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
+    }
     if (cpu_isar_feature(aa64_bti, cpu)) {
         qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
     }
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
                  vfp_get_fpcr(env), vfp_get_fpsr(env));

-    if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
+    if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) {
+        sve = sme_exception_el(env, el) == 0;
+    } else if (cpu_isar_feature(aa64_sve, cpu)) {
+        sve = sve_exception_el(env, el) == 0;
+    } else {
+        sve = false;
+    }
+
+    if (sve) {
         int j, zcr_len = sve_vqm1_for_el(env, el);

         for (i = 0; i <= FFR_PRED_NUM; i++) {
--
2.25.1
From: Richard Henderson <richard.henderson@linaro.org>

This includes the build rules for the decoder, and the
new file for translation, but excludes any instructions.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.h | 1 +
 target/arm/sme.decode | 20 ++++++++++++++++++++
 target/arm/translate-a64.c | 7 ++++++-
 target/arm/translate-sme.c | 35 +++++++++++++++++++++++++++++++++++
 target/arm/meson.build | 2 ++
 5 files changed, 64 insertions(+), 1 deletion(-)
 create mode 100644 target/arm/sme.decode
 create mode 100644 target/arm/translate-sme.c

diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s)
 }

 bool disas_sve(DisasContext *, uint32_t);
+bool disas_sme(DisasContext *, uint32_t);

 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@
+# AArch64 SME instruction descriptions
+#
+# Copyright (c) 2022 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     }

     switch (extract32(insn, 25, 4)) {
-    case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
+    case 0x0:
+        if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
+            unallocated_encoding(s);
+        }
+        break;
+    case 0x1: case 0x3: /* UNALLOCATED */
         unallocated_encoding(s);
         break;
     case 0x2:
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * AArch64 SME translation
+ *
+ * Copyright (c) 2022 Linaro, Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "translate.h"
+#include "exec/helper-gen.h"
+#include "translate-a64.h"
+#include "fpu/softfloat.h"
+
+
+/*
+ * Include the generated decoder.
+ */
+
+#include "decode-sme.c.inc"
diff --git a/target/arm/meson.build b/target/arm/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -XXX,XX +XXX,XX @@
 gen = [
   decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
+  decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
   decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
   decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
   decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
@@ -XXX,XX +XXX,XX @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
   'sme_helper.c',
   'translate-a64.c',
   'translate-sve.c',
+  'translate-sme.c',
 ))

 arm_softmmu_ss = ss.source_set()
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

This new behaviour is in the ARM pseudocode function
AArch64.CheckFPAdvSIMDEnabled, which applies to AArch32
via AArch32.CheckAdvSIMDOrFPEnabled when the EL to which
the trap would be delivered is in AArch64 mode.

Given that ARMv9 drops support for AArch32 outside EL0, the trap EL
detection ought to be trivially true, but the pseudocode still contains
a number of conditions, and QEMU has not yet committed to dropping A32
support for EL[12] when v9 features are present.

Since the computation of SME_TRAP_NONSTREAMING is necessarily different
for the two modes, we might as well preserve bits within TBFLAG_ANY and
allocate separate bits within TBFLAG_A32 and TBFLAG_A64 instead.

Note that DDI0616A.a has typos for bits [22:21] of LD1RO in the table
of instructions illegal in streaming mode.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 7 +++
 target/arm/translate.h | 4 ++
 target/arm/sme-fa64.decode | 90 ++++++++++++++++++++++++++++++++++++++
 target/arm/helper.c | 41 +++++++++++++++++
 target/arm/translate-a64.c | 40 ++++++++++++++++-
 target/arm/translate-vfp.c | 12 +++++
 target/arm/translate.c | 2 +
 target/arm/meson.build | 1 +
 8 files changed, 195 insertions(+), 2 deletions(-)
 create mode 100644 target/arm/sme-fa64.decode

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
  * the same thing as the current security state of the processor!
  */
 FIELD(TBFLAG_A32, NS, 10, 1)
+/*
+ * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not.
+ * This requires an SME trap from AArch32 mode when using NEON.
+ */
+FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1)

 /*
  * Bit usage when in AArch32 state, for M-profile only.
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2)
 FIELD(TBFLAG_A64, PSTATE_SM, 22, 1)
 FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1)
 FIELD(TBFLAG_A64, SVL, 24, 4)
+/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */
+FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1)

 /*
  * Helpers for using the above.
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     bool pstate_sm;
     /* True if PSTATE.ZA is set. */
     bool pstate_za;
+    /* True if non-streaming insns should raise an SME Streaming exception. */
+    bool sme_trap_nonstreaming;
+    /* True if the current instruction is non-streaming. */
+    bool is_nonstreaming;
     /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
     bool mve_no_pred;
     /*
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/sme-fa64.decode
@@ -XXX,XX +XXX,XX @@
+# AArch64 SME allowed instruction decoding
+#
+# Copyright (c) 2022 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+# These patterns are taken from Appendix E1.1 of DDI0616 A.a,
+# Arm Architecture Reference Manual Supplement,
+# The Scalable Matrix Extension (SME), for Armv9-A
+
+{
+  [
+    OK  0-00 1110 0000 0001 0010 11-- ---- ----   # SMOV W|Xd,Vn.B[0]
+    OK  0-00 1110 0000 0010 0010 11-- ---- ----   # SMOV W|Xd,Vn.H[0]
+    OK  0100 1110 0000 0100 0010 11-- ---- ----   # SMOV Xd,Vn.S[0]
+    OK  0000 1110 0000 0001 0011 11-- ---- ----   # UMOV Wd,Vn.B[0]
+    OK  0000 1110 0000 0010 0011 11-- ---- ----   # UMOV Wd,Vn.H[0]
+    OK  0000 1110 0000 0100 0011 11-- ---- ----   # UMOV Wd,Vn.S[0]
+    OK  0100 1110 0000 1000 0011 11-- ---- ----   # UMOV Xd,Vn.D[0]
+  ]
+  FAIL  0--0 111- ---- ---- ---- ---- ---- ----   # Advanced SIMD vector operations
+}
+
+{
+  [
+    OK  0101 1110 --1- ---- 11-1 11-- ---- ----   # FMULX/FRECPS/FRSQRTS (scalar)
+    OK  0101 1110 -10- ---- 00-1 11-- ---- ----   # FMULX/FRECPS/FRSQRTS (scalar, FP16)
+    OK  01-1 1110 1-10 0001 11-1 10-- ---- ----   # FRECPE/FRSQRTE/FRECPX (scalar)
+    OK  01-1 1110 1111 1001 11-1 10-- ---- ----   # FRECPE/FRSQRTE/FRECPX (scalar, FP16)
126
+ ]
127
+ FAIL 01-1 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD single-element operations
128
+}
129
+
130
+FAIL 0-00 110- ---- ---- ---- ---- ---- ---- # Advanced SIMD structure load/store
131
+FAIL 1100 1110 ---- ---- ---- ---- ---- ---- # Advanced SIMD cryptography extensions
132
+FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
133
+
134
+# These are the "avoidance of doubt" final table of Illegal Advanced SIMD instructions
135
+# We don't actually need to include these, as the default is OK.
136
+# -001 111- ---- ---- ---- ---- ---- ---- # Scalar floating-point operations
137
+# --10 110- ---- ---- ---- ---- ---- ---- # Load/store pair of FP registers
138
+# --01 1100 ---- ---- ---- ---- ---- ---- # Load FP register (PC-relative literal)
139
+# --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm)
140
+# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
141
+# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
142
+
143
+FAIL 0000 0100 --1- ---- 1010 ---- ---- ---- # ADR
144
+FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA
145
+FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT
146
+FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS
147
+FAIL 0010 0101 --10 1--- 1001 ---- ---- ---- # WRFFR, SETFFR
148
+FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP
149
+FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result)
150
+FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA
151
+FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL
152
+FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD
153
+FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA
154
+FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA
155
+FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
156
+FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
157
+FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
158
+FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector)
159
+FAIL 1000 010- -01- ---- 1--- ---- ---- ---- # SVE 32-bit gather load (vector+imm)
160
+FAIL 1000 0100 0-0- ---- 0--- ---- ---- ---- # SVE 32-bit gather load byte (scalar+vector)
161
+FAIL 1000 0100 1--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load half (scalar+vector)
162
+FAIL 1000 0101 0--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load word (scalar+vector)
163
+FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar)
164
+FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm)
165
+FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
166
+FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
167
+FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch
168
+FAIL 1110 010- -00- ---- 001- ---- ---- ---- # SVE2 64-bit scatter NT store (vector+scalar)
169
+FAIL 1110 010- -10- ---- 001- ---- ---- ---- # SVE2 32-bit scatter NT store (vector+scalar)
170
+FAIL 1110 010- ---- ---- 1-0- ---- ---- ---- # SVE scatter store (scalar+32-bit vector)
171
+FAIL 1110 010- ---- ---- 101- ---- ---- ---- # SVE scatter store (misc)
172
diff --git a/target/arm/helper.c b/target/arm/helper.c
173
index XXXXXXX..XXXXXXX 100644
174
--- a/target/arm/helper.c
175
+++ b/target/arm/helper.c
176
@@ -XXX,XX +XXX,XX @@ int sme_exception_el(CPUARMState *env, int el)
177
return 0;
78
}
178
}
79
179
80
-static void pci_root_bus_init(PCIBus *bus, DeviceState *parent,
180
+/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
81
- MemoryRegion *address_space_mem,
181
+static bool sme_fa64(CPUARMState *env, int el)
82
- MemoryRegion *address_space_io,
182
+{
83
- uint8_t devfn_min)
183
+ if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
84
+static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
184
+ return false;
85
+ MemoryRegion *address_space_mem,
185
+ }
86
+ MemoryRegion *address_space_io,
186
+
87
+ uint8_t devfn_min)
187
+ if (el <= 1 && !el_is_in_host(env, el)) {
188
+ if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
189
+ return false;
190
+ }
191
+ }
192
+ if (el <= 2 && arm_is_el2_enabled(env)) {
193
+ if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
194
+ return false;
195
+ }
196
+ }
197
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
198
+ if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
199
+ return false;
200
+ }
201
+ }
202
+
203
+ return true;
204
+}
205
+
206
/*
207
* Given that SVE is enabled, return the vector length for EL.
208
*/
209
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
210
DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
211
}
212
213
+ /*
214
+ * The SME exception we are testing for is raised via
215
+ * AArch64.CheckFPAdvSIMDEnabled(), as called from
216
+ * AArch32.CheckAdvSIMDOrFPEnabled().
217
+ */
218
+ if (el == 0
219
+ && FIELD_EX64(env->svcr, SVCR, SM)
220
+ && (!arm_is_el2_enabled(env)
221
+ || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
222
+ && arm_el_is_aa64(env, 1)
223
+ && !sme_fa64(env, el)) {
224
+ DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
225
+ }
226
+
227
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
228
}
229
230
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
231
}
232
if (FIELD_EX64(env->svcr, SVCR, SM)) {
233
DP_TBFLAG_A64(flags, PSTATE_SM, 1);
234
+ DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
235
}
236
DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
237
}
238
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/arm/translate-a64.c
241
+++ b/target/arm/translate-a64.c
242
@@ -XXX,XX +XXX,XX @@ static void do_vec_ld(DisasContext *s, int destidx, int element,
243
* unallocated-encoding checks (otherwise the syndrome information
244
* for the resulting exception will be incorrect).
245
*/
246
-static bool fp_access_check(DisasContext *s)
247
+static bool fp_access_check_only(DisasContext *s)
88
{
248
{
89
assert(PCI_FUNC(devfn_min) == 0);
249
if (s->fp_excp_el) {
90
bus->devfn_min = devfn_min;
250
assert(!s->fp_access_checked);
91
@@ -XXX,XX +XXX,XX @@ bool pci_bus_is_express(PCIBus *bus)
251
@@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s)
92
return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
252
return true;
93
}
253
}
94
254
95
-void pci_root_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent,
255
+static bool fp_access_check(DisasContext *s)
96
- const char *name,
256
+{
97
- MemoryRegion *address_space_mem,
257
+ if (!fp_access_check_only(s)) {
98
- MemoryRegion *address_space_io,
258
+ return false;
99
- uint8_t devfn_min, const char *typename)
259
+ }
100
+void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
260
+ if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
101
+ const char *name,
261
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
102
+ MemoryRegion *address_space_mem,
262
+ syn_smetrap(SME_ET_Streaming, false));
103
+ MemoryRegion *address_space_io,
263
+ return false;
104
+ uint8_t devfn_min, const char *typename)
264
+ }
105
{
265
+ return true;
106
qbus_create_inplace(bus, bus_size, typename, parent, name);
266
+}
107
- pci_root_bus_init(bus, parent, address_space_mem, address_space_io,
267
+
108
- devfn_min);
268
/* Check that SVE access is enabled. If it is, return true.
109
+ pci_root_bus_internal_init(bus, parent, address_space_mem,
269
* If not, emit code to generate an appropriate exception and return false.
110
+ address_space_io, devfn_min);
270
*/
271
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
272
default:
273
g_assert_not_reached();
274
}
275
- if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
276
+ if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
277
return;
278
} else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
279
return;
280
@@ -XXX,XX +XXX,XX @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
281
}
111
}
282
}
112
283
113
PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
284
+/*
114
@@ -XXX,XX +XXX,XX @@ PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
285
+ * Include the generated SME FA64 decoder.
115
PCIBus *bus;
286
+ */
116
287
+
117
bus = PCI_BUS(qbus_create(typename, parent, name));
288
+#include "decode-sme-fa64.c.inc"
118
- pci_root_bus_init(bus, parent, address_space_mem, address_space_io,
289
+
119
- devfn_min);
290
+static bool trans_OK(DisasContext *s, arg_OK *a)
120
+ pci_root_bus_internal_init(bus, parent, address_space_mem,
291
+{
121
+ address_space_io, devfn_min);
292
+ return true;
122
return bus;
293
+}
123
}
294
+
124
295
+static bool trans_FAIL(DisasContext *s, arg_OK *a)
296
+{
297
+ s->is_nonstreaming = true;
298
+ return true;
299
+}
300
+
301
/**
302
* is_guarded_page:
303
* @env: The cpu environment
304
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
305
dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
306
dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
307
dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
308
+ dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
309
dc->vec_len = 0;
310
dc->vec_stride = 0;
311
dc->cp_regs = arm_cpu->cp_regs;
312
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
313
}
314
}
315
316
+ s->is_nonstreaming = false;
317
+ if (s->sme_trap_nonstreaming) {
318
+ disas_sme_fa64(s, insn);
319
+ }
320
+
321
switch (extract32(insn, 25, 4)) {
322
case 0x0:
323
if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
324
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
325
index XXXXXXX..XXXXXXX 100644
326
--- a/target/arm/translate-vfp.c
327
+++ b/target/arm/translate-vfp.c
328
@@ -XXX,XX +XXX,XX @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
329
return false;
330
}
331
332
+ /*
333
+ * Note that rebuild_hflags_a32 has already accounted for being in EL0
334
+ * and the higher EL in A64 mode, etc. Unlike A64 mode, there do not
335
+ * appear to be any insns which touch VFP which are allowed.
336
+ */
337
+ if (s->sme_trap_nonstreaming) {
338
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
339
+ syn_smetrap(SME_ET_Streaming,
340
+ s->base.pc_next - s->pc_curr == 2));
341
+ return false;
342
+ }
343
+
344
if (!s->vfp_enabled && !ignore_vfp_enabled) {
345
assert(!arm_dc_feature(s, ARM_FEATURE_M));
346
unallocated_encoding(s);
347
diff --git a/target/arm/translate.c b/target/arm/translate.c
348
index XXXXXXX..XXXXXXX 100644
349
--- a/target/arm/translate.c
350
+++ b/target/arm/translate.c
351
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
352
dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
353
dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
354
}
355
+ dc->sme_trap_nonstreaming =
356
+ EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
357
}
358
dc->cp_regs = cpu->cp_regs;
359
dc->features = env->features;
360
diff --git a/target/arm/meson.build b/target/arm/meson.build
361
index XXXXXXX..XXXXXXX 100644
362
--- a/target/arm/meson.build
363
+++ b/target/arm/meson.build
364
@@ -XXX,XX +XXX,XX @@
365
gen = [
366
decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
367
decodetree.process('sme.decode', extra_args: '--decode=disas_sme'),
368
+ decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'),
369
decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
370
decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
371
decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
125
--
372
--
126
2.20.1
373
2.25.1
127
128
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark ADR as a non-streaming instruction, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Removing entries from sme-fa64.decode is an easy way to see
7
what remains to be done.
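For illustration only (this is not part of the patch), the new
TRANS_FEAT_NONSTREAMING macro added in the translate.h hunk below
expands for the first ADR pattern roughly as follows:

    /* Sketch of the expansion of
     * TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a,
     *                         gen_helper_sve_adr_p32)
     */
    static bool trans_ADR_p32(DisasContext *s, arg_ADR_p32 *a)
    {
        /* Record that the insn is non-streaming before any access check. */
        s->is_nonstreaming = true;
        return dc_isar_feature(aa64_sve, s) &&
               do_adr(s, a, gen_helper_sve_adr_p32);
    }

The flag is consulted when the translator's access checks run, so that
the SME streaming exception can be raised for such insns when streaming
mode is active without FA64.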
8
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20220708151540.18136-5-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
target/arm/translate.h | 7 +++++++
15
target/arm/sme-fa64.decode | 1 -
16
target/arm/translate-sve.c | 8 ++++----
17
3 files changed, 11 insertions(+), 5 deletions(-)
18
19
diff --git a/target/arm/translate.h b/target/arm/translate.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/translate.h
22
+++ b/target/arm/translate.h
23
@@ -XXX,XX +XXX,XX @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
24
static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
25
{ return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }
26
27
+#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) \
28
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
29
+ { \
30
+ s->is_nonstreaming = true; \
31
+ return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \
32
+ }
33
+
34
#endif /* TARGET_ARM_TRANSLATE_H */
35
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/sme-fa64.decode
38
+++ b/target/arm/sme-fa64.decode
39
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
40
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
41
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
42
43
-FAIL 0000 0100 --1- ---- 1010 ---- ---- ---- # ADR
44
FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA
45
FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT
46
FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS
47
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/translate-sve.c
50
+++ b/target/arm/translate-sve.c
51
@@ -XXX,XX +XXX,XX @@ static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
52
return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
53
}
54
55
-TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
56
-TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
57
-TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
58
-TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
59
+TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
60
+TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
61
+TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
62
+TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
63
64
/*
65
*** SVE Integer Misc - Unpredicated Group
66
--
67
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-6-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 2 --
12
target/arm/translate-sve.c | 9 ++++++---
13
2 files changed, 6 insertions(+), 5 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
21
FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA
22
FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT
23
-FAIL 0010 0101 --01 100- 1111 000- ---0 ---- # RDFFR, RDFFRS
24
-FAIL 0010 0101 --10 1--- 1001 ---- ---- ---- # WRFFR, SETFFR
25
FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP
26
FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result)
27
FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA
28
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/translate-sve.c
31
+++ b/target/arm/translate-sve.c
32
@@ -XXX,XX +XXX,XX @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
33
TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)
34
35
/* Note pat == 31 is #all, to set all elements. */
36
-TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false)
37
+TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve,
38
+ do_predset, 0, FFR_PRED_NUM, 31, false)
39
40
/* Note pat == 32 is #unimp, to set no elements. */
41
TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false)
42
@@ -XXX,XX +XXX,XX @@ static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
43
.rd = a->rd, .pg = a->pg, .s = a->s,
44
.rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
45
};
46
+
47
+ s->is_nonstreaming = true;
48
return trans_AND_pppp(s, &alt_a);
49
}
50
51
-TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
52
-TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
53
+TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
54
+TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
55
56
static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
57
void (*gen_fn)(TCGv_i32, TCGv_ptr,
58
--
59
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-7-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 3 ---
12
target/arm/translate-sve.c | 22 ++++++++++++----------
13
2 files changed, 12 insertions(+), 13 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 0000 0100 --1- ---- 1011 -0-- ---- ---- # FTSSEL, FEXPA
24
-FAIL 0000 0101 --10 0001 100- ---- ---- ---- # COMPACT
25
-FAIL 0100 0101 --0- ---- 1011 ---- ---- ---- # BDEP, BEXT, BGRP
26
FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result)
27
FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA
28
FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL
29
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/translate-sve.c
32
+++ b/target/arm/translate-sve.c
33
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_2 * const fexpa_fns[4] = {
34
NULL, gen_helper_sve_fexpa_h,
35
gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
36
};
37
-TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz,
38
- fexpa_fns[a->esz], a->rd, a->rn, 0)
39
+TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz,
40
+ fexpa_fns[a->esz], a->rd, a->rn, 0)
41
42
static gen_helper_gvec_3 * const ftssel_fns[4] = {
43
NULL, gen_helper_sve_ftssel_h,
44
gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
45
};
46
-TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0)
47
+TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz,
48
+ ftssel_fns[a->esz], a, 0)
49
50
/*
51
*** SVE Predicate Logical Operations Group
52
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
53
static gen_helper_gvec_3 * const compact_fns[4] = {
54
NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
55
};
56
-TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0)
57
+TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz,
58
+ compact_fns[a->esz], a, 0)
59
60
/* Call the helper that computes the ARM LastActiveElement pseudocode
61
* function, scaled by the element size. This includes the not found
62
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3 * const bext_fns[4] = {
63
gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
64
gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
65
};
66
-TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
67
- bext_fns[a->esz], a, 0)
68
+TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
69
+ bext_fns[a->esz], a, 0)
70
71
static gen_helper_gvec_3 * const bdep_fns[4] = {
72
gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
73
gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
74
};
75
-TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
76
- bdep_fns[a->esz], a, 0)
77
+TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
78
+ bdep_fns[a->esz], a, 0)
79
80
static gen_helper_gvec_3 * const bgrp_fns[4] = {
81
gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
82
gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
83
};
84
-TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
85
- bgrp_fns[a->esz], a, 0)
86
+TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
87
+ bgrp_fns[a->esz], a, 0)
88
89
static gen_helper_gvec_3 * const cadd_fns[4] = {
90
gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
91
--
92
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-8-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 2 --
12
target/arm/translate-sve.c | 24 +++++++++++++++---------
13
2 files changed, 15 insertions(+), 11 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 0100 0101 000- ---- 0110 1--- ---- ---- # PMULLB, PMULLT (128b result)
24
-FAIL 0110 0100 --1- ---- 1110 01-- ---- ---- # FMMLA, BFMMLA
25
FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL
26
FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD
27
FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA
28
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/arm/translate-sve.c
31
+++ b/target/arm/translate-sve.c
32
@@ -XXX,XX +XXX,XX @@ static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
33
gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
34
NULL, gen_helper_sve2_pmull_d,
35
};
36
- if (a->esz == 0
37
- ? !dc_isar_feature(aa64_sve2_pmull128, s)
38
- : !dc_isar_feature(aa64_sve, s)) {
39
+
40
+ if (a->esz == 0) {
41
+ if (!dc_isar_feature(aa64_sve2_pmull128, s)) {
42
+ return false;
43
+ }
44
+ s->is_nonstreaming = true;
45
+ } else if (!dc_isar_feature(aa64_sve, s)) {
46
return false;
47
}
48
return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
49
@@ -XXX,XX +XXX,XX @@ DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz)
50
* SVE Integer Multiply-Add (unpredicated)
51
*/
52
53
-TRANS_FEAT(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_s,
54
- a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
55
-TRANS_FEAT(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_d,
56
- a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
57
+TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz,
58
+ gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra,
59
+ 0, FPST_FPCR)
60
+TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz,
61
+ gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra,
62
+ 0, FPST_FPCR)
63
64
static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
65
NULL, gen_helper_sve2_sqdmlal_zzzw_h,
66
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
67
TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
68
gen_helper_gvec_bfdot_idx, a)
69
70
-TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
71
- gen_helper_gvec_bfmmla, a, 0)
72
+TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
73
+ gen_helper_gvec_bfmmla, a, 0)
74
75
static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
76
{
77
--
78
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
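For hand-written trans functions the same effect is achieved with an
explicit assignment; a minimal sketch of the pattern used in the
trans_FADDA hunk below (the body of the expansion is elided here):

    static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
    {
        if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
            return false;
        }
        /* Mark the insn non-streaming before the access check runs. */
        s->is_nonstreaming = true;
        if (!sve_access_check(s)) {
            return true;
        }
        /* ... existing FADDA expansion, unchanged ... */
        return true;
    }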
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-9-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 3 ---
12
target/arm/translate-sve.c | 15 +++++++++++----
13
2 files changed, 11 insertions(+), 7 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 0110 0101 --0- ---- 0000 11-- ---- ---- # FTSMUL
24
-FAIL 0110 0101 --01 0--- 100- ---- ---- ---- # FTMAD
25
-FAIL 0110 0101 --01 1--- 001- ---- ---- ---- # FADDA
26
FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA
27
FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
28
FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
29
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/translate-sve.c
32
+++ b/target/arm/translate-sve.c
33
@@ -XXX,XX +XXX,XX @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = {
34
NULL, gen_helper_sve_ftmad_h,
35
gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d,
36
};
37
-TRANS_FEAT(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
38
- ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
39
- a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
40
+TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
41
+ ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
42
+ a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
43
44
/*
45
*** SVE Floating Point Accumulating Reduction Group
46
@@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
47
if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
48
return false;
49
}
50
+ s->is_nonstreaming = true;
51
if (!sve_access_check(s)) {
52
return true;
53
}
54
@@ -XXX,XX +XXX,XX @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
55
DO_FP3(FADD_zzz, fadd)
56
DO_FP3(FSUB_zzz, fsub)
57
DO_FP3(FMUL_zzz, fmul)
58
-DO_FP3(FTSMUL, ftsmul)
59
DO_FP3(FRECPS, recps)
60
DO_FP3(FRSQRTS, rsqrts)
61
62
#undef DO_FP3
63
64
+static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = {
65
+ NULL, gen_helper_gvec_ftsmul_h,
66
+ gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d
67
+};
68
+TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz,
69
+ ftsmul_fns[a->esz], a, 0)
70
+
71
/*
72
*** SVE Floating Point Arithmetic - Predicated Group
73
*/
74
--
75
2.25.1
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as a non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-10-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 1 -
12
target/arm/translate-sve.c | 12 ++++++------
13
2 files changed, 6 insertions(+), 7 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 0100 0101 --0- ---- 1001 10-- ---- ---- # SMMLA, UMMLA, USMMLA
24
FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
25
FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
26
FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-sve.c
30
+++ b/target/arm/translate-sve.c
31
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true)
32
TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false)
33
TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true)
34
35
-TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
36
- gen_helper_gvec_smmla_b, a, 0)
37
-TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
38
- gen_helper_gvec_usmmla_b, a, 0)
39
-TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
40
- gen_helper_gvec_ummla_b, a, 0)
41
+TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
42
+ gen_helper_gvec_smmla_b, a, 0)
43
+TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
44
+ gen_helper_gvec_usmmla_b, a, 0)
45
+TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
46
+ gen_helper_gvec_ummla_b, a, 0)
47
48
TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
49
gen_helper_gvec_bfdot, a, 0)
50
--
51
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-11-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 1 -
12
target/arm/translate-sve.c | 35 ++++++++++++++++++-----------------
13
2 files changed, 18 insertions(+), 18 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
23
-FAIL 0100 0101 --1- ---- 1--- ---- ---- ---- # SVE2 string/histo/crypto instructions
24
FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
25
FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
26
FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector)
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-sve.c
30
+++ b/target/arm/translate-sve.c
31
@@ -XXX,XX +XXX,XX @@ DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
32
static gen_helper_gvec_flags_4 * const match_fns[4] = {
33
gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL
34
};
35
-TRANS_FEAT(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
36
+TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
37
38
static gen_helper_gvec_flags_4 * const nmatch_fns[4] = {
39
gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL
40
};
41
-TRANS_FEAT(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
42
+TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
43
44
static gen_helper_gvec_4 * const histcnt_fns[4] = {
45
NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
46
};
47
-TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
48
- histcnt_fns[a->esz], a, 0)
49
+TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
50
+ histcnt_fns[a->esz], a, 0)
51
52
-TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
53
- a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
54
+TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
55
+ a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
56
57
DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz)
58
DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz)
59
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
60
TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
61
a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)
62
63
-TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
64
- gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
65
+TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
66
+ gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
67
68
-TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
69
- gen_helper_crypto_aese, a, false)
70
-TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
71
- gen_helper_crypto_aese, a, true)
72
+TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
73
+ gen_helper_crypto_aese, a, false)
74
+TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
75
+ gen_helper_crypto_aese, a, true)
76
77
-TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
78
- gen_helper_crypto_sm4e, a, 0)
79
-TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
80
- gen_helper_crypto_sm4ekey, a, 0)
81
+TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
82
+ gen_helper_crypto_sm4e, a, 0)
83
+TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
84
+ gen_helper_crypto_sm4ekey, a, 0)
85
86
-TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a)
87
+TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz,
88
+ gen_gvec_rax1, a)
89
90
TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz,
91
gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR)
92
--
93
2.25.1
1
From: Xuzhou Cheng <xuzhou.cheng@windriver.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The Linux spi-imx driver does not work on QEMU. The reason is that the
3
Mark these as non-streaming instructions, which should trap
4
state of m25p80 loops in STATE_READING_DATA state after receiving
4
if full a64 support is not enabled in streaming mode.
5
RDSR command, the new command is ignored. Before sending a new command,
6
CS line should be pulled high to make the state of m25p80 back to IDLE.
7
5
8
Currently the SPI flash CS line is connected to the SPI controller, but
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
on the real board, it's connected to GPIO3_19. This matches the ecspi1
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
device node in the board dts.
8
Message-id: 20220708151540.18136-12-richard.henderson@linaro.org
11
12
ecspi1 node in imx6qdl-sabrelite.dtsi:
13
&ecspi1 {
14
cs-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
15
pinctrl-names = "default";
16
pinctrl-0 = <&pinctrl_ecspi1>;
17
status = "okay";
18
19
flash: m25p80@0 {
20
compatible = "sst,sst25vf016b", "jedec,spi-nor";
21
spi-max-frequency = <20000000>;
22
reg = <0>;
23
};
24
};
25
26
Should connect the SSI_GPIO_CS to GPIO3_19 when adding a spi-nor to
27
spi1 on sabrelite machine.
28
29
Verified this patch on Linux v5.14.
30
31
Logs:
32
# echo "01234567899876543210" > test
33
# mtd_debug erase /dev/mtd0 0x0 0x1000
34
Erased 4096 bytes from address 0x00000000 in flash
35
# mtd_debug write /dev/mtdblock0 0x0 20 test
36
Copied 20 bytes from test to address 0x00000000 in flash
37
# mtd_debug read /dev/mtdblock0 0x0 20 test_out
38
Copied 20 bytes from address 0x00000000 in flash to test_out
39
# cat test_out
40
01234567899876543210#
41
42
Signed-off-by: Xuzhou Cheng <xuzhou.cheng@windriver.com>
43
Reported-by: Guenter Roeck <linux@roeck-us.net>
44
Reviewed-by: Bin Meng <bin.meng@windriver.com>
45
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
46
Message-id: 20210927142825.491-1-xchengl.cn@gmail.com
47
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
48
---
10
---
49
hw/arm/sabrelite.c | 2 +-
11
target/arm/sme-fa64.decode | 9 ---------
50
1 file changed, 1 insertion(+), 1 deletion(-)
12
target/arm/translate-sve.c | 6 ++++++
13
2 files changed, 6 insertions(+), 9 deletions(-)
51
14
52
diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
53
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
54
--- a/hw/arm/sabrelite.c
17
--- a/target/arm/sme-fa64.decode
55
+++ b/hw/arm/sabrelite.c
18
+++ b/target/arm/sme-fa64.decode
56
@@ -XXX,XX +XXX,XX @@ static void sabrelite_init(MachineState *machine)
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
57
qdev_realize_and_unref(flash_dev, BUS(spi_bus), &error_fatal);
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
58
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
59
cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0);
22
60
- sysbus_connect_irq(SYS_BUS_DEVICE(spi_dev), 1, cs_line);
23
-FAIL 1000 010- -00- ---- 10-- ---- ---- ---- # SVE2 32-bit gather NT load (vector+scalar)
61
+ qdev_connect_gpio_out(DEVICE(&s->gpio[2]), 19, cs_line);
24
FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
62
}
25
FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector)
63
}
26
-FAIL 1000 010- -01- ---- 1--- ---- ---- ---- # SVE 32-bit gather load (vector+imm)
27
-FAIL 1000 0100 0-0- ---- 0--- ---- ---- ---- # SVE 32-bit gather load byte (scalar+vector)
28
-FAIL 1000 0100 1--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load half (scalar+vector)
29
-FAIL 1000 0101 0--- ---- 0--- ---- ---- ---- # SVE 32-bit gather load word (scalar+vector)
30
FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar)
31
FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm)
32
FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
33
FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
34
FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch
35
-FAIL 1110 010- -00- ---- 001- ---- ---- ---- # SVE2 64-bit scatter NT store (vector+scalar)
36
-FAIL 1110 010- -10- ---- 001- ---- ---- ---- # SVE2 32-bit scatter NT store (vector+scalar)
37
-FAIL 1110 010- ---- ---- 1-0- ---- ---- ---- # SVE scatter store (scalar+32-bit vector)
38
-FAIL 1110 010- ---- ---- 101- ---- ---- ---- # SVE scatter store (misc)
39
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/target/arm/translate-sve.c
42
+++ b/target/arm/translate-sve.c
43
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
44
if (!dc_isar_feature(aa64_sve, s)) {
45
return false;
46
}
47
+ s->is_nonstreaming = true;
48
if (!sve_access_check(s)) {
49
return true;
50
}
51
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
52
if (!dc_isar_feature(aa64_sve, s)) {
53
return false;
54
}
55
+ s->is_nonstreaming = true;
56
if (!sve_access_check(s)) {
57
return true;
58
}
59
@@ -XXX,XX +XXX,XX @@ static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
60
if (!dc_isar_feature(aa64_sve2, s)) {
61
return false;
62
}
63
+ s->is_nonstreaming = true;
64
if (!sve_access_check(s)) {
65
return true;
66
}
67
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
68
if (!dc_isar_feature(aa64_sve, s)) {
69
return false;
70
}
71
+ s->is_nonstreaming = true;
72
if (!sve_access_check(s)) {
73
return true;
74
}
75
@@ -XXX,XX +XXX,XX @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
76
if (!dc_isar_feature(aa64_sve, s)) {
77
return false;
78
}
79
+ s->is_nonstreaming = true;
80
if (!sve_access_check(s)) {
81
return true;
82
}
83
@@ -XXX,XX +XXX,XX @@ static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
84
if (!dc_isar_feature(aa64_sve2, s)) {
85
return false;
86
}
87
+ s->is_nonstreaming = true;
88
if (!sve_access_check(s)) {
89
return true;
64
}
90
}
65
--
91
--
66
2.20.1
92
2.25.1
67
68
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap if full
4
a64 support is not enabled in streaming mode. In this case, introduce
5
PRF_ns (prefetch non-streaming) to handle the checks.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220708151540.18136-13-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/sme-fa64.decode | 3 ---
13
target/arm/sve.decode | 10 +++++-----
14
target/arm/translate-sve.c | 11 +++++++++++
15
3 files changed, 16 insertions(+), 8 deletions(-)
16
17
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/sme-fa64.decode
20
+++ b/target/arm/sme-fa64.decode
21
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
22
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
23
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
24
25
-FAIL 1000 010- -00- ---- 111- ---- ---- ---- # SVE 32-bit gather prefetch (vector+imm)
26
-FAIL 1000 0100 0-1- ---- 0--- ---- ---- ---- # SVE 32-bit gather prefetch (scalar+vector)
27
FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar)
28
FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm)
29
FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
30
FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
31
-FAIL 1100 010- ---- ---- ---- ---- ---- ---- # SVE 64-bit gather load/prefetch
32
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/arm/sve.decode
35
+++ b/target/arm/sve.decode
36
@@ -XXX,XX +XXX,XX @@ LD1RO_zpri 1010010 .. 01 0.... 001 ... ..... ..... \
37
@rpri_load_msz nreg=0
38
39
# SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets)
40
-PRF 1000010 00 -1 ----- 0-- --- ----- 0 ----
41
+PRF_ns 1000010 00 -1 ----- 0-- --- ----- 0 ----
42
43
# SVE 32-bit gather prefetch (vector plus immediate)
44
-PRF 1000010 -- 00 ----- 111 --- ----- 0 ----
45
+PRF_ns 1000010 -- 00 ----- 111 --- ----- 0 ----
46
47
# SVE contiguous prefetch (scalar plus immediate)
48
PRF 1000010 11 1- ----- 0-- --- ----- 0 ----
49
@@ -XXX,XX +XXX,XX @@ LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \
50
@rpri_g_load esz=3
51
52
# SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets)
53
-PRF 1100010 00 11 ----- 1-- --- ----- 0 ----
54
+PRF_ns 1100010 00 11 ----- 1-- --- ----- 0 ----
55
56
# SVE 64-bit gather prefetch (scalar plus unpacked 32-bit scaled offsets)
57
-PRF 1100010 00 -1 ----- 0-- --- ----- 0 ----
58
+PRF_ns 1100010 00 -1 ----- 0-- --- ----- 0 ----
59
60
# SVE 64-bit gather prefetch (vector plus immediate)
61
-PRF 1100010 -- 00 ----- 111 --- ----- 0 ----
62
+PRF_ns 1100010 -- 00 ----- 111 --- ----- 0 ----
63
64
### SVE Memory Store Group
65
66
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/arm/translate-sve.c
69
+++ b/target/arm/translate-sve.c
70
@@ -XXX,XX +XXX,XX @@ static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
71
return true;
72
}
73
74
+static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a)
75
+{
76
+ if (!dc_isar_feature(aa64_sve, s)) {
77
+ return false;
78
+ }
79
+ /* Prefetch is a nop within QEMU. */
80
+ s->is_nonstreaming = true;
81
+ (void)sve_access_check(s);
82
+ return true;
83
+}
84
+
85
/*
86
* Move Prefix
87
*
88
--
89
2.25.1
1
The aarch64-linux QEMU usermode binaries can never run 32-bit
1
From: Richard Henderson <richard.henderson@linaro.org>
2
code, so they do not need to include the GDB XML for it.
3
(arm_cpu_register_gdb_regs_for_features() will not use these
4
XML files if the CPU has ARM_FEATURE_AARCH64, so we will not
5
advertise to gdb that we have them.)
6
2
3
Mark these as non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-14-richard.henderson@linaro.org
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210921162901.17508-2-peter.maydell@linaro.org
10
---
10
---
11
configs/targets/aarch64-linux-user.mak | 2 +-
11
target/arm/sme-fa64.decode | 2 --
12
configs/targets/aarch64_be-linux-user.mak | 2 +-
12
target/arm/translate-sve.c | 2 ++
13
2 files changed, 2 insertions(+), 2 deletions(-)
13
2 files changed, 2 insertions(+), 2 deletions(-)
14
14
15
diff --git a/configs/targets/aarch64-linux-user.mak b/configs/targets/aarch64-linux-user.mak
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/configs/targets/aarch64-linux-user.mak
17
--- a/target/arm/sme-fa64.decode
18
+++ b/configs/targets/aarch64-linux-user.mak
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
TARGET_ARCH=aarch64
20
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
21
TARGET_BASE_ARCH=arm
21
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
22
-TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml
22
23
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml
23
-FAIL 1010 010- ---- ---- 011- ---- ---- ---- # SVE contiguous FF load (scalar+scalar)
24
TARGET_HAS_BFLT=y
24
-FAIL 1010 010- ---1 ---- 101- ---- ---- ---- # SVE contiguous NF load (scalar+imm)
25
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
25
FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
26
diff --git a/configs/targets/aarch64_be-linux-user.mak b/configs/targets/aarch64_be-linux-user.mak
26
FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
27
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
28
--- a/configs/targets/aarch64_be-linux-user.mak
29
--- a/target/arm/translate-sve.c
29
+++ b/configs/targets/aarch64_be-linux-user.mak
30
+++ b/target/arm/translate-sve.c
30
@@ -XXX,XX +XXX,XX @@
31
@@ -XXX,XX +XXX,XX @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
31
TARGET_ARCH=aarch64
32
if (!dc_isar_feature(aa64_sve, s)) {
32
TARGET_BASE_ARCH=arm
33
return false;
33
TARGET_WORDS_BIGENDIAN=y
34
}
34
-TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml
35
+ s->is_nonstreaming = true;
35
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml
36
if (sve_access_check(s)) {
36
TARGET_HAS_BFLT=y
37
TCGv_i64 addr = new_tmp_a64(s);
37
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
38
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
39
@@ -XXX,XX +XXX,XX @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
40
if (!dc_isar_feature(aa64_sve, s)) {
41
return false;
42
}
43
+ s->is_nonstreaming = true;
44
if (sve_access_check(s)) {
45
int vsz = vec_full_reg_size(s);
46
int elements = vsz >> dtype_esz[a->dtype];
38
--
47
--
39
2.20.1
48
2.25.1
40
41
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Mark these as non-streaming instructions, which should trap
4
if full a64 support is not enabled in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-15-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/sme-fa64.decode | 3 ---
12
target/arm/translate-sve.c | 2 ++
13
2 files changed, 2 insertions(+), 3 deletions(-)
14
15
diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/sme-fa64.decode
18
+++ b/target/arm/sme-fa64.decode
19
@@ -XXX,XX +XXX,XX @@ FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS
20
# --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm)
21
# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset)
22
# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm)
23
-
24
-FAIL 1010 010- -01- ---- 000- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+scalar)
25
-FAIL 1010 010- -010 ---- 001- ---- ---- ---- # SVE load & replicate 32 bytes (scalar+imm)
26
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/arm/translate-sve.c
29
+++ b/target/arm/translate-sve.c
30
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
31
if (a->rm == 31) {
32
return false;
33
}
34
+ s->is_nonstreaming = true;
35
if (sve_access_check(s)) {
36
TCGv_i64 addr = new_tmp_a64(s);
37
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
38
@@ -XXX,XX +XXX,XX @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
39
if (!dc_isar_feature(aa64_sve_f64mm, s)) {
40
return false;
41
}
42
+ s->is_nonstreaming = true;
43
if (sve_access_check(s)) {
44
TCGv_i64 addr = new_tmp_a64(s);
45
tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
46
--
47
2.25.1
1
From: Tong Ho <tong.ho@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This introduces the QOM for Xilinx eFuse, a one-time
3
These functions will be used to verify that the cpu
4
field-programmable storage bit array.
4
is in the correct state for a given instruction.
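As a purely hypothetical illustration (trans_EXAMPLE and arg_EXAMPLE do
not exist in the tree; they only stand in for a real SME instruction),
a translator would typically use one of these checks like this:

    static bool trans_EXAMPLE(DisasContext *s, arg_EXAMPLE *a)
    {
        if (!dc_isar_feature(aa64_sme, s)) {
            return false;
        }
        if (sme_sm_enabled_check(s)) {
            /* Streaming SVE mode is enabled: emit the TCG ops here. */
        }
        return true;
    }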
5
5
6
The actual mmio interface to the array varies by device
7
families and will be provided in different change-sets.
8
9
Co-authored-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
10
Co-authored-by: Sai Pavan Boddu <sai.pavan.boddu@xilinx.com>
11
12
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
13
Signed-off-by: Sai Pavan Boddu <sai.pavan.boddu@xilinx.com>
14
Signed-off-by: Tong Ho <tong.ho@xilinx.com>
15
Message-id: 20210917052400.1249094-2-tong.ho@xilinx.com
16
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-16-richard.henderson@linaro.org
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
10
---
19
include/hw/nvram/xlnx-efuse.h | 132 ++++++++++++++++
11
target/arm/translate-a64.h | 21 +++++++++++++++++++++
20
hw/nvram/xlnx-efuse-crc.c | 119 +++++++++++++++
12
target/arm/translate-a64.c | 34 ++++++++++++++++++++++++++++++++++
21
hw/nvram/xlnx-efuse.c | 280 ++++++++++++++++++++++++++++++++++
13
2 files changed, 55 insertions(+)
22
hw/nvram/Kconfig | 7 +
23
hw/nvram/meson.build | 2 +
24
5 files changed, 540 insertions(+)
25
create mode 100644 include/hw/nvram/xlnx-efuse.h
26
create mode 100644 hw/nvram/xlnx-efuse-crc.c
27
create mode 100644 hw/nvram/xlnx-efuse.c
28
14
29
diff --git a/include/hw/nvram/xlnx-efuse.h b/include/hw/nvram/xlnx-efuse.h
15
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
30
new file mode 100644
16
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX
17
--- a/target/arm/translate-a64.h
32
--- /dev/null
18
+++ b/target/arm/translate-a64.h
33
+++ b/include/hw/nvram/xlnx-efuse.h
19
@@ -XXX,XX +XXX,XX @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
34
@@ -XXX,XX +XXX,XX @@
20
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
35
+/*
21
unsigned int imms, unsigned int immr);
36
+ * QEMU model of the Xilinx eFuse core
22
bool sve_access_check(DisasContext *s);
37
+ *
23
+bool sme_enabled_check(DisasContext *s);
38
+ * Copyright (c) 2015 Xilinx Inc.
24
+bool sme_enabled_check_with_svcr(DisasContext *s, unsigned);
39
+ *
40
+ * Written by Edgar E. Iglesias <edgari@xilinx.com>
41
+ *
42
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
43
+ * of this software and associated documentation files (the "Software"), to deal
44
+ * in the Software without restriction, including without limitation the rights
45
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
46
+ * copies of the Software, and to permit persons to whom the Software is
47
+ * furnished to do so, subject to the following conditions:
48
+ *
49
+ * The above copyright notice and this permission notice shall be included in
50
+ * all copies or substantial portions of the Software.
51
+ *
52
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
53
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
54
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
55
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
56
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
57
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
58
+ * THE SOFTWARE.
59
+ */
60
+
25
+
61
+#ifndef XLNX_EFUSE_H
26
+/* This function corresponds to CheckStreamingSVEEnabled. */
62
+#define XLNX_EFUSE_H
27
+static inline bool sme_sm_enabled_check(DisasContext *s)
63
+
64
+#include "sysemu/block-backend.h"
65
+#include "hw/qdev-core.h"
66
+
67
+#define TYPE_XLNX_EFUSE "xlnx,efuse"
68
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxEFuse, XLNX_EFUSE);
69
+
70
+struct XlnxEFuse {
71
+ DeviceState parent_obj;
72
+ BlockBackend *blk;
73
+ bool blk_ro;
74
+ uint32_t *fuse32;
75
+
76
+ DeviceState *dev;
77
+
78
+ bool init_tbits;
79
+
80
+ uint8_t efuse_nr;
81
+ uint32_t efuse_size;
82
+
83
+ uint32_t *ro_bits;
84
+ uint32_t ro_bits_cnt;
85
+};
86
+
87
+/**
88
+ * xlnx_efuse_calc_crc:
89
+ * @data: an array of 32-bit words for which the CRC should be computed
90
+ * @u32_cnt: the array size in number of 32-bit words
91
+ * @zpads: the number of 32-bit zeros prepended to @data before computation
92
+ *
93
+ * This function is used to compute the CRC for an array of 32-bit words,
94
+ * using a Xilinx-specific data padding.
95
+ *
96
+ * Returns: the computed 32-bit CRC
97
+ */
98
+uint32_t xlnx_efuse_calc_crc(const uint32_t *data, unsigned u32_cnt,
99
+ unsigned zpads);
100
+
101
+/**
102
+ * xlnx_efuse_get_bit:
103
+ * @s: the efuse object
104
+ * @bit: the efuse bit-address to read the data
105
+ *
106
+ * Returns: the bit, 0 or 1, at @bit of object @s
107
+ */
108
+bool xlnx_efuse_get_bit(XlnxEFuse *s, unsigned int bit);
109
+
110
+/**
111
+ * xlnx_efuse_set_bit:
112
+ * @s: the efuse object
113
+ * @bit: the efuse bit-address to be written a value of 1
114
+ *
115
+ * Returns: true on success, false on failure
116
+ */
117
+bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit);
118
+
119
+/**
120
+ * xlnx_efuse_k256_check:
121
+ * @s: the efuse object
122
+ * @crc: the 32-bit CRC to be compared with
123
+ * @start: the efuse bit-address (which must be multiple of 32) of the
124
+ * start of a 256-bit array
125
+ *
126
+ * This function computes the CRC of a 256-bit array starting at @start
127
+ * then compares to the given @crc
128
+ *
129
+ * Returns: true of @crc == computed, false otherwise
130
+ */
131
+bool xlnx_efuse_k256_check(XlnxEFuse *s, uint32_t crc, unsigned start);
132
+
133
+/**
134
+ * xlnx_efuse_tbits_check:
135
+ * @s: the efuse object
136
+ *
137
+ * This function inspects a number of efuse bits at specific addresses
138
+ * to see if they match a validation pattern. Each pattern is a group
139
+ * of 4 bits, and there are 3 groups.
140
+ *
141
+ * Returns: a 3-bit mask, where a bit of '1' means the corresponding
142
+ * group has a valid pattern.
143
+ */
144
+uint32_t xlnx_efuse_tbits_check(XlnxEFuse *s);
145
+
146
+/**
147
+ * xlnx_efuse_get_row:
148
+ * @s: the efuse object
149
+ * @bit: the efuse bit address for which a 32-bit value is read
150
+ *
151
+ * Returns: the entire 32 bits of the efuse, starting at a bit
152
+ * address that is multiple of 32 and contains the bit at @bit
153
+ */
154
+static inline uint32_t xlnx_efuse_get_row(XlnxEFuse *s, unsigned int bit)
155
+{
28
+{
156
+ if (!(s->fuse32)) {
29
+ return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK);
157
+ return 0;
158
+ } else {
159
+ unsigned int row_idx = bit / 32;
160
+
161
+ assert(row_idx < (s->efuse_size * s->efuse_nr / 32));
162
+ return s->fuse32[row_idx];
163
+ }
164
+}
30
+}
165
+
31
+
166
+#endif
32
+/* This function corresponds to CheckSMEAndZAEnabled. */
167
diff --git a/hw/nvram/xlnx-efuse-crc.c b/hw/nvram/xlnx-efuse-crc.c
33
+static inline bool sme_za_enabled_check(DisasContext *s)
168
new file mode 100644
169
index XXXXXXX..XXXXXXX
170
--- /dev/null
171
+++ b/hw/nvram/xlnx-efuse-crc.c
172
@@ -XXX,XX +XXX,XX @@
173
+/*
174
+ * Xilinx eFuse/bbram CRC calculator
175
+ *
176
+ * Copyright (c) 2021 Xilinx Inc.
177
+ *
178
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
179
+ * of this software and associated documentation files (the "Software"), to deal
180
+ * in the Software without restriction, including without limitation the rights
181
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
182
+ * copies of the Software, and to permit persons to whom the Software is
183
+ * furnished to do so, subject to the following conditions:
184
+ *
185
+ * The above copyright notice and this permission notice shall be included in
186
+ * all copies or substantial portions of the Software.
187
+ *
188
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
189
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
190
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
191
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
192
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
193
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
194
+ * THE SOFTWARE.
195
+ */
196
+#include "qemu/osdep.h"
197
+#include "hw/nvram/xlnx-efuse.h"
198
+
199
+static uint32_t xlnx_efuse_u37_crc(uint32_t prev_crc, uint32_t data,
200
+ uint32_t addr)
201
+{
34
+{
202
+ /* A table for 7-bit slicing */
35
+ return sme_enabled_check_with_svcr(s, R_SVCR_ZA_MASK);
203
+ static const uint32_t crc_tab[128] = {
204
+ 0x00000000, 0xe13b70f7, 0xc79a971f, 0x26a1e7e8,
205
+ 0x8ad958cf, 0x6be22838, 0x4d43cfd0, 0xac78bf27,
206
+ 0x105ec76f, 0xf165b798, 0xd7c45070, 0x36ff2087,
207
+ 0x9a879fa0, 0x7bbcef57, 0x5d1d08bf, 0xbc267848,
208
+ 0x20bd8ede, 0xc186fe29, 0xe72719c1, 0x061c6936,
209
+ 0xaa64d611, 0x4b5fa6e6, 0x6dfe410e, 0x8cc531f9,
210
+ 0x30e349b1, 0xd1d83946, 0xf779deae, 0x1642ae59,
211
+ 0xba3a117e, 0x5b016189, 0x7da08661, 0x9c9bf696,
212
+ 0x417b1dbc, 0xa0406d4b, 0x86e18aa3, 0x67dafa54,
213
+ 0xcba24573, 0x2a993584, 0x0c38d26c, 0xed03a29b,
214
+ 0x5125dad3, 0xb01eaa24, 0x96bf4dcc, 0x77843d3b,
215
+ 0xdbfc821c, 0x3ac7f2eb, 0x1c661503, 0xfd5d65f4,
216
+ 0x61c69362, 0x80fde395, 0xa65c047d, 0x4767748a,
217
+ 0xeb1fcbad, 0x0a24bb5a, 0x2c855cb2, 0xcdbe2c45,
218
+ 0x7198540d, 0x90a324fa, 0xb602c312, 0x5739b3e5,
219
+ 0xfb410cc2, 0x1a7a7c35, 0x3cdb9bdd, 0xdde0eb2a,
220
+ 0x82f63b78, 0x63cd4b8f, 0x456cac67, 0xa457dc90,
221
+ 0x082f63b7, 0xe9141340, 0xcfb5f4a8, 0x2e8e845f,
222
+ 0x92a8fc17, 0x73938ce0, 0x55326b08, 0xb4091bff,
223
+ 0x1871a4d8, 0xf94ad42f, 0xdfeb33c7, 0x3ed04330,
224
+ 0xa24bb5a6, 0x4370c551, 0x65d122b9, 0x84ea524e,
225
+ 0x2892ed69, 0xc9a99d9e, 0xef087a76, 0x0e330a81,
226
+ 0xb21572c9, 0x532e023e, 0x758fe5d6, 0x94b49521,
227
+ 0x38cc2a06, 0xd9f75af1, 0xff56bd19, 0x1e6dcdee,
228
+ 0xc38d26c4, 0x22b65633, 0x0417b1db, 0xe52cc12c,
229
+ 0x49547e0b, 0xa86f0efc, 0x8ecee914, 0x6ff599e3,
230
+ 0xd3d3e1ab, 0x32e8915c, 0x144976b4, 0xf5720643,
231
+ 0x590ab964, 0xb831c993, 0x9e902e7b, 0x7fab5e8c,
232
+ 0xe330a81a, 0x020bd8ed, 0x24aa3f05, 0xc5914ff2,
233
+ 0x69e9f0d5, 0x88d28022, 0xae7367ca, 0x4f48173d,
234
+ 0xf36e6f75, 0x12551f82, 0x34f4f86a, 0xd5cf889d,
235
+ 0x79b737ba, 0x988c474d, 0xbe2da0a5, 0x5f16d052
236
+ };
237
+
238
+ /*
239
+ * eFuse calculation is shown here:
240
+ * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_utils.c#L1496
241
+ *
242
+ * Each u32 word is appended a 5-bit value, for a total of 37 bits; see:
243
+ * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_utils.c#L1356
244
+ */
245
+ uint32_t crc = prev_crc;
246
+ const unsigned rshf = 7;
247
+ const uint32_t im = (1 << rshf) - 1;
248
+ const uint32_t rm = (1 << (32 - rshf)) - 1;
249
+ const uint32_t i2 = (1 << 2) - 1;
250
+ const uint32_t r2 = (1 << 30) - 1;
251
+
252
+ unsigned j;
253
+ uint32_t i, r;
254
+ uint64_t w;
255
+
256
+ w = (uint64_t)(addr) << 32;
257
+ w |= data;
258
+
259
+ /* Feed 35 bits, in 5 rounds, each a slice of 7 bits */
260
+ for (j = 0; j < 5; j++) {
261
+ r = rm & (crc >> rshf);
262
+ i = im & (crc ^ w);
263
+ crc = crc_tab[i] ^ r;
264
+
265
+ w >>= rshf;
266
+ }
267
+
268
+ /* Feed the remaining 2 bits */
269
+ r = r2 & (crc >> 2);
270
+ i = i2 & (crc ^ w);
271
+ crc = crc_tab[i << (rshf - 2)] ^ r;
272
+
273
+ return crc;
274
+}
36
+}
275
+
37
+
276
+uint32_t xlnx_efuse_calc_crc(const uint32_t *data, unsigned u32_cnt,
38
+/* Note that this function corresponds to CheckStreamingSVEAndZAEnabled. */
277
+ unsigned zpads)
39
+static inline bool sme_smza_enabled_check(DisasContext *s)
278
+{
40
+{
279
+ uint32_t crc = 0;
41
+ return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
280
+ unsigned index;
281
+
282
+ for (index = zpads; index; index--) {
283
+ crc = xlnx_efuse_u37_crc(crc, 0, (index + u32_cnt));
284
+ }
285
+
286
+ for (index = u32_cnt; index; index--) {
287
+ crc = xlnx_efuse_u37_crc(crc, data[index - 1], index);
288
+ }
289
+
290
+ return crc;
291
+}
292
diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c
293
new file mode 100644
294
index XXXXXXX..XXXXXXX
295
--- /dev/null
296
+++ b/hw/nvram/xlnx-efuse.c
297
@@ -XXX,XX +XXX,XX @@
298
+/*
299
+ * QEMU model of the EFUSE eFuse
300
+ *
301
+ * Copyright (c) 2015 Xilinx Inc.
302
+ *
303
+ * Written by Edgar E. Iglesias <edgari@xilinx.com>
304
+ *
305
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
306
+ * of this software and associated documentation files (the "Software"), to deal
307
+ * in the Software without restriction, including without limitation the rights
308
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
309
+ * copies of the Software, and to permit persons to whom the Software is
310
+ * furnished to do so, subject to the following conditions:
311
+ *
312
+ * The above copyright notice and this permission notice shall be included in
313
+ * all copies or substantial portions of the Software.
314
+ *
315
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
316
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
317
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
318
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
319
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
320
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
321
+ * THE SOFTWARE.
322
+ */
323
+
324
+#include "qemu/osdep.h"
325
+#include "hw/nvram/xlnx-efuse.h"
326
+
327
+#include "qemu/error-report.h"
328
+#include "qemu/log.h"
329
+#include "qapi/error.h"
330
+#include "sysemu/blockdev.h"
331
+#include "hw/qdev-properties.h"
332
+#include "hw/qdev-properties-system.h"
333
+
334
+#define TBIT0_OFFSET 28
335
+#define TBIT1_OFFSET 29
336
+#define TBIT2_OFFSET 30
337
+#define TBIT3_OFFSET 31
338
+#define TBITS_PATTERN (0x0AU << TBIT0_OFFSET)
339
+#define TBITS_MASK (0x0FU << TBIT0_OFFSET)
340
+
341
+bool xlnx_efuse_get_bit(XlnxEFuse *s, unsigned int bit)
342
+{
343
+ bool b = s->fuse32[bit / 32] & (1 << (bit % 32));
344
+ return b;
345
+}
42
+}
346
+
43
+
347
+static int efuse_bytes(XlnxEFuse *s)
44
TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
45
TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
46
bool tag_checked, int log2_size);
47
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/translate-a64.c
50
+++ b/target/arm/translate-a64.c
51
@@ -XXX,XX +XXX,XX @@ static bool sme_access_check(DisasContext *s)
52
return true;
53
}
54
55
+/* This function corresponds to CheckSMEEnabled. */
56
+bool sme_enabled_check(DisasContext *s)
348
+{
57
+{
349
+ return ROUND_UP((s->efuse_nr * s->efuse_size) / 8, 4);
58
+ /*
59
+ * Note that unlike sve_excp_el, we have not constrained sme_excp_el
60
+ * to be zero when fp_excp_el has priority. This is because we need
61
+ * sme_excp_el by itself for cpregs access checks.
62
+ */
63
+ if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
64
+ s->fp_access_checked = true;
65
+ return sme_access_check(s);
66
+ }
67
+ return fp_access_check_only(s);
350
+}
68
+}
351
+
69
+
352
+static int efuse_bdrv_read(XlnxEFuse *s, Error **errp)
70
+/* Common subroutine for CheckSMEAnd*Enabled. */
71
+bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
353
+{
72
+{
354
+ uint32_t *ram = s->fuse32;
73
+ if (!sme_enabled_check(s)) {
355
+ int nr = efuse_bytes(s);
356
+
357
+ if (!s->blk) {
358
+ return 0;
359
+ }
360
+
361
+ s->blk_ro = !blk_supports_write_perm(s->blk);
362
+ if (!s->blk_ro) {
363
+ int rc;
364
+
365
+ rc = blk_set_perm(s->blk,
366
+ (BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE),
367
+ BLK_PERM_ALL, NULL);
368
+ if (rc) {
369
+ s->blk_ro = true;
370
+ }
371
+ }
372
+ if (s->blk_ro) {
373
+ warn_report("%s: Skip saving updates to read-only eFUSE backstore.",
374
+ blk_name(s->blk));
375
+ }
376
+
377
+ if (blk_pread(s->blk, 0, ram, nr) < 0) {
378
+ error_setg(errp, "%s: Failed to read %u bytes from eFUSE backstore.",
379
+ blk_name(s->blk), nr);
380
+ return -1;
381
+ }
382
+
383
+ /* Convert from little-endian backstore for each 32-bit row */
384
+ nr /= 4;
385
+ while (nr--) {
386
+ ram[nr] = le32_to_cpu(ram[nr]);
387
+ }
388
+
389
+ return 0;
390
+}
391
+
392
+static void efuse_bdrv_sync(XlnxEFuse *s, unsigned int bit)
393
+{
394
+ unsigned int row_offset;
395
+ uint32_t le32;
396
+
397
+ if (!s->blk || s->blk_ro) {
398
+ return; /* Silent on read-only backend to avoid message flood */
399
+ }
400
+
401
+ /* Backstore is always in little-endian */
402
+ le32 = cpu_to_le32(xlnx_efuse_get_row(s, bit));
403
+
404
+ row_offset = (bit / 32) * 4;
405
+ if (blk_pwrite(s->blk, row_offset, &le32, 4, 0) < 0) {
406
+ error_report("%s: Failed to write offset %u of eFUSE backstore.",
407
+ blk_name(s->blk), row_offset);
408
+ }
409
+}
410
+
411
+static int efuse_ro_bits_cmp(const void *a, const void *b)
412
+{
413
+ uint32_t i = *(const uint32_t *)a;
414
+ uint32_t j = *(const uint32_t *)b;
415
+
416
+ return (i > j) - (i < j);
417
+}
418
+
419
+static void efuse_ro_bits_sort(XlnxEFuse *s)
420
+{
421
+ uint32_t *ary = s->ro_bits;
422
+ const uint32_t cnt = s->ro_bits_cnt;
423
+
424
+ if (ary && cnt > 1) {
425
+ qsort(ary, cnt, sizeof(ary[0]), efuse_ro_bits_cmp);
426
+ }
427
+}
428
+
429
+static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k)
430
+{
431
+ const uint32_t *ary = s->ro_bits;
432
+ const uint32_t cnt = s->ro_bits_cnt;
433
+
434
+ if (!ary || !cnt) {
435
+ return false;
74
+ return false;
436
+ }
75
+ }
437
+
76
+ if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
438
+ return bsearch(&k, ary, cnt, sizeof(ary[0]), efuse_ro_bits_cmp) != NULL;
77
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
439
+}
78
+ syn_smetrap(SME_ET_NotStreaming, false));
440
+
441
+bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
442
+{
443
+ if (efuse_ro_bits_find(s, bit)) {
444
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: WARN: "
445
+ "Ignored setting of readonly efuse bit<%u,%u>!\n",
446
+ object_get_canonical_path(OBJECT(s)),
447
+ (bit / 32), (bit % 32));
448
+ return false;
79
+ return false;
449
+ }
80
+ }
450
+
81
+ if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
451
+ s->fuse32[bit / 32] |= 1 << (bit % 32);
82
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
452
+ efuse_bdrv_sync(s, bit);
83
+ syn_smetrap(SME_ET_InactiveZA, false));
84
+ return false;
85
+ }
453
+ return true;
86
+ return true;
454
+}
87
+}
455
+
88
+
456
+bool xlnx_efuse_k256_check(XlnxEFuse *s, uint32_t crc, unsigned start)
89
/*
457
+{
90
* This utility function is for doing register extension with an
458
+ uint32_t calc;
91
* optional shift. You will likely want to pass a temporary for the
459
+
460
+ /* A key always occupies multiple of whole rows */
461
+ assert((start % 32) == 0);
462
+
463
+ calc = xlnx_efuse_calc_crc(&s->fuse32[start / 32], (256 / 32), 0);
464
+ return calc == crc;
465
+}
466
+
467
+uint32_t xlnx_efuse_tbits_check(XlnxEFuse *s)
468
+{
469
+ int nr;
470
+ uint32_t check = 0;
471
+
472
+ for (nr = s->efuse_nr; nr-- > 0; ) {
473
+ int efuse_start_row_num = (s->efuse_size * nr) / 32;
474
+ uint32_t data = s->fuse32[efuse_start_row_num];
475
+
476
+ /*
477
+ * If the option is on, auto-init blank T-bits.
478
+ * (non-blank will still be reported as '0' in the check, e.g.,
479
+ * for error-injection tests)
480
+ */
481
+ if ((data & TBITS_MASK) == 0 && s->init_tbits) {
482
+ data |= TBITS_PATTERN;
483
+
484
+ s->fuse32[efuse_start_row_num] = data;
485
+ efuse_bdrv_sync(s, (efuse_start_row_num * 32 + TBIT0_OFFSET));
486
+ }
487
+
488
+ check = (check << 1) | ((data & TBITS_MASK) == TBITS_PATTERN);
489
+ }
490
+
491
+ return check;
492
+}
493
+
494
+static void efuse_realize(DeviceState *dev, Error **errp)
495
+{
496
+ XlnxEFuse *s = XLNX_EFUSE(dev);
497
+
498
+ /* Sort readonly-list for bsearch lookup */
499
+ efuse_ro_bits_sort(s);
500
+
501
+ if ((s->efuse_size % 32) != 0) {
502
+ error_setg(errp,
503
+ "%s.efuse-size: %u: property value not multiple of 32.",
504
+ object_get_canonical_path(OBJECT(dev)), s->efuse_size);
505
+ return;
506
+ }
507
+
508
+ s->fuse32 = g_malloc0(efuse_bytes(s));
509
+ if (efuse_bdrv_read(s, errp)) {
510
+ g_free(s->fuse32);
511
+ }
512
+}
513
+
514
+static void efuse_prop_set_drive(Object *obj, Visitor *v, const char *name,
515
+ void *opaque, Error **errp)
516
+{
517
+ DeviceState *dev = DEVICE(obj);
518
+
519
+ qdev_prop_drive.set(obj, v, name, opaque, errp);
520
+
521
+ /* Fill initial data if backend is attached after realized */
522
+ if (dev->realized) {
523
+ efuse_bdrv_read(XLNX_EFUSE(obj), errp);
524
+ }
525
+}
526
+
527
+static void efuse_prop_get_drive(Object *obj, Visitor *v, const char *name,
528
+ void *opaque, Error **errp)
529
+{
530
+ qdev_prop_drive.get(obj, v, name, opaque, errp);
531
+}
532
+
533
+static void efuse_prop_release_drive(Object *obj, const char *name,
534
+ void *opaque)
535
+{
536
+ qdev_prop_drive.release(obj, name, opaque);
537
+}
538
+
539
+static const PropertyInfo efuse_prop_drive = {
540
+ .name = "str",
541
+ .description = "Node name or ID of a block device to use as eFUSE backend",
542
+ .realized_set_allowed = true,
543
+ .get = efuse_prop_get_drive,
544
+ .set = efuse_prop_set_drive,
545
+ .release = efuse_prop_release_drive,
546
+};
547
+
548
+static Property efuse_properties[] = {
549
+ DEFINE_PROP("drive", XlnxEFuse, blk, efuse_prop_drive, BlockBackend *),
550
+ DEFINE_PROP_UINT8("efuse-nr", XlnxEFuse, efuse_nr, 3),
551
+ DEFINE_PROP_UINT32("efuse-size", XlnxEFuse, efuse_size, 64 * 32),
552
+ DEFINE_PROP_BOOL("init-factory-tbits", XlnxEFuse, init_tbits, true),
553
+ DEFINE_PROP_ARRAY("read-only", XlnxEFuse, ro_bits_cnt, ro_bits,
554
+ qdev_prop_uint32, uint32_t),
555
+ DEFINE_PROP_END_OF_LIST(),
556
+};
557
+
558
+static void efuse_class_init(ObjectClass *klass, void *data)
559
+{
560
+ DeviceClass *dc = DEVICE_CLASS(klass);
561
+
562
+ dc->realize = efuse_realize;
563
+ device_class_set_props(dc, efuse_properties);
564
+}
565
+
566
+static const TypeInfo efuse_info = {
567
+ .name = TYPE_XLNX_EFUSE,
568
+ .parent = TYPE_DEVICE,
569
+ .instance_size = sizeof(XlnxEFuse),
570
+ .class_init = efuse_class_init,
571
+};
572
+
573
+static void efuse_register_types(void)
574
+{
575
+ type_register_static(&efuse_info);
576
+}
577
+type_init(efuse_register_types)
578
diff --git a/hw/nvram/Kconfig b/hw/nvram/Kconfig
579
index XXXXXXX..XXXXXXX 100644
580
--- a/hw/nvram/Kconfig
581
+++ b/hw/nvram/Kconfig
582
@@ -XXX,XX +XXX,XX @@ config NMC93XX_EEPROM
583
584
config CHRP_NVRAM
585
bool
586
+
587
+config XLNX_EFUSE_CRC
588
+ bool
589
+
590
+config XLNX_EFUSE
591
+ bool
592
+ select XLNX_EFUSE_CRC
593
diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build
594
index XXXXXXX..XXXXXXX 100644
595
--- a/hw/nvram/meson.build
596
+++ b/hw/nvram/meson.build
597
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_AT24C', if_true: files('eeprom_at24c.c'))
598
softmmu_ss.add(when: 'CONFIG_MAC_NVRAM', if_true: files('mac_nvram.c'))
599
softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_otp.c'))
600
softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_nvm.c'))
601
+softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_CRC', if_true: files('xlnx-efuse-crc.c'))
602
+softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE', if_true: files('xlnx-efuse.c'))
603
604
specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c'))
605
--
2.25.1
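
The enablement checks added above are used by the later patches in this series in a consistent way: a translator first tests the ID-register feature bit, then calls the appropriate sme_*_enabled_check(), which itself emits the SME trap on failure, and only generates code when the check passes. A rough sketch of that usage; trans_EXAMPLE_ZA and arg_example are hypothetical names, not functions from this series:

    /* Sketch of a translator that touches ZA storage. */
    static bool trans_EXAMPLE_ZA(DisasContext *s, arg_example *a)
    {
        if (!dc_isar_feature(aa64_sme, s)) {
            return false;                /* SME absent: leave the insn undecoded */
        }
        if (sme_za_enabled_check(s)) {   /* emits the trap itself on failure */
            /* ... emit TCG ops that access the ZA array ... */
        }
        return true;                     /* decoded either way */
    }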
From: Richard Henderson <richard.henderson@linaro.org>

The pseudocode for CheckSVEEnabled gains a check for Streaming
SVE mode, and for SME present but SVE absent.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool fp_access_check(DisasContext *s)
     return true;
 }

-/* Check that SVE access is enabled.  If it is, return true.
+/*
+ * Check that SVE access is enabled.  If it is, return true.
  * If not, emit code to generate an appropriate exception and return false.
+ * This function corresponds to CheckSVEEnabled().
  */
 bool sve_access_check(DisasContext *s)
 {
-    if (s->sve_excp_el) {
-        assert(!s->sve_access_checked);
-        s->sve_access_checked = true;
-
+    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
+        assert(dc_isar_feature(aa64_sme, s));
+        if (!sme_sm_enabled_check(s)) {
+            goto fail_exit;
+        }
+    } else if (s->sve_excp_el) {
         gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
                               syn_sve_access_trap(), s->sve_excp_el);
-        return false;
+        goto fail_exit;
     }
     s->sve_access_checked = true;
     return fp_access_check(s);
+
+ fail_exit:
+    /* Assert that we only raise one exception per instruction. */
+    assert(!s->sve_access_checked);
+    s->sve_access_checked = true;
+    return false;
 }

 /*
--
2.25.1
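
Because the -/+ hunks are a little hard to read, this is approximately how sve_access_check() ends up once the patch is applied (assembled from the hunks above; the surrounding file context is elided):

    bool sve_access_check(DisasContext *s)
    {
        if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
            /* Streaming SVE mode, or SME implemented without SVE. */
            assert(dc_isar_feature(aa64_sme, s));
            if (!sme_sm_enabled_check(s)) {
                goto fail_exit;
            }
        } else if (s->sve_excp_el) {
            gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
                                  syn_sve_access_trap(), s->sve_excp_el);
            goto fail_exit;
        }
        s->sve_access_checked = true;
        return fp_access_check(s);

     fail_exit:
        /* Assert that we only raise one exception per instruction. */
        assert(!s->sve_access_checked);
        s->sve_access_checked = true;
        return false;
    }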
1
From: Tong Ho <tong.ho@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Connect the support for Versal eFUSE one-time field-programmable
3
These SME instructions are nominally within the SVE decode space,
4
bit array.
4
so we add them to sve.decode and translate-sve.c.
5
5
6
The command argument:
7
-drive if=pflash,index=1,...
8
Can be used to optionally connect the bit array to a
9
backend storage, such that field-programmed values
10
in one invocation can be made available to next
11
invocation.
12
13
The backend storage must be a seekable binary file, and
14
its size must be 3072 bytes or larger. A file with all
15
binary 0's is a 'blank'.
16
17
Signed-off-by: Tong Ho <tong.ho@xilinx.com>
18
Message-id: 20210917052400.1249094-7-tong.ho@xilinx.com
19
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-18-richard.henderson@linaro.org
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
---
10
---
22
include/hw/arm/xlnx-versal.h | 10 +++++++
11
target/arm/translate-a64.h | 12 ++++++++++++
23
hw/arm/xlnx-versal-virt.c | 52 ++++++++++++++++++++++++++++++++++++
12
target/arm/sve.decode | 5 ++++-
24
hw/arm/xlnx-versal.c | 39 +++++++++++++++++++++++++++
13
target/arm/translate-sve.c | 38 ++++++++++++++++++++++++++++++++++++++
25
hw/arm/Kconfig | 1 +
14
3 files changed, 54 insertions(+), 1 deletion(-)
26
4 files changed, 102 insertions(+)
27
15
28
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
16
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
29
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
30
--- a/include/hw/arm/xlnx-versal.h
18
--- a/target/arm/translate-a64.h
31
+++ b/include/hw/arm/xlnx-versal.h
19
+++ b/target/arm/translate-a64.h
32
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_size(DisasContext *s)
33
#include "hw/usb/xlnx-usb-subsystem.h"
21
return s->vl;
34
#include "hw/misc/xlnx-versal-xramc.h"
35
#include "hw/nvram/xlnx-bbram.h"
36
+#include "hw/nvram/xlnx-versal-efuse.h"
37
38
#define TYPE_XLNX_VERSAL "xlnx-versal"
39
OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL)
40
@@ -XXX,XX +XXX,XX @@ struct Versal {
41
42
XlnxZynqMPRTC rtc;
43
XlnxBBRam bbram;
44
+ XlnxEFuse efuse;
45
+ XlnxVersalEFuseCtrl efuse_ctrl;
46
+ XlnxVersalEFuseCache efuse_cache;
47
} pmc;
48
49
struct {
50
@@ -XXX,XX +XXX,XX @@ struct Versal {
51
#define VERSAL_BBRAM_APB_IRQ_0 121
52
#define VERSAL_RTC_APB_ERR_IRQ 121
53
#define VERSAL_SD0_IRQ_0 126
54
+#define VERSAL_EFUSE_IRQ 139
55
#define VERSAL_RTC_ALARM_IRQ 142
56
#define VERSAL_RTC_SECONDS_IRQ 143
57
58
@@ -XXX,XX +XXX,XX @@ struct Versal {
59
#define MM_PMC_SD0_SIZE 0x10000
60
#define MM_PMC_BBRAM_CTRL 0xf11f0000
61
#define MM_PMC_BBRAM_CTRL_SIZE 0x00050
62
+#define MM_PMC_EFUSE_CTRL 0xf1240000
63
+#define MM_PMC_EFUSE_CTRL_SIZE 0x00104
64
+#define MM_PMC_EFUSE_CACHE 0xf1250000
65
+#define MM_PMC_EFUSE_CACHE_SIZE 0x00C00
66
+
67
#define MM_PMC_CRP 0xf1260000U
68
#define MM_PMC_CRP_SIZE 0x10000
69
#define MM_PMC_RTC 0xf12a0000
70
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/hw/arm/xlnx-versal-virt.c
73
+++ b/hw/arm/xlnx-versal-virt.c
74
@@ -XXX,XX +XXX,XX @@ static void fdt_add_bbram_node(VersalVirt *s)
75
g_free(name);
76
}
22
}
77
23
78
+static void fdt_add_efuse_ctrl_node(VersalVirt *s)
24
+/* Return the byte size of the vector register, SVL / 8. */
25
+static inline int streaming_vec_reg_size(DisasContext *s)
79
+{
26
+{
80
+ const char compat[] = TYPE_XLNX_VERSAL_EFUSE_CTRL;
27
+ return s->svl;
81
+ const char interrupt_names[] = "pmc_efuse";
82
+ char *name = g_strdup_printf("/pmc_efuse@%x", MM_PMC_EFUSE_CTRL);
83
+
84
+ qemu_fdt_add_subnode(s->fdt, name);
85
+
86
+ qemu_fdt_setprop_cells(s->fdt, name, "interrupts",
87
+ GIC_FDT_IRQ_TYPE_SPI, VERSAL_EFUSE_IRQ,
88
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
89
+ qemu_fdt_setprop(s->fdt, name, "interrupt-names",
90
+ interrupt_names, sizeof(interrupt_names));
91
+ qemu_fdt_setprop_sized_cells(s->fdt, name, "reg",
92
+ 2, MM_PMC_EFUSE_CTRL,
93
+ 2, MM_PMC_EFUSE_CTRL_SIZE);
94
+ qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat));
95
+ g_free(name);
96
+}
28
+}
97
+
29
+
98
+static void fdt_add_efuse_cache_node(VersalVirt *s)
30
/*
31
* Return the offset info CPUARMState of the predicate vector register Pn.
32
* Note for this purpose, FFR is P16.
33
@@ -XXX,XX +XXX,XX @@ static inline int pred_full_reg_size(DisasContext *s)
34
return s->vl >> 3;
35
}
36
37
+/* Return the byte size of the predicate register, SVL / 64. */
38
+static inline int streaming_pred_reg_size(DisasContext *s)
99
+{
39
+{
100
+ const char compat[] = TYPE_XLNX_VERSAL_EFUSE_CACHE;
40
+ return s->svl >> 3;
101
+ char *name = g_strdup_printf("/xlnx_pmc_efuse_cache@%x",
102
+ MM_PMC_EFUSE_CACHE);
103
+
104
+ qemu_fdt_add_subnode(s->fdt, name);
105
+
106
+ qemu_fdt_setprop_sized_cells(s->fdt, name, "reg",
107
+ 2, MM_PMC_EFUSE_CACHE,
108
+ 2, MM_PMC_EFUSE_CACHE_SIZE);
109
+ qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat));
110
+ g_free(name);
111
+}
41
+}
112
+
42
+
113
static void fdt_nop_memory_nodes(void *fdt, Error **errp)
43
/*
114
{
44
* Round up the size of a register to a size allowed by
115
Error *err = NULL;
45
* the tcg vector infrastructure. Any operation which uses this
116
@@ -XXX,XX +XXX,XX @@ static void bbram_attach_drive(XlnxBBRam *dev)
46
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
117
}
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/sve.decode
49
+++ b/target/arm/sve.decode
50
@@ -XXX,XX +XXX,XX @@ INDEX_ri 00000100 esz:2 1 imm:s5 010001 rn:5 rd:5
51
# SVE index generation (register start, register increment)
52
INDEX_rr 00000100 .. 1 ..... 010011 ..... ..... @rd_rn_rm
53
54
-### SVE Stack Allocation Group
55
+### SVE / Streaming SVE Stack Allocation Group
56
57
# SVE stack frame adjustment
58
ADDVL 00000100 001 ..... 01010 ...... ..... @rd_rn_i6
59
+ADDSVL 00000100 001 ..... 01011 ...... ..... @rd_rn_i6
60
ADDPL 00000100 011 ..... 01010 ...... ..... @rd_rn_i6
61
+ADDSPL 00000100 011 ..... 01011 ...... ..... @rd_rn_i6
62
63
# SVE stack frame size
64
RDVL 00000100 101 11111 01010 imm:s6 rd:5
65
+RDSVL 00000100 101 11111 01011 imm:s6 rd:5
66
67
### SVE Bitwise Shift - Unpredicated Group
68
69
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/translate-sve.c
72
+++ b/target/arm/translate-sve.c
73
@@ -XXX,XX +XXX,XX @@ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
74
return true;
118
}
75
}
119
76
120
+static void efuse_attach_drive(XlnxEFuse *dev)
77
+static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a)
121
+{
78
+{
122
+ DriveInfo *dinfo;
79
+ if (!dc_isar_feature(aa64_sme, s)) {
123
+ BlockBackend *blk;
80
+ return false;
124
+
125
+ dinfo = drive_get_by_index(IF_PFLASH, 1);
126
+ blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL;
127
+ if (blk) {
128
+ qdev_prop_set_drive(DEVICE(dev), "drive", blk);
129
+ }
81
+ }
82
+ if (sme_enabled_check(s)) {
83
+ TCGv_i64 rd = cpu_reg_sp(s, a->rd);
84
+ TCGv_i64 rn = cpu_reg_sp(s, a->rn);
85
+ tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s));
86
+ }
87
+ return true;
130
+}
88
+}
131
+
89
+
132
static void sd_plugin_card(SDHCIState *sd, DriveInfo *di)
90
static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
133
{
91
{
134
BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL;
92
if (!dc_isar_feature(aa64_sve, s)) {
135
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
93
@@ -XXX,XX +XXX,XX @@ static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
136
fdt_add_sd_nodes(s);
94
return true;
137
fdt_add_rtc_node(s);
138
fdt_add_bbram_node(s);
139
+ fdt_add_efuse_ctrl_node(s);
140
+ fdt_add_efuse_cache_node(s);
141
fdt_add_cpu_nodes(s, psci_conduit);
142
fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz);
143
fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz);
144
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
145
/* Attach bbram backend, if given */
146
bbram_attach_drive(&s->soc.pmc.bbram);
147
148
+ /* Attach efuse backend, if given */
149
+ efuse_attach_drive(&s->soc.pmc.efuse);
150
+
151
/* Plugin SD cards. */
152
for (i = 0; i < ARRAY_SIZE(s->soc.pmc.iou.sd); i++) {
153
sd_plugin_card(&s->soc.pmc.iou.sd[i], drive_get_next(IF_SD));
154
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
155
index XXXXXXX..XXXXXXX 100644
156
--- a/hw/arm/xlnx-versal.c
157
+++ b/hw/arm/xlnx-versal.c
158
@@ -XXX,XX +XXX,XX @@ static void versal_create_bbram(Versal *s, qemu_irq *pic)
159
sysbus_connect_irq(sbd, 0, pic[VERSAL_BBRAM_APB_IRQ_0]);
160
}
95
}
161
96
162
+static void versal_realize_efuse_part(Versal *s, Object *dev, hwaddr base)
97
+static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a)
163
+{
98
+{
164
+ SysBusDevice *part = SYS_BUS_DEVICE(dev);
99
+ if (!dc_isar_feature(aa64_sme, s)) {
165
+
100
+ return false;
166
+ object_property_set_link(OBJECT(part), "efuse",
101
+ }
167
+ OBJECT(&s->pmc.efuse), &error_abort);
102
+ if (sme_enabled_check(s)) {
168
+
103
+ TCGv_i64 rd = cpu_reg_sp(s, a->rd);
169
+ sysbus_realize(part, &error_abort);
104
+ TCGv_i64 rn = cpu_reg_sp(s, a->rn);
170
+ memory_region_add_subregion(&s->mr_ps, base,
105
+ tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s));
171
+ sysbus_mmio_get_region(part, 0));
106
+ }
107
+ return true;
172
+}
108
+}
173
+
109
+
174
+static void versal_create_efuse(Versal *s, qemu_irq *pic)
110
static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
111
{
112
if (!dc_isar_feature(aa64_sve, s)) {
113
@@ -XXX,XX +XXX,XX @@ static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
114
return true;
115
}
116
117
+static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a)
175
+{
118
+{
176
+ Object *bits = OBJECT(&s->pmc.efuse);
119
+ if (!dc_isar_feature(aa64_sme, s)) {
177
+ Object *ctrl = OBJECT(&s->pmc.efuse_ctrl);
120
+ return false;
178
+ Object *cache = OBJECT(&s->pmc.efuse_cache);
121
+ }
179
+
122
+ if (sme_enabled_check(s)) {
180
+ object_initialize_child(OBJECT(s), "efuse-ctrl", &s->pmc.efuse_ctrl,
123
+ TCGv_i64 reg = cpu_reg(s, a->rd);
181
+ TYPE_XLNX_VERSAL_EFUSE_CTRL);
124
+ tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s));
182
+
125
+ }
183
+ object_initialize_child(OBJECT(s), "efuse-cache", &s->pmc.efuse_cache,
126
+ return true;
184
+ TYPE_XLNX_VERSAL_EFUSE_CACHE);
185
+
186
+ object_initialize_child_with_props(ctrl, "xlnx-efuse@0", bits,
187
+ sizeof(s->pmc.efuse),
188
+ TYPE_XLNX_EFUSE, &error_abort,
189
+ "efuse-nr", "3",
190
+ "efuse-size", "8192",
191
+ NULL);
192
+
193
+ qdev_realize(DEVICE(bits), NULL, &error_abort);
194
+ versal_realize_efuse_part(s, ctrl, MM_PMC_EFUSE_CTRL);
195
+ versal_realize_efuse_part(s, cache, MM_PMC_EFUSE_CACHE);
196
+
197
+ sysbus_connect_irq(SYS_BUS_DEVICE(ctrl), 0, pic[VERSAL_EFUSE_IRQ]);
198
+}
127
+}
199
+
128
+
200
/* This takes the board allocated linear DDR memory and creates aliases
129
/*
201
* for each split DDR range/aperture on the Versal address map.
130
*** SVE Compute Vector Address Group
202
*/
131
*/
203
@@ -XXX,XX +XXX,XX @@ static void versal_realize(DeviceState *dev, Error **errp)
204
versal_create_rtc(s, pic);
205
versal_create_xrams(s, pic);
206
versal_create_bbram(s, pic);
207
+ versal_create_efuse(s, pic);
208
versal_map_ddr(s);
209
versal_unimp(s);
210
211
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
212
index XXXXXXX..XXXXXXX 100644
213
--- a/hw/arm/Kconfig
214
+++ b/hw/arm/Kconfig
215
@@ -XXX,XX +XXX,XX @@ config XLNX_VERSAL
216
select XLNX_ZYNQMP
217
select OR_IRQ
218
select XLNX_BBRAM
219
+ select XLNX_EFUSE_VERSAL
220
221
config NPCM7XX
222
bool
223
--
2.25.1
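
Behaviourally, the three instructions added here mirror ADDVL/ADDPL/RDVL but scale by the streaming vector length SVL instead of the normal vector length. A small standalone illustration of the arithmetic only; svl_bytes is an assumed SVL/8 value and the function names are ad hoc, not QEMU APIs:

    #include <stdint.h>

    /* ADDSVL: Xd = Xn + imm * (SVL / 8) */
    static uint64_t addsvl(uint64_t xn, int64_t imm, unsigned svl_bytes)
    {
        return xn + imm * svl_bytes;
    }

    /* ADDSPL: Xd = Xn + imm * (SVL / 64) */
    static uint64_t addspl(uint64_t xn, int64_t imm, unsigned svl_bytes)
    {
        return xn + imm * (svl_bytes / 8);
    }

    /* RDSVL: Xd = imm * (SVL / 8) */
    static uint64_t rdsvl(int64_t imm, unsigned svl_bytes)
    {
        return imm * svl_bytes;
    }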
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper-sme.h    |  2 ++
 target/arm/sme.decode      |  4 ++++
 target/arm/sme_helper.c    | 25 +++++++++++++++++++++++++
 target/arm/translate-sme.c | 13 +++++++++++++
 4 files changed, 44 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@

 DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
 DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
+
+DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@
 #
 # This file is processed by scripts/decodetree.py
 #
+
+### SME Misc
+
+ZERO            11000000 00 001 00000000000 imm:8
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i)
         memset(env->zarray, 0, sizeof(env->zarray));
     }
 }
+
+void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
+{
+    uint32_t i;
+
+    /*
+     * Special case clearing the entire ZA space.
+     * This falls into the CONSTRAINED UNPREDICTABLE zeroing of any
+     * parts of the ZA storage outside of SVL.
+     */
+    if (imm == 0xff) {
+        memset(env->zarray, 0, sizeof(env->zarray));
+        return;
+    }
+
+    /*
+     * Recall that ZAnH.D[m] is spread across ZA[n+8*m],
+     * so each row is discontiguous within ZA[].
+     */
+    for (i = 0; i < svl; i++) {
+        if (imm & (1 << (i % 8))) {
+            memset(&env->zarray[i], 0, svl);
+        }
+    }
+}
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@
  */

 #include "decode-sme.c.inc"
+
+
+static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
+{
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (sme_za_enabled_check(s)) {
+        gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm),
+                            tcg_constant_i32(streaming_vec_reg_size(s)));
+    }
+    return true;
+}
--
2.25.1
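
As a quick sanity check of the row-selection loop in helper_sme_zero(), here is a small standalone program (plain C, not QEMU code; the svl and imm values are arbitrary examples) that prints which ZA rows a given 8-bit immediate selects:

    #include <stdio.h>

    int main(void)
    {
        unsigned svl = 32;     /* example streaming vector length in bytes */
        unsigned imm = 0x41;   /* example ZERO immediate mask */
        unsigned i;

        /* Same selection rule as the helper: bit (i % 8) of imm picks row i. */
        for (i = 0; i < svl; i++) {
            if (imm & (1u << (i % 8))) {
                printf("row %u cleared\n", i);
            }
        }
        return 0;
    }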
1
Rename qbus_create_inplace() to qbus_init(); this is more in line
1
From: Richard Henderson <richard.henderson@linaro.org>
2
with our usual naming convention for functions that in-place
3
initialize objects.
4
2
3
We can reuse the SVE functions for implementing moves to/from
4
horizontal tile slices, but we need new ones for moves to/from
5
vertical tile slices.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220708151540.18136-20-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
8
Message-id: 20210923121153.23754-5-peter.maydell@linaro.org
9
---
11
---
10
include/hw/qdev-core.h | 4 ++--
12
target/arm/helper-sme.h | 12 +++
11
hw/audio/intel-hda.c | 2 +-
13
target/arm/helper-sve.h | 2 +
12
hw/block/fdc.c | 2 +-
14
target/arm/translate-a64.h | 8 ++
13
hw/block/swim.c | 3 +--
15
target/arm/translate.h | 5 ++
14
hw/char/virtio-serial-bus.c | 4 ++--
16
target/arm/sme.decode | 15 ++++
15
hw/core/bus.c | 11 ++++++-----
17
target/arm/sme_helper.c | 151 ++++++++++++++++++++++++++++++++++++-
16
hw/core/sysbus.c | 10 ++++++----
18
target/arm/sve_helper.c | 12 +++
17
hw/gpio/bcm2835_gpio.c | 3 +--
19
target/arm/translate-sme.c | 127 +++++++++++++++++++++++++++++++
18
hw/ide/qdev.c | 2 +-
20
8 files changed, 331 insertions(+), 1 deletion(-)
19
hw/ipack/ipack.c | 2 +-
20
hw/misc/mac_via.c | 4 ++--
21
hw/misc/macio/cuda.c | 4 ++--
22
hw/misc/macio/macio.c | 4 ++--
23
hw/misc/macio/pmu.c | 4 ++--
24
hw/nubus/nubus-bridge.c | 2 +-
25
hw/nvme/ctrl.c | 4 ++--
26
hw/nvme/subsys.c | 3 +--
27
hw/pci/pci.c | 2 +-
28
hw/pci/pci_bridge.c | 4 ++--
29
hw/s390x/event-facility.c | 4 ++--
30
hw/s390x/virtio-ccw.c | 3 +--
31
hw/scsi/scsi-bus.c | 2 +-
32
hw/sd/allwinner-sdhost.c | 4 ++--
33
hw/sd/bcm2835_sdhost.c | 4 ++--
34
hw/sd/pl181.c | 3 +--
35
hw/sd/pxa2xx_mmci.c | 4 ++--
36
hw/sd/sdhci.c | 3 +--
37
hw/sd/ssi-sd.c | 3 +--
38
hw/usb/bus.c | 2 +-
39
hw/usb/dev-smartcard-reader.c | 3 +--
40
hw/virtio/virtio-mmio.c | 3 +--
41
hw/virtio/virtio-pci.c | 3 +--
42
32 files changed, 54 insertions(+), 61 deletions(-)
43
21
44
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
22
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
45
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
46
--- a/include/hw/qdev-core.h
24
--- a/target/arm/helper-sme.h
47
+++ b/include/hw/qdev-core.h
25
+++ b/target/arm/helper-sme.h
48
@@ -XXX,XX +XXX,XX @@ DeviceState *qdev_find_recursive(BusState *bus, const char *id);
26
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
49
typedef int (qbus_walkerfn)(BusState *bus, void *opaque);
27
DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
50
typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque);
28
51
29
DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32)
52
-void qbus_create_inplace(void *bus, size_t size, const char *typename,
30
+
53
- DeviceState *parent, const char *name);
31
+/* Move to/from vertical array slices, i.e. columns, so 'c'. */
54
+void qbus_init(void *bus, size_t size, const char *typename,
32
+DEF_HELPER_FLAGS_4(sme_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
55
+ DeviceState *parent, const char *name);
33
+DEF_HELPER_FLAGS_4(sme_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
56
BusState *qbus_create(const char *typename, DeviceState *parent, const char *name);
34
+DEF_HELPER_FLAGS_4(sme_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
57
bool qbus_realize(BusState *bus, Error **errp);
35
+DEF_HELPER_FLAGS_4(sme_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
58
void qbus_unrealize(BusState *bus);
36
+DEF_HELPER_FLAGS_4(sme_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
59
diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c
37
+DEF_HELPER_FLAGS_4(sme_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
60
index XXXXXXX..XXXXXXX 100644
38
+DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
61
--- a/hw/audio/intel-hda.c
39
+DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
62
+++ b/hw/audio/intel-hda.c
40
+DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
63
@@ -XXX,XX +XXX,XX @@ void hda_codec_bus_init(DeviceState *dev, HDACodecBus *bus, size_t bus_size,
41
+DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
64
hda_codec_response_func response,
42
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
65
hda_codec_xfer_func xfer)
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/helper-sve.h
45
+++ b/target/arm/helper-sve.h
46
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG,
47
void, ptr, ptr, ptr, ptr, i32)
48
DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG,
49
void, ptr, ptr, ptr, ptr, i32)
50
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_q, TCG_CALL_NO_RWG,
51
+ void, ptr, ptr, ptr, ptr, i32)
52
53
DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG,
54
void, ptr, ptr, ptr, ptr, i32)
55
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/translate-a64.h
58
+++ b/target/arm/translate-a64.h
59
@@ -XXX,XX +XXX,XX @@ static inline int pred_gvec_reg_size(DisasContext *s)
60
return size_for_gvec(pred_full_reg_size(s));
61
}
62
63
+/* Return a newly allocated pointer to the predicate register. */
64
+static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno)
65
+{
66
+ TCGv_ptr ret = tcg_temp_new_ptr();
67
+ tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno));
68
+ return ret;
69
+}
70
+
71
bool disas_sve(DisasContext *, uint32_t);
72
bool disas_sme(DisasContext *, uint32_t);
73
74
diff --git a/target/arm/translate.h b/target/arm/translate.h
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/translate.h
77
+++ b/target/arm/translate.h
78
@@ -XXX,XX +XXX,XX @@ static inline int plus_2(DisasContext *s, int x)
79
return x + 2;
80
}
81
82
+static inline int plus_12(DisasContext *s, int x)
83
+{
84
+ return x + 12;
85
+}
86
+
87
static inline int times_2(DisasContext *s, int x)
66
{
88
{
67
- qbus_create_inplace(bus, bus_size, TYPE_HDA_BUS, dev, NULL);
89
return x * 2;
68
+ qbus_init(bus, bus_size, TYPE_HDA_BUS, dev, NULL);
90
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
69
bus->response = response;
91
index XXXXXXX..XXXXXXX 100644
70
bus->xfer = xfer;
92
--- a/target/arm/sme.decode
71
}
93
+++ b/target/arm/sme.decode
72
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
94
@@ -XXX,XX +XXX,XX @@
73
index XXXXXXX..XXXXXXX 100644
95
### SME Misc
74
--- a/hw/block/fdc.c
96
75
+++ b/hw/block/fdc.c
97
ZERO 11000000 00 001 00000000000 imm:8
76
@@ -XXX,XX +XXX,XX @@ static const TypeInfo floppy_bus_info = {
98
+
77
99
+### SME Move into/from Array
78
static void floppy_bus_create(FDCtrl *fdc, FloppyBus *bus, DeviceState *dev)
100
+
79
{
101
+%mova_rs 13:2 !function=plus_12
80
- qbus_create_inplace(bus, sizeof(FloppyBus), TYPE_FLOPPY_BUS, dev, NULL);
102
+&mova esz rs pg zr za_imm v:bool to_vec:bool
81
+ qbus_init(bus, sizeof(FloppyBus), TYPE_FLOPPY_BUS, dev, NULL);
103
+
82
bus->fdc = fdc;
104
+MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \
83
}
105
+ &mova to_vec=0 rs=%mova_rs
84
106
+MOVA 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \
85
diff --git a/hw/block/swim.c b/hw/block/swim.c
107
+ &mova to_vec=0 rs=%mova_rs esz=4
86
index XXXXXXX..XXXXXXX 100644
108
+
87
--- a/hw/block/swim.c
109
+MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \
88
+++ b/hw/block/swim.c
110
+ &mova to_vec=1 rs=%mova_rs
89
@@ -XXX,XX +XXX,XX @@ static void sysbus_swim_realize(DeviceState *dev, Error **errp)
111
+MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \
90
Swim *sys = SWIM(dev);
112
+ &mova to_vec=1 rs=%mova_rs esz=4
91
SWIMCtrl *swimctrl = &sys->ctrl;
113
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
92
114
index XXXXXXX..XXXXXXX 100644
93
- qbus_create_inplace(&swimctrl->bus, sizeof(SWIMBus), TYPE_SWIM_BUS, dev,
115
--- a/target/arm/sme_helper.c
94
- NULL);
116
+++ b/target/arm/sme_helper.c
95
+ qbus_init(&swimctrl->bus, sizeof(SWIMBus), TYPE_SWIM_BUS, dev, NULL);
117
@@ -XXX,XX +XXX,XX @@
96
swimctrl->bus.ctrl = swimctrl;
118
97
}
119
#include "qemu/osdep.h"
98
120
#include "cpu.h"
99
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
121
-#include "internals.h"
100
index XXXXXXX..XXXXXXX 100644
122
+#include "tcg/tcg-gvec-desc.h"
101
--- a/hw/char/virtio-serial-bus.c
123
#include "exec/helper-proto.h"
102
+++ b/hw/char/virtio-serial-bus.c
124
+#include "qemu/int128.h"
103
@@ -XXX,XX +XXX,XX @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
125
+#include "vec_internal.h"
104
config_size);
126
105
127
/* ResetSVEState */
106
/* Spawn a new virtio-serial bus on which the ports will ride as devices */
128
void arm_reset_sve_state(CPUARMState *env)
107
- qbus_create_inplace(&vser->bus, sizeof(vser->bus), TYPE_VIRTIO_SERIAL_BUS,
129
@@ -XXX,XX +XXX,XX @@ void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
108
- dev, vdev->bus_name);
130
}
109
+ qbus_init(&vser->bus, sizeof(vser->bus), TYPE_VIRTIO_SERIAL_BUS,
110
+ dev, vdev->bus_name);
111
qbus_set_hotplug_handler(BUS(&vser->bus), OBJECT(vser));
112
vser->bus.vser = vser;
113
QTAILQ_INIT(&vser->ports);
114
diff --git a/hw/core/bus.c b/hw/core/bus.c
115
index XXXXXXX..XXXXXXX 100644
116
--- a/hw/core/bus.c
117
+++ b/hw/core/bus.c
118
@@ -XXX,XX +XXX,XX @@ static void bus_reset_child_foreach(Object *obj, ResettableChildCallback cb,
119
}
131
}
120
}
132
}
121
133
+
122
-static void qbus_init(BusState *bus, DeviceState *parent, const char *name)
134
+
123
+static void qbus_init_internal(BusState *bus, DeviceState *parent,
135
+/*
124
+ const char *name)
136
+ * When considering the ZA storage as an array of elements of
125
{
137
+ * type T, the index within that array of the Nth element of
126
const char *typename = object_get_typename(OBJECT(bus));
138
+ * a vertical slice of a tile can be calculated like this,
127
BusClass *bc;
139
+ * regardless of the size of type T. This is because the tiles
128
@@ -XXX,XX +XXX,XX @@ static void bus_unparent(Object *obj)
140
+ * are interleaved, so if type T is size N bytes then row 1 of
129
bus->parent = NULL;
141
+ * the tile is N rows away from row 0. The division by N to
130
}
142
+ * convert a byte offset into an array index and the multiplication
131
143
+ * by N to convert from vslice-index-within-the-tile to
132
-void qbus_create_inplace(void *bus, size_t size, const char *typename,
144
+ * the index within the ZA storage cancel out.
133
- DeviceState *parent, const char *name)
145
+ */
134
+void qbus_init(void *bus, size_t size, const char *typename,
146
+#define tile_vslice_index(i) ((i) * sizeof(ARMVectorReg))
135
+ DeviceState *parent, const char *name)
147
+
136
{
148
+/*
137
object_initialize(bus, size, typename);
149
+ * When doing byte arithmetic on the ZA storage, the element
138
- qbus_init(bus, parent, name);
150
+ * byteoff bytes away in a tile vertical slice is always this
139
+ qbus_init_internal(bus, parent, name);
151
+ * many bytes away in the ZA storage, regardless of the
140
}
152
+ * size of the tile element, assuming that byteoff is a multiple
141
153
+ * of the element size. Again this is because of the interleaving
142
BusState *qbus_create(const char *typename, DeviceState *parent, const char *name)
154
+ * of the tiles. For instance if we have 1 byte per element then
143
@@ -XXX,XX +XXX,XX @@ BusState *qbus_create(const char *typename, DeviceState *parent, const char *nam
155
+ * each row of the ZA storage has one byte of the vslice data,
144
BusState *bus;
156
+ * and (counting from 0) byte 8 goes in row 8 of the storage
145
157
+ * at offset (8 * row-size-in-bytes).
146
bus = BUS(object_new(typename));
158
+ * If we have 8 bytes per element then each row of the ZA storage
147
- qbus_init(bus, parent, name);
159
+ * has 8 bytes of the data, but there are 8 interleaved tiles and
148
+ qbus_init_internal(bus, parent, name);
160
+ * so byte 8 of the data goes into row 1 of the tile,
149
161
+ * which is again row 8 of the storage, so the offset is still
150
return bus;
162
+ * (8 * row-size-in-bytes). Similarly for other element sizes.
151
}
163
+ */
152
diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c
164
+#define tile_vslice_offset(byteoff) ((byteoff) * sizeof(ARMVectorReg))
153
index XXXXXXX..XXXXXXX 100644
165
+
154
--- a/hw/core/sysbus.c
166
+
155
+++ b/hw/core/sysbus.c
167
+/*
156
@@ -XXX,XX +XXX,XX @@ static BusState *main_system_bus;
168
+ * Move Zreg vector to ZArray column.
157
169
+ */
158
static void main_system_bus_create(void)
170
+#define DO_MOVA_C(NAME, TYPE, H) \
159
{
171
+void HELPER(NAME)(void *za, void *vn, void *vg, uint32_t desc) \
160
- /* assign main_system_bus before qbus_create_inplace()
172
+{ \
161
- * in order to make "if (bus != sysbus_get_default())" work */
173
+ int i, oprsz = simd_oprsz(desc); \
174
+ for (i = 0; i < oprsz; ) { \
175
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
176
+ do { \
177
+ if (pg & 1) { \
178
+ *(TYPE *)(za + tile_vslice_offset(i)) = *(TYPE *)(vn + H(i)); \
179
+ } \
180
+ i += sizeof(TYPE); \
181
+ pg >>= sizeof(TYPE); \
182
+ } while (i & 15); \
183
+ } \
184
+}
185
+
186
+DO_MOVA_C(sme_mova_cz_b, uint8_t, H1)
187
+DO_MOVA_C(sme_mova_cz_h, uint16_t, H1_2)
188
+DO_MOVA_C(sme_mova_cz_s, uint32_t, H1_4)
189
+
190
+void HELPER(sme_mova_cz_d)(void *za, void *vn, void *vg, uint32_t desc)
191
+{
192
+ int i, oprsz = simd_oprsz(desc) / 8;
193
+ uint8_t *pg = vg;
194
+ uint64_t *n = vn;
195
+ uint64_t *a = za;
196
+
197
+ for (i = 0; i < oprsz; i++) {
198
+ if (pg[H1(i)] & 1) {
199
+ a[tile_vslice_index(i)] = n[i];
200
+ }
201
+ }
202
+}
203
+
204
+void HELPER(sme_mova_cz_q)(void *za, void *vn, void *vg, uint32_t desc)
205
+{
206
+ int i, oprsz = simd_oprsz(desc) / 16;
207
+ uint16_t *pg = vg;
208
+ Int128 *n = vn;
209
+ Int128 *a = za;
210
+
162
+ /*
211
+ /*
163
+ * assign main_system_bus before qbus_init()
212
+ * Int128 is used here simply to copy 16 bytes, and to simplify
164
+ * in order to make "if (bus != sysbus_get_default())" work
213
+ * the address arithmetic.
165
+ */
214
+ */
166
main_system_bus = g_malloc0(system_bus_info.instance_size);
215
+ for (i = 0; i < oprsz; i++) {
167
- qbus_create_inplace(main_system_bus, system_bus_info.instance_size,
216
+ if (pg[H2(i)] & 1) {
168
- TYPE_SYSTEM_BUS, NULL, "main-system-bus");
217
+ a[tile_vslice_index(i)] = n[i];
169
+ qbus_init(main_system_bus, system_bus_info.instance_size,
218
+ }
170
+ TYPE_SYSTEM_BUS, NULL, "main-system-bus");
219
+ }
171
OBJECT(main_system_bus)->free = g_free;
220
+}
172
}
221
+
173
222
+#undef DO_MOVA_C
174
diff --git a/hw/gpio/bcm2835_gpio.c b/hw/gpio/bcm2835_gpio.c
223
+
175
index XXXXXXX..XXXXXXX 100644
224
+/*
176
--- a/hw/gpio/bcm2835_gpio.c
225
+ * Move ZArray column to Zreg vector.
177
+++ b/hw/gpio/bcm2835_gpio.c
226
+ */
178
@@ -XXX,XX +XXX,XX @@ static void bcm2835_gpio_init(Object *obj)
227
+#define DO_MOVA_Z(NAME, TYPE, H) \
179
DeviceState *dev = DEVICE(obj);
228
+void HELPER(NAME)(void *vd, void *za, void *vg, uint32_t desc) \
180
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
229
+{ \
181
230
+ int i, oprsz = simd_oprsz(desc); \
182
- qbus_create_inplace(&s->sdbus, sizeof(s->sdbus),
231
+ for (i = 0; i < oprsz; ) { \
183
- TYPE_SD_BUS, DEVICE(s), "sd-bus");
232
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
184
+ qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(s), "sd-bus");
233
+ do { \
185
234
+ if (pg & 1) { \
186
memory_region_init_io(&s->iomem, obj,
235
+ *(TYPE *)(vd + H(i)) = *(TYPE *)(za + tile_vslice_offset(i)); \
187
&bcm2835_gpio_ops, s, "bcm2835_gpio", 0x1000);
236
+ } \
188
diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c
237
+ i += sizeof(TYPE); \
189
index XXXXXXX..XXXXXXX 100644
238
+ pg >>= sizeof(TYPE); \
190
--- a/hw/ide/qdev.c
239
+ } while (i & 15); \
191
+++ b/hw/ide/qdev.c
240
+ } \
192
@@ -XXX,XX +XXX,XX @@ static const TypeInfo ide_bus_info = {
241
+}
193
void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev,
242
+
194
int bus_id, int max_units)
243
+DO_MOVA_Z(sme_mova_zc_b, uint8_t, H1)
195
{
244
+DO_MOVA_Z(sme_mova_zc_h, uint16_t, H1_2)
196
- qbus_create_inplace(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL);
245
+DO_MOVA_Z(sme_mova_zc_s, uint32_t, H1_4)
197
+ qbus_init(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL);
246
+
198
idebus->bus_id = bus_id;
247
+void HELPER(sme_mova_zc_d)(void *vd, void *za, void *vg, uint32_t desc)
199
idebus->max_units = max_units;
248
+{
200
}
249
+ int i, oprsz = simd_oprsz(desc) / 8;
201
diff --git a/hw/ipack/ipack.c b/hw/ipack/ipack.c
250
+ uint8_t *pg = vg;
202
index XXXXXXX..XXXXXXX 100644
251
+ uint64_t *d = vd;
203
--- a/hw/ipack/ipack.c
252
+ uint64_t *a = za;
204
+++ b/hw/ipack/ipack.c
253
+
205
@@ -XXX,XX +XXX,XX @@ void ipack_bus_init(IPackBus *bus, size_t bus_size,
254
+ for (i = 0; i < oprsz; i++) {
206
uint8_t n_slots,
255
+ if (pg[H1(i)] & 1) {
207
qemu_irq_handler handler)
256
+ d[i] = a[tile_vslice_index(i)];
208
{
257
+ }
209
- qbus_create_inplace(bus, bus_size, TYPE_IPACK_BUS, parent, NULL);
258
+ }
210
+ qbus_init(bus, bus_size, TYPE_IPACK_BUS, parent, NULL);
259
+}
211
bus->n_slots = n_slots;
260
+
212
bus->set_irq = handler;
261
+void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc)
213
}
262
+{
214
diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c
263
+ int i, oprsz = simd_oprsz(desc) / 16;
215
index XXXXXXX..XXXXXXX 100644
264
+ uint16_t *pg = vg;
216
--- a/hw/misc/mac_via.c
265
+ Int128 *d = vd;
217
+++ b/hw/misc/mac_via.c
266
+ Int128 *a = za;
218
@@ -XXX,XX +XXX,XX @@ static void mos6522_q800_via1_init(Object *obj)
267
+
219
sysbus_init_mmio(sbd, &v1s->via_mem);
268
+ /*
220
269
+ * Int128 is used here simply to copy 16 bytes, and to simplify
221
/* ADB */
270
+ * the address arithmetic.
222
- qbus_create_inplace((BusState *)&v1s->adb_bus, sizeof(v1s->adb_bus),
271
+ */
223
- TYPE_ADB_BUS, DEVICE(v1s), "adb.0");
272
+ for (i = 0; i < oprsz; i++, za += sizeof(ARMVectorReg)) {
224
+ qbus_init((BusState *)&v1s->adb_bus, sizeof(v1s->adb_bus),
273
+ if (pg[H2(i)] & 1) {
225
+ TYPE_ADB_BUS, DEVICE(v1s), "adb.0");
274
+ d[i] = a[tile_vslice_index(i)];
226
275
+ }
227
qdev_init_gpio_in(DEVICE(obj), via1_irq_request, VIA1_IRQ_NB);
276
+ }
228
}
277
+}
229
diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c
278
+
230
index XXXXXXX..XXXXXXX 100644
279
+#undef DO_MOVA_Z
231
--- a/hw/misc/macio/cuda.c
280
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
232
+++ b/hw/misc/macio/cuda.c
281
index XXXXXXX..XXXXXXX 100644
233
@@ -XXX,XX +XXX,XX @@ static void cuda_init(Object *obj)
282
--- a/target/arm/sve_helper.c
234
memory_region_init_io(&s->mem, obj, &mos6522_cuda_ops, s, "cuda", 0x2000);
283
+++ b/target/arm/sve_helper.c
235
sysbus_init_mmio(sbd, &s->mem);
284
@@ -XXX,XX +XXX,XX @@ void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm,
236
237
- qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS,
238
- DEVICE(obj), "adb.0");
239
+ qbus_init(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS,
240
+ DEVICE(obj), "adb.0");
241
}
242
243
static Property cuda_properties[] = {
244
diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c
245
index XXXXXXX..XXXXXXX 100644
246
--- a/hw/misc/macio/macio.c
247
+++ b/hw/misc/macio/macio.c
248
@@ -XXX,XX +XXX,XX @@ static void macio_instance_init(Object *obj)
249
250
memory_region_init(&s->bar, obj, "macio", 0x80000);
251
252
- qbus_create_inplace(&s->macio_bus, sizeof(s->macio_bus), TYPE_MACIO_BUS,
253
- DEVICE(obj), "macio.0");
254
+ qbus_init(&s->macio_bus, sizeof(s->macio_bus), TYPE_MACIO_BUS,
255
+ DEVICE(obj), "macio.0");
256
257
object_initialize_child(OBJECT(s), "dbdma", &s->dbdma, TYPE_MAC_DBDMA);
258
259
diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c
260
index XXXXXXX..XXXXXXX 100644
261
--- a/hw/misc/macio/pmu.c
262
+++ b/hw/misc/macio/pmu.c
263
@@ -XXX,XX +XXX,XX @@ static void pmu_realize(DeviceState *dev, Error **errp)
264
timer_mod(s->one_sec_timer, s->one_sec_target);
265
266
if (s->has_adb) {
267
- qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS,
268
- dev, "adb.0");
269
+ qbus_init(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS,
270
+ dev, "adb.0");
271
adb_register_autopoll_callback(adb_bus, pmu_adb_poll, s);
272
}
285
}
273
}
286
}
274
diff --git a/hw/nubus/nubus-bridge.c b/hw/nubus/nubus-bridge.c
287
275
index XXXXXXX..XXXXXXX 100644
288
+void HELPER(sve_sel_zpzz_q)(void *vd, void *vn, void *vm,
276
--- a/hw/nubus/nubus-bridge.c
289
+ void *vg, uint32_t desc)
277
+++ b/hw/nubus/nubus-bridge.c
290
+{
278
@@ -XXX,XX +XXX,XX @@ static void nubus_bridge_init(Object *obj)
291
+ intptr_t i, opr_sz = simd_oprsz(desc) / 16;
279
NubusBridge *s = NUBUS_BRIDGE(obj);
292
+ Int128 *d = vd, *n = vn, *m = vm;
280
NubusBus *bus = &s->bus;
293
+ uint16_t *pg = vg;
281
294
+
282
- qbus_create_inplace(bus, sizeof(s->bus), TYPE_NUBUS_BUS, DEVICE(s), NULL);
295
+ for (i = 0; i < opr_sz; i += 1) {
283
+ qbus_init(bus, sizeof(s->bus), TYPE_NUBUS_BUS, DEVICE(s), NULL);
296
+ d[i] = (pg[H2(i)] & 1 ? n : m)[i];
284
297
+ }
285
qdev_init_gpio_out(DEVICE(s), bus->irqs, NUBUS_IRQS);
298
+}
299
+
300
/* Two operand comparison controlled by a predicate.
301
* ??? It is very tempting to want to be able to expand this inline
302
* with x86 instructions, e.g.
303
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
304
index XXXXXXX..XXXXXXX 100644
305
--- a/target/arm/translate-sme.c
306
+++ b/target/arm/translate-sme.c
307
@@ -XXX,XX +XXX,XX @@
308
#include "decode-sme.c.inc"
309
310
311
+/*
312
+ * Resolve tile.size[index] to a host pointer, where tile and index
313
+ * are always decoded together, dependent on the element size.
314
+ */
315
+static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
316
+ int tile_index, bool vertical)
317
+{
318
+ int tile = tile_index >> (4 - esz);
319
+ int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz);
320
+ int pos, len, offset;
321
+ TCGv_i32 tmp;
322
+ TCGv_ptr addr;
323
+
324
+ /* Compute the final index, which is Rs+imm. */
325
+ tmp = tcg_temp_new_i32();
326
+ tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs));
327
+ tcg_gen_addi_i32(tmp, tmp, index);
328
+
329
+ /* Prepare a power-of-two modulo via extraction of @len bits. */
330
+ len = ctz32(streaming_vec_reg_size(s)) - esz;
331
+
332
+ if (vertical) {
333
+ /*
334
+ * Compute the byte offset of the index within the tile:
335
+ * (index % (svl / size)) * size
336
+ * = (index % (svl >> esz)) << esz
337
+ * Perform the power-of-two modulo via extraction of the low @len bits.
338
+ * Perform the multiply by shifting left by @pos bits.
339
+ * Perform these operations simultaneously via deposit into zero.
340
+ */
341
+ pos = esz;
342
+ tcg_gen_deposit_z_i32(tmp, tmp, pos, len);
343
+
344
+ /*
345
+ * For big-endian, adjust the indexed column byte offset within
346
+ * the uint64_t host words that make up env->zarray[].
347
+ */
348
+ if (HOST_BIG_ENDIAN && esz < MO_64) {
349
+ tcg_gen_xori_i32(tmp, tmp, 8 - (1 << esz));
350
+ }
351
+ } else {
352
+ /*
353
+ * Compute the byte offset of the index within the tile:
354
+ * (index % (svl / size)) * (size * sizeof(row))
355
+ * = (index % (svl >> esz)) << (esz + log2(sizeof(row)))
356
+ */
357
+ pos = esz + ctz32(sizeof(ARMVectorReg));
358
+ tcg_gen_deposit_z_i32(tmp, tmp, pos, len);
359
+
360
+ /* Row slices are always aligned and need no endian adjustment. */
361
+ }
362
+
363
+ /* The tile byte offset within env->zarray is the row. */
364
+ offset = tile * sizeof(ARMVectorReg);
365
+
366
+ /* Include the byte offset of zarray to make this relative to env. */
367
+ offset += offsetof(CPUARMState, zarray);
368
+ tcg_gen_addi_i32(tmp, tmp, offset);
369
+
370
+ /* Add the byte offset to env to produce the final pointer. */
371
+ addr = tcg_temp_new_ptr();
372
+ tcg_gen_ext_i32_ptr(addr, tmp);
373
+ tcg_temp_free_i32(tmp);
374
+ tcg_gen_add_ptr(addr, addr, cpu_env);
375
+
376
+ return addr;
377
+}
378
+
379
static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
380
{
381
if (!dc_isar_feature(aa64_sme, s)) {
382
@@ -XXX,XX +XXX,XX @@ static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
383
}
384
return true;
286
}
385
}
287
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
386
+
288
index XXXXXXX..XXXXXXX 100644
387
+static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
289
--- a/hw/nvme/ctrl.c
388
+{
290
+++ b/hw/nvme/ctrl.c
389
+ static gen_helper_gvec_4 * const h_fns[5] = {
291
@@ -XXX,XX +XXX,XX @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
390
+ gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
292
return;
391
+ gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d,
293
}
392
+ gen_helper_sve_sel_zpzz_q
294
393
+ };
295
- qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
394
+ static gen_helper_gvec_3 * const cz_fns[5] = {
296
- &pci_dev->qdev, n->parent_obj.qdev.id);
395
+ gen_helper_sme_mova_cz_b, gen_helper_sme_mova_cz_h,
297
+ qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
396
+ gen_helper_sme_mova_cz_s, gen_helper_sme_mova_cz_d,
298
+ &pci_dev->qdev, n->parent_obj.qdev.id);
397
+ gen_helper_sme_mova_cz_q,
299
398
+ };
300
nvme_init_state(n);
399
+ static gen_helper_gvec_3 * const zc_fns[5] = {
301
if (nvme_init_pci(n, pci_dev, errp)) {
400
+ gen_helper_sme_mova_zc_b, gen_helper_sme_mova_zc_h,
302
diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c
401
+ gen_helper_sme_mova_zc_s, gen_helper_sme_mova_zc_d,
303
index XXXXXXX..XXXXXXX 100644
402
+ gen_helper_sme_mova_zc_q,
304
--- a/hw/nvme/subsys.c
403
+ };
305
+++ b/hw/nvme/subsys.c
404
+
306
@@ -XXX,XX +XXX,XX @@ static void nvme_subsys_realize(DeviceState *dev, Error **errp)
405
+ TCGv_ptr t_za, t_zr, t_pg;
307
{
406
+ TCGv_i32 t_desc;
308
NvmeSubsystem *subsys = NVME_SUBSYS(dev);
407
+ int svl;
309
408
+
310
- qbus_create_inplace(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev,
409
+ if (!dc_isar_feature(aa64_sme, s)) {
311
- dev->id);
410
+ return false;
312
+ qbus_init(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id);
411
+ }
313
412
+ if (!sme_smza_enabled_check(s)) {
314
nvme_subsys_setup(subsys);
413
+ return true;
315
}
414
+ }
316
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
415
+
317
index XXXXXXX..XXXXXXX 100644
416
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
318
--- a/hw/pci/pci.c
417
+ t_zr = vec_full_reg_ptr(s, a->zr);
319
+++ b/hw/pci/pci.c
418
+ t_pg = pred_full_reg_ptr(s, a->pg);
320
@@ -XXX,XX +XXX,XX @@ void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
419
+
321
MemoryRegion *address_space_io,
420
+ svl = streaming_vec_reg_size(s);
322
uint8_t devfn_min, const char *typename)
421
+ t_desc = tcg_constant_i32(simd_desc(svl, svl, 0));
323
{
422
+
324
- qbus_create_inplace(bus, bus_size, typename, parent, name);
423
+ if (a->v) {
325
+ qbus_init(bus, bus_size, typename, parent, name);
424
+ /* Vertical slice -- use sme mova helpers. */
326
pci_root_bus_internal_init(bus, parent, address_space_mem,
425
+ if (a->to_vec) {
327
address_space_io, devfn_min);
426
+ zc_fns[a->esz](t_zr, t_za, t_pg, t_desc);
328
}
427
+ } else {
329
diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c
428
+ cz_fns[a->esz](t_za, t_zr, t_pg, t_desc);
330
index XXXXXXX..XXXXXXX 100644
429
+ }
331
--- a/hw/pci/pci_bridge.c
430
+ } else {
332
+++ b/hw/pci/pci_bridge.c
431
+ /* Horizontal slice -- reuse sve sel helpers. */
333
@@ -XXX,XX +XXX,XX @@ void pci_bridge_initfn(PCIDevice *dev, const char *typename)
432
+ if (a->to_vec) {
334
br->bus_name = dev->qdev.id;
433
+ h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc);
335
}
434
+ } else {
336
435
+ h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc);
337
- qbus_create_inplace(sec_bus, sizeof(br->sec_bus), typename, DEVICE(dev),
436
+ }
338
- br->bus_name);
437
+ }
339
+ qbus_init(sec_bus, sizeof(br->sec_bus), typename, DEVICE(dev),
438
+
340
+ br->bus_name);
439
+ tcg_temp_free_ptr(t_za);
341
sec_bus->parent_dev = dev;
440
+ tcg_temp_free_ptr(t_zr);
342
sec_bus->map_irq = br->map_irq ? br->map_irq : pci_swizzle_map_irq_fn;
441
+ tcg_temp_free_ptr(t_pg);
343
sec_bus->address_space_mem = &br->address_space_mem;
442
+
344
diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c
443
+ return true;
345
index XXXXXXX..XXXXXXX 100644
444
+}
346
--- a/hw/s390x/event-facility.c
347
+++ b/hw/s390x/event-facility.c
348
@@ -XXX,XX +XXX,XX @@ static void init_event_facility(Object *obj)
349
sclp_event_set_allow_all_mask_sizes);
350
351
/* Spawn a new bus for SCLP events */
352
- qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus),
353
- TYPE_SCLP_EVENTS_BUS, sdev, NULL);
354
+ qbus_init(&event_facility->sbus, sizeof(event_facility->sbus),
355
+ TYPE_SCLP_EVENTS_BUS, sdev, NULL);
356
357
object_initialize_child(obj, TYPE_SCLP_QUIESCE,
358
&event_facility->quiesce,
359
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
360
index XXXXXXX..XXXXXXX 100644
361
--- a/hw/s390x/virtio-ccw.c
362
+++ b/hw/s390x/virtio-ccw.c
363
@@ -XXX,XX +XXX,XX @@ static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
364
DeviceState *qdev = DEVICE(dev);
365
char virtio_bus_name[] = "virtio-bus";
366
367
- qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS,
368
- qdev, virtio_bus_name);
369
+ qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name);
370
}
371
372
static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
373
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
374
index XXXXXXX..XXXXXXX 100644
375
--- a/hw/scsi/scsi-bus.c
376
+++ b/hw/scsi/scsi-bus.c
377
@@ -XXX,XX +XXX,XX @@ void scsi_device_unit_attention_reported(SCSIDevice *s)
378
void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
379
const SCSIBusInfo *info, const char *bus_name)
380
{
381
- qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
382
+ qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
383
bus->busnr = next_scsi_bus++;
384
bus->info = info;
385
qbus_set_bus_hotplug_handler(BUS(bus));
386
diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c
387
index XXXXXXX..XXXXXXX 100644
388
--- a/hw/sd/allwinner-sdhost.c
389
+++ b/hw/sd/allwinner-sdhost.c
390
@@ -XXX,XX +XXX,XX @@ static void allwinner_sdhost_init(Object *obj)
391
{
392
AwSdHostState *s = AW_SDHOST(obj);
393
394
- qbus_create_inplace(&s->sdbus, sizeof(s->sdbus),
395
- TYPE_AW_SDHOST_BUS, DEVICE(s), "sd-bus");
396
+ qbus_init(&s->sdbus, sizeof(s->sdbus),
397
+ TYPE_AW_SDHOST_BUS, DEVICE(s), "sd-bus");
398
399
memory_region_init_io(&s->iomem, obj, &allwinner_sdhost_ops, s,
400
TYPE_AW_SDHOST, 4 * KiB);
401
diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c
402
index XXXXXXX..XXXXXXX 100644
403
--- a/hw/sd/bcm2835_sdhost.c
404
+++ b/hw/sd/bcm2835_sdhost.c
405
@@ -XXX,XX +XXX,XX @@ static void bcm2835_sdhost_init(Object *obj)
406
{
407
BCM2835SDHostState *s = BCM2835_SDHOST(obj);
408
409
- qbus_create_inplace(&s->sdbus, sizeof(s->sdbus),
410
- TYPE_BCM2835_SDHOST_BUS, DEVICE(s), "sd-bus");
411
+ qbus_init(&s->sdbus, sizeof(s->sdbus),
412
+ TYPE_BCM2835_SDHOST_BUS, DEVICE(s), "sd-bus");
413
414
memory_region_init_io(&s->iomem, obj, &bcm2835_sdhost_ops, s,
415
TYPE_BCM2835_SDHOST, 0x1000);
416
diff --git a/hw/sd/pl181.c b/hw/sd/pl181.c
417
index XXXXXXX..XXXXXXX 100644
418
--- a/hw/sd/pl181.c
419
+++ b/hw/sd/pl181.c
420
@@ -XXX,XX +XXX,XX @@ static void pl181_init(Object *obj)
421
qdev_init_gpio_out_named(dev, &s->card_readonly, "card-read-only", 1);
422
qdev_init_gpio_out_named(dev, &s->card_inserted, "card-inserted", 1);
423
424
- qbus_create_inplace(&s->sdbus, sizeof(s->sdbus),
425
- TYPE_PL181_BUS, dev, "sd-bus");
426
+ qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_PL181_BUS, dev, "sd-bus");
427
}
428
429
static void pl181_class_init(ObjectClass *klass, void *data)
430
diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c
431
index XXXXXXX..XXXXXXX 100644
432
--- a/hw/sd/pxa2xx_mmci.c
433
+++ b/hw/sd/pxa2xx_mmci.c
434
@@ -XXX,XX +XXX,XX @@ static void pxa2xx_mmci_instance_init(Object *obj)
435
qdev_init_gpio_out_named(dev, &s->rx_dma, "rx-dma", 1);
436
qdev_init_gpio_out_named(dev, &s->tx_dma, "tx-dma", 1);
437
438
- qbus_create_inplace(&s->sdbus, sizeof(s->sdbus),
439
- TYPE_PXA2XX_MMCI_BUS, DEVICE(obj), "sd-bus");
440
+ qbus_init(&s->sdbus, sizeof(s->sdbus),
441
+ TYPE_PXA2XX_MMCI_BUS, DEVICE(obj), "sd-bus");
442
}
443
444
static void pxa2xx_mmci_class_init(ObjectClass *klass, void *data)
445
diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c
446
index XXXXXXX..XXXXXXX 100644
447
--- a/hw/sd/sdhci.c
448
+++ b/hw/sd/sdhci.c
449
@@ -XXX,XX +XXX,XX @@ static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp)
450
451
void sdhci_initfn(SDHCIState *s)
452
{
453
- qbus_create_inplace(&s->sdbus, sizeof(s->sdbus),
454
- TYPE_SDHCI_BUS, DEVICE(s), "sd-bus");
455
+ qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus");
456
457
s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s);
458
s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s);
459
diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c
460
index XXXXXXX..XXXXXXX 100644
461
--- a/hw/sd/ssi-sd.c
462
+++ b/hw/sd/ssi-sd.c
463
@@ -XXX,XX +XXX,XX @@ static void ssi_sd_realize(SSIPeripheral *d, Error **errp)
464
DeviceState *carddev;
465
DriveInfo *dinfo;
466
467
- qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS,
468
- DEVICE(d), "sd-bus");
469
+ qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(d), "sd-bus");
470
471
/* Create and plug in the sd card */
472
/* FIXME use a qdev drive property instead of drive_get_next() */
473
diff --git a/hw/usb/bus.c b/hw/usb/bus.c
474
index XXXXXXX..XXXXXXX 100644
475
--- a/hw/usb/bus.c
476
+++ b/hw/usb/bus.c
477
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_usb_device = {
478
void usb_bus_new(USBBus *bus, size_t bus_size,
479
USBBusOps *ops, DeviceState *host)
480
{
481
- qbus_create_inplace(bus, bus_size, TYPE_USB_BUS, host, NULL);
482
+ qbus_init(bus, bus_size, TYPE_USB_BUS, host, NULL);
483
qbus_set_bus_hotplug_handler(BUS(bus));
484
bus->ops = ops;
485
bus->busnr = next_usb_bus++;
486
diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c
487
index XXXXXXX..XXXXXXX 100644
488
--- a/hw/usb/dev-smartcard-reader.c
489
+++ b/hw/usb/dev-smartcard-reader.c
490
@@ -XXX,XX +XXX,XX @@ static void ccid_realize(USBDevice *dev, Error **errp)
491
492
usb_desc_create_serial(dev);
493
usb_desc_init(dev);
494
- qbus_create_inplace(&s->bus, sizeof(s->bus), TYPE_CCID_BUS, DEVICE(dev),
495
- NULL);
496
+ qbus_init(&s->bus, sizeof(s->bus), TYPE_CCID_BUS, DEVICE(dev), NULL);
497
qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));
498
s->intr = usb_ep_get(dev, USB_TOKEN_IN, CCID_INT_IN_EP);
499
s->bulk = usb_ep_get(dev, USB_TOKEN_IN, CCID_BULK_IN_EP);
500
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
501
index XXXXXXX..XXXXXXX 100644
502
--- a/hw/virtio/virtio-mmio.c
503
+++ b/hw/virtio/virtio-mmio.c
504
@@ -XXX,XX +XXX,XX @@ static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
505
VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
506
SysBusDevice *sbd = SYS_BUS_DEVICE(d);
507
508
- qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
509
- d, NULL);
510
+ qbus_init(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL);
511
sysbus_init_irq(sbd, &proxy->irq);
512
513
if (!kvm_eventfds_enabled()) {
514
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
515
index XXXXXXX..XXXXXXX 100644
516
--- a/hw/virtio/virtio-pci.c
517
+++ b/hw/virtio/virtio-pci.c
518
@@ -XXX,XX +XXX,XX @@ static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
519
DeviceState *qdev = DEVICE(dev);
520
char virtio_bus_name[] = "virtio-bus";
521
522
- qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
523
- virtio_bus_name);
524
+ qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name);
525
}
526
527
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
528
--
445
--
529
2.20.1

--
2.25.1

New patch

From: Richard Henderson <richard.henderson@linaro.org>

We cannot reuse the SVE functions for LD[1-4] and ST[1-4],
because those functions accept only a Zreg register number.
For SME, we want to pass a pointer into ZA storage.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-21-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-sme.h | 82 +++++
target/arm/sme.decode | 9 +
target/arm/sme_helper.c | 595 +++++++++++++++++++++++++++++++++++++
target/arm/translate-sme.c | 70 +++++
4 files changed, 756 insertions(+)
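
To make the constraint described above concrete, here is a small standalone sketch of the difference between the two calling conventions. This is not QEMU code: the toy za[] layout, the VL value and both function names are invented purely for illustration.

/*
 * Standalone illustration only -- not QEMU code.  The toy ZA layout,
 * VL value and function names are made up for this sketch.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define VL 16                        /* toy streaming vector length in bytes */

static uint8_t zregs[32][VL];        /* SVE-style register file */
static uint8_t za[VL][VL];           /* ZA storage: VL rows of VL bytes */

/* SVE-style helper: the destination is named by a register number. */
static void toy_sve_ld1b(int zd, const uint8_t *mem)
{
    memcpy(zregs[zd], mem, VL);
}

/*
 * SME-style helper: the destination is a pointer into ZA storage, so the
 * translator can hand it any tile row (or column base) it has resolved.
 */
static void toy_sme_ld1b_h(void *za_row, const uint8_t *mem)
{
    memcpy(za_row, mem, VL);
}

int main(void)
{
    uint8_t mem[VL] = { 1, 2, 3, 4 };

    toy_sve_ld1b(5, mem);           /* register number picks the target */
    toy_sme_ld1b_h(&za[3][0], mem); /* pointer picks a row of ZA */

    printf("z5[0]=%d za[3][0]=%d\n", zregs[5][0], za[3][0]);
    return 0;
}

In the patch itself the translator resolves the slice base with get_tile_rowcol() and hands that pointer to the helpers, reusing the existing SVE host/tlb primitives for horizontal slices and adding _v variants for vertical ones.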
17
18
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/helper-sme.h
21
+++ b/target/arm/helper-sme.h
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
23
DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
24
DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
25
DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
26
+
27
+DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
28
+DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
29
+DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
30
+DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
31
+
32
+DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
33
+DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
34
+DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
35
+DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
36
+DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
37
+DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
38
+DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
39
+DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
40
+
41
+DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
42
+DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
43
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
44
+DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
45
+DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
46
+DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
47
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
48
+DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
49
+
50
+DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
51
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
52
+DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
53
+DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
54
+DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
55
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
56
+DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
57
+DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
58
+
59
+DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
60
+DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
61
+DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
62
+DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
63
+DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
64
+DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
65
+DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
66
+DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
67
+
68
+DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
69
+DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
70
+DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
71
+DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
72
+
73
+DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
74
+DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
75
+DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
76
+DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
77
+DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
78
+DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
79
+DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
80
+DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
81
+
82
+DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
83
+DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
84
+DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
85
+DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
86
+DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
87
+DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
88
+DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
89
+DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
90
+
91
+DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
92
+DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
93
+DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
94
+DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
95
+DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
96
+DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
97
+DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
98
+DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
99
+
100
+DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
101
+DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
102
+DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
103
+DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
104
+DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
105
+DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
106
+DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
107
+DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
108
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
109
index XXXXXXX..XXXXXXX 100644
110
--- a/target/arm/sme.decode
111
+++ b/target/arm/sme.decode
112
@@ -XXX,XX +XXX,XX @@ MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \
113
&mova to_vec=1 rs=%mova_rs
114
MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \
115
&mova to_vec=1 rs=%mova_rs esz=4
116
+
117
+### SME Memory
118
+
119
+&ldst esz rs pg rn rm za_imm v:bool st:bool
120
+
121
+LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
122
+ &ldst rs=%mova_rs
123
+LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
124
+ &ldst esz=4 rs=%mova_rs
125
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
126
index XXXXXXX..XXXXXXX 100644
127
--- a/target/arm/sme_helper.c
128
+++ b/target/arm/sme_helper.c
129
@@ -XXX,XX +XXX,XX @@
130
131
#include "qemu/osdep.h"
132
#include "cpu.h"
133
+#include "internals.h"
134
#include "tcg/tcg-gvec-desc.h"
135
#include "exec/helper-proto.h"
136
+#include "exec/cpu_ldst.h"
137
+#include "exec/exec-all.h"
138
#include "qemu/int128.h"
139
#include "vec_internal.h"
140
+#include "sve_ldst_internal.h"
141
142
/* ResetSVEState */
143
void arm_reset_sve_state(CPUARMState *env)
144
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc)
145
}
146
147
#undef DO_MOVA_Z
148
+
149
+/*
150
+ * Clear elements in a tile slice comprising len bytes.
151
+ */
152
+
153
+typedef void ClearFn(void *ptr, size_t off, size_t len);
154
+
155
+static void clear_horizontal(void *ptr, size_t off, size_t len)
156
+{
157
+ memset(ptr + off, 0, len);
158
+}
159
+
160
+static void clear_vertical_b(void *vptr, size_t off, size_t len)
161
+{
162
+ for (size_t i = 0; i < len; ++i) {
163
+ *(uint8_t *)(vptr + tile_vslice_offset(i + off)) = 0;
164
+ }
165
+}
166
+
167
+static void clear_vertical_h(void *vptr, size_t off, size_t len)
168
+{
169
+ for (size_t i = 0; i < len; i += 2) {
170
+ *(uint16_t *)(vptr + tile_vslice_offset(i + off)) = 0;
171
+ }
172
+}
173
+
174
+static void clear_vertical_s(void *vptr, size_t off, size_t len)
175
+{
176
+ for (size_t i = 0; i < len; i += 4) {
177
+ *(uint32_t *)(vptr + tile_vslice_offset(i + off)) = 0;
178
+ }
179
+}
180
+
181
+static void clear_vertical_d(void *vptr, size_t off, size_t len)
182
+{
183
+ for (size_t i = 0; i < len; i += 8) {
184
+ *(uint64_t *)(vptr + tile_vslice_offset(i + off)) = 0;
185
+ }
186
+}
187
+
188
+static void clear_vertical_q(void *vptr, size_t off, size_t len)
189
+{
190
+ for (size_t i = 0; i < len; i += 16) {
191
+ memset(vptr + tile_vslice_offset(i + off), 0, 16);
192
+ }
193
+}
194
+
195
+/*
196
+ * Copy elements from an array into a tile slice comprising len bytes.
197
+ */
198
+
199
+typedef void CopyFn(void *dst, const void *src, size_t len);
200
+
201
+static void copy_horizontal(void *dst, const void *src, size_t len)
202
+{
203
+ memcpy(dst, src, len);
204
+}
205
+
206
+static void copy_vertical_b(void *vdst, const void *vsrc, size_t len)
207
+{
208
+ const uint8_t *src = vsrc;
209
+ uint8_t *dst = vdst;
210
+ size_t i;
211
+
212
+ for (i = 0; i < len; ++i) {
213
+ dst[tile_vslice_index(i)] = src[i];
214
+ }
215
+}
216
+
217
+static void copy_vertical_h(void *vdst, const void *vsrc, size_t len)
218
+{
219
+ const uint16_t *src = vsrc;
220
+ uint16_t *dst = vdst;
221
+ size_t i;
222
+
223
+ for (i = 0; i < len / 2; ++i) {
224
+ dst[tile_vslice_index(i)] = src[i];
225
+ }
226
+}
227
+
228
+static void copy_vertical_s(void *vdst, const void *vsrc, size_t len)
229
+{
230
+ const uint32_t *src = vsrc;
231
+ uint32_t *dst = vdst;
232
+ size_t i;
233
+
234
+ for (i = 0; i < len / 4; ++i) {
235
+ dst[tile_vslice_index(i)] = src[i];
236
+ }
237
+}
238
+
239
+static void copy_vertical_d(void *vdst, const void *vsrc, size_t len)
240
+{
241
+ const uint64_t *src = vsrc;
242
+ uint64_t *dst = vdst;
243
+ size_t i;
244
+
245
+ for (i = 0; i < len / 8; ++i) {
246
+ dst[tile_vslice_index(i)] = src[i];
247
+ }
248
+}
249
+
250
+static void copy_vertical_q(void *vdst, const void *vsrc, size_t len)
251
+{
252
+ for (size_t i = 0; i < len; i += 16) {
253
+ memcpy(vdst + tile_vslice_offset(i), vsrc + i, 16);
254
+ }
255
+}
256
+
257
+/*
258
+ * Host and TLB primitives for vertical tile slice addressing.
259
+ */
260
+
261
+#define DO_LD(NAME, TYPE, HOST, TLB) \
262
+static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \
263
+{ \
264
+ TYPE val = HOST(host); \
265
+ *(TYPE *)(za + tile_vslice_offset(off)) = val; \
266
+} \
267
+static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \
268
+ intptr_t off, target_ulong addr, uintptr_t ra) \
269
+{ \
270
+ TYPE val = TLB(env, useronly_clean_ptr(addr), ra); \
271
+ *(TYPE *)(za + tile_vslice_offset(off)) = val; \
272
+}
273
+
274
+#define DO_ST(NAME, TYPE, HOST, TLB) \
275
+static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \
276
+{ \
277
+ TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \
278
+ HOST(host, val); \
279
+} \
280
+static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \
281
+ intptr_t off, target_ulong addr, uintptr_t ra) \
282
+{ \
283
+ TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \
284
+ TLB(env, useronly_clean_ptr(addr), val, ra); \
285
+}
286
+
287
+/*
288
+ * The ARMVectorReg elements are stored in host-endian 64-bit units.
289
+ * For 128-bit quantities, the sequence defined by the Elem[] pseudocode
290
+ * corresponds to storing the two 64-bit pieces in little-endian order.
291
+ */
292
+#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB) \
293
+static inline void HNAME##_host(void *za, intptr_t off, void *host) \
294
+{ \
295
+ uint64_t val0 = HOST(host), val1 = HOST(host + 8); \
296
+ uint64_t *ptr = za + off; \
297
+ ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \
298
+} \
299
+static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
300
+{ \
301
+ HNAME##_host(za, tile_vslice_offset(off), host); \
302
+} \
303
+static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \
304
+ target_ulong addr, uintptr_t ra) \
305
+{ \
306
+ uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra); \
307
+ uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra); \
308
+ uint64_t *ptr = za + off; \
309
+ ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \
310
+} \
311
+static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \
312
+ target_ulong addr, uintptr_t ra) \
313
+{ \
314
+ HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \
315
+}
316
+
317
+#define DO_STQ(HNAME, VNAME, BE, HOST, TLB) \
318
+static inline void HNAME##_host(void *za, intptr_t off, void *host) \
319
+{ \
320
+ uint64_t *ptr = za + off; \
321
+ HOST(host, ptr[BE]); \
322
+ HOST(host + 1, ptr[!BE]); \
323
+} \
324
+static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
325
+{ \
326
+ HNAME##_host(za, tile_vslice_offset(off), host); \
327
+} \
328
+static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \
329
+ target_ulong addr, uintptr_t ra) \
330
+{ \
331
+ uint64_t *ptr = za + off; \
332
+ TLB(env, useronly_clean_ptr(addr), ptr[BE], ra); \
333
+ TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra); \
334
+} \
335
+static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \
336
+ target_ulong addr, uintptr_t ra) \
337
+{ \
338
+ HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \
339
+}
340
+
341
+DO_LD(ld1b, uint8_t, ldub_p, cpu_ldub_data_ra)
342
+DO_LD(ld1h_be, uint16_t, lduw_be_p, cpu_lduw_be_data_ra)
343
+DO_LD(ld1h_le, uint16_t, lduw_le_p, cpu_lduw_le_data_ra)
344
+DO_LD(ld1s_be, uint32_t, ldl_be_p, cpu_ldl_be_data_ra)
345
+DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra)
346
+DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra)
347
+DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra)
348
+
349
+DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra)
350
+DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra)
351
+
352
+DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra)
353
+DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra)
354
+DO_ST(st1h_le, uint16_t, stw_le_p, cpu_stw_le_data_ra)
355
+DO_ST(st1s_be, uint32_t, stl_be_p, cpu_stl_be_data_ra)
356
+DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra)
357
+DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra)
358
+DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra)
359
+
360
+DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra)
361
+DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra)
362
+
363
+#undef DO_LD
364
+#undef DO_ST
365
+#undef DO_LDQ
366
+#undef DO_STQ
367
+
368
+/*
369
+ * Common helper for all contiguous predicated loads.
370
+ */
371
+
372
+static inline QEMU_ALWAYS_INLINE
373
+void sme_ld1(CPUARMState *env, void *za, uint64_t *vg,
374
+ const target_ulong addr, uint32_t desc, const uintptr_t ra,
375
+ const int esz, uint32_t mtedesc, bool vertical,
376
+ sve_ldst1_host_fn *host_fn,
377
+ sve_ldst1_tlb_fn *tlb_fn,
378
+ ClearFn *clr_fn,
379
+ CopyFn *cpy_fn)
380
+{
381
+ const intptr_t reg_max = simd_oprsz(desc);
382
+ const intptr_t esize = 1 << esz;
383
+ intptr_t reg_off, reg_last;
384
+ SVEContLdSt info;
385
+ void *host;
386
+ int flags;
387
+
388
+ /* Find the active elements. */
389
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) {
390
+ /* The entire predicate was false; no load occurs. */
391
+ clr_fn(za, 0, reg_max);
392
+ return;
393
+ }
394
+
395
+ /* Probe the page(s). Exit with exception for any invalid page. */
396
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra);
397
+
398
+ /* Handle watchpoints for all active elements. */
399
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize,
400
+ BP_MEM_READ, ra);
401
+
402
+ /*
403
+ * Handle mte checks for all active elements.
404
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
405
+ */
406
+ if (mtedesc) {
407
+ sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize,
408
+ mtedesc, ra);
409
+ }
410
+
411
+ flags = info.page[0].flags | info.page[1].flags;
412
+ if (unlikely(flags != 0)) {
413
+#ifdef CONFIG_USER_ONLY
414
+ g_assert_not_reached();
415
+#else
416
+ /*
417
+ * At least one page includes MMIO.
418
+ * Any bus operation can fail with cpu_transaction_failed,
419
+ * which for ARM will raise SyncExternal. Perform the load
420
+ * into scratch memory to preserve register state until the end.
421
+ */
422
+ ARMVectorReg scratch = { };
423
+
424
+ reg_off = info.reg_off_first[0];
425
+ reg_last = info.reg_off_last[1];
426
+ if (reg_last < 0) {
427
+ reg_last = info.reg_off_split;
428
+ if (reg_last < 0) {
429
+ reg_last = info.reg_off_last[0];
430
+ }
431
+ }
432
+
433
+ do {
434
+ uint64_t pg = vg[reg_off >> 6];
435
+ do {
436
+ if ((pg >> (reg_off & 63)) & 1) {
437
+ tlb_fn(env, &scratch, reg_off, addr + reg_off, ra);
438
+ }
439
+ reg_off += esize;
440
+ } while (reg_off & 63);
441
+ } while (reg_off <= reg_last);
442
+
443
+ cpy_fn(za, &scratch, reg_max);
444
+ return;
445
+#endif
446
+ }
447
+
448
+ /* The entire operation is in RAM, on valid pages. */
449
+
450
+ reg_off = info.reg_off_first[0];
451
+ reg_last = info.reg_off_last[0];
452
+ host = info.page[0].host;
453
+
454
+ if (!vertical) {
455
+ memset(za, 0, reg_max);
456
+ } else if (reg_off) {
457
+ clr_fn(za, 0, reg_off);
458
+ }
459
+
460
+ while (reg_off <= reg_last) {
461
+ uint64_t pg = vg[reg_off >> 6];
462
+ do {
463
+ if ((pg >> (reg_off & 63)) & 1) {
464
+ host_fn(za, reg_off, host + reg_off);
465
+ } else if (vertical) {
466
+ clr_fn(za, reg_off, esize);
467
+ }
468
+ reg_off += esize;
469
+ } while (reg_off <= reg_last && (reg_off & 63));
470
+ }
471
+
472
+ /*
473
+ * Use the slow path to manage the cross-page misalignment.
474
+ * But we know this is RAM and cannot trap.
475
+ */
476
+ reg_off = info.reg_off_split;
477
+ if (unlikely(reg_off >= 0)) {
478
+ tlb_fn(env, za, reg_off, addr + reg_off, ra);
479
+ }
480
+
481
+ reg_off = info.reg_off_first[1];
482
+ if (unlikely(reg_off >= 0)) {
483
+ reg_last = info.reg_off_last[1];
484
+ host = info.page[1].host;
485
+
486
+ do {
487
+ uint64_t pg = vg[reg_off >> 6];
488
+ do {
489
+ if ((pg >> (reg_off & 63)) & 1) {
490
+ host_fn(za, reg_off, host + reg_off);
491
+ } else if (vertical) {
492
+ clr_fn(za, reg_off, esize);
493
+ }
494
+ reg_off += esize;
495
+ } while (reg_off & 63);
496
+ } while (reg_off <= reg_last);
497
+ }
498
+}
499
+
500
+static inline QEMU_ALWAYS_INLINE
501
+void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg,
502
+ target_ulong addr, uint32_t desc, uintptr_t ra,
503
+ const int esz, bool vertical,
504
+ sve_ldst1_host_fn *host_fn,
505
+ sve_ldst1_tlb_fn *tlb_fn,
506
+ ClearFn *clr_fn,
507
+ CopyFn *cpy_fn)
508
+{
509
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
510
+ int bit55 = extract64(addr, 55, 1);
511
+
512
+ /* Remove mtedesc from the normal sve descriptor. */
513
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
514
+
515
+ /* Perform gross MTE suppression early. */
516
+ if (!tbi_check(desc, bit55) ||
517
+ tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
518
+ mtedesc = 0;
519
+ }
520
+
521
+ sme_ld1(env, za, vg, addr, desc, ra, esz, mtedesc, vertical,
522
+ host_fn, tlb_fn, clr_fn, cpy_fn);
523
+}
524
+
525
+#define DO_LD(L, END, ESZ) \
526
+void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg, \
527
+ target_ulong addr, uint32_t desc) \
528
+{ \
529
+ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \
530
+ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \
531
+ clear_horizontal, copy_horizontal); \
532
+} \
533
+void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg, \
534
+ target_ulong addr, uint32_t desc) \
535
+{ \
536
+ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \
537
+ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \
538
+ clear_vertical_##L, copy_vertical_##L); \
539
+} \
540
+void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \
541
+ target_ulong addr, uint32_t desc) \
542
+{ \
543
+ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \
544
+ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \
545
+ clear_horizontal, copy_horizontal); \
546
+} \
547
+void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \
548
+ target_ulong addr, uint32_t desc) \
549
+{ \
550
+ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \
551
+ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \
552
+ clear_vertical_##L, copy_vertical_##L); \
553
+}
554
+
555
+DO_LD(b, , MO_8)
556
+DO_LD(h, _be, MO_16)
557
+DO_LD(h, _le, MO_16)
558
+DO_LD(s, _be, MO_32)
559
+DO_LD(s, _le, MO_32)
560
+DO_LD(d, _be, MO_64)
561
+DO_LD(d, _le, MO_64)
562
+DO_LD(q, _be, MO_128)
563
+DO_LD(q, _le, MO_128)
564
+
565
+#undef DO_LD
566
+
567
+/*
568
+ * Common helper for all contiguous predicated stores.
569
+ */
570
+
571
+static inline QEMU_ALWAYS_INLINE
572
+void sme_st1(CPUARMState *env, void *za, uint64_t *vg,
573
+ const target_ulong addr, uint32_t desc, const uintptr_t ra,
574
+ const int esz, uint32_t mtedesc, bool vertical,
575
+ sve_ldst1_host_fn *host_fn,
576
+ sve_ldst1_tlb_fn *tlb_fn)
577
+{
578
+ const intptr_t reg_max = simd_oprsz(desc);
579
+ const intptr_t esize = 1 << esz;
580
+ intptr_t reg_off, reg_last;
581
+ SVEContLdSt info;
582
+ void *host;
583
+ int flags;
584
+
585
+ /* Find the active elements. */
586
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) {
587
+ /* The entire predicate was false; no store occurs. */
588
+ return;
589
+ }
590
+
591
+ /* Probe the page(s). Exit with exception for any invalid page. */
592
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra);
593
+
594
+ /* Handle watchpoints for all active elements. */
595
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize,
596
+ BP_MEM_WRITE, ra);
597
+
598
+ /*
599
+ * Handle mte checks for all active elements.
600
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
601
+ */
602
+ if (mtedesc) {
603
+ sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize,
604
+ mtedesc, ra);
605
+ }
606
+
607
+ flags = info.page[0].flags | info.page[1].flags;
608
+ if (unlikely(flags != 0)) {
609
+#ifdef CONFIG_USER_ONLY
610
+ g_assert_not_reached();
611
+#else
612
+ /*
613
+ * At least one page includes MMIO.
614
+ * Any bus operation can fail with cpu_transaction_failed,
615
+ * which for ARM will raise SyncExternal. We cannot avoid
616
+ * this fault and will leave with the store incomplete.
617
+ */
618
+ reg_off = info.reg_off_first[0];
619
+ reg_last = info.reg_off_last[1];
620
+ if (reg_last < 0) {
621
+ reg_last = info.reg_off_split;
622
+ if (reg_last < 0) {
623
+ reg_last = info.reg_off_last[0];
624
+ }
625
+ }
626
+
627
+ do {
628
+ uint64_t pg = vg[reg_off >> 6];
629
+ do {
630
+ if ((pg >> (reg_off & 63)) & 1) {
631
+ tlb_fn(env, za, reg_off, addr + reg_off, ra);
632
+ }
633
+ reg_off += esize;
634
+ } while (reg_off & 63);
635
+ } while (reg_off <= reg_last);
636
+ return;
637
+#endif
638
+ }
639
+
640
+ reg_off = info.reg_off_first[0];
641
+ reg_last = info.reg_off_last[0];
642
+ host = info.page[0].host;
643
+
644
+ while (reg_off <= reg_last) {
645
+ uint64_t pg = vg[reg_off >> 6];
646
+ do {
647
+ if ((pg >> (reg_off & 63)) & 1) {
648
+ host_fn(za, reg_off, host + reg_off);
649
+ }
650
+ reg_off += 1 << esz;
651
+ } while (reg_off <= reg_last && (reg_off & 63));
652
+ }
653
+
654
+ /*
655
+ * Use the slow path to manage the cross-page misalignment.
656
+ * But we know this is RAM and cannot trap.
657
+ */
658
+ reg_off = info.reg_off_split;
659
+ if (unlikely(reg_off >= 0)) {
660
+ tlb_fn(env, za, reg_off, addr + reg_off, ra);
661
+ }
662
+
663
+ reg_off = info.reg_off_first[1];
664
+ if (unlikely(reg_off >= 0)) {
665
+ reg_last = info.reg_off_last[1];
666
+ host = info.page[1].host;
667
+
668
+ do {
669
+ uint64_t pg = vg[reg_off >> 6];
670
+ do {
671
+ if ((pg >> (reg_off & 63)) & 1) {
672
+ host_fn(za, reg_off, host + reg_off);
673
+ }
674
+ reg_off += 1 << esz;
675
+ } while (reg_off & 63);
676
+ } while (reg_off <= reg_last);
677
+ }
678
+}
679
+
680
+static inline QEMU_ALWAYS_INLINE
681
+void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr,
682
+ uint32_t desc, uintptr_t ra, int esz, bool vertical,
683
+ sve_ldst1_host_fn *host_fn,
684
+ sve_ldst1_tlb_fn *tlb_fn)
685
+{
686
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
687
+ int bit55 = extract64(addr, 55, 1);
688
+
689
+ /* Remove mtedesc from the normal sve descriptor. */
690
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
691
+
692
+ /* Perform gross MTE suppression early. */
693
+ if (!tbi_check(desc, bit55) ||
694
+ tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
695
+ mtedesc = 0;
696
+ }
697
+
698
+ sme_st1(env, za, vg, addr, desc, ra, esz, mtedesc,
699
+ vertical, host_fn, tlb_fn);
700
+}
701
+
702
+#define DO_ST(L, END, ESZ) \
703
+void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg, \
704
+ target_ulong addr, uint32_t desc) \
705
+{ \
706
+ sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \
707
+ sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \
708
+} \
709
+void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg, \
710
+ target_ulong addr, uint32_t desc) \
711
+{ \
712
+ sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \
713
+ sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \
714
+} \
715
+void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \
716
+ target_ulong addr, uint32_t desc) \
717
+{ \
718
+ sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \
719
+ sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \
720
+} \
721
+void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \
722
+ target_ulong addr, uint32_t desc) \
723
+{ \
724
+ sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \
725
+ sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \
726
+}
727
+
728
+DO_ST(b, , MO_8)
729
+DO_ST(h, _be, MO_16)
730
+DO_ST(h, _le, MO_16)
731
+DO_ST(s, _be, MO_32)
732
+DO_ST(s, _le, MO_32)
733
+DO_ST(d, _be, MO_64)
734
+DO_ST(d, _le, MO_64)
735
+DO_ST(q, _be, MO_128)
736
+DO_ST(q, _le, MO_128)
737
+
738
+#undef DO_ST
739
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
740
index XXXXXXX..XXXXXXX 100644
741
--- a/target/arm/translate-sme.c
742
+++ b/target/arm/translate-sme.c
743
@@ -XXX,XX +XXX,XX @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
744
745
return true;
746
}
747
+
748
+static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
749
+{
750
+ typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32);
751
+
752
+ /*
753
+ * Indexed by [esz][be][v][mte][st], which is (except for load/store)
754
+ * also the order in which the elements appear in the function names,
755
+ * and so how we must concatenate the pieces.
756
+ */
757
+
758
+#define FN_LS(F) { gen_helper_sme_ld1##F, gen_helper_sme_st1##F }
759
+#define FN_MTE(F) { FN_LS(F), FN_LS(F##_mte) }
760
+#define FN_HV(F) { FN_MTE(F##_h), FN_MTE(F##_v) }
761
+#define FN_END(L, B) { FN_HV(L), FN_HV(B) }
762
+
763
+ static GenLdSt1 * const fns[5][2][2][2][2] = {
764
+ FN_END(b, b),
765
+ FN_END(h_le, h_be),
766
+ FN_END(s_le, s_be),
767
+ FN_END(d_le, d_be),
768
+ FN_END(q_le, q_be),
769
+ };
770
+
771
+#undef FN_LS
772
+#undef FN_MTE
773
+#undef FN_HV
774
+#undef FN_END
775
+
776
+ TCGv_ptr t_za, t_pg;
777
+ TCGv_i64 addr;
778
+ int svl, desc = 0;
779
+ bool be = s->be_data == MO_BE;
780
+ bool mte = s->mte_active[0];
781
+
782
+ if (!dc_isar_feature(aa64_sme, s)) {
783
+ return false;
784
+ }
785
+ if (!sme_smza_enabled_check(s)) {
786
+ return true;
787
+ }
788
+
789
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
790
+ t_pg = pred_full_reg_ptr(s, a->pg);
791
+ addr = tcg_temp_new_i64();
792
+
793
+ tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz);
794
+ tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
795
+
796
+ if (mte) {
797
+ desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
798
+ desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
799
+ desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
800
+ desc = FIELD_DP32(desc, MTEDESC, WRITE, a->st);
801
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << a->esz) - 1);
802
+ desc <<= SVE_MTEDESC_SHIFT;
803
+ } else {
804
+ addr = clean_data_tbi(s, addr);
805
+ }
806
+ svl = streaming_vec_reg_size(s);
807
+ desc = simd_desc(svl, svl, desc);
808
+
809
+ fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr,
810
+ tcg_constant_i32(desc));
811
+
812
+ tcg_temp_free_ptr(t_za);
813
+ tcg_temp_free_ptr(t_pg);
814
+ tcg_temp_free_i64(addr);
815
+ return true;
816
+}
817
--
818
2.25.1

From: Alexander Graf <agraf@csgraf.de>

The SMCCC 1.3 spec section 5.2 says

The Unknown SMC Function Identifier is a sign-extended value of (-1)
that is returned in the R0, W0 or X0 registers. An implementation must
return this error code when it receives:

* An SMC or HVC call with an unknown Function Identifier
* An SMC or HVC call for a removed Function Identifier
* An SMC64/HVC64 call from AArch32 state

To comply with these statements, let's always return -1 when we encounter
an unknown HVC or SMC call.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/psci.c | 35 ++++++-----------------------------
1 file changed, 6 insertions(+), 29 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Add a TCGv_ptr base argument, which will be cpu_env for SVE.
We will reuse this for SME save and restore array insns.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-22-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-a64.h | 3 +++
target/arm/translate-sve.c | 48 ++++++++++++++++++++++++----------
2 files changed, 39 insertions(+), 12 deletions(-)
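
As a concrete reading of the SMCCC 1.3 rule quoted in the first commit message above, here is a minimal standalone sketch. It is not QEMU's psci.c: the dispatcher and the example function ID are invented, and only the sign-extended -1 return value is the point.

#include <stdint.h>
#include <stdio.h>

/* SMCCC: the "Unknown Function Identifier" result is the sign-extended value -1. */
#define SMCCC_RET_NOT_SUPPORTED ((uint64_t)-1)

static uint64_t toy_handle_smc(uint32_t fn_id)
{
    switch (fn_id) {
    case 0x84000000u:                /* e.g. PSCI_VERSION */
        return 0x10001;
    default:
        /* Unknown, removed, or SMC64 from AArch32: always return -1. */
        return SMCCC_RET_NOT_SUPPORTED;
    }
}

int main(void)
{
    printf("0x%llx\n", (unsigned long long)toy_handle_smc(0xdeadbeef));
    return 0;
}

The psci.c diff below follows the same shape: arm_is_psci_call() stops filtering on the function number, and the default case of arm_handle_psci_call() now returns QEMU_PSCI_RET_NOT_SUPPORTED instead of asserting.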
22
14
23
diff --git a/target/arm/psci.c b/target/arm/psci.c
15
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
24
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
25
--- a/target/arm/psci.c
17
--- a/target/arm/translate-a64.h
26
+++ b/target/arm/psci.c
18
+++ b/target/arm/translate-a64.h
27
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
28
20
uint32_t rm_ofs, int64_t shift,
29
bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
21
uint32_t opr_sz, uint32_t max_sz);
22
23
+void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
24
+void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
25
+
26
#endif /* TARGET_ARM_TRANSLATE_A64_H */
27
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/translate-sve.c
30
+++ b/target/arm/translate-sve.c
31
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
32
* The load should begin at the address Rn + IMM.
33
*/
34
35
-static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
36
+void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
37
+ int len, int rn, int imm)
30
{
38
{
31
- /* Return true if the r0/x0 value indicates a PSCI call and
39
int len_align = QEMU_ALIGN_DOWN(len, 8);
32
- * the exception type matches the configured PSCI conduit. This is
40
int len_remain = len % 8;
33
- * called before the SMC/HVC instruction is executed, to decide whether
41
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
34
- * we should treat it as a PSCI call or with the architecturally
42
t0 = tcg_temp_new_i64();
35
+ /*
43
for (i = 0; i < len_align; i += 8) {
36
+ * Return true if the exception type matches the configured PSCI conduit.
44
tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
37
+ * This is called before the SMC/HVC instruction is executed, to decide
45
- tcg_gen_st_i64(t0, cpu_env, vofs + i);
38
+ * whether we should treat it as a PSCI call or with the architecturally
46
+ tcg_gen_st_i64(t0, base, vofs + i);
39
* defined behaviour for an SMC or HVC (which might be UNDEF or trap
47
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
40
* to EL2 or to EL3).
48
}
41
*/
49
tcg_temp_free_i64(t0);
42
- CPUARMState *env = &cpu->env;
50
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
clean_addr = new_tmp_a64_local(s);
tcg_gen_mov_i64(clean_addr, t0);

+ if (base != cpu_env) {
+ TCGv_ptr b = tcg_temp_local_new_ptr();
+ tcg_gen_mov_ptr(b, base);
+ base = b;
+ }
+
gen_set_label(loop);

t0 = tcg_temp_new_i64();
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
tcg_gen_addi_i64(clean_addr, clean_addr, 8);

tp = tcg_temp_new_ptr();
- tcg_gen_add_ptr(tp, cpu_env, i);
+ tcg_gen_add_ptr(tp, base, i);
tcg_gen_addi_ptr(i, i, 8);
tcg_gen_st_i64(t0, tp, vofs);
tcg_temp_free_ptr(tp);
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)

tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
tcg_temp_free_ptr(i);
+
+ if (base != cpu_env) {
+ tcg_temp_free_ptr(base);
+ assert(len_remain == 0);
+ }
}

/*
@@ -XXX,XX +XXX,XX @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
default:
g_assert_not_reached();
}
- tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
+ tcg_gen_st_i64(t0, base, vofs + len_align);
tcg_temp_free_i64(t0);
}
}

/* Similarly for stores. */
-static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
+void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
+ int len, int rn, int imm)
{
int len_align = QEMU_ALIGN_DOWN(len, 8);
int len_remain = len % 8;
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)

t0 = tcg_temp_new_i64();
for (i = 0; i < len_align; i += 8) {
- tcg_gen_ld_i64(t0, cpu_env, vofs + i);
+ tcg_gen_ld_i64(t0, base, vofs + i);
tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
}
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
clean_addr = new_tmp_a64_local(s);
tcg_gen_mov_i64(clean_addr, t0);

+ if (base != cpu_env) {
+ TCGv_ptr b = tcg_temp_local_new_ptr();
+ tcg_gen_mov_ptr(b, base);
+ base = b;
+ }
+
gen_set_label(loop);

t0 = tcg_temp_new_i64();
tp = tcg_temp_new_ptr();
- tcg_gen_add_ptr(tp, cpu_env, i);
+ tcg_gen_add_ptr(tp, base, i);
tcg_gen_ld_i64(t0, tp, vofs);
tcg_gen_addi_ptr(i, i, 8);
tcg_temp_free_ptr(tp);
@@ -XXX,XX +XXX,XX @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)

tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
tcg_temp_free_ptr(i);
+
+ if (base != cpu_env) {
+ tcg_temp_free_ptr(base);
+ assert(len_remain == 0);
+ }
}

/* Predicate register stores can be any multiple of 2. */
if (len_remain) {
t0 = tcg_temp_new_i64();
- tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
+ tcg_gen_ld_i64(t0, base, vofs + len_align);

switch (len_remain) {
case 2:
@@ -XXX,XX +XXX,XX @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = vec_full_reg_size(s);
int off = vec_full_reg_offset(s, a->rd);
- do_ldr(s, off, size, a->rn, a->imm * size);
+ gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = pred_full_reg_size(s);
int off = pred_full_reg_offset(s, a->rd);
- do_ldr(s, off, size, a->rn, a->imm * size);
+ gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = vec_full_reg_size(s);
int off = vec_full_reg_offset(s, a->rd);
- do_str(s, off, size, a->rn, a->imm * size);
+ gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
}
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = pred_full_reg_size(s);
int off = pred_full_reg_offset(s, a->rd);
- do_str(s, off, size, a->rn, a->imm * size);
+ gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
}
return true;
}
--
2.25.1
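As a side note for readers who do not work on the TCG frontend: the change above only threads an explicit 'base' pointer through the existing SVE load/store expanders. A rough host-level C sketch of what the generated code ends up doing at runtime (illustrative only, not QEMU code; the function name and the byte-wise remainder handling are made up for the example) looks like this:

/* Copy 'len' bytes of guest memory into CPU state at byte offset
 * 'vofs' from 'base', eight bytes at a time, then the remainder. */
#include <stdint.h>
#include <string.h>

static void sve_ldr_sketch(uint8_t *base, unsigned vofs,
                           const uint8_t *mem, unsigned len)
{
    unsigned len_align = len & ~7u;   /* QEMU_ALIGN_DOWN(len, 8) */
    unsigned i;

    for (i = 0; i < len_align; i += 8) {
        memcpy(base + vofs + i, mem + i, 8);   /* one 64-bit chunk */
    }
    for (; i < len; i++) {                     /* 0..7 trailing bytes */
        base[vofs + i] = mem[i];
    }
}

The 'base != cpu_env' copy into a tcg_temp_local_new_ptr() exists because, unlike the fixed cpu_env pointer, an ordinary temporary has to be made local to survive the branch back to the 'loop' label.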
From: Richard Henderson <richard.henderson@linaro.org>

We can reuse the SVE functions for LDR and STR, passing in the
base of the ZA vector and a zero offset.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-23-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/sme.decode | 7 +++++++
target/arm/translate-sme.c | 24 ++++++++++++++++++++++++
2 files changed, 31 insertions(+)

diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
&ldst rs=%mova_rs
LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
&ldst esz=4 rs=%mova_rs
+
+&ldstr rv rn imm
+@ldstr ....... ... . ...... .. ... rn:5 . imm:4 \
+ &ldstr rv=%mova_rs
+
+LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
+STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
tcg_temp_free_i64(addr);
return true;
}
+
+typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int);
+
+static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)
+{
+ int svl = streaming_vec_reg_size(s);
+ int imm = a->imm;
+ TCGv_ptr base;
+
+ if (!sme_za_enabled_check(s)) {
+ return true;
+ }
+
+ /* ZA[n] equates to ZA0H.B[n]. */
+ base = get_tile_rowcol(s, MO_8, a->rv, imm, false);
+
+ fn(s, base, 0, svl, a->rn, imm * svl);
+
+ tcg_temp_free_ptr(base);
+ return true;
+}
+
+TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
+TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
--
2.25.1
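For orientation, the "ZA[n] equates to ZA0H.B[n]" comment above carries the key idea: SME LDR/STR move one horizontal byte slice of tile ZA0, so reusing gen_sve_ldr/gen_sve_str with the slice pointer as base and a zero offset is enough. A minimal C sketch of the data movement (illustrative only, not QEMU code; the function name and the assumption of a row-major ZA backing store are mine):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Load SVL bytes from memory into horizontal byte slice n of ZA0,
 * i.e. ZA[n], with the ZA array stored one row of svl_bytes per slice. */
static void za_ldr_sketch(uint8_t *za, unsigned svl_bytes,
                          unsigned n, const uint8_t *mem)
{
    memcpy(za + (size_t)n * svl_bytes, mem, svl_bytes);
}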
From: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-24-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-sme.h | 5 +++
target/arm/sme.decode | 11 +++++
target/arm/sme_helper.c | 90 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-sme.c | 31 +++++++++++++
4 files changed, 137 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i
DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \

LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
+
+### SME Add Vector to Array
+
+&adda zad zn pm pn
+@adda_32 ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda
+@adda_64 ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda
+
+ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32
+ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32
+ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64
+ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@ DO_ST(q, _be, MO_128)
DO_ST(q, _le, MO_128)

#undef DO_ST
+
+void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn,
+ void *vpm, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
+ uint64_t *pn = vpn, *pm = vpm;
+ uint32_t *zda = vzda, *zn = vzn;
+
+ for (row = 0; row < oprsz; ) {
+ uint64_t pa = pn[row >> 4];
+ do {
+ if (pa & 1) {
+ for (col = 0; col < oprsz; ) {
+ uint64_t pb = pm[col >> 4];
+ do {
+ if (pb & 1) {
+ zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)];
+ }
+ pb >>= 4;
+ } while (++col & 15);
+ }
+ }
+ pa >>= 4;
+ } while (++row & 15);
+ }
+}
+
+void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn,
+ void *vpm, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+ uint8_t *pn = vpn, *pm = vpm;
+ uint64_t *zda = vzda, *zn = vzn;
+
+ for (row = 0; row < oprsz; ++row) {
+ if (pn[H1(row)] & 1) {
+ for (col = 0; col < oprsz; ++col) {
+ if (pm[H1(col)] & 1) {
+ zda[tile_vslice_index(row) + col] += zn[col];
+ }
+ }
+ }
+ }
+}
+
+void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn,
+ void *vpm, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 4;
+ uint64_t *pn = vpn, *pm = vpm;
+ uint32_t *zda = vzda, *zn = vzn;
+
+ for (row = 0; row < oprsz; ) {
+ uint64_t pa = pn[row >> 4];
+ do {
+ if (pa & 1) {
+ uint32_t zn_row = zn[H4(row)];
+ for (col = 0; col < oprsz; ) {
+ uint64_t pb = pm[col >> 4];
+ do {
+ if (pb & 1) {
+ zda[tile_vslice_index(row) + H4(col)] += zn_row;
+ }
+ pb >>= 4;
+ } while (++col & 15);
+ }
+ }
+ pa >>= 4;
+ } while (++row & 15);
+ }
+}
+
+void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
+ void *vpm, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+ uint8_t *pn = vpn, *pm = vpm;
+ uint64_t *zda = vzda, *zn = vzn;
+
+ for (row = 0; row < oprsz; ++row) {
+ if (pn[H1(row)] & 1) {
+ uint64_t zn_row = zn[row];
+ for (col = 0; col < oprsz; ++col) {
+ if (pm[H1(col)] & 1) {
+ zda[tile_vslice_index(row) + col] += zn_row;
+ }
+ }
+ }
+ }
+}
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)

TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
+
+static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz,
+ gen_helper_gvec_4 *fn)
+{
+ int svl = streaming_vec_reg_size(s);
+ uint32_t desc = simd_desc(svl, svl, 0);
+ TCGv_ptr za, zn, pn, pm;
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ /* Sum XZR+zad to find ZAd. */
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
+ zn = vec_full_reg_ptr(s, a->zn);
+ pn = pred_full_reg_ptr(s, a->pn);
+ pm = pred_full_reg_ptr(s, a->pm);
+
+ fn(za, zn, pn, pm, tcg_constant_i32(desc));
+
+ tcg_temp_free_ptr(za);
+ tcg_temp_free_ptr(zn);
+ tcg_temp_free_ptr(pn);
+ tcg_temp_free_ptr(pm);
+ return true;
+}
+
+TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s)
+TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
+TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
+TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
--
2.25.1
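To make the predication in the ADDHA/ADDVA helpers above easier to follow, here is a scalar C sketch of the 32-bit ADDHA semantics (illustrative only, not the QEMU helper; the function name is made up and per-element booleans stand in for the packed predicate bits):

#include <stdbool.h>
#include <stdint.h>

/* For every Pn-active row and Pm-active column, ZA[row][col] += Zn[col]. */
static void addha_s_sketch(uint32_t *za, const uint32_t *zn,
                           const bool *pn, const bool *pm, unsigned dim)
{
    for (unsigned row = 0; row < dim; row++) {
        if (!pn[row]) {
            continue;                       /* row predicate gates the row */
        }
        for (unsigned col = 0; col < dim; col++) {
            if (pm[col]) {
                za[row * dim + col] += zn[col];
            }
        }
    }
}

ADDVA differs only in that the accumulated value is Zn[row] rather than Zn[col], which is why the helpers above hoist zn_row out of the inner loop.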
From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-25-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-sme.h | 5 +++
target/arm/sme.decode | 9 +++++
target/arm/sme_helper.c | 69 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-sme.c | 32 ++++++++++++++++++
4 files changed, 115 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-sme.h
+++ b/target/arm/helper-sme.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme.decode
+++ b/target/arm/sme.decode
@@ -XXX,XX +XXX,XX @@ ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32
ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32
ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64
ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
+
+### SME Outer Product
+
+&op zad zn zm pm pn sub:bool
+@op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op
+@op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op
+
+FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
+FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sme_helper.c
+++ b/target/arm/sme_helper.c
@@ -XXX,XX +XXX,XX @@
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "qemu/int128.h"
+#include "fpu/softfloat.h"
#include "vec_internal.h"
#include "sve_ldst_internal.h"

@@ -XXX,XX +XXX,XX @@ void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
}
}
+
+void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, void *vst, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_maxsz(desc);
+ uint32_t neg = simd_data(desc) << 31;
+ uint16_t *pn = vpn, *pm = vpm;
+ float_status fpst;
+
+ /*
+ * Make a copy of float_status because this operation does not
+ * update the cumulative fp exception status. It also produces
+ * default nans.
+ */
+ fpst = *(float_status *)vst;
+ set_default_nan_mode(true, &fpst);
+
+ for (row = 0; row < oprsz; ) {
+ uint16_t pa = pn[H2(row >> 4)];
+ do {
+ if (pa & 1) {
+ void *vza_row = vza + tile_vslice_offset(row);
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg;
+
+ for (col = 0; col < oprsz; ) {
+ uint16_t pb = pm[H2(col >> 4)];
+ do {
+ if (pb & 1) {
+ uint32_t *a = vza_row + H1_4(col);
+ uint32_t *m = vzm + H1_4(col);
+ *a = float32_muladd(n, *m, *a, 0, vst);
+ }
+ col += 4;
+ pb >>= 4;
+ } while (col & 15);
+ }
+ }
+ row += 4;
+ pa >>= 4;
+ } while (row & 15);
+ }
+}
+
+void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, void *vst, uint32_t desc)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
+ uint64_t neg = (uint64_t)simd_data(desc) << 63;
+ uint64_t *za = vza, *zn = vzn, *zm = vzm;
+ uint8_t *pn = vpn, *pm = vpm;
+ float_status fpst = *(float_status *)vst;
+
+ set_default_nan_mode(true, &fpst);
+
+ for (row = 0; row < oprsz; ++row) {
+ if (pn[H1(row)] & 1) {
+ uint64_t *za_row = &za[tile_vslice_index(row)];
+ uint64_t n = zn[row] ^ neg;
+
+ for (col = 0; col < oprsz; ++col) {
+ if (pm[H1(col)] & 1) {
+ uint64_t *a = &za_row[col];
+ *a = float64_muladd(n, zm[col], *a, 0, &fpst);
+ }
+ }
+ }
+ }
+}
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-sme.c
+++ b/target/arm/translate-sme.c
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s)
TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
+
+static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
+ gen_helper_gvec_5_ptr *fn)
+{
+ int svl = streaming_vec_reg_size(s);
+ uint32_t desc = simd_desc(svl, svl, a->sub);
+ TCGv_ptr za, zn, zm, pn, pm, fpst;
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
288
+ * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
289
+ */
290
+ int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
291
+ return gdb_get_reg64(buf, vq * 2);
292
+ }
293
+ default:
294
+ /* gdbstub asked for something out our range */
295
+ qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
296
+ break;
297
+ }
146
+ }
298
+
147
+
299
+ return 0;
148
+ /* Sum XZR+zad to find ZAd. */
149
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
150
+ zn = vec_full_reg_ptr(s, a->zn);
151
+ zm = vec_full_reg_ptr(s, a->zm);
152
+ pn = pred_full_reg_ptr(s, a->pn);
153
+ pm = pred_full_reg_ptr(s, a->pm);
154
+ fpst = fpstatus_ptr(FPST_FPCR);
155
+
156
+ fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc));
157
+
158
+ tcg_temp_free_ptr(za);
159
+ tcg_temp_free_ptr(zn);
160
+ tcg_temp_free_ptr(pn);
161
+ tcg_temp_free_ptr(pm);
162
+ tcg_temp_free_ptr(fpst);
163
+ return true;
300
+}
164
+}
301
+
165
+
302
+int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
166
+TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
303
+{
167
+TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
304
+ ARMCPU *cpu = env_archcpu(env);
305
+
306
+ /* The first 32 registers are the zregs */
307
+ switch (reg) {
308
+ /* The first 32 registers are the zregs */
309
+ case 0 ... 31:
310
+ {
311
+ int vq, len = 0;
312
+ uint64_t *p = (uint64_t *) buf;
313
+ for (vq = 0; vq < cpu->sve_max_vq; vq++) {
314
+ env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
315
+ env->vfp.zregs[reg].d[vq * 2] = *p++;
316
+ len += 16;
317
+ }
318
+ return len;
319
+ }
320
+ case 32:
321
+ vfp_set_fpsr(env, *(uint32_t *)buf);
322
+ return 4;
323
+ case 33:
324
+ vfp_set_fpcr(env, *(uint32_t *)buf);
325
+ return 4;
326
+ case 34 ... 50:
327
+ {
328
+ int preg = reg - 34;
329
+ int vq, len = 0;
330
+ uint64_t *p = (uint64_t *) buf;
331
+ for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
332
+ env->vfp.pregs[preg].p[vq / 4] = *p++;
333
+ len += 8;
334
+ }
335
+ return len;
336
+ }
337
+ case 51:
338
+ /* cannot set vg via gdbstub */
339
+ return 0;
340
+ default:
341
+ /* gdbstub asked for something out our range */
342
+ break;
343
+ }
344
+
345
+ return 0;
346
+}
347
diff --git a/target/arm/helper.c b/target/arm/helper.c
348
index XXXXXXX..XXXXXXX 100644
349
--- a/target/arm/helper.c
350
+++ b/target/arm/helper.c
351
@@ -XXX,XX +XXX,XX @@
352
#include "trace.h"
353
#include "cpu.h"
354
#include "internals.h"
355
-#include "exec/gdbstub.h"
356
#include "exec/helper-proto.h"
357
#include "qemu/host-utils.h"
358
#include "qemu/main-loop.h"
359
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
360
static void switch_mode(CPUARMState *env, int mode);
361
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
362
363
-static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
364
-{
365
- ARMCPU *cpu = env_archcpu(env);
366
- int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
367
-
368
- /* VFP data registers are always little-endian. */
369
- if (reg < nregs) {
370
- return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
371
- }
372
- if (arm_feature(env, ARM_FEATURE_NEON)) {
373
- /* Aliases for Q regs. */
374
- nregs += 16;
375
- if (reg < nregs) {
376
- uint64_t *q = aa32_vfp_qreg(env, reg - 32);
377
- return gdb_get_reg128(buf, q[0], q[1]);
378
- }
379
- }
380
- switch (reg - nregs) {
381
- case 0:
382
- return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]);
383
- case 1:
384
- return gdb_get_reg32(buf, vfp_get_fpscr(env));
385
- case 2:
386
- return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]);
387
- }
388
- return 0;
389
-}
390
-
391
-static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
392
-{
393
- ARMCPU *cpu = env_archcpu(env);
394
- int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
395
-
396
- if (reg < nregs) {
397
- *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
398
- return 8;
399
- }
400
- if (arm_feature(env, ARM_FEATURE_NEON)) {
401
- nregs += 16;
402
- if (reg < nregs) {
403
- uint64_t *q = aa32_vfp_qreg(env, reg - 32);
404
- q[0] = ldq_le_p(buf);
405
- q[1] = ldq_le_p(buf + 8);
406
- return 16;
407
- }
408
- }
409
- switch (reg - nregs) {
410
- case 0:
411
- env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf);
412
- return 4;
413
- case 1:
414
- vfp_set_fpscr(env, ldl_p(buf));
415
- return 4;
416
- case 2:
417
- env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30);
418
- return 4;
419
- }
420
- return 0;
421
-}
422
-
423
-static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
424
-{
425
- switch (reg) {
426
- case 0 ... 31:
427
- {
428
- /* 128 bit FP register - quads are in LE order */
429
- uint64_t *q = aa64_vfp_qreg(env, reg);
430
- return gdb_get_reg128(buf, q[1], q[0]);
431
- }
432
- case 32:
433
- /* FPSR */
434
- return gdb_get_reg32(buf, vfp_get_fpsr(env));
435
- case 33:
436
- /* FPCR */
437
- return gdb_get_reg32(buf, vfp_get_fpcr(env));
438
- default:
439
- return 0;
440
- }
441
-}
442
-
443
-static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
444
-{
445
- switch (reg) {
446
- case 0 ... 31:
447
- /* 128 bit FP register */
448
- {
449
- uint64_t *q = aa64_vfp_qreg(env, reg);
450
- q[0] = ldq_le_p(buf);
451
- q[1] = ldq_le_p(buf + 8);
452
- return 16;
453
- }
454
- case 32:
455
- /* FPSR */
456
- vfp_set_fpsr(env, ldl_p(buf));
457
- return 4;
458
- case 33:
459
- /* FPCR */
460
- vfp_set_fpcr(env, ldl_p(buf));
461
- return 4;
462
- default:
463
- return 0;
464
- }
465
-}
466
-
467
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
468
{
469
assert(ri->fieldoffset);
470
@@ -XXX,XX +XXX,XX @@ static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
471
}
472
}
473
474
-/**
475
- * arm_get/set_gdb_*: get/set a gdb register
476
- * @env: the CPU state
477
- * @buf: a buffer to copy to/from
478
- * @reg: register number (offset from start of group)
479
- *
480
- * We return the number of bytes copied
481
- */
482
-
483
-static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
484
-{
485
- ARMCPU *cpu = env_archcpu(env);
486
- const ARMCPRegInfo *ri;
487
- uint32_t key;
488
-
489
- key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
490
- ri = get_arm_cp_reginfo(cpu->cp_regs, key);
491
- if (ri) {
492
- if (cpreg_field_is_64bit(ri)) {
493
- return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
494
- } else {
495
- return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
496
- }
497
- }
498
- return 0;
499
-}
500
-
501
-static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
502
-{
503
- return 0;
504
-}
505
-
506
-#ifdef TARGET_AARCH64
507
-static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
508
-{
509
- ARMCPU *cpu = env_archcpu(env);
510
-
511
- switch (reg) {
512
- /* The first 32 registers are the zregs */
513
- case 0 ... 31:
514
- {
515
- int vq, len = 0;
516
- for (vq = 0; vq < cpu->sve_max_vq; vq++) {
517
- len += gdb_get_reg128(buf,
518
- env->vfp.zregs[reg].d[vq * 2 + 1],
519
- env->vfp.zregs[reg].d[vq * 2]);
520
- }
521
- return len;
522
- }
523
- case 32:
524
- return gdb_get_reg32(buf, vfp_get_fpsr(env));
525
- case 33:
526
- return gdb_get_reg32(buf, vfp_get_fpcr(env));
527
- /* then 16 predicates and the ffr */
528
- case 34 ... 50:
529
- {
530
- int preg = reg - 34;
531
- int vq, len = 0;
532
- for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
533
- len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
534
- }
535
- return len;
536
- }
537
- case 51:
538
- {
539
- /*
540
- * We report in Vector Granules (VG) which is 64bit in a Z reg
541
- * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
542
- */
543
- int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
544
- return gdb_get_reg64(buf, vq * 2);
545
- }
546
- default:
547
- /* gdbstub asked for something out our range */
548
- qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
549
- break;
550
- }
551
-
552
- return 0;
553
-}
554
-
555
-static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
556
-{
557
- ARMCPU *cpu = env_archcpu(env);
558
-
559
- /* The first 32 registers are the zregs */
560
- switch (reg) {
561
- /* The first 32 registers are the zregs */
562
- case 0 ... 31:
563
- {
564
- int vq, len = 0;
565
- uint64_t *p = (uint64_t *) buf;
566
- for (vq = 0; vq < cpu->sve_max_vq; vq++) {
567
- env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
568
- env->vfp.zregs[reg].d[vq * 2] = *p++;
569
- len += 16;
570
- }
571
- return len;
572
- }
573
- case 32:
574
- vfp_set_fpsr(env, *(uint32_t *)buf);
575
- return 4;
576
- case 33:
577
- vfp_set_fpcr(env, *(uint32_t *)buf);
578
- return 4;
579
- case 34 ... 50:
580
- {
581
- int preg = reg - 34;
582
- int vq, len = 0;
583
- uint64_t *p = (uint64_t *) buf;
584
- for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
585
- env->vfp.pregs[preg].p[vq / 4] = *p++;
586
- len += 8;
587
- }
588
- return len;
589
- }
590
- case 51:
591
- /* cannot set vg via gdbstub */
592
- return 0;
593
- default:
594
- /* gdbstub asked for something out our range */
595
- break;
596
- }
597
-
598
- return 0;
599
-}
600
-#endif /* TARGET_AARCH64 */
601
-
602
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
603
{
604
/* Return true if the regdef would cause an assertion if you called
605
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
606
#endif
607
}
608
609
-void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
610
-{
611
- CPUState *cs = CPU(cpu);
612
- CPUARMState *env = &cpu->env;
613
-
614
- if (arm_feature(env, ARM_FEATURE_AARCH64)) {
615
- /*
616
- * The lower part of each SVE register aliases to the FPU
617
- * registers so we don't need to include both.
618
- */
619
-#ifdef TARGET_AARCH64
620
- if (isar_feature_aa64_sve(&cpu->isar)) {
621
- gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg,
622
- arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs),
623
- "sve-registers.xml", 0);
624
- } else
625
-#endif
626
- {
627
- gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
628
- aarch64_fpu_gdb_set_reg,
629
- 34, "aarch64-fpu.xml", 0);
630
- }
631
- } else if (arm_feature(env, ARM_FEATURE_NEON)) {
632
- gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
633
- 51, "arm-neon.xml", 0);
634
- } else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
635
- gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
636
- 35, "arm-vfp3.xml", 0);
637
- } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
638
- gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
639
- 19, "arm-vfp.xml", 0);
640
- }
641
- gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
642
- arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
643
- "system-registers.xml", 0);
644
-
645
-}
646
-
647
/* Sort alphabetically by type name, except for "any". */
648
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
649
{
--
2.20.1

--
2.25.1

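As a reference for what the sme_fmopa_s/sme_fmopa_d helpers above compute:
the non-widening FMOPA/FMOPS is an outer product of the two source vectors,
accumulated into a ZA tile under two governing predicates, with FMOPS
negating the Zn operand (the simd_data(desc) bit in the helpers). A minimal
standalone sketch of that semantics, ignoring the default-NaN and fused
multiply-add details the real helpers take care of (names here are
illustrative, not QEMU APIs):

#include <stdbool.h>
#include <stddef.h>

/*
 * za is an elems x elems tile of float32 accumulators; elems = SVL / 32.
 * pn gates rows, pm gates columns; fmops selects the negated form.
 */
static void fmopa_s_ref(float *za, const float *zn, const float *zm,
                        const bool *pn, const bool *pm,
                        size_t elems, bool fmops)
{
    for (size_t row = 0; row < elems; row++) {
        if (!pn[row]) {
            continue;               /* inactive row: ZA row unchanged */
        }
        float n = fmops ? -zn[row] : zn[row];
        for (size_t col = 0; col < elems; col++) {
            if (pm[col]) {
                za[row * elems + col] += n * zm[col];
            }
        }
    }
}
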
Currently we send VFP XML which includes D0..D15 or D0..D31, plus
FPSID, FPSCR and FPEXC. The upstream GDB tolerates this, but its
definition of this XML feature does not include FPSID or FPEXC. In
particular, for M-profile cores there are no FPSID or FPEXC
registers, so advertising those is wrong.

Move FPSID and FPEXC into their own bit of XML which we only send for
A and R profile cores. This brings our definition of the XML
org.gnu.gdb.arm.vfp feature into line with GDB's own (at least for
non-Neon cores...) and means we don't claim to have FPSID and FPEXC
on M-profile.

(It seems unlikely to me that any gdbstub users really care about
being able to look at FPEXC and FPSID; but we've supplied them to gdb
for a decade and it's not hard to keep doing so.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210921162901.17508-5-peter.maydell@linaro.org
---
configs/targets/aarch64-softmmu.mak | 2 +-
configs/targets/arm-linux-user.mak | 2 +-
configs/targets/arm-softmmu.mak | 2 +-
configs/targets/armeb-linux-user.mak | 2 +-
target/arm/gdbstub.c | 56 ++++++++++++++++++++--------
gdb-xml/arm-neon.xml | 2 -
gdb-xml/arm-vfp-sysregs.xml | 17 +++++++++
gdb-xml/arm-vfp.xml | 2 -
gdb-xml/arm-vfp3.xml | 2 -
9 files changed, 61 insertions(+), 26 deletions(-)
create mode 100644 gdb-xml/arm-vfp-sysregs.xml

From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-26-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-sme.h | 2 ++
target/arm/sme.decode | 2 ++
target/arm/sme_helper.c | 56 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-sme.c | 30 ++++++++++++++++++++
4 files changed, 90 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
diff --git a/configs/targets/aarch64-softmmu.mak b/configs/targets/aarch64-softmmu.mak
34
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
35
--- a/configs/targets/aarch64-softmmu.mak
16
--- a/target/arm/helper-sme.h
36
+++ b/configs/targets/aarch64-softmmu.mak
17
+++ b/target/arm/helper-sme.h
37
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
38
TARGET_ARCH=aarch64
19
void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
39
TARGET_BASE_ARCH=arm
20
DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
40
TARGET_SUPPORTS_MTTCG=y
21
void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
41
-TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml
22
+DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG,
42
+TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml
23
+ void, ptr, ptr, ptr, ptr, ptr, i32)
43
TARGET_NEED_FDT=y
24
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
44
diff --git a/configs/targets/arm-linux-user.mak b/configs/targets/arm-linux-user.mak
45
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
46
--- a/configs/targets/arm-linux-user.mak
26
--- a/target/arm/sme.decode
47
+++ b/configs/targets/arm-linux-user.mak
27
+++ b/target/arm/sme.decode
48
@@ -XXX,XX +XXX,XX @@
28
@@ -XXX,XX +XXX,XX @@ ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
49
TARGET_ARCH=arm
29
50
TARGET_SYSTBL_ABI=common,oabi
30
FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
51
TARGET_SYSTBL=syscall.tbl
31
FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
52
-TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml
32
+
53
+TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml
33
+BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
54
TARGET_HAS_BFLT=y
34
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
55
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
56
diff --git a/configs/targets/arm-softmmu.mak b/configs/targets/arm-softmmu.mak
57
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
58
--- a/configs/targets/arm-softmmu.mak
36
--- a/target/arm/sme_helper.c
59
+++ b/configs/targets/arm-softmmu.mak
37
+++ b/target/arm/sme_helper.c
60
@@ -XXX,XX +XXX,XX @@
38
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
61
TARGET_ARCH=arm
62
TARGET_SUPPORTS_MTTCG=y
63
-TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml
64
+TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml
65
TARGET_NEED_FDT=y
66
diff --git a/configs/targets/armeb-linux-user.mak b/configs/targets/armeb-linux-user.mak
67
index XXXXXXX..XXXXXXX 100644
68
--- a/configs/targets/armeb-linux-user.mak
69
+++ b/configs/targets/armeb-linux-user.mak
70
@@ -XXX,XX +XXX,XX @@ TARGET_ARCH=arm
71
TARGET_SYSTBL_ABI=common,oabi
72
TARGET_SYSTBL=syscall.tbl
73
TARGET_WORDS_BIGENDIAN=y
74
-TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml
75
+TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml
76
TARGET_HAS_BFLT=y
77
CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y
78
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
79
index XXXXXXX..XXXXXXX 100644
80
--- a/target/arm/gdbstub.c
81
+++ b/target/arm/gdbstub.c
82
@@ -XXX,XX +XXX,XX @@ static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
83
}
84
switch (reg - nregs) {
85
case 0:
86
- return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]);
87
- case 1:
88
return gdb_get_reg32(buf, vfp_get_fpscr(env));
89
- case 2:
90
- return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]);
91
}
92
return 0;
93
}
94
@@ -XXX,XX +XXX,XX @@ static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
95
}
39
}
96
}
40
}
97
switch (reg - nregs) {
41
}
98
+ case 0:
42
+
99
+ vfp_set_fpscr(env, ldl_p(buf));
43
+/*
100
+ return 4;
44
+ * Alter PAIR as needed for controlling predicates being false,
45
+ * and for NEG on an enabled row element.
46
+ */
47
+static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
48
+{
49
+ /*
50
+ * The pseudocode uses a conditional negate after the conditional zero.
51
+ * It is simpler here to unconditionally negate before conditional zero.
52
+ */
53
+ pair ^= neg;
54
+ if (!(pg & 1)) {
55
+ pair &= 0xffff0000u;
101
+ }
56
+ }
102
+ return 0;
57
+ if (!(pg & 4)) {
58
+ pair &= 0x0000ffffu;
59
+ }
60
+ return pair;
103
+}
61
+}
104
+
62
+
105
+static int vfp_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
63
+void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
64
+ void *vpm, uint32_t desc)
106
+{
65
+{
107
+ switch (reg) {
66
+ intptr_t row, col, oprsz = simd_maxsz(desc);
108
+ case 0:
67
+ uint32_t neg = simd_data(desc) * 0x80008000u;
109
+ return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]);
68
+ uint16_t *pn = vpn, *pm = vpm;
110
+ case 1:
69
+
111
+ return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]);
70
+ for (row = 0; row < oprsz; ) {
71
+ uint16_t prow = pn[H2(row >> 4)];
72
+ do {
73
+ void *vza_row = vza + tile_vslice_offset(row);
74
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row));
75
+
76
+ n = f16mop_adj_pair(n, prow, neg);
77
+
78
+ for (col = 0; col < oprsz; ) {
79
+ uint16_t pcol = pm[H2(col >> 4)];
80
+ do {
81
+ if (prow & pcol & 0b0101) {
82
+ uint32_t *a = vza_row + H1_4(col);
83
+ uint32_t m = *(uint32_t *)(vzm + H1_4(col));
84
+
85
+ m = f16mop_adj_pair(m, pcol, 0);
86
+ *a = bfdotadd(*a, n, m);
87
+
88
+ col += 4;
89
+ pcol >>= 4;
90
+ }
91
+ } while (col & 15);
92
+ }
93
+ row += 4;
94
+ prow >>= 4;
95
+ } while (row & 15);
112
+ }
96
+ }
113
+ return 0;
97
+}
98
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/arm/translate-sme.c
101
+++ b/target/arm/translate-sme.c
102
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s)
103
TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d)
104
TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d)
105
106
+static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz,
107
+ gen_helper_gvec_5 *fn)
108
+{
109
+ int svl = streaming_vec_reg_size(s);
110
+ uint32_t desc = simd_desc(svl, svl, a->sub);
111
+ TCGv_ptr za, zn, zm, pn, pm;
112
+
113
+ if (!sme_smza_enabled_check(s)) {
114
+ return true;
115
+ }
116
+
117
+ /* Sum XZR+zad to find ZAd. */
118
+ za = get_tile_rowcol(s, esz, 31, a->zad, false);
119
+ zn = vec_full_reg_ptr(s, a->zn);
120
+ zm = vec_full_reg_ptr(s, a->zm);
121
+ pn = pred_full_reg_ptr(s, a->pn);
122
+ pm = pred_full_reg_ptr(s, a->pm);
123
+
124
+ fn(za, zn, zm, pn, pm, tcg_constant_i32(desc));
125
+
126
+ tcg_temp_free_ptr(za);
127
+ tcg_temp_free_ptr(zn);
128
+ tcg_temp_free_ptr(pn);
129
+ tcg_temp_free_ptr(pm);
130
+ return true;
114
+}
131
+}
115
+
132
+
116
+static int vfp_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
133
static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
117
+{
134
gen_helper_gvec_5_ptr *fn)
118
+ switch (reg) {
135
{
119
case 0:
136
@@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
120
env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf);
137
121
return 4;
138
TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
122
case 1:
139
TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
123
- vfp_set_fpscr(env, ldl_p(buf));
124
- return 4;
125
- case 2:
126
env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30);
127
return 4;
128
}
129
@@ -XXX,XX +XXX,XX @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
130
34, "aarch64-fpu.xml", 0);
131
}
132
#endif
133
- } else if (arm_feature(env, ARM_FEATURE_NEON)) {
134
- gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
135
- 51, "arm-neon.xml", 0);
136
- } else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
137
- gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
138
- 35, "arm-vfp3.xml", 0);
139
- } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
140
- gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
141
- 19, "arm-vfp.xml", 0);
142
+ } else {
143
+ if (arm_feature(env, ARM_FEATURE_NEON)) {
144
+ gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
145
+ 49, "arm-neon.xml", 0);
146
+ } else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
147
+ gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
148
+ 33, "arm-vfp3.xml", 0);
149
+ } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
150
+ gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
151
+ 17, "arm-vfp.xml", 0);
152
+ }
153
+ if (!arm_feature(env, ARM_FEATURE_M)) {
154
+ /*
155
+ * A and R profile have FP sysregs FPEXC and FPSID that we
156
+ * expose to gdb.
157
+ */
158
+ gdb_register_coprocessor(cs, vfp_gdb_get_sysreg, vfp_gdb_set_sysreg,
159
+ 2, "arm-vfp-sysregs.xml", 0);
160
+ }
161
}
162
gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
163
arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
164
diff --git a/gdb-xml/arm-neon.xml b/gdb-xml/arm-neon.xml
165
index XXXXXXX..XXXXXXX 100644
166
--- a/gdb-xml/arm-neon.xml
167
+++ b/gdb-xml/arm-neon.xml
168
@@ -XXX,XX +XXX,XX @@
169
<reg name="q14" bitsize="128" type="neon_q"/>
170
<reg name="q15" bitsize="128" type="neon_q"/>
171
172
- <reg name="fpsid" bitsize="32" type="int" group="float"/>
173
<reg name="fpscr" bitsize="32" type="int" group="float"/>
174
- <reg name="fpexc" bitsize="32" type="int" group="float"/>
175
</feature>
176
diff --git a/gdb-xml/arm-vfp-sysregs.xml b/gdb-xml/arm-vfp-sysregs.xml
177
new file mode 100644
178
index XXXXXXX..XXXXXXX
179
--- /dev/null
180
+++ b/gdb-xml/arm-vfp-sysregs.xml
181
@@ -XXX,XX +XXX,XX @@
182
+<?xml version="1.0"?>
183
+<!-- Copyright (C) 2021 Linaro Ltd.
184
+
140
+
185
+ Copying and distribution of this file, with or without modification,
141
+/* TODO: FEAT_EBF16 */
186
+ are permitted in any medium without royalty provided the copyright
142
+TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa)
187
+ notice and this notice are preserved.
188
+
189
+ These are A/R profile VFP system registers. Debugger users probably
190
+ don't really care about these, but because we used to (incorrectly)
191
+ provide them to gdb in the org.gnu.gdb.arm.vfp XML we continue
192
+ to do so via this separate XML.
193
+ -->
194
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
195
+<feature name="org.qemu.gdb.arm.vfp-sysregs">
196
+ <reg name="fpsid" bitsize="32" type="int" group="float"/>
197
+ <reg name="fpexc" bitsize="32" type="int" group="float"/>
198
+</feature>
199
diff --git a/gdb-xml/arm-vfp.xml b/gdb-xml/arm-vfp.xml
200
index XXXXXXX..XXXXXXX 100644
201
--- a/gdb-xml/arm-vfp.xml
202
+++ b/gdb-xml/arm-vfp.xml
203
@@ -XXX,XX +XXX,XX @@
204
<reg name="d14" bitsize="64" type="float"/>
205
<reg name="d15" bitsize="64" type="float"/>
206
207
- <reg name="fpsid" bitsize="32" type="int" group="float"/>
208
<reg name="fpscr" bitsize="32" type="int" group="float"/>
209
- <reg name="fpexc" bitsize="32" type="int" group="float"/>
210
</feature>
211
diff --git a/gdb-xml/arm-vfp3.xml b/gdb-xml/arm-vfp3.xml
212
index XXXXXXX..XXXXXXX 100644
213
--- a/gdb-xml/arm-vfp3.xml
214
+++ b/gdb-xml/arm-vfp3.xml
215
@@ -XXX,XX +XXX,XX @@
216
<reg name="d30" bitsize="64" type="float"/>
217
<reg name="d31" bitsize="64" type="float"/>
218
219
- <reg name="fpsid" bitsize="32" type="int" group="float"/>
220
<reg name="fpscr" bitsize="32" type="int" group="float"/>
221
- <reg name="fpexc" bitsize="32" type="int" group="float"/>
222
</feature>
--
2.20.1

--
2.25.1

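For reference, the BFMOPA helper above accumulates, into each 32-bit ZA
element, a two-way dot product of bfloat16 pairs from the row operand (Zn)
and column operand (Zm), with predication applied per 16-bit element;
BFMOPS additionally negates the row operand. A rough standalone sketch
using plain float arithmetic (the helper itself goes through bfdotadd()
for the architected rounding; names are illustrative, not QEMU APIs):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* A bfloat16 value is the top 16 bits of an IEEE float32. */
static float bf16_to_f32(uint16_t h)
{
    union { uint32_t u; float f; } v = { .u = (uint32_t)h << 16 };
    return v.f;
}

/* za: pairs x pairs float32 tile; zn/zm: 2 * pairs bfloat16 elements each. */
static void bfmopa_ref(float *za, const uint16_t *zn, const uint16_t *zm,
                       const bool *pn, const bool *pm, size_t pairs)
{
    for (size_t row = 0; row < pairs; row++) {
        for (size_t col = 0; col < pairs; col++) {
            float sum = za[row * pairs + col];
            for (size_t k = 0; k < 2; k++) {
                /* Position k contributes only if both the row and the
                 * column predicate bits for that 16-bit element are set. */
                if (pn[2 * row + k] && pm[2 * col + k]) {
                    sum += bf16_to_f32(zn[2 * row + k]) *
                           bf16_to_f32(zm[2 * col + k]);
                }
            }
            za[row * pairs + col] = sum;
        }
    }
}
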
From: Tong Ho <tong.ho@xilinx.com>

Connect the support for Versal Battery-Backed RAM (BBRAM)

The command argument:
-drive if=pflash,index=0,...
Can be used to optionally connect the bbram to a backend
storage, such that field-programmed values in one
invocation can be made available to next invocation.

The backend storage must be a seekable binary file, and
its size must be 36 bytes or larger. A file with all
binary 0's is a 'blank'.

Signed-off-by: Tong Ho <tong.ho@xilinx.com>
Message-id: 20210917052400.1249094-6-tong.ho@xilinx.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
include/hw/arm/xlnx-versal.h | 5 +++++
hw/arm/xlnx-versal-virt.c | 36 ++++++++++++++++++++++++++++++++++++
hw/arm/xlnx-versal.c | 18 ++++++++++++++++++
hw/arm/Kconfig | 1 +
4 files changed, 60 insertions(+)

From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-27-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-sme.h | 2 ++
target/arm/sme.decode | 1 +
target/arm/sme_helper.c | 74 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-sme.c | 1 +
4 files changed, 78 insertions(+)

diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
27
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
28
--- a/include/hw/arm/xlnx-versal.h
16
--- a/target/arm/helper-sme.h
29
+++ b/include/hw/arm/xlnx-versal.h
17
+++ b/target/arm/helper-sme.h
30
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
31
#include "qom/object.h"
19
DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
32
#include "hw/usb/xlnx-usb-subsystem.h"
20
DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
33
#include "hw/misc/xlnx-versal-xramc.h"
21
34
+#include "hw/nvram/xlnx-bbram.h"
22
+DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG,
35
23
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
36
#define TYPE_XLNX_VERSAL "xlnx-versal"
24
DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
37
OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL)
25
void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
38
@@ -XXX,XX +XXX,XX @@ struct Versal {
26
DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
39
} iou;
27
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
40
41
XlnxZynqMPRTC rtc;
42
+ XlnxBBRam bbram;
43
} pmc;
44
45
struct {
46
@@ -XXX,XX +XXX,XX @@ struct Versal {
47
#define VERSAL_GEM1_WAKE_IRQ_0 59
48
#define VERSAL_ADMA_IRQ_0 60
49
#define VERSAL_XRAM_IRQ_0 79
50
+#define VERSAL_BBRAM_APB_IRQ_0 121
51
#define VERSAL_RTC_APB_ERR_IRQ 121
52
#define VERSAL_SD0_IRQ_0 126
53
#define VERSAL_RTC_ALARM_IRQ 142
54
@@ -XXX,XX +XXX,XX @@ struct Versal {
55
56
#define MM_PMC_SD0 0xf1040000U
57
#define MM_PMC_SD0_SIZE 0x10000
58
+#define MM_PMC_BBRAM_CTRL 0xf11f0000
59
+#define MM_PMC_BBRAM_CTRL_SIZE 0x00050
60
#define MM_PMC_CRP 0xf1260000U
61
#define MM_PMC_CRP_SIZE 0x10000
62
#define MM_PMC_RTC 0xf12a0000
63
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
64
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
65
--- a/hw/arm/xlnx-versal-virt.c
29
--- a/target/arm/sme.decode
66
+++ b/hw/arm/xlnx-versal-virt.c
30
+++ b/target/arm/sme.decode
67
@@ -XXX,XX +XXX,XX @@ static void fdt_add_rtc_node(VersalVirt *s)
31
@@ -XXX,XX +XXX,XX @@ FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
68
g_free(name);
32
FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
33
34
BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
35
+FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
36
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/arm/sme_helper.c
39
+++ b/target/arm/sme_helper.c
40
@@ -XXX,XX +XXX,XX @@ static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
41
return pair;
69
}
42
}
70
43
71
+static void fdt_add_bbram_node(VersalVirt *s)
44
+static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2,
45
+ float_status *s_std, float_status *s_odd)
72
+{
46
+{
73
+ const char compat[] = TYPE_XLNX_BBRAM;
47
+ float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std);
74
+ const char interrupt_names[] = "bbram-error";
48
+ float64 e1c = float16_to_float64(e1 >> 16, true, s_std);
75
+ char *name = g_strdup_printf("/bbram@%x", MM_PMC_BBRAM_CTRL);
49
+ float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std);
50
+ float64 e2c = float16_to_float64(e2 >> 16, true, s_std);
51
+ float64 t64;
52
+ float32 t32;
76
+
53
+
77
+ qemu_fdt_add_subnode(s->fdt, name);
54
+ /*
55
+ * The ARM pseudocode function FPDot performs both multiplies
56
+ * and the add with a single rounding operation. Emulate this
57
+ * by performing the first multiply in round-to-odd, then doing
58
+ * the second multiply as fused multiply-add, and rounding to
59
+ * float32 all in one step.
60
+ */
61
+ t64 = float64_mul(e1r, e2r, s_odd);
62
+ t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std);
78
+
63
+
79
+ qemu_fdt_setprop_cells(s->fdt, name, "interrupts",
64
+ /* This conversion is exact, because we've already rounded. */
80
+ GIC_FDT_IRQ_TYPE_SPI, VERSAL_BBRAM_APB_IRQ_0,
65
+ t32 = float64_to_float32(t64, s_std);
81
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
66
+
82
+ qemu_fdt_setprop(s->fdt, name, "interrupt-names",
67
+ /* The final accumulation step is not fused. */
83
+ interrupt_names, sizeof(interrupt_names));
68
+ return float32_add(sum, t32, s_std);
84
+ qemu_fdt_setprop_sized_cells(s->fdt, name, "reg",
85
+ 2, MM_PMC_BBRAM_CTRL,
86
+ 2, MM_PMC_BBRAM_CTRL_SIZE);
87
+ qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat));
88
+ g_free(name);
89
+}
69
+}
90
+
70
+
91
static void fdt_nop_memory_nodes(void *fdt, Error **errp)
71
+void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
92
{
72
+ void *vpm, void *vst, uint32_t desc)
93
Error *err = NULL;
94
@@ -XXX,XX +XXX,XX @@ static void create_virtio_regions(VersalVirt *s)
95
}
96
}
97
98
+static void bbram_attach_drive(XlnxBBRam *dev)
99
+{
73
+{
100
+ DriveInfo *dinfo;
74
+ intptr_t row, col, oprsz = simd_maxsz(desc);
101
+ BlockBackend *blk;
75
+ uint32_t neg = simd_data(desc) * 0x80008000u;
76
+ uint16_t *pn = vpn, *pm = vpm;
77
+ float_status fpst_odd, fpst_std;
102
+
78
+
103
+ dinfo = drive_get_by_index(IF_PFLASH, 0);
79
+ /*
104
+ blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL;
80
+ * Make a copy of float_status because this operation does not
105
+ if (blk) {
81
+ * update the cumulative fp exception status. It also produces
106
+ qdev_prop_set_drive(DEVICE(dev), "drive", blk);
82
+ * default nans. Make a second copy with round-to-odd -- see above.
83
+ */
84
+ fpst_std = *(float_status *)vst;
85
+ set_default_nan_mode(true, &fpst_std);
86
+ fpst_odd = fpst_std;
87
+ set_float_rounding_mode(float_round_to_odd, &fpst_odd);
88
+
89
+ for (row = 0; row < oprsz; ) {
90
+ uint16_t prow = pn[H2(row >> 4)];
91
+ do {
92
+ void *vza_row = vza + tile_vslice_offset(row);
93
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row));
94
+
95
+ n = f16mop_adj_pair(n, prow, neg);
96
+
97
+ for (col = 0; col < oprsz; ) {
98
+ uint16_t pcol = pm[H2(col >> 4)];
99
+ do {
100
+ if (prow & pcol & 0b0101) {
101
+ uint32_t *a = vza_row + H1_4(col);
102
+ uint32_t m = *(uint32_t *)(vzm + H1_4(col));
103
+
104
+ m = f16mop_adj_pair(m, pcol, 0);
105
+ *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd);
106
+
107
+ col += 4;
108
+ pcol >>= 4;
109
+ }
110
+ } while (col & 15);
111
+ }
112
+ row += 4;
113
+ prow >>= 4;
114
+ } while (row & 15);
107
+ }
115
+ }
108
+}
116
+}
109
+
117
+
110
static void sd_plugin_card(SDHCIState *sd, DriveInfo *di)
118
void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
119
void *vpm, uint32_t desc)
111
{
120
{
112
BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL;
121
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
113
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
114
fdt_add_usb_xhci_nodes(s);
115
fdt_add_sd_nodes(s);
116
fdt_add_rtc_node(s);
117
+ fdt_add_bbram_node(s);
118
fdt_add_cpu_nodes(s, psci_conduit);
119
fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz);
120
fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz);
121
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
122
memory_region_add_subregion_overlap(get_system_memory(),
123
0, &s->soc.fpd.apu.mr, 0);
124
125
+ /* Attach bbram backend, if given */
126
+ bbram_attach_drive(&s->soc.pmc.bbram);
127
+
128
/* Plugin SD cards. */
129
for (i = 0; i < ARRAY_SIZE(s->soc.pmc.iou.sd); i++) {
130
sd_plugin_card(&s->soc.pmc.iou.sd[i], drive_get_next(IF_SD));
131
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
132
index XXXXXXX..XXXXXXX 100644
122
index XXXXXXX..XXXXXXX 100644
133
--- a/hw/arm/xlnx-versal.c
123
--- a/target/arm/translate-sme.c
134
+++ b/hw/arm/xlnx-versal.c
124
+++ b/target/arm/translate-sme.c
135
@@ -XXX,XX +XXX,XX @@ static void versal_create_xrams(Versal *s, qemu_irq *pic)
125
@@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
136
}
126
return true;
137
}
127
}
138
128
139
+static void versal_create_bbram(Versal *s, qemu_irq *pic)
129
+TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_h)
140
+{
130
TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s)
141
+ SysBusDevice *sbd;
131
TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d)
142
+
132
143
+ object_initialize_child_with_props(OBJECT(s), "bbram", &s->pmc.bbram,
144
+ sizeof(s->pmc.bbram), TYPE_XLNX_BBRAM,
145
+ &error_fatal,
146
+ "crc-zpads", "0",
147
+ NULL);
148
+ sbd = SYS_BUS_DEVICE(&s->pmc.bbram);
149
+
150
+ sysbus_realize(sbd, &error_fatal);
151
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_BBRAM_CTRL,
152
+ sysbus_mmio_get_region(sbd, 0));
153
+ sysbus_connect_irq(sbd, 0, pic[VERSAL_BBRAM_APB_IRQ_0]);
154
+}
155
+
156
/* This takes the board allocated linear DDR memory and creates aliases
157
* for each split DDR range/aperture on the Versal address map.
158
*/
159
@@ -XXX,XX +XXX,XX @@ static void versal_realize(DeviceState *dev, Error **errp)
160
versal_create_sds(s, pic);
161
versal_create_rtc(s, pic);
162
versal_create_xrams(s, pic);
163
+ versal_create_bbram(s, pic);
164
versal_map_ddr(s);
165
versal_unimp(s);
166
167
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
168
index XXXXXXX..XXXXXXX 100644
169
--- a/hw/arm/Kconfig
170
+++ b/hw/arm/Kconfig
171
@@ -XXX,XX +XXX,XX @@ config XLNX_VERSAL
172
select XLNX_ZDMA
173
select XLNX_ZYNQMP
174
select OR_IRQ
175
+ select XLNX_BBRAM
176
177
config NPCM7XX
178
bool
179
--
133
--
180
2.20.1
134
2.25.1
181
182
diff view generated by jsdifflib
1
From: Tong Ho <tong.ho@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Connect the support for ZynqMP eFUSE one-time field-programmable
3
This is SMOPA, SUMOPA, USMOPA_s, UMOPA, for both Int8 and Int16.
4
bit array.
5
4
6
The command argument:
7
-drive if=pflash,index=3,...
8
Can be used to optionally connect the bit array to a
9
backend storage, such that field-programmed values
10
in one invocation can be made available to next
11
invocation.
12
13
The backend storage must be a seekable binary file, and
14
its size must be 768 bytes or larger. A file with all
15
binary 0's is a 'blank'.
16
17
Signed-off-by: Tong Ho <tong.ho@xilinx.com>
18
Message-id: 20210917052400.1249094-9-tong.ho@xilinx.com
19
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220708151540.18136-28-richard.henderson@linaro.org
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
---
9
---
22
include/hw/arm/xlnx-zynqmp.h | 3 +++
10
target/arm/helper-sme.h | 16 ++++++++
23
hw/arm/xlnx-zcu102.c | 15 +++++++++++++++
11
target/arm/sme.decode | 10 +++++
24
hw/arm/xlnx-zynqmp.c | 29 +++++++++++++++++++++++++++++
12
target/arm/sme_helper.c | 82 ++++++++++++++++++++++++++++++++++++++
25
hw/Kconfig | 1 +
13
target/arm/translate-sme.c | 10 +++++
26
4 files changed, 48 insertions(+)
14
4 files changed, 118 insertions(+)
27
15
28
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
16
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
29
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
30
--- a/include/hw/arm/xlnx-zynqmp.h
18
--- a/target/arm/helper-sme.h
31
+++ b/include/hw/arm/xlnx-zynqmp.h
19
+++ b/target/arm/helper-sme.h
32
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
33
#include "net/can_emu.h"
21
void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
34
#include "hw/dma/xlnx_csu_dma.h"
22
DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG,
35
#include "hw/nvram/xlnx-bbram.h"
23
void, ptr, ptr, ptr, ptr, ptr, i32)
36
+#include "hw/nvram/xlnx-zynqmp-efuse.h"
24
+DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG,
37
25
+ void, ptr, ptr, ptr, ptr, ptr, i32)
38
#define TYPE_XLNX_ZYNQMP "xlnx-zynqmp"
26
+DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG,
39
OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
27
+ void, ptr, ptr, ptr, ptr, ptr, i32)
40
@@ -XXX,XX +XXX,XX @@ struct XlnxZynqMPState {
28
+DEF_HELPER_FLAGS_6(sme_sumopa_s, TCG_CALL_NO_RWG,
41
MemoryRegion *ddr_ram;
29
+ void, ptr, ptr, ptr, ptr, ptr, i32)
42
MemoryRegion ddr_ram_low, ddr_ram_high;
30
+DEF_HELPER_FLAGS_6(sme_usmopa_s, TCG_CALL_NO_RWG,
43
XlnxBBRam bbram;
31
+ void, ptr, ptr, ptr, ptr, ptr, i32)
44
+ XlnxEFuse efuse;
32
+DEF_HELPER_FLAGS_6(sme_smopa_d, TCG_CALL_NO_RWG,
45
+ XlnxZynqMPEFuse efuse_ctrl;
33
+ void, ptr, ptr, ptr, ptr, ptr, i32)
46
34
+DEF_HELPER_FLAGS_6(sme_umopa_d, TCG_CALL_NO_RWG,
47
MemoryRegion mr_unimp[XLNX_ZYNQMP_NUM_UNIMP_AREAS];
35
+ void, ptr, ptr, ptr, ptr, ptr, i32)
48
36
+DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG,
49
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
37
+ void, ptr, ptr, ptr, ptr, ptr, i32)
38
+DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG,
39
+ void, ptr, ptr, ptr, ptr, ptr, i32)
40
diff --git a/target/arm/sme.decode b/target/arm/sme.decode
50
index XXXXXXX..XXXXXXX 100644
41
index XXXXXXX..XXXXXXX 100644
51
--- a/hw/arm/xlnx-zcu102.c
42
--- a/target/arm/sme.decode
52
+++ b/hw/arm/xlnx-zcu102.c
43
+++ b/target/arm/sme.decode
53
@@ -XXX,XX +XXX,XX @@ static void bbram_attach_drive(XlnxBBRam *dev)
44
@@ -XXX,XX +XXX,XX @@ FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
45
46
BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
47
FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
48
+
49
+SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32
50
+SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32
51
+USMOPA_s 1010000 1 10 0 ..... ... ... ..... . 00 .. @op_32
52
+UMOPA_s 1010000 1 10 1 ..... ... ... ..... . 00 .. @op_32
53
+
54
+SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64
55
+SUMOPA_d 1010000 0 11 1 ..... ... ... ..... . 0 ... @op_64
56
+USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64
57
+UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64
58
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/sme_helper.c
61
+++ b/target/arm/sme_helper.c
62
@@ -XXX,XX +XXX,XX @@ void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
63
} while (row & 15);
54
}
64
}
55
}
65
}
56
66
+
57
+static void efuse_attach_drive(XlnxEFuse *dev)
67
+typedef uint64_t IMOPFn(uint64_t, uint64_t, uint64_t, uint8_t, bool);
68
+
69
+static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm,
70
+ uint8_t *pn, uint8_t *pm,
71
+ uint32_t desc, IMOPFn *fn)
58
+{
72
+{
59
+ DriveInfo *dinfo;
73
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
60
+ BlockBackend *blk;
74
+ bool neg = simd_data(desc);
61
+
75
+
62
+ dinfo = drive_get_by_index(IF_PFLASH, 3);
76
+ for (row = 0; row < oprsz; ++row) {
63
+ blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL;
77
+ uint8_t pa = pn[H1(row)];
64
+ if (blk) {
78
+ uint64_t *za_row = &za[tile_vslice_index(row)];
65
+ qdev_prop_set_drive(DEVICE(dev), "drive", blk);
79
+ uint64_t n = zn[row];
80
+
81
+ for (col = 0; col < oprsz; ++col) {
82
+ uint8_t pb = pm[H1(col)];
83
+ uint64_t *a = &za_row[col];
84
+
85
+ *a = fn(n, zm[col], *a, pa & pb, neg);
86
+ }
66
+ }
87
+ }
67
+}
88
+}
68
+
89
+
69
static void xlnx_zcu102_init(MachineState *machine)
90
+#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \
70
{
91
+static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
71
XlnxZCU102 *s = ZCU102_MACHINE(machine);
92
+{ \
72
@@ -XXX,XX +XXX,XX @@ static void xlnx_zcu102_init(MachineState *machine)
93
+ uint32_t sum0 = 0, sum1 = 0; \
73
/* Attach bbram backend, if given */
94
+ /* Apply P to N as a mask, making the inactive elements 0. */ \
74
bbram_attach_drive(&s->soc.bbram);
95
+ n &= expand_pred_b(p); \
75
96
+ sum0 += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
76
+ /* Attach efuse backend, if given */
97
+ sum0 += (NTYPE)(n >> 8) * (MTYPE)(m >> 8); \
77
+ efuse_attach_drive(&s->soc.efuse);
98
+ sum0 += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
78
+
99
+ sum0 += (NTYPE)(n >> 24) * (MTYPE)(m >> 24); \
79
/* Create and plug in the SD cards */
100
+ sum1 += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \
80
for (i = 0; i < XLNX_ZYNQMP_NUM_SDHCI; i++) {
101
+ sum1 += (NTYPE)(n >> 40) * (MTYPE)(m >> 40); \
81
BusState *bus;
102
+ sum1 += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \
82
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
103
+ sum1 += (NTYPE)(n >> 56) * (MTYPE)(m >> 56); \
83
index XXXXXXX..XXXXXXX 100644
104
+ if (neg) { \
84
--- a/hw/arm/xlnx-zynqmp.c
105
+ sum0 = (uint32_t)a - sum0, sum1 = (uint32_t)(a >> 32) - sum1; \
85
+++ b/hw/arm/xlnx-zynqmp.c
106
+ } else { \
86
@@ -XXX,XX +XXX,XX @@
107
+ sum0 = (uint32_t)a + sum0, sum1 = (uint32_t)(a >> 32) + sum1; \
87
#define BBRAM_ADDR 0xffcd0000
108
+ } \
88
#define BBRAM_IRQ 11
109
+ return ((uint64_t)sum1 << 32) | sum0; \
89
90
+#define EFUSE_ADDR 0xffcc0000
91
+#define EFUSE_IRQ 87
92
+
93
#define SDHCI_CAPABILITIES 0x280737ec6481 /* Datasheet: UG1085 (v1.7) */
94
95
static const uint64_t gem_addr[XLNX_ZYNQMP_NUM_GEMS] = {
96
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_create_bbram(XlnxZynqMPState *s, qemu_irq *gic)
97
sysbus_connect_irq(sbd, 0, gic[BBRAM_IRQ]);
98
}
99
100
+static void xlnx_zynqmp_create_efuse(XlnxZynqMPState *s, qemu_irq *gic)
101
+{
102
+ Object *bits = OBJECT(&s->efuse);
103
+ Object *ctrl = OBJECT(&s->efuse_ctrl);
104
+ SysBusDevice *sbd;
105
+
106
+ object_initialize_child(OBJECT(s), "efuse-ctrl", &s->efuse_ctrl,
107
+ TYPE_XLNX_ZYNQMP_EFUSE);
108
+
109
+ object_initialize_child_with_props(ctrl, "xlnx-efuse@0", bits,
110
+ sizeof(s->efuse),
111
+ TYPE_XLNX_EFUSE, &error_abort,
112
+ "efuse-nr", "3",
113
+ "efuse-size", "2048",
114
+ NULL);
115
+
116
+ qdev_realize(DEVICE(bits), NULL, &error_abort);
117
+ object_property_set_link(ctrl, "efuse", bits, &error_abort);
118
+
119
+ sbd = SYS_BUS_DEVICE(ctrl);
120
+ sysbus_realize(sbd, &error_abort);
121
+ sysbus_mmio_map(sbd, 0, EFUSE_ADDR);
122
+ sysbus_connect_irq(sbd, 0, gic[EFUSE_IRQ]);
123
+}
110
+}
124
+
111
+
125
static void xlnx_zynqmp_create_unimp_mmio(XlnxZynqMPState *s)
112
+#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \
126
{
113
+static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
127
static const struct UnimpInfo {
114
+{ \
128
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
115
+ uint64_t sum = 0; \
129
sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, gic_spi[RTC_IRQ]);
116
+ /* Apply P to N as a mask, making the inactive elements 0. */ \
130
117
+ n &= expand_pred_h(p); \
131
xlnx_zynqmp_create_bbram(s, gic_spi);
118
+ sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
132
+ xlnx_zynqmp_create_efuse(s, gic_spi);
119
+ sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
133
xlnx_zynqmp_create_unimp_mmio(s);
120
+ sum += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \
134
121
+ sum += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \
135
for (i = 0; i < XLNX_ZYNQMP_NUM_GDMA_CH; i++) {
122
+ return neg ? a - sum : a + sum; \
136
diff --git a/hw/Kconfig b/hw/Kconfig
123
+}
124
+
125
+DEF_IMOP_32(smopa_s, int8_t, int8_t)
126
+DEF_IMOP_32(umopa_s, uint8_t, uint8_t)
127
+DEF_IMOP_32(sumopa_s, int8_t, uint8_t)
128
+DEF_IMOP_32(usmopa_s, uint8_t, int8_t)
129
+
130
+DEF_IMOP_64(smopa_d, int16_t, int16_t)
131
+DEF_IMOP_64(umopa_d, uint16_t, uint16_t)
132
+DEF_IMOP_64(sumopa_d, int16_t, uint16_t)
133
+DEF_IMOP_64(usmopa_d, uint16_t, int16_t)
134
+
135
+#define DEF_IMOPH(NAME) \
136
+ void HELPER(sme_##NAME)(void *vza, void *vzn, void *vzm, void *vpn, \
137
+ void *vpm, uint32_t desc) \
138
+ { do_imopa(vza, vzn, vzm, vpn, vpm, desc, NAME); }
139
+
140
+DEF_IMOPH(smopa_s)
141
+DEF_IMOPH(umopa_s)
142
+DEF_IMOPH(sumopa_s)
143
+DEF_IMOPH(usmopa_s)
144
+DEF_IMOPH(smopa_d)
145
+DEF_IMOPH(umopa_d)
146
+DEF_IMOPH(sumopa_d)
147
+DEF_IMOPH(usmopa_d)
148
diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c
137
index XXXXXXX..XXXXXXX 100644
149
index XXXXXXX..XXXXXXX 100644
138
--- a/hw/Kconfig
150
--- a/target/arm/translate-sme.c
139
+++ b/hw/Kconfig
151
+++ b/target/arm/translate-sme.c
140
@@ -XXX,XX +XXX,XX @@ config XLNX_ZYNQMP
152
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_f
141
select CAN_BUS
153
142
select PTIMER
154
/* TODO: FEAT_EBF16 */
143
select XLNX_BBRAM
155
TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa)
144
+ select XLNX_EFUSE_ZYNQMP
156
+
157
+TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s)
158
+TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s)
159
+TRANS_FEAT(SUMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_sumopa_s)
160
+TRANS_FEAT(USMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_usmopa_s)
161
+
162
+TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_d)
163
+TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d)
164
+TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d)
165
+TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d)
--
2.20.1

--
2.25.1

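Similarly, for the ZynqMP eFUSE patch above, an illustrative invocation on
the xlnx-zcu102 board (the backing file must be a seekable binary of at
least 768 bytes):

  dd if=/dev/zero of=efuse.bin bs=1 count=768
  qemu-system-aarch64 -M xlnx-zcu102 \
      -drive if=pflash,index=3,file=efuse.bin,format=raw \
      ...
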
From: Richard Henderson <richard.henderson@linaro.org>

This is an SVE instruction that operates using the SVE vector
length but that is present only if SME is implemented.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-29-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/sve.decode | 20 +++++++++++++
target/arm/translate-sve.c | 57 ++++++++++++++++++++++++++++++++++++++
2 files changed, 77 insertions(+)

From: Tong Ho <tong.ho@xilinx.com>

This device is present in Versal and ZynqMP product
families to store a 256-bit encryption key.

Co-authored-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Co-authored-by: Sai Pavan Boddu <sai.pavan.boddu@xilinx.com>

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Sai Pavan Boddu <sai.pavan.boddu@xilinx.com>
Signed-off-by: Tong Ho <tong.ho@xilinx.com>
Message-id: 20210917052400.1249094-5-tong.ho@xilinx.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
include/hw/nvram/xlnx-bbram.h | 54 ++++
hw/nvram/xlnx-bbram.c | 545 ++++++++++++++++++++++++++++++++++
hw/nvram/Kconfig | 4 +
hw/nvram/meson.build | 1 +
4 files changed, 604 insertions(+)
create mode 100644 include/hw/nvram/xlnx-bbram.h
create mode 100644 hw/nvram/xlnx-bbram.c

diff --git a/include/hw/nvram/xlnx-bbram.h b/include/hw/nvram/xlnx-bbram.h
15
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
25
new file mode 100644
16
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX
17
--- a/target/arm/sve.decode
27
--- /dev/null
18
+++ b/target/arm/sve.decode
28
+++ b/include/hw/nvram/xlnx-bbram.h
19
@@ -XXX,XX +XXX,XX @@ BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
29
@@ -XXX,XX +XXX,XX @@
20
30
+/*
21
### SVE2 floating-point bfloat16 dot-product (indexed)
31
+ * QEMU model of the Xilinx BBRAM Battery Backed RAM
22
BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2
32
+ *
33
+ * Copyright (c) 2015-2021 Xilinx Inc.
34
+ *
35
+ * Written by Edgar E. Iglesias <edgari@xilinx.com>
36
+ *
37
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
38
+ * of this software and associated documentation files (the "Software"), to deal
39
+ * in the Software without restriction, including without limitation the rights
40
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
41
+ * copies of the Software, and to permit persons to whom the Software is
42
+ * furnished to do so, subject to the following conditions:
43
+ *
44
+ * The above copyright notice and this permission notice shall be included in
45
+ * all copies or substantial portions of the Software.
46
+ *
47
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
48
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
49
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
50
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
51
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
52
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
53
+ * THE SOFTWARE.
54
+ */
55
+#ifndef XLNX_BBRAM_H
56
+#define XLNX_BBRAM_H
57
+
23
+
58
+#include "sysemu/block-backend.h"
24
+### SVE broadcast predicate element
59
+#include "hw/qdev-core.h"
60
+#include "hw/irq.h"
61
+#include "hw/sysbus.h"
62
+#include "hw/register.h"
63
+
25
+
64
+#define RMAX_XLNX_BBRAM ((0x4c / 4) + 1)
26
+&psel esz pd pn pm rv imm
27
+%psel_rv 16:2 !function=plus_12
28
+%psel_imm_b 22:2 19:2
29
+%psel_imm_h 22:2 20:1
30
+%psel_imm_s 22:2
31
+%psel_imm_d 23:1
32
+@psel ........ .. . ... .. .. pn:4 . pm:4 . pd:4 \
33
+ &psel rv=%psel_rv
65
+
34
+
66
+#define TYPE_XLNX_BBRAM "xlnx,bbram-ctrl"
35
+PSEL 00100101 .. 1 ..1 .. 01 .... 0 .... 0 .... \
67
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxBBRam, XLNX_BBRAM);
36
+ @psel esz=0 imm=%psel_imm_b
37
+PSEL 00100101 .. 1 .10 .. 01 .... 0 .... 0 .... \
38
+ @psel esz=1 imm=%psel_imm_h
39
+PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \
40
+ @psel esz=2 imm=%psel_imm_s
41
+PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \
42
+ @psel esz=3 imm=%psel_imm_d
43
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/translate-sve.c
46
+++ b/target/arm/translate-sve.c
47
@@ -XXX,XX +XXX,XX @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
48
49
TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
50
TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true)
68
+
51
+
69
+struct XlnxBBRam {
52
+static bool trans_PSEL(DisasContext *s, arg_psel *a)
70
+ SysBusDevice parent_obj;
53
+{
71
+ qemu_irq irq_bbram;
54
+ int vl = vec_full_reg_size(s);
55
+ int pl = pred_gvec_reg_size(s);
56
+ int elements = vl >> a->esz;
57
+ TCGv_i64 tmp, didx, dbit;
58
+ TCGv_ptr ptr;
72
+
59
+
73
+ BlockBackend *blk;
60
+ if (!dc_isar_feature(aa64_sme, s)) {
74
+
61
+ return false;
75
+ uint32_t crc_zpads;
62
+ }
76
+ bool bbram8_wo;
63
+ if (!sve_access_check(s)) {
77
+ bool blk_ro;
64
+ return true;
78
+
79
+ uint32_t regs[RMAX_XLNX_BBRAM];
80
+ RegisterInfo regs_info[RMAX_XLNX_BBRAM];
81
+};
82
+
83
+#endif
84
diff --git a/hw/nvram/xlnx-bbram.c b/hw/nvram/xlnx-bbram.c
85
new file mode 100644
86
index XXXXXXX..XXXXXXX
87
--- /dev/null
88
+++ b/hw/nvram/xlnx-bbram.c
89
@@ -XXX,XX +XXX,XX @@
90
+/*
91
+ * QEMU model of the Xilinx BBRAM Battery Backed RAM
92
+ *
93
+ * Copyright (c) 2014-2021 Xilinx Inc.
94
+ *
95
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
96
+ * of this software and associated documentation files (the "Software"), to deal
97
+ * in the Software without restriction, including without limitation the rights
98
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
99
+ * copies of the Software, and to permit persons to whom the Software is
100
+ * furnished to do so, subject to the following conditions:
101
+ *
102
+ * The above copyright notice and this permission notice shall be included in
103
+ * all copies or substantial portions of the Software.
104
+ *
105
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
106
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
107
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
108
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
109
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
110
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
111
+ * THE SOFTWARE.
112
+ */
113
+
114
+#include "qemu/osdep.h"
115
+#include "hw/nvram/xlnx-bbram.h"
116
+
117
+#include "qemu/error-report.h"
118
+#include "qemu/log.h"
119
+#include "qapi/error.h"
120
+#include "sysemu/blockdev.h"
121
+#include "migration/vmstate.h"
122
+#include "hw/qdev-properties.h"
123
+#include "hw/qdev-properties-system.h"
124
+#include "hw/nvram/xlnx-efuse.h"
125
+
126
+#ifndef XLNX_BBRAM_ERR_DEBUG
127
+#define XLNX_BBRAM_ERR_DEBUG 0
128
+#endif
129
+
130
+REG32(BBRAM_STATUS, 0x0)
131
+ FIELD(BBRAM_STATUS, AES_CRC_PASS, 9, 1)
132
+ FIELD(BBRAM_STATUS, AES_CRC_DONE, 8, 1)
133
+ FIELD(BBRAM_STATUS, BBRAM_ZEROIZED, 4, 1)
134
+ FIELD(BBRAM_STATUS, PGM_MODE, 0, 1)
135
+REG32(BBRAM_CTRL, 0x4)
136
+ FIELD(BBRAM_CTRL, ZEROIZE, 0, 1)
137
+REG32(PGM_MODE, 0x8)
138
+REG32(BBRAM_AES_CRC, 0xc)
139
+REG32(BBRAM_0, 0x10)
140
+REG32(BBRAM_1, 0x14)
141
+REG32(BBRAM_2, 0x18)
142
+REG32(BBRAM_3, 0x1c)
143
+REG32(BBRAM_4, 0x20)
144
+REG32(BBRAM_5, 0x24)
145
+REG32(BBRAM_6, 0x28)
146
+REG32(BBRAM_7, 0x2c)
147
+REG32(BBRAM_8, 0x30)
148
+REG32(BBRAM_SLVERR, 0x34)
149
+ FIELD(BBRAM_SLVERR, ENABLE, 0, 1)
150
+REG32(BBRAM_ISR, 0x38)
151
+ FIELD(BBRAM_ISR, APB_SLVERR, 0, 1)
152
+REG32(BBRAM_IMR, 0x3c)
153
+ FIELD(BBRAM_IMR, APB_SLVERR, 0, 1)
154
+REG32(BBRAM_IER, 0x40)
155
+ FIELD(BBRAM_IER, APB_SLVERR, 0, 1)
156
+REG32(BBRAM_IDR, 0x44)
157
+ FIELD(BBRAM_IDR, APB_SLVERR, 0, 1)
158
+REG32(BBRAM_MSW_LOCK, 0x4c)
159
+ FIELD(BBRAM_MSW_LOCK, VAL, 0, 1)
160
+
161
+#define R_MAX (R_BBRAM_MSW_LOCK + 1)
162
+
163
+#define RAM_MAX (A_BBRAM_8 + 4 - A_BBRAM_0)
164
+
165
+#define BBRAM_PGM_MAGIC 0x757bdf0d
166
+
167
+QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxBBRam *)0)->regs));
168
+
169
+static bool bbram_msw_locked(XlnxBBRam *s)
170
+{
171
+ return ARRAY_FIELD_EX32(s->regs, BBRAM_MSW_LOCK, VAL) != 0;
172
+}
173
+
174
+static bool bbram_pgm_enabled(XlnxBBRam *s)
175
+{
176
+ return ARRAY_FIELD_EX32(s->regs, BBRAM_STATUS, PGM_MODE) != 0;
177
+}
178
+
179
+static void bbram_bdrv_error(XlnxBBRam *s, int rc, gchar *detail)
180
+{
181
+ Error *errp;
182
+
183
+ error_setg_errno(&errp, -rc, "%s: BBRAM backstore %s failed.",
184
+ blk_name(s->blk), detail);
185
+ error_report("%s", error_get_pretty(errp));
186
+ error_free(errp);
187
+
188
+ g_free(detail);
189
+}
190
+
191
+static void bbram_bdrv_read(XlnxBBRam *s, Error **errp)
192
+{
193
+ uint32_t *ram = &s->regs[R_BBRAM_0];
194
+ int nr = RAM_MAX;
195
+
196
+ if (!s->blk) {
197
+ return;
198
+ }
65
+ }
199
+
66
+
200
+ s->blk_ro = !blk_supports_write_perm(s->blk);
67
+ tmp = tcg_temp_new_i64();
201
+ if (!s->blk_ro) {
68
+ dbit = tcg_temp_new_i64();
202
+ int rc;
69
+ didx = tcg_temp_new_i64();
70
+ ptr = tcg_temp_new_ptr();
203
+
71
+
204
+ rc = blk_set_perm(s->blk,
72
+ /* Compute the predicate element. */
205
+ (BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE),
73
+ tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm);
206
+ BLK_PERM_ALL, NULL);
74
+ if (is_power_of_2(elements)) {
207
+ if (rc) {
75
+ tcg_gen_andi_i64(tmp, tmp, elements - 1);
208
+ s->blk_ro = true;
76
+ } else {
209
+ }
77
+ tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements));
210
+ }
211
+ if (s->blk_ro) {
212
+ warn_report("%s: Skip saving updates to read-only BBRAM backstore.",
213
+ blk_name(s->blk));
214
+ }
78
+ }
215
+
79
+
216
+ if (blk_pread(s->blk, 0, ram, nr) < 0) {
80
+ /* Extract the predicate byte and bit indices. */
217
+ error_setg(errp,
81
+ tcg_gen_shli_i64(tmp, tmp, a->esz);
218
+ "%s: Failed to read %u bytes from BBRAM backstore.",
82
+ tcg_gen_andi_i64(dbit, tmp, 7);
219
+ blk_name(s->blk), nr);
83
+ tcg_gen_shri_i64(didx, tmp, 3);
220
+ return;
84
+ if (HOST_BIG_ENDIAN) {
85
+ tcg_gen_xori_i64(didx, didx, 7);
221
+ }
86
+ }
222
+
87
+
223
+ /* Convert from little-endian backstore for each 32-bit word */
88
+ /* Load the predicate word. */
224
+ nr /= 4;
89
+ tcg_gen_trunc_i64_ptr(ptr, didx);
225
+ while (nr--) {
90
+ tcg_gen_add_ptr(ptr, ptr, cpu_env);
226
+ ram[nr] = le32_to_cpu(ram[nr]);
91
+ tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));
227
+ }
92
+
93
+ /* Extract the predicate bit and replicate to MO_64. */
94
+ tcg_gen_shr_i64(tmp, tmp, dbit);
95
+ tcg_gen_andi_i64(tmp, tmp, 1);
96
+ tcg_gen_neg_i64(tmp, tmp);
97
+
98
+ /* Apply to either copy the source, or write zeros. */
99
+ tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd),
100
+ pred_full_reg_offset(s, a->pn), tmp, pl, pl);
101
+
102
+ tcg_temp_free_i64(tmp);
103
+ tcg_temp_free_i64(dbit);
104
+ tcg_temp_free_i64(didx);
105
+ tcg_temp_free_ptr(ptr);
106
+ return true;
228
+}
107
+}
229
+
230
+static void bbram_bdrv_sync(XlnxBBRam *s, uint64_t hwaddr)
231
+{
232
+ uint32_t le32;
233
+ unsigned offset;
234
+ int rc;
235
+
236
+ assert(A_BBRAM_0 <= hwaddr && hwaddr <= A_BBRAM_8);
237
+
238
+ /* Backstore is always in little-endian */
239
+ le32 = cpu_to_le32(s->regs[hwaddr / 4]);
240
+
241
+ /* Update zeroized flag */
242
+ if (le32 && (hwaddr != A_BBRAM_8 || s->bbram8_wo)) {
243
+ ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, BBRAM_ZEROIZED, 0);
244
+ }
245
+
246
+ if (!s->blk || s->blk_ro) {
247
+ return;
248
+ }
249
+
250
+ offset = hwaddr - A_BBRAM_0;
251
+ rc = blk_pwrite(s->blk, offset, &le32, 4, 0);
252
+ if (rc < 0) {
253
+ bbram_bdrv_error(s, rc, g_strdup_printf("write to offset %u", offset));
254
+ }
255
+}
256
+
257
+static void bbram_bdrv_zero(XlnxBBRam *s)
258
+{
259
+ int rc;
260
+
261
+ ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, BBRAM_ZEROIZED, 1);
262
+
263
+ if (!s->blk || s->blk_ro) {
264
+ return;
265
+ }
266
+
267
+ rc = blk_make_zero(s->blk, 0);
268
+ if (rc < 0) {
269
+ bbram_bdrv_error(s, rc, g_strdup("zeroizing"));
270
+ }
271
+
272
+ /* Restore bbram8 if it is non-zero */
273
+ if (s->regs[R_BBRAM_8]) {
274
+ bbram_bdrv_sync(s, A_BBRAM_8);
275
+ }
276
+}
277
+
278
+static void bbram_zeroize(XlnxBBRam *s)
279
+{
280
+ int nr = RAM_MAX - (s->bbram8_wo ? 0 : 4); /* only wo bbram8 is cleared */
281
+
282
+ memset(&s->regs[R_BBRAM_0], 0, nr);
283
+ bbram_bdrv_zero(s);
284
+}
285
+
286
+static void bbram_update_irq(XlnxBBRam *s)
287
+{
288
+ bool pending = s->regs[R_BBRAM_ISR] & ~s->regs[R_BBRAM_IMR];
289
+
290
+ qemu_set_irq(s->irq_bbram, pending);
291
+}
292
+
293
+static void bbram_ctrl_postw(RegisterInfo *reg, uint64_t val64)
294
+{
295
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
296
+ uint32_t val = val64;
297
+
298
+ if (val & R_BBRAM_CTRL_ZEROIZE_MASK) {
299
+ bbram_zeroize(s);
300
+ /* The bit is self clearing */
301
+ s->regs[R_BBRAM_CTRL] &= ~R_BBRAM_CTRL_ZEROIZE_MASK;
302
+ }
303
+}
304
+
305
+static void bbram_pgm_mode_postw(RegisterInfo *reg, uint64_t val64)
306
+{
307
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
308
+ uint32_t val = val64;
309
+
310
+ if (val == BBRAM_PGM_MAGIC) {
311
+ bbram_zeroize(s);
312
+
313
+ /* The status bit is cleared only by POR */
314
+ ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, PGM_MODE, 1);
315
+ }
316
+}
317
+
318
+static void bbram_aes_crc_postw(RegisterInfo *reg, uint64_t val64)
319
+{
320
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
321
+ uint32_t calc_crc;
322
+
323
+ if (!bbram_pgm_enabled(s)) {
324
+ /* We are not in programming mode, don't do anything */
325
+ return;
326
+ }
327
+
328
+ /* Perform the AES integrity check */
329
+ s->regs[R_BBRAM_STATUS] |= R_BBRAM_STATUS_AES_CRC_DONE_MASK;
330
+
331
+ /*
332
+ * Set check status.
333
+ *
334
+ * ZynqMP BBRAM check has a zero-u32 prepended; see:
335
+ * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_bbramps_zynqmp.c#L311
336
+ */
337
+ calc_crc = xlnx_efuse_calc_crc(&s->regs[R_BBRAM_0],
338
+ (R_BBRAM_8 - R_BBRAM_0), s->crc_zpads);
339
+
340
+ ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, AES_CRC_PASS,
341
+ (s->regs[R_BBRAM_AES_CRC] == calc_crc));
342
+}
343
+
344
+static uint64_t bbram_key_prew(RegisterInfo *reg, uint64_t val64)
345
+{
346
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
347
+ uint32_t original_data = *(uint32_t *) reg->data;
348
+
349
+ if (bbram_pgm_enabled(s)) {
350
+ return val64;
351
+ } else {
352
+ /* We are not in programming mode, don't do anything */
353
+ qemu_log_mask(LOG_GUEST_ERROR,
354
+ "Not in programming mode, dropping the write\n");
355
+ return original_data;
356
+ }
357
+}
358
+
359
+static void bbram_key_postw(RegisterInfo *reg, uint64_t val64)
360
+{
361
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
362
+
363
+ bbram_bdrv_sync(s, reg->access->addr);
364
+}
365
+
366
+static uint64_t bbram_wo_postr(RegisterInfo *reg, uint64_t val)
367
+{
368
+ return 0;
369
+}
370
+
371
+static uint64_t bbram_r8_postr(RegisterInfo *reg, uint64_t val)
372
+{
373
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
374
+
375
+ return s->bbram8_wo ? bbram_wo_postr(reg, val) : val;
376
+}
377
+
378
+static bool bbram_r8_readonly(XlnxBBRam *s)
379
+{
380
+ return !bbram_pgm_enabled(s) || bbram_msw_locked(s);
381
+}
382
+
383
+static uint64_t bbram_r8_prew(RegisterInfo *reg, uint64_t val64)
384
+{
385
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
386
+
387
+ if (bbram_r8_readonly(s)) {
388
+ val64 = *(uint32_t *)reg->data;
389
+ }
390
+
391
+ return val64;
392
+}
393
+
394
+static void bbram_r8_postw(RegisterInfo *reg, uint64_t val64)
395
+{
396
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
397
+
398
+ if (!bbram_r8_readonly(s)) {
399
+ bbram_bdrv_sync(s, A_BBRAM_8);
400
+ }
401
+}
402
+
403
+static uint64_t bbram_msw_lock_prew(RegisterInfo *reg, uint64_t val64)
404
+{
405
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
406
+
407
+ /* Never lock if bbram8 is wo; and, only POR can clear the lock */
408
+ if (s->bbram8_wo) {
409
+ val64 = 0;
410
+ } else {
411
+ val64 |= s->regs[R_BBRAM_MSW_LOCK];
412
+ }
413
+
414
+ return val64;
415
+}
416
+
417
+static void bbram_isr_postw(RegisterInfo *reg, uint64_t val64)
418
+{
419
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
420
+
421
+ bbram_update_irq(s);
422
+}
423
+
424
+static uint64_t bbram_ier_prew(RegisterInfo *reg, uint64_t val64)
425
+{
426
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
427
+ uint32_t val = val64;
428
+
429
+ s->regs[R_BBRAM_IMR] &= ~val;
430
+ bbram_update_irq(s);
431
+ return 0;
432
+}
433
+
434
+static uint64_t bbram_idr_prew(RegisterInfo *reg, uint64_t val64)
435
+{
436
+ XlnxBBRam *s = XLNX_BBRAM(reg->opaque);
437
+ uint32_t val = val64;
438
+
439
+ s->regs[R_BBRAM_IMR] |= val;
440
+ bbram_update_irq(s);
441
+ return 0;
442
+}
443
+
444
+static RegisterAccessInfo bbram_ctrl_regs_info[] = {
445
+ { .name = "BBRAM_STATUS", .addr = A_BBRAM_STATUS,
446
+ .rsvd = 0xee,
447
+ .ro = 0x3ff,
448
+ },{ .name = "BBRAM_CTRL", .addr = A_BBRAM_CTRL,
449
+ .post_write = bbram_ctrl_postw,
450
+ },{ .name = "PGM_MODE", .addr = A_PGM_MODE,
451
+ .post_write = bbram_pgm_mode_postw,
452
+ },{ .name = "BBRAM_AES_CRC", .addr = A_BBRAM_AES_CRC,
453
+ .post_write = bbram_aes_crc_postw,
454
+ .post_read = bbram_wo_postr,
455
+ },{ .name = "BBRAM_0", .addr = A_BBRAM_0,
456
+ .pre_write = bbram_key_prew,
457
+ .post_write = bbram_key_postw,
458
+ .post_read = bbram_wo_postr,
459
+ },{ .name = "BBRAM_1", .addr = A_BBRAM_1,
460
+ .pre_write = bbram_key_prew,
461
+ .post_write = bbram_key_postw,
462
+ .post_read = bbram_wo_postr,
463
+ },{ .name = "BBRAM_2", .addr = A_BBRAM_2,
464
+ .pre_write = bbram_key_prew,
465
+ .post_write = bbram_key_postw,
466
+ .post_read = bbram_wo_postr,
467
+ },{ .name = "BBRAM_3", .addr = A_BBRAM_3,
468
+ .pre_write = bbram_key_prew,
469
+ .post_write = bbram_key_postw,
470
+ .post_read = bbram_wo_postr,
471
+ },{ .name = "BBRAM_4", .addr = A_BBRAM_4,
472
+ .pre_write = bbram_key_prew,
473
+ .post_write = bbram_key_postw,
474
+ .post_read = bbram_wo_postr,
475
+ },{ .name = "BBRAM_5", .addr = A_BBRAM_5,
476
+ .pre_write = bbram_key_prew,
477
+ .post_write = bbram_key_postw,
478
+ .post_read = bbram_wo_postr,
479
+ },{ .name = "BBRAM_6", .addr = A_BBRAM_6,
480
+ .pre_write = bbram_key_prew,
481
+ .post_write = bbram_key_postw,
482
+ .post_read = bbram_wo_postr,
483
+ },{ .name = "BBRAM_7", .addr = A_BBRAM_7,
484
+ .pre_write = bbram_key_prew,
485
+ .post_write = bbram_key_postw,
486
+ .post_read = bbram_wo_postr,
487
+ },{ .name = "BBRAM_8", .addr = A_BBRAM_8,
488
+ .pre_write = bbram_r8_prew,
489
+ .post_write = bbram_r8_postw,
490
+ .post_read = bbram_r8_postr,
491
+ },{ .name = "BBRAM_SLVERR", .addr = A_BBRAM_SLVERR,
492
+ .rsvd = ~1,
493
+ },{ .name = "BBRAM_ISR", .addr = A_BBRAM_ISR,
494
+ .w1c = 0x1,
495
+ .post_write = bbram_isr_postw,
496
+ },{ .name = "BBRAM_IMR", .addr = A_BBRAM_IMR,
497
+ .ro = 0x1,
498
+ },{ .name = "BBRAM_IER", .addr = A_BBRAM_IER,
499
+ .pre_write = bbram_ier_prew,
500
+ },{ .name = "BBRAM_IDR", .addr = A_BBRAM_IDR,
501
+ .pre_write = bbram_idr_prew,
502
+ },{ .name = "BBRAM_MSW_LOCK", .addr = A_BBRAM_MSW_LOCK,
503
+ .pre_write = bbram_msw_lock_prew,
504
+ .ro = ~R_BBRAM_MSW_LOCK_VAL_MASK,
505
+ }
506
+};
507
+
508
+static void bbram_ctrl_reset(DeviceState *dev)
509
+{
510
+ XlnxBBRam *s = XLNX_BBRAM(dev);
511
+ unsigned int i;
512
+
513
+ for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
514
+ if (i < R_BBRAM_0 || i > R_BBRAM_8) {
515
+ register_reset(&s->regs_info[i]);
516
+ }
517
+ }
518
+
519
+ bbram_update_irq(s);
520
+}
521
+
522
+static const MemoryRegionOps bbram_ctrl_ops = {
523
+ .read = register_read_memory,
524
+ .write = register_write_memory,
525
+ .endianness = DEVICE_LITTLE_ENDIAN,
526
+ .valid = {
527
+ .min_access_size = 4,
528
+ .max_access_size = 4,
529
+ },
530
+};
531
+
532
+static void bbram_ctrl_realize(DeviceState *dev, Error **errp)
533
+{
534
+ XlnxBBRam *s = XLNX_BBRAM(dev);
535
+
536
+ if (s->crc_zpads) {
537
+ s->bbram8_wo = true;
538
+ }
539
+
540
+ bbram_bdrv_read(s, errp);
541
+}
542
+
543
+static void bbram_ctrl_init(Object *obj)
544
+{
545
+ XlnxBBRam *s = XLNX_BBRAM(obj);
546
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
547
+ RegisterInfoArray *reg_array;
548
+
549
+ reg_array =
550
+ register_init_block32(DEVICE(obj), bbram_ctrl_regs_info,
551
+ ARRAY_SIZE(bbram_ctrl_regs_info),
552
+ s->regs_info, s->regs,
553
+ &bbram_ctrl_ops,
554
+ XLNX_BBRAM_ERR_DEBUG,
555
+ R_MAX * 4);
556
+
557
+ sysbus_init_mmio(sbd, &reg_array->mem);
558
+ sysbus_init_irq(sbd, &s->irq_bbram);
559
+}
560
+
561
+static void bbram_prop_set_drive(Object *obj, Visitor *v, const char *name,
562
+ void *opaque, Error **errp)
563
+{
564
+ DeviceState *dev = DEVICE(obj);
565
+
566
+ qdev_prop_drive.set(obj, v, name, opaque, errp);
567
+
568
+ /* Fill initial data if backend is attached after realized */
569
+ if (dev->realized) {
570
+ bbram_bdrv_read(XLNX_BBRAM(obj), errp);
571
+ }
572
+}
573
+
574
+static void bbram_prop_get_drive(Object *obj, Visitor *v, const char *name,
575
+ void *opaque, Error **errp)
576
+{
577
+ qdev_prop_drive.get(obj, v, name, opaque, errp);
578
+}
579
+
580
+static void bbram_prop_release_drive(Object *obj, const char *name,
581
+ void *opaque)
582
+{
583
+ qdev_prop_drive.release(obj, name, opaque);
584
+}
585
+
586
+static const PropertyInfo bbram_prop_drive = {
587
+ .name = "str",
588
+ .description = "Node name or ID of a block device to use as BBRAM backend",
589
+ .realized_set_allowed = true,
590
+ .get = bbram_prop_get_drive,
591
+ .set = bbram_prop_set_drive,
592
+ .release = bbram_prop_release_drive,
593
+};
594
+
595
+static const VMStateDescription vmstate_bbram_ctrl = {
596
+ .name = TYPE_XLNX_BBRAM,
597
+ .version_id = 1,
598
+ .minimum_version_id = 1,
599
+ .fields = (VMStateField[]) {
600
+ VMSTATE_UINT32_ARRAY(regs, XlnxBBRam, R_MAX),
601
+ VMSTATE_END_OF_LIST(),
602
+ }
603
+};
604
+
605
+static Property bbram_ctrl_props[] = {
606
+ DEFINE_PROP("drive", XlnxBBRam, blk, bbram_prop_drive, BlockBackend *),
607
+ DEFINE_PROP_UINT32("crc-zpads", XlnxBBRam, crc_zpads, 1),
608
+ DEFINE_PROP_END_OF_LIST(),
609
+};
610
+
611
+static void bbram_ctrl_class_init(ObjectClass *klass, void *data)
612
+{
613
+ DeviceClass *dc = DEVICE_CLASS(klass);
614
+
615
+ dc->reset = bbram_ctrl_reset;
616
+ dc->realize = bbram_ctrl_realize;
617
+ dc->vmsd = &vmstate_bbram_ctrl;
618
+ device_class_set_props(dc, bbram_ctrl_props);
619
+}
620
+
621
+static const TypeInfo bbram_ctrl_info = {
622
+ .name = TYPE_XLNX_BBRAM,
623
+ .parent = TYPE_SYS_BUS_DEVICE,
624
+ .instance_size = sizeof(XlnxBBRam),
625
+ .class_init = bbram_ctrl_class_init,
626
+ .instance_init = bbram_ctrl_init,
627
+};
628
+
629
+static void bbram_ctrl_register_types(void)
630
+{
631
+ type_register_static(&bbram_ctrl_info);
632
+}
633
+
634
+type_init(bbram_ctrl_register_types)
635
diff --git a/hw/nvram/Kconfig b/hw/nvram/Kconfig
636
index XXXXXXX..XXXXXXX 100644
637
--- a/hw/nvram/Kconfig
638
+++ b/hw/nvram/Kconfig
639
@@ -XXX,XX +XXX,XX @@ config XLNX_EFUSE_VERSAL
640
config XLNX_EFUSE_ZYNQMP
641
bool
642
select XLNX_EFUSE
643
+
644
+config XLNX_BBRAM
645
+ bool
646
+ select XLNX_EFUSE_CRC
647
diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build
648
index XXXXXXX..XXXXXXX 100644
649
--- a/hw/nvram/meson.build
650
+++ b/hw/nvram/meson.build
651
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_VERSAL', if_true: files(
652
'xlnx-versal-efuse-ctrl.c'))
653
softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_ZYNQMP', if_true: files(
654
'xlnx-zynqmp-efuse.c'))
655
+softmmu_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c'))
656
657
specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c'))
658
--
2.20.1
--
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
This is an SVE instruction that operates using the SVE vector
4
length but that is present only if SME is implemented.
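
For illustration, the per-element effect is just a swap of the two 64-bit
doublewords inside each active 128-bit element. A minimal standalone sketch
of that semantics (not the in-tree helper below, which also takes the
operation descriptor and host-endian predicate indexing into account):

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch: swap the 64-bit halves of each 128-bit element whose
     * controlling predicate bit is set; inactive elements keep the
     * destination's previous contents (merging predication). */
    static void revd_sketch(uint64_t *d, const uint64_t *n,
                            const uint8_t *pg, size_t oprsz_bytes)
    {
        for (size_t i = 0; i < oprsz_bytes / 8; i += 2) {
            if (pg[i] & 1) {
                uint64_t lo = n[i], hi = n[i + 1];
                d[i] = hi;
                d[i + 1] = lo;
            }
        }
    }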
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-30-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/helper-sve.h | 2 ++
12
target/arm/sve.decode | 1 +
13
target/arm/sve_helper.c | 16 ++++++++++++++++
14
target/arm/translate-sve.c | 2 ++
15
4 files changed, 21 insertions(+)
16
17
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper-sve.h
20
+++ b/target/arm/helper-sve.h
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
22
23
DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
24
25
+DEF_HELPER_FLAGS_4(sme_revd_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
26
+
27
DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
28
DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
29
DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
30
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/sve.decode
33
+++ b/target/arm/sve.decode
34
@@ -XXX,XX +XXX,XX @@ REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn
35
REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn
36
REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn
37
RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn
38
+REVD 00000101 00 1011 10 100 ... ..... ..... @rd_pg_rn_e0
39
40
# SVE vector splice (predicated, destructive)
41
SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm
42
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/sve_helper.c
45
+++ b/target/arm/sve_helper.c
46
@@ -XXX,XX +XXX,XX @@ DO_ZPZ_D(sve_revh_d, uint64_t, hswap64)
47
48
DO_ZPZ_D(sve_revw_d, uint64_t, wswap64)
49
50
+void HELPER(sme_revd_q)(void *vd, void *vn, void *vg, uint32_t desc)
51
+{
52
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
53
+ uint64_t *d = vd, *n = vn;
54
+ uint8_t *pg = vg;
55
+
56
+ for (i = 0; i < opr_sz; i += 2) {
57
+ if (pg[H1(i)] & 1) {
58
+ uint64_t n0 = n[i + 0];
59
+ uint64_t n1 = n[i + 1];
60
+ d[i + 0] = n1;
61
+ d[i + 1] = n0;
62
+ }
63
+ }
64
+}
65
+
66
DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8)
67
DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16)
68
DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32)
69
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/translate-sve.c
72
+++ b/target/arm/translate-sve.c
73
@@ -XXX,XX +XXX,XX @@ TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0)
74
TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz,
75
a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0)
76
77
+TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0)
78
+
79
TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz,
80
gen_helper_sve_splice, a, a->esz)
81
82
--
83
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
This is an SVE instruction that operates using the SVE vector
4
length but that is present only if SME is implemented.
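
The operation itself is a min/max sandwich applied per element,
d[i] = MIN(MAX(a[i], n[i]), m[i]); the patch below implements it for all
element sizes with both a TCG vector expansion and an out-of-line helper.
A standalone sketch of the signed 32-bit case, for reference only:

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch: clamp the accumulator a[i] to the range [n[i], m[i]]. */
    static void sclamp32_sketch(int32_t *d, const int32_t *n,
                                const int32_t *m, const int32_t *a,
                                size_t elems)
    {
        for (size_t i = 0; i < elems; i++) {
            int32_t t = a[i] > n[i] ? a[i] : n[i];   /* MAX(a, n) */
            d[i]      = t < m[i] ? t : m[i];         /* MIN(..., m) */
        }
    }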
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-31-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/helper.h | 18 +++++++
12
target/arm/sve.decode | 5 ++
13
target/arm/translate-sve.c | 102 +++++++++++++++++++++++++++++++++++++
14
target/arm/vec_helper.c | 24 +++++++++
15
4 files changed, 149 insertions(+)
16
17
diff --git a/target/arm/helper.h b/target/arm/helper.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper.h
20
+++ b/target/arm/helper.h
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
22
DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
23
void, ptr, ptr, ptr, ptr, ptr, i32)
24
25
+DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
26
+ void, ptr, ptr, ptr, ptr, i32)
27
+DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
28
+ void, ptr, ptr, ptr, ptr, i32)
29
+DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
30
+ void, ptr, ptr, ptr, ptr, i32)
31
+DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
32
+ void, ptr, ptr, ptr, ptr, i32)
33
+
34
+DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
35
+ void, ptr, ptr, ptr, ptr, i32)
36
+DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
37
+ void, ptr, ptr, ptr, ptr, i32)
38
+DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
39
+ void, ptr, ptr, ptr, ptr, i32)
40
+DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
41
+ void, ptr, ptr, ptr, ptr, i32)
42
+
43
#ifdef TARGET_AARCH64
44
#include "helper-a64.h"
45
#include "helper-sve.h"
46
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/sve.decode
49
+++ b/target/arm/sve.decode
50
@@ -XXX,XX +XXX,XX @@ PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \
51
@psel esz=2 imm=%psel_imm_s
52
PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \
53
@psel esz=3 imm=%psel_imm_d
54
+
55
+### SVE clamp
56
+
57
+SCLAMP 01000100 .. 0 ..... 110000 ..... ..... @rda_rn_rm
58
+UCLAMP 01000100 .. 0 ..... 110001 ..... ..... @rda_rn_rm
59
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/arm/translate-sve.c
62
+++ b/target/arm/translate-sve.c
63
@@ -XXX,XX +XXX,XX @@ static bool trans_PSEL(DisasContext *s, arg_psel *a)
64
tcg_temp_free_ptr(ptr);
65
return true;
66
}
67
+
68
+static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
69
+{
70
+ tcg_gen_smax_i32(d, a, n);
71
+ tcg_gen_smin_i32(d, d, m);
72
+}
73
+
74
+static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
75
+{
76
+ tcg_gen_smax_i64(d, a, n);
77
+ tcg_gen_smin_i64(d, d, m);
78
+}
79
+
80
+static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
81
+ TCGv_vec m, TCGv_vec a)
82
+{
83
+ tcg_gen_smax_vec(vece, d, a, n);
84
+ tcg_gen_smin_vec(vece, d, d, m);
85
+}
86
+
87
+static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
88
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
89
+{
90
+ static const TCGOpcode vecop[] = {
91
+ INDEX_op_smin_vec, INDEX_op_smax_vec, 0
92
+ };
93
+ static const GVecGen4 ops[4] = {
94
+ { .fniv = gen_sclamp_vec,
95
+ .fno = gen_helper_gvec_sclamp_b,
96
+ .opt_opc = vecop,
97
+ .vece = MO_8 },
98
+ { .fniv = gen_sclamp_vec,
99
+ .fno = gen_helper_gvec_sclamp_h,
100
+ .opt_opc = vecop,
101
+ .vece = MO_16 },
102
+ { .fni4 = gen_sclamp_i32,
103
+ .fniv = gen_sclamp_vec,
104
+ .fno = gen_helper_gvec_sclamp_s,
105
+ .opt_opc = vecop,
106
+ .vece = MO_32 },
107
+ { .fni8 = gen_sclamp_i64,
108
+ .fniv = gen_sclamp_vec,
109
+ .fno = gen_helper_gvec_sclamp_d,
110
+ .opt_opc = vecop,
111
+ .vece = MO_64,
112
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
113
+ };
114
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
115
+}
116
+
117
+TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a)
118
+
119
+static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
120
+{
121
+ tcg_gen_umax_i32(d, a, n);
122
+ tcg_gen_umin_i32(d, d, m);
123
+}
124
+
125
+static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
126
+{
127
+ tcg_gen_umax_i64(d, a, n);
128
+ tcg_gen_umin_i64(d, d, m);
129
+}
130
+
131
+static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
132
+ TCGv_vec m, TCGv_vec a)
133
+{
134
+ tcg_gen_umax_vec(vece, d, a, n);
135
+ tcg_gen_umin_vec(vece, d, d, m);
136
+}
137
+
138
+static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
139
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
140
+{
141
+ static const TCGOpcode vecop[] = {
142
+ INDEX_op_umin_vec, INDEX_op_umax_vec, 0
143
+ };
144
+ static const GVecGen4 ops[4] = {
145
+ { .fniv = gen_uclamp_vec,
146
+ .fno = gen_helper_gvec_uclamp_b,
147
+ .opt_opc = vecop,
148
+ .vece = MO_8 },
149
+ { .fniv = gen_uclamp_vec,
150
+ .fno = gen_helper_gvec_uclamp_h,
151
+ .opt_opc = vecop,
152
+ .vece = MO_16 },
153
+ { .fni4 = gen_uclamp_i32,
154
+ .fniv = gen_uclamp_vec,
155
+ .fno = gen_helper_gvec_uclamp_s,
156
+ .opt_opc = vecop,
157
+ .vece = MO_32 },
158
+ { .fni8 = gen_uclamp_i64,
159
+ .fniv = gen_uclamp_vec,
160
+ .fno = gen_helper_gvec_uclamp_d,
161
+ .opt_opc = vecop,
162
+ .vece = MO_64,
163
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
164
+ };
165
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
166
+}
167
+
168
+TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a)
169
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
170
index XXXXXXX..XXXXXXX 100644
171
--- a/target/arm/vec_helper.c
172
+++ b/target/arm/vec_helper.c
173
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm,
174
}
175
clear_tail(d, opr_sz, simd_maxsz(desc));
176
}
177
+
178
+#define DO_CLAMP(NAME, TYPE) \
179
+void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc) \
180
+{ \
181
+ intptr_t i, opr_sz = simd_oprsz(desc); \
182
+ for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
183
+ TYPE aa = *(TYPE *)(a + i); \
184
+ TYPE nn = *(TYPE *)(n + i); \
185
+ TYPE mm = *(TYPE *)(m + i); \
186
+ TYPE dd = MIN(MAX(aa, nn), mm); \
187
+ *(TYPE *)(d + i) = dd; \
188
+ } \
189
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
190
+}
191
+
192
+DO_CLAMP(gvec_sclamp_b, int8_t)
193
+DO_CLAMP(gvec_sclamp_h, int16_t)
194
+DO_CLAMP(gvec_sclamp_s, int32_t)
195
+DO_CLAMP(gvec_sclamp_d, int64_t)
196
+
197
+DO_CLAMP(gvec_uclamp_b, uint8_t)
198
+DO_CLAMP(gvec_uclamp_h, uint16_t)
199
+DO_CLAMP(gvec_uclamp_s, uint32_t)
200
+DO_CLAMP(gvec_uclamp_d, uint64_t)
201
--
202
2.25.1
1
We're going to move this code to a different file; fix the coding
1
From: Richard Henderson <richard.henderson@linaro.org>
2
style first so checkpatch doesn't complain. This includes deleting
3
the spurious 'break' statements after returns in the
4
vfp_gdb_get_reg() function.
5
2
3
We can handle both exception entry and exception return by
4
hooking into aarch64_sve_change_el.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-32-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210921162901.17508-3-peter.maydell@linaro.org
10
---
10
---
11
target/arm/helper.c | 23 ++++++++++++++++-------
11
target/arm/helper.c | 15 +++++++++++++--
12
1 file changed, 16 insertions(+), 7 deletions(-)
12
1 file changed, 13 insertions(+), 2 deletions(-)
13
13
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
16
--- a/target/arm/helper.c
17
+++ b/target/arm/helper.c
17
+++ b/target/arm/helper.c
18
@@ -XXX,XX +XXX,XX @@ static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
18
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
19
}
19
return;
20
}
20
}
21
switch (reg - nregs) {
21
22
- case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
22
+ old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
23
- case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
23
+ new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
24
- case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
24
+
25
+ case 0:
25
+ /*
26
+ return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]);
26
+ * Both AArch64.TakeException and AArch64.ExceptionReturn
27
+ case 1:
27
+ * invoke ResetSVEState when taking an exception from, or
28
+ return gdb_get_reg32(buf, vfp_get_fpscr(env));
28
+ * returning to, AArch32 state when PSTATE.SM is enabled.
29
+ case 2:
29
+ */
30
+ return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]);
30
+ if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) {
31
}
31
+ arm_reset_sve_state(env);
32
return 0;
32
+ return;
33
}
33
+ }
34
@@ -XXX,XX +XXX,XX @@ static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
34
+
35
}
35
/*
36
}
36
* DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
37
switch (reg - nregs) {
37
* at ELx, or not available because the EL is in AArch32 state, then
38
- case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
38
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
39
- case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
39
* we already have the correct register contents when encountering the
40
- case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
40
* vq0->vq0 transition between EL0->EL1.
41
+ case 0:
41
*/
42
+ env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf);
42
- old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
43
+ return 4;
43
old_len = (old_a64 && !sve_exception_el(env, old_el)
44
+ case 1:
44
? sve_vqm1_for_el(env, old_el) : 0);
45
+ vfp_set_fpscr(env, ldl_p(buf));
45
- new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
46
+ return 4;
46
new_len = (new_a64 && !sve_exception_el(env, new_el)
47
+ case 2:
47
? sve_vqm1_for_el(env, new_el) : 0);
48
+ env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30);
48
49
+ return 4;
50
}
51
return 0;
52
}
53
@@ -XXX,XX +XXX,XX @@ static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
54
return gdb_get_reg32(buf, vfp_get_fpsr(env));
55
case 33:
56
/* FPCR */
57
- return gdb_get_reg32(buf,vfp_get_fpcr(env));
58
+ return gdb_get_reg32(buf, vfp_get_fpcr(env));
59
default:
60
return 0;
61
}
62
--
49
--
63
2.20.1
50
2.25.1
64
65
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Note that SME remains effectively disabled for user-only,
4
because we do not yet set CPACR_EL1.SMEN. This needs to
5
wait until the kernel ABI is implemented.
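
For context, the presence test used throughout the series boils down to the
SME field of ID_AA64PFR1_EL1 being non-zero. A hedged, standalone sketch of
that check (bit positions per the architectural register layout; this is not
a QEMU API, although cpu_isar_feature(aa64_sme, cpu) amounts to the same test):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: ID_AA64PFR1_EL1.SME is the 4-bit field at bits [27:24];
     * any non-zero value advertises the Scalable Matrix Extension. */
    static bool sme_implemented(uint64_t id_aa64pfr1)
    {
        return ((id_aa64pfr1 >> 24) & 0xf) != 0;
    }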
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220708151540.18136-33-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
docs/system/arm/emulation.rst | 4 ++++
13
target/arm/cpu64.c | 11 +++++++++++
14
2 files changed, 15 insertions(+)
15
16
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
17
index XXXXXXX..XXXXXXX 100644
18
--- a/docs/system/arm/emulation.rst
19
+++ b/docs/system/arm/emulation.rst
20
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
21
- FEAT_SHA512 (Advanced SIMD SHA512 instructions)
22
- FEAT_SM3 (Advanced SIMD SM3 instructions)
23
- FEAT_SM4 (Advanced SIMD SM4 instructions)
24
+- FEAT_SME (Scalable Matrix Extension)
25
+- FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode)
26
+- FEAT_SME_F64F64 (Double-precision floating-point outer product instructions)
27
+- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions)
28
- FEAT_SPECRES (Speculation restriction instructions)
29
- FEAT_SSBS (Speculative Store Bypass Safe)
30
- FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain)
31
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/arm/cpu64.c
34
+++ b/target/arm/cpu64.c
35
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
36
*/
37
t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */
38
t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0); /* FEAT_RASv1p1 + FEAT_DoubleFault */
39
+ t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */
40
t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */
41
cpu->isar.id_aa64pfr1 = t;
42
43
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
44
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */
45
cpu->isar.id_aa64dfr0 = t;
46
47
+ t = cpu->isar.id_aa64smfr0;
48
+ t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */
49
+ t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */
50
+ t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */
51
+ t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf); /* FEAT_SME */
52
+ t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */
53
+ t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */
54
+ t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */
55
+ cpu->isar.id_aa64smfr0 = t;
56
+
57
/* Replicate the same data to the 32-bit id registers. */
58
aa32_max_features(cpu);
59
60
--
61
2.25.1
1
The function ide_bus_new() does an in-place initialization. Rename
1
From: Richard Henderson <richard.henderson@linaro.org>
2
it to ide_bus_init() to follow our _init vs _new convention.
3
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220708151540.18136-34-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
7
Reviewed-by: Corey Minyard <cminyard@mvista.com>
8
Reviewed-by: John Snow <jsnow@redhat.com>
9
Acked-by: John Snow <jsnow@redhat.com> (Feel free to merge.)
10
Message-id: 20210923121153.23754-7-peter.maydell@linaro.org
11
---
7
---
12
include/hw/ide/internal.h | 4 ++--
8
linux-user/aarch64/target_cpu.h | 5 ++++-
13
hw/ide/ahci.c | 2 +-
9
1 file changed, 4 insertions(+), 1 deletion(-)
14
hw/ide/cmd646.c | 2 +-
15
hw/ide/isa.c | 2 +-
16
hw/ide/macio.c | 2 +-
17
hw/ide/microdrive.c | 2 +-
18
hw/ide/mmio.c | 2 +-
19
hw/ide/piix.c | 2 +-
20
hw/ide/qdev.c | 2 +-
21
hw/ide/sii3112.c | 2 +-
22
hw/ide/via.c | 2 +-
23
11 files changed, 12 insertions(+), 12 deletions(-)
24
10
25
diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h
11
diff --git a/linux-user/aarch64/target_cpu.h b/linux-user/aarch64/target_cpu.h
26
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
27
--- a/include/hw/ide/internal.h
13
--- a/linux-user/aarch64/target_cpu.h
28
+++ b/include/hw/ide/internal.h
14
+++ b/linux-user/aarch64/target_cpu.h
29
@@ -XXX,XX +XXX,XX @@ void ide_atapi_cmd(IDEState *s);
15
@@ -XXX,XX +XXX,XX @@ static inline void cpu_clone_regs_parent(CPUARMState *env, unsigned flags)
30
void ide_atapi_cmd_reply_end(IDEState *s);
16
31
17
static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
32
/* hw/ide/qdev.c */
33
-void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev,
34
- int bus_id, int max_units);
35
+void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev,
36
+ int bus_id, int max_units);
37
IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive);
38
39
int ide_handle_rw_error(IDEState *s, int error, int op);
40
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/hw/ide/ahci.c
43
+++ b/hw/ide/ahci.c
44
@@ -XXX,XX +XXX,XX @@ void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
45
for (i = 0; i < s->ports; i++) {
46
AHCIDevice *ad = &s->dev[i];
47
48
- ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1);
49
+ ide_bus_init(&ad->port, sizeof(ad->port), qdev, i, 1);
50
ide_init2(&ad->port, irqs[i]);
51
52
ad->hba = s;
53
diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/hw/ide/cmd646.c
56
+++ b/hw/ide/cmd646.c
57
@@ -XXX,XX +XXX,XX @@ static void pci_cmd646_ide_realize(PCIDevice *dev, Error **errp)
58
59
qdev_init_gpio_in(ds, cmd646_set_irq, 2);
60
for (i = 0; i < 2; i++) {
61
- ide_bus_new(&d->bus[i], sizeof(d->bus[i]), ds, i, 2);
62
+ ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2);
63
ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i));
64
65
bmdma_init(&d->bus[i], &d->bmdma[i], d);
66
diff --git a/hw/ide/isa.c b/hw/ide/isa.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/hw/ide/isa.c
69
+++ b/hw/ide/isa.c
70
@@ -XXX,XX +XXX,XX @@ static void isa_ide_realizefn(DeviceState *dev, Error **errp)
71
ISADevice *isadev = ISA_DEVICE(dev);
72
ISAIDEState *s = ISA_IDE(dev);
73
74
- ide_bus_new(&s->bus, sizeof(s->bus), dev, 0, 2);
75
+ ide_bus_init(&s->bus, sizeof(s->bus), dev, 0, 2);
76
ide_init_ioport(&s->bus, isadev, s->iobase, s->iobase2);
77
isa_init_irq(isadev, &s->irq, s->isairq);
78
ide_init2(&s->bus, s->irq);
79
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
80
index XXXXXXX..XXXXXXX 100644
81
--- a/hw/ide/macio.c
82
+++ b/hw/ide/macio.c
83
@@ -XXX,XX +XXX,XX @@ static void macio_ide_initfn(Object *obj)
84
SysBusDevice *d = SYS_BUS_DEVICE(obj);
85
MACIOIDEState *s = MACIO_IDE(obj);
86
87
- ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
88
+ ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
89
memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
90
sysbus_init_mmio(d, &s->mem);
91
sysbus_init_irq(d, &s->real_ide_irq);
92
diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c
93
index XXXXXXX..XXXXXXX 100644
94
--- a/hw/ide/microdrive.c
95
+++ b/hw/ide/microdrive.c
96
@@ -XXX,XX +XXX,XX @@ static void microdrive_init(Object *obj)
97
{
18
{
98
MicroDriveState *md = MICRODRIVE(obj);
19
- /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
99
20
+ /*
100
- ide_bus_new(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1);
21
+ * Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
101
+ ide_bus_init(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1);
22
* different from AArch32 Linux, which uses TPIDRRO.
23
*/
24
env->cp15.tpidr_el[0] = newtls;
25
+ /* TPIDR2_EL0 is cleared with CLONE_SETTLS. */
26
+ env->cp15.tpidr2_el0 = 0;
102
}
27
}
103
28
104
static void microdrive_class_init(ObjectClass *oc, void *data)
29
static inline abi_ulong get_sp_from_cpustate(CPUARMState *state)
105
diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c
106
index XXXXXXX..XXXXXXX 100644
107
--- a/hw/ide/mmio.c
108
+++ b/hw/ide/mmio.c
109
@@ -XXX,XX +XXX,XX @@ static void mmio_ide_initfn(Object *obj)
110
SysBusDevice *d = SYS_BUS_DEVICE(obj);
111
MMIOState *s = MMIO_IDE(obj);
112
113
- ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
114
+ ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
115
sysbus_init_irq(d, &s->irq);
116
}
117
118
diff --git a/hw/ide/piix.c b/hw/ide/piix.c
119
index XXXXXXX..XXXXXXX 100644
120
--- a/hw/ide/piix.c
121
+++ b/hw/ide/piix.c
122
@@ -XXX,XX +XXX,XX @@ static int pci_piix_init_ports(PCIIDEState *d)
123
int i, ret;
124
125
for (i = 0; i < 2; i++) {
126
- ide_bus_new(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2);
127
+ ide_bus_init(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2);
128
ret = ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase,
129
port_info[i].iobase2);
130
if (ret) {
131
diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c
132
index XXXXXXX..XXXXXXX 100644
133
--- a/hw/ide/qdev.c
134
+++ b/hw/ide/qdev.c
135
@@ -XXX,XX +XXX,XX @@ static const TypeInfo ide_bus_info = {
136
.class_init = ide_bus_class_init,
137
};
138
139
-void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev,
140
+void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev,
141
int bus_id, int max_units)
142
{
143
qbus_init(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL);
144
diff --git a/hw/ide/sii3112.c b/hw/ide/sii3112.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/hw/ide/sii3112.c
147
+++ b/hw/ide/sii3112.c
148
@@ -XXX,XX +XXX,XX @@ static void sii3112_pci_realize(PCIDevice *dev, Error **errp)
149
150
qdev_init_gpio_in(ds, sii3112_set_irq, 2);
151
for (i = 0; i < 2; i++) {
152
- ide_bus_new(&s->bus[i], sizeof(s->bus[i]), ds, i, 1);
153
+ ide_bus_init(&s->bus[i], sizeof(s->bus[i]), ds, i, 1);
154
ide_init2(&s->bus[i], qdev_get_gpio_in(ds, i));
155
156
bmdma_init(&s->bus[i], &s->bmdma[i], s);
157
diff --git a/hw/ide/via.c b/hw/ide/via.c
158
index XXXXXXX..XXXXXXX 100644
159
--- a/hw/ide/via.c
160
+++ b/hw/ide/via.c
161
@@ -XXX,XX +XXX,XX @@ static void via_ide_realize(PCIDevice *dev, Error **errp)
162
163
qdev_init_gpio_in(ds, via_ide_set_irq, 2);
164
for (i = 0; i < 2; i++) {
165
- ide_bus_new(&d->bus[i], sizeof(d->bus[i]), ds, i, 2);
166
+ ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2);
167
ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i));
168
169
bmdma_init(&d->bus[i], &d->bmdma[i], d);
170
--
30
--
171
2.20.1
31
2.25.1
172
173
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220708151540.18136-35-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
linux-user/aarch64/cpu_loop.c | 9 +++++++++
9
1 file changed, 9 insertions(+)
10
11
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/linux-user/aarch64/cpu_loop.c
14
+++ b/linux-user/aarch64/cpu_loop.c
15
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)
16
17
switch (trapnr) {
18
case EXCP_SWI:
19
+ /*
20
+ * On syscall, PSTATE.ZA is preserved, along with the ZA matrix.
21
+ * PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState.
22
+ */
23
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
24
+ env->svcr = FIELD_DP64(env->svcr, SVCR, SM, 0);
25
+ arm_rebuild_hflags(env);
26
+ arm_reset_sve_state(env);
27
+ }
28
ret = do_syscall(env,
29
env->xregs[8],
30
env->xregs[0],
31
--
32
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Make sure to zero the currently reserved fields.
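
The point of the memset() in the hunk below is that reserved and flag bytes
reach the guest as zeroes rather than stale QEMU memory. The general pattern,
as a small sketch with a hypothetical record type (names are illustrative,
not the target_sve_context layout itself):

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    /* Hypothetical signal-record layout, for illustration only. */
    struct sig_record {
        uint16_t vl;
        uint16_t flags;
        uint16_t reserved[2];
    };

    #define SIG_FLAG_SM 1   /* illustrative flag bit */

    static void setup_record(struct sig_record *r, uint16_t vl, bool streaming)
    {
        memset(r, 0, sizeof(*r));   /* reserved[] and padding become zero */
        r->vl = vl;
        if (streaming) {
            r->flags |= SIG_FLAG_SM;
        }
    }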
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220708151540.18136-36-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
linux-user/aarch64/signal.c | 9 ++++++++-
11
1 file changed, 8 insertions(+), 1 deletion(-)
12
13
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/linux-user/aarch64/signal.c
16
+++ b/linux-user/aarch64/signal.c
17
@@ -XXX,XX +XXX,XX @@ struct target_extra_context {
18
struct target_sve_context {
19
struct target_aarch64_ctx head;
20
uint16_t vl;
21
- uint16_t reserved[3];
22
+ uint16_t flags;
23
+ uint16_t reserved[2];
24
/* The actual SVE data immediately follows. It is laid out
25
* according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
26
* the original struct pointer.
27
@@ -XXX,XX +XXX,XX @@ struct target_sve_context {
28
#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
29
(TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))
30
31
+#define TARGET_SVE_SIG_FLAG_SM 1
32
+
33
struct target_rt_sigframe {
34
struct target_siginfo info;
35
struct target_ucontext uc;
36
@@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve,
37
{
38
int i, j;
39
40
+ memset(sve, 0, sizeof(*sve));
41
__put_user(TARGET_SVE_MAGIC, &sve->head.magic);
42
__put_user(size, &sve->head.size);
43
__put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);
44
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
45
+ __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags);
46
+ }
47
48
/* Note that SVE regs are stored as a byte stream, with each byte element
49
* at a subsequent address. This corresponds to a little-endian store
50
--
51
2.25.1
1
Rename ipack_bus_new_inplace() to ipack_bus_init(), to bring it into
1
From: Richard Henderson <richard.henderson@linaro.org>
2
line with a "_init for in-place init, _new for allocate-and-return"
3
convention. Drop the 'name' argument, because the only caller does
4
not pass in a name. If a future caller does need to specify the bus
5
name, we should create an ipack_bus_init_named() function at that
6
point.
7
2
3
Fold the return value setting into the goto, so each
4
point of failure need not do both.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-37-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
11
Message-id: 20210923121153.23754-3-peter.maydell@linaro.org
12
---
10
---
13
include/hw/ipack/ipack.h | 8 ++++----
11
linux-user/aarch64/signal.c | 26 +++++++++++---------------
14
hw/ipack/ipack.c | 10 +++++-----
12
1 file changed, 11 insertions(+), 15 deletions(-)
15
hw/ipack/tpci200.c | 4 ++--
16
3 files changed, 11 insertions(+), 11 deletions(-)
17
13
18
diff --git a/include/hw/ipack/ipack.h b/include/hw/ipack/ipack.h
14
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
19
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
20
--- a/include/hw/ipack/ipack.h
16
--- a/linux-user/aarch64/signal.c
21
+++ b/include/hw/ipack/ipack.h
17
+++ b/linux-user/aarch64/signal.c
22
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ipack_device;
18
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
23
VMSTATE_STRUCT(_field, _state, 1, vmstate_ipack_device, IPackDevice)
19
struct target_sve_context *sve = NULL;
24
20
uint64_t extra_datap = 0;
25
IPackDevice *ipack_device_find(IPackBus *bus, int32_t slot);
21
bool used_extra = false;
26
-void ipack_bus_new_inplace(IPackBus *bus, size_t bus_size,
22
- bool err = false;
27
- DeviceState *parent,
23
int vq = 0, sve_size = 0;
28
- const char *name, uint8_t n_slots,
24
29
- qemu_irq_handler handler);
25
target_restore_general_frame(env, sf);
30
+void ipack_bus_init(IPackBus *bus, size_t bus_size,
26
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
31
+ DeviceState *parent,
27
switch (magic) {
32
+ uint8_t n_slots,
28
case 0:
33
+ qemu_irq_handler handler);
29
if (size != 0) {
34
30
- err = true;
35
#endif
31
- goto exit;
36
diff --git a/hw/ipack/ipack.c b/hw/ipack/ipack.c
32
+ goto err;
37
index XXXXXXX..XXXXXXX 100644
33
}
38
--- a/hw/ipack/ipack.c
34
if (used_extra) {
39
+++ b/hw/ipack/ipack.c
35
ctx = NULL;
40
@@ -XXX,XX +XXX,XX @@ IPackDevice *ipack_device_find(IPackBus *bus, int32_t slot)
36
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
41
return NULL;
37
38
case TARGET_FPSIMD_MAGIC:
39
if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
40
- err = true;
41
- goto exit;
42
+ goto err;
43
}
44
fpsimd = (struct target_fpsimd_context *)ctx;
45
break;
46
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
47
break;
48
}
49
}
50
- err = true;
51
- goto exit;
52
+ goto err;
53
54
case TARGET_EXTRA_MAGIC:
55
if (extra || size != sizeof(struct target_extra_context)) {
56
- err = true;
57
- goto exit;
58
+ goto err;
59
}
60
__get_user(extra_datap,
61
&((struct target_extra_context *)ctx)->datap);
62
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
63
/* Unknown record -- we certainly didn't generate it.
64
* Did we in fact get out of sync?
65
*/
66
- err = true;
67
- goto exit;
68
+ goto err;
69
}
70
ctx = (void *)ctx + size;
71
}
72
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
73
if (fpsimd) {
74
target_restore_fpsimd_record(env, fpsimd);
75
} else {
76
- err = true;
77
+ goto err;
78
}
79
80
/* SVE data, if present, overwrites FPSIMD data. */
81
if (sve) {
82
target_restore_sve_record(env, sve, vq);
83
}
84
-
85
- exit:
86
unlock_user(extra, extra_datap, 0);
87
- return err;
88
+ return 0;
89
+
90
+ err:
91
+ unlock_user(extra, extra_datap, 0);
92
+ return 1;
42
}
93
}
43
94
44
-void ipack_bus_new_inplace(IPackBus *bus, size_t bus_size,
95
static abi_ulong get_sigframe(struct target_sigaction *ka,
45
- DeviceState *parent,
46
- const char *name, uint8_t n_slots,
47
- qemu_irq_handler handler)
48
+void ipack_bus_init(IPackBus *bus, size_t bus_size,
49
+ DeviceState *parent,
50
+ uint8_t n_slots,
51
+ qemu_irq_handler handler)
52
{
53
- qbus_create_inplace(bus, bus_size, TYPE_IPACK_BUS, parent, name);
54
+ qbus_create_inplace(bus, bus_size, TYPE_IPACK_BUS, parent, NULL);
55
bus->n_slots = n_slots;
56
bus->set_irq = handler;
57
}
58
diff --git a/hw/ipack/tpci200.c b/hw/ipack/tpci200.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/hw/ipack/tpci200.c
61
+++ b/hw/ipack/tpci200.c
62
@@ -XXX,XX +XXX,XX @@ static void tpci200_realize(PCIDevice *pci_dev, Error **errp)
63
pci_register_bar(&s->dev, 4, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->las2);
64
pci_register_bar(&s->dev, 5, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->las3);
65
66
- ipack_bus_new_inplace(&s->bus, sizeof(s->bus), DEVICE(pci_dev), NULL,
67
- N_MODULES, tpci200_set_irq);
68
+ ipack_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev),
69
+ N_MODULES, tpci200_set_irq);
70
}
71
72
static const VMStateDescription vmstate_tpci200 = {
73
--
96
--
74
2.20.1
97
2.25.1
75
76
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
In parse_user_sigframe, the kernel rejects duplicate sve records,
4
or records that are smaller than the header. We were silently
5
allowing these cases to pass, dropping the record.
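
The shape of the guard mirrors the kernel's parser: reject a second SVE
record, and reject any record shorter than its own header, before looking
inside it. A standalone sketch of that check (names are illustrative, not
the QEMU ones):

    #include <stddef.h>
    #include <stdbool.h>

    struct record_header { unsigned magic; unsigned size; };

    /* Sketch: returns false (reject) on a duplicate or undersized record. */
    static bool accept_sve_record(const struct record_header *hdr,
                                  bool *seen_sve, size_t min_size)
    {
        if (*seen_sve || hdr->size < min_size) {
            return false;   /* duplicate, or smaller than the header */
        }
        *seen_sve = true;
        return true;
    }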
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220708151540.18136-38-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
linux-user/aarch64/signal.c | 5 ++++-
13
1 file changed, 4 insertions(+), 1 deletion(-)
14
15
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/linux-user/aarch64/signal.c
18
+++ b/linux-user/aarch64/signal.c
19
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
20
break;
21
22
case TARGET_SVE_MAGIC:
23
+ if (sve || size < sizeof(struct target_sve_context)) {
24
+ goto err;
25
+ }
26
if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
27
vq = sve_vq(env);
28
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
29
- if (!sve && size == sve_size) {
30
+ if (size == sve_size) {
31
sve = (struct target_sve_context *)ctx;
32
break;
33
}
34
--
35
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20220708151540.18136-39-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
8
linux-user/aarch64/signal.c | 3 +++
9
1 file changed, 3 insertions(+)
10
11
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/linux-user/aarch64/signal.c
14
+++ b/linux-user/aarch64/signal.c
15
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
16
__get_user(extra_size,
17
&((struct target_extra_context *)ctx)->size);
18
extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
19
+ if (!extra) {
20
+ return 1;
21
+ }
22
break;
23
24
default:
25
--
26
2.25.1
1
Rename the "allocate and return" qbus creation function to
1
From: Richard Henderson <richard.henderson@linaro.org>
2
qbus_new(), to bring it into line with our _init vs _new convention.
3
2
3
Move the checks out of the parsing loop and into the
4
restore function. This more closely mirrors the code
5
structure in the kernel, and is slightly clearer.
6
7
Reject rather than silently skip incorrect VL and SVE record sizes,
8
bringing our checks into line with those the kernel does.
9
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20220708151540.18136-40-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
7
Reviewed-by: Corey Minyard <cminyard@mvista.com>
8
Message-id: 20210923121153.23754-6-peter.maydell@linaro.org
9
---
14
---
10
include/hw/qdev-core.h | 2 +-
15
linux-user/aarch64/signal.c | 51 +++++++++++++++++++++++++------------
11
hw/core/bus.c | 2 +-
16
1 file changed, 35 insertions(+), 16 deletions(-)
12
hw/hyperv/vmbus.c | 2 +-
13
hw/i2c/core.c | 2 +-
14
hw/isa/isa-bus.c | 2 +-
15
hw/misc/auxbus.c | 2 +-
16
hw/pci/pci.c | 2 +-
17
hw/ppc/spapr_vio.c | 2 +-
18
hw/s390x/ap-bridge.c | 2 +-
19
hw/s390x/css-bridge.c | 2 +-
20
hw/s390x/s390-pci-bus.c | 2 +-
21
hw/ssi/ssi.c | 2 +-
22
hw/xen/xen-bus.c | 2 +-
23
hw/xen/xen-legacy-backend.c | 2 +-
24
14 files changed, 14 insertions(+), 14 deletions(-)
25
17
26
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
18
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
27
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
28
--- a/include/hw/qdev-core.h
20
--- a/linux-user/aarch64/signal.c
29
+++ b/include/hw/qdev-core.h
21
+++ b/linux-user/aarch64/signal.c
30
@@ -XXX,XX +XXX,XX @@ typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque);
22
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,
31
23
}
32
void qbus_init(void *bus, size_t size, const char *typename,
33
DeviceState *parent, const char *name);
34
-BusState *qbus_create(const char *typename, DeviceState *parent, const char *name);
35
+BusState *qbus_new(const char *typename, DeviceState *parent, const char *name);
36
bool qbus_realize(BusState *bus, Error **errp);
37
void qbus_unrealize(BusState *bus);
38
39
diff --git a/hw/core/bus.c b/hw/core/bus.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/hw/core/bus.c
42
+++ b/hw/core/bus.c
43
@@ -XXX,XX +XXX,XX @@ void qbus_init(void *bus, size_t size, const char *typename,
44
qbus_init_internal(bus, parent, name);
45
}
24
}
46
25
47
-BusState *qbus_create(const char *typename, DeviceState *parent, const char *name)
26
-static void target_restore_sve_record(CPUARMState *env,
48
+BusState *qbus_new(const char *typename, DeviceState *parent, const char *name)
27
- struct target_sve_context *sve, int vq)
28
+static bool target_restore_sve_record(CPUARMState *env,
29
+ struct target_sve_context *sve,
30
+ int size)
49
{
31
{
50
BusState *bus;
32
- int i, j;
51
33
+ int i, j, vl, vq;
52
diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c
34
53
index XXXXXXX..XXXXXXX 100644
35
- /* Note that SVE regs are stored as a byte stream, with each byte element
54
--- a/hw/hyperv/vmbus.c
36
+ if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
55
+++ b/hw/hyperv/vmbus.c
37
+ return false;
56
@@ -XXX,XX +XXX,XX @@ static void vmbus_bridge_realize(DeviceState *dev, Error **errp)
38
+ }
57
return;
39
+
40
+ __get_user(vl, &sve->vl);
41
+ vq = sve_vq(env);
42
+
43
+ /* Reject mismatched VL. */
44
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
45
+ return false;
46
+ }
47
+
48
+ /* Accept empty record -- used to clear PSTATE.SM. */
49
+ if (size <= sizeof(*sve)) {
50
+ return true;
51
+ }
52
+
53
+ /* Reject non-empty but incomplete record. */
54
+ if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) {
55
+ return false;
56
+ }
57
+
58
+ /*
59
+ * Note that SVE regs are stored as a byte stream, with each byte element
60
* at a subsequent address. This corresponds to a little-endian load
61
* of our 64-bit hunks.
62
*/
63
@@ -XXX,XX +XXX,XX @@ static void target_restore_sve_record(CPUARMState *env,
64
}
65
}
58
}
66
}
59
67
+ return true;
60
- bridge->bus = VMBUS(qbus_create(TYPE_VMBUS, dev, "vmbus"));
61
+ bridge->bus = VMBUS(qbus_new(TYPE_VMBUS, dev, "vmbus"));
62
}
68
}
63
69
64
static char *vmbus_bridge_ofw_unit_address(const SysBusDevice *dev)
70
static int target_restore_sigframe(CPUARMState *env,
65
diff --git a/hw/i2c/core.c b/hw/i2c/core.c
71
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
66
index XXXXXXX..XXXXXXX 100644
72
struct target_sve_context *sve = NULL;
67
--- a/hw/i2c/core.c
73
uint64_t extra_datap = 0;
68
+++ b/hw/i2c/core.c
74
bool used_extra = false;
69
@@ -XXX,XX +XXX,XX @@ I2CBus *i2c_init_bus(DeviceState *parent, const char *name)
75
- int vq = 0, sve_size = 0;
70
{
76
+ int sve_size = 0;
71
I2CBus *bus;
77
72
78
target_restore_general_frame(env, sf);
73
- bus = I2C_BUS(qbus_create(TYPE_I2C_BUS, parent, name));
79
74
+ bus = I2C_BUS(qbus_new(TYPE_I2C_BUS, parent, name));
80
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
75
QLIST_INIT(&bus->current_devs);
81
if (sve || size < sizeof(struct target_sve_context)) {
76
vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_i2c_bus, bus);
82
goto err;
77
return bus;
83
}
78
diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c
84
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
79
index XXXXXXX..XXXXXXX 100644
85
- vq = sve_vq(env);
80
--- a/hw/isa/isa-bus.c
86
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
81
+++ b/hw/isa/isa-bus.c
87
- if (size == sve_size) {
82
@@ -XXX,XX +XXX,XX @@ ISABus *isa_bus_new(DeviceState *dev, MemoryRegion* address_space,
88
- sve = (struct target_sve_context *)ctx;
83
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
89
- break;
90
- }
91
- }
92
- goto err;
93
+ sve = (struct target_sve_context *)ctx;
94
+ sve_size = size;
95
+ break;
96
97
case TARGET_EXTRA_MAGIC:
98
if (extra || size != sizeof(struct target_extra_context)) {
99
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
84
}
100
}
85
101
86
- isabus = ISA_BUS(qbus_create(TYPE_ISA_BUS, dev, NULL));
102
/* SVE data, if present, overwrites FPSIMD data. */
87
+ isabus = ISA_BUS(qbus_new(TYPE_ISA_BUS, dev, NULL));
103
- if (sve) {
88
isabus->address_space = address_space;
104
- target_restore_sve_record(env, sve, vq);
89
isabus->address_space_io = address_space_io;
105
+ if (sve && !target_restore_sve_record(env, sve, sve_size)) {
90
return isabus;
106
+ goto err;
91
diff --git a/hw/misc/auxbus.c b/hw/misc/auxbus.c
107
}
92
index XXXXXXX..XXXXXXX 100644
108
unlock_user(extra, extra_datap, 0);
93
--- a/hw/misc/auxbus.c
94
+++ b/hw/misc/auxbus.c
95
@@ -XXX,XX +XXX,XX @@ AUXBus *aux_bus_init(DeviceState *parent, const char *name)
96
AUXBus *bus;
97
Object *auxtoi2c;
98
99
- bus = AUX_BUS(qbus_create(TYPE_AUX_BUS, parent, name));
100
+ bus = AUX_BUS(qbus_new(TYPE_AUX_BUS, parent, name));
101
auxtoi2c = object_new_with_props(TYPE_AUXTOI2C, OBJECT(bus), "i2c",
102
&error_abort, NULL);
103
104
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
105
index XXXXXXX..XXXXXXX 100644
106
--- a/hw/pci/pci.c
107
+++ b/hw/pci/pci.c
108
@@ -XXX,XX +XXX,XX @@ PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
109
{
110
PCIBus *bus;
111
112
- bus = PCI_BUS(qbus_create(typename, parent, name));
113
+ bus = PCI_BUS(qbus_new(typename, parent, name));
114
pci_root_bus_internal_init(bus, parent, address_space_mem,
115
address_space_io, devfn_min);
116
return bus;
117
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
118
index XXXXXXX..XXXXXXX 100644
119
--- a/hw/ppc/spapr_vio.c
120
+++ b/hw/ppc/spapr_vio.c
121
@@ -XXX,XX +XXX,XX @@ SpaprVioBus *spapr_vio_bus_init(void)
122
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
123
124
/* Create bus on bridge device */
125
- qbus = qbus_create(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio");
126
+ qbus = qbus_new(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio");
127
bus = SPAPR_VIO_BUS(qbus);
128
bus->next_reg = SPAPR_VIO_REG_BASE;
129
130
diff --git a/hw/s390x/ap-bridge.c b/hw/s390x/ap-bridge.c
131
index XXXXXXX..XXXXXXX 100644
132
--- a/hw/s390x/ap-bridge.c
133
+++ b/hw/s390x/ap-bridge.c
134
@@ -XXX,XX +XXX,XX @@ void s390_init_ap(void)
135
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
136
137
/* Create bus on bridge device */
138
- bus = qbus_create(TYPE_AP_BUS, dev, TYPE_AP_BUS);
139
+ bus = qbus_new(TYPE_AP_BUS, dev, TYPE_AP_BUS);
140
141
/* Enable hotplugging */
142
qbus_set_hotplug_handler(bus, OBJECT(dev));
143
diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c
144
index XXXXXXX..XXXXXXX 100644
145
--- a/hw/s390x/css-bridge.c
146
+++ b/hw/s390x/css-bridge.c
147
@@ -XXX,XX +XXX,XX @@ VirtualCssBus *virtual_css_bus_init(void)
148
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
149
150
/* Create bus on bridge device */
151
- bus = qbus_create(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css");
152
+ bus = qbus_new(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css");
153
cbus = VIRTUAL_CSS_BUS(bus);
154
155
/* Enable hotplugging */
156
diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
157
index XXXXXXX..XXXXXXX 100644
158
--- a/hw/s390x/s390-pci-bus.c
159
+++ b/hw/s390x/s390-pci-bus.c
160
@@ -XXX,XX +XXX,XX @@ static void s390_pcihost_realize(DeviceState *dev, Error **errp)
161
qbus_set_hotplug_handler(bus, OBJECT(dev));
162
phb->bus = b;
163
164
- s->bus = S390_PCI_BUS(qbus_create(TYPE_S390_PCI_BUS, dev, NULL));
165
+ s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL));
166
qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev));
167
168
s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal,
169
diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c
170
index XXXXXXX..XXXXXXX 100644
171
--- a/hw/ssi/ssi.c
172
+++ b/hw/ssi/ssi.c
173
@@ -XXX,XX +XXX,XX @@ DeviceState *ssi_create_peripheral(SSIBus *bus, const char *name)
174
SSIBus *ssi_create_bus(DeviceState *parent, const char *name)
175
{
176
BusState *bus;
177
- bus = qbus_create(TYPE_SSI_BUS, parent, name);
178
+ bus = qbus_new(TYPE_SSI_BUS, parent, name);
179
return SSI_BUS(bus);
180
}
181
182
diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c
183
index XXXXXXX..XXXXXXX 100644
184
--- a/hw/xen/xen-bus.c
185
+++ b/hw/xen/xen-bus.c
186
@@ -XXX,XX +XXX,XX @@ type_init(xen_register_types)
187
void xen_bus_init(void)
188
{
189
DeviceState *dev = qdev_new(TYPE_XEN_BRIDGE);
190
- BusState *bus = qbus_create(TYPE_XEN_BUS, dev, NULL);
191
+ BusState *bus = qbus_new(TYPE_XEN_BUS, dev, NULL);
192
193
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
194
qbus_set_bus_hotplug_handler(bus);
195
diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c
196
index XXXXXXX..XXXXXXX 100644
197
--- a/hw/xen/xen-legacy-backend.c
198
+++ b/hw/xen/xen-legacy-backend.c
199
@@ -XXX,XX +XXX,XX @@ int xen_be_init(void)
200
201
xen_sysdev = qdev_new(TYPE_XENSYSDEV);
202
sysbus_realize_and_unref(SYS_BUS_DEVICE(xen_sysdev), &error_fatal);
203
- xen_sysbus = qbus_create(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus");
204
+ xen_sysbus = qbus_new(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus");
205
qbus_set_bus_hotplug_handler(xen_sysbus);
206
207
return 0;
109
return 0;
208
--
110
--
209
2.20.1
111
2.25.1
210
211
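For quick reference, the acceptance rules that the restore function above now applies can be condensed into the following stand-alone sketch (the helper name and parameters are illustrative only; in the real code header_size stands for sizeof(struct target_sve_context) and full_size for TARGET_SVE_SIG_CONTEXT_SIZE(vq)):

    #include <stdbool.h>

    /* Illustrative restatement of the checks described above. */
    static bool sve_record_size_ok(int size, int vl, int vq,
                                   int header_size, int full_size)
    {
        if (vl != vq * 16) {           /* TARGET_SVE_VQ_BYTES is 16 */
            return false;              /* reject a mismatched vector length */
        }
        if (size <= header_size) {
            return true;               /* empty record: header only, accepted */
        }
        return size >= full_size;      /* non-empty records must be complete */
    }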
1
From: Tong Ho <tong.ho@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Connect the support for Xilinx ZynqMP Battery-Backed RAM (BBRAM)
3
Set the SM bit in the SVE record on signal delivery, create the ZA record.
4
Restore SM and ZA state according to the records present on return.
4
5
5
The command argument:
6
-drive if=pflash,index=2,...
7
can be used to optionally connect the BBRAM to backend
8
storage, such that field-programmed values in one
9
invocation can be made available to the next invocation.
10
11
The backend storage must be a seekable binary file, and
12
its size must be 36 bytes or larger. A file with all
13
binary 0s is a 'blank'.
14
15
Signed-off-by: Tong Ho <tong.ho@xilinx.com>
16
Message-id: 20210917052400.1249094-8-tong.ho@xilinx.com
17
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-41-richard.henderson@linaro.org
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
---
10
---
20
include/hw/arm/xlnx-zynqmp.h | 2 ++
11
linux-user/aarch64/signal.c | 167 +++++++++++++++++++++++++++++++++---
21
hw/arm/xlnx-zcu102.c | 15 +++++++++++++++
12
1 file changed, 154 insertions(+), 13 deletions(-)
22
hw/arm/xlnx-zynqmp.c | 20 ++++++++++++++++++++
23
hw/Kconfig | 1 +
24
4 files changed, 38 insertions(+)
25
13
26
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
14
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
27
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
28
--- a/include/hw/arm/xlnx-zynqmp.h
16
--- a/linux-user/aarch64/signal.c
29
+++ b/include/hw/arm/xlnx-zynqmp.h
17
+++ b/linux-user/aarch64/signal.c
30
@@ -XXX,XX +XXX,XX @@
18
@@ -XXX,XX +XXX,XX @@ struct target_sve_context {
31
#include "qom/object.h"
19
32
#include "net/can_emu.h"
20
#define TARGET_SVE_SIG_FLAG_SM 1
33
#include "hw/dma/xlnx_csu_dma.h"
21
34
+#include "hw/nvram/xlnx-bbram.h"
22
+#define TARGET_ZA_MAGIC 0x54366345
35
23
+
36
#define TYPE_XLNX_ZYNQMP "xlnx-zynqmp"
24
+struct target_za_context {
37
OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
25
+ struct target_aarch64_ctx head;
38
@@ -XXX,XX +XXX,XX @@ struct XlnxZynqMPState {
26
+ uint16_t vl;
39
27
+ uint16_t reserved[3];
40
MemoryRegion *ddr_ram;
28
+ /* The actual ZA data immediately follows. */
41
MemoryRegion ddr_ram_low, ddr_ram_high;
29
+};
42
+ XlnxBBRam bbram;
30
+
43
31
+#define TARGET_ZA_SIG_REGS_OFFSET \
44
MemoryRegion mr_unimp[XLNX_ZYNQMP_NUM_UNIMP_AREAS];
32
+ QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
45
33
+#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
46
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
34
+ (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
47
index XXXXXXX..XXXXXXX 100644
35
+#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
48
--- a/hw/arm/xlnx-zcu102.c
36
+ TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
49
+++ b/hw/arm/xlnx-zcu102.c
37
+
50
@@ -XXX,XX +XXX,XX @@ static void zcu102_modify_dtb(const struct arm_boot_info *binfo, void *fdt)
38
struct target_rt_sigframe {
51
}
39
struct target_siginfo info;
40
struct target_ucontext uc;
41
@@ -XXX,XX +XXX,XX @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
52
}
42
}
53
43
54
+static void bbram_attach_drive(XlnxBBRam *dev)
44
static void target_setup_sve_record(struct target_sve_context *sve,
45
- CPUARMState *env, int vq, int size)
46
+ CPUARMState *env, int size)
47
{
48
- int i, j;
49
+ int i, j, vq = sve_vq(env);
50
51
memset(sve, 0, sizeof(*sve));
52
__put_user(TARGET_SVE_MAGIC, &sve->head.magic);
53
@@ -XXX,XX +XXX,XX @@ static void target_setup_sve_record(struct target_sve_context *sve,
54
}
55
}
56
57
+static void target_setup_za_record(struct target_za_context *za,
58
+ CPUARMState *env, int size)
55
+{
59
+{
56
+ DriveInfo *dinfo;
60
+ int vq = sme_vq(env);
57
+ BlockBackend *blk;
61
+ int vl = vq * TARGET_SVE_VQ_BYTES;
58
+
62
+ int i, j;
59
+ dinfo = drive_get_by_index(IF_PFLASH, 2);
63
+
60
+ blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL;
64
+ memset(za, 0, sizeof(*za));
61
+ if (blk) {
65
+ __put_user(TARGET_ZA_MAGIC, &za->head.magic);
62
+ qdev_prop_set_drive(DEVICE(dev), "drive", blk);
66
+ __put_user(size, &za->head.size);
67
+ __put_user(vl, &za->vl);
68
+
69
+ if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
70
+ return;
71
+ }
72
+ assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq));
73
+
74
+ /*
75
+ * Note that ZA vectors are stored as a byte stream,
76
+ * with each byte element at a subsequent address.
77
+ */
78
+ for (i = 0; i < vl; ++i) {
79
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
80
+ for (j = 0; j < vq * 2; ++j) {
81
+ __put_user_e(env->zarray[i].d[j], z + j, le);
82
+ }
63
+ }
83
+ }
64
+}
84
+}
65
+
85
+
66
static void xlnx_zcu102_init(MachineState *machine)
86
static void target_restore_general_frame(CPUARMState *env,
67
{
87
struct target_rt_sigframe *sf)
68
XlnxZCU102 *s = ZCU102_MACHINE(machine);
88
{
69
@@ -XXX,XX +XXX,XX @@ static void xlnx_zcu102_init(MachineState *machine)
89
@@ -XXX,XX +XXX,XX @@ static void target_restore_fpsimd_record(CPUARMState *env,
70
90
71
qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
91
static bool target_restore_sve_record(CPUARMState *env,
72
92
struct target_sve_context *sve,
73
+ /* Attach bbram backend, if given */
93
- int size)
74
+ bbram_attach_drive(&s->soc.bbram);
94
+ int size, int *svcr)
75
+
95
{
76
/* Create and plug in the SD cards */
96
- int i, j, vl, vq;
77
for (i = 0; i < XLNX_ZYNQMP_NUM_SDHCI; i++) {
97
+ int i, j, vl, vq, flags;
78
BusState *bus;
98
+ bool sm;
79
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
99
80
index XXXXXXX..XXXXXXX 100644
100
- if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
81
--- a/hw/arm/xlnx-zynqmp.c
101
+ __get_user(vl, &sve->vl);
82
+++ b/hw/arm/xlnx-zynqmp.c
102
+ __get_user(flags, &sve->flags);
83
@@ -XXX,XX +XXX,XX @@
103
+
84
#define RTC_ADDR 0xffa60000
104
+ sm = flags & TARGET_SVE_SIG_FLAG_SM;
85
#define RTC_IRQ 26
105
+
86
106
+ /* The cpu must support Streaming or Non-streaming SVE. */
87
+#define BBRAM_ADDR 0xffcd0000
107
+ if (sm
88
+#define BBRAM_IRQ 11
108
+ ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
89
+
109
+ : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
90
#define SDHCI_CAPABILITIES 0x280737ec6481 /* Datasheet: UG1085 (v1.7) */
110
return false;
91
111
}
92
static const uint64_t gem_addr[XLNX_ZYNQMP_NUM_GEMS] = {
112
93
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s,
113
- __get_user(vl, &sve->vl);
94
qdev_realize(DEVICE(&s->rpu_cluster), NULL, &error_fatal);
114
- vq = sve_vq(env);
115
+ /*
116
+ * Note that we cannot use sve_vq() because that depends on the
117
+ * current setting of PSTATE.SM, not the state to be restored.
118
+ */
119
+ vq = sve_vqm1_for_el_sm(env, 0, sm) + 1;
120
121
/* Reject mismatched VL. */
122
if (vl != vq * TARGET_SVE_VQ_BYTES) {
123
@@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env,
124
return false;
125
}
126
127
+ *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);
128
+
129
/*
130
* Note that SVE regs are stored as a byte stream, with each byte element
131
* at a subsequent address. This corresponds to a little-endian load
132
@@ -XXX,XX +XXX,XX @@ static bool target_restore_sve_record(CPUARMState *env,
133
return true;
95
}
134
}
96
135
97
+static void xlnx_zynqmp_create_bbram(XlnxZynqMPState *s, qemu_irq *gic)
136
+static bool target_restore_za_record(CPUARMState *env,
137
+ struct target_za_context *za,
138
+ int size, int *svcr)
98
+{
139
+{
99
+ SysBusDevice *sbd;
140
+ int i, j, vl, vq;
100
+
141
+
101
+ object_initialize_child_with_props(OBJECT(s), "bbram", &s->bbram,
142
+ if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
102
+ sizeof(s->bbram), TYPE_XLNX_BBRAM,
143
+ return false;
103
+ &error_fatal,
144
+ }
104
+ "crc-zpads", "1",
145
+
105
+ NULL);
146
+ __get_user(vl, &za->vl);
106
+ sbd = SYS_BUS_DEVICE(&s->bbram);
147
+ vq = sme_vq(env);
107
+
148
+
108
+ sysbus_realize(sbd, &error_fatal);
149
+ /* Reject mismatched VL. */
109
+ sysbus_mmio_map(sbd, 0, BBRAM_ADDR);
150
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
110
+ sysbus_connect_irq(sbd, 0, gic[BBRAM_IRQ]);
151
+ return false;
152
+ }
153
+
154
+ /* Accept empty record -- used to clear PSTATE.ZA. */
155
+ if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
156
+ return true;
157
+ }
158
+
159
+ /* Reject non-empty but incomplete record. */
160
+ if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
161
+ return false;
162
+ }
163
+
164
+ *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);
165
+
166
+ for (i = 0; i < vl; ++i) {
167
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
168
+ for (j = 0; j < vq * 2; ++j) {
169
+ __get_user_e(env->zarray[i].d[j], z + j, le);
170
+ }
171
+ }
172
+ return true;
111
+}
173
+}
112
+
174
+
113
static void xlnx_zynqmp_create_unimp_mmio(XlnxZynqMPState *s)
175
static int target_restore_sigframe(CPUARMState *env,
114
{
176
struct target_rt_sigframe *sf)
115
static const struct UnimpInfo {
177
{
116
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
178
struct target_aarch64_ctx *ctx, *extra = NULL;
117
sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, RTC_ADDR);
179
struct target_fpsimd_context *fpsimd = NULL;
118
sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, gic_spi[RTC_IRQ]);
180
struct target_sve_context *sve = NULL;
119
181
+ struct target_za_context *za = NULL;
120
+ xlnx_zynqmp_create_bbram(s, gic_spi);
182
uint64_t extra_datap = 0;
121
xlnx_zynqmp_create_unimp_mmio(s);
183
bool used_extra = false;
122
184
int sve_size = 0;
123
for (i = 0; i < XLNX_ZYNQMP_NUM_GDMA_CH; i++) {
185
+ int za_size = 0;
124
diff --git a/hw/Kconfig b/hw/Kconfig
186
+ int svcr = 0;
125
index XXXXXXX..XXXXXXX 100644
187
126
--- a/hw/Kconfig
188
target_restore_general_frame(env, sf);
127
+++ b/hw/Kconfig
189
128
@@ -XXX,XX +XXX,XX @@ config XLNX_ZYNQMP
190
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
129
select REGISTER
191
sve_size = size;
130
select CAN_BUS
192
break;
131
select PTIMER
193
132
+ select XLNX_BBRAM
194
+ case TARGET_ZA_MAGIC:
195
+ if (za || size < sizeof(struct target_za_context)) {
196
+ goto err;
197
+ }
198
+ za = (struct target_za_context *)ctx;
199
+ za_size = size;
200
+ break;
201
+
202
case TARGET_EXTRA_MAGIC:
203
if (extra || size != sizeof(struct target_extra_context)) {
204
goto err;
205
@@ -XXX,XX +XXX,XX @@ static int target_restore_sigframe(CPUARMState *env,
206
}
207
208
/* SVE data, if present, overwrites FPSIMD data. */
209
- if (sve && !target_restore_sve_record(env, sve, sve_size)) {
210
+ if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
211
goto err;
212
}
213
+ if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
214
+ goto err;
215
+ }
216
+ if (env->svcr != svcr) {
217
+ env->svcr = svcr;
218
+ arm_rebuild_hflags(env);
219
+ }
220
unlock_user(extra, extra_datap, 0);
221
return 0;
222
223
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
224
.total_size = offsetof(struct target_rt_sigframe,
225
uc.tuc_mcontext.__reserved),
226
};
227
- int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
228
+ int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
229
+ int sve_size = 0, za_size = 0;
230
struct target_rt_sigframe *frame;
231
struct target_rt_frame_record *fr;
232
abi_ulong frame_addr, return_addr;
233
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
234
&layout);
235
236
/* SVE state needs saving only if it exists. */
237
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
238
- vq = sve_vq(env);
239
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
240
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
241
+ cpu_isar_feature(aa64_sme, env_archcpu(env))) {
242
+ sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
243
sve_ofs = alloc_sigframe_space(sve_size, &layout);
244
}
245
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
246
+ /* ZA state needs saving only if it is enabled. */
247
+ if (FIELD_EX64(env->svcr, SVCR, ZA)) {
248
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
249
+ } else {
250
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
251
+ }
252
+ za_ofs = alloc_sigframe_space(za_size, &layout);
253
+ }
254
255
if (layout.extra_ofs) {
256
/* Reserve space for the extra end marker. The standard end marker
257
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
258
target_setup_end_record((void *)frame + layout.extra_end_ofs);
259
}
260
if (sve_ofs) {
261
- target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
262
+ target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
263
+ }
264
+ if (za_ofs) {
265
+ target_setup_za_record((void *)frame + za_ofs, env, za_size);
266
}
267
268
/* Set up the stack frame for unwinding. */
269
@@ -XXX,XX +XXX,XX @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
270
env->btype = 2;
271
}
272
273
+ /*
274
+ * Invoke the signal handler with both SM and ZA disabled.
275
+ * When clearing SM, ResetSVEState, per SMSTOP.
276
+ */
277
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
278
+ arm_reset_sve_state(env);
279
+ }
280
+ if (env->svcr) {
281
+ env->svcr = 0;
282
+ arm_rebuild_hflags(env);
283
+ }
284
+
285
if (info) {
286
tswap_siginfo(&frame->info, info);
287
env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
133
--
288
--
134
2.20.1
289
2.25.1
135
136
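For context on how the new ZA record is consumed on the guest side (this sketch is not part of the series): a signal handler built against the Linux aarch64 UAPI can walk the frame's __reserved area looking for the ZA magic, roughly as below. The fallback ZA_MAGIC value matches TARGET_ZA_MAGIC above; records continued through an extra_context block are not followed in this sketch.

    #include <stdint.h>
    #include <asm/sigcontext.h>       /* struct _aarch64_ctx */

    #ifndef ZA_MAGIC
    #define ZA_MAGIC 0x54366345
    #endif

    /* 'reserved' is the __reserved[] block of the signal frame. */
    static struct _aarch64_ctx *find_za_record(void *reserved)
    {
        uint8_t *p = reserved;

        for (;;) {
            struct _aarch64_ctx *head = (struct _aarch64_ctx *)p;

            if (head->magic == 0 || head->size == 0) {
                return NULL;          /* terminator: no ZA record present */
            }
            if (head->magic == ZA_MAGIC) {
                return head;          /* vl and ZA data follow this header */
            }
            p += head->size;          /* record sizes are multiples of 16 */
        }
    }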
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Add "sve" to the sve prctl functions, to distinguish
4
them from the coming "sme" prctls with similar names.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-42-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
linux-user/aarch64/target_prctl.h | 8 ++++----
12
linux-user/syscall.c | 12 ++++++------
13
2 files changed, 10 insertions(+), 10 deletions(-)
14
15
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/linux-user/aarch64/target_prctl.h
18
+++ b/linux-user/aarch64/target_prctl.h
19
@@ -XXX,XX +XXX,XX @@
20
#ifndef AARCH64_TARGET_PRCTL_H
21
#define AARCH64_TARGET_PRCTL_H
22
23
-static abi_long do_prctl_get_vl(CPUArchState *env)
24
+static abi_long do_prctl_sve_get_vl(CPUArchState *env)
25
{
26
ARMCPU *cpu = env_archcpu(env);
27
if (cpu_isar_feature(aa64_sve, cpu)) {
28
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_get_vl(CPUArchState *env)
29
}
30
return -TARGET_EINVAL;
31
}
32
-#define do_prctl_get_vl do_prctl_get_vl
33
+#define do_prctl_sve_get_vl do_prctl_sve_get_vl
34
35
-static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
36
+static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
37
{
38
/*
39
* We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
40
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
41
}
42
return -TARGET_EINVAL;
43
}
44
-#define do_prctl_set_vl do_prctl_set_vl
45
+#define do_prctl_sve_set_vl do_prctl_sve_set_vl
46
47
static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
48
{
49
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/linux-user/syscall.c
52
+++ b/linux-user/syscall.c
53
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
54
#ifndef do_prctl_set_fp_mode
55
#define do_prctl_set_fp_mode do_prctl_inval1
56
#endif
57
-#ifndef do_prctl_get_vl
58
-#define do_prctl_get_vl do_prctl_inval0
59
+#ifndef do_prctl_sve_get_vl
60
+#define do_prctl_sve_get_vl do_prctl_inval0
61
#endif
62
-#ifndef do_prctl_set_vl
63
-#define do_prctl_set_vl do_prctl_inval1
64
+#ifndef do_prctl_sve_set_vl
65
+#define do_prctl_sve_set_vl do_prctl_inval1
66
#endif
67
#ifndef do_prctl_reset_keys
68
#define do_prctl_reset_keys do_prctl_inval1
69
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
70
case PR_SET_FP_MODE:
71
return do_prctl_set_fp_mode(env, arg2);
72
case PR_SVE_GET_VL:
73
- return do_prctl_get_vl(env);
74
+ return do_prctl_sve_get_vl(env);
75
case PR_SVE_SET_VL:
76
- return do_prctl_set_vl(env, arg2);
77
+ return do_prctl_sve_set_vl(env, arg2);
78
case PR_PAC_RESET_KEYS:
79
if (arg3 || arg4 || arg5) {
80
return -TARGET_EINVAL;
81
--
82
2.25.1
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
These prctls set the Streaming SVE vector length, which may
4
be completely different from the Normal SVE vector length.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220708151540.18136-43-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
linux-user/aarch64/target_prctl.h | 54 +++++++++++++++++++++++++++++++
12
linux-user/syscall.c | 16 +++++++++
13
2 files changed, 70 insertions(+)
14
15
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/linux-user/aarch64/target_prctl.h
18
+++ b/linux-user/aarch64/target_prctl.h
19
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_get_vl(CPUArchState *env)
20
{
21
ARMCPU *cpu = env_archcpu(env);
22
if (cpu_isar_feature(aa64_sve, cpu)) {
23
+ /* PSTATE.SM is always unset on syscall entry. */
24
return sve_vq(env) * 16;
25
}
26
return -TARGET_EINVAL;
27
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
28
&& arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
29
uint32_t vq, old_vq;
30
31
+ /* PSTATE.SM is always unset on syscall entry. */
32
old_vq = sve_vq(env);
33
34
/*
35
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
36
}
37
#define do_prctl_sve_set_vl do_prctl_sve_set_vl
38
39
+static abi_long do_prctl_sme_get_vl(CPUArchState *env)
40
+{
41
+ ARMCPU *cpu = env_archcpu(env);
42
+ if (cpu_isar_feature(aa64_sme, cpu)) {
43
+ return sme_vq(env) * 16;
44
+ }
45
+ return -TARGET_EINVAL;
46
+}
47
+#define do_prctl_sme_get_vl do_prctl_sme_get_vl
48
+
49
+static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
50
+{
51
+ /*
52
+ * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
53
+ * Note the kernel definition of sve_vl_valid allows for VQ=512,
54
+ * i.e. VL=8192, even though the architectural maximum is VQ=16.
55
+ */
56
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))
57
+ && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
58
+ int vq, old_vq;
59
+
60
+ old_vq = sme_vq(env);
61
+
62
+ /*
63
+ * Bound the value of vq, so that we know that it fits into
64
+ * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared
65
+ * on syscall entry, we are not modifying the current SVE
66
+ * vector length.
67
+ */
68
+ vq = MAX(arg2 / 16, 1);
69
+ vq = MIN(vq, 16);
70
+ env->vfp.smcr_el[1] =
71
+ FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);
72
+
73
+ /* Delay rebuilding hflags until we know if ZA must change. */
74
+ vq = sve_vqm1_for_el_sm(env, 0, true) + 1;
75
+
76
+ if (vq != old_vq) {
77
+ /*
78
+ * PSTATE.ZA state is cleared on any change to SVL.
79
+ * We need not call arm_rebuild_hflags because PSTATE.SM was
80
+ * cleared on syscall entry, so this hasn't changed VL.
81
+ */
82
+ env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
83
+ arm_rebuild_hflags(env);
84
+ }
85
+ return vq * 16;
86
+ }
87
+ return -TARGET_EINVAL;
88
+}
89
+#define do_prctl_sme_set_vl do_prctl_sme_set_vl
90
+
91
static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
92
{
93
ARMCPU *cpu = env_archcpu(env);
94
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/linux-user/syscall.c
97
+++ b/linux-user/syscall.c
98
@@ -XXX,XX +XXX,XX @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
99
#ifndef PR_SET_SYSCALL_USER_DISPATCH
100
# define PR_SET_SYSCALL_USER_DISPATCH 59
101
#endif
102
+#ifndef PR_SME_SET_VL
103
+# define PR_SME_SET_VL 63
104
+# define PR_SME_GET_VL 64
105
+# define PR_SME_VL_LEN_MASK 0xffff
106
+# define PR_SME_VL_INHERIT (1 << 17)
107
+#endif
108
109
#include "target_prctl.h"
110
111
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
112
#ifndef do_prctl_set_unalign
113
#define do_prctl_set_unalign do_prctl_inval1
114
#endif
115
+#ifndef do_prctl_sme_get_vl
116
+#define do_prctl_sme_get_vl do_prctl_inval0
117
+#endif
118
+#ifndef do_prctl_sme_set_vl
119
+#define do_prctl_sme_set_vl do_prctl_inval1
120
+#endif
121
122
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
123
abi_long arg3, abi_long arg4, abi_long arg5)
124
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
125
return do_prctl_sve_get_vl(env);
126
case PR_SVE_SET_VL:
127
return do_prctl_sve_set_vl(env, arg2);
128
+ case PR_SME_GET_VL:
129
+ return do_prctl_sme_get_vl(env);
130
+ case PR_SME_SET_VL:
131
+ return do_prctl_sme_set_vl(env, arg2);
132
case PR_PAC_RESET_KEYS:
133
if (arg3 || arg4 || arg5) {
134
return -TARGET_EINVAL;
135
--
136
2.25.1
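A minimal guest-side usage sketch (not part of the series), assuming the PR_SME_* constants from the patch when the installed headers do not yet define them, and requesting a 32-byte streaming vector length:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SME_SET_VL
    # define PR_SME_SET_VL      63
    # define PR_SME_GET_VL      64
    # define PR_SME_VL_LEN_MASK 0xffff
    #endif

    int main(void)
    {
        long old = prctl(PR_SME_GET_VL, 0, 0, 0, 0);
        if (old < 0) {
            puts("SME not supported");
            return 1;
        }
        /* Ask for a 32-byte (VQ=2) streaming vector length. */
        long now = prctl(PR_SME_SET_VL, 32, 0, 0, 0);
        if (now < 0) {
            puts("could not set the streaming vector length");
            return 1;
        }
        printf("SVL: %ld -> %ld bytes\n",
               old & PR_SME_VL_LEN_MASK, now & PR_SME_VL_LEN_MASK);
        return 0;
    }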
1
From: Tong Ho <tong.ho@xilinx.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Add BBRAM and eFUSE usage to the Xilinx Versal Virt board
3
There's no reason to set CPACR_EL1.ZEN if SVE is disabled.
4
document.
5
4
6
Signed-off-by: Tong Ho <tong.ho@xilinx.com>
7
Message-id: 20210917052400.1249094-10-tong.ho@xilinx.com
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220708151540.18136-44-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
9
---
11
docs/system/arm/xlnx-versal-virt.rst | 49 ++++++++++++++++++++++++++++
10
target/arm/cpu.c | 7 +++----
12
1 file changed, 49 insertions(+)
11
1 file changed, 3 insertions(+), 4 deletions(-)
13
12
14
diff --git a/docs/system/arm/xlnx-versal-virt.rst b/docs/system/arm/xlnx-versal-virt.rst
13
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
15
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
16
--- a/docs/system/arm/xlnx-versal-virt.rst
15
--- a/target/arm/cpu.c
17
+++ b/docs/system/arm/xlnx-versal-virt.rst
16
+++ b/target/arm/cpu.c
18
@@ -XXX,XX +XXX,XX @@ Implemented devices:
17
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
19
- OCM (256KB of On Chip Memory)
18
/* and to the FP/Neon instructions */
20
- XRAM (4MB of on chip Accelerator RAM)
19
env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
21
- DDR memory
20
CPACR_EL1, FPEN, 3);
22
+- BBRAM (36 bytes of Battery-backed RAM)
21
- /* and to the SVE instructions */
23
+- eFUSE (3072 bytes of one-time field-programmable bit array)
22
- env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
24
23
- CPACR_EL1, ZEN, 3);
25
QEMU does not yet model any other devices, including the PL and the AI Engine.
24
- /* with reasonable vector length */
26
25
+ /* and to the SVE instructions, with default vector length */
27
@@ -XXX,XX +XXX,XX @@ Run the following at the U-Boot prompt:
26
if (cpu_isar_feature(aa64_sve, cpu)) {
28
fdt set /chosen/dom0 reg <0x00000000 0x40000000 0x0 0x03100000>
27
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
29
booti 30000000 - 20000000
28
+ CPACR_EL1, ZEN, 3);
30
29
env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
31
+BBRAM File Backend
30
}
32
+""""""""""""""""""
31
/*
33
+BBRAM can have an optional file backend, which must be a seekable
34
+binary file with a size of 36 bytes or larger. A file with all
35
+binary 0s is a 'blank'.
36
+
37
+To add a file-backend for the BBRAM:
38
+
39
+.. code-block:: bash
40
+
41
+ -drive if=pflash,index=0,file=versal-bbram.bin,format=raw
42
+
43
+To use a different index value, N, from default of 0, add:
44
+
45
+.. code-block:: bash
46
+
47
+ -global xlnx,bbram-ctrl.drive-index=N
48
+
49
+eFUSE File Backend
50
+""""""""""""""""""
51
+eFUSE can have an optional file backend, which must be a seekable
52
+binary file with a size of 3072 bytes or larger. A file with all
53
+binary 0s is a 'blank'.
54
+
55
+To add a file-backend for the eFUSE:
56
+
57
+.. code-block:: bash
58
+
59
+ -drive if=pflash,index=1,file=versal-efuse.bin,format=raw
60
+
61
+To use a different index value, N, from default of 1, add:
62
+
63
+.. code-block:: bash
64
+
65
+ -global xlnx,efuse.drive-index=N
66
+
67
+.. warning::
68
+ In actual physical Versal, BBRAM and eFUSE contain sensitive data.
69
+ The QEMU device models do **not** encrypt nor obfuscate any data
70
+ when holding them in models' memory or when writing them to their
71
+ file backends.
72
+
73
+ Thus, a file backend should be used with caution, and 'format=luks'
74
+ is highly recommended (albeit with usage complexity).
75
+
76
+ Better yet, do not use actual product data when running guest image
77
+ on this Xilinx Versal Virt board.
78
--
32
--
79
2.20.1
33
2.25.1
80
81
1
From: Alexander Graf <agraf@csgraf.de>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The Allwinner H3 SoC uses Cortex-A7 cores which support virtualization.
3
Enable SME, TPIDR2_EL0, and FA64 if supported by the cpu.
4
However, today we are configuring QEMU to use HVC as PSCI conduit.
5
4
6
That means HVC calls get trapped into QEMU instead of the guest's own
7
emulated CPU and thus break the guest's ability to execute virtualization.
8
9
Fix this by moving to SMC as conduit, freeing up HYP completely to the VM.
10
11
Signed-off-by: Alexander Graf <agraf@csgraf.de>
12
Message-id: 20210920203931.66527-1-agraf@csgraf.de
13
Fixes: 740dafc0ba0 ("hw/arm: add Allwinner H3 System-on-Chip")
14
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
15
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
16
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
17
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220708151540.18136-45-richard.henderson@linaro.org
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
---
9
---
20
hw/arm/allwinner-h3.c | 2 +-
10
target/arm/cpu.c | 11 +++++++++++
21
1 file changed, 1 insertion(+), 1 deletion(-)
11
1 file changed, 11 insertions(+)
22
12
23
diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c
13
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
24
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
25
--- a/hw/arm/allwinner-h3.c
15
--- a/target/arm/cpu.c
26
+++ b/hw/arm/allwinner-h3.c
16
+++ b/target/arm/cpu.c
27
@@ -XXX,XX +XXX,XX @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp)
17
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
28
18
CPACR_EL1, ZEN, 3);
29
/* Provide Power State Coordination Interface */
19
env->vfp.zcr_el[1] = cpu->sve_default_vq - 1;
30
qdev_prop_set_int32(DEVICE(&s->cpus[i]), "psci-conduit",
20
}
31
- QEMU_PSCI_CONDUIT_HVC);
21
+ /* and for SME instructions, with default vector length, and TPIDR2 */
32
+ QEMU_PSCI_CONDUIT_SMC);
22
+ if (cpu_isar_feature(aa64_sme, cpu)) {
33
23
+ env->cp15.sctlr_el[1] |= SCTLR_EnTP2;
34
/* Disable secondary CPUs */
24
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
35
qdev_prop_set_bit(DEVICE(&s->cpus[i]), "start-powered-off",
25
+ CPACR_EL1, SMEN, 3);
26
+ env->vfp.smcr_el[1] = cpu->sme_default_vq - 1;
27
+ if (cpu_isar_feature(aa64_sme_fa64, cpu)) {
28
+ env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1],
29
+ SMCR, FA64, 1);
30
+ }
31
+ }
32
/*
33
* Enable 48-bit address space (TODO: take reserved_va into account).
34
* Enable TBI0 but not TBI1.
36
--
35
--
37
2.20.1
36
2.25.1
38
39
1
The function scsi_bus_new() creates a new SCSI bus; callers can
1
From: Richard Henderson <richard.henderson@linaro.org>
2
either pass in a name argument to specify the name of the new bus, or
3
they can pass in NULL to allow the bus to be given an automatically
4
generated unique name. Almost all callers want to use the
5
autogenerated name; the only exception is the virtio-scsi device.
6
2
7
Taking a name argument that should almost always be NULL is an
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
easy-to-misuse API design -- it encourages callers to think perhaps
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
they should pass in some standard name like "scsi" or "scsi-bus". We
5
Message-id: 20220708151540.18136-46-richard.henderson@linaro.org
10
don't do this anywhere for SCSI, but we do (incorrectly) do it for
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
other bus types such as i2c.
7
---
8
linux-user/elfload.c | 20 ++++++++++++++++++++
9
1 file changed, 20 insertions(+)
12
10
13
The function name also implies that it will return a newly allocated
11
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
14
object, when it in fact does in-place allocation. We more commonly
15
name such functions foo_init(), with foo_new() being the
16
allocate-and-return variant.
17
18
Replace all the scsi_bus_new() callsites with either:
19
* scsi_bus_init() for the usual case where the caller wants
20
an autogenerated bus name
21
* scsi_bus_init_named() for the rare case where the caller
22
needs to specify the bus name
23
24
and document that for the _named() version it's then the caller's
25
responsibility to think about uniqueness of bus names.
26
27
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
29
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
30
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
31
Message-id: 20210923121153.23754-2-peter.maydell@linaro.org
32
---
33
include/hw/scsi/scsi.h | 30 ++++++++++++++++++++++++++++--
34
hw/scsi/esp-pci.c | 2 +-
35
hw/scsi/esp.c | 2 +-
36
hw/scsi/lsi53c895a.c | 2 +-
37
hw/scsi/megasas.c | 3 +--
38
hw/scsi/mptsas.c | 2 +-
39
hw/scsi/scsi-bus.c | 4 ++--
40
hw/scsi/spapr_vscsi.c | 3 +--
41
hw/scsi/virtio-scsi.c | 4 ++--
42
hw/scsi/vmw_pvscsi.c | 3 +--
43
hw/usb/dev-storage-bot.c | 3 +--
44
hw/usb/dev-storage-classic.c | 4 ++--
45
hw/usb/dev-uas.c | 3 +--
46
13 files changed, 43 insertions(+), 22 deletions(-)
47
48
diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
49
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
50
--- a/include/hw/scsi/scsi.h
13
--- a/linux-user/elfload.c
51
+++ b/include/hw/scsi/scsi.h
14
+++ b/linux-user/elfload.c
52
@@ -XXX,XX +XXX,XX @@ struct SCSIBus {
15
@@ -XXX,XX +XXX,XX @@ enum {
53
const SCSIBusInfo *info;
16
ARM_HWCAP2_A64_RNG = 1 << 16,
17
ARM_HWCAP2_A64_BTI = 1 << 17,
18
ARM_HWCAP2_A64_MTE = 1 << 18,
19
+ ARM_HWCAP2_A64_ECV = 1 << 19,
20
+ ARM_HWCAP2_A64_AFP = 1 << 20,
21
+ ARM_HWCAP2_A64_RPRES = 1 << 21,
22
+ ARM_HWCAP2_A64_MTE3 = 1 << 22,
23
+ ARM_HWCAP2_A64_SME = 1 << 23,
24
+ ARM_HWCAP2_A64_SME_I16I64 = 1 << 24,
25
+ ARM_HWCAP2_A64_SME_F64F64 = 1 << 25,
26
+ ARM_HWCAP2_A64_SME_I8I32 = 1 << 26,
27
+ ARM_HWCAP2_A64_SME_F16F32 = 1 << 27,
28
+ ARM_HWCAP2_A64_SME_B16F32 = 1 << 28,
29
+ ARM_HWCAP2_A64_SME_F32F32 = 1 << 29,
30
+ ARM_HWCAP2_A64_SME_FA64 = 1 << 30,
54
};
31
};
55
32
56
-void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
33
#define ELF_HWCAP get_elf_hwcap()
57
- const SCSIBusInfo *info, const char *bus_name);
34
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap2(void)
58
+/**
35
GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
59
+ * scsi_bus_init_named: Initialize a SCSI bus with the specified name
36
GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
60
+ * @bus: SCSIBus object to initialize
37
GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
61
+ * @bus_size: size of @bus object
38
+ GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
62
+ * @host: Device which owns the bus (generally the SCSI controller)
39
+ ARM_HWCAP2_A64_SME_F32F32 |
63
+ * @info: structure defining callbacks etc for the controller
40
+ ARM_HWCAP2_A64_SME_B16F32 |
64
+ * @bus_name: Name to use for this bus
41
+ ARM_HWCAP2_A64_SME_F16F32 |
65
+ *
42
+ ARM_HWCAP2_A64_SME_I8I32));
66
+ * This in-place initializes @bus as a new SCSI bus with a name
43
+ GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
67
+ * provided by the caller. It is the caller's responsibility to make
44
+ GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
68
+ * sure that name does not clash with the name of any other bus in the
45
+ GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
69
+ * system. Unless you need the new bus to have a specific name, you
46
70
+ * should use scsi_bus_new() instead.
47
return hwcaps;
71
+ */
72
+void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
73
+ const SCSIBusInfo *info, const char *bus_name);
74
+
75
+/**
76
+ * scsi_bus_init: Initialize a SCSI bus
77
+ *
78
+ * This in-place-initializes @bus as a new SCSI bus and gives it
79
+ * an automatically generated unique name.
80
+ */
81
+static inline void scsi_bus_init(SCSIBus *bus, size_t bus_size,
82
+ DeviceState *host, const SCSIBusInfo *info)
83
+{
84
+ scsi_bus_init_named(bus, bus_size, host, info, NULL);
85
+}
86
87
static inline SCSIBus *scsi_bus_from_device(SCSIDevice *d)
88
{
89
diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/hw/scsi/esp-pci.c
92
+++ b/hw/scsi/esp-pci.c
93
@@ -XXX,XX +XXX,XX @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
94
pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io);
95
s->irq = pci_allocate_irq(dev);
96
97
- scsi_bus_new(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info, NULL);
98
+ scsi_bus_init(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info);
99
}
48
}
100
101
static void esp_pci_scsi_exit(PCIDevice *d)
102
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
103
index XXXXXXX..XXXXXXX 100644
104
--- a/hw/scsi/esp.c
105
+++ b/hw/scsi/esp.c
106
@@ -XXX,XX +XXX,XX @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp)
107
108
qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);
109
110
- scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
111
+ scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
112
}
113
114
static void sysbus_esp_hard_reset(DeviceState *dev)
115
diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
116
index XXXXXXX..XXXXXXX 100644
117
--- a/hw/scsi/lsi53c895a.c
118
+++ b/hw/scsi/lsi53c895a.c
119
@@ -XXX,XX +XXX,XX @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
120
pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->ram_io);
121
QTAILQ_INIT(&s->queue);
122
123
- scsi_bus_new(&s->bus, sizeof(s->bus), d, &lsi_scsi_info, NULL);
124
+ scsi_bus_init(&s->bus, sizeof(s->bus), d, &lsi_scsi_info);
125
}
126
127
static void lsi_scsi_exit(PCIDevice *dev)
128
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
129
index XXXXXXX..XXXXXXX 100644
130
--- a/hw/scsi/megasas.c
131
+++ b/hw/scsi/megasas.c
132
@@ -XXX,XX +XXX,XX @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp)
133
s->frames[i].state = s;
134
}
135
136
- scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev),
137
- &megasas_scsi_info, NULL);
138
+ scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &megasas_scsi_info);
139
}
140
141
static Property megasas_properties_gen1[] = {
142
diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
143
index XXXXXXX..XXXXXXX 100644
144
--- a/hw/scsi/mptsas.c
145
+++ b/hw/scsi/mptsas.c
146
@@ -XXX,XX +XXX,XX @@ static void mptsas_scsi_realize(PCIDevice *dev, Error **errp)
147
148
s->request_bh = qemu_bh_new(mptsas_fetch_requests, s);
149
150
- scsi_bus_new(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info, NULL);
151
+ scsi_bus_init(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info);
152
}
153
154
static void mptsas_scsi_uninit(PCIDevice *dev)
155
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
156
index XXXXXXX..XXXXXXX 100644
157
--- a/hw/scsi/scsi-bus.c
158
+++ b/hw/scsi/scsi-bus.c
159
@@ -XXX,XX +XXX,XX @@ void scsi_device_unit_attention_reported(SCSIDevice *s)
160
}
161
162
/* Create a scsi bus, and attach devices to it. */
163
-void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
164
- const SCSIBusInfo *info, const char *bus_name)
165
+void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
166
+ const SCSIBusInfo *info, const char *bus_name)
167
{
168
qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
169
bus->busnr = next_scsi_bus++;
170
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/hw/scsi/spapr_vscsi.c
173
+++ b/hw/scsi/spapr_vscsi.c
174
@@ -XXX,XX +XXX,XX @@ static void spapr_vscsi_realize(SpaprVioDevice *dev, Error **errp)
175
176
dev->crq.SendFunc = vscsi_do_crq;
177
178
- scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev),
179
- &vscsi_scsi_info, NULL);
180
+ scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &vscsi_scsi_info);
181
182
/* ibmvscsi SCSI bus does not allow hotplug. */
183
qbus_set_hotplug_handler(BUS(&s->bus), NULL);
184
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
185
index XXXXXXX..XXXXXXX 100644
186
--- a/hw/scsi/virtio-scsi.c
187
+++ b/hw/scsi/virtio-scsi.c
188
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
189
return;
190
}
191
192
- scsi_bus_new(&s->bus, sizeof(s->bus), dev,
193
- &virtio_scsi_scsi_info, vdev->bus_name);
194
+ scsi_bus_init_named(&s->bus, sizeof(s->bus), dev,
195
+ &virtio_scsi_scsi_info, vdev->bus_name);
196
/* override default SCSI bus hotplug-handler, with virtio-scsi's one */
197
qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));
198
199
diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
200
index XXXXXXX..XXXXXXX 100644
201
--- a/hw/scsi/vmw_pvscsi.c
202
+++ b/hw/scsi/vmw_pvscsi.c
203
@@ -XXX,XX +XXX,XX @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp)
204
205
s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s);
206
207
- scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(pci_dev),
208
- &pvscsi_scsi_info, NULL);
209
+ scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info);
210
/* override default SCSI bus hotplug-handler, with pvscsi's one */
211
qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(s));
212
pvscsi_reset_state(s);
213
diff --git a/hw/usb/dev-storage-bot.c b/hw/usb/dev-storage-bot.c
214
index XXXXXXX..XXXXXXX 100644
215
--- a/hw/usb/dev-storage-bot.c
216
+++ b/hw/usb/dev-storage-bot.c
217
@@ -XXX,XX +XXX,XX @@ static void usb_msd_bot_realize(USBDevice *dev, Error **errp)
218
s->dev.auto_attach = 0;
219
}
220
221
- scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev),
222
- &usb_msd_scsi_info_bot, NULL);
223
+ scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &usb_msd_scsi_info_bot);
224
usb_msd_handle_reset(dev);
225
}
226
227
diff --git a/hw/usb/dev-storage-classic.c b/hw/usb/dev-storage-classic.c
228
index XXXXXXX..XXXXXXX 100644
229
--- a/hw/usb/dev-storage-classic.c
230
+++ b/hw/usb/dev-storage-classic.c
231
@@ -XXX,XX +XXX,XX @@ static void usb_msd_storage_realize(USBDevice *dev, Error **errp)
232
usb_desc_create_serial(dev);
233
usb_desc_init(dev);
234
dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE);
235
- scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev),
236
- &usb_msd_scsi_info_storage, NULL);
237
+ scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev),
238
+ &usb_msd_scsi_info_storage);
239
scsi_dev = scsi_bus_legacy_add_drive(&s->bus, blk, 0, !!s->removable,
240
s->conf.bootindex, s->conf.share_rw,
241
s->conf.rerror, s->conf.werror,
242
diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c
243
index XXXXXXX..XXXXXXX 100644
244
--- a/hw/usb/dev-uas.c
245
+++ b/hw/usb/dev-uas.c
246
@@ -XXX,XX +XXX,XX @@ static void usb_uas_realize(USBDevice *dev, Error **errp)
247
uas->status_bh = qemu_bh_new(usb_uas_send_status_bh, uas);
248
249
dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE);
250
- scsi_bus_new(&uas->bus, sizeof(uas->bus), DEVICE(dev),
251
- &usb_uas_scsi_info, NULL);
252
+ scsi_bus_init(&uas->bus, sizeof(uas->bus), DEVICE(dev), &usb_uas_scsi_info);
253
}
254
255
static const VMStateDescription vmstate_usb_uas = {
256
--
49
--
257
2.20.1
50
2.25.1
258
259
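As a hypothetical userspace probe for the new hwcap bits (sketch only, not part of the patches): the fallback HWCAP2_* values below mirror the ARM_HWCAP2_A64_* constants added above and the kernel's <asm/hwcap.h>.

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP2_SME
    # define HWCAP2_SME        (1UL << 23)
    # define HWCAP2_SME_I16I64 (1UL << 24)
    # define HWCAP2_SME_F64F64 (1UL << 25)
    # define HWCAP2_SME_FA64   (1UL << 30)
    #endif

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("SME:        %s\n", (hwcap2 & HWCAP2_SME) ? "yes" : "no");
        printf("SME I16I64: %s\n", (hwcap2 & HWCAP2_SME_I16I64) ? "yes" : "no");
        printf("SME F64F64: %s\n", (hwcap2 & HWCAP2_SME_F64F64) ? "yes" : "no");
        printf("SME FA64:   %s\n", (hwcap2 & HWCAP2_SME_FA64) ? "yes" : "no");
        return 0;
    }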