ARM queue: mostly patches from me, but also the Smartfusion2 board.

-- PMM

The following changes since commit 9ee660e7c138595224b65ddc1c5712549f0a278c:

  Merge remote-tracking branch 'remotes/yongbok/tags/mips-20170921' into staging (2017-09-21 14:40:32 +0100)

are available in the git repository at:

  git://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20170921

for you to fetch changes up to 6d262dcb7d108eda93813574c2061398084dc795:

  msf2: Add Emcraft's Smartfusion2 SOM kit (2017-09-21 16:36:56 +0100)

----------------------------------------------------------------
target-arm queue:
 * more preparatory work for v8M support
 * convert some omap devices away from old_mmio
 * remove out of date ARM ARM section references in comments
 * add the Smartfusion2 board

----------------------------------------------------------------
Peter Maydell (26):
      target/arm: Implement MSR/MRS access to NS banked registers
      nvic: Add banked exception states
      nvic: Add cached vectpending_is_s_banked state
      nvic: Add cached vectpending_prio state
      nvic: Implement AIRCR changes for v8M
      nvic: Make ICSR.RETTOBASE handle banked exceptions
      nvic: Implement NVIC_ITNS<n> registers
      nvic: Handle banked exceptions in nvic_recompute_state()
      nvic: Make set_pending and clear_pending take a secure parameter
      nvic: Make SHPR registers banked
      nvic: Compare group priority for escalation to HF
      nvic: In escalation to HardFault, support HF not being priority -1
      nvic: Implement v8M changes to fixed priority exceptions
      nvic: Disable the non-secure HardFault if AIRCR.BFHFNMINS is clear
      nvic: Handle v8M changes in nvic_exec_prio()
      target/arm: Handle banking in negative-execution-priority check in cpu_mmu_index()
      nvic: Make ICSR banked for v8M
      nvic: Make SHCSR banked for v8M
      nvic: Support banked exceptions in acknowledge and complete
      target/arm: Remove out of date ARM ARM section references in A64 decoder
      hw/arm/palm.c: Don't use old_mmio for static_ops
      hw/gpio/omap_gpio.c: Don't use old_mmio
      hw/timer/omap_synctimer.c: Don't use old_mmio
      hw/timer/omap_gptimer: Don't use old_mmio
      hw/i2c/omap_i2c.c: Don't use old_mmio
      hw/arm/omap2.c: Don't use old_mmio

Subbaraya Sundeep (5):
      msf2: Add Smartfusion2 System timer
      msf2: Microsemi Smartfusion2 System Register block
      msf2: Add Smartfusion2 SPI controller
      msf2: Add Smartfusion2 SoC
      msf2: Add Emcraft's Smartfusion2 SOM kit

 hw/arm/Makefile.objs            |   1 +
 hw/misc/Makefile.objs           |   1 +
 hw/ssi/Makefile.objs            |   1 +
 hw/timer/Makefile.objs          |   1 +
 include/hw/arm/msf2-soc.h       |  67 +++
 include/hw/intc/armv7m_nvic.h   |  33 +-
 include/hw/misc/msf2-sysreg.h   |  77 ++++
 include/hw/ssi/mss-spi.h        |  58 +++
 include/hw/timer/mss-timer.h    |  64 +++
 target/arm/cpu.h                |  62 ++-
 hw/arm/msf2-soc.c               | 238 +++++++++++
 hw/arm/msf2-som.c               | 105 +++++
 hw/arm/omap2.c                  |  49 ++-
 hw/arm/palm.c                   |  30 +-
 hw/gpio/omap_gpio.c             |  26 +-
 hw/i2c/omap_i2c.c               |  44 +-
 hw/intc/armv7m_nvic.c           | 913 ++++++++++++++++++++++++++++++++++------
 hw/misc/msf2-sysreg.c           | 160 +++++++
 hw/ssi/mss-spi.c                | 404 ++++++++++++++++++
 hw/timer/mss-timer.c            | 289 +++++++++++++
 hw/timer/omap_gptimer.c         |  49 ++-
 hw/timer/omap_synctimer.c       |  35 +-
 target/arm/cpu.c                |   7 +
 target/arm/helper.c             | 142 ++++++-
 target/arm/translate-a64.c      | 227 +++++-----
 default-configs/arm-softmmu.mak |   1 +
 hw/intc/trace-events            |  13 +-
 hw/misc/trace-events            |   5 +
 28 files changed, 2735 insertions(+), 367 deletions(-)
 create mode 100644 include/hw/arm/msf2-soc.h
 create mode 100644 include/hw/misc/msf2-sysreg.h
 create mode 100644 include/hw/ssi/mss-spi.h
 create mode 100644 include/hw/timer/mss-timer.h
 create mode 100644 hw/arm/msf2-soc.c
 create mode 100644 hw/arm/msf2-som.c
 create mode 100644 hw/misc/msf2-sysreg.c
 create mode 100644 hw/ssi/mss-spi.c
 create mode 100644 hw/timer/mss-timer.c

Hi; here's the latest arm pullreq...

thanks
-- PMM

The following changes since commit 03a3a62fbd0aa5227e978eef3c67d3978aec9e5f:

  Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging (2023-09-07 10:29:06 -0400)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230908

for you to fetch changes up to c8f2eb5d414b788420b938f2ffdde891aa6c3ae8:

  arm/kvm: Enable support for KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE (2023-09-08 16:41:36 +0100)

----------------------------------------------------------------
target-arm queue:
 * New CPU type: cortex-a710
 * Implement new architectural features:
    - FEAT_PACQARMA3
    - FEAT_EPAC
    - FEAT_Pauth2
    - FEAT_FPAC
    - FEAT_FPACCOMBINE
    - FEAT_TIDCP1
 * Xilinx Versal: Model the CFU/CFI
 * Implement RMR_ELx registers
 * Implement handling of HCR_EL2.TIDCP trap bit
 * arm/kvm: Enable support for KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
 * hw/intc/arm_gicv3_its: Avoid maybe-uninitialized error in get_vte()
 * target/arm: Do not use gen_mte_checkN in trans_STGP
 * arm64: Restore trapless ptimer access

----------------------------------------------------------------
Aaron Lindsay (6):
      target/arm: Add ID_AA64ISAR2_EL1
      target/arm: Add feature detection for FEAT_Pauth2 and extensions
      target/arm: Implement FEAT_EPAC
      target/arm: Implement FEAT_Pauth2
      target/arm: Inform helpers whether a PAC instruction is 'combined'
      target/arm: Implement FEAT_FPAC and FEAT_FPACCOMBINE

Colton Lewis (1):
      arm64: Restore trapless ptimer access

Francisco Iglesias (8):
      hw/misc: Introduce the Xilinx CFI interface
      hw/misc: Introduce a model of Xilinx Versal's CFU_APB
      hw/misc/xlnx-versal-cfu: Introduce a model of Xilinx Versal CFU_FDRO
      hw/misc/xlnx-versal-cfu: Introduce a model of Xilinx Versal's CFU_SFR
      hw/misc: Introduce a model of Xilinx Versal's CFRAME_REG
      hw/misc: Introduce a model of Xilinx Versal's CFRAME_BCAST_REG
      hw/arm/xlnx-versal: Connect the CFU_APB, CFU_FDRO and CFU_SFR
      hw/arm/versal: Connect the CFRAME_REG and CFRAME_BCAST_REG

Philippe Mathieu-Daudé (1):
      hw/intc/arm_gicv3_its: Avoid maybe-uninitialized error in get_vte()

Richard Henderson (9):
      tests/tcg/aarch64: Adjust pauth tests for FEAT_FPAC
      target/arm: Don't change pauth features when changing algorithm
      target/arm: Implement FEAT_PACQARMA3
      target/arm: Do not use gen_mte_checkN in trans_STGP
      target/arm: Implement RMR_ELx
      target/arm: Implement cortex-a710
      target/arm: Implement HCR_EL2.TIDCP
      target/arm: Implement FEAT_TIDCP1
      target/arm: Enable SCTLR_EL1.TIDCP for user-only

Shameer Kolothum (1):
      arm/kvm: Enable support for KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE

 MAINTAINERS                              |  10 +
 docs/system/arm/cpu-features.rst         |  21 +-
 docs/system/arm/emulation.rst            |   8 +
 docs/system/arm/virt.rst                 |   1 +
 include/hw/arm/xlnx-versal.h             |  85 +++
 include/hw/misc/xlnx-cfi-if.h            |  59 +++
 include/hw/misc/xlnx-versal-cframe-reg.h | 303 +++++++++++
 include/hw/misc/xlnx-versal-cfu.h        | 258 ++++++++++
 include/sysemu/kvm_int.h                 |   1 +
 target/arm/cpu.h                         |  54 +-
 target/arm/helper.h                      |   2 +
 target/arm/syndrome.h                    |   7 +
 target/arm/tcg/helper-a64.h              |   4 +
 tests/tcg/aarch64/pauth.h                |  23 +
 accel/kvm/kvm-all.c                      |   1 +
 hw/arm/virt.c                            |   1 +
 hw/arm/xlnx-versal.c                     | 155 +++++-
 hw/intc/arm_gicv3_its.c                  |  15 +-
 hw/misc/xlnx-cfi-if.c                    |  34 ++
 hw/misc/xlnx-versal-cframe-reg.c         | 858 +++++++++++++++++++++++++++++++
 hw/misc/xlnx-versal-cfu.c                | 563 ++++++++++++++++++++
 target/arm/arm-qmp-cmds.c                |   2 +-
 target/arm/cpu.c                         |   4 +
 target/arm/cpu64.c                       |  86 +++-
 target/arm/helper.c                      |  68 ++-
 target/arm/hvf/hvf.c                     |   1 +
 target/arm/kvm.c                         |  61 +++
 target/arm/kvm64.c                       |   3 +
 target/arm/tcg/cpu64.c                   | 215 ++++++++
 target/arm/tcg/op_helper.c               |  33 ++
 target/arm/tcg/pauth_helper.c            | 180 +++++--
 target/arm/tcg/translate-a64.c           |  74 +--
 target/arm/tcg/translate.c               |  33 ++
 tests/qtest/arm-cpu-features.c           |  12 +-
 tests/tcg/aarch64/pauth-2.c              |  54 +-
 tests/tcg/aarch64/pauth-4.c              |  18 +-
 tests/tcg/aarch64/pauth-5.c              |  10 +
 hw/misc/meson.build                      |   3 +
 qemu-options.hx                          |  15 +
 tests/tcg/aarch64/Makefile.target        |   6 +-
 40 files changed, 3184 insertions(+), 157 deletions(-)
 create mode 100644 include/hw/misc/xlnx-cfi-if.h
 create mode 100644 include/hw/misc/xlnx-versal-cframe-reg.h
 create mode 100644 include/hw/misc/xlnx-versal-cfu.h
 create mode 100644 tests/tcg/aarch64/pauth.h
 create mode 100644 hw/misc/xlnx-cfi-if.c
 create mode 100644 hw/misc/xlnx-versal-cframe-reg.c
 create mode 100644 hw/misc/xlnx-versal-cfu.c
From: Subbaraya Sundeep <sundeep.lkml@gmail.com>

Emulated Emcraft's Smartfusion2 System On Module starter kit.

Signed-off-by: Subbaraya Sundeep <sundeep.lkml@gmail.com>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20170920201737.25723-6-f4bug@amsat.org
[PMD: drop cpu_model to directly use cpu type]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---

From: Richard Henderson <richard.henderson@linaro.org>

With FEAT_FPAC, AUT* instructions that fail authentication
do not produce an error value but instead fault.

For pauth-2, install a signal handler and verify it gets called.

For pauth-4 and pauth-5, we are explicitly testing the error value,
so there is nothing to test with FEAT_FPAC; exit early.
Adjust the makefile to use -cpu neoverse-v1, which has FEAT_EPAC
but not FEAT_FPAC.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230829232335.965414-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
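As a companion to the ID-register probing that the new pauth.h helper does,
a Linux host also exposes coarser PAuth hwcaps. A minimal sketch, illustrative
only and assuming an AArch64 Linux host; HWCAP_PACA/HWCAP_PACG report only
presence of address/generic authentication, not the EPAC/Pauth2/FPAC level:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>      /* HWCAP_PACA, HWCAP_PACG on arm64 */

int main(void)
{
    unsigned long hwcap = getauxval(AT_HWCAP);

    /* Presence of address/generic authentication, but not which
     * pointer-auth feature level the CPU implements. */
    printf("PACA (address auth): %s\n", (hwcap & HWCAP_PACA) ? "yes" : "no");
    printf("PACG (generic auth): %s\n", (hwcap & HWCAP_PACG) ? "yes" : "no");
    return 0;
}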
12
hw/arm/Makefile.objs | 2 +-
18
tests/tcg/aarch64/pauth.h | 23 +++++++++++++
13
hw/arm/msf2-som.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++++++
19
tests/tcg/aarch64/pauth-2.c | 54 ++++++++++++++++++++++++++-----
14
2 files changed, 106 insertions(+), 1 deletion(-)
20
tests/tcg/aarch64/pauth-4.c | 18 ++++++++---
15
create mode 100644 hw/arm/msf2-som.c
21
tests/tcg/aarch64/pauth-5.c | 10 ++++++
16
22
tests/tcg/aarch64/Makefile.target | 6 +++-
17
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
23
5 files changed, 98 insertions(+), 13 deletions(-)
18
index XXXXXXX..XXXXXXX 100644
24
create mode 100644 tests/tcg/aarch64/pauth.h
19
--- a/hw/arm/Makefile.objs
25
20
+++ b/hw/arm/Makefile.objs
26
diff --git a/tests/tcg/aarch64/pauth.h b/tests/tcg/aarch64/pauth.h
21
@@ -XXX,XX +XXX,XX @@ obj-$(CONFIG_FSL_IMX31) += fsl-imx31.o kzm.o
22
obj-$(CONFIG_FSL_IMX6) += fsl-imx6.o sabrelite.o
23
obj-$(CONFIG_ASPEED_SOC) += aspeed_soc.o aspeed.o
24
obj-$(CONFIG_MPS2) += mps2.o
25
-obj-$(CONFIG_MSF2) += msf2-soc.o
26
+obj-$(CONFIG_MSF2) += msf2-soc.o msf2-som.o
27
diff --git a/hw/arm/msf2-som.c b/hw/arm/msf2-som.c
28
new file mode 100644
27
new file mode 100644
29
index XXXXXXX..XXXXXXX
28
index XXXXXXX..XXXXXXX
30
--- /dev/null
29
--- /dev/null
31
+++ b/hw/arm/msf2-som.c
30
+++ b/tests/tcg/aarch64/pauth.h
32
@@ -XXX,XX +XXX,XX @@
31
@@ -XXX,XX +XXX,XX @@
33
+/*
32
+/*
34
+ * SmartFusion2 SOM starter kit(from Emcraft) emulation.
33
+ * Helper for pauth test case
35
+ *
34
+ *
36
+ * Copyright (c) 2017 Subbaraya Sundeep <sundeep.lkml@gmail.com>
35
+ * Copyright (c) 2023 Linaro Ltd
37
+ *
36
+ * SPDX-License-Identifier: GPL-2.0-or-later
38
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
39
+ * of this software and associated documentation files (the "Software"), to deal
40
+ * in the Software without restriction, including without limitation the rights
41
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
42
+ * copies of the Software, and to permit persons to whom the Software is
43
+ * furnished to do so, subject to the following conditions:
44
+ *
45
+ * The above copyright notice and this permission notice shall be included in
46
+ * all copies or substantial portions of the Software.
47
+ *
48
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
49
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
50
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
51
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
52
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
53
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
54
+ * THE SOFTWARE.
55
+ */
37
+ */
56
+
38
+
57
+#include "qemu/osdep.h"
39
+#include <assert.h>
58
+#include "qapi/error.h"
40
+#include <sys/auxv.h>
59
+#include "qemu/error-report.h"
41
+
60
+#include "hw/boards.h"
42
+static int get_pac_feature(void)
61
+#include "hw/arm/arm.h"
62
+#include "exec/address-spaces.h"
63
+#include "qemu/cutils.h"
64
+#include "hw/arm/msf2-soc.h"
65
+#include "cpu.h"
66
+
67
+#define DDR_BASE_ADDRESS 0xA0000000
68
+#define DDR_SIZE (64 * M_BYTE)
69
+
70
+#define M2S010_ENVM_SIZE (256 * K_BYTE)
71
+#define M2S010_ESRAM_SIZE (64 * K_BYTE)
72
+
73
+static void emcraft_sf2_s2s010_init(MachineState *machine)
74
+{
43
+{
75
+ DeviceState *dev;
44
+ unsigned long isar1, isar2;
76
+ DeviceState *spi_flash;
45
+
77
+ MSF2State *soc;
46
+ assert(getauxval(AT_HWCAP) & HWCAP_CPUID);
78
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
47
+
79
+ DriveInfo *dinfo = drive_get_next(IF_MTD);
48
+ asm("mrs %0, id_aa64isar1_el1" : "=r"(isar1));
80
+ qemu_irq cs_line;
49
+ asm("mrs %0, S3_0_C0_C6_2" : "=r"(isar2)); /* id_aa64isar2_el1 */
81
+ SSIBus *spi_bus;
50
+
82
+ MemoryRegion *sysmem = get_system_memory();
51
+ return ((isar1 >> 4) & 0xf) /* APA */
83
+ MemoryRegion *ddr = g_new(MemoryRegion, 1);
52
+ | ((isar1 >> 8) & 0xf) /* API */
84
+
53
+ | ((isar2 >> 12) & 0xf); /* APA3 */
85
+ if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) {
54
+}
86
+ error_report("This board can only be used with CPU %s",
55
diff --git a/tests/tcg/aarch64/pauth-2.c b/tests/tcg/aarch64/pauth-2.c
87
+ mc->default_cpu_type);
56
index XXXXXXX..XXXXXXX 100644
57
--- a/tests/tcg/aarch64/pauth-2.c
58
+++ b/tests/tcg/aarch64/pauth-2.c
59
@@ -XXX,XX +XXX,XX @@
60
#include <stdint.h>
61
+#include <signal.h>
62
+#include <stdlib.h>
63
#include <assert.h>
64
+#include "pauth.h"
65
+
66
+
67
+static void sigill(int sig, siginfo_t *info, void *vuc)
68
+{
69
+ ucontext_t *uc = vuc;
70
+ uint64_t test;
71
+
72
+ /* There is only one insn below that is allowed to fault. */
73
+ asm volatile("adr %0, auth2_insn" : "=r"(test));
74
+ assert(test == uc->uc_mcontext.pc);
75
+ exit(0);
76
+}
77
+
78
+static int pac_feature;
79
80
void do_test(uint64_t value)
81
{
82
@@ -XXX,XX +XXX,XX @@ void do_test(uint64_t value)
83
* An invalid salt usually fails authorization, but again there
84
* is a chance of choosing another salt that works.
85
* Iterate until we find another salt which does fail.
86
+ *
87
+ * With FEAT_FPAC, this will SIGILL instead of producing a result.
88
*/
89
for (salt2 = salt1 + 1; ; salt2++) {
90
- asm volatile("autda %0, %2" : "=r"(decode) : "0"(encode), "r"(salt2));
91
+ asm volatile("auth2_insn: autda %0, %2"
92
+ : "=r"(decode) : "0"(encode), "r"(salt2));
93
if (decode != value) {
94
break;
95
}
96
}
97
98
+ assert(pac_feature < 4); /* No FEAT_FPAC */
99
+
100
/* The VA bits, bit 55, and the TBI bits, should be unchanged. */
101
assert(((decode ^ value) & 0xff80ffffffffffffull) == 0);
102
103
/*
104
- * Bits [54:53] are an error indicator based on the key used;
105
- * the DA key above is keynumber 0, so error == 0b01. Otherwise
106
- * bit 55 of the original is sign-extended into the rest of the auth.
107
+ * Without FEAT_Pauth2, bits [54:53] are an error indicator based on
108
+ * the key used; the DA key above is keynumber 0, so error == 0b01.
109
+ * Otherwise, bit 55 of the original is sign-extended into the rest
110
+ * of the auth.
111
*/
112
- if ((value >> 55) & 1) {
113
- assert(((decode >> 48) & 0xff) == 0b10111111);
114
- } else {
115
- assert(((decode >> 48) & 0xff) == 0b00100000);
116
+ if (pac_feature < 3) {
117
+ if ((value >> 55) & 1) {
118
+ assert(((decode >> 48) & 0xff) == 0b10111111);
119
+ } else {
120
+ assert(((decode >> 48) & 0xff) == 0b00100000);
121
+ }
122
}
123
}
124
125
int main()
126
{
127
+ static const struct sigaction sa = {
128
+ .sa_sigaction = sigill,
129
+ .sa_flags = SA_SIGINFO
130
+ };
131
+
132
+ pac_feature = get_pac_feature();
133
+ assert(pac_feature != 0);
134
+
135
+ if (pac_feature >= 4) {
136
+ /* FEAT_FPAC */
137
+ sigaction(SIGILL, &sa, NULL);
88
+ }
138
+ }
89
+
139
+
90
+ memory_region_init_ram(ddr, NULL, "ddr-ram", DDR_SIZE,
140
do_test(0);
91
+ &error_fatal);
141
do_test(0xda004acedeadbeefull);
92
+ memory_region_add_subregion(sysmem, DDR_BASE_ADDRESS, ddr);
142
return 0;
93
+
143
diff --git a/tests/tcg/aarch64/pauth-4.c b/tests/tcg/aarch64/pauth-4.c
94
+ dev = qdev_create(NULL, TYPE_MSF2_SOC);
144
index XXXXXXX..XXXXXXX 100644
95
+ qdev_prop_set_string(dev, "part-name", "M2S010");
145
--- a/tests/tcg/aarch64/pauth-4.c
96
+ qdev_prop_set_string(dev, "cpu-type", mc->default_cpu_type);
146
+++ b/tests/tcg/aarch64/pauth-4.c
97
+
147
@@ -XXX,XX +XXX,XX @@
98
+ qdev_prop_set_uint64(dev, "eNVM-size", M2S010_ENVM_SIZE);
148
#include <assert.h>
99
+ qdev_prop_set_uint64(dev, "eSRAM-size", M2S010_ESRAM_SIZE);
149
#include <stdio.h>
150
#include <stdlib.h>
151
+#include "pauth.h"
152
153
#define TESTS 1000
154
155
int main()
156
{
157
+ char base[TESTS];
158
int i, count = 0;
159
float perc;
160
- void *base = malloc(TESTS);
161
+ int pac_feature = get_pac_feature();
100
+
162
+
101
+ /*
163
+ /*
102
+ * CPU clock and peripheral clocks(APB0, APB1)are configurable
164
+ * Exit if no PAuth or FEAT_FPAC, which will SIGILL on AUTIA failure
103
+ * in Libero. CPU clock is divided by APB0 and APB1 divisors for
165
+ * rather than return an error for us to check below.
104
+ * peripherals. Emcraft's SoM kit comes with these settings by default.
105
+ */
166
+ */
106
+ qdev_prop_set_uint32(dev, "m3clk", 142 * 1000000);
167
+ if (pac_feature == 0 || pac_feature >= 4) {
107
+ qdev_prop_set_uint32(dev, "apb0div", 2);
168
+ return 0;
108
+ qdev_prop_set_uint32(dev, "apb1div", 2);
109
+
110
+ object_property_set_bool(OBJECT(dev), true, "realized", &error_fatal);
111
+
112
+ soc = MSF2_SOC(dev);
113
+
114
+ /* Attach SPI flash to SPI0 controller */
115
+ spi_bus = (SSIBus *)qdev_get_child_bus(dev, "spi0");
116
+ spi_flash = ssi_create_slave_no_init(spi_bus, "s25sl12801");
117
+ qdev_prop_set_uint8(spi_flash, "spansion-cr2nv", 1);
118
+ if (dinfo) {
119
+ qdev_prop_set_drive(spi_flash, "drive", blk_by_legacy_dinfo(dinfo),
120
+ &error_fatal);
121
+ }
169
+ }
122
+ qdev_init_nofail(spi_flash);
170
123
+ cs_line = qdev_get_gpio_in_named(spi_flash, SSI_GPIO_CS, 0);
171
for (i = 0; i < TESTS; i++) {
124
+ sysbus_connect_irq(SYS_BUS_DEVICE(&soc->spi[0]), 1, cs_line);
172
uintptr_t in, x, y;
125
+
173
@@ -XXX,XX +XXX,XX @@ int main()
126
+ armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
174
in = i + (uintptr_t) base;
127
+ soc->envm_size);
175
128
+}
176
asm("mov %0, %[in]\n\t"
129
+
177
- "pacia %0, sp\n\t" /* sigill if pauth not supported */
130
+static void emcraft_sf2_machine_init(MachineClass *mc)
178
+ "pacia %0, sp\n\t"
131
+{
179
"eor %0, %0, #4\n\t" /* corrupt single bit */
132
+ mc->desc = "SmartFusion2 SOM kit from Emcraft (M2S010)";
180
"mov %1, %0\n\t"
133
+ mc->init = emcraft_sf2_s2s010_init;
181
"autia %1, sp\n\t" /* validate corrupted pointer */
134
+ mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3");
182
@@ -XXX,XX +XXX,XX @@ int main()
135
+}
183
if (x != y) {
136
+
184
count++;
137
+DEFINE_MACHINE("emcraft-sf2", emcraft_sf2_machine_init)
185
}
186
-
187
}
188
+
189
perc = (float) count / (float) TESTS;
190
- printf("Checks Passed: %0.2f%%", perc * 100.0);
191
+ printf("Checks Passed: %0.2f%%\n", perc * 100.0);
192
assert(perc > 0.95);
193
return 0;
194
}
195
diff --git a/tests/tcg/aarch64/pauth-5.c b/tests/tcg/aarch64/pauth-5.c
196
index XXXXXXX..XXXXXXX 100644
197
--- a/tests/tcg/aarch64/pauth-5.c
198
+++ b/tests/tcg/aarch64/pauth-5.c
199
@@ -XXX,XX +XXX,XX @@
200
#include <assert.h>
201
+#include "pauth.h"
202
203
static int x;
204
205
@@ -XXX,XX +XXX,XX @@ int main()
206
{
207
int *p0 = &x, *p1, *p2, *p3;
208
unsigned long salt = 0;
209
+ int pac_feature = get_pac_feature();
210
+
211
+ /*
212
+ * Exit if no PAuth or FEAT_FPAC, which will SIGILL on AUTDA failure
213
+ * rather than return an error for us to check below.
214
+ */
215
+ if (pac_feature == 0 || pac_feature >= 4) {
216
+ return 0;
217
+ }
218
219
/*
220
* With TBI enabled and a 48-bit VA, there are 7 bits of auth, and so
221
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
222
index XXXXXXX..XXXXXXX 100644
223
--- a/tests/tcg/aarch64/Makefile.target
224
+++ b/tests/tcg/aarch64/Makefile.target
225
@@ -XXX,XX +XXX,XX @@ endif
226
ifneq ($(CROSS_CC_HAS_ARMV8_3),)
227
AARCH64_TESTS += pauth-1 pauth-2 pauth-4 pauth-5
228
pauth-%: CFLAGS += -march=armv8.3-a
229
-run-pauth-%: QEMU_OPTS += -cpu max
230
+run-pauth-1: QEMU_OPTS += -cpu max
231
+run-pauth-2: QEMU_OPTS += -cpu max
232
+# Choose a cpu with FEAT_Pauth but without FEAT_FPAC for pauth-[45].
233
+run-pauth-4: QEMU_OPTS += -cpu neoverse-v1
234
+run-pauth-5: QEMU_OPTS += -cpu neoverse-v1
235
endif
236
237
# BTI Tests
138
--
238
--
139
2.7.4
239
2.34.1
140
141
Update armv7m_nvic_acknowledge_irq() and armv7m_nvic_complete_irq()
to handle banked exceptions:
 * acknowledge needs to use the correct vector, which may be
   in sec_vectors[]
 * acknowledge needs to return to its caller whether the
   exception should be taken to secure or non-secure state
 * complete needs its caller to tell it whether the exception
   being completed is a secure one or not

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-20-git-send-email-peter.maydell@linaro.org
---

From: Aaron Lindsay <aaron@os.amperecomputing.com>

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Aaron Lindsay <aaron@os.amperecomputing.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230829232335.965414-3-richard.henderson@linaro.org
[PMM: drop the HVF part of the patch and just comment that
 we need to do something when the register appears in that API]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
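For reference, the register being added can also be inspected from userspace
on Linux hosts whose kernel traps and emulates EL0 ID-register reads (the same
assumption the new pauth.h helper makes via HWCAP_CPUID). A minimal,
illustrative sketch:

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/auxv.h>

int main(void)
{
    uint64_t isar2;

    /* Only valid if the kernel emulates EL0 ID-register reads. */
    assert(getauxval(AT_HWCAP) & HWCAP_CPUID);

    /* ID_AA64ISAR2_EL1 is op0=3 op1=0 CRn=0 CRm=6 op2=2 */
    asm("mrs %0, S3_0_C0_C6_2" : "=r"(isar2));

    printf("APA3 = %u\n", (unsigned)((isar2 >> 12) & 0xf));
    printf("GPA3 = %u\n", (unsigned)((isar2 >> 8) & 0xf));
    return 0;
}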
14
target/arm/cpu.h | 15 +++++++++++++--
15
target/arm/cpu.h | 1 +
15
hw/intc/armv7m_nvic.c | 26 ++++++++++++++++++++------
16
target/arm/helper.c | 4 ++--
16
target/arm/helper.c | 8 +++++---
17
target/arm/hvf/hvf.c | 1 +
17
hw/intc/trace-events | 4 ++--
18
target/arm/kvm64.c | 2 ++
18
4 files changed, 40 insertions(+), 13 deletions(-)
19
4 files changed, 6 insertions(+), 2 deletions(-)
19
20
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
23
--- a/target/arm/cpu.h
23
+++ b/target/arm/cpu.h
24
+++ b/target/arm/cpu.h
24
@@ -XXX,XX +XXX,XX @@ static inline bool armv7m_nvic_can_take_pending_exception(void *opaque)
25
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {
25
* of architecturally banked exceptions.
26
uint32_t dbgdevid1;
26
*/
27
uint64_t id_aa64isar0;
27
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure);
28
uint64_t id_aa64isar1;
28
-void armv7m_nvic_acknowledge_irq(void *opaque);
29
+ uint64_t id_aa64isar2;
29
+/**
30
uint64_t id_aa64pfr0;
30
+ * armv7m_nvic_acknowledge_irq: make highest priority pending exception active
31
uint64_t id_aa64pfr1;
31
+ * @opaque: the NVIC
32
uint64_t id_aa64mmfr0;
32
+ *
33
+ * Move the current highest priority pending exception from the pending
34
+ * state to the active state, and update v7m.exception to indicate that
35
+ * it is the exception currently being handled.
36
+ *
37
+ * Returns: true if exception should be taken to Secure state, false for NS
38
+ */
39
+bool armv7m_nvic_acknowledge_irq(void *opaque);
40
/**
41
* armv7m_nvic_complete_irq: complete specified interrupt or exception
42
* @opaque: the NVIC
43
* @irq: the exception number to complete
44
+ * @secure: true if this exception was secure
45
*
46
* Returns: -1 if the irq was not active
47
* 1 if completing this irq brought us back to base (no active irqs)
48
* 0 if there is still an irq active after this one was completed
49
* (Ignoring -1, this is the same as the RETTOBASE value before completion.)
50
*/
51
-int armv7m_nvic_complete_irq(void *opaque, int irq);
52
+int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure);
53
/**
54
* armv7m_nvic_raw_execution_priority: return the raw execution priority
55
* @opaque: the NVIC
56
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/hw/intc/armv7m_nvic.c
59
+++ b/hw/intc/armv7m_nvic.c
60
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
61
}
62
63
/* Make pending IRQ active. */
64
-void armv7m_nvic_acknowledge_irq(void *opaque)
65
+bool armv7m_nvic_acknowledge_irq(void *opaque)
66
{
67
NVICState *s = (NVICState *)opaque;
68
CPUARMState *env = &s->cpu->env;
69
const int pending = s->vectpending;
70
const int running = nvic_exec_prio(s);
71
VecInfo *vec;
72
+ bool targets_secure;
73
74
assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
75
76
- vec = &s->vectors[pending];
77
+ if (s->vectpending_is_s_banked) {
78
+ vec = &s->sec_vectors[pending];
79
+ targets_secure = true;
80
+ } else {
81
+ vec = &s->vectors[pending];
82
+ targets_secure = !exc_is_banked(s->vectpending) &&
83
+ exc_targets_secure(s, s->vectpending);
84
+ }
85
86
assert(vec->enabled);
87
assert(vec->pending);
88
89
assert(s->vectpending_prio < running);
90
91
- trace_nvic_acknowledge_irq(pending, s->vectpending_prio);
92
+ trace_nvic_acknowledge_irq(pending, s->vectpending_prio, targets_secure);
93
94
vec->active = 1;
95
vec->pending = 0;
96
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_acknowledge_irq(void *opaque)
97
env->v7m.exception = s->vectpending;
98
99
nvic_irq_update(s);
100
+
101
+ return targets_secure;
102
}
103
104
-int armv7m_nvic_complete_irq(void *opaque, int irq)
105
+int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
106
{
107
NVICState *s = (NVICState *)opaque;
108
VecInfo *vec;
109
@@ -XXX,XX +XXX,XX @@ int armv7m_nvic_complete_irq(void *opaque, int irq)
110
111
assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
112
113
- vec = &s->vectors[irq];
114
+ if (secure && exc_is_banked(irq)) {
115
+ vec = &s->sec_vectors[irq];
116
+ } else {
117
+ vec = &s->vectors[irq];
118
+ }
119
120
- trace_nvic_complete_irq(irq);
121
+ trace_nvic_complete_irq(irq, secure);
122
123
if (!vec->active) {
124
/* Tell the caller this was an illegal exception return */
125
diff --git a/target/arm/helper.c b/target/arm/helper.c
33
diff --git a/target/arm/helper.c b/target/arm/helper.c
126
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
127
--- a/target/arm/helper.c
35
--- a/target/arm/helper.c
128
+++ b/target/arm/helper.c
36
+++ b/target/arm/helper.c
129
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
37
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
130
bool return_to_sp_process = false;
38
.access = PL1_R, .type = ARM_CP_CONST,
131
bool return_to_handler = false;
39
.accessfn = access_aa64_tid3,
132
bool rettobase = false;
40
.resetvalue = cpu->isar.id_aa64isar1 },
133
+ bool exc_secure = false;
41
- { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
134
42
+ { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
135
/* We can only get here from an EXCP_EXCEPTION_EXIT, and
43
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
136
* gen_bx_excret() enforces the architectural rule
44
.access = PL1_R, .type = ARM_CP_CONST,
137
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
45
.accessfn = access_aa64_tid3,
138
* which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
46
- .resetvalue = 0 },
139
*/
47
+ .resetvalue = cpu->isar.id_aa64isar2 },
140
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
48
{ .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
141
- int es = excret & R_V7M_EXCRET_ES_MASK;
49
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
142
+ exc_secure = excret & R_V7M_EXCRET_ES_MASK;
50
.access = PL1_R, .type = ARM_CP_CONST,
143
if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
51
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
144
- env->v7m.faultmask[es] = 0;
145
+ env->v7m.faultmask[exc_secure] = 0;
146
}
147
} else {
148
env->v7m.faultmask[M_REG_NS] = 0;
149
}
150
}
151
152
- switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) {
153
+ switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
154
+ exc_secure)) {
155
case -1:
156
/* attempt to exit an exception that isn't active */
157
ufault = true;
158
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
159
index XXXXXXX..XXXXXXX 100644
52
index XXXXXXX..XXXXXXX 100644
160
--- a/hw/intc/trace-events
53
--- a/target/arm/hvf/hvf.c
161
+++ b/hw/intc/trace-events
54
+++ b/target/arm/hvf/hvf.c
162
@@ -XXX,XX +XXX,XX @@ nvic_escalate_disabled(int irq) "NVIC escalating irq %d to HardFault: disabled"
55
@@ -XXX,XX +XXX,XX @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
163
nvic_set_pending(int irq, bool secure, int en, int prio) "NVIC set pending irq %d secure-bank %d (enabled: %d priority %d)"
56
{ HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
164
nvic_clear_pending(int irq, bool secure, int en, int prio) "NVIC clear pending irq %d secure-bank %d (enabled: %d priority %d)"
57
{ HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
165
nvic_set_pending_level(int irq) "NVIC set pending: irq %d higher prio than vectpending: setting irq line to 1"
58
{ HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
166
-nvic_acknowledge_irq(int irq, int prio) "NVIC acknowledge IRQ: %d now active (prio %d)"
59
+ /* Add ID_AA64ISAR2_EL1 here when HVF supports it */
167
-nvic_complete_irq(int irq) "NVIC complete IRQ %d"
60
{ HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
168
+nvic_acknowledge_irq(int irq, int prio, bool targets_secure) "NVIC acknowledge IRQ: %d now active (prio %d targets_secure %d)"
61
{ HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
169
+nvic_complete_irq(int irq, bool secure) "NVIC complete IRQ %d (secure %d)"
62
{ HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
170
nvic_set_irq_level(int irq, int level) "NVIC external irq %d level set to %d"
63
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
171
nvic_sysreg_read(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u"
64
index XXXXXXX..XXXXXXX 100644
172
nvic_sysreg_write(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u"
65
--- a/target/arm/kvm64.c
66
+++ b/target/arm/kvm64.c
67
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
68
ARM64_SYS_REG(3, 0, 0, 6, 0));
69
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
70
ARM64_SYS_REG(3, 0, 0, 6, 1));
71
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
72
+ ARM64_SYS_REG(3, 0, 0, 6, 2));
73
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
74
ARM64_SYS_REG(3, 0, 0, 7, 0));
75
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
173
--
76
--
174
2.7.4
77
2.34.1
175
78
176
79
Now that we have a banked FAULTMASK register and banked exceptions,
we can implement the correct check in cpu_mmu_index() for whether
the MPU_CTRL.HFNMIENA bit's effect should apply. This bit causes
handlers which have requested a negative execution priority to run
with the MPU disabled. In v8M the test has to check this for the
current security state and so takes account of banking.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-17-git-send-email-peter.maydell@linaro.org
---

From: Aaron Lindsay <aaron@os.amperecomputing.com>

Rename isar_feature_aa64_pauth_arch to isar_feature_aa64_pauth_qarma5
to distinguish the other architectural algorithm qarma3.

Add ARMPauthFeature and isar_feature_pauth_feature to cover the
other pauth conditions.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Aaron Lindsay <aaron@os.amperecomputing.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230829232335.965414-4-richard.henderson@linaro.org
Message-Id: <20230609172324.982888-3-aaron@os.amperecomputing.com>
[rth: Add ARMPauthFeature and eliminate most other predicates]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
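The point of the new ARMPauthFeature values is that the APA/API/APA3 fields
are cumulative levels, so feature checks become '>=' comparisons. A small
illustrative sketch; the enum mirrors the patch, while has_pauth2() is a
hypothetical helper, not QEMU code:

#include <stdbool.h>
#include <stdio.h>

/* Values as used for the APA/API/APA3 ID register fields. */
typedef enum {
    PauthFeat_None = 0,
    PauthFeat_1 = 1,
    PauthFeat_EPAC = 2,
    PauthFeat_2 = 3,
    PauthFeat_FPAC = 4,
    PauthFeat_FPACCOMBINED = 5,
} ARMPauthFeature;

/* Hypothetical helper: FEAT_Pauth2 behaviour is implied by any level >= 3. */
static bool has_pauth2(ARMPauthFeature feat)
{
    return feat >= PauthFeat_2;
}

int main(void)
{
    printf("EPAC implies Pauth2? %d\n", has_pauth2(PauthFeat_EPAC)); /* 0 */
    printf("FPAC implies Pauth2? %d\n", has_pauth2(PauthFeat_FPAC)); /* 1 */
    return 0;
}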
12
target/arm/cpu.h | 21 ++++++++++++++++-----
18
target/arm/cpu.h | 47 +++++++++++++++++++++++++++++------
13
hw/intc/armv7m_nvic.c | 29 +++++++++++++++++++++++++++++
19
target/arm/tcg/pauth_helper.c | 2 +-
14
2 files changed, 45 insertions(+), 5 deletions(-)
20
2 files changed, 40 insertions(+), 9 deletions(-)
15
21
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
22
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
24
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
25
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ int armv7m_nvic_complete_irq(void *opaque, int irq);
26
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
21
* (v8M ARM ARM I_PKLD.)
27
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
22
*/
28
}
23
int armv7m_nvic_raw_execution_priority(void *opaque);
29
24
+/**
30
+/*
25
+ * armv7m_nvic_neg_prio_requested: return true if the requested execution
31
+ * These are the values from APA/API/APA3.
26
+ * priority is negative for the specified security state.
32
+ * In general these must be compared '>=', per the normal Arm ARM
27
+ * @opaque: the NVIC
33
+ * treatment of fields in ID registers.
28
+ * @secure: the security state to test
29
+ * This corresponds to the pseudocode IsReqExecPriNeg().
30
+ */
34
+ */
31
+#ifndef CONFIG_USER_ONLY
35
+typedef enum {
32
+bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure);
36
+ PauthFeat_None = 0,
33
+#else
37
+ PauthFeat_1 = 1,
34
+static inline bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
38
+ PauthFeat_EPAC = 2,
39
+ PauthFeat_2 = 3,
40
+ PauthFeat_FPAC = 4,
41
+ PauthFeat_FPACCOMBINED = 5,
42
+} ARMPauthFeature;
43
+
44
+static inline ARMPauthFeature
45
+isar_feature_pauth_feature(const ARMISARegisters *id)
35
+{
46
+{
36
+ return false;
47
+ /*
37
+}
48
+ * Architecturally, only one of {APA,API,APA3} may be active (non-zero)
38
+#endif
49
+ * and the other two must be zero. Thus we may avoid conditionals.
39
40
/* Interface for defining coprocessor registers.
41
* Registers are defined in tables of arm_cp_reginfo structs
42
@@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
43
if (arm_feature(env, ARM_FEATURE_M)) {
44
ARMMMUIdx mmu_idx = el == 0 ? ARMMMUIdx_MUser : ARMMMUIdx_MPriv;
45
46
- /* Execution priority is negative if FAULTMASK is set or
47
- * we're in a HardFault or NMI handler.
48
- */
49
- if ((env->v7m.exception > 0 && env->v7m.exception <= 3)
50
- || env->v7m.faultmask[env->v7m.secure]) {
51
+ if (armv7m_nvic_neg_prio_requested(env->nvic, env->v7m.secure)) {
52
mmu_idx = ARMMMUIdx_MNegPri;
53
}
54
55
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/hw/intc/armv7m_nvic.c
58
+++ b/hw/intc/armv7m_nvic.c
59
@@ -XXX,XX +XXX,XX @@ static inline int nvic_exec_prio(NVICState *s)
60
return MIN(running, s->exception_prio);
61
}
62
63
+bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
64
+{
65
+ /* Return true if the requested execution priority is negative
66
+ * for the specified security state, ie that security state
67
+ * has an active NMI or HardFault or has set its FAULTMASK.
68
+ * Note that this is not the same as whether the execution
69
+ * priority is actually negative (for instance AIRCR.PRIS may
70
+ * mean we don't allow FAULTMASK_NS to actually make the execution
71
+ * priority negative). Compare pseudocode IsReqExcPriNeg().
72
+ */
50
+ */
73
+ NVICState *s = opaque;
51
+ return (FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) |
74
+
52
+ FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, API) |
75
+ if (s->cpu->env.v7m.faultmask[secure]) {
53
+ FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3));
76
+ return true;
77
+ }
78
+
79
+ if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
80
+ s->vectors[ARMV7M_EXCP_HARD].active) {
81
+ return true;
82
+ }
83
+
84
+ if (s->vectors[ARMV7M_EXCP_NMI].active &&
85
+ exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
86
+ return true;
87
+ }
88
+
89
+ return false;
90
+}
54
+}
91
+
55
+
92
bool armv7m_nvic_can_take_pending_exception(void *opaque)
56
static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
93
{
57
{
94
NVICState *s = opaque;
58
/*
59
* Return true if any form of pauth is enabled, as this
60
* predicate controls migration of the 128-bit keys.
61
*/
62
- return (id->id_aa64isar1 &
63
- (FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf) |
64
- FIELD_DP64(0, ID_AA64ISAR1, API, 0xf) |
65
- FIELD_DP64(0, ID_AA64ISAR1, GPA, 0xf) |
66
- FIELD_DP64(0, ID_AA64ISAR1, GPI, 0xf))) != 0;
67
+ return isar_feature_pauth_feature(id) != PauthFeat_None;
68
}
69
70
-static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id)
71
+static inline bool isar_feature_aa64_pauth_qarma5(const ARMISARegisters *id)
72
{
73
/*
74
- * Return true if pauth is enabled with the architected QARMA algorithm.
75
- * QEMU will always set APA+GPA to the same value.
76
+ * Return true if pauth is enabled with the architected QARMA5 algorithm.
77
+ * QEMU will always enable or disable both APA and GPA.
78
*/
79
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
80
}
81
82
+static inline bool isar_feature_aa64_pauth_qarma3(const ARMISARegisters *id)
83
+{
84
+ /*
85
+ * Return true if pauth is enabled with the architected QARMA3 algorithm.
86
+ * QEMU will always enable or disable both APA3 and GPA3.
87
+ */
88
+ return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3) != 0;
89
+}
90
+
91
static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
92
{
93
return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
94
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/target/arm/tcg/pauth_helper.c
97
+++ b/target/arm/tcg/pauth_helper.c
98
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_computepac_impdef(uint64_t data, uint64_t modifier,
99
static uint64_t pauth_computepac(CPUARMState *env, uint64_t data,
100
uint64_t modifier, ARMPACKey key)
101
{
102
- if (cpu_isar_feature(aa64_pauth_arch, env_archcpu(env))) {
103
+ if (cpu_isar_feature(aa64_pauth_qarma5, env_archcpu(env))) {
104
return pauth_computepac_architected(data, modifier, key);
105
} else {
106
return pauth_computepac_impdef(data, modifier, key);
95
--
107
--
96
2.7.4
108
2.34.1
97
98
When escalating to HardFault, we must go into Lockup if we
can't take the synchronous HardFault because the current
execution priority is already at or below the priority of
HardFault. In v7M HF is always priority -1 so a simple < 0
comparison sufficed; in v8M the priority of HardFault can
vary depending on whether it is a Secure or NonSecure
HardFault, so we must check against the priority of the
HardFault exception vector we're about to use.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-13-git-send-email-peter.maydell@linaro.org
---

From: Richard Henderson <richard.henderson@linaro.org>

We have cpu properties to adjust the pauth algorithm for the
purpose of speed of emulation. Retain the set of pauth features
supported by the cpu even as the algorithm changes.

This already affects the neoverse-v1 cpu, which has FEAT_EPAC.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230829232335.965414-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
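In essence the finalize hook now saves the feature level and re-deposits it
into whichever algorithm field is selected. A standalone sketch of that bit
manipulation, illustrative only; deposit4() is a stand-in for QEMU's
FIELD_DP64, with field offsets per ID_AA64ISAR1_EL1:

#include <stdint.h>
#include <stdio.h>

static uint64_t deposit4(uint64_t reg, unsigned shift, uint64_t val)
{
    return (reg & ~(0xfull << shift)) | ((val & 0xf) << shift);
}

/* Clear APA/API/GPA/GPI, then re-enable one algorithm at the saved level. */
static uint64_t select_pauth_algo(uint64_t isar1, unsigned features, int impdef)
{
    isar1 = deposit4(isar1, 4, 0);    /* APA  [7:4]   */
    isar1 = deposit4(isar1, 8, 0);    /* API  [11:8]  */
    isar1 = deposit4(isar1, 24, 0);   /* GPA  [27:24] */
    isar1 = deposit4(isar1, 28, 0);   /* GPI  [31:28] */

    if (impdef) {
        isar1 = deposit4(isar1, 8, features);   /* API = saved level */
        isar1 = deposit4(isar1, 28, 1);         /* GPI */
    } else {
        isar1 = deposit4(isar1, 4, features);   /* APA = saved level */
        isar1 = deposit4(isar1, 24, 1);         /* GPA */
    }
    return isar1;
}

int main(void)
{
    /* e.g. a CPU whose QARMA5 field advertised FEAT_EPAC (level 2) */
    printf("impdef: 0x%llx\n",
           (unsigned long long)select_pauth_algo(0x0000000000000020ull, 2, 1));
    return 0;
}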
14
hw/intc/armv7m_nvic.c | 23 ++++++++++++-----------
14
target/arm/cpu64.c | 70 +++++++++++++++++++++++++++---------------
15
1 file changed, 12 insertions(+), 11 deletions(-)
15
target/arm/tcg/cpu64.c | 2 ++
16
2 files changed, 47 insertions(+), 25 deletions(-)
16
17
17
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
18
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
18
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/intc/armv7m_nvic.c
20
--- a/target/arm/cpu64.c
20
+++ b/hw/intc/armv7m_nvic.c
21
+++ b/target/arm/cpu64.c
21
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
22
@@ -XXX,XX +XXX,XX @@ void aarch64_add_sme_properties(Object *obj)
23
24
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
25
{
26
- int arch_val = 0, impdef_val = 0;
27
- uint64_t t;
28
+ ARMPauthFeature features = cpu_isar_feature(pauth_feature, cpu);
29
+ uint64_t isar1;
30
31
- /* Exit early if PAuth is enabled, and fall through to disable it */
32
- if ((kvm_enabled() || hvf_enabled()) && cpu->prop_pauth) {
33
- if (!cpu_isar_feature(aa64_pauth, cpu)) {
34
- error_setg(errp, "'pauth' feature not supported by %s on this host",
35
- kvm_enabled() ? "KVM" : "hvf");
36
+ /*
37
+ * These properties enable or disable Pauth as a whole, or change
38
+ * the pauth algorithm, but do not change the set of features that
39
+ * are present. We have saved a copy of those features above and
40
+ * will now place it into the field that chooses the algorithm.
41
+ *
42
+ * Begin by disabling all fields.
43
+ */
44
+ isar1 = cpu->isar.id_aa64isar1;
45
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, 0);
46
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 0);
47
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, 0);
48
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 0);
49
+
50
+ if (kvm_enabled() || hvf_enabled()) {
51
+ /*
52
+ * Exit early if PAuth is enabled and fall through to disable it.
53
+ * The algorithm selection properties are not present.
54
+ */
55
+ if (cpu->prop_pauth) {
56
+ if (features == 0) {
57
+ error_setg(errp, "'pauth' feature not supported by "
58
+ "%s on this host", current_accel_name());
59
+ }
60
+ return;
61
+ }
62
+ } else {
63
+ /* Pauth properties are only present when the model supports it. */
64
+ if (features == 0) {
65
+ assert(!cpu->prop_pauth);
66
+ return;
22
}
67
}
23
68
24
if (escalate) {
69
- return;
25
- if (running < 0) {
70
- }
26
- /* We want to escalate to HardFault but we can't take a
71
-
27
- * synchronous HardFault at this point either. This is a
72
- /* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */
28
- * Lockup condition due to a guest bug. We don't model
73
- if (cpu->prop_pauth) {
29
- * Lockup, so report via cpu_abort() instead.
74
- if (cpu->prop_pauth_impdef) {
30
- */
75
- impdef_val = 1;
31
- cpu_abort(&s->cpu->parent_obj,
76
- } else {
32
- "Lockup: can't escalate %d to HardFault "
77
- arch_val = 1;
33
- "(current priority %d)\n", irq, running);
78
+ if (cpu->prop_pauth) {
34
- }
79
+ if (cpu->prop_pauth_impdef) {
35
80
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features);
36
- /* We can do the escalation, so we take HardFault instead.
81
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1);
37
+ /* We need to escalate this exception to a synchronous HardFault.
82
+ } else {
38
* If BFHFNMINS is set then we escalate to the banked HF for
83
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features);
39
* the target security state of the original exception; otherwise
84
+ isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1);
40
* we take a Secure HardFault.
41
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
42
} else {
43
vec = &s->vectors[irq];
44
}
45
+ if (running <= vec->prio) {
46
+ /* We want to escalate to HardFault but we can't take the
47
+ * synchronous HardFault at this point either. This is a
48
+ * Lockup condition due to a guest bug. We don't model
49
+ * Lockup, so report via cpu_abort() instead.
50
+ */
51
+ cpu_abort(&s->cpu->parent_obj,
52
+ "Lockup: can't escalate %d to HardFault "
53
+ "(current priority %d)\n", irq, running);
54
+ }
85
+ }
55
+
86
+ } else if (cpu->prop_pauth_impdef) {
56
/* HF may be banked but there is only one shared HFSR */
87
+ error_setg(errp, "cannot enable pauth-impdef without pauth");
57
s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
88
+ error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
58
}
89
}
90
- } else if (cpu->prop_pauth_impdef) {
91
- error_setg(errp, "cannot enable pauth-impdef without pauth");
92
- error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
93
}
94
95
- t = cpu->isar.id_aa64isar1;
96
- t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
97
- t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
98
- t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
99
- t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
100
- cpu->isar.id_aa64isar1 = t;
101
+ cpu->isar.id_aa64isar1 = isar1;
102
}
103
104
static Property arm_cpu_pauth_property =
105
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
106
index XXXXXXX..XXXXXXX 100644
107
--- a/target/arm/tcg/cpu64.c
108
+++ b/target/arm/tcg/cpu64.c
109
@@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj)
110
111
t = cpu->isar.id_aa64isar1;
112
t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */
113
+ t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_1);
114
+ t = FIELD_DP64(t, ID_AA64ISAR1, API, 1);
115
t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); /* FEAT_JSCVT */
116
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); /* FEAT_FCMA */
117
t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* FEAT_LRCPC2 */
59
--
118
--
60
2.7.4
119
2.34.1
61
62
Make the armv7m_nvic_set_pending() and armv7m_nvic_clear_pending()
functions take a bool indicating whether to pend the secure
or non-secure version of a banked interrupt, and update the
callsites accordingly.

In most callsites we can simply pass the correct security
state in; in a couple of cases we use TODO comments to indicate
that we will return the code in a subsequent commit.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-10-git-send-email-peter.maydell@linaro.org
---

From: Richard Henderson <richard.henderson@linaro.org>

Implement the QARMA3 cryptographic algorithm for PAC calculation.
Implement a cpu feature to select the algorithm and document it.

Signed-off-by: Aaron Lindsay <aaron@os.amperecomputing.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230829232335.965414-6-richard.henderson@linaro.org
Message-Id: <20230609172324.982888-4-aaron@os.amperecomputing.com>
[rth: Merge cpu feature addition from another patch.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
14
target/arm/cpu.h | 14 ++++++++++-
16
docs/system/arm/cpu-features.rst | 21 ++++++++-----
15
hw/intc/armv7m_nvic.c | 64 ++++++++++++++++++++++++++++++++++++++-------------
17
docs/system/arm/emulation.rst | 3 ++
16
target/arm/helper.c | 24 +++++++++++--------
18
target/arm/cpu.h | 1 +
17
hw/intc/trace-events | 4 ++--
19
target/arm/arm-qmp-cmds.c | 2 +-
18
4 files changed, 77 insertions(+), 29 deletions(-)
20
target/arm/cpu64.c | 24 ++++++++++++--
21
target/arm/tcg/pauth_helper.c | 54 ++++++++++++++++++++++++++------
22
tests/qtest/arm-cpu-features.c | 12 ++++++-
23
7 files changed, 94 insertions(+), 23 deletions(-)
19
24
25
diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst
26
index XXXXXXX..XXXXXXX 100644
27
--- a/docs/system/arm/cpu-features.rst
28
+++ b/docs/system/arm/cpu-features.rst
29
@@ -XXX,XX +XXX,XX @@ TCG VCPU Features
30
TCG VCPU features are CPU features that are specific to TCG.
31
Below is the list of TCG VCPU features and their descriptions.
32
33
-``pauth-impdef``
34
- When ``FEAT_Pauth`` is enabled, either the *impdef* (Implementation
35
- Defined) algorithm is enabled or the *architected* QARMA algorithm
36
- is enabled. By default the impdef algorithm is disabled, and QARMA
37
- is enabled.
38
+``pauth``
39
+ Enable or disable ``FEAT_Pauth`` entirely.
40
41
- The architected QARMA algorithm has good cryptographic properties,
42
- but can be quite slow to emulate. The impdef algorithm used by QEMU
43
- is non-cryptographic but significantly faster.
44
+``pauth-impdef``
45
+ When ``pauth`` is enabled, select the QEMU implementation defined algorithm.
46
+
47
+``pauth-qarma3``
48
+ When ``pauth`` is enabled, select the architected QARMA3 algorithm.
49
+
50
+Without either ``pauth-impdef`` or ``pauth-qarma3`` enabled,
51
+the architected QARMA5 algorithm is used. The architected QARMA5
52
+and QARMA3 algorithms have good cryptographic properties, but can
53
+be quite slow to emulate. The impdef algorithm used by QEMU is
54
+non-cryptographic but significantly faster.
55
56
SVE CPU Properties
57
==================
58
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
59
index XXXXXXX..XXXXXXX 100644
60
--- a/docs/system/arm/emulation.rst
61
+++ b/docs/system/arm/emulation.rst
62
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
63
- FEAT_MTE (Memory Tagging Extension)
64
- FEAT_MTE2 (Memory Tagging Extension)
65
- FEAT_MTE3 (MTE Asymmetric Fault Handling)
66
+- FEAT_PACIMP (Pointer authentication - IMPLEMENTATION DEFINED algorithm)
67
+- FEAT_PACQARMA3 (Pointer authentication - QARMA3 algorithm)
68
+- FEAT_PACQARMA5 (Pointer authentication - QARMA5 algorithm)
69
- FEAT_PAN (Privileged access never)
70
- FEAT_PAN2 (AT S1E1R and AT S1E1W instruction variants affected by PSTATE.PAN)
71
- FEAT_PAN3 (Support for SCTLR_ELx.EPAN)
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
72
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
74
--- a/target/arm/cpu.h
23
+++ b/target/arm/cpu.h
75
+++ b/target/arm/cpu.h
24
@@ -XXX,XX +XXX,XX @@ static inline bool armv7m_nvic_can_take_pending_exception(void *opaque)
76
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {
25
return true;
77
*/
26
}
78
bool prop_pauth;
27
#endif
79
bool prop_pauth_impdef;
28
-void armv7m_nvic_set_pending(void *opaque, int irq);
80
+ bool prop_pauth_qarma3;
29
+/**
81
bool prop_lpa2;
30
+ * armv7m_nvic_set_pending: mark the specified exception as pending
82
31
+ * @opaque: the NVIC
83
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
32
+ * @irq: the exception number to mark pending
84
diff --git a/target/arm/arm-qmp-cmds.c b/target/arm/arm-qmp-cmds.c
33
+ * @secure: false for non-banked exceptions or for the nonsecure
85
index XXXXXXX..XXXXXXX 100644
34
+ * version of a banked exception, true for the secure version of a banked
86
--- a/target/arm/arm-qmp-cmds.c
35
+ * exception.
87
+++ b/target/arm/arm-qmp-cmds.c
36
+ *
88
@@ -XXX,XX +XXX,XX @@ static const char *cpu_model_advertised_features[] = {
37
+ * Marks the specified exception as pending. Note that we will assert()
89
"sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280",
38
+ * if @secure is true and @irq does not specify one of the fixed set
90
"sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
39
+ * of architecturally banked exceptions.
91
"kvm-no-adjvtime", "kvm-steal-time",
40
+ */
92
- "pauth", "pauth-impdef",
41
+void armv7m_nvic_set_pending(void *opaque, int irq, bool secure);
93
+ "pauth", "pauth-impdef", "pauth-qarma3",
42
void armv7m_nvic_acknowledge_irq(void *opaque);
94
NULL
43
/**
95
};
44
* armv7m_nvic_complete_irq: complete specified interrupt or exception
96
45
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
97
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
46
index XXXXXXX..XXXXXXX 100644
98
index XXXXXXX..XXXXXXX 100644
47
--- a/hw/intc/armv7m_nvic.c
99
--- a/target/arm/cpu64.c
48
+++ b/hw/intc/armv7m_nvic.c
100
+++ b/target/arm/cpu64.c
49
@@ -XXX,XX +XXX,XX @@ static void nvic_irq_update(NVICState *s)
101
@@ -XXX,XX +XXX,XX @@ void aarch64_add_sme_properties(Object *obj)
50
qemu_set_irq(s->excpout, lvl);
102
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
51
}
103
{
52
104
ARMPauthFeature features = cpu_isar_feature(pauth_feature, cpu);
53
-static void armv7m_nvic_clear_pending(void *opaque, int irq)
105
- uint64_t isar1;
54
+/**
106
+ uint64_t isar1, isar2;
55
+ * armv7m_nvic_clear_pending: mark the specified exception as not pending
107
56
+ * @opaque: the NVIC
108
/*
57
+ * @irq: the exception number to mark as not pending
109
* These properties enable or disable Pauth as a whole, or change
58
+ * @secure: false for non-banked exceptions or for the nonsecure
110
@@ -XXX,XX +XXX,XX @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
59
+ * version of a banked exception, true for the secure version of a banked
111
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, 0);
60
+ * exception.
112
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 0);
61
+ *
113
62
+ * Marks the specified exception as not pending. Note that we will assert()
114
+ isar2 = cpu->isar.id_aa64isar2;
63
+ * if @secure is true and @irq does not specify one of the fixed set
115
+ isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, 0);
64
+ * of architecturally banked exceptions.
116
+ isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 0);
65
+ */
117
+
66
+static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
118
if (kvm_enabled() || hvf_enabled()) {
67
{
119
/*
68
NVICState *s = (NVICState *)opaque;
120
* Exit early if PAuth is enabled and fall through to disable it.
69
VecInfo *vec;
121
@@ -XXX,XX +XXX,XX @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
70
122
}
71
assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
123
72
124
if (cpu->prop_pauth) {
73
- vec = &s->vectors[irq];
125
+ if (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) {
74
- trace_nvic_clear_pending(irq, vec->enabled, vec->prio);
126
+ error_setg(errp,
75
+ if (secure) {
127
+ "cannot enable both pauth-impdef and pauth-qarma3");
76
+ assert(exc_is_banked(irq));
128
+ return;
77
+ vec = &s->sec_vectors[irq];
129
+ }
130
+
131
if (cpu->prop_pauth_impdef) {
132
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features);
133
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1);
134
+ } else if (cpu->prop_pauth_qarma3) {
135
+ isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, features);
136
+ isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 1);
137
} else {
138
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features);
139
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1);
140
}
141
- } else if (cpu->prop_pauth_impdef) {
142
- error_setg(errp, "cannot enable pauth-impdef without pauth");
143
+ } else if (cpu->prop_pauth_impdef || cpu->prop_pauth_qarma3) {
144
+ error_setg(errp, "cannot enable pauth-impdef or "
145
+ "pauth-qarma3 without pauth");
146
error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
147
}
148
}
149
150
cpu->isar.id_aa64isar1 = isar1;
151
+ cpu->isar.id_aa64isar2 = isar2;
152
}
153
154
static Property arm_cpu_pauth_property =
155
DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
156
static Property arm_cpu_pauth_impdef_property =
157
DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
158
+static Property arm_cpu_pauth_qarma3_property =
159
+ DEFINE_PROP_BOOL("pauth-qarma3", ARMCPU, prop_pauth_qarma3, false);
160
161
void aarch64_add_pauth_properties(Object *obj)
162
{
163
@@ -XXX,XX +XXX,XX @@ void aarch64_add_pauth_properties(Object *obj)
164
cpu->prop_pauth = cpu_isar_feature(aa64_pauth, cpu);
165
} else {
166
qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
167
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma3_property);
168
}
169
}
170
171
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
172
index XXXXXXX..XXXXXXX 100644
173
--- a/target/arm/tcg/pauth_helper.c
174
+++ b/target/arm/tcg/pauth_helper.c
175
@@ -XXX,XX +XXX,XX @@ static uint64_t pac_sub(uint64_t i)
176
return o;
177
}
178
179
+static uint64_t pac_sub1(uint64_t i)
180
+{
181
+ static const uint8_t sub1[16] = {
182
+ 0xa, 0xd, 0xe, 0x6, 0xf, 0x7, 0x3, 0x5,
183
+ 0x9, 0x8, 0x0, 0xc, 0xb, 0x1, 0x2, 0x4,
184
+ };
185
+ uint64_t o = 0;
186
+ int b;
187
+
188
+ for (b = 0; b < 64; b += 4) {
189
+ o |= (uint64_t)sub1[(i >> b) & 0xf] << b;
190
+ }
191
+ return o;
192
+}
193
+
194
static uint64_t pac_inv_sub(uint64_t i)
195
{
196
static const uint8_t inv_sub[16] = {
197
@@ -XXX,XX +XXX,XX @@ static uint64_t tweak_inv_shuffle(uint64_t i)
198
}
199
200
static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
201
- ARMPACKey key)
202
+ ARMPACKey key, bool isqarma3)
203
{
204
static const uint64_t RC[5] = {
205
0x0000000000000000ull,
206
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
207
0x452821E638D01377ull,
208
};
209
const uint64_t alpha = 0xC0AC29B7C97C50DDull;
210
+ int iterations = isqarma3 ? 2 : 4;
211
/*
212
* Note that in the ARM pseudocode, key0 contains bits <127:64>
213
* and key1 contains bits <63:0> of the 128-bit key.
214
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
215
runningmod = modifier;
216
workingval = data ^ key0;
217
218
- for (i = 0; i <= 4; ++i) {
219
+ for (i = 0; i <= iterations; ++i) {
220
roundkey = key1 ^ runningmod;
221
workingval ^= roundkey;
222
workingval ^= RC[i];
223
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
224
workingval = pac_cell_shuffle(workingval);
225
workingval = pac_mult(workingval);
226
}
227
- workingval = pac_sub(workingval);
228
+ if (isqarma3) {
229
+ workingval = pac_sub1(workingval);
230
+ } else {
231
+ workingval = pac_sub(workingval);
232
+ }
233
runningmod = tweak_shuffle(runningmod);
234
}
235
roundkey = modk0 ^ runningmod;
236
workingval ^= roundkey;
237
workingval = pac_cell_shuffle(workingval);
238
workingval = pac_mult(workingval);
239
- workingval = pac_sub(workingval);
240
+ if (isqarma3) {
241
+ workingval = pac_sub1(workingval);
78
+ } else {
242
+ } else {
79
+ vec = &s->vectors[irq];
243
+ workingval = pac_sub(workingval);
80
+ }
244
+ }
81
+ trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
245
workingval = pac_cell_shuffle(workingval);
82
if (vec->pending) {
246
workingval = pac_mult(workingval);
83
vec->pending = 0;
247
workingval ^= key1;
84
nvic_irq_update(s);
248
workingval = pac_cell_inv_shuffle(workingval);
85
}
249
- workingval = pac_inv_sub(workingval);
86
}
250
+ if (isqarma3) {
87
251
+ workingval = pac_sub1(workingval);
88
-void armv7m_nvic_set_pending(void *opaque, int irq)
252
+ } else {
89
+void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
253
+ workingval = pac_inv_sub(workingval);
90
{
254
+ }
91
NVICState *s = (NVICState *)opaque;
255
workingval = pac_mult(workingval);
92
+ bool banked = exc_is_banked(irq);
256
workingval = pac_cell_inv_shuffle(workingval);
93
VecInfo *vec;
257
workingval ^= key0;
94
258
workingval ^= runningmod;
95
assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
259
- for (i = 0; i <= 4; ++i) {
96
+ assert(!secure || banked);
260
- workingval = pac_inv_sub(workingval);
97
261
- if (i < 4) {
98
- vec = &s->vectors[irq];
262
+ for (i = 0; i <= iterations; ++i) {
99
- trace_nvic_set_pending(irq, vec->enabled, vec->prio);
263
+ if (isqarma3) {
100
+ vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
264
+ workingval = pac_sub1(workingval);
101
265
+ } else {
102
+ trace_nvic_set_pending(irq, secure, vec->enabled, vec->prio);
266
+ workingval = pac_inv_sub(workingval);
103
267
+ }
104
if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
268
+ if (i < iterations) {
105
/* If a synchronous exception is pending then it may be
269
workingval = pac_mult(workingval);
106
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_set_pending(void *opaque, int irq)
270
workingval = pac_cell_inv_shuffle(workingval);
107
"(current priority %d)\n", irq, running);
108
}
109
110
- /* We can do the escalation, so we take HardFault instead */
111
+ /* We can do the escalation, so we take HardFault instead.
112
+ * If BFHFNMINS is set then we escalate to the banked HF for
113
+ * the target security state of the original exception; otherwise
114
+ * we take a Secure HardFault.
115
+ */
116
irq = ARMV7M_EXCP_HARD;
117
- vec = &s->vectors[irq];
118
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
119
+ (secure ||
120
+ !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
121
+ vec = &s->sec_vectors[irq];
122
+ } else {
123
+ vec = &s->vectors[irq];
124
+ }
125
+ /* HF may be banked but there is only one shared HFSR */
126
s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
127
}
271
}
128
}
272
runningmod = tweak_inv_shuffle(runningmod);
129
@@ -XXX,XX +XXX,XX @@ static void set_irq_level(void *opaque, int n, int level)
273
roundkey = key1 ^ runningmod;
130
if (level != vec->level) {
274
- workingval ^= RC[4 - i];
131
vec->level = level;
275
+ workingval ^= RC[iterations - i];
132
if (level) {
276
workingval ^= roundkey;
133
- armv7m_nvic_set_pending(s, n);
277
workingval ^= alpha;
134
+ armv7m_nvic_set_pending(s, n, false);
278
}
135
}
279
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_computepac(CPUARMState *env, uint64_t data,
136
}
280
uint64_t modifier, ARMPACKey key)
137
}
281
{
138
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
282
if (cpu_isar_feature(aa64_pauth_qarma5, env_archcpu(env))) {
139
}
283
- return pauth_computepac_architected(data, modifier, key);
140
case 0xd04: /* Interrupt Control State. */
284
+ return pauth_computepac_architected(data, modifier, key, false);
141
if (value & (1 << 31)) {
285
+ } else if (cpu_isar_feature(aa64_pauth_qarma3, env_archcpu(env))) {
142
- armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI);
286
+ return pauth_computepac_architected(data, modifier, key, true);
143
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
287
} else {
144
}
288
return pauth_computepac_impdef(data, modifier, key);
145
if (value & (1 << 28)) {
289
}
146
- armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV);
290
diff --git a/tests/qtest/arm-cpu-features.c b/tests/qtest/arm-cpu-features.c
147
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
291
index XXXXXXX..XXXXXXX 100644
148
} else if (value & (1 << 27)) {
292
--- a/tests/qtest/arm-cpu-features.c
149
- armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV);
293
+++ b/tests/qtest/arm-cpu-features.c
150
+ armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
294
@@ -XXX,XX +XXX,XX @@ static void pauth_tests_default(QTestState *qts, const char *cpu_type)
151
}
295
{
152
if (value & (1 << 26)) {
296
assert_has_feature_enabled(qts, cpu_type, "pauth");
153
- armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK);
297
assert_has_feature_disabled(qts, cpu_type, "pauth-impdef");
154
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
298
+ assert_has_feature_disabled(qts, cpu_type, "pauth-qarma3");
155
} else if (value & (1 << 25)) {
299
assert_set_feature(qts, cpu_type, "pauth", false);
156
- armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK);
300
assert_set_feature(qts, cpu_type, "pauth", true);
157
+ armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
301
assert_set_feature(qts, cpu_type, "pauth-impdef", true);
158
}
302
assert_set_feature(qts, cpu_type, "pauth-impdef", false);
159
break;
303
- assert_error(qts, cpu_type, "cannot enable pauth-impdef without pauth",
160
case 0xd08: /* Vector Table Offset. */
304
+ assert_set_feature(qts, cpu_type, "pauth-qarma3", true);
161
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
305
+ assert_set_feature(qts, cpu_type, "pauth-qarma3", false);
162
{
306
+ assert_error(qts, cpu_type,
163
int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
307
+ "cannot enable pauth-impdef or pauth-qarma3 without pauth",
164
if (excnum < s->num_irq) {
308
"{ 'pauth': false, 'pauth-impdef': true }");
165
- armv7m_nvic_set_pending(s, excnum);
309
+ assert_error(qts, cpu_type,
166
+ armv7m_nvic_set_pending(s, excnum, false);
310
+ "cannot enable pauth-impdef or pauth-qarma3 without pauth",
167
}
311
+ "{ 'pauth': false, 'pauth-qarma3': true }");
168
break;
312
+ assert_error(qts, cpu_type,
169
}
313
+ "cannot enable both pauth-impdef and pauth-qarma3",
170
@@ -XXX,XX +XXX,XX @@ static void nvic_systick_trigger(void *opaque, int n, int level)
314
+ "{ 'pauth': true, 'pauth-impdef': true, 'pauth-qarma3': true }");
171
/* SysTick just asked us to pend its exception.
315
}
172
* (This is different from an external interrupt line's
316
173
* behaviour.)
317
static void test_query_cpu_model_expansion(const void *data)
174
+ * TODO: when we implement the banked systicks we must make
175
+ * this pend the correct banked exception.
176
*/
177
- armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK);
178
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, false);
179
}
180
}
181
182
diff --git a/target/arm/helper.c b/target/arm/helper.c
183
index XXXXXXX..XXXXXXX 100644
184
--- a/target/arm/helper.c
185
+++ b/target/arm/helper.c
186
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
187
* stack, directly take a usage fault on the current stack.
188
*/
189
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
190
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
191
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
192
v7m_exception_taken(cpu, excret);
193
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
194
"stackframe: failed exception return integrity check\n");
195
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
196
* exception return excret specified then this is a UsageFault.
197
*/
198
if (return_to_handler != arm_v7m_is_handler_mode(env)) {
199
- /* Take an INVPC UsageFault by pushing the stack again. */
200
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
201
+ /* Take an INVPC UsageFault by pushing the stack again.
202
+ * TODO: the v8M version of this code should target the
203
+ * background state for this exception.
204
+ */
205
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
206
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
207
v7m_push_stack(cpu);
208
v7m_exception_taken(cpu, excret);
209
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
210
handle it. */
211
switch (cs->exception_index) {
212
case EXCP_UDEF:
213
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
214
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
215
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
216
break;
217
case EXCP_NOCP:
218
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
219
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
220
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
221
break;
222
case EXCP_INVSTATE:
223
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
224
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
225
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
226
break;
227
case EXCP_SWI:
228
/* The PC already points to the next instruction. */
229
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
230
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
231
break;
232
case EXCP_PREFETCH_ABORT:
233
case EXCP_DATA_ABORT:
234
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
235
env->v7m.bfar);
236
break;
237
}
238
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS);
239
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
240
break;
241
default:
242
/* All other FSR values are either MPU faults or "can't happen
243
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
244
env->v7m.mmfar[env->v7m.secure]);
245
break;
246
}
247
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
248
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
249
+ env->v7m.secure);
250
break;
251
}
252
break;
253
@@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
254
return;
255
}
256
}
257
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
258
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
259
break;
260
case EXCP_IRQ:
261
break;
262
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
263
index XXXXXXX..XXXXXXX 100644
264
--- a/hw/intc/trace-events
265
+++ b/hw/intc/trace-events
266
@@ -XXX,XX +XXX,XX @@ nvic_set_prio(int irq, uint8_t prio) "NVIC set irq %d priority %d"
267
nvic_irq_update(int vectpending, int pendprio, int exception_prio, int level) "NVIC vectpending %d pending prio %d exception_prio %d: setting irq line to %d"
268
nvic_escalate_prio(int irq, int irqprio, int runprio) "NVIC escalating irq %d to HardFault: insufficient priority %d >= %d"
269
nvic_escalate_disabled(int irq) "NVIC escalating irq %d to HardFault: disabled"
270
-nvic_set_pending(int irq, int en, int prio) "NVIC set pending irq %d (enabled: %d priority %d)"
271
-nvic_clear_pending(int irq, int en, int prio) "NVIC clear pending irq %d (enabled: %d priority %d)"
272
+nvic_set_pending(int irq, bool secure, int en, int prio) "NVIC set pending irq %d secure-bank %d (enabled: %d priority %d)"
273
+nvic_clear_pending(int irq, bool secure, int en, int prio) "NVIC clear pending irq %d secure-bank %d (enabled: %d priority %d)"
274
nvic_set_pending_level(int irq) "NVIC set pending: irq %d higher prio than vectpending: setting irq line to 1"
275
nvic_acknowledge_irq(int irq, int prio) "NVIC acknowledge IRQ: %d now active (prio %d)"
276
nvic_complete_irq(int irq) "NVIC complete IRQ %d"
277
--
318
--
278
2.7.4
319
2.34.1
279
280
1
If AIRCR.BFHFNMINS is clear, then although NonSecure HardFault
1
From: Aaron Lindsay <aaron@os.amperecomputing.com>
2
can still be pended via SHCSR.HARDFAULTPENDED, it mustn't actually
3
preempt execution. The simple way to achieve this is to clear the
4
enable bit for it, since the enable bit isn't guest visible.
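
In outline the idea is the sketch below; this is only an illustration with
simplified stand-in types (HFState, bfhfnmins_update are made up here), not
the actual NVICState/VecInfo code in the hunk further down:

  #include <stdbool.h>

  /* Simplified stand-in for the two VecInfo fields the real code touches. */
  typedef struct {
      bool enabled;
      int prio;
  } HFState;

  /*
   * Mirror AIRCR.BFHFNMINS into the HardFault state: when the bit is set,
   * Secure HardFault drops to priority -3 and the pended NonSecure
   * HardFault is allowed to preempt; when it is clear, the NS HardFault
   * can stay pended but cannot preempt, because its (non guest visible)
   * enable bit is held at 0.
   */
  static void bfhfnmins_update(HFState *s_hf, HFState *ns_hf, bool bfhfnmins)
  {
      if (bfhfnmins) {
          s_hf->prio = -3;
          ns_hf->enabled = true;
      } else {
          s_hf->prio = -1;
          ns_hf->enabled = false;
      }
  }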
5
2
3
Signed-off-by: Aaron Lindsay <aaron@os.amperecomputing.com>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20230829232335.965414-7-richard.henderson@linaro.org
8
Message-Id: <20230609172324.982888-5-aaron@os.amperecomputing.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 1505240046-11454-15-git-send-email-peter.maydell@linaro.org
9
---
11
---
10
hw/intc/armv7m_nvic.c | 12 ++++++++++--
12
docs/system/arm/emulation.rst | 1 +
11
1 file changed, 10 insertions(+), 2 deletions(-)
13
target/arm/tcg/cpu64.c | 2 +-
14
target/arm/tcg/pauth_helper.c | 16 +++++++++++-----
15
3 files changed, 13 insertions(+), 6 deletions(-)
12
16
13
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
17
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
14
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
15
--- a/hw/intc/armv7m_nvic.c
19
--- a/docs/system/arm/emulation.rst
16
+++ b/hw/intc/armv7m_nvic.c
20
+++ b/docs/system/arm/emulation.rst
17
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
21
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
18
(R_V7M_AIRCR_SYSRESETREQS_MASK |
22
- FEAT_DotProd (Advanced SIMD dot product instructions)
19
R_V7M_AIRCR_BFHFNMINS_MASK |
23
- FEAT_DoubleFault (Double Fault Extension)
20
R_V7M_AIRCR_PRIS_MASK);
24
- FEAT_E0PD (Preventing EL0 access to halves of address maps)
21
- /* BFHFNMINS changes the priority of Secure HardFault */
25
+- FEAT_EPAC (Enhanced pointer authentication)
22
+ /* BFHFNMINS changes the priority of Secure HardFault, and
26
- FEAT_ETS (Enhanced Translation Synchronization)
23
+ * allows a pending Non-secure HardFault to preempt (which
27
- FEAT_EVT (Enhanced Virtualization Traps)
24
+ * we implement by marking it enabled).
28
- FEAT_FCMA (Floating-point complex number instructions)
25
+ */
29
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
26
if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
30
index XXXXXXX..XXXXXXX 100644
27
s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
31
--- a/target/arm/tcg/cpu64.c
28
+ s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
32
+++ b/target/arm/tcg/cpu64.c
29
} else {
33
@@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj)
30
s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
34
31
+ s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
35
t = cpu->isar.id_aa64isar1;
32
}
36
t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */
33
}
37
- t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_1);
34
nvic_irq_update(s);
38
+ t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_EPAC);
35
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_reset(DeviceState *dev)
39
t = FIELD_DP64(t, ID_AA64ISAR1, API, 1);
36
NVICState *s = NVIC(dev);
40
t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); /* FEAT_JSCVT */
37
41
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); /* FEAT_FCMA */
38
s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
42
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
39
- s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
43
index XXXXXXX..XXXXXXX 100644
40
/* MEM, BUS, and USAGE are enabled through
44
--- a/target/arm/tcg/pauth_helper.c
41
* the System Handler Control register
45
+++ b/target/arm/tcg/pauth_helper.c
46
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_computepac(CPUARMState *env, uint64_t data,
47
static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
48
ARMPACKey *key, bool data)
49
{
50
+ ARMCPU *cpu = env_archcpu(env);
51
ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
52
ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data, false);
53
+ ARMPauthFeature pauth_feature = cpu_isar_feature(pauth_feature, cpu);
54
uint64_t pac, ext_ptr, ext, test;
55
int bot_bit, top_bit;
56
57
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
42
*/
58
*/
43
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_reset(DeviceState *dev)
59
test = sextract64(ptr, bot_bit, top_bit - bot_bit);
44
60
if (test != 0 && test != -1) {
45
/* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
61
- /*
46
s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
62
- * Note that our top_bit is one greater than the pseudocode's
47
+ /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
63
- * version, hence "- 2" here.
48
+ s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
64
- */
49
+ } else {
65
- pac ^= MAKE_64BIT_MASK(top_bit - 2, 1);
50
+ s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
66
+ if (pauth_feature == PauthFeat_EPAC) {
67
+ pac = 0;
68
+ } else {
69
+ /*
70
+ * Note that our top_bit is one greater than the pseudocode's
71
+ * version, hence "- 2" here.
72
+ */
73
+ pac ^= MAKE_64BIT_MASK(top_bit - 2, 1);
74
+ }
51
}
75
}
52
76
53
/* Strictly speaking the reset handler should be enabled.
77
/*
54
--
78
--
55
2.7.4
79
2.34.1
56
57
1
Handle banking of SHCSR: some register bits are banked between
1
From: Aaron Lindsay <aaron@os.amperecomputing.com>
2
Secure and Non-Secure, and some are only accessible to Secure.
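
As a rough sketch of the banking scheme (simplified, assumed types and
function name only; the real layout is the vectors[]/sec_vectors[] split in
the hunk below): a banked bit such as MEMFAULTACT has one copy per security
state, while SECUREFAULTACT has a single copy that is RAZ from the NS view.

  #include <stdbool.h>
  #include <stdint.h>

  typedef struct {
      bool memfault_act[2];   /* [0] = NonSecure copy, [1] = Secure copy */
      bool securefault_act;   /* not banked, Secure-only visible */
  } ShcsrSketch;

  static uint32_t shcsr_read_sketch(const ShcsrSketch *s, bool secure)
  {
      uint32_t val = 0;

      if (s->memfault_act[secure]) {
          val |= 1u << 0;     /* MEMFAULTACT: per-security-state copy */
      }
      if (secure && s->securefault_act) {
          val |= 1u << 4;     /* SECUREFAULTACT: reads as zero from NS */
      }
      return val;
  }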
3
2
3
Signed-off-by: Aaron Lindsay <aaron@os.amperecomputing.com>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20230829232335.965414-8-richard.henderson@linaro.org
8
Message-Id: <20230609172324.982888-6-aaron@os.amperecomputing.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 1505240046-11454-19-git-send-email-peter.maydell@linaro.org
7
---
11
---
8
hw/intc/armv7m_nvic.c | 221 ++++++++++++++++++++++++++++++++++++++------------
12
docs/system/arm/emulation.rst | 1 +
9
1 file changed, 169 insertions(+), 52 deletions(-)
13
target/arm/tcg/cpu64.c | 2 +-
14
target/arm/tcg/pauth_helper.c | 21 +++++++++++++++++----
15
3 files changed, 19 insertions(+), 5 deletions(-)
10
16
11
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
17
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
12
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
13
--- a/hw/intc/armv7m_nvic.c
19
--- a/docs/system/arm/emulation.rst
14
+++ b/hw/intc/armv7m_nvic.c
20
+++ b/docs/system/arm/emulation.rst
15
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
21
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
16
val = cpu->env.v7m.ccr[attrs.secure];
22
- FEAT_PAN2 (AT S1E1R and AT S1E1W instruction variants affected by PSTATE.PAN)
17
val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
23
- FEAT_PAN3 (Support for SCTLR_ELx.EPAN)
18
return val;
24
- FEAT_PAuth (Pointer authentication)
19
- case 0xd24: /* System Handler Status. */
25
+- FEAT_PAuth2 (Enhacements to pointer authentication)
20
+ case 0xd24: /* System Handler Control and State (SHCSR) */
26
- FEAT_PMULL (PMULL, PMULL2 instructions)
21
val = 0;
27
- FEAT_PMUv3p1 (PMU Extensions v3.1)
22
- if (s->vectors[ARMV7M_EXCP_MEM].active) {
28
- FEAT_PMUv3p4 (PMU Extensions v3.4)
23
- val |= (1 << 0);
29
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
24
- }
30
index XXXXXXX..XXXXXXX 100644
25
- if (s->vectors[ARMV7M_EXCP_BUS].active) {
31
--- a/target/arm/tcg/cpu64.c
26
- val |= (1 << 1);
32
+++ b/target/arm/tcg/cpu64.c
27
- }
33
@@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj)
28
- if (s->vectors[ARMV7M_EXCP_USAGE].active) {
34
29
- val |= (1 << 3);
35
t = cpu->isar.id_aa64isar1;
30
+ if (attrs.secure) {
36
t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */
31
+ if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
37
- t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_EPAC);
32
+ val |= (1 << 0);
38
+ t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_2);
33
+ }
39
t = FIELD_DP64(t, ID_AA64ISAR1, API, 1);
34
+ if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
40
t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); /* FEAT_JSCVT */
35
+ val |= (1 << 2);
41
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); /* FEAT_FCMA */
36
+ }
42
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
37
+ if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
43
index XXXXXXX..XXXXXXX 100644
38
+ val |= (1 << 3);
44
--- a/target/arm/tcg/pauth_helper.c
39
+ }
45
+++ b/target/arm/tcg/pauth_helper.c
40
+ if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
46
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
41
+ val |= (1 << 7);
47
*/
42
+ }
48
test = sextract64(ptr, bot_bit, top_bit - bot_bit);
43
+ if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
49
if (test != 0 && test != -1) {
44
+ val |= (1 << 10);
50
- if (pauth_feature == PauthFeat_EPAC) {
45
+ }
51
+ if (pauth_feature >= PauthFeat_2) {
46
+ if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
52
+ /* No action required */
47
+ val |= (1 << 11);
53
+ } else if (pauth_feature == PauthFeat_EPAC) {
48
+ }
54
pac = 0;
49
+ if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
55
} else {
50
+ val |= (1 << 12);
56
/*
51
+ }
57
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
52
+ if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
58
* Preserve the determination between upper and lower at bit 55,
53
+ val |= (1 << 13);
59
* and insert pointer authentication code.
54
+ }
60
*/
55
+ if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
61
+ if (pauth_feature >= PauthFeat_2) {
56
+ val |= (1 << 15);
62
+ pac ^= ptr;
57
+ }
63
+ }
58
+ if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
64
if (param.tbi) {
59
+ val |= (1 << 16);
65
ptr &= ~MAKE_64BIT_MASK(bot_bit, 55 - bot_bit + 1);
60
+ }
66
pac &= MAKE_64BIT_MASK(bot_bit, 54 - bot_bit + 1);
61
+ if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
67
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
62
+ val |= (1 << 18);
68
static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
63
+ }
69
ARMPACKey *key, bool data, int keynumber)
64
+ if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
70
{
65
+ val |= (1 << 21);
71
+ ARMCPU *cpu = env_archcpu(env);
66
+ }
72
ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
67
+ /* SecureFault is not banked but is always RAZ/WI to NS */
73
ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data, false);
68
+ if (s->vectors[ARMV7M_EXCP_SECURE].active) {
74
+ ARMPauthFeature pauth_feature = cpu_isar_feature(pauth_feature, cpu);
69
+ val |= (1 << 4);
75
int bot_bit, top_bit;
70
+ }
76
- uint64_t pac, orig_ptr, test;
71
+ if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
77
+ uint64_t pac, orig_ptr, cmp_mask;
72
+ val |= (1 << 19);
78
73
+ }
79
orig_ptr = pauth_original_ptr(ptr, param);
74
+ if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
80
pac = pauth_computepac(env, orig_ptr, modifier, *key);
75
+ val |= (1 << 20);
81
bot_bit = 64 - param.tsz;
76
+ }
82
top_bit = 64 - 8 * param.tbi;
77
+ } else {
83
78
+ if (s->vectors[ARMV7M_EXCP_MEM].active) {
84
- test = (pac ^ ptr) & ~MAKE_64BIT_MASK(55, 1);
79
+ val |= (1 << 0);
85
- if (unlikely(extract64(test, bot_bit, top_bit - bot_bit))) {
80
+ }
86
+ cmp_mask = MAKE_64BIT_MASK(bot_bit, top_bit - bot_bit);
81
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
87
+ cmp_mask &= ~MAKE_64BIT_MASK(55, 1);
82
+ /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
83
+ if (s->vectors[ARMV7M_EXCP_HARD].active) {
84
+ val |= (1 << 2);
85
+ }
86
+ if (s->vectors[ARMV7M_EXCP_HARD].pending) {
87
+ val |= (1 << 21);
88
+ }
89
+ }
90
+ if (s->vectors[ARMV7M_EXCP_USAGE].active) {
91
+ val |= (1 << 3);
92
+ }
93
+ if (s->vectors[ARMV7M_EXCP_SVC].active) {
94
+ val |= (1 << 7);
95
+ }
96
+ if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
97
+ val |= (1 << 10);
98
+ }
99
+ if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
100
+ val |= (1 << 11);
101
+ }
102
+ if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
103
+ val |= (1 << 12);
104
+ }
105
+ if (s->vectors[ARMV7M_EXCP_MEM].pending) {
106
+ val |= (1 << 13);
107
+ }
108
+ if (s->vectors[ARMV7M_EXCP_SVC].pending) {
109
+ val |= (1 << 15);
110
+ }
111
+ if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
112
+ val |= (1 << 16);
113
+ }
114
+ if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
115
+ val |= (1 << 18);
116
+ }
117
}
118
- if (s->vectors[ARMV7M_EXCP_SVC].active) {
119
- val |= (1 << 7);
120
+ if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
121
+ if (s->vectors[ARMV7M_EXCP_BUS].active) {
122
+ val |= (1 << 1);
123
+ }
124
+ if (s->vectors[ARMV7M_EXCP_BUS].pending) {
125
+ val |= (1 << 14);
126
+ }
127
+ if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
128
+ val |= (1 << 17);
129
+ }
130
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
131
+ s->vectors[ARMV7M_EXCP_NMI].active) {
132
+ /* NMIACT is not present in v7M */
133
+ val |= (1 << 5);
134
+ }
135
}
136
+
88
+
137
+ /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
89
+ if (pauth_feature >= PauthFeat_2) {
138
if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
90
+ return ptr ^ (pac & cmp_mask);
139
val |= (1 << 8);
91
+ }
140
}
141
- if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
142
- val |= (1 << 10);
143
- }
144
- if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
145
- val |= (1 << 11);
146
- }
147
- if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
148
- val |= (1 << 12);
149
- }
150
- if (s->vectors[ARMV7M_EXCP_MEM].pending) {
151
- val |= (1 << 13);
152
- }
153
- if (s->vectors[ARMV7M_EXCP_BUS].pending) {
154
- val |= (1 << 14);
155
- }
156
- if (s->vectors[ARMV7M_EXCP_SVC].pending) {
157
- val |= (1 << 15);
158
- }
159
- if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
160
- val |= (1 << 16);
161
- }
162
- if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
163
- val |= (1 << 17);
164
- }
165
- if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
166
- val |= (1 << 18);
167
- }
168
return val;
169
case 0xd28: /* Configurable Fault Status. */
170
/* The BFSR bits [15:8] are shared between security states
171
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
172
173
cpu->env.v7m.ccr[attrs.secure] = value;
174
break;
175
- case 0xd24: /* System Handler Control. */
176
- s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
177
- s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
178
- s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
179
- s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
180
+ case 0xd24: /* System Handler Control and State (SHCSR) */
181
+ if (attrs.secure) {
182
+ s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
183
+ /* Secure HardFault active bit cannot be written */
184
+ s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
185
+ s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
186
+ s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
187
+ (value & (1 << 10)) != 0;
188
+ s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
189
+ (value & (1 << 11)) != 0;
190
+ s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
191
+ (value & (1 << 12)) != 0;
192
+ s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
193
+ s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
194
+ s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
195
+ s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
196
+ s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
197
+ (value & (1 << 18)) != 0;
198
+ /* SecureFault not banked, but RAZ/WI to NS */
199
+ s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
200
+ s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
201
+ s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
202
+ } else {
203
+ s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
204
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
205
+ /* HARDFAULTPENDED is not present in v7M */
206
+ s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
207
+ }
208
+ s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
209
+ s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
210
+ s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
211
+ s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
212
+ s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
213
+ s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
214
+ s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
215
+ s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
216
+ s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
217
+ }
218
+ if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
219
+ s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
220
+ s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
221
+ s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
222
+ }
223
+ /* NMIACT can only be written if the write is of a zero, with
224
+ * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
225
+ */
226
+ if (!attrs.secure && cpu->env.v7m.secure &&
227
+ (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
228
+ (value & (1 << 5)) == 0) {
229
+ s->vectors[ARMV7M_EXCP_NMI].active = 0;
230
+ }
231
+ /* HARDFAULTACT can only be written if the write is of a zero
232
+ * to the non-secure HardFault state by the CPU in secure state.
233
+ * The only case where we can be targeting the non-secure HF state
234
+ * when in secure state is if this is a write via the NS alias
235
+ * and BFHFNMINS is 1.
236
+ */
237
+ if (!attrs.secure && cpu->env.v7m.secure &&
238
+ (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
239
+ (value & (1 << 2)) == 0) {
240
+ s->vectors[ARMV7M_EXCP_HARD].active = 0;
241
+ }
242
+
92
+
243
+ /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
93
+ if ((pac ^ ptr) & cmp_mask) {
244
s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
94
int error_code = (keynumber << 1) | (keynumber ^ 1);
245
- s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
95
if (param.tbi) {
246
- s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
96
return deposit64(orig_ptr, 53, 2, error_code);
247
- s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
248
- s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
249
- s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
250
- s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
251
- s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
252
- s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
253
- s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
254
nvic_irq_update(s);
255
break;
256
case 0xd28: /* Configurable Fault Status. */
257
--
97
--
258
2.7.4
98
2.34.1
259
260
1
In the A64 decoder, we have a lot of references to section numbers
1
From: Aaron Lindsay <aaron@os.amperecomputing.com>
2
from version A.a of the v8A ARM ARM (DDI0487). This version of the
3
document is now long obsolete (we are currently on revision B.a),
4
and various intervening versions renumbered all the sections.
5
2
6
The most recent B.a version of the document doesn't assign
3
An instruction is a 'combined' Pointer Authentication instruction
7
section numbers at all to the individual instruction classes
4
if it does something in addition to PAC -- for instance, branching
8
in the way that the various A.x versions did. The simplest thing
5
to or loading an address from the authenticated pointer.
9
to do is just to delete all the out of date C.x.x references.
10
6
7
Knowing whether a PAC operation is 'combined' is needed to
8
implement FEAT_FPACCOMBINE.
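
A rough sketch of why the distinction matters (the enum and names here are
purely illustrative, not the series' actual definitions): with FEAT_FPAC a
failed authentication faults immediately for the standalone AUT* forms, and
FEAT_FPACCOMBINE extends that to the combined forms, so the helper needs to
know which kind of instruction invoked it.

  #include <stdbool.h>

  /* Illustrative ordering: each level implies the previous ones. */
  typedef enum {
      PAUTH_SKETCH_NONE,
      PAUTH_SKETCH_PAUTH2,
      PAUTH_SKETCH_FPAC,
      PAUTH_SKETCH_FPACCOMBINE,
  } PauthSketchLevel;

  static bool auth_failure_faults(PauthSketchLevel level, bool is_combined)
  {
      if (level >= PAUTH_SKETCH_FPACCOMBINE) {
          return true;              /* any failed authentication faults */
      }
      if (level >= PAUTH_SKETCH_FPAC) {
          return !is_combined;      /* only standalone AUT* fault */
      }
      return false;                 /* failure just corrupts the pointer */
  }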
9
10
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
11
Signed-off-by: Aaron Lindsay <aaron@os.amperecomputing.com>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20230829232335.965414-9-richard.henderson@linaro.org
15
Message-Id: <20230609172324.982888-7-aaron@os.amperecomputing.com>
16
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Message-id: 20170915150849.23557-1-peter.maydell@linaro.org
14
---
18
---
15
target/arm/translate-a64.c | 227 +++++++++++++++++++++++----------------------
19
target/arm/tcg/helper-a64.h | 4 ++
16
1 file changed, 114 insertions(+), 113 deletions(-)
20
target/arm/tcg/pauth_helper.c | 71 +++++++++++++++++++++++++++-------
21
target/arm/tcg/translate-a64.c | 12 +++---
22
3 files changed, 68 insertions(+), 19 deletions(-)
17
23
18
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
24
diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h
19
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/translate-a64.c
26
--- a/target/arm/tcg/helper-a64.h
21
+++ b/target/arm/translate-a64.c
27
+++ b/target/arm/tcg/helper-a64.h
22
@@ -XXX,XX +XXX,XX @@ static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
28
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(pacda, TCG_CALL_NO_WG, i64, env, i64, i64)
29
DEF_HELPER_FLAGS_3(pacdb, TCG_CALL_NO_WG, i64, env, i64, i64)
30
DEF_HELPER_FLAGS_3(pacga, TCG_CALL_NO_WG, i64, env, i64, i64)
31
DEF_HELPER_FLAGS_3(autia, TCG_CALL_NO_WG, i64, env, i64, i64)
32
+DEF_HELPER_FLAGS_3(autia_combined, TCG_CALL_NO_WG, i64, env, i64, i64)
33
DEF_HELPER_FLAGS_3(autib, TCG_CALL_NO_WG, i64, env, i64, i64)
34
+DEF_HELPER_FLAGS_3(autib_combined, TCG_CALL_NO_WG, i64, env, i64, i64)
35
DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, env, i64, i64)
36
+DEF_HELPER_FLAGS_3(autda_combined, TCG_CALL_NO_WG, i64, env, i64, i64)
37
DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
38
+DEF_HELPER_FLAGS_3(autdb_combined, TCG_CALL_NO_WG, i64, env, i64, i64)
39
DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
40
DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
41
42
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/tcg/pauth_helper.c
45
+++ b/target/arm/tcg/pauth_helper.c
46
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
23
}
47
}
24
48
25
/*
49
static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
26
- * the instruction disassembly implemented here matches
50
- ARMPACKey *key, bool data, int keynumber)
27
- * the instruction encoding classifications in chapter 3 (C3)
51
+ ARMPACKey *key, bool data, int keynumber,
28
- * of the ARM Architecture Reference Manual (DDI0487A_a)
52
+ uintptr_t ra, bool is_combined)
29
+ * The instruction disassembly implemented here matches
53
{
30
+ * the instruction encoding classifications in chapter C4
54
ARMCPU *cpu = env_archcpu(env);
31
+ * of the ARM Architecture Reference Manual (DDI0487B_a);
55
ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
32
+ * classification names and decode diagrams here should generally
56
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(pacga)(CPUARMState *env, uint64_t x, uint64_t y)
33
+ * match up with those in the manual.
57
return pac & 0xffffffff00000000ull;
34
*/
58
}
35
59
36
-/* C3.2.7 Unconditional branch (immediate)
60
-uint64_t HELPER(autia)(CPUARMState *env, uint64_t x, uint64_t y)
37
+/* Unconditional branch (immediate)
61
+static uint64_t pauth_autia(CPUARMState *env, uint64_t x, uint64_t y,
38
* 31 30 26 25 0
62
+ uintptr_t ra, bool is_combined)
39
* +----+-----------+-------------------------------------+
63
{
40
* | op | 0 0 1 0 1 | imm26 |
64
int el = arm_current_el(env);
41
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
65
if (!pauth_key_enabled(env, el, SCTLR_EnIA)) {
42
uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
66
return x;
43
44
if (insn & (1U << 31)) {
45
- /* C5.6.26 BL Branch with link */
46
+ /* BL Branch with link */
47
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
48
}
67
}
49
68
- pauth_check_trap(env, el, GETPC());
50
- /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
69
- return pauth_auth(env, x, y, &env->keys.apia, false, 0);
51
+ /* B Branch / BL Branch with link */
70
+ pauth_check_trap(env, el, ra);
52
gen_goto_tb(s, 0, addr);
71
+ return pauth_auth(env, x, y, &env->keys.apia, false, 0, ra, is_combined);
53
}
72
}
54
73
55
-/* C3.2.1 Compare & branch (immediate)
74
-uint64_t HELPER(autib)(CPUARMState *env, uint64_t x, uint64_t y)
56
+/* Compare and branch (immediate)
75
+uint64_t HELPER(autia)(CPUARMState *env, uint64_t x, uint64_t y)
57
* 31 30 25 24 23 5 4 0
76
+{
58
* +----+-------------+----+---------------------+--------+
77
+ return pauth_autia(env, x, y, GETPC(), false);
59
* | sf | 0 1 1 0 1 0 | op | imm19 | Rt |
78
+}
60
@@ -XXX,XX +XXX,XX @@ static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
79
+
61
gen_goto_tb(s, 1, addr);
80
+uint64_t HELPER(autia_combined)(CPUARMState *env, uint64_t x, uint64_t y)
81
+{
82
+ return pauth_autia(env, x, y, GETPC(), true);
83
+}
84
+
85
+static uint64_t pauth_autib(CPUARMState *env, uint64_t x, uint64_t y,
86
+ uintptr_t ra, bool is_combined)
87
{
88
int el = arm_current_el(env);
89
if (!pauth_key_enabled(env, el, SCTLR_EnIB)) {
90
return x;
91
}
92
- pauth_check_trap(env, el, GETPC());
93
- return pauth_auth(env, x, y, &env->keys.apib, false, 1);
94
+ pauth_check_trap(env, el, ra);
95
+ return pauth_auth(env, x, y, &env->keys.apib, false, 1, ra, is_combined);
62
}
96
}
63
97
64
-/* C3.2.5 Test & branch (immediate)
98
-uint64_t HELPER(autda)(CPUARMState *env, uint64_t x, uint64_t y)
65
+/* Test and branch (immediate)
99
+uint64_t HELPER(autib)(CPUARMState *env, uint64_t x, uint64_t y)
66
* 31 30 25 24 23 19 18 5 4 0
100
+{
67
* +----+-------------+----+-------+-------------+------+
101
+ return pauth_autib(env, x, y, GETPC(), false);
68
* | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt |
102
+}
69
@@ -XXX,XX +XXX,XX @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn)
103
+
70
gen_goto_tb(s, 1, addr);
104
+uint64_t HELPER(autib_combined)(CPUARMState *env, uint64_t x, uint64_t y)
105
+{
106
+ return pauth_autib(env, x, y, GETPC(), true);
107
+}
108
+
109
+static uint64_t pauth_autda(CPUARMState *env, uint64_t x, uint64_t y,
110
+ uintptr_t ra, bool is_combined)
111
{
112
int el = arm_current_el(env);
113
if (!pauth_key_enabled(env, el, SCTLR_EnDA)) {
114
return x;
115
}
116
- pauth_check_trap(env, el, GETPC());
117
- return pauth_auth(env, x, y, &env->keys.apda, true, 0);
118
+ pauth_check_trap(env, el, ra);
119
+ return pauth_auth(env, x, y, &env->keys.apda, true, 0, ra, is_combined);
71
}
120
}
72
121
73
-/* C3.2.2 / C5.6.19 Conditional branch (immediate)
122
-uint64_t HELPER(autdb)(CPUARMState *env, uint64_t x, uint64_t y)
74
+/* Conditional branch (immediate)
123
+uint64_t HELPER(autda)(CPUARMState *env, uint64_t x, uint64_t y)
75
* 31 25 24 23 5 4 3 0
124
+{
76
* +---------------+----+---------------------+----+------+
125
+ return pauth_autda(env, x, y, GETPC(), false);
77
* | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond |
126
+}
78
@@ -XXX,XX +XXX,XX @@ static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
127
+
128
+uint64_t HELPER(autda_combined)(CPUARMState *env, uint64_t x, uint64_t y)
129
+{
130
+ return pauth_autda(env, x, y, GETPC(), true);
131
+}
132
+
133
+static uint64_t pauth_autdb(CPUARMState *env, uint64_t x, uint64_t y,
134
+ uintptr_t ra, bool is_combined)
135
{
136
int el = arm_current_el(env);
137
if (!pauth_key_enabled(env, el, SCTLR_EnDB)) {
138
return x;
79
}
139
}
140
- pauth_check_trap(env, el, GETPC());
141
- return pauth_auth(env, x, y, &env->keys.apdb, true, 1);
142
+ pauth_check_trap(env, el, ra);
143
+ return pauth_auth(env, x, y, &env->keys.apdb, true, 1, ra, is_combined);
144
+}
145
+
146
+uint64_t HELPER(autdb)(CPUARMState *env, uint64_t x, uint64_t y)
147
+{
148
+ return pauth_autdb(env, x, y, GETPC(), false);
149
+}
150
+
151
+uint64_t HELPER(autdb_combined)(CPUARMState *env, uint64_t x, uint64_t y)
152
+{
153
+ return pauth_autdb(env, x, y, GETPC(), true);
80
}
154
}
81
155
82
-/* C5.6.68 HINT */
156
uint64_t HELPER(xpaci)(CPUARMState *env, uint64_t a)
83
+/* HINT instruction group, including various allocated HINTs */
157
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
84
static void handle_hint(DisasContext *s, uint32_t insn,
158
index XXXXXXX..XXXXXXX 100644
85
unsigned int op1, unsigned int op2, unsigned int crm)
159
--- a/target/arm/tcg/translate-a64.c
86
{
160
+++ b/target/arm/tcg/translate-a64.c
87
@@ -XXX,XX +XXX,XX @@ static void handle_sync(DisasContext *s, uint32_t insn,
161
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
162
163
truedst = tcg_temp_new_i64();
164
if (use_key_a) {
165
- gen_helper_autia(truedst, cpu_env, dst, modifier);
166
+ gen_helper_autia_combined(truedst, cpu_env, dst, modifier);
167
} else {
168
- gen_helper_autib(truedst, cpu_env, dst, modifier);
169
+ gen_helper_autib_combined(truedst, cpu_env, dst, modifier);
88
}
170
}
171
return truedst;
89
}
172
}
90
173
@@ -XXX,XX +XXX,XX @@ static bool trans_LDRA(DisasContext *s, arg_LDRA *a)
91
-/* C5.6.130 MSR (immediate) - move immediate to processor state field */
174
92
+/* MSR (immediate) - move immediate to processor state field */
175
if (s->pauth_active) {
93
static void handle_msr_i(DisasContext *s, uint32_t insn,
176
if (!a->m) {
94
unsigned int op1, unsigned int op2, unsigned int crm)
177
- gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
95
{
178
- tcg_constant_i64(0));
96
@@ -XXX,XX +XXX,XX @@ static void gen_set_nzcv(TCGv_i64 tcg_rt)
179
+ gen_helper_autda_combined(dirty_addr, cpu_env, dirty_addr,
97
tcg_temp_free_i32(nzcv);
180
+ tcg_constant_i64(0));
98
}
181
} else {
99
182
- gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
100
-/* C5.6.129 MRS - move from system register
183
- tcg_constant_i64(0));
101
- * C5.6.131 MSR (register) - move to system register
184
+ gen_helper_autdb_combined(dirty_addr, cpu_env, dirty_addr,
102
- * C5.6.204 SYS
185
+ tcg_constant_i64(0));
103
- * C5.6.205 SYSL
186
}
104
+/* MRS - move from system register
105
+ * MSR (register) - move to system register
106
+ * SYS
107
+ * SYSL
108
* These are all essentially the same insn in 'read' and 'write'
109
* versions, with varying op0 fields.
110
*/
111
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
112
}
187
}
113
}
188
114
115
-/* C3.2.4 System
116
+/* System
117
* 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0
118
* +---------------------+---+-----+-----+-------+-------+-----+------+
119
* | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt |
120
@@ -XXX,XX +XXX,XX @@ static void disas_system(DisasContext *s, uint32_t insn)
121
return;
122
}
123
switch (crn) {
124
- case 2: /* C5.6.68 HINT */
125
+ case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
126
handle_hint(s, insn, op1, op2, crm);
127
break;
128
case 3: /* CLREX, DSB, DMB, ISB */
129
handle_sync(s, insn, op1, op2, crm);
130
break;
131
- case 4: /* C5.6.130 MSR (immediate) */
132
+ case 4: /* MSR (immediate) */
133
handle_msr_i(s, insn, op1, op2, crm);
134
break;
135
default:
136
@@ -XXX,XX +XXX,XX @@ static void disas_system(DisasContext *s, uint32_t insn)
137
handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
138
}
139
140
-/* C3.2.3 Exception generation
141
+/* Exception generation
142
*
143
* 31 24 23 21 20 5 4 2 1 0
144
* +-----------------+-----+------------------------+-----+----+
145
@@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn)
146
}
147
}
148
149
-/* C3.2.7 Unconditional branch (register)
150
+/* Unconditional branch (register)
151
* 31 25 24 21 20 16 15 10 9 5 4 0
152
* +---------------+-------+-------+-------+------+-------+
153
* | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 |
154
@@ -XXX,XX +XXX,XX @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
155
s->base.is_jmp = DISAS_JUMP;
156
}
157
158
-/* C3.2 Branches, exception generating and system instructions */
159
+/* Branches, exception generating and system instructions */
160
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
161
{
162
switch (extract32(insn, 25, 7)) {
163
@@ -XXX,XX +XXX,XX @@ static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
164
return regsize == 64;
165
}
166
167
-/* C3.3.6 Load/store exclusive
168
+/* Load/store exclusive
169
*
170
* 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
171
* +-----+-------------+----+---+----+------+----+-------+------+------+
172
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
173
}
174
175
/*
176
- * C3.3.5 Load register (literal)
177
+ * Load register (literal)
178
*
179
* 31 30 29 27 26 25 24 23 5 4 0
180
* +-----+-------+---+-----+-------------------+-------+
181
@@ -XXX,XX +XXX,XX @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
182
}
183
184
/*
185
- * C5.6.80 LDNP (Load Pair - non-temporal hint)
186
- * C5.6.81 LDP (Load Pair - non vector)
187
- * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
188
- * C5.6.176 STNP (Store Pair - non-temporal hint)
189
- * C5.6.177 STP (Store Pair - non vector)
190
- * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
191
- * C6.3.165 LDP (Load Pair of SIMD&FP)
192
- * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
193
- * C6.3.284 STP (Store Pair of SIMD&FP)
194
+ * LDNP (Load Pair - non-temporal hint)
195
+ * LDP (Load Pair - non vector)
196
+ * LDPSW (Load Pair Signed Word - non vector)
197
+ * STNP (Store Pair - non-temporal hint)
198
+ * STP (Store Pair - non vector)
199
+ * LDNP (Load Pair of SIMD&FP - non-temporal hint)
200
+ * LDP (Load Pair of SIMD&FP)
201
+ * STNP (Store Pair of SIMD&FP - non-temporal hint)
202
+ * STP (Store Pair of SIMD&FP)
203
*
204
* 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
205
* +-----+-------+---+---+-------+---+-----------------------------+
206
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
207
}
208
209
/*
210
- * C3.3.8 Load/store (immediate post-indexed)
211
- * C3.3.9 Load/store (immediate pre-indexed)
212
- * C3.3.12 Load/store (unscaled immediate)
213
+ * Load/store (immediate post-indexed)
214
+ * Load/store (immediate pre-indexed)
215
+ * Load/store (unscaled immediate)
216
*
217
* 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
218
* +----+-------+---+-----+-----+---+--------+-----+------+------+
219
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
220
}
221
222
/*
223
- * C3.3.10 Load/store (register offset)
224
+ * Load/store (register offset)
225
*
226
* 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
227
* +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
228
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
229
}
230
231
/*
232
- * C3.3.13 Load/store (unsigned immediate)
233
+ * Load/store (unsigned immediate)
234
*
235
* 31 30 29 27 26 25 24 23 22 21 10 9 5
236
* +----+-------+---+-----+-----+------------+-------+------+
237
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_reg(DisasContext *s, uint32_t insn)
238
}
239
}
240
241
-/* C3.3.1 AdvSIMD load/store multiple structures
242
+/* AdvSIMD load/store multiple structures
243
*
244
* 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0
245
* +---+---+---------------+---+-------------+--------+------+------+------+
246
* | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt |
247
* +---+---+---------------+---+-------------+--------+------+------+------+
248
*
249
- * C3.3.2 AdvSIMD load/store multiple structures (post-indexed)
250
+ * AdvSIMD load/store multiple structures (post-indexed)
251
*
252
* 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0
253
* +---+---+---------------+---+---+---------+--------+------+------+------+
254
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
255
tcg_temp_free_i64(tcg_addr);
256
}
257
258
-/* C3.3.3 AdvSIMD load/store single structure
259
+/* AdvSIMD load/store single structure
260
*
261
* 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
262
* +---+---+---------------+-----+-----------+-----+---+------+------+------+
263
* | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt |
264
* +---+---+---------------+-----+-----------+-----+---+------+------+------+
265
*
266
- * C3.3.4 AdvSIMD load/store single structure (post-indexed)
267
+ * AdvSIMD load/store single structure (post-indexed)
268
*
269
* 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
270
* +---+---+---------------+-----+-----------+-----+---+------+------+------+
271
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
272
tcg_temp_free_i64(tcg_addr);
273
}
274
275
-/* C3.3 Loads and stores */
276
+/* Loads and stores */
277
static void disas_ldst(DisasContext *s, uint32_t insn)
278
{
279
switch (extract32(insn, 24, 6)) {
280
@@ -XXX,XX +XXX,XX @@ static void disas_ldst(DisasContext *s, uint32_t insn)
281
}
282
}
283
284
-/* C3.4.6 PC-rel. addressing
285
+/* PC-rel. addressing
286
* 31 30 29 28 24 23 5 4 0
287
* +----+-------+-----------+-------------------+------+
288
* | op | immlo | 1 0 0 0 0 | immhi | Rd |
289
@@ -XXX,XX +XXX,XX @@ static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
290
}
291
292
/*
293
- * C3.4.1 Add/subtract (immediate)
294
+ * Add/subtract (immediate)
295
*
296
* 31 30 29 28 24 23 22 21 10 9 5 4 0
297
* +--+--+--+-----------+-----+-------------+-----+-----+
298
@@ -XXX,XX +XXX,XX @@ static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
299
return true;
300
}
301
302
-/* C3.4.4 Logical (immediate)
303
+/* Logical (immediate)
304
* 31 30 29 28 23 22 21 16 15 10 9 5 4 0
305
* +----+-----+-------------+---+------+------+------+------+
306
* | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
307
@@ -XXX,XX +XXX,XX @@ static void disas_logic_imm(DisasContext *s, uint32_t insn)
308
}
309
310
/*
311
- * C3.4.5 Move wide (immediate)
312
+ * Move wide (immediate)
313
*
314
* 31 30 29 28 23 22 21 20 5 4 0
315
* +--+-----+-------------+-----+----------------+------+
316
@@ -XXX,XX +XXX,XX @@ static void disas_movw_imm(DisasContext *s, uint32_t insn)
317
}
318
}
319
320
-/* C3.4.2 Bitfield
321
+/* Bitfield
322
* 31 30 29 28 23 22 21 16 15 10 9 5 4 0
323
* +----+-----+-------------+---+------+------+------+------+
324
* | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
325
@@ -XXX,XX +XXX,XX @@ static void disas_bitfield(DisasContext *s, uint32_t insn)
326
}
327
}
328
329
-/* C3.4.3 Extract
330
+/* Extract
331
* 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
332
* +----+------+-------------+---+----+------+--------+------+------+
333
* | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
334
@@ -XXX,XX +XXX,XX @@ static void disas_extract(DisasContext *s, uint32_t insn)
335
}
336
}
337
338
-/* C3.4 Data processing - immediate */
339
+/* Data processing - immediate */
340
static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
341
{
342
switch (extract32(insn, 23, 6)) {
343
@@ -XXX,XX +XXX,XX @@ static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
344
}
345
}
346
347
-/* C3.5.10 Logical (shifted register)
348
+/* Logical (shifted register)
349
* 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
350
* +----+-----+-----------+-------+---+------+--------+------+------+
351
* | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
352
@@ -XXX,XX +XXX,XX @@ static void disas_logic_reg(DisasContext *s, uint32_t insn)
353
}
354
355
/*
356
- * C3.5.1 Add/subtract (extended register)
357
+ * Add/subtract (extended register)
358
*
359
* 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
360
* +--+--+--+-----------+-----+--+-------+------+------+----+----+
361
@@ -XXX,XX +XXX,XX @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
362
}
363
364
/*
365
- * C3.5.2 Add/subtract (shifted register)
366
+ * Add/subtract (shifted register)
367
*
368
* 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
369
* +--+--+--+-----------+-----+--+-------+---------+------+------+
370
@@ -XXX,XX +XXX,XX @@ static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
371
tcg_temp_free_i64(tcg_result);
372
}
373
374
-/* C3.5.9 Data-processing (3 source)
375
-
376
- 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
377
- +--+------+-----------+------+------+----+------+------+------+
378
- |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
379
- +--+------+-----------+------+------+----+------+------+------+
380
-
381
+/* Data-processing (3 source)
382
+ *
383
+ * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
384
+ * +--+------+-----------+------+------+----+------+------+------+
385
+ * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
386
+ * +--+------+-----------+------+------+----+------+------+------+
387
*/
388
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
389
{
390
@@ -XXX,XX +XXX,XX @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
391
tcg_temp_free_i64(tcg_tmp);
392
}
393
394
-/* C3.5.3 - Add/subtract (with carry)
395
+/* Add/subtract (with carry)
396
* 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
397
* +--+--+--+------------------------+------+---------+------+-----+
398
* |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd |
399
@@ -XXX,XX +XXX,XX @@ static void disas_adc_sbc(DisasContext *s, uint32_t insn)
400
}
401
}
402
403
-/* C3.5.4 - C3.5.5 Conditional compare (immediate / register)
404
+/* Conditional compare (immediate / register)
405
* 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
406
* +--+--+--+------------------------+--------+------+----+--+------+--+-----+
407
* |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
408
@@ -XXX,XX +XXX,XX @@ static void disas_cc(DisasContext *s, uint32_t insn)
409
tcg_temp_free_i32(tcg_t2);
410
}
411
412
-/* C3.5.6 Conditional select
413
+/* Conditional select
414
* 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
415
* +----+----+---+-----------------+------+------+-----+------+------+
416
* | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
417
@@ -XXX,XX +XXX,XX @@ static void handle_rbit(DisasContext *s, unsigned int sf,
418
}
419
}
420
421
-/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
422
+/* REV with sf==1, opcode==3 ("REV64") */
423
static void handle_rev64(DisasContext *s, unsigned int sf,
424
unsigned int rn, unsigned int rd)
425
{
426
@@ -XXX,XX +XXX,XX @@ static void handle_rev64(DisasContext *s, unsigned int sf,
427
tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
428
}
429
430
-/* C5.6.149 REV with sf==0, opcode==2
431
- * C5.6.151 REV32 (sf==1, opcode==2)
432
+/* REV with sf==0, opcode==2
433
+ * REV32 (sf==1, opcode==2)
434
*/
435
static void handle_rev32(DisasContext *s, unsigned int sf,
436
unsigned int rn, unsigned int rd)
437
@@ -XXX,XX +XXX,XX @@ static void handle_rev32(DisasContext *s, unsigned int sf,
438
}
439
}
440
441
-/* C5.6.150 REV16 (opcode==1) */
442
+/* REV16 (opcode==1) */
443
static void handle_rev16(DisasContext *s, unsigned int sf,
444
unsigned int rn, unsigned int rd)
445
{
446
@@ -XXX,XX +XXX,XX @@ static void handle_rev16(DisasContext *s, unsigned int sf,
447
tcg_temp_free_i64(tcg_tmp);
448
}
449
450
-/* C3.5.7 Data-processing (1 source)
451
+/* Data-processing (1 source)
452
* 31 30 29 28 21 20 16 15 10 9 5 4 0
453
* +----+---+---+-----------------+---------+--------+------+------+
454
* | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
455
@@ -XXX,XX +XXX,XX @@ static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
456
}
457
}
458
459
-/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
460
+/* LSLV, LSRV, ASRV, RORV */
461
static void handle_shift_reg(DisasContext *s,
462
enum a64_shift_type shift_type, unsigned int sf,
463
unsigned int rm, unsigned int rn, unsigned int rd)
464
@@ -XXX,XX +XXX,XX @@ static void handle_crc32(DisasContext *s,
465
tcg_temp_free_i32(tcg_bytes);
466
}
467
468
-/* C3.5.8 Data-processing (2 source)
469
+/* Data-processing (2 source)
470
* 31 30 29 28 21 20 16 15 10 9 5 4 0
471
* +----+---+---+-----------------+------+--------+------+------+
472
* | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
473
@@ -XXX,XX +XXX,XX @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
474
}
475
}
476
477
-/* C3.5 Data processing - register */
478
+/* Data processing - register */
479
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
480
{
481
switch (extract32(insn, 24, 5)) {
482
@@ -XXX,XX +XXX,XX @@ static void handle_fp_compare(DisasContext *s, bool is_double,
483
tcg_temp_free_i64(tcg_flags);
484
}
485
486
-/* C3.6.22 Floating point compare
487
+/* Floating point compare
488
* 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
489
* +---+---+---+-----------+------+---+------+-----+---------+------+-------+
490
* | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
491
@@ -XXX,XX +XXX,XX @@ static void disas_fp_compare(DisasContext *s, uint32_t insn)
492
handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
493
}
494
495
-/* C3.6.23 Floating point conditional compare
496
+/* Floating point conditional compare
497
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
498
* +---+---+---+-----------+------+---+------+------+-----+------+----+------+
499
* | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
500
@@ -XXX,XX +XXX,XX @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
501
}
502
}
503
504
-/* C3.6.24 Floating point conditional select
505
+/* Floating point conditional select
506
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
507
* +---+---+---+-----------+------+---+------+------+-----+------+------+
508
* | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
509
@@ -XXX,XX +XXX,XX @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
510
tcg_temp_free_i64(t_true);
511
}
512
513
-/* C3.6.25 Floating-point data-processing (1 source) - single precision */
514
+/* Floating-point data-processing (1 source) - single precision */
515
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
516
{
517
TCGv_ptr fpst;
518
@@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
519
tcg_temp_free_i32(tcg_res);
520
}
521
522
-/* C3.6.25 Floating-point data-processing (1 source) - double precision */
523
+/* Floating-point data-processing (1 source) - double precision */
524
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
525
{
526
TCGv_ptr fpst;
527
@@ -XXX,XX +XXX,XX @@ static void handle_fp_fcvt(DisasContext *s, int opcode,
528
}
529
}
530
531
-/* C3.6.25 Floating point data-processing (1 source)
532
+/* Floating point data-processing (1 source)
533
* 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
534
* +---+---+---+-----------+------+---+--------+-----------+------+------+
535
* | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
536
@@ -XXX,XX +XXX,XX @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
537
}
538
}
539
540
-/* C3.6.26 Floating-point data-processing (2 source) - single precision */
541
+/* Floating-point data-processing (2 source) - single precision */
542
static void handle_fp_2src_single(DisasContext *s, int opcode,
543
int rd, int rn, int rm)
544
{
545
@@ -XXX,XX +XXX,XX @@ static void handle_fp_2src_single(DisasContext *s, int opcode,
546
tcg_temp_free_i32(tcg_res);
547
}
548
549
-/* C3.6.26 Floating-point data-processing (2 source) - double precision */
550
+/* Floating-point data-processing (2 source) - double precision */
551
static void handle_fp_2src_double(DisasContext *s, int opcode,
552
int rd, int rn, int rm)
553
{
554
@@ -XXX,XX +XXX,XX @@ static void handle_fp_2src_double(DisasContext *s, int opcode,
555
tcg_temp_free_i64(tcg_res);
556
}
557
558
-/* C3.6.26 Floating point data-processing (2 source)
559
+/* Floating point data-processing (2 source)
560
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
561
* +---+---+---+-----------+------+---+------+--------+-----+------+------+
562
* | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
563
@@ -XXX,XX +XXX,XX @@ static void disas_fp_2src(DisasContext *s, uint32_t insn)
564
}
565
}
566
567
-/* C3.6.27 Floating-point data-processing (3 source) - single precision */
568
+/* Floating-point data-processing (3 source) - single precision */
569
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
570
int rd, int rn, int rm, int ra)
571
{
572
@@ -XXX,XX +XXX,XX @@ static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
573
tcg_temp_free_i32(tcg_res);
574
}
575
576
-/* C3.6.27 Floating-point data-processing (3 source) - double precision */
577
+/* Floating-point data-processing (3 source) - double precision */
578
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
579
int rd, int rn, int rm, int ra)
580
{
581
@@ -XXX,XX +XXX,XX @@ static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
582
tcg_temp_free_i64(tcg_res);
583
}
584
585
-/* C3.6.27 Floating point data-processing (3 source)
586
+/* Floating point data-processing (3 source)
587
* 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
588
* +---+---+---+-----------+------+----+------+----+------+------+------+
589
* | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
590
@@ -XXX,XX +XXX,XX @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
591
}
592
}
593
594
-/* C3.6.28 Floating point immediate
595
+/* Floating point immediate
596
* 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
597
* +---+---+---+-----------+------+---+------------+-------+------+------+
598
* | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
599
@@ -XXX,XX +XXX,XX @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
600
tcg_temp_free_i32(tcg_shift);
601
}
602
603
-/* C3.6.29 Floating point <-> fixed point conversions
604
+/* Floating point <-> fixed point conversions
605
* 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
606
* +----+---+---+-----------+------+---+-------+--------+-------+------+------+
607
* | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
608
@@ -XXX,XX +XXX,XX @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
609
}
610
}
611
612
-/* C3.6.30 Floating point <-> integer conversions
613
+/* Floating point <-> integer conversions
614
* 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
615
* +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
616
* | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
617
@@ -XXX,XX +XXX,XX @@ static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
618
tcg_temp_free_i64(tcg_tmp);
619
}
620
621
-/* C3.6.1 EXT
622
+/* EXT
623
* 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
624
* +---+---+-------------+-----+---+------+---+------+---+------+------+
625
* | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
626
@@ -XXX,XX +XXX,XX @@ static void disas_simd_ext(DisasContext *s, uint32_t insn)
627
tcg_temp_free_i64(tcg_resh);
628
}
629
630
-/* C3.6.2 TBL/TBX
631
+/* TBL/TBX
632
* 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
633
* +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
634
* | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
635
@@ -XXX,XX +XXX,XX @@ static void disas_simd_tb(DisasContext *s, uint32_t insn)
636
tcg_temp_free_i64(tcg_resh);
637
}
638
639
-/* C3.6.3 ZIP/UZP/TRN
640
+/* ZIP/UZP/TRN
641
* 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
642
* +---+---+-------------+------+---+------+---+------------------+------+
643
* | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
644
@@ -XXX,XX +XXX,XX @@ static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2,
645
}
646
}
647
648
-/* C3.6.4 AdvSIMD across lanes
649
+/* AdvSIMD across lanes
650
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
651
* +---+---+---+-----------+------+-----------+--------+-----+------+------+
652
* | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
653
@@ -XXX,XX +XXX,XX @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
654
tcg_temp_free_i64(tcg_res);
655
}
656
657
-/* C6.3.31 DUP (Element, Vector)
658
+/* DUP (Element, Vector)
659
*
660
* 31 30 29 21 20 16 15 10 9 5 4 0
661
* +---+---+-------------------+--------+-------------+------+------+
662
@@ -XXX,XX +XXX,XX @@ static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
663
tcg_temp_free_i64(tmp);
664
}
665
666
-/* C6.3.31 DUP (element, scalar)
667
+/* DUP (element, scalar)
668
* 31 21 20 16 15 10 9 5 4 0
669
* +-----------------------+--------+-------------+------+------+
670
* | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
671
@@ -XXX,XX +XXX,XX @@ static void handle_simd_dupes(DisasContext *s, int rd, int rn,
672
tcg_temp_free_i64(tmp);
673
}
674
675
-/* C6.3.32 DUP (General)
676
+/* DUP (General)
677
*
678
* 31 30 29 21 20 16 15 10 9 5 4 0
679
* +---+---+-------------------+--------+-------------+------+------+
680
@@ -XXX,XX +XXX,XX @@ static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
681
}
682
}
683
684
-/* C6.3.150 INS (Element)
685
+/* INS (Element)
686
*
687
* 31 21 20 16 15 14 11 10 9 5 4 0
688
* +-----------------------+--------+------------+---+------+------+
689
@@ -XXX,XX +XXX,XX @@ static void handle_simd_inse(DisasContext *s, int rd, int rn,
690
}
691
692
693
-/* C6.3.151 INS (General)
694
+/* INS (General)
695
*
696
* 31 21 20 16 15 10 9 5 4 0
697
* +-----------------------+--------+-------------+------+------+
698
@@ -XXX,XX +XXX,XX @@ static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
699
}
700
701
/*
702
- * C6.3.321 UMOV (General)
703
- * C6.3.237 SMOV (General)
704
+ * UMOV (General)
705
+ * SMOV (General)
706
*
707
* 31 30 29 21 20 16 15 12 10 9 5 4 0
708
* +---+---+-------------------+--------+-------------+------+------+
709
@@ -XXX,XX +XXX,XX @@ static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
710
}
711
}
712
713
-/* C3.6.5 AdvSIMD copy
714
+/* AdvSIMD copy
715
* 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
716
* +---+---+----+-----------------+------+---+------+---+------+------+
717
* | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
718
@@ -XXX,XX +XXX,XX @@ static void disas_simd_copy(DisasContext *s, uint32_t insn)
719
}
720
}
721
722
-/* C3.6.6 AdvSIMD modified immediate
723
+/* AdvSIMD modified immediate
724
* 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
725
* +---+---+----+---------------------+-----+-------+----+---+-------+------+
726
* | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
727
@@ -XXX,XX +XXX,XX @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
728
tcg_temp_free_i64(tcg_imm);
729
}
730
731
-/* C3.6.7 AdvSIMD scalar copy
732
+/* AdvSIMD scalar copy
733
* 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
734
* +-----+----+-----------------+------+---+------+---+------+------+
735
* | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
736
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
737
handle_simd_dupes(s, rd, rn, imm5);
738
}
739
740
-/* C3.6.8 AdvSIMD scalar pairwise
741
+/* AdvSIMD scalar pairwise
742
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
743
* +-----+---+-----------+------+-----------+--------+-----+------+------+
744
* | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
745
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
746
tcg_temp_free_i32(tcg_rmode);
747
}
748
749
-/* C3.6.9 AdvSIMD scalar shift by immediate
750
+/* AdvSIMD scalar shift by immediate
751
* 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
752
* +-----+---+-------------+------+------+--------+---+------+------+
753
* | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
754
@@ -XXX,XX +XXX,XX @@ static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
755
}
756
}
757
758
-/* C3.6.10 AdvSIMD scalar three different
759
+/* AdvSIMD scalar three different
760
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
761
* +-----+---+-----------+------+---+------+--------+-----+------+------+
762
* | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
763
@@ -XXX,XX +XXX,XX @@ static void handle_3same_float(DisasContext *s, int size, int elements,
764
}
765
}
766
767
-/* C3.6.11 AdvSIMD scalar three same
768
+/* AdvSIMD scalar three same
769
* 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
770
* +-----+---+-----------+------+---+------+--------+---+------+------+
771
* | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
772
@@ -XXX,XX +XXX,XX @@ static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
773
}
774
}
775
776
-/* C3.6.12 AdvSIMD scalar two reg misc
777
+/* AdvSIMD scalar two reg misc
778
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
779
* +-----+---+-----------+------+-----------+--------+-----+------+------+
780
* | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
781
@@ -XXX,XX +XXX,XX @@ static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
782
}
783
784
785
-/* C3.6.14 AdvSIMD shift by immediate
786
+/* AdvSIMD shift by immediate
787
* 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
788
* +---+---+---+-------------+------+------+--------+---+------+------+
789
* | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
790
@@ -XXX,XX +XXX,XX @@ static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
791
tcg_temp_free_i64(tcg_res);
792
}
793
794
-/* C3.6.15 AdvSIMD three different
795
+/* AdvSIMD three different
796
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
797
* +---+---+---+-----------+------+---+------+--------+-----+------+------+
798
* | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
799
@@ -XXX,XX +XXX,XX @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
800
}
801
}
802
803
-/* C3.6.16 AdvSIMD three same
804
+/* AdvSIMD three same
805
* 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
806
* +---+---+---+-----------+------+---+------+--------+---+------+------+
807
* | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
808
@@ -XXX,XX +XXX,XX @@ static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
809
}
810
}
811
812
-/* C3.6.17 AdvSIMD two reg misc
813
+/* AdvSIMD two reg misc
814
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
815
* +---+---+---+-----------+------+-----------+--------+-----+------+------+
816
* | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
817
@@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
818
}
819
}
820
821
-/* C3.6.13 AdvSIMD scalar x indexed element
822
+/* AdvSIMD scalar x indexed element
823
* 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
824
* +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
825
* | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
826
* +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
827
- * C3.6.18 AdvSIMD vector x indexed element
828
+ * AdvSIMD vector x indexed element
829
* 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
830
* +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
831
* | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
832
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
833
}
834
}
835
836
-/* C3.6.19 Crypto AES
837
+/* Crypto AES
838
* 31 24 23 22 21 17 16 12 11 10 9 5 4 0
839
* +-----------------+------+-----------+--------+-----+------+------+
840
* | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
841
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_aes(DisasContext *s, uint32_t insn)
842
tcg_temp_free_i32(tcg_decrypt);
843
}
844
845
-/* C3.6.20 Crypto three-reg SHA
846
+/* Crypto three-reg SHA
847
* 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
848
* +-----------------+------+---+------+---+--------+-----+------+------+
849
* | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd |
850
@@ -XXX,XX +XXX,XX @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
851
tcg_temp_free_i32(tcg_rm_regno);
852
}
853
854
-/* C3.6.21 Crypto two-reg SHA
855
+/* Crypto two-reg SHA
856
* 31 24 23 22 21 17 16 12 11 10 9 5 4 0
857
* +-----------------+------+-----------+--------+-----+------+------+
858
* | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
859
--
189
--
860
2.7.4
190
2.34.1
861
191
862
192
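A side note for readers following the encoding diagrams in the comment-cleanup hunks above: the decoder turns each diagram column directly into extract32() calls on the instruction word. A minimal standalone sketch of pulling out the Bitfield-group fields (plain C with a local extract32 stand-in, illustrative only, not code from this series):

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's extract32(): return 'length' bits of 'value'
 * starting at bit position 'start'. */
static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

int main(void)
{
    /* Example A64 word in the Bitfield class: sf opc 1 0 0 1 1 0 N immr imms Rn Rd */
    uint32_t insn = 0xd3403c20;

    printf("sf=%u opc=%u N=%u immr=%u imms=%u Rn=%u Rd=%u\n",
           extract32(insn, 31, 1),  /* sf */
           extract32(insn, 29, 2),  /* opc */
           extract32(insn, 22, 1),  /* N */
           extract32(insn, 16, 6),  /* immr */
           extract32(insn, 10, 6),  /* imms */
           extract32(insn, 5, 5),   /* Rn */
           extract32(insn, 0, 5));  /* Rd */
    return 0;
}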
1
Don't use old_mmio in the memory region ops struct.
1
From: Aaron Lindsay <aaron@os.amperecomputing.com>
2
2
3
Signed-off-by: Aaron Lindsay <aaron@os.amperecomputing.com>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20230829232335.965414-10-richard.henderson@linaro.org
7
Message-Id: <20230609172324.982888-8-aaron@os.amperecomputing.com>
8
[rth: Simplify fpac comparison, reusing cmp_mask]
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 1505580378-9044-7-git-send-email-peter.maydell@linaro.org
6
---
11
---
7
hw/arm/omap2.c | 49 +++++++++++++++++++++++++++++++++++++------------
12
docs/system/arm/emulation.rst | 2 ++
8
1 file changed, 37 insertions(+), 12 deletions(-)
13
target/arm/syndrome.h | 7 +++++++
14
target/arm/tcg/cpu64.c | 2 +-
15
target/arm/tcg/pauth_helper.c | 18 +++++++++++++++++-
16
4 files changed, 27 insertions(+), 2 deletions(-)
9
17
10
diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c
18
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
11
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
12
--- a/hw/arm/omap2.c
20
--- a/docs/system/arm/emulation.rst
13
+++ b/hw/arm/omap2.c
21
+++ b/docs/system/arm/emulation.rst
14
@@ -XXX,XX +XXX,XX @@ static void omap_sysctl_write(void *opaque, hwaddr addr,
22
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
23
- FEAT_FGT (Fine-Grained Traps)
24
- FEAT_FHM (Floating-point half-precision multiplication instructions)
25
- FEAT_FP16 (Half-precision floating-point data processing)
26
+- FEAT_FPAC (Faulting on AUT* instructions)
27
+- FEAT_FPACCOMBINE (Faulting on combined pointer authentication instructions)
28
- FEAT_FRINTTS (Floating-point to integer instructions)
29
- FEAT_FlagM (Flag manipulation instructions v2)
30
- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
31
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/arm/syndrome.h
34
+++ b/target/arm/syndrome.h
35
@@ -XXX,XX +XXX,XX @@ enum arm_exception_class {
36
EC_SYSTEMREGISTERTRAP = 0x18,
37
EC_SVEACCESSTRAP = 0x19,
38
EC_ERETTRAP = 0x1a,
39
+ EC_PACFAIL = 0x1c,
40
EC_SMETRAP = 0x1d,
41
EC_GPC = 0x1e,
42
EC_INSNABORT = 0x20,
43
@@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_smetrap(SMEExceptionType etype, bool is_16bit)
44
| (is_16bit ? 0 : ARM_EL_IL) | etype;
45
}
46
47
+static inline uint32_t syn_pacfail(bool data, int keynumber)
48
+{
49
+ int error_code = (data << 1) | keynumber;
50
+ return (EC_PACFAIL << ARM_EL_EC_SHIFT) | ARM_EL_IL | error_code;
51
+}
52
+
53
static inline uint32_t syn_pactrap(void)
54
{
55
return EC_PACTRAP << ARM_EL_EC_SHIFT;
56
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/tcg/cpu64.c
59
+++ b/target/arm/tcg/cpu64.c
60
@@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj)
61
62
t = cpu->isar.id_aa64isar1;
63
t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */
64
- t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_2);
65
+ t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_FPACCOMBINED);
66
t = FIELD_DP64(t, ID_AA64ISAR1, API, 1);
67
t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); /* FEAT_JSCVT */
68
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); /* FEAT_FCMA */
69
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/tcg/pauth_helper.c
72
+++ b/target/arm/tcg/pauth_helper.c
73
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
15
}
74
}
16
}
75
}
17
76
18
+static uint64_t omap_sysctl_readfn(void *opaque, hwaddr addr,
77
+static G_NORETURN
19
+ unsigned size)
78
+void pauth_fail_exception(CPUARMState *env, bool data,
79
+ int keynumber, uintptr_t ra)
20
+{
80
+{
21
+ switch (size) {
81
+ raise_exception_ra(env, EXCP_UDEF, syn_pacfail(data, keynumber),
22
+ case 1:
82
+ exception_target_el(env), ra);
23
+ return omap_sysctl_read8(opaque, addr);
24
+ case 2:
25
+ return omap_badwidth_read32(opaque, addr); /* TODO */
26
+ case 4:
27
+ return omap_sysctl_read(opaque, addr);
28
+ default:
29
+ g_assert_not_reached();
30
+ }
31
+}
83
+}
32
+
84
+
33
+static void omap_sysctl_writefn(void *opaque, hwaddr addr,
85
static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
34
+ uint64_t value, unsigned size)
86
ARMPACKey *key, bool data, int keynumber,
35
+{
87
uintptr_t ra, bool is_combined)
36
+ switch (size) {
88
@@ -XXX,XX +XXX,XX @@ static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
37
+ case 1:
89
cmp_mask &= ~MAKE_64BIT_MASK(55, 1);
38
+ omap_sysctl_write8(opaque, addr, value);
90
39
+ break;
91
if (pauth_feature >= PauthFeat_2) {
40
+ case 2:
92
- return ptr ^ (pac & cmp_mask);
41
+ omap_badwidth_write32(opaque, addr, value); /* TODO */
93
+ ARMPauthFeature fault_feature =
42
+ break;
94
+ is_combined ? PauthFeat_FPACCOMBINED : PauthFeat_FPAC;
43
+ case 4:
95
+ uint64_t result = ptr ^ (pac & cmp_mask);
44
+ omap_sysctl_write(opaque, addr, value);
45
+ break;
46
+ default:
47
+ g_assert_not_reached();
48
+ }
49
+}
50
+
96
+
51
static const MemoryRegionOps omap_sysctl_ops = {
97
+ if (pauth_feature >= fault_feature
52
- .old_mmio = {
98
+ && ((result ^ sextract64(result, 55, 1)) & cmp_mask)) {
53
- .read = {
99
+ pauth_fail_exception(env, data, keynumber, ra);
54
- omap_sysctl_read8,
100
+ }
55
- omap_badwidth_read32,    /* TODO */
101
+ return result;
56
- omap_sysctl_read,
102
}
57
- },
103
58
- .write = {
104
if ((pac ^ ptr) & cmp_mask) {
59
- omap_sysctl_write8,
60
- omap_badwidth_write32,    /* TODO */
61
- omap_sysctl_write,
62
- },
63
- },
64
+ .read = omap_sysctl_readfn,
65
+ .write = omap_sysctl_writefn,
66
+ .valid.min_access_size = 1,
67
+ .valid.max_access_size = 4,
68
.endianness = DEVICE_NATIVE_ENDIAN,
69
};
70
71
--
105
--
72
2.7.4
106
2.34.1
73
74
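For what it's worth, the FEAT_FPAC check added in the pauth_helper.c hunk above boils down to "fault unless every checked PAC bit of the XORed result equals bit 55". A rough standalone sketch of just that comparison (hypothetical mask and pointer values, local sextract64 stand-in; not the full QEMU helper):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Local stand-in for QEMU's sextract64(): sign-extend 'len' bits starting at 'start'. */
static int64_t sextract64(uint64_t value, int start, int len)
{
    return ((int64_t)(value << (64 - len - start))) >> (64 - len);
}

/* With FEAT_Pauth2, AUT* returns ptr ^ (pac & cmp_mask); with FEAT_FPAC a
 * mismatch faults instead of silently yielding a poisoned pointer. */
static bool fpac_would_fault(uint64_t ptr, uint64_t pac, uint64_t cmp_mask)
{
    uint64_t result = ptr ^ (pac & cmp_mask);

    /* Canonical: every bit covered by cmp_mask equals bit 55 of the result. */
    return ((result ^ (uint64_t)sextract64(result, 55, 1)) & cmp_mask) != 0;
}

int main(void)
{
    /* Illustrative mask over the PAC field, with bit 55 excluded as in the hunk. */
    uint64_t cmp_mask = 0x00ff000000000000ull & ~(1ull << 55);
    uint64_t authed_ok = 0x0000000012345678ull;   /* PAC bits already cancelled */
    uint64_t authed_bad = 0x0012000012345678ull;  /* stray PAC bits survive */

    printf("ok: %d bad: %d\n",
           fpac_would_fault(authed_ok, 0, cmp_mask),
           fpac_would_fault(authed_bad, 0, cmp_mask));
    return 0;
}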
1
In v7M, the fixed-priority exceptions are:
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Reset: -3
3
NMI: -2
4
HardFault: -1
5
2
6
In v8M, this changes because Secure HardFault may need
3
Fix when using GCC v11.4 (Ubuntu 11.4.0-1ubuntu1~22.04) with CFLAGS=-Og:
7
to be prioritised above NMI:
8
Reset: -4
9
Secure HardFault if AIRCR.BFHFNMINS == 1: -3
10
NMI: -2
11
Secure HardFault if AIRCR.BFHFNMINS == 0: -1
12
NonSecure HardFault: -1
13
4
14
Make these changes, including support for changing the
5
[4/6] Compiling C object libcommon.fa.p/hw_intc_arm_gicv3_its.c.o
15
priority of Secure HardFault as AIRCR.BFHFNMINS changes.
6
FAILED: libcommon.fa.p/hw_intc_arm_gicv3_its.c.o
7
inlined from ‘lookup_vte’ at hw/intc/arm_gicv3_its.c:453:9,
8
inlined from ‘vmovp_callback’ at hw/intc/arm_gicv3_its.c:1039:14:
9
hw/intc/arm_gicv3_its.c:347:9: error: ‘vte.rdbase’ may be used uninitialized [-Werror=maybe-uninitialized]
10
347 | trace_gicv3_its_vte_read(vpeid, vte->valid, vte->vptsize,
11
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12
348 | vte->vptaddr, vte->rdbase);
13
| ~~~~~~~~~~~~~~~~~~~~~~~~~~
14
hw/intc/arm_gicv3_its.c: In function ‘vmovp_callback’:
15
hw/intc/arm_gicv3_its.c:1036:13: note: ‘vte’ declared here
16
1036 | VTEntry vte;
17
| ^~~
16
18
19
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
20
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
21
Message-id: 20230831131348.69032-1-philmd@linaro.org
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
22
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
Message-id: 1505240046-11454-14-git-send-email-peter.maydell@linaro.org
20
---
23
---
21
hw/intc/armv7m_nvic.c | 22 +++++++++++++++++++---
24
hw/intc/arm_gicv3_its.c | 15 ++++++---------
22
1 file changed, 19 insertions(+), 3 deletions(-)
25
1 file changed, 6 insertions(+), 9 deletions(-)
23
26
24
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
27
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
25
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
26
--- a/hw/intc/armv7m_nvic.c
29
--- a/hw/intc/arm_gicv3_its.c
27
+++ b/hw/intc/armv7m_nvic.c
30
+++ b/hw/intc/arm_gicv3_its.c
28
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
31
@@ -XXX,XX +XXX,XX @@ static MemTxResult get_vte(GICv3ITSState *s, uint32_t vpeid, VTEntry *vte)
29
(R_V7M_AIRCR_SYSRESETREQS_MASK |
32
if (entry_addr == -1) {
30
R_V7M_AIRCR_BFHFNMINS_MASK |
33
/* No L2 table entry, i.e. no valid VTE, or a memory error */
31
R_V7M_AIRCR_PRIS_MASK);
34
vte->valid = false;
32
+ /* BFHFNMINS changes the priority of Secure HardFault */
35
- goto out;
33
+ if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
36
+ trace_gicv3_its_vte_read_fault(vpeid);
34
+ s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
37
+ return MEMTX_OK;
35
+ } else {
36
+ s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
37
+ }
38
}
39
nvic_irq_update(s);
40
}
41
@@ -XXX,XX +XXX,XX @@ static int nvic_post_load(void *opaque, int version_id)
42
{
43
NVICState *s = opaque;
44
unsigned i;
45
+ int resetprio;
46
47
/* Check for out of range priority settings */
48
- if (s->vectors[ARMV7M_EXCP_RESET].prio != -3 ||
49
+ resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
50
+
51
+ if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
52
s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
53
s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
54
return 1;
55
@@ -XXX,XX +XXX,XX @@ static int nvic_security_post_load(void *opaque, int version_id)
56
int i;
57
58
/* Check for out of range priority settings */
59
- if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1) {
60
+ if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
61
+ && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
62
+ /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
63
+ * if the CPU state has been migrated yet; a mismatch won't
64
+ * cause the emulation to blow up, though.
65
+ */
66
return 1;
67
}
38
}
68
for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
39
vteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
69
@@ -XXX,XX +XXX,XX @@ static Property props_nvic[] = {
40
if (res != MEMTX_OK) {
70
41
- goto out;
71
static void armv7m_nvic_reset(DeviceState *dev)
42
+ trace_gicv3_its_vte_read_fault(vpeid);
72
{
43
+ return res;
73
+ int resetprio;
44
}
74
NVICState *s = NVIC(dev);
45
vte->valid = FIELD_EX64(vteval, VTE, VALID);
75
46
vte->vptsize = FIELD_EX64(vteval, VTE, VPTSIZE);
76
s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
47
vte->vptaddr = FIELD_EX64(vteval, VTE, VPTADDR);
77
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_reset(DeviceState *dev)
48
vte->rdbase = FIELD_EX64(vteval, VTE, RDBASE);
78
s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
49
-out:
79
s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
50
- if (res != MEMTX_OK) {
80
51
- trace_gicv3_its_vte_read_fault(vpeid);
81
- s->vectors[ARMV7M_EXCP_RESET].prio = -3;
52
- } else {
82
+ resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
53
- trace_gicv3_its_vte_read(vpeid, vte->valid, vte->vptsize,
83
+ s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
54
- vte->vptaddr, vte->rdbase);
84
s->vectors[ARMV7M_EXCP_NMI].prio = -2;
55
- }
85
s->vectors[ARMV7M_EXCP_HARD].prio = -1;
56
+ trace_gicv3_its_vte_read(vpeid, vte->valid, vte->vptsize,
57
+ vte->vptaddr, vte->rdbase);
58
return res;
59
}
86
60
87
--
61
--
88
2.7.4
62
2.34.1
89
63
90
64
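To restate the fixed-priority table from the commit message above in code form, here is a small standalone sketch (names invented for illustration; not the NVIC code itself):

#include <stdbool.h>
#include <stdio.h>

/* v7M: Reset -3, NMI -2, HardFault -1.
 * v8M: Reset -4, NMI -2, Secure HardFault -3 when AIRCR.BFHFNMINS is set
 *      (otherwise -1), NonSecure HardFault -1. */
enum fixed_exc { EXC_RESET, EXC_NMI, EXC_HARDFAULT };

static int fixed_prio(enum fixed_exc exc, bool v8m, bool secure, bool bfhfnmins)
{
    switch (exc) {
    case EXC_RESET:
        return v8m ? -4 : -3;
    case EXC_NMI:
        return -2;
    case EXC_HARDFAULT:
        return (v8m && secure && bfhfnmins) ? -3 : -1;
    }
    return 0; /* unreachable */
}

int main(void)
{
    printf("v8M Reset: %d\n", fixed_prio(EXC_RESET, true, false, false));
    printf("v8M Secure HardFault (BFHFNMINS=1): %d\n",
           fixed_prio(EXC_HARDFAULT, true, true, true));
    printf("v8M NonSecure HardFault: %d\n",
           fixed_prio(EXC_HARDFAULT, true, false, false));
    return 0;
}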
1
From: Subbaraya Sundeep <sundeep.lkml@gmail.com>
1
From: Francisco Iglesias <francisco.iglesias@amd.com>
2
2
3
The Smartfusion2 SoC has a hardened Microcontroller subsystem
3
Introduce the Xilinx Configuration Frame Interface (CFI) for transmitting
4
and a flash-based FPGA fabric. This patch adds support for
4
CFI data packets between the Xilinx Configuration Frame Unit models
5
the Microcontroller subsystem in the SoC.
5
(CFU_APB, CFU_FDRO and CFU_SFR), the Xilinx CFRAME controller (CFRAME_REG)
6
and the Xilinx CFRAME broadcast controller (CFRAME_BCAST_REG) models (when
7
emulating bitstream programming and readback).
6
8
7
Signed-off-by: Subbaraya Sundeep <sundeep.lkml@gmail.com>
9
Signed-off-by: Francisco Iglesias <francisco.iglesias@amd.com>
8
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
10
Reviewed-by: Sai Pavan Boddu <sai.pavan.boddu@amd.com>
9
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Acked-by: Edgar E. Iglesias <edgar@zeroasic.com>
10
Message-id: 20170920201737.25723-5-f4bug@amsat.org
12
Message-id: 20230831165701.2016397-2-francisco.iglesias@amd.com
11
[PMD: drop cpu_model to directly use cpu type, check m3clk non null]
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
---
14
hw/arm/Makefile.objs | 1 +
15
MAINTAINERS | 6 ++++
15
include/hw/arm/msf2-soc.h | 67 +++++++++++
16
include/hw/misc/xlnx-cfi-if.h | 59 +++++++++++++++++++++++++++++++++++
16
hw/arm/msf2-soc.c | 238 ++++++++++++++++++++++++++++++++++++++++
17
hw/misc/xlnx-cfi-if.c | 34 ++++++++++++++++++++
17
default-configs/arm-softmmu.mak | 1 +
18
hw/misc/meson.build | 1 +
18
4 files changed, 307 insertions(+)
19
4 files changed, 100 insertions(+)
19
create mode 100644 include/hw/arm/msf2-soc.h
20
create mode 100644 include/hw/misc/xlnx-cfi-if.h
20
create mode 100644 hw/arm/msf2-soc.c
21
create mode 100644 hw/misc/xlnx-cfi-if.c
21
22
22
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
23
diff --git a/MAINTAINERS b/MAINTAINERS
23
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
24
--- a/hw/arm/Makefile.objs
25
--- a/MAINTAINERS
25
+++ b/hw/arm/Makefile.objs
26
+++ b/MAINTAINERS
26
@@ -XXX,XX +XXX,XX @@ obj-$(CONFIG_FSL_IMX31) += fsl-imx31.o kzm.o
27
@@ -XXX,XX +XXX,XX @@ S: Maintained
27
obj-$(CONFIG_FSL_IMX6) += fsl-imx6.o sabrelite.o
28
F: hw/ssi/xlnx-versal-ospi.c
28
obj-$(CONFIG_ASPEED_SOC) += aspeed_soc.o aspeed.o
29
F: include/hw/ssi/xlnx-versal-ospi.h
29
obj-$(CONFIG_MPS2) += mps2.o
30
30
+obj-$(CONFIG_MSF2) += msf2-soc.o
31
+Xilinx Versal CFI
31
diff --git a/include/hw/arm/msf2-soc.h b/include/hw/arm/msf2-soc.h
32
+M: Francisco Iglesias <francisco.iglesias@amd.com>
33
+S: Maintained
34
+F: hw/misc/xlnx-cfi-if.c
35
+F: include/hw/misc/xlnx-cfi-if.h
36
+
37
STM32F100
38
M: Alexandre Iooss <erdnaxe@crans.org>
39
L: qemu-arm@nongnu.org
40
diff --git a/include/hw/misc/xlnx-cfi-if.h b/include/hw/misc/xlnx-cfi-if.h
32
new file mode 100644
41
new file mode 100644
33
index XXXXXXX..XXXXXXX
42
index XXXXXXX..XXXXXXX
34
--- /dev/null
43
--- /dev/null
35
+++ b/include/hw/arm/msf2-soc.h
44
+++ b/include/hw/misc/xlnx-cfi-if.h
36
@@ -XXX,XX +XXX,XX @@
45
@@ -XXX,XX +XXX,XX @@
37
+/*
46
+/*
38
+ * Microsemi Smartfusion2 SoC
47
+ * Xilinx CFI interface
39
+ *
48
+ *
40
+ * Copyright (c) 2017 Subbaraya Sundeep <sundeep.lkml@gmail.com>
49
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
41
+ *
50
+ *
42
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
51
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
43
+ * of this software and associated documentation files (the "Software"), to deal
44
+ * in the Software without restriction, including without limitation the rights
45
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
46
+ * copies of the Software, and to permit persons to whom the Software is
47
+ * furnished to do so, subject to the following conditions:
48
+ *
52
+ *
49
+ * The above copyright notice and this permission notice shall be included in
53
+ * SPDX-License-Identifier: GPL-2.0-or-later
50
+ * all copies or substantial portions of the Software.
54
+ */
55
+#ifndef XLNX_CFI_IF_H
56
+#define XLNX_CFI_IF_H 1
57
+
58
+#include "qemu/help-texts.h"
59
+#include "hw/hw.h"
60
+#include "qom/object.h"
61
+
62
+#define TYPE_XLNX_CFI_IF "xlnx-cfi-if"
63
+typedef struct XlnxCfiIfClass XlnxCfiIfClass;
64
+DECLARE_CLASS_CHECKERS(XlnxCfiIfClass, XLNX_CFI_IF, TYPE_XLNX_CFI_IF)
65
+
66
+#define XLNX_CFI_IF(obj) \
67
+ INTERFACE_CHECK(XlnxCfiIf, (obj), TYPE_XLNX_CFI_IF)
68
+
69
+typedef enum {
70
+ PACKET_TYPE_CFU = 0x52,
71
+ PACKET_TYPE_CFRAME = 0xA1,
72
+} xlnx_cfi_packet_type;
73
+
74
+typedef enum {
75
+ CFRAME_FAR = 1,
76
+ CFRAME_SFR = 2,
77
+ CFRAME_FDRI = 4,
78
+ CFRAME_CMD = 6,
79
+} xlnx_cfi_reg_addr;
80
+
81
+typedef struct XlnxCfiPacket {
82
+ uint8_t reg_addr;
83
+ uint32_t data[4];
84
+} XlnxCfiPacket;
85
+
86
+typedef struct XlnxCfiIf {
87
+ Object Parent;
88
+} XlnxCfiIf;
89
+
90
+typedef struct XlnxCfiIfClass {
91
+ InterfaceClass parent;
92
+
93
+ void (*cfi_transfer_packet)(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt);
94
+} XlnxCfiIfClass;
95
+
96
+/**
97
+ * Transfer a XlnxCfiPacket.
51
+ *
98
+ *
52
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
99
+ * @cfi_if: the object implementing this interface
53
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
100
+ * @pkt: a pointer to the XlnxCfiPacket to transfer
54
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
55
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
56
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
57
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
58
+ * THE SOFTWARE.
59
+ */
101
+ */
102
+void xlnx_cfi_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt);
60
+
103
+
61
+#ifndef HW_ARM_MSF2_SOC_H
104
+#endif /* XLNX_CFI_IF_H */
62
+#define HW_ARM_MSF2_SOC_H
105
diff --git a/hw/misc/xlnx-cfi-if.c b/hw/misc/xlnx-cfi-if.c
63
+
64
+#include "hw/arm/armv7m.h"
65
+#include "hw/timer/mss-timer.h"
66
+#include "hw/misc/msf2-sysreg.h"
67
+#include "hw/ssi/mss-spi.h"
68
+
69
+#define TYPE_MSF2_SOC "msf2-soc"
70
+#define MSF2_SOC(obj) OBJECT_CHECK(MSF2State, (obj), TYPE_MSF2_SOC)
71
+
72
+#define MSF2_NUM_SPIS 2
73
+#define MSF2_NUM_UARTS 2
74
+
75
+/*
76
+ * System timer consists of two programmable 32-bit
77
+ * decrementing counters that generate individual interrupts to
78
+ * the Cortex-M3 processor
79
+ */
80
+#define MSF2_NUM_TIMERS 2
81
+
82
+typedef struct MSF2State {
83
+ /*< private >*/
84
+ SysBusDevice parent_obj;
85
+ /*< public >*/
86
+
87
+ ARMv7MState armv7m;
88
+
89
+ char *cpu_type;
90
+ char *part_name;
91
+ uint64_t envm_size;
92
+ uint64_t esram_size;
93
+
94
+ uint32_t m3clk;
95
+ uint8_t apb0div;
96
+ uint8_t apb1div;
97
+
98
+ MSF2SysregState sysreg;
99
+ MSSTimerState timer;
100
+ MSSSpiState spi[MSF2_NUM_SPIS];
101
+} MSF2State;
102
+
103
+#endif
104
diff --git a/hw/arm/msf2-soc.c b/hw/arm/msf2-soc.c
105
new file mode 100644
106
new file mode 100644
106
index XXXXXXX..XXXXXXX
107
index XXXXXXX..XXXXXXX
107
--- /dev/null
108
--- /dev/null
108
+++ b/hw/arm/msf2-soc.c
109
+++ b/hw/misc/xlnx-cfi-if.c
109
@@ -XXX,XX +XXX,XX @@
110
@@ -XXX,XX +XXX,XX @@
110
+/*
111
+/*
111
+ * SmartFusion2 SoC emulation.
112
+ * Xilinx CFI interface
112
+ *
113
+ *
113
+ * Copyright (c) 2017 Subbaraya Sundeep <sundeep.lkml@gmail.com>
114
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
114
+ *
115
+ *
115
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
116
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
116
+ * of this software and associated documentation files (the "Software"), to deal
117
+ * in the Software without restriction, including without limitation the rights
118
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
119
+ * copies of the Software, and to permit persons to whom the Software is
120
+ * furnished to do so, subject to the following conditions:
121
+ *
117
+ *
122
+ * The above copyright notice and this permission notice shall be included in
118
+ * SPDX-License-Identifier: GPL-2.0-or-later
123
+ * all copies or substantial portions of the Software.
124
+ *
125
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
126
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
127
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
128
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
129
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
130
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
131
+ * THE SOFTWARE.
132
+ */
119
+ */
120
+#include "qemu/osdep.h"
121
+#include "hw/misc/xlnx-cfi-if.h"
133
+
122
+
134
+#include "qemu/osdep.h"
123
+void xlnx_cfi_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt)
135
+#include "qapi/error.h"
124
+{
136
+#include "qemu-common.h"
125
+ XlnxCfiIfClass *xcic = XLNX_CFI_IF_GET_CLASS(cfi_if);
137
+#include "hw/arm/arm.h"
138
+#include "exec/address-spaces.h"
139
+#include "hw/char/serial.h"
140
+#include "hw/boards.h"
141
+#include "sysemu/block-backend.h"
142
+#include "qemu/cutils.h"
143
+#include "hw/arm/msf2-soc.h"
144
+#include "hw/misc/unimp.h"
145
+
126
+
146
+#define MSF2_TIMER_BASE 0x40004000
127
+ if (xcic->cfi_transfer_packet) {
147
+#define MSF2_SYSREG_BASE 0x40038000
128
+ xcic->cfi_transfer_packet(cfi_if, pkt);
148
+
149
+#define ENVM_BASE_ADDRESS 0x60000000
150
+
151
+#define SRAM_BASE_ADDRESS 0x20000000
152
+
153
+#define MSF2_ENVM_MAX_SIZE (512 * K_BYTE)
154
+
155
+/*
156
+ * eSRAM max size is 80k without SECDED(Single error correction and
157
+ * dual error detection) feature and 64k with SECDED.
158
+ * We do not support SECDED now.
159
+ */
160
+#define MSF2_ESRAM_MAX_SIZE (80 * K_BYTE)
161
+
162
+static const uint32_t spi_addr[MSF2_NUM_SPIS] = { 0x40001000 , 0x40011000 };
163
+static const uint32_t uart_addr[MSF2_NUM_UARTS] = { 0x40000000 , 0x40010000 };
164
+
165
+static const int spi_irq[MSF2_NUM_SPIS] = { 2, 3 };
166
+static const int uart_irq[MSF2_NUM_UARTS] = { 10, 11 };
167
+static const int timer_irq[MSF2_NUM_TIMERS] = { 14, 15 };
168
+
169
+static void m2sxxx_soc_initfn(Object *obj)
170
+{
171
+ MSF2State *s = MSF2_SOC(obj);
172
+ int i;
173
+
174
+ object_initialize(&s->armv7m, sizeof(s->armv7m), TYPE_ARMV7M);
175
+ qdev_set_parent_bus(DEVICE(&s->armv7m), sysbus_get_default());
176
+
177
+ object_initialize(&s->sysreg, sizeof(s->sysreg), TYPE_MSF2_SYSREG);
178
+ qdev_set_parent_bus(DEVICE(&s->sysreg), sysbus_get_default());
179
+
180
+ object_initialize(&s->timer, sizeof(s->timer), TYPE_MSS_TIMER);
181
+ qdev_set_parent_bus(DEVICE(&s->timer), sysbus_get_default());
182
+
183
+ for (i = 0; i < MSF2_NUM_SPIS; i++) {
184
+ object_initialize(&s->spi[i], sizeof(s->spi[i]),
185
+ TYPE_MSS_SPI);
186
+ qdev_set_parent_bus(DEVICE(&s->spi[i]), sysbus_get_default());
187
+ }
129
+ }
188
+}
130
+}
189
+
131
+
190
+static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp)
132
+static const TypeInfo xlnx_cfi_if_info = {
133
+ .name = TYPE_XLNX_CFI_IF,
134
+ .parent = TYPE_INTERFACE,
135
+ .class_size = sizeof(XlnxCfiIfClass),
136
+};
137
+
138
+static void xlnx_cfi_if_register_types(void)
191
+{
139
+{
192
+ MSF2State *s = MSF2_SOC(dev_soc);
140
+ type_register_static(&xlnx_cfi_if_info);
193
+ DeviceState *dev, *armv7m;
194
+ SysBusDevice *busdev;
195
+ Error *err = NULL;
196
+ int i;
197
+
198
+ MemoryRegion *system_memory = get_system_memory();
199
+ MemoryRegion *nvm = g_new(MemoryRegion, 1);
200
+ MemoryRegion *nvm_alias = g_new(MemoryRegion, 1);
201
+ MemoryRegion *sram = g_new(MemoryRegion, 1);
202
+
203
+ memory_region_init_rom(nvm, NULL, "MSF2.eNVM", s->envm_size,
204
+ &error_fatal);
205
+ /*
206
+ * On power-on, the eNVM region 0x60000000 is automatically
207
+ * remapped to the Cortex-M3 processor executable region
208
+ * start address (0x0). We do not support remapping other eNVM,
209
+ * eSRAM and DDR regions by guest(via Sysreg) currently.
210
+ */
211
+ memory_region_init_alias(nvm_alias, NULL, "MSF2.eNVM",
212
+ nvm, 0, s->envm_size);
213
+
214
+ memory_region_add_subregion(system_memory, ENVM_BASE_ADDRESS, nvm);
215
+ memory_region_add_subregion(system_memory, 0, nvm_alias);
216
+
217
+ memory_region_init_ram(sram, NULL, "MSF2.eSRAM", s->esram_size,
218
+ &error_fatal);
219
+ memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, sram);
220
+
221
+ armv7m = DEVICE(&s->armv7m);
222
+ qdev_prop_set_uint32(armv7m, "num-irq", 81);
223
+ qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type);
224
+ object_property_set_link(OBJECT(&s->armv7m), OBJECT(get_system_memory()),
225
+ "memory", &error_abort);
226
+ object_property_set_bool(OBJECT(&s->armv7m), true, "realized", &err);
227
+ if (err != NULL) {
228
+ error_propagate(errp, err);
229
+ return;
230
+ }
231
+
232
+ if (!s->m3clk) {
233
+ error_setg(errp, "Invalid m3clk value");
234
+ error_append_hint(errp, "m3clk can not be zero\n");
235
+ return;
236
+ }
237
+ system_clock_scale = NANOSECONDS_PER_SECOND / s->m3clk;
238
+
239
+ for (i = 0; i < MSF2_NUM_UARTS; i++) {
240
+ if (serial_hds[i]) {
241
+ serial_mm_init(get_system_memory(), uart_addr[i], 2,
242
+ qdev_get_gpio_in(armv7m, uart_irq[i]),
243
+ 115200, serial_hds[i], DEVICE_NATIVE_ENDIAN);
244
+ }
245
+ }
246
+
247
+ dev = DEVICE(&s->timer);
248
+ /* APB0 clock is the timer input clock */
249
+ qdev_prop_set_uint32(dev, "clock-frequency", s->m3clk / s->apb0div);
250
+ object_property_set_bool(OBJECT(&s->timer), true, "realized", &err);
251
+ if (err != NULL) {
252
+ error_propagate(errp, err);
253
+ return;
254
+ }
255
+ busdev = SYS_BUS_DEVICE(dev);
256
+ sysbus_mmio_map(busdev, 0, MSF2_TIMER_BASE);
257
+ sysbus_connect_irq(busdev, 0,
258
+ qdev_get_gpio_in(armv7m, timer_irq[0]));
259
+ sysbus_connect_irq(busdev, 1,
260
+ qdev_get_gpio_in(armv7m, timer_irq[1]));
261
+
262
+ dev = DEVICE(&s->sysreg);
263
+ qdev_prop_set_uint32(dev, "apb0divisor", s->apb0div);
264
+ qdev_prop_set_uint32(dev, "apb1divisor", s->apb1div);
265
+ object_property_set_bool(OBJECT(&s->sysreg), true, "realized", &err);
266
+ if (err != NULL) {
267
+ error_propagate(errp, err);
268
+ return;
269
+ }
270
+ busdev = SYS_BUS_DEVICE(dev);
271
+ sysbus_mmio_map(busdev, 0, MSF2_SYSREG_BASE);
272
+
273
+ for (i = 0; i < MSF2_NUM_SPIS; i++) {
274
+ gchar *bus_name;
275
+
276
+ object_property_set_bool(OBJECT(&s->spi[i]), true, "realized", &err);
277
+ if (err != NULL) {
278
+ error_propagate(errp, err);
279
+ return;
280
+ }
281
+
282
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, spi_addr[i]);
283
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0,
284
+ qdev_get_gpio_in(armv7m, spi_irq[i]));
285
+
286
+ /* Alias controller SPI bus to the SoC itself */
287
+ bus_name = g_strdup_printf("spi%d", i);
288
+ object_property_add_alias(OBJECT(s), bus_name,
289
+ OBJECT(&s->spi[i]), "spi",
290
+ &error_abort);
291
+ g_free(bus_name);
292
+ }
293
+
294
+ /* Below devices are not modelled yet. */
295
+ create_unimplemented_device("i2c_0", 0x40002000, 0x1000);
296
+ create_unimplemented_device("dma", 0x40003000, 0x1000);
297
+ create_unimplemented_device("watchdog", 0x40005000, 0x1000);
298
+ create_unimplemented_device("i2c_1", 0x40012000, 0x1000);
299
+ create_unimplemented_device("gpio", 0x40013000, 0x1000);
300
+ create_unimplemented_device("hs-dma", 0x40014000, 0x1000);
301
+ create_unimplemented_device("can", 0x40015000, 0x1000);
302
+ create_unimplemented_device("rtc", 0x40017000, 0x1000);
303
+ create_unimplemented_device("apb_config", 0x40020000, 0x10000);
304
+ create_unimplemented_device("emac", 0x40041000, 0x1000);
305
+ create_unimplemented_device("usb", 0x40043000, 0x1000);
306
+}
141
+}
307
+
142
+
308
+static Property m2sxxx_soc_properties[] = {
143
+type_init(xlnx_cfi_if_register_types)
309
+ /*
310
+ * part name specifies the type of SmartFusion2 device variant (this
311
+ * property is for information purposes only).
312
+ */
313
+ DEFINE_PROP_STRING("cpu-type", MSF2State, cpu_type),
314
+ DEFINE_PROP_STRING("part-name", MSF2State, part_name),
315
+ DEFINE_PROP_UINT64("eNVM-size", MSF2State, envm_size, MSF2_ENVM_MAX_SIZE),
316
+ DEFINE_PROP_UINT64("eSRAM-size", MSF2State, esram_size,
317
+ MSF2_ESRAM_MAX_SIZE),
318
+ /* Libero GUI shows 100Mhz as default for clocks */
319
+ DEFINE_PROP_UINT32("m3clk", MSF2State, m3clk, 100 * 1000000),
320
+ /* default divisors in Libero GUI */
321
+ DEFINE_PROP_UINT8("apb0div", MSF2State, apb0div, 2),
322
+ DEFINE_PROP_UINT8("apb1div", MSF2State, apb1div, 2),
323
+ DEFINE_PROP_END_OF_LIST(),
324
+};
325
+
144
+
326
+static void m2sxxx_soc_class_init(ObjectClass *klass, void *data)
145
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
327
+{
328
+ DeviceClass *dc = DEVICE_CLASS(klass);
329
+
330
+ dc->realize = m2sxxx_soc_realize;
331
+ dc->props = m2sxxx_soc_properties;
332
+}
333
+
334
+static const TypeInfo m2sxxx_soc_info = {
335
+ .name = TYPE_MSF2_SOC,
336
+ .parent = TYPE_SYS_BUS_DEVICE,
337
+ .instance_size = sizeof(MSF2State),
338
+ .instance_init = m2sxxx_soc_initfn,
339
+ .class_init = m2sxxx_soc_class_init,
340
+};
341
+
342
+static void m2sxxx_soc_types(void)
343
+{
344
+ type_register_static(&m2sxxx_soc_info);
345
+}
346
+
347
+type_init(m2sxxx_soc_types)
348
diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
349
index XXXXXXX..XXXXXXX 100644
146
index XXXXXXX..XXXXXXX 100644
350
--- a/default-configs/arm-softmmu.mak
147
--- a/hw/misc/meson.build
351
+++ b/default-configs/arm-softmmu.mak
148
+++ b/hw/misc/meson.build
352
@@ -XXX,XX +XXX,XX @@ CONFIG_ACPI=y
149
@@ -XXX,XX +XXX,XX @@ specific_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-crl.c'))
353
CONFIG_SMBIOS=y
150
system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files(
354
CONFIG_ASPEED_SOC=y
151
'xlnx-versal-xramc.c',
355
CONFIG_GPIO_KEY=y
152
'xlnx-versal-pmc-iou-slcr.c',
356
+CONFIG_MSF2=y
153
+ 'xlnx-cfi-if.c',
154
))
155
system_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c'))
156
system_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c'))
357
--
157
--
358
2.7.4
158
2.34.1
359
360
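As a usage note on the interface introduced above: a device model consumes CFI packets by implementing cfi_transfer_packet through the usual QOM interface mechanism. A rough sketch follows (hypothetical device name, only buildable inside a QEMU tree with the new header; it assumes the standard QOM interface pattern and is not code from this series):

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/misc/xlnx-cfi-if.h"

#define TYPE_MY_CFI_SINK "my-cfi-sink"   /* hypothetical consumer device */
OBJECT_DECLARE_SIMPLE_TYPE(MyCfiSink, MY_CFI_SINK)

struct MyCfiSink {
    SysBusDevice parent_obj;
    XlnxCfiPacket last_pkt;              /* latch the most recent packet */
};

static void my_cfi_sink_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt)
{
    MyCfiSink *s = MY_CFI_SINK(cfi_if);

    s->last_pkt = *pkt;                  /* e.g. act on pkt->reg_addr / pkt->data[] */
}

static void my_cfi_sink_class_init(ObjectClass *klass, void *data)
{
    XlnxCfiIfClass *xcic = XLNX_CFI_IF_CLASS(klass);

    xcic->cfi_transfer_packet = my_cfi_sink_transfer_packet;
}

static const TypeInfo my_cfi_sink_info = {
    .name          = TYPE_MY_CFI_SINK,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MyCfiSink),
    .class_init    = my_cfi_sink_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XLNX_CFI_IF },
        { }
    },
};

static void my_cfi_sink_register_types(void)
{
    type_register_static(&my_cfi_sink_info);
}

type_init(my_cfi_sink_register_types)

/* A producer would then hand packets over with:
 *     xlnx_cfi_transfer_packet(XLNX_CFI_IF(sink_dev), &pkt);
 */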
1
From: Subbaraya Sundeep <sundeep.lkml@gmail.com>
1
From: Francisco Iglesias <francisco.iglesias@amd.com>
2
2
3
Modelled Microsemi's Smartfusion2 SPI controller.
3
Introduce a model of the software programming interface (CFU_APB) of
4
Xilinx Versal's Configuration Frame Unit.
4
5
5
Signed-off-by: Subbaraya Sundeep <sundeep.lkml@gmail.com>
6
Signed-off-by: Francisco Iglesias <francisco.iglesias@amd.com>
6
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Message-id: 20230831165701.2016397-3-francisco.iglesias@amd.com
8
Message-id: 20170920201737.25723-4-f4bug@amsat.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
hw/ssi/Makefile.objs | 1 +
11
MAINTAINERS | 2 +
12
include/hw/ssi/mss-spi.h | 58 +++++++
12
include/hw/misc/xlnx-versal-cfu.h | 231 ++++++++++++++++++
13
hw/ssi/mss-spi.c | 404 +++++++++++++++++++++++++++++++++++++++++++++++
13
hw/misc/xlnx-versal-cfu.c | 380 ++++++++++++++++++++++++++++++
14
3 files changed, 463 insertions(+)
14
hw/misc/meson.build | 1 +
15
create mode 100644 include/hw/ssi/mss-spi.h
15
4 files changed, 614 insertions(+)
16
create mode 100644 hw/ssi/mss-spi.c
16
create mode 100644 include/hw/misc/xlnx-versal-cfu.h
17
create mode 100644 hw/misc/xlnx-versal-cfu.c
17
18
18
diff --git a/hw/ssi/Makefile.objs b/hw/ssi/Makefile.objs
19
diff --git a/MAINTAINERS b/MAINTAINERS
19
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/ssi/Makefile.objs
21
--- a/MAINTAINERS
21
+++ b/hw/ssi/Makefile.objs
22
+++ b/MAINTAINERS
22
@@ -XXX,XX +XXX,XX @@ common-obj-$(CONFIG_XILINX_SPI) += xilinx_spi.o
23
@@ -XXX,XX +XXX,XX @@ M: Francisco Iglesias <francisco.iglesias@amd.com>
23
common-obj-$(CONFIG_XILINX_SPIPS) += xilinx_spips.o
24
S: Maintained
24
common-obj-$(CONFIG_ASPEED_SOC) += aspeed_smc.o
25
F: hw/misc/xlnx-cfi-if.c
25
common-obj-$(CONFIG_STM32F2XX_SPI) += stm32f2xx_spi.o
26
F: include/hw/misc/xlnx-cfi-if.h
26
+common-obj-$(CONFIG_MSF2) += mss-spi.o
27
+F: hw/misc/xlnx-versal-cfu.c
27
28
+F: include/hw/misc/xlnx-versal-cfu.h
28
obj-$(CONFIG_OMAP) += omap_spi.o
29
29
obj-$(CONFIG_IMX) += imx_spi.o
30
STM32F100
30
diff --git a/include/hw/ssi/mss-spi.h b/include/hw/ssi/mss-spi.h
31
M: Alexandre Iooss <erdnaxe@crans.org>
32
diff --git a/include/hw/misc/xlnx-versal-cfu.h b/include/hw/misc/xlnx-versal-cfu.h
31
new file mode 100644
33
new file mode 100644
32
index XXXXXXX..XXXXXXX
34
index XXXXXXX..XXXXXXX
33
--- /dev/null
35
--- /dev/null
34
+++ b/include/hw/ssi/mss-spi.h
36
+++ b/include/hw/misc/xlnx-versal-cfu.h
35
@@ -XXX,XX +XXX,XX @@
37
@@ -XXX,XX +XXX,XX @@
36
+/*
38
+/*
37
+ * Microsemi SmartFusion2 SPI
39
+ * QEMU model of the CFU Configuration Unit.
38
+ *
40
+ *
39
+ * Copyright (c) 2017 Subbaraya Sundeep <sundeep.lkml@gmail.com>
41
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
40
+ *
42
+ *
41
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
43
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
42
+ * of this software and associated documentation files (the "Software"), to deal
44
+ *
43
+ * in the Software without restriction, including without limitation the rights
45
+ * SPDX-License-Identifier: GPL-2.0-or-later
44
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
46
+ *
45
+ * copies of the Software, and to permit persons to whom the Software is
47
+ * References:
46
+ * furnished to do so, subject to the following conditions:
48
+ * [1] Versal ACAP Technical Reference Manual,
47
+ *
49
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
48
+ * The above copyright notice and this permission notice shall be included in
50
+ *
49
+ * all copies or substantial portions of the Software.
51
+ * [2] Versal ACAP Register Reference,
50
+ *
52
+ * https://www.xilinx.com/htmldocs/registers/am012/am012-versal-register-reference.html
51
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
52
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
53
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
54
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
55
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
56
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
57
+ * THE SOFTWARE.
58
+ */
53
+ */
59
+
54
+#ifndef HW_MISC_XLNX_VERSAL_CFU_APB_H
60
+#ifndef HW_MSS_SPI_H
55
+#define HW_MISC_XLNX_VERSAL_CFU_APB_H
61
+#define HW_MSS_SPI_H
62
+
56
+
63
+#include "hw/sysbus.h"
57
+#include "hw/sysbus.h"
64
+#include "hw/ssi/ssi.h"
58
+#include "hw/register.h"
65
+#include "qemu/fifo32.h"
59
+#include "hw/misc/xlnx-cfi-if.h"
66
+
60
+
67
+#define TYPE_MSS_SPI "mss-spi"
61
+#define TYPE_XLNX_VERSAL_CFU_APB "xlnx,versal-cfu-apb"
68
+#define MSS_SPI(obj) OBJECT_CHECK(MSSSpiState, (obj), TYPE_MSS_SPI)
62
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUAPB, XLNX_VERSAL_CFU_APB)
69
+
63
+
70
+#define R_SPI_MAX 16
64
+REG32(CFU_ISR, 0x0)
71
+
65
+ FIELD(CFU_ISR, USR_GTS_EVENT, 9, 1)
72
+typedef struct MSSSpiState {
66
+ FIELD(CFU_ISR, USR_GSR_EVENT, 8, 1)
67
+ FIELD(CFU_ISR, SLVERR, 7, 1)
68
+ FIELD(CFU_ISR, DECOMP_ERROR, 6, 1)
69
+ FIELD(CFU_ISR, BAD_CFI_PACKET, 5, 1)
70
+ FIELD(CFU_ISR, AXI_ALIGN_ERROR, 4, 1)
71
+ FIELD(CFU_ISR, CFI_ROW_ERROR, 3, 1)
72
+ FIELD(CFU_ISR, CRC32_ERROR, 2, 1)
73
+ FIELD(CFU_ISR, CRC8_ERROR, 1, 1)
74
+ FIELD(CFU_ISR, SEU_ENDOFCALIB, 0, 1)
75
+REG32(CFU_IMR, 0x4)
76
+ FIELD(CFU_IMR, USR_GTS_EVENT, 9, 1)
77
+ FIELD(CFU_IMR, USR_GSR_EVENT, 8, 1)
78
+ FIELD(CFU_IMR, SLVERR, 7, 1)
79
+ FIELD(CFU_IMR, DECOMP_ERROR, 6, 1)
80
+ FIELD(CFU_IMR, BAD_CFI_PACKET, 5, 1)
81
+ FIELD(CFU_IMR, AXI_ALIGN_ERROR, 4, 1)
82
+ FIELD(CFU_IMR, CFI_ROW_ERROR, 3, 1)
83
+ FIELD(CFU_IMR, CRC32_ERROR, 2, 1)
84
+ FIELD(CFU_IMR, CRC8_ERROR, 1, 1)
85
+ FIELD(CFU_IMR, SEU_ENDOFCALIB, 0, 1)
86
+REG32(CFU_IER, 0x8)
87
+ FIELD(CFU_IER, USR_GTS_EVENT, 9, 1)
88
+ FIELD(CFU_IER, USR_GSR_EVENT, 8, 1)
89
+ FIELD(CFU_IER, SLVERR, 7, 1)
90
+ FIELD(CFU_IER, DECOMP_ERROR, 6, 1)
91
+ FIELD(CFU_IER, BAD_CFI_PACKET, 5, 1)
92
+ FIELD(CFU_IER, AXI_ALIGN_ERROR, 4, 1)
93
+ FIELD(CFU_IER, CFI_ROW_ERROR, 3, 1)
94
+ FIELD(CFU_IER, CRC32_ERROR, 2, 1)
95
+ FIELD(CFU_IER, CRC8_ERROR, 1, 1)
96
+ FIELD(CFU_IER, SEU_ENDOFCALIB, 0, 1)
97
+REG32(CFU_IDR, 0xc)
98
+ FIELD(CFU_IDR, USR_GTS_EVENT, 9, 1)
99
+ FIELD(CFU_IDR, USR_GSR_EVENT, 8, 1)
100
+ FIELD(CFU_IDR, SLVERR, 7, 1)
101
+ FIELD(CFU_IDR, DECOMP_ERROR, 6, 1)
102
+ FIELD(CFU_IDR, BAD_CFI_PACKET, 5, 1)
103
+ FIELD(CFU_IDR, AXI_ALIGN_ERROR, 4, 1)
104
+ FIELD(CFU_IDR, CFI_ROW_ERROR, 3, 1)
105
+ FIELD(CFU_IDR, CRC32_ERROR, 2, 1)
106
+ FIELD(CFU_IDR, CRC8_ERROR, 1, 1)
107
+ FIELD(CFU_IDR, SEU_ENDOFCALIB, 0, 1)
108
+REG32(CFU_ITR, 0x10)
109
+ FIELD(CFU_ITR, USR_GTS_EVENT, 9, 1)
110
+ FIELD(CFU_ITR, USR_GSR_EVENT, 8, 1)
111
+ FIELD(CFU_ITR, SLVERR, 7, 1)
112
+ FIELD(CFU_ITR, DECOMP_ERROR, 6, 1)
113
+ FIELD(CFU_ITR, BAD_CFI_PACKET, 5, 1)
114
+ FIELD(CFU_ITR, AXI_ALIGN_ERROR, 4, 1)
115
+ FIELD(CFU_ITR, CFI_ROW_ERROR, 3, 1)
116
+ FIELD(CFU_ITR, CRC32_ERROR, 2, 1)
117
+ FIELD(CFU_ITR, CRC8_ERROR, 1, 1)
118
+ FIELD(CFU_ITR, SEU_ENDOFCALIB, 0, 1)
119
+REG32(CFU_PROTECT, 0x14)
120
+ FIELD(CFU_PROTECT, ACTIVE, 0, 1)
121
+REG32(CFU_FGCR, 0x18)
122
+ FIELD(CFU_FGCR, GCLK_CAL, 14, 1)
123
+ FIELD(CFU_FGCR, SC_HBC_TRIGGER, 13, 1)
124
+ FIELD(CFU_FGCR, GLOW, 12, 1)
125
+ FIELD(CFU_FGCR, GPWRDWN, 11, 1)
126
+ FIELD(CFU_FGCR, GCAP, 10, 1)
127
+ FIELD(CFU_FGCR, GSCWE, 9, 1)
128
+ FIELD(CFU_FGCR, GHIGH_B, 8, 1)
129
+ FIELD(CFU_FGCR, GMC_B, 7, 1)
130
+ FIELD(CFU_FGCR, GWE, 6, 1)
131
+ FIELD(CFU_FGCR, GRESTORE, 5, 1)
132
+ FIELD(CFU_FGCR, GTS_CFG_B, 4, 1)
133
+ FIELD(CFU_FGCR, GLUTMASK, 3, 1)
134
+ FIELD(CFU_FGCR, EN_GLOBS_B, 2, 1)
135
+ FIELD(CFU_FGCR, EOS, 1, 1)
136
+ FIELD(CFU_FGCR, INIT_COMPLETE, 0, 1)
137
+REG32(CFU_CTL, 0x1c)
138
+ FIELD(CFU_CTL, GSR_GSC, 15, 1)
139
+ FIELD(CFU_CTL, SLVERR_EN, 14, 1)
140
+ FIELD(CFU_CTL, CRC32_RESET, 13, 1)
141
+ FIELD(CFU_CTL, AXI_ERROR_EN, 12, 1)
142
+ FIELD(CFU_CTL, FLUSH_AXI, 11, 1)
143
+ FIELD(CFU_CTL, SSI_PER_SLR_PR, 10, 1)
144
+ FIELD(CFU_CTL, GCAP_CLK_EN, 9, 1)
145
+ FIELD(CFU_CTL, STATUS_SYNC_DISABLE, 8, 1)
146
+ FIELD(CFU_CTL, IGNORE_CFI_ERROR, 7, 1)
147
+ FIELD(CFU_CTL, CFRAME_DISABLE, 6, 1)
148
+ FIELD(CFU_CTL, QWORD_CNT_RESET, 5, 1)
149
+ FIELD(CFU_CTL, CRC8_DISABLE, 4, 1)
150
+ FIELD(CFU_CTL, CRC32_CHECK, 3, 1)
151
+ FIELD(CFU_CTL, DECOMPRESS, 2, 1)
152
+ FIELD(CFU_CTL, SEU_GO, 1, 1)
153
+ FIELD(CFU_CTL, CFI_LOCAL_RESET, 0, 1)
154
+REG32(CFU_CRAM_RW, 0x20)
155
+ FIELD(CFU_CRAM_RW, RFIFO_AFULL_DEPTH, 18, 9)
156
+ FIELD(CFU_CRAM_RW, RD_WAVE_CNT_LEFT, 12, 6)
157
+ FIELD(CFU_CRAM_RW, RD_WAVE_CNT, 6, 6)
158
+ FIELD(CFU_CRAM_RW, WR_WAVE_CNT, 0, 6)
159
+REG32(CFU_MASK, 0x28)
160
+REG32(CFU_CRC_EXPECT, 0x2c)
161
+REG32(CFU_CFRAME_LEFT_T0, 0x60)
162
+ FIELD(CFU_CFRAME_LEFT_T0, NUM, 0, 20)
163
+REG32(CFU_CFRAME_LEFT_T1, 0x64)
164
+ FIELD(CFU_CFRAME_LEFT_T1, NUM, 0, 20)
165
+REG32(CFU_CFRAME_LEFT_T2, 0x68)
166
+ FIELD(CFU_CFRAME_LEFT_T2, NUM, 0, 20)
167
+REG32(CFU_ROW_RANGE, 0x6c)
168
+ FIELD(CFU_ROW_RANGE, HALF_FSR, 5, 1)
169
+ FIELD(CFU_ROW_RANGE, NUM, 0, 5)
170
+REG32(CFU_STATUS, 0x100)
171
+ FIELD(CFU_STATUS, SEU_WRITE_ERROR, 30, 1)
172
+ FIELD(CFU_STATUS, FRCNT_ERROR, 29, 1)
173
+ FIELD(CFU_STATUS, RSVD_ERROR, 28, 1)
174
+ FIELD(CFU_STATUS, FDRO_ERROR, 27, 1)
175
+ FIELD(CFU_STATUS, FDRI_ERROR, 26, 1)
176
+ FIELD(CFU_STATUS, FDRI_READ_ERROR, 25, 1)
177
+ FIELD(CFU_STATUS, READ_FDRI_ERROR, 24, 1)
178
+ FIELD(CFU_STATUS, READ_SFR_ERROR, 23, 1)
179
+ FIELD(CFU_STATUS, READ_STREAM_ERROR, 22, 1)
180
+ FIELD(CFU_STATUS, UNKNOWN_STREAM_PKT, 21, 1)
181
+ FIELD(CFU_STATUS, USR_GTS, 20, 1)
182
+ FIELD(CFU_STATUS, USR_GSR, 19, 1)
183
+ FIELD(CFU_STATUS, AXI_BAD_WSTRB, 18, 1)
184
+ FIELD(CFU_STATUS, AXI_BAD_AR_SIZE, 17, 1)
185
+ FIELD(CFU_STATUS, AXI_BAD_AW_SIZE, 16, 1)
186
+ FIELD(CFU_STATUS, AXI_BAD_ARADDR, 15, 1)
187
+ FIELD(CFU_STATUS, AXI_BAD_AWADDR, 14, 1)
188
+ FIELD(CFU_STATUS, SCAN_CLEAR_PASS, 13, 1)
189
+ FIELD(CFU_STATUS, HC_SEC_ERROR, 12, 1)
190
+ FIELD(CFU_STATUS, GHIGH_B_ISHIGH, 11, 1)
191
+ FIELD(CFU_STATUS, GHIGH_B_ISLOW, 10, 1)
192
+ FIELD(CFU_STATUS, GMC_B_ISHIGH, 9, 1)
193
+ FIELD(CFU_STATUS, GMC_B_ISLOW, 8, 1)
194
+ FIELD(CFU_STATUS, GPWRDWN_B_ISHIGH, 7, 1)
195
+ FIELD(CFU_STATUS, CFI_SEU_CRC_ERROR, 6, 1)
196
+ FIELD(CFU_STATUS, CFI_SEU_ECC_ERROR, 5, 1)
197
+ FIELD(CFU_STATUS, CFI_SEU_HEARTBEAT, 4, 1)
198
+ FIELD(CFU_STATUS, SCAN_CLEAR_DONE, 3, 1)
199
+ FIELD(CFU_STATUS, HC_COMPLETE, 2, 1)
200
+ FIELD(CFU_STATUS, CFI_CFRAME_BUSY, 1, 1)
201
+ FIELD(CFU_STATUS, CFU_STREAM_BUSY, 0, 1)
202
+REG32(CFU_INTERNAL_STATUS, 0x104)
203
+ FIELD(CFU_INTERNAL_STATUS, SSI_EOS, 22, 1)
204
+ FIELD(CFU_INTERNAL_STATUS, SSI_GWE, 21, 1)
205
+ FIELD(CFU_INTERNAL_STATUS, RFIFO_EMPTY, 20, 1)
206
+ FIELD(CFU_INTERNAL_STATUS, RFIFO_FULL, 19, 1)
207
+ FIELD(CFU_INTERNAL_STATUS, SEL_SFR, 18, 1)
208
+ FIELD(CFU_INTERNAL_STATUS, STREAM_CFRAME, 17, 1)
209
+ FIELD(CFU_INTERNAL_STATUS, FDRI_PHASE, 16, 1)
210
+ FIELD(CFU_INTERNAL_STATUS, CFI_PIPE_EN, 15, 1)
211
+ FIELD(CFU_INTERNAL_STATUS, AWFIFO_DCNT, 10, 5)
212
+ FIELD(CFU_INTERNAL_STATUS, WFIFO_DCNT, 5, 5)
213
+ FIELD(CFU_INTERNAL_STATUS, REPAIR_BUSY, 4, 1)
214
+ FIELD(CFU_INTERNAL_STATUS, TRIMU_BUSY, 3, 1)
215
+ FIELD(CFU_INTERNAL_STATUS, TRIMB_BUSY, 2, 1)
216
+ FIELD(CFU_INTERNAL_STATUS, HCLEANR_BUSY, 1, 1)
217
+ FIELD(CFU_INTERNAL_STATUS, HCLEAN_BUSY, 0, 1)
218
+REG32(CFU_QWORD_CNT, 0x108)
219
+REG32(CFU_CRC_LIVE, 0x10c)
220
+REG32(CFU_PENDING_READ_CNT, 0x110)
221
+ FIELD(CFU_PENDING_READ_CNT, NUM, 0, 25)
222
+REG32(CFU_FDRI_CNT, 0x114)
223
+REG32(CFU_ECO1, 0x118)
224
+REG32(CFU_ECO2, 0x11c)
225
+
226
+#define R_MAX (R_CFU_ECO2 + 1)
227
+
228
+#define NUM_STREAM 2
229
+#define WFIFO_SZ 4
230
+
231
+struct XlnxVersalCFUAPB {
73
+ SysBusDevice parent_obj;
232
+ SysBusDevice parent_obj;
74
+
233
+ MemoryRegion iomem;
75
+ MemoryRegion mmio;
234
+ MemoryRegion iomem_stream[NUM_STREAM];
76
+
235
+ qemu_irq irq_cfu_imr;
77
+ qemu_irq irq;
236
+
78
+
237
+ /* 128-bit wfifo. */
79
+ qemu_irq cs_line;
238
+ uint32_t wfifo[WFIFO_SZ];
80
+
239
+
81
+ SSIBus *spi;
240
+ uint32_t regs[R_MAX];
82
+
241
+ RegisterInfo regs_info[R_MAX];
83
+ Fifo32 rx_fifo;
242
+
84
+ Fifo32 tx_fifo;
243
+ uint8_t fdri_row_addr;
85
+
244
+
86
+ int fifo_depth;
245
+ struct {
87
+ uint32_t frame_count;
246
+ XlnxCfiIf *cframe[15];
88
+ bool enabled;
247
+ } cfg;
89
+
248
+};
90
+ uint32_t regs[R_SPI_MAX];
249
+
91
+} MSSSpiState;
250
+/**
92
+
251
+ * This is a helper function for updating a CFI data write fifo: an array of 4
93
+#endif /* HW_MSS_SPI_H */
252
+ * uint32_t holding 128 bits of data, which are allowed to be written through 4
94
diff --git a/hw/ssi/mss-spi.c b/hw/ssi/mss-spi.c
253
+ * sequential 32-bit accesses. After the last index has been written into the
254
+ * write fifo (wfifo), the data is copied to and returned in a secondary fifo
255
+ * provided to the function (wfifo_ret), and the write fifo is cleared
256
+ * (zeroized).
257
+ *
258
+ * @addr: the address used when calculating the wfifo array index to update
259
+ * @value: the value to write into the wfifo array
260
+ * @wfifo: the wfifo to update
261
+ * @wfifo_ret: will return the wfifo data when all 128 bits have been written
262
+ *
263
+ * @return: true if all 128 bits have been updated.
264
+ */
265
+bool update_wfifo(hwaddr addr, uint64_t value,
266
+ uint32_t *wfifo, uint32_t *wfifo_ret);
267
+
268
+#endif
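
For clarity, here is a minimal usage sketch of the update_wfifo() helper declared above, assuming a hypothetical device that keeps a wfifo[WFIFO_SZ] array in its state (MyDevState and my_keyhole_write are illustrative names only); the real consumers are cfu_stream_write() in this patch and cfu_sfr_write() later in the series:

    static void my_keyhole_write(void *opaque, hwaddr addr, uint64_t value,
                                 unsigned size)
    {
        MyDevState *s = opaque;          /* hypothetical device state */
        uint32_t packet[WFIFO_SZ];

        /* Accumulate four sequential 32-bit writes into s->wfifo. */
        if (update_wfifo(addr, value, s->wfifo, packet)) {
            /*
             * All 128 bits have arrived: 'packet' now holds the data and
             * s->wfifo has been zeroized; decode the packet here.
             */
        }
    }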
269
diff --git a/hw/misc/xlnx-versal-cfu.c b/hw/misc/xlnx-versal-cfu.c
95
new file mode 100644
270
new file mode 100644
96
index XXXXXXX..XXXXXXX
271
index XXXXXXX..XXXXXXX
97
--- /dev/null
272
--- /dev/null
98
+++ b/hw/ssi/mss-spi.c
273
+++ b/hw/misc/xlnx-versal-cfu.c
99
@@ -XXX,XX +XXX,XX @@
274
@@ -XXX,XX +XXX,XX @@
100
+/*
275
+/*
101
+ * Block model of SPI controller present in
276
+ * QEMU model of the CFU Configuration Unit.
102
+ * Microsemi's SmartFusion2 and SmartFusion SoCs.
277
+ *
103
+ *
278
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
104
+ * Copyright (C) 2017 Subbaraya Sundeep <sundeep.lkml@gmail.com>
279
+ *
105
+ *
280
+ * Written by Edgar E. Iglesias <edgar.iglesias@gmail.com>,
106
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
281
+ * Sai Pavan Boddu <sai.pavan.boddu@amd.com>,
107
+ * of this software and associated documentation files (the "Software"), to deal
282
+ * Francisco Iglesias <francisco.iglesias@amd.com>
108
+ * in the Software without restriction, including without limitation the rights
283
+ *
109
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
284
+ * SPDX-License-Identifier: GPL-2.0-or-later
110
+ * copies of the Software, and to permit persons to whom the Software is
111
+ * furnished to do so, subject to the following conditions:
112
+ *
113
+ * The above copyright notice and this permission notice shall be included in
114
+ * all copies or substantial portions of the Software.
115
+ *
116
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
117
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
118
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
119
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
120
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
121
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
122
+ * THE SOFTWARE.
123
+ */
285
+ */
124
+
286
+
125
+#include "qemu/osdep.h"
287
+#include "qemu/osdep.h"
126
+#include "hw/ssi/mss-spi.h"
288
+#include "hw/sysbus.h"
289
+#include "hw/register.h"
290
+#include "hw/irq.h"
291
+#include "qemu/bitops.h"
127
+#include "qemu/log.h"
292
+#include "qemu/log.h"
128
+
293
+#include "qemu/units.h"
129
+#ifndef MSS_SPI_ERR_DEBUG
294
+#include "migration/vmstate.h"
130
+#define MSS_SPI_ERR_DEBUG 0
295
+#include "hw/qdev-properties.h"
296
+#include "hw/qdev-properties-system.h"
297
+#include "hw/misc/xlnx-versal-cfu.h"
298
+
299
+#ifndef XLNX_VERSAL_CFU_APB_ERR_DEBUG
300
+#define XLNX_VERSAL_CFU_APB_ERR_DEBUG 0
131
+#endif
301
+#endif
132
+
302
+
133
+#define DB_PRINT_L(lvl, fmt, args...) do { \
303
+#define KEYHOLE_STREAM_4K (4 * KiB)
134
+ if (MSS_SPI_ERR_DEBUG >= lvl) { \
304
+#define KEYHOLE_STREAM_256K (256 * KiB)
135
+ qemu_log("%s: " fmt "\n", __func__, ## args); \
305
+#define CFRAME_BROADCAST_ROW 0x1F
136
+ } \
306
+
137
+} while (0);
307
+bool update_wfifo(hwaddr addr, uint64_t value,
138
+
308
+ uint32_t *wfifo, uint32_t *wfifo_ret)
139
+#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args)
309
+{
140
+
310
+ unsigned int idx = extract32(addr, 2, 2);
141
+#define FIFO_CAPACITY 32
311
+
142
+
312
+ wfifo[idx] = value;
143
+#define R_SPI_CONTROL 0
313
+
144
+#define R_SPI_DFSIZE 1
314
+ if (idx == 3) {
145
+#define R_SPI_STATUS 2
315
+ memcpy(wfifo_ret, wfifo, WFIFO_SZ * sizeof(uint32_t));
146
+#define R_SPI_INTCLR 3
316
+ memset(wfifo, 0, WFIFO_SZ * sizeof(uint32_t));
147
+#define R_SPI_RX 4
317
+ return true;
148
+#define R_SPI_TX 5
318
+ }
149
+#define R_SPI_CLKGEN 6
319
+
150
+#define R_SPI_SS 7
320
+ return false;
151
+#define R_SPI_MIS 8
321
+}
152
+#define R_SPI_RIS 9
322
+
153
+
323
+static void cfu_imr_update_irq(XlnxVersalCFUAPB *s)
154
+#define S_TXDONE (1 << 0)
324
+{
155
+#define S_RXRDY (1 << 1)
325
+ bool pending = s->regs[R_CFU_ISR] & ~s->regs[R_CFU_IMR];
156
+#define S_RXCHOVRF (1 << 2)
326
+ qemu_set_irq(s->irq_cfu_imr, pending);
157
+#define S_RXFIFOFUL (1 << 4)
327
+}
158
+#define S_RXFIFOFULNXT (1 << 5)
328
+
159
+#define S_RXFIFOEMP (1 << 6)
329
+static void cfu_isr_postw(RegisterInfo *reg, uint64_t val64)
160
+#define S_RXFIFOEMPNXT (1 << 7)
330
+{
161
+#define S_TXFIFOFUL (1 << 8)
331
+ XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(reg->opaque);
162
+#define S_TXFIFOFULNXT (1 << 9)
332
+ cfu_imr_update_irq(s);
163
+#define S_TXFIFOEMP (1 << 10)
333
+}
164
+#define S_TXFIFOEMPNXT (1 << 11)
334
+
165
+#define S_FRAMESTART (1 << 12)
335
+static uint64_t cfu_ier_prew(RegisterInfo *reg, uint64_t val64)
166
+#define S_SSEL (1 << 13)
336
+{
167
+#define S_ACTIVE (1 << 14)
337
+ XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(reg->opaque);
168
+
338
+ uint32_t val = val64;
169
+#define C_ENABLE (1 << 0)
339
+
170
+#define C_MODE (1 << 1)
340
+ s->regs[R_CFU_IMR] &= ~val;
171
+#define C_INTRXDATA (1 << 4)
341
+ cfu_imr_update_irq(s);
172
+#define C_INTTXDATA (1 << 5)
342
+ return 0;
173
+#define C_INTRXOVRFLO (1 << 6)
343
+}
174
+#define C_SPS (1 << 26)
344
+
175
+#define C_BIGFIFO (1 << 29)
345
+static uint64_t cfu_idr_prew(RegisterInfo *reg, uint64_t val64)
176
+#define C_RESET (1 << 31)
346
+{
177
+
347
+ XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(reg->opaque);
178
+#define FRAMESZ_MASK 0x1F
348
+ uint32_t val = val64;
179
+#define FMCOUNT_MASK 0x00FFFF00
349
+
180
+#define FMCOUNT_SHIFT 8
350
+ s->regs[R_CFU_IMR] |= val;
181
+
351
+ cfu_imr_update_irq(s);
182
+static void txfifo_reset(MSSSpiState *s)
352
+ return 0;
183
+{
353
+}
184
+ fifo32_reset(&s->tx_fifo);
354
+
185
+
355
+static uint64_t cfu_itr_prew(RegisterInfo *reg, uint64_t val64)
186
+ s->regs[R_SPI_STATUS] &= ~S_TXFIFOFUL;
356
+{
187
+ s->regs[R_SPI_STATUS] |= S_TXFIFOEMP;
357
+ XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(reg->opaque);
188
+}
358
+ uint32_t val = val64;
189
+
359
+
190
+static void rxfifo_reset(MSSSpiState *s)
360
+ s->regs[R_CFU_ISR] |= val;
191
+{
361
+ cfu_imr_update_irq(s);
192
+ fifo32_reset(&s->rx_fifo);
362
+ return 0;
193
+
363
+}
194
+ s->regs[R_SPI_STATUS] &= ~S_RXFIFOFUL;
364
+
195
+ s->regs[R_SPI_STATUS] |= S_RXFIFOEMP;
365
+static void cfu_fgcr_postw(RegisterInfo *reg, uint64_t val64)
196
+}
366
+{
197
+
367
+ XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(reg->opaque);
198
+static void set_fifodepth(MSSSpiState *s)
368
+ uint32_t val = (uint32_t)val64;
199
+{
369
+
200
+ unsigned int size = s->regs[R_SPI_DFSIZE] & FRAMESZ_MASK;
370
+ /* Do a scan. It always looks good. */
201
+
371
+ if (FIELD_EX32(val, CFU_FGCR, SC_HBC_TRIGGER)) {
202
+ if (size <= 8) {
372
+ ARRAY_FIELD_DP32(s->regs, CFU_STATUS, SCAN_CLEAR_PASS, 1);
203
+ s->fifo_depth = 32;
373
+ ARRAY_FIELD_DP32(s->regs, CFU_STATUS, SCAN_CLEAR_DONE, 1);
204
+ } else if (size <= 16) {
374
+ }
205
+ s->fifo_depth = 16;
375
+}
206
+ } else if (size <= 32) {
376
+
207
+ s->fifo_depth = 8;
377
+static const RegisterAccessInfo cfu_apb_regs_info[] = {
208
+ } else {
378
+ { .name = "CFU_ISR", .addr = A_CFU_ISR,
209
+ s->fifo_depth = 4;
379
+ .rsvd = 0xfffffc00,
210
+ }
380
+ .w1c = 0x3ff,
211
+}
381
+ .post_write = cfu_isr_postw,
212
+
382
+ },{ .name = "CFU_IMR", .addr = A_CFU_IMR,
213
+static void update_mis(MSSSpiState *s)
383
+ .reset = 0x3ff,
214
+{
384
+ .rsvd = 0xfffffc00,
215
+ uint32_t reg = s->regs[R_SPI_CONTROL];
385
+ .ro = 0x3ff,
216
+ uint32_t tmp;
386
+ },{ .name = "CFU_IER", .addr = A_CFU_IER,
217
+
387
+ .rsvd = 0xfffffc00,
218
+ /*
388
+ .pre_write = cfu_ier_prew,
219
+ * form the Control register interrupt enable bits
389
+ },{ .name = "CFU_IDR", .addr = A_CFU_IDR,
220
+ * same as RIS, MIS and Interrupt clear registers for simplicity
390
+ .rsvd = 0xfffffc00,
221
+ */
391
+ .pre_write = cfu_idr_prew,
222
+ tmp = ((reg & C_INTRXOVRFLO) >> 4) | ((reg & C_INTRXDATA) >> 3) |
392
+ },{ .name = "CFU_ITR", .addr = A_CFU_ITR,
223
+ ((reg & C_INTTXDATA) >> 5);
393
+ .rsvd = 0xfffffc00,
224
+ s->regs[R_SPI_MIS] |= tmp & s->regs[R_SPI_RIS];
394
+ .pre_write = cfu_itr_prew,
225
+}
395
+ },{ .name = "CFU_PROTECT", .addr = A_CFU_PROTECT,
226
+
396
+ .reset = 0x1,
227
+static void spi_update_irq(MSSSpiState *s)
397
+ },{ .name = "CFU_FGCR", .addr = A_CFU_FGCR,
228
+{
398
+ .rsvd = 0xffff8000,
229
+ int irq;
399
+ .post_write = cfu_fgcr_postw,
230
+
400
+ },{ .name = "CFU_CTL", .addr = A_CFU_CTL,
231
+ update_mis(s);
401
+ .rsvd = 0xffff0000,
232
+ irq = !!(s->regs[R_SPI_MIS]);
402
+ },{ .name = "CFU_CRAM_RW", .addr = A_CFU_CRAM_RW,
233
+
403
+ .reset = 0x401f7d9,
234
+ qemu_set_irq(s->irq, irq);
404
+ .rsvd = 0xf8000000,
235
+}
405
+ },{ .name = "CFU_MASK", .addr = A_CFU_MASK,
236
+
406
+ },{ .name = "CFU_CRC_EXPECT", .addr = A_CFU_CRC_EXPECT,
237
+static void mss_spi_reset(DeviceState *d)
407
+ },{ .name = "CFU_CFRAME_LEFT_T0", .addr = A_CFU_CFRAME_LEFT_T0,
238
+{
408
+ .rsvd = 0xfff00000,
239
+ MSSSpiState *s = MSS_SPI(d);
409
+ },{ .name = "CFU_CFRAME_LEFT_T1", .addr = A_CFU_CFRAME_LEFT_T1,
240
+
410
+ .rsvd = 0xfff00000,
241
+ memset(s->regs, 0, sizeof s->regs);
411
+ },{ .name = "CFU_CFRAME_LEFT_T2", .addr = A_CFU_CFRAME_LEFT_T2,
242
+ s->regs[R_SPI_CONTROL] = 0x80000102;
412
+ .rsvd = 0xfff00000,
243
+ s->regs[R_SPI_DFSIZE] = 0x4;
413
+ },{ .name = "CFU_ROW_RANGE", .addr = A_CFU_ROW_RANGE,
244
+ s->regs[R_SPI_STATUS] = S_SSEL | S_TXFIFOEMP | S_RXFIFOEMP;
414
+ .rsvd = 0xffffffc0,
245
+ s->regs[R_SPI_CLKGEN] = 0x7;
415
+ .ro = 0x3f,
246
+ s->regs[R_SPI_RIS] = 0x0;
416
+ },{ .name = "CFU_STATUS", .addr = A_CFU_STATUS,
247
+
417
+ .rsvd = 0x80000000,
248
+ s->fifo_depth = 4;
418
+ .ro = 0x7fffffff,
249
+ s->frame_count = 1;
419
+ },{ .name = "CFU_INTERNAL_STATUS", .addr = A_CFU_INTERNAL_STATUS,
250
+ s->enabled = false;
420
+ .rsvd = 0xff800000,
251
+
421
+ .ro = 0x7fffff,
252
+ rxfifo_reset(s);
422
+ },{ .name = "CFU_QWORD_CNT", .addr = A_CFU_QWORD_CNT,
253
+ txfifo_reset(s);
423
+ .ro = 0xffffffff,
254
+}
424
+ },{ .name = "CFU_CRC_LIVE", .addr = A_CFU_CRC_LIVE,
255
+
425
+ .ro = 0xffffffff,
256
+static uint64_t
426
+ },{ .name = "CFU_PENDING_READ_CNT", .addr = A_CFU_PENDING_READ_CNT,
257
+spi_read(void *opaque, hwaddr addr, unsigned int size)
427
+ .rsvd = 0xfe000000,
258
+{
428
+ .ro = 0x1ffffff,
259
+ MSSSpiState *s = opaque;
429
+ },{ .name = "CFU_FDRI_CNT", .addr = A_CFU_FDRI_CNT,
260
+ uint32_t ret = 0;
430
+ .ro = 0xffffffff,
261
+
431
+ },{ .name = "CFU_ECO1", .addr = A_CFU_ECO1,
262
+ addr >>= 2;
432
+ },{ .name = "CFU_ECO2", .addr = A_CFU_ECO2,
263
+ switch (addr) {
433
+ }
264
+ case R_SPI_RX:
434
+};
265
+ s->regs[R_SPI_STATUS] &= ~S_RXFIFOFUL;
435
+
266
+ s->regs[R_SPI_STATUS] &= ~S_RXCHOVRF;
436
+static void cfu_apb_reset(DeviceState *dev)
267
+ ret = fifo32_pop(&s->rx_fifo);
437
+{
268
+ if (fifo32_is_empty(&s->rx_fifo)) {
438
+ XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(dev);
269
+ s->regs[R_SPI_STATUS] |= S_RXFIFOEMP;
439
+ unsigned int i;
270
+ }
440
+
271
+ break;
441
+ for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
272
+
442
+ register_reset(&s->regs_info[i]);
273
+ case R_SPI_MIS:
443
+ }
274
+ update_mis(s);
444
+ memset(s->wfifo, 0, WFIFO_SZ * sizeof(uint32_t));
275
+ ret = s->regs[R_SPI_MIS];
445
+
276
+ break;
446
+ s->regs[R_CFU_STATUS] |= R_CFU_STATUS_HC_COMPLETE_MASK;
277
+
447
+ cfu_imr_update_irq(s);
278
+ default:
448
+}
279
+ if (addr < ARRAY_SIZE(s->regs)) {
449
+
280
+ ret = s->regs[addr];
450
+static const MemoryRegionOps cfu_apb_ops = {
281
+ } else {
451
+ .read = register_read_memory,
282
+ qemu_log_mask(LOG_GUEST_ERROR,
452
+ .write = register_write_memory,
283
+ "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__,
453
+ .endianness = DEVICE_LITTLE_ENDIAN,
284
+ addr * 4);
454
+ .valid = {
285
+ return ret;
455
+ .min_access_size = 4,
286
+ }
456
+ .max_access_size = 4,
287
+ break;
457
+ },
288
+ }
458
+};
289
+
459
+
290
+ DB_PRINT("addr=0x%" HWADDR_PRIx " = 0x%" PRIx32, addr * 4, ret);
460
+static void cfu_transfer_cfi_packet(XlnxVersalCFUAPB *s, uint8_t row_addr,
291
+ spi_update_irq(s);
461
+ XlnxCfiPacket *pkt)
292
+ return ret;
462
+{
293
+}
463
+ if (row_addr == CFRAME_BROADCAST_ROW) {
294
+
464
+ for (int i = 0; i < ARRAY_SIZE(s->cfg.cframe); i++) {
295
+static void assert_cs(MSSSpiState *s)
465
+ if (s->cfg.cframe[i]) {
296
+{
466
+ xlnx_cfi_transfer_packet(s->cfg.cframe[i], pkt);
297
+ qemu_set_irq(s->cs_line, 0);
298
+}
299
+
300
+static void deassert_cs(MSSSpiState *s)
301
+{
302
+ qemu_set_irq(s->cs_line, 1);
303
+}
304
+
305
+static void spi_flush_txfifo(MSSSpiState *s)
306
+{
307
+ uint32_t tx;
308
+ uint32_t rx;
309
+ bool sps = !!(s->regs[R_SPI_CONTROL] & C_SPS);
310
+
311
+ /*
312
+ * Chip Select(CS) is automatically controlled by this controller.
313
+ * If SPS bit is set in Control register then CS is asserted
314
+ * until all the frames set in frame count of Control register are
315
+ * transferred. If SPS is not set then CS pulses between frames.
316
+ * Note that the Slave Select register specifies which of the CS lines
317
+ * has to be controlled automatically by the controller. Bits SS[7:1] are for
318
+ * masters in the FPGA fabric; since we model only the Microcontroller subsystem
319
+ * of Smartfusion2, we control only one CS (SS[0]) line.
320
+ */
321
+ while (!fifo32_is_empty(&s->tx_fifo) && s->frame_count) {
322
+ assert_cs(s);
323
+
324
+ s->regs[R_SPI_STATUS] &= ~(S_TXDONE | S_RXRDY);
325
+
326
+ tx = fifo32_pop(&s->tx_fifo);
327
+ DB_PRINT("data tx:0x%" PRIx32, tx);
328
+ rx = ssi_transfer(s->spi, tx);
329
+ DB_PRINT("data rx:0x%" PRIx32, rx);
330
+
331
+ if (fifo32_num_used(&s->rx_fifo) == s->fifo_depth) {
332
+ s->regs[R_SPI_STATUS] |= S_RXCHOVRF;
333
+ s->regs[R_SPI_RIS] |= S_RXCHOVRF;
334
+ } else {
335
+ fifo32_push(&s->rx_fifo, rx);
336
+ s->regs[R_SPI_STATUS] &= ~S_RXFIFOEMP;
337
+ if (fifo32_num_used(&s->rx_fifo) == (s->fifo_depth - 1)) {
338
+ s->regs[R_SPI_STATUS] |= S_RXFIFOFULNXT;
339
+ } else if (fifo32_num_used(&s->rx_fifo) == s->fifo_depth) {
340
+ s->regs[R_SPI_STATUS] |= S_RXFIFOFUL;
341
+ }
467
+ }
342
+ }
468
+ }
343
+ s->frame_count--;
469
+ } else {
344
+ if (!sps) {
470
+ assert(row_addr < ARRAY_SIZE(s->cfg.cframe));
345
+ deassert_cs(s);
471
+
472
+ if (s->cfg.cframe[row_addr]) {
473
+ xlnx_cfi_transfer_packet(s->cfg.cframe[row_addr], pkt);
474
+ }
475
+ }
476
+}
477
+
478
+static uint64_t cfu_stream_read(void *opaque, hwaddr addr, unsigned size)
479
+{
480
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Unsupported read from addr=%"
481
+ HWADDR_PRIx "\n", __func__, addr);
482
+ return 0;
483
+}
484
+
485
+static void cfu_stream_write(void *opaque, hwaddr addr, uint64_t value,
486
+ unsigned size)
487
+{
488
+ XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(opaque);
489
+ uint32_t wfifo[WFIFO_SZ];
490
+
491
+ if (update_wfifo(addr, value, s->wfifo, wfifo)) {
492
+ uint8_t packet_type, row_addr, reg_addr;
493
+
494
+ packet_type = extract32(wfifo[0], 24, 8);
495
+ row_addr = extract32(wfifo[0], 16, 5);
496
+ reg_addr = extract32(wfifo[0], 8, 6);
497
+
498
+ /* Compressed bitstreams are not supported yet. */
499
+ if (ARRAY_FIELD_EX32(s->regs, CFU_CTL, DECOMPRESS) == 0) {
500
+ if (s->regs[R_CFU_FDRI_CNT]) {
501
+ XlnxCfiPacket pkt = {
502
+ .reg_addr = CFRAME_FDRI,
503
+ .data[0] = wfifo[0],
504
+ .data[1] = wfifo[1],
505
+ .data[2] = wfifo[2],
506
+ .data[3] = wfifo[3]
507
+ };
508
+
509
+ cfu_transfer_cfi_packet(s, s->fdri_row_addr, &pkt);
510
+
511
+ s->regs[R_CFU_FDRI_CNT]--;
512
+
513
+ } else if (packet_type == PACKET_TYPE_CFU &&
514
+ reg_addr == CFRAME_FDRI) {
515
+
516
+ /* Load R_CFU_FDRI_CNT, must be multiple of 25 */
517
+ s->regs[R_CFU_FDRI_CNT] = wfifo[1];
518
+
519
+ /* Store target row_addr */
520
+ s->fdri_row_addr = row_addr;
521
+
522
+ if (wfifo[1] % 25 != 0) {
523
+ qemu_log_mask(LOG_GUEST_ERROR,
524
+ "CFU FDRI_CNT is not loaded with "
525
+ "a multiple of 25 value\n");
526
+ }
527
+
528
+ } else if (packet_type == PACKET_TYPE_CFRAME) {
529
+ XlnxCfiPacket pkt = {
530
+ .reg_addr = reg_addr,
531
+ .data[0] = wfifo[1],
532
+ .data[1] = wfifo[2],
533
+ .data[2] = wfifo[3],
534
+ };
535
+ cfu_transfer_cfi_packet(s, row_addr, &pkt);
536
+ }
346
+ }
537
+ }
347
+ }
538
+ }
348
+
539
+}
349
+ if (!s->frame_count) {
540
+
350
+ s->frame_count = (s->regs[R_SPI_CONTROL] & FMCOUNT_MASK) >>
541
+static const MemoryRegionOps cfu_stream_ops = {
351
+ FMCOUNT_SHIFT;
542
+ .read = cfu_stream_read,
352
+ deassert_cs(s);
543
+ .write = cfu_stream_write,
353
+ s->regs[R_SPI_RIS] |= S_TXDONE | S_RXRDY;
544
+ .endianness = DEVICE_LITTLE_ENDIAN,
354
+ s->regs[R_SPI_STATUS] |= S_TXDONE | S_RXRDY;
355
+ }
356
+}
357
+
358
+static void spi_write(void *opaque, hwaddr addr,
359
+ uint64_t val64, unsigned int size)
360
+{
361
+ MSSSpiState *s = opaque;
362
+ uint32_t value = val64;
363
+
364
+ DB_PRINT("addr=0x%" HWADDR_PRIx " =0x%" PRIx32, addr, value);
365
+ addr >>= 2;
366
+
367
+ switch (addr) {
368
+ case R_SPI_TX:
369
+ /* adding to already full FIFO */
370
+ if (fifo32_num_used(&s->tx_fifo) == s->fifo_depth) {
371
+ break;
372
+ }
373
+ s->regs[R_SPI_STATUS] &= ~S_TXFIFOEMP;
374
+ fifo32_push(&s->tx_fifo, value);
375
+ if (fifo32_num_used(&s->tx_fifo) == (s->fifo_depth - 1)) {
376
+ s->regs[R_SPI_STATUS] |= S_TXFIFOFULNXT;
377
+ } else if (fifo32_num_used(&s->tx_fifo) == s->fifo_depth) {
378
+ s->regs[R_SPI_STATUS] |= S_TXFIFOFUL;
379
+ }
380
+ if (s->enabled) {
381
+ spi_flush_txfifo(s);
382
+ }
383
+ break;
384
+
385
+ case R_SPI_CONTROL:
386
+ s->regs[R_SPI_CONTROL] = value;
387
+ if (value & C_BIGFIFO) {
388
+ set_fifodepth(s);
389
+ } else {
390
+ s->fifo_depth = 4;
391
+ }
392
+ s->enabled = value & C_ENABLE;
393
+ s->frame_count = (value & FMCOUNT_MASK) >> FMCOUNT_SHIFT;
394
+ if (value & C_RESET) {
395
+ mss_spi_reset(DEVICE(s));
396
+ }
397
+ break;
398
+
399
+ case R_SPI_DFSIZE:
400
+ if (s->enabled) {
401
+ break;
402
+ }
403
+ s->regs[R_SPI_DFSIZE] = value;
404
+ break;
405
+
406
+ case R_SPI_INTCLR:
407
+ s->regs[R_SPI_INTCLR] = value;
408
+ if (value & S_TXDONE) {
409
+ s->regs[R_SPI_RIS] &= ~S_TXDONE;
410
+ }
411
+ if (value & S_RXRDY) {
412
+ s->regs[R_SPI_RIS] &= ~S_RXRDY;
413
+ }
414
+ if (value & S_RXCHOVRF) {
415
+ s->regs[R_SPI_RIS] &= ~S_RXCHOVRF;
416
+ }
417
+ break;
418
+
419
+ case R_SPI_MIS:
420
+ case R_SPI_STATUS:
421
+ case R_SPI_RIS:
422
+ qemu_log_mask(LOG_GUEST_ERROR,
423
+ "%s: Write to read only register 0x%" HWADDR_PRIx "\n",
424
+ __func__, addr * 4);
425
+ break;
426
+
427
+ default:
428
+ if (addr < ARRAY_SIZE(s->regs)) {
429
+ s->regs[addr] = value;
430
+ } else {
431
+ qemu_log_mask(LOG_GUEST_ERROR,
432
+ "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__,
433
+ addr * 4);
434
+ }
435
+ break;
436
+ }
437
+
438
+ spi_update_irq(s);
439
+}
440
+
441
+static const MemoryRegionOps spi_ops = {
442
+ .read = spi_read,
443
+ .write = spi_write,
444
+ .endianness = DEVICE_NATIVE_ENDIAN,
445
+ .valid = {
545
+ .valid = {
446
+ .min_access_size = 1,
546
+ .min_access_size = 4,
447
+ .max_access_size = 4
547
+ .max_access_size = 8,
448
+ }
548
+ },
449
+};
549
+};
450
+
550
+
451
+static void mss_spi_realize(DeviceState *dev, Error **errp)
551
+static void cfu_apb_init(Object *obj)
452
+{
552
+{
453
+ MSSSpiState *s = MSS_SPI(dev);
553
+ XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(obj);
454
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
554
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
455
+
555
+ RegisterInfoArray *reg_array;
456
+ s->spi = ssi_create_bus(dev, "spi");
556
+ unsigned int i;
457
+
557
+ char *name;
458
+ sysbus_init_irq(sbd, &s->irq);
558
+
459
+ ssi_auto_connect_slaves(dev, &s->cs_line, s->spi);
559
+ memory_region_init(&s->iomem, obj, TYPE_XLNX_VERSAL_CFU_APB, R_MAX * 4);
460
+ sysbus_init_irq(sbd, &s->cs_line);
560
+ reg_array =
461
+
561
+ register_init_block32(DEVICE(obj), cfu_apb_regs_info,
462
+ memory_region_init_io(&s->mmio, OBJECT(s), &spi_ops, s,
562
+ ARRAY_SIZE(cfu_apb_regs_info),
463
+ TYPE_MSS_SPI, R_SPI_MAX * 4);
563
+ s->regs_info, s->regs,
464
+ sysbus_init_mmio(sbd, &s->mmio);
564
+ &cfu_apb_ops,
465
+
565
+ XLNX_VERSAL_CFU_APB_ERR_DEBUG,
466
+ fifo32_create(&s->tx_fifo, FIFO_CAPACITY);
566
+ R_MAX * 4);
467
+ fifo32_create(&s->rx_fifo, FIFO_CAPACITY);
567
+ memory_region_add_subregion(&s->iomem,
468
+}
568
+ 0x0,
469
+
569
+ &reg_array->mem);
470
+static const VMStateDescription vmstate_mss_spi = {
570
+ sysbus_init_mmio(sbd, &s->iomem);
471
+ .name = TYPE_MSS_SPI,
571
+ for (i = 0; i < NUM_STREAM; i++) {
572
+ name = g_strdup_printf(TYPE_XLNX_VERSAL_CFU_APB "-stream%d", i);
573
+ memory_region_init_io(&s->iomem_stream[i], obj, &cfu_stream_ops, s,
574
+ name, i == 0 ? KEYHOLE_STREAM_4K :
575
+ KEYHOLE_STREAM_256K);
576
+ sysbus_init_mmio(sbd, &s->iomem_stream[i]);
577
+ g_free(name);
578
+ }
579
+ sysbus_init_irq(sbd, &s->irq_cfu_imr);
580
+}
581
+
582
+static Property cfu_props[] = {
583
+ DEFINE_PROP_LINK("cframe0", XlnxVersalCFUAPB, cfg.cframe[0],
584
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
585
+ DEFINE_PROP_LINK("cframe1", XlnxVersalCFUAPB, cfg.cframe[1],
586
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
587
+ DEFINE_PROP_LINK("cframe2", XlnxVersalCFUAPB, cfg.cframe[2],
588
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
589
+ DEFINE_PROP_LINK("cframe3", XlnxVersalCFUAPB, cfg.cframe[3],
590
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
591
+ DEFINE_PROP_LINK("cframe4", XlnxVersalCFUAPB, cfg.cframe[4],
592
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
593
+ DEFINE_PROP_LINK("cframe5", XlnxVersalCFUAPB, cfg.cframe[5],
594
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
595
+ DEFINE_PROP_LINK("cframe6", XlnxVersalCFUAPB, cfg.cframe[6],
596
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
597
+ DEFINE_PROP_LINK("cframe7", XlnxVersalCFUAPB, cfg.cframe[7],
598
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
599
+ DEFINE_PROP_LINK("cframe8", XlnxVersalCFUAPB, cfg.cframe[8],
600
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
601
+ DEFINE_PROP_LINK("cframe9", XlnxVersalCFUAPB, cfg.cframe[9],
602
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
603
+ DEFINE_PROP_LINK("cframe10", XlnxVersalCFUAPB, cfg.cframe[10],
604
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
605
+ DEFINE_PROP_LINK("cframe11", XlnxVersalCFUAPB, cfg.cframe[11],
606
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
607
+ DEFINE_PROP_LINK("cframe12", XlnxVersalCFUAPB, cfg.cframe[12],
608
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
609
+ DEFINE_PROP_LINK("cframe13", XlnxVersalCFUAPB, cfg.cframe[13],
610
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
611
+ DEFINE_PROP_LINK("cframe14", XlnxVersalCFUAPB, cfg.cframe[14],
612
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
613
+ DEFINE_PROP_END_OF_LIST(),
614
+};
615
+
616
+static const VMStateDescription vmstate_cfu_apb = {
617
+ .name = TYPE_XLNX_VERSAL_CFU_APB,
472
+ .version_id = 1,
618
+ .version_id = 1,
473
+ .minimum_version_id = 1,
619
+ .minimum_version_id = 1,
474
+ .fields = (VMStateField[]) {
620
+ .fields = (VMStateField[]) {
475
+ VMSTATE_FIFO32(tx_fifo, MSSSpiState),
621
+ VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFUAPB, 4),
476
+ VMSTATE_FIFO32(rx_fifo, MSSSpiState),
622
+ VMSTATE_UINT32_ARRAY(regs, XlnxVersalCFUAPB, R_MAX),
477
+ VMSTATE_UINT32_ARRAY(regs, MSSSpiState, R_SPI_MAX),
623
+ VMSTATE_UINT8(fdri_row_addr, XlnxVersalCFUAPB),
478
+ VMSTATE_END_OF_LIST()
624
+ VMSTATE_END_OF_LIST(),
479
+ }
625
+ }
480
+};
626
+};
481
+
627
+
482
+static void mss_spi_class_init(ObjectClass *klass, void *data)
628
+static void cfu_apb_class_init(ObjectClass *klass, void *data)
483
+{
629
+{
484
+ DeviceClass *dc = DEVICE_CLASS(klass);
630
+ DeviceClass *dc = DEVICE_CLASS(klass);
485
+
631
+
486
+ dc->realize = mss_spi_realize;
632
+ dc->reset = cfu_apb_reset;
487
+ dc->reset = mss_spi_reset;
633
+ dc->vmsd = &vmstate_cfu_apb;
488
+ dc->vmsd = &vmstate_mss_spi;
634
+ device_class_set_props(dc, cfu_props);
489
+}
635
+}
490
+
636
+
491
+static const TypeInfo mss_spi_info = {
637
+static const TypeInfo cfu_apb_info = {
492
+ .name = TYPE_MSS_SPI,
638
+ .name = TYPE_XLNX_VERSAL_CFU_APB,
493
+ .parent = TYPE_SYS_BUS_DEVICE,
639
+ .parent = TYPE_SYS_BUS_DEVICE,
494
+ .instance_size = sizeof(MSSSpiState),
640
+ .instance_size = sizeof(XlnxVersalCFUAPB),
495
+ .class_init = mss_spi_class_init,
641
+ .class_init = cfu_apb_class_init,
642
+ .instance_init = cfu_apb_init,
643
+ .interfaces = (InterfaceInfo[]) {
644
+ { TYPE_XLNX_CFI_IF },
645
+ { }
646
+ }
496
+};
647
+};
497
+
648
+
498
+static void mss_spi_register_types(void)
649
+static void cfu_apb_register_types(void)
499
+{
650
+{
500
+ type_register_static(&mss_spi_info);
651
+ type_register_static(&cfu_apb_info);
501
+}
652
+}
502
+
653
+
503
+type_init(mss_spi_register_types)
654
+type_init(cfu_apb_register_types)
655
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
656
index XXXXXXX..XXXXXXX 100644
657
--- a/hw/misc/meson.build
658
+++ b/hw/misc/meson.build
659
@@ -XXX,XX +XXX,XX @@ specific_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-crl.c'))
660
system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files(
661
'xlnx-versal-xramc.c',
662
'xlnx-versal-pmc-iou-slcr.c',
663
+ 'xlnx-versal-cfu.c',
664
'xlnx-cfi-if.c',
665
))
666
system_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c'))
504
--
667
--
505
2.7.4
668
2.34.1
506
507
1
From: Subbaraya Sundeep <sundeep.lkml@gmail.com>
1
From: Francisco Iglesias <francisco.iglesias@amd.com>
2
2
3
Added the System register block of Smartfusion2.
3
Introduce a model of Xilinx Versal's Configuration Frame Unit's data out
4
This block has PLL registers which are accessed by the guest.
4
port (CFU_FDRO).
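
The FDRO is effectively a read-only keyhole: each 32-bit guest read pops one word of CFI read-back data, and reads from an empty fifo return 0. A sketch of that behaviour (hypothetical helper name, mirroring cfu_fdro_read() in the patch below):

    static uint32_t fdro_pop_word(Fifo32 *fdro_data)
    {
        /* Return the next queued CFI word, or 0 when nothing is pending. */
        return fifo32_is_empty(fdro_data) ? 0 : fifo32_pop(fdro_data);
    }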
5
5
6
Signed-off-by: Subbaraya Sundeep <sundeep.lkml@gmail.com>
6
Signed-off-by: Francisco Iglesias <francisco.iglesias@amd.com>
7
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Acked-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Message-id: 20230831165701.2016397-4-francisco.iglesias@amd.com
9
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
10
Message-id: 20170920201737.25723-3-f4bug@amsat.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
10
---
13
hw/misc/Makefile.objs | 1 +
11
include/hw/misc/xlnx-versal-cfu.h | 12 ++++
14
include/hw/misc/msf2-sysreg.h | 77 ++++++++++++++++++++
12
hw/misc/xlnx-versal-cfu.c | 96 +++++++++++++++++++++++++++++++
15
hw/misc/msf2-sysreg.c | 160 ++++++++++++++++++++++++++++++++++++++++++
13
2 files changed, 108 insertions(+)
16
hw/misc/trace-events | 5 ++
17
4 files changed, 243 insertions(+)
18
create mode 100644 include/hw/misc/msf2-sysreg.h
19
create mode 100644 hw/misc/msf2-sysreg.c
20
14
21
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
15
diff --git a/include/hw/misc/xlnx-versal-cfu.h b/include/hw/misc/xlnx-versal-cfu.h
22
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
23
--- a/hw/misc/Makefile.objs
17
--- a/include/hw/misc/xlnx-versal-cfu.h
24
+++ b/hw/misc/Makefile.objs
18
+++ b/include/hw/misc/xlnx-versal-cfu.h
25
@@ -XXX,XX +XXX,XX @@ obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o
26
obj-$(CONFIG_AUX) += auxbus.o
27
obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o aspeed_sdmc.o
28
obj-y += mmio_interface.o
29
+obj-$(CONFIG_MSF2) += msf2-sysreg.o
30
diff --git a/include/hw/misc/msf2-sysreg.h b/include/hw/misc/msf2-sysreg.h
31
new file mode 100644
32
index XXXXXXX..XXXXXXX
33
--- /dev/null
34
+++ b/include/hw/misc/msf2-sysreg.h
35
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@
36
+/*
20
#include "hw/sysbus.h"
37
+ * Microsemi SmartFusion2 SYSREG
21
#include "hw/register.h"
38
+ *
22
#include "hw/misc/xlnx-cfi-if.h"
39
+ * Copyright (c) 2017 Subbaraya Sundeep <sundeep.lkml@gmail.com>
23
+#include "qemu/fifo32.h"
40
+ *
24
41
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
25
#define TYPE_XLNX_VERSAL_CFU_APB "xlnx,versal-cfu-apb"
42
+ * of this software and associated documentation files (the "Software"), to deal
26
OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUAPB, XLNX_VERSAL_CFU_APB)
43
+ * in the Software without restriction, including without limitation the rights
27
44
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
28
+#define TYPE_XLNX_VERSAL_CFU_FDRO "xlnx,versal-cfu-fdro"
45
+ * copies of the Software, and to permit persons to whom the Software is
29
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUFDRO, XLNX_VERSAL_CFU_FDRO)
46
+ * furnished to do so, subject to the following conditions:
47
+ *
48
+ * The above copyright notice and this permission notice shall be included in
49
+ * all copies or substantial portions of the Software.
50
+ *
51
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
52
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
53
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
54
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
55
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
56
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
57
+ * THE SOFTWARE.
58
+ */
59
+
30
+
60
+#ifndef HW_MSF2_SYSREG_H
31
REG32(CFU_ISR, 0x0)
61
+#define HW_MSF2_SYSREG_H
32
FIELD(CFU_ISR, USR_GTS_EVENT, 9, 1)
33
FIELD(CFU_ISR, USR_GSR_EVENT, 8, 1)
34
@@ -XXX,XX +XXX,XX @@ struct XlnxVersalCFUAPB {
35
} cfg;
36
};
37
62
+
38
+
63
+#include "hw/sysbus.h"
39
+struct XlnxVersalCFUFDRO {
40
+ SysBusDevice parent_obj;
41
+ MemoryRegion iomem_fdro;
64
+
42
+
65
+enum {
43
+ Fifo32 fdro_data;
66
+ ESRAM_CR = 0x00 / 4,
67
+ ESRAM_MAX_LAT,
68
+ DDR_CR,
69
+ ENVM_CR,
70
+ ENVM_REMAP_BASE_CR,
71
+ ENVM_REMAP_FAB_CR,
72
+ CC_CR,
73
+ CC_REGION_CR,
74
+ CC_LOCK_BASE_ADDR_CR,
75
+ CC_FLUSH_INDX_CR,
76
+ DDRB_BUF_TIMER_CR,
77
+ DDRB_NB_ADDR_CR,
78
+ DDRB_NB_SIZE_CR,
79
+ DDRB_CR,
80
+
81
+ SOFT_RESET_CR = 0x48 / 4,
82
+ M3_CR,
83
+
84
+ GPIO_SYSRESET_SEL_CR = 0x58 / 4,
85
+
86
+ MDDR_CR = 0x60 / 4,
87
+
88
+ MSSDDR_PLL_STATUS_LOW_CR = 0x90 / 4,
89
+ MSSDDR_PLL_STATUS_HIGH_CR,
90
+ MSSDDR_FACC1_CR,
91
+ MSSDDR_FACC2_CR,
92
+
93
+ MSSDDR_PLL_STATUS = 0x150 / 4,
94
+};
44
+};
95
+
45
+
96
+#define MSF2_SYSREG_MMIO_SIZE 0x300
46
/**
47
* This is a helper function for updating a CFI data write fifo, an array of 4
48
* uint32_t and 128 bits of data that are allowed to be written through 4
49
diff --git a/hw/misc/xlnx-versal-cfu.c b/hw/misc/xlnx-versal-cfu.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/hw/misc/xlnx-versal-cfu.c
52
+++ b/hw/misc/xlnx-versal-cfu.c
53
@@ -XXX,XX +XXX,XX @@ static void cfu_stream_write(void *opaque, hwaddr addr, uint64_t value,
54
}
55
}
56
57
+static uint64_t cfu_fdro_read(void *opaque, hwaddr addr, unsigned size)
58
+{
59
+ XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(opaque);
60
+ uint64_t ret = 0;
97
+
61
+
98
+#define TYPE_MSF2_SYSREG "msf2-sysreg"
62
+ if (!fifo32_is_empty(&s->fdro_data)) {
99
+#define MSF2_SYSREG(obj) OBJECT_CHECK(MSF2SysregState, (obj), TYPE_MSF2_SYSREG)
63
+ ret = fifo32_pop(&s->fdro_data);
100
+
101
+typedef struct MSF2SysregState {
102
+ SysBusDevice parent_obj;
103
+
104
+ MemoryRegion iomem;
105
+
106
+ uint8_t apb0div;
107
+ uint8_t apb1div;
108
+
109
+ uint32_t regs[MSF2_SYSREG_MMIO_SIZE / 4];
110
+} MSF2SysregState;
111
+
112
+#endif /* HW_MSF2_SYSREG_H */
113
diff --git a/hw/misc/msf2-sysreg.c b/hw/misc/msf2-sysreg.c
114
new file mode 100644
115
index XXXXXXX..XXXXXXX
116
--- /dev/null
117
+++ b/hw/misc/msf2-sysreg.c
118
@@ -XXX,XX +XXX,XX @@
119
+/*
120
+ * System Register block model of Microsemi SmartFusion2.
121
+ *
122
+ * Copyright (c) 2017 Subbaraya Sundeep <sundeep.lkml@gmail.com>
123
+ *
124
+ * This program is free software; you can redistribute it and/or
125
+ * modify it under the terms of the GNU General Public License
126
+ * as published by the Free Software Foundation; either version
127
+ * 2 of the License, or (at your option) any later version.
128
+ *
129
+ * You should have received a copy of the GNU General Public License along
130
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
131
+ */
132
+
133
+#include "qemu/osdep.h"
134
+#include "qapi/error.h"
135
+#include "qemu/log.h"
136
+#include "hw/misc/msf2-sysreg.h"
137
+#include "qemu/error-report.h"
138
+#include "trace.h"
139
+
140
+static inline int msf2_divbits(uint32_t div)
141
+{
142
+ int r = ctz32(div);
143
+
144
+ return (div < 8) ? r : r + 1;
145
+}
146
+
147
+static void msf2_sysreg_reset(DeviceState *d)
148
+{
149
+ MSF2SysregState *s = MSF2_SYSREG(d);
150
+
151
+ s->regs[MSSDDR_PLL_STATUS_LOW_CR] = 0x021A2358;
152
+ s->regs[MSSDDR_PLL_STATUS] = 0x3;
153
+ s->regs[MSSDDR_FACC1_CR] = msf2_divbits(s->apb0div) << 5 |
154
+ msf2_divbits(s->apb1div) << 2;
155
+}
156
+
157
+static uint64_t msf2_sysreg_read(void *opaque, hwaddr offset,
158
+ unsigned size)
159
+{
160
+ MSF2SysregState *s = opaque;
161
+ uint32_t ret = 0;
162
+
163
+ offset >>= 2;
164
+ if (offset < ARRAY_SIZE(s->regs)) {
165
+ ret = s->regs[offset];
166
+ trace_msf2_sysreg_read(offset << 2, ret);
167
+ } else {
168
+ qemu_log_mask(LOG_GUEST_ERROR,
169
+ "%s: Bad offset 0x%08" HWADDR_PRIx "\n", __func__,
170
+ offset << 2);
171
+ }
64
+ }
172
+
65
+
173
+ return ret;
66
+ return ret;
174
+}
67
+}
175
+
68
+
176
+static void msf2_sysreg_write(void *opaque, hwaddr offset,
69
+static void cfu_fdro_write(void *opaque, hwaddr addr, uint64_t value,
177
+ uint64_t val, unsigned size)
70
+ unsigned size)
178
+{
71
+{
179
+ MSF2SysregState *s = opaque;
72
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Unsupported write from addr=%"
180
+ uint32_t newval = val;
73
+ HWADDR_PRIx "\n", __func__, addr);
74
+}
181
+
75
+
182
+ offset >>= 2;
76
static const MemoryRegionOps cfu_stream_ops = {
77
.read = cfu_stream_read,
78
.write = cfu_stream_write,
79
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps cfu_stream_ops = {
80
},
81
};
82
83
+static const MemoryRegionOps cfu_fdro_ops = {
84
+ .read = cfu_fdro_read,
85
+ .write = cfu_fdro_write,
86
+ .endianness = DEVICE_LITTLE_ENDIAN,
87
+ .valid = {
88
+ .min_access_size = 4,
89
+ .max_access_size = 4,
90
+ },
91
+};
183
+
92
+
184
+ switch (offset) {
93
static void cfu_apb_init(Object *obj)
185
+ case MSSDDR_PLL_STATUS:
94
{
186
+ trace_msf2_sysreg_write_pll_status();
95
XlnxVersalCFUAPB *s = XLNX_VERSAL_CFU_APB(obj);
187
+ break;
96
@@ -XXX,XX +XXX,XX @@ static void cfu_apb_init(Object *obj)
97
sysbus_init_irq(sbd, &s->irq_cfu_imr);
98
}
99
100
+static void cfu_fdro_init(Object *obj)
101
+{
102
+ XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(obj);
103
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
188
+
104
+
189
+ case ESRAM_CR:
105
+ memory_region_init_io(&s->iomem_fdro, obj, &cfu_fdro_ops, s,
190
+ case DDR_CR:
106
+ TYPE_XLNX_VERSAL_CFU_FDRO, KEYHOLE_STREAM_4K);
191
+ case ENVM_REMAP_BASE_CR:
107
+ sysbus_init_mmio(sbd, &s->iomem_fdro);
192
+ if (newval != s->regs[offset]) {
108
+ fifo32_create(&s->fdro_data, 8 * KiB / sizeof(uint32_t));
193
+ qemu_log_mask(LOG_UNIMP,
109
+}
194
+ TYPE_MSF2_SYSREG": remapping not supported\n");
110
+
111
+static void cfu_fdro_reset_enter(Object *obj, ResetType type)
112
+{
113
+ XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(obj);
114
+
115
+ fifo32_reset(&s->fdro_data);
116
+}
117
+
118
+static void cfu_fdro_cfi_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt)
119
+{
120
+ XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(cfi_if);
121
+
122
+ if (fifo32_num_free(&s->fdro_data) >= ARRAY_SIZE(pkt->data)) {
123
+ for (int i = 0; i < ARRAY_SIZE(pkt->data); i++) {
124
+ fifo32_push(&s->fdro_data, pkt->data[i]);
195
+ }
125
+ }
196
+ break;
126
+ } else {
197
+
127
+ /* It is a guest programming error to let the fifo fill up; drop the data. */
198
+ default:
128
+ qemu_log_mask(LOG_GUEST_ERROR,
199
+ if (offset < ARRAY_SIZE(s->regs)) {
129
+ "CFU_FDRO: CFI data dropped due to full read fifo\n");
200
+ trace_msf2_sysreg_write(offset << 2, newval, s->regs[offset]);
201
+ s->regs[offset] = newval;
202
+ } else {
203
+ qemu_log_mask(LOG_GUEST_ERROR,
204
+ "%s: Bad offset 0x%08" HWADDR_PRIx "\n", __func__,
205
+ offset << 2);
206
+ }
207
+ break;
208
+ }
130
+ }
209
+}
131
+}
210
+
132
+
211
+static const MemoryRegionOps sysreg_ops = {
133
static Property cfu_props[] = {
212
+ .read = msf2_sysreg_read,
134
DEFINE_PROP_LINK("cframe0", XlnxVersalCFUAPB, cfg.cframe[0],
213
+ .write = msf2_sysreg_write,
135
TYPE_XLNX_CFI_IF, XlnxCfiIf *),
214
+ .endianness = DEVICE_NATIVE_ENDIAN,
136
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_cfu_apb = {
215
+};
137
}
216
+
138
};
217
+static void msf2_sysreg_init(Object *obj)
139
218
+{
140
+static const VMStateDescription vmstate_cfu_fdro = {
219
+ MSF2SysregState *s = MSF2_SYSREG(obj);
141
+ .name = TYPE_XLNX_VERSAL_CFU_FDRO,
220
+
221
+ memory_region_init_io(&s->iomem, obj, &sysreg_ops, s, TYPE_MSF2_SYSREG,
222
+ MSF2_SYSREG_MMIO_SIZE);
223
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem);
224
+}
225
+
226
+static const VMStateDescription vmstate_msf2_sysreg = {
227
+ .name = TYPE_MSF2_SYSREG,
228
+ .version_id = 1,
142
+ .version_id = 1,
229
+ .minimum_version_id = 1,
143
+ .minimum_version_id = 1,
230
+ .fields = (VMStateField[]) {
144
+ .fields = (VMStateField[]) {
231
+ VMSTATE_UINT32_ARRAY(regs, MSF2SysregState, MSF2_SYSREG_MMIO_SIZE / 4),
145
+ VMSTATE_FIFO32(fdro_data, XlnxVersalCFUFDRO),
232
+ VMSTATE_END_OF_LIST()
146
+ VMSTATE_END_OF_LIST(),
233
+ }
147
+ }
234
+};
148
+};
235
+
149
+
236
+static Property msf2_sysreg_properties[] = {
150
static void cfu_apb_class_init(ObjectClass *klass, void *data)
237
+ /* default divisors in Libero GUI */
151
{
238
+ DEFINE_PROP_UINT8("apb0divisor", MSF2SysregState, apb0div, 2),
152
DeviceClass *dc = DEVICE_CLASS(klass);
239
+ DEFINE_PROP_UINT8("apb1divisor", MSF2SysregState, apb1div, 2),
153
@@ -XXX,XX +XXX,XX @@ static void cfu_apb_class_init(ObjectClass *klass, void *data)
240
+ DEFINE_PROP_END_OF_LIST(),
154
device_class_set_props(dc, cfu_props);
155
}
156
157
+static void cfu_fdro_class_init(ObjectClass *klass, void *data)
158
+{
159
+ DeviceClass *dc = DEVICE_CLASS(klass);
160
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
161
+ XlnxCfiIfClass *xcic = XLNX_CFI_IF_CLASS(klass);
162
+
163
+ dc->vmsd = &vmstate_cfu_fdro;
164
+ xcic->cfi_transfer_packet = cfu_fdro_cfi_transfer_packet;
165
+ rc->phases.enter = cfu_fdro_reset_enter;
166
+}
167
+
168
static const TypeInfo cfu_apb_info = {
169
.name = TYPE_XLNX_VERSAL_CFU_APB,
170
.parent = TYPE_SYS_BUS_DEVICE,
171
@@ -XXX,XX +XXX,XX @@ static const TypeInfo cfu_apb_info = {
172
}
173
};
174
175
+static const TypeInfo cfu_fdro_info = {
176
+ .name = TYPE_XLNX_VERSAL_CFU_FDRO,
177
+ .parent = TYPE_SYS_BUS_DEVICE,
178
+ .instance_size = sizeof(XlnxVersalCFUFDRO),
179
+ .class_init = cfu_fdro_class_init,
180
+ .instance_init = cfu_fdro_init,
181
+ .interfaces = (InterfaceInfo[]) {
182
+ { TYPE_XLNX_CFI_IF },
183
+ { }
184
+ }
241
+};
185
+};
242
+
186
+
243
+static void msf2_sysreg_realize(DeviceState *dev, Error **errp)
187
static void cfu_apb_register_types(void)
244
+{
188
{
245
+ MSF2SysregState *s = MSF2_SYSREG(dev);
189
type_register_static(&cfu_apb_info);
246
+
190
+ type_register_static(&cfu_fdro_info);
247
+ if ((s->apb0div > 32 || !is_power_of_2(s->apb0div))
191
}
248
+ || (s->apb1div > 32 || !is_power_of_2(s->apb1div))) {
192
249
+ error_setg(errp, "Invalid apb divisor value");
193
type_init(cfu_apb_register_types)
250
+ error_append_hint(errp, "apb divisor must be a power of 2"
251
+ " and maximum value is 32\n");
252
+ }
253
+}
254
+
255
+static void msf2_sysreg_class_init(ObjectClass *klass, void *data)
256
+{
257
+ DeviceClass *dc = DEVICE_CLASS(klass);
258
+
259
+ dc->vmsd = &vmstate_msf2_sysreg;
260
+ dc->reset = msf2_sysreg_reset;
261
+ dc->props = msf2_sysreg_properties;
262
+ dc->realize = msf2_sysreg_realize;
263
+}
264
+
265
+static const TypeInfo msf2_sysreg_info = {
266
+ .name = TYPE_MSF2_SYSREG,
267
+ .parent = TYPE_SYS_BUS_DEVICE,
268
+ .class_init = msf2_sysreg_class_init,
269
+ .instance_size = sizeof(MSF2SysregState),
270
+ .instance_init = msf2_sysreg_init,
271
+};
272
+
273
+static void msf2_sysreg_register_types(void)
274
+{
275
+ type_register_static(&msf2_sysreg_info);
276
+}
277
+
278
+type_init(msf2_sysreg_register_types)
279
diff --git a/hw/misc/trace-events b/hw/misc/trace-events
280
index XXXXXXX..XXXXXXX 100644
281
--- a/hw/misc/trace-events
282
+++ b/hw/misc/trace-events
283
@@ -XXX,XX +XXX,XX @@ mps2_scc_reset(void) "MPS2 SCC: reset"
284
mps2_scc_leds(char led7, char led6, char led5, char led4, char led3, char led2, char led1, char led0) "MPS2 SCC LEDs: %c%c%c%c%c%c%c%c"
285
mps2_scc_cfg_write(unsigned function, unsigned device, uint32_t value) "MPS2 SCC config write: function %d device %d data 0x%" PRIx32
286
mps2_scc_cfg_read(unsigned function, unsigned device, uint32_t value) "MPS2 SCC config read: function %d device %d data 0x%" PRIx32
287
+
288
+# hw/misc/msf2-sysreg.c
289
+msf2_sysreg_write(uint64_t offset, uint32_t val, uint32_t prev) "msf2-sysreg write: addr 0x%08" HWADDR_PRIx " data 0x%" PRIx32 " prev 0x%" PRIx32
290
+msf2_sysreg_read(uint64_t offset, uint32_t val) "msf2-sysreg read: addr 0x%08" HWADDR_PRIx " data 0x%08" PRIx32
291
+msf2_sysreg_write_pll_status(void) "Invalid write to read only PLL status register"
292
--
194
--
293
2.7.4
195
2.34.1
294
295
1
Don't use the old_mmio field in the memory region ops struct.
1
From: Francisco Iglesias <francisco.iglesias@amd.com>
2
2
3
Introduce a model of Xilinx Versal's Configuration Frame Unit's Single
4
Frame Read port (CFU_SFR).
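
The SFR keyhole decodes the CFRAME row and frame address out of the first word of the 128-bit write before issuing a CFRAME_SFR packet; a sketch of that decode (hypothetical helper name, with the field positions taken from cfu_sfr_write() in this patch):

    static void sfr_decode(uint32_t word0, uint8_t *row_addr,
                           uint32_t *frame_addr)
    {
        /* Bits [27:23] select the CFRAME row, bits [22:0] the frame address. */
        *row_addr = extract32(word0, 23, 5);
        *frame_addr = extract32(word0, 0, 23);
    }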
5
6
Signed-off-by: Francisco Iglesias <francisco.iglesias@amd.com>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Message-id: 20230831165701.2016397-5-francisco.iglesias@amd.com
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 1505580378-9044-4-git-send-email-peter.maydell@linaro.org
6
---
10
---
7
hw/timer/omap_synctimer.c | 35 +++++++++++++++++++++--------------
11
include/hw/misc/xlnx-versal-cfu.h | 15 ++++++
8
1 file changed, 21 insertions(+), 14 deletions(-)
12
hw/misc/xlnx-versal-cfu.c | 87 +++++++++++++++++++++++++++++++
13
2 files changed, 102 insertions(+)
9
14
10
diff --git a/hw/timer/omap_synctimer.c b/hw/timer/omap_synctimer.c
15
diff --git a/include/hw/misc/xlnx-versal-cfu.h b/include/hw/misc/xlnx-versal-cfu.h
11
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
12
--- a/hw/timer/omap_synctimer.c
17
--- a/include/hw/misc/xlnx-versal-cfu.h
13
+++ b/hw/timer/omap_synctimer.c
18
+++ b/include/hw/misc/xlnx-versal-cfu.h
14
@@ -XXX,XX +XXX,XX @@ static uint32_t omap_synctimer_readh(void *opaque, hwaddr addr)
19
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUAPB, XLNX_VERSAL_CFU_APB)
20
#define TYPE_XLNX_VERSAL_CFU_FDRO "xlnx,versal-cfu-fdro"
21
OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUFDRO, XLNX_VERSAL_CFU_FDRO)
22
23
+#define TYPE_XLNX_VERSAL_CFU_SFR "xlnx,versal-cfu-sfr"
24
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUSFR, XLNX_VERSAL_CFU_SFR)
25
+
26
REG32(CFU_ISR, 0x0)
27
FIELD(CFU_ISR, USR_GTS_EVENT, 9, 1)
28
FIELD(CFU_ISR, USR_GSR_EVENT, 8, 1)
29
@@ -XXX,XX +XXX,XX @@ struct XlnxVersalCFUFDRO {
30
Fifo32 fdro_data;
31
};
32
33
+struct XlnxVersalCFUSFR {
34
+ SysBusDevice parent_obj;
35
+ MemoryRegion iomem_sfr;
36
+
37
+ /* 128-bit wfifo. */
38
+ uint32_t wfifo[WFIFO_SZ];
39
+
40
+ struct {
41
+ XlnxVersalCFUAPB *cfu;
42
+ } cfg;
43
+};
44
+
45
/**
46
* This is a helper function for updating a CFI data write fifo, an array of 4
47
* uint32_t and 128 bits of data that are allowed to be written through 4
48
diff --git a/hw/misc/xlnx-versal-cfu.c b/hw/misc/xlnx-versal-cfu.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/hw/misc/xlnx-versal-cfu.c
51
+++ b/hw/misc/xlnx-versal-cfu.c
52
@@ -XXX,XX +XXX,XX @@ static void cfu_stream_write(void *opaque, hwaddr addr, uint64_t value,
15
}
53
}
16
}
54
}
17
55
18
-static void omap_synctimer_write(void *opaque, hwaddr addr,
56
+static uint64_t cfu_sfr_read(void *opaque, hwaddr addr, unsigned size)
19
- uint32_t value)
20
+static uint64_t omap_synctimer_readfn(void *opaque, hwaddr addr,
21
+ unsigned size)
22
+{
57
+{
23
+ switch (size) {
58
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Unsupported read from addr=%"
24
+ case 1:
59
+ HWADDR_PRIx "\n", __func__, addr);
25
+ return omap_badwidth_read32(opaque, addr);
60
+ return 0;
26
+ case 2:
61
+}
27
+ return omap_synctimer_readh(opaque, addr);
62
+
28
+ case 4:
63
+static void cfu_sfr_write(void *opaque, hwaddr addr, uint64_t value,
29
+ return omap_synctimer_readw(opaque, addr);
64
+ unsigned size)
30
+ default:
65
+{
31
+ g_assert_not_reached();
66
+ XlnxVersalCFUSFR *s = XLNX_VERSAL_CFU_SFR(opaque);
67
+ uint32_t wfifo[WFIFO_SZ];
68
+
69
+ if (update_wfifo(addr, value, s->wfifo, wfifo)) {
70
+ uint8_t row_addr = extract32(wfifo[0], 23, 5);
71
+ uint32_t frame_addr = extract32(wfifo[0], 0, 23);
72
+ XlnxCfiPacket pkt = { .reg_addr = CFRAME_SFR,
73
+ .data[0] = frame_addr };
74
+
75
+ if (s->cfg.cfu) {
76
+ cfu_transfer_cfi_packet(s->cfg.cfu, row_addr, &pkt);
77
+ }
32
+ }
78
+ }
33
+}
79
+}
34
+
80
+
35
+static void omap_synctimer_writefn(void *opaque, hwaddr addr,
81
static uint64_t cfu_fdro_read(void *opaque, hwaddr addr, unsigned size)
36
+ uint64_t value, unsigned size)
37
{
82
{
38
OMAP_BAD_REG(addr);
83
XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(opaque);
84
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps cfu_stream_ops = {
85
},
86
};
87
88
+static const MemoryRegionOps cfu_sfr_ops = {
89
+ .read = cfu_sfr_read,
90
+ .write = cfu_sfr_write,
91
+ .endianness = DEVICE_LITTLE_ENDIAN,
92
+ .valid = {
93
+ .min_access_size = 4,
94
+ .max_access_size = 4,
95
+ },
96
+};
97
+
98
static const MemoryRegionOps cfu_fdro_ops = {
99
.read = cfu_fdro_read,
100
.write = cfu_fdro_write,
101
@@ -XXX,XX +XXX,XX @@ static void cfu_apb_init(Object *obj)
102
sysbus_init_irq(sbd, &s->irq_cfu_imr);
39
}
103
}
40
104
41
static const MemoryRegionOps omap_synctimer_ops = {
105
+static void cfu_sfr_init(Object *obj)
42
- .old_mmio = {
106
+{
43
- .read = {
107
+ XlnxVersalCFUSFR *s = XLNX_VERSAL_CFU_SFR(obj);
44
- omap_badwidth_read32,
108
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
45
- omap_synctimer_readh,
109
+
46
- omap_synctimer_readw,
110
+ memory_region_init_io(&s->iomem_sfr, obj, &cfu_sfr_ops, s,
47
- },
111
+ TYPE_XLNX_VERSAL_CFU_SFR, KEYHOLE_STREAM_4K);
48
- .write = {
112
+ sysbus_init_mmio(sbd, &s->iomem_sfr);
49
- omap_badwidth_write32,
113
+}
50
- omap_synctimer_write,
114
+
51
- omap_synctimer_write,
115
+static void cfu_sfr_reset_enter(Object *obj, ResetType type)
52
- },
116
+{
53
- },
117
+ XlnxVersalCFUSFR *s = XLNX_VERSAL_CFU_SFR(obj);
54
+ .read = omap_synctimer_readfn,
118
+
55
+ .write = omap_synctimer_writefn,
119
+ memset(s->wfifo, 0, WFIFO_SZ * sizeof(uint32_t));
56
+ .valid.min_access_size = 1,
120
+}
57
+ .valid.max_access_size = 4,
121
+
58
.endianness = DEVICE_NATIVE_ENDIAN,
122
static void cfu_fdro_init(Object *obj)
123
{
124
XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(obj);
125
@@ -XXX,XX +XXX,XX @@ static Property cfu_props[] = {
126
DEFINE_PROP_END_OF_LIST(),
59
};
127
};
60
128
129
+static Property cfu_sfr_props[] = {
130
+ DEFINE_PROP_LINK("cfu", XlnxVersalCFUSFR, cfg.cfu,
131
+ TYPE_XLNX_VERSAL_CFU_APB, XlnxVersalCFUAPB *),
132
+ DEFINE_PROP_END_OF_LIST(),
133
+};
134
+
135
static const VMStateDescription vmstate_cfu_apb = {
136
.name = TYPE_XLNX_VERSAL_CFU_APB,
137
.version_id = 1,
138
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_cfu_fdro = {
139
}
140
};
141
142
+static const VMStateDescription vmstate_cfu_sfr = {
143
+ .name = TYPE_XLNX_VERSAL_CFU_SFR,
144
+ .version_id = 1,
145
+ .minimum_version_id = 1,
146
+ .fields = (VMStateField[]) {
147
+ VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFUSFR, 4),
148
+ VMSTATE_END_OF_LIST(),
149
+ }
150
+};
151
+
152
static void cfu_apb_class_init(ObjectClass *klass, void *data)
153
{
154
DeviceClass *dc = DEVICE_CLASS(klass);
155
@@ -XXX,XX +XXX,XX @@ static void cfu_fdro_class_init(ObjectClass *klass, void *data)
156
rc->phases.enter = cfu_fdro_reset_enter;
157
}
158
159
+static void cfu_sfr_class_init(ObjectClass *klass, void *data)
160
+{
161
+ DeviceClass *dc = DEVICE_CLASS(klass);
162
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
163
+
164
+ device_class_set_props(dc, cfu_sfr_props);
165
+ dc->vmsd = &vmstate_cfu_sfr;
166
+ rc->phases.enter = cfu_sfr_reset_enter;
167
+}
168
+
169
static const TypeInfo cfu_apb_info = {
170
.name = TYPE_XLNX_VERSAL_CFU_APB,
171
.parent = TYPE_SYS_BUS_DEVICE,
172
@@ -XXX,XX +XXX,XX @@ static const TypeInfo cfu_fdro_info = {
173
}
174
};
175
176
+static const TypeInfo cfu_sfr_info = {
177
+ .name = TYPE_XLNX_VERSAL_CFU_SFR,
178
+ .parent = TYPE_SYS_BUS_DEVICE,
179
+ .instance_size = sizeof(XlnxVersalCFUSFR),
180
+ .class_init = cfu_sfr_class_init,
181
+ .instance_init = cfu_sfr_init,
182
+};
183
+
184
static void cfu_apb_register_types(void)
185
{
186
type_register_static(&cfu_apb_info);
187
type_register_static(&cfu_fdro_info);
188
+ type_register_static(&cfu_sfr_info);
189
}
190
191
type_init(cfu_apb_register_types)
61
--
192
--
62
2.7.4
193
2.34.1
63
64
1
From: Subbaraya Sundeep <sundeep.lkml@gmail.com>
1
From: Francisco Iglesias <francisco.iglesias@amd.com>
2
2
3
Modelled the System Timer in Microsemi's Smartfusion2 SoC.
3
Introduce a model of Xilinx Versal's Configuration Frame controller
4
The timer has two 32-bit down counters and two interrupts.
4
(CFRAME_REG).
5
5
6
Signed-off-by: Subbaraya Sundeep <sundeep.lkml@gmail.com>
6
Signed-off-by: Francisco Iglesias <francisco.iglesias@amd.com>
7
Reviewed-by: Alistair Francis <alistair.francis@xilinx.com>
7
Message-id: 20230831165701.2016397-6-francisco.iglesias@amd.com
8
Acked-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
10
Message-id: 20170920201737.25723-2-f4bug@amsat.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
10
---
13
hw/timer/Makefile.objs | 1 +
11
MAINTAINERS | 2 +
14
include/hw/timer/mss-timer.h | 64 ++++++++++
12
include/hw/misc/xlnx-versal-cframe-reg.h | 286 ++++++++++
15
hw/timer/mss-timer.c | 289 +++++++++++++++++++++++++++++++++++++++++++
13
hw/misc/xlnx-versal-cframe-reg.c | 697 +++++++++++++++++++++++
16
3 files changed, 354 insertions(+)
14
hw/misc/meson.build | 1 +
17
create mode 100644 include/hw/timer/mss-timer.h
15
4 files changed, 986 insertions(+)
18
create mode 100644 hw/timer/mss-timer.c
16
create mode 100644 include/hw/misc/xlnx-versal-cframe-reg.h
17
create mode 100644 hw/misc/xlnx-versal-cframe-reg.c
19
18
20
diff --git a/hw/timer/Makefile.objs b/hw/timer/Makefile.objs
19
diff --git a/MAINTAINERS b/MAINTAINERS
21
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/timer/Makefile.objs
21
--- a/MAINTAINERS
23
+++ b/hw/timer/Makefile.objs
22
+++ b/MAINTAINERS
24
@@ -XXX,XX +XXX,XX @@ common-obj-$(CONFIG_ASPEED_SOC) += aspeed_timer.o
23
@@ -XXX,XX +XXX,XX @@ F: hw/misc/xlnx-cfi-if.c
25
24
F: include/hw/misc/xlnx-cfi-if.h
26
common-obj-$(CONFIG_SUN4V_RTC) += sun4v-rtc.o
25
F: hw/misc/xlnx-versal-cfu.c
27
common-obj-$(CONFIG_CMSDK_APB_TIMER) += cmsdk-apb-timer.o
26
F: include/hw/misc/xlnx-versal-cfu.h
28
+common-obj-$(CONFIG_MSF2) += mss-timer.o
27
+F: hw/misc/xlnx-versal-cframe-reg.c
29
diff --git a/include/hw/timer/mss-timer.h b/include/hw/timer/mss-timer.h
28
+F: include/hw/misc/xlnx-versal-cframe-reg.h
29
30
STM32F100
31
M: Alexandre Iooss <erdnaxe@crans.org>
32
diff --git a/include/hw/misc/xlnx-versal-cframe-reg.h b/include/hw/misc/xlnx-versal-cframe-reg.h
30
new file mode 100644
33
new file mode 100644
31
index XXXXXXX..XXXXXXX
34
index XXXXXXX..XXXXXXX
32
--- /dev/null
35
--- /dev/null
33
+++ b/include/hw/timer/mss-timer.h
36
+++ b/include/hw/misc/xlnx-versal-cframe-reg.h
34
@@ -XXX,XX +XXX,XX @@
37
@@ -XXX,XX +XXX,XX @@
35
+/*
38
+/*
36
+ * Microsemi SmartFusion2 Timer.
39
+ * QEMU model of the Configuration Frame Control module
37
+ *
40
+ *
38
+ * Copyright (c) 2017 Subbaraya Sundeep <sundeep.lkml@gmail.com>
41
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
39
+ *
42
+ *
40
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
43
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
41
+ * of this software and associated documentation files (the "Software"), to deal
42
+ * in the Software without restriction, including without limitation the rights
43
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
44
+ * copies of the Software, and to permit persons to whom the Software is
45
+ * furnished to do so, subject to the following conditions:
46
+ *
44
+ *
47
+ * The above copyright notice and this permission notice shall be included in
45
+ * SPDX-License-Identifier: GPL-2.0-or-later
48
+ * all copies or substantial portions of the Software.
49
+ *
46
+ *
50
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
47
+ * References:
51
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
48
+ * [1] Versal ACAP Technical Reference Manual,
52
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
49
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
53
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
50
+ *
54
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
51
+ * [2] Versal ACAP Register Reference,
55
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
52
+ * https://www.xilinx.com/htmldocs/registers/am012/am012-versal-register-reference.html
56
+ * THE SOFTWARE.
57
+ */
53
+ */
58
+
54
+#ifndef HW_MISC_XLNX_VERSAL_CFRAME_REG_H
59
+#ifndef HW_MSS_TIMER_H
55
+#define HW_MISC_XLNX_VERSAL_CFRAME_REG_H
60
+#define HW_MSS_TIMER_H
61
+
56
+
62
+#include "hw/sysbus.h"
57
+#include "hw/sysbus.h"
63
+#include "hw/ptimer.h"
58
+#include "hw/register.h"
64
+
59
+#include "hw/misc/xlnx-cfi-if.h"
65
+#define TYPE_MSS_TIMER "mss-timer"
60
+#include "hw/misc/xlnx-versal-cfu.h"
66
+#define MSS_TIMER(obj) OBJECT_CHECK(MSSTimerState, \
61
+#include "qemu/fifo32.h"
67
+ (obj), TYPE_MSS_TIMER)
62
+
63
+#define TYPE_XLNX_VERSAL_CFRAME_REG "xlnx,cframe-reg"
64
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameReg, XLNX_VERSAL_CFRAME_REG)
68
+
65
+
69
+/*
66
+/*
70
+ * There are two 32-bit down counting timers.
67
+ * The registers in this module are 128 bits wide but it is ok to write
71
+ * Timers 1 and 2 can be concatenated into a single 64-bit Timer
68
+ * and read them through 4 sequential 32 bit accesses (address[3:2] = 0,
72
+ * that operates either in Periodic mode or in One-shot mode.
69
+ * 1, 2, 3).
73
+ * Writing 1 to the TIM64_MODE register bit 0 sets the Timers in 64-bit mode.
74
+ * In 64-bit mode, writing to the 32-bit registers has no effect.
75
+ * Similarly, in 32-bit mode, writing to the 64-bit mode registers
76
+ * has no effect. Only two 32-bit timers are supported currently.
77
+ */
70
+ */
78
+#define NUM_TIMERS 2
71
+REG32(CRC0, 0x0)
79
+
72
+ FIELD(CRC, CRC, 0, 32)
80
+#define R_TIM1_MAX 6
73
+REG32(CRC1, 0x4)
81
+
74
+REG32(CRC2, 0x8)
82
+struct Msf2Timer {
75
+REG32(CRC3, 0xc)
83
+ QEMUBH *bh;
76
+REG32(FAR0, 0x10)
84
+ ptimer_state *ptimer;
77
+ FIELD(FAR0, SEGMENT, 23, 2)
85
+
78
+ FIELD(FAR0, BLOCKTYPE, 20, 3)
86
+ uint32_t regs[R_TIM1_MAX];
79
+ FIELD(FAR0, FRAME_ADDR, 0, 20)
87
+ qemu_irq irq;
80
+REG32(FAR1, 0x14)
81
+REG32(FAR2, 0x18)
82
+REG32(FAR3, 0x1c)
83
+REG32(FAR_SFR0, 0x20)
84
+ FIELD(FAR_SFR0, BLOCKTYPE, 20, 3)
85
+ FIELD(FAR_SFR0, FRAME_ADDR, 0, 20)
86
+REG32(FAR_SFR1, 0x24)
87
+REG32(FAR_SFR2, 0x28)
88
+REG32(FAR_SFR3, 0x2c)
89
+REG32(FDRI0, 0x40)
90
+REG32(FDRI1, 0x44)
91
+REG32(FDRI2, 0x48)
92
+REG32(FDRI3, 0x4c)
93
+REG32(FRCNT0, 0x50)
94
+ FIELD(FRCNT0, FRCNT, 0, 32)
95
+REG32(FRCNT1, 0x54)
96
+REG32(FRCNT2, 0x58)
97
+REG32(FRCNT3, 0x5c)
98
+REG32(CMD0, 0x60)
99
+ FIELD(CMD0, CMD, 0, 5)
100
+REG32(CMD1, 0x64)
101
+REG32(CMD2, 0x68)
102
+REG32(CMD3, 0x6c)
103
+REG32(CR_MASK0, 0x70)
104
+REG32(CR_MASK1, 0x74)
105
+REG32(CR_MASK2, 0x78)
106
+REG32(CR_MASK3, 0x7c)
107
+REG32(CTL0, 0x80)
108
+ FIELD(CTL, PER_FRAME_CRC, 0, 1)
109
+REG32(CTL1, 0x84)
110
+REG32(CTL2, 0x88)
111
+REG32(CTL3, 0x8c)
112
+REG32(CFRM_ISR0, 0x150)
113
+ FIELD(CFRM_ISR0, READ_BROADCAST_ERROR, 21, 1)
114
+ FIELD(CFRM_ISR0, CMD_MISSING_ERROR, 20, 1)
115
+ FIELD(CFRM_ISR0, RW_ROWOFF_ERROR, 19, 1)
116
+ FIELD(CFRM_ISR0, READ_REG_ADDR_ERROR, 18, 1)
117
+ FIELD(CFRM_ISR0, READ_BLK_TYPE_ERROR, 17, 1)
118
+ FIELD(CFRM_ISR0, READ_FRAME_ADDR_ERROR, 16, 1)
119
+ FIELD(CFRM_ISR0, WRITE_REG_ADDR_ERROR, 15, 1)
120
+ FIELD(CFRM_ISR0, WRITE_BLK_TYPE_ERROR, 13, 1)
121
+ FIELD(CFRM_ISR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
122
+ FIELD(CFRM_ISR0, MFW_OVERRUN_ERROR, 11, 1)
123
+ FIELD(CFRM_ISR0, FAR_FIFO_UNDERFLOW, 10, 1)
124
+ FIELD(CFRM_ISR0, FAR_FIFO_OVERFLOW, 9, 1)
125
+ FIELD(CFRM_ISR0, PER_FRAME_SEQ_ERROR, 8, 1)
126
+ FIELD(CFRM_ISR0, CRC_ERROR, 7, 1)
127
+ FIELD(CFRM_ISR0, WRITE_OVERRUN_ERROR, 6, 1)
128
+ FIELD(CFRM_ISR0, READ_OVERRUN_ERROR, 5, 1)
129
+ FIELD(CFRM_ISR0, CMD_INTERRUPT_ERROR, 4, 1)
130
+ FIELD(CFRM_ISR0, WRITE_INTERRUPT_ERROR, 3, 1)
131
+ FIELD(CFRM_ISR0, READ_INTERRUPT_ERROR, 2, 1)
132
+ FIELD(CFRM_ISR0, SEU_CRC_ERROR, 1, 1)
133
+ FIELD(CFRM_ISR0, SEU_ECC_ERROR, 0, 1)
134
+REG32(CFRM_ISR1, 0x154)
135
+REG32(CFRM_ISR2, 0x158)
136
+REG32(CFRM_ISR3, 0x15c)
137
+REG32(CFRM_IMR0, 0x160)
138
+ FIELD(CFRM_IMR0, READ_BROADCAST_ERROR, 21, 1)
139
+ FIELD(CFRM_IMR0, CMD_MISSING_ERROR, 20, 1)
140
+ FIELD(CFRM_IMR0, RW_ROWOFF_ERROR, 19, 1)
141
+ FIELD(CFRM_IMR0, READ_REG_ADDR_ERROR, 18, 1)
142
+ FIELD(CFRM_IMR0, READ_BLK_TYPE_ERROR, 17, 1)
143
+ FIELD(CFRM_IMR0, READ_FRAME_ADDR_ERROR, 16, 1)
144
+ FIELD(CFRM_IMR0, WRITE_REG_ADDR_ERROR, 15, 1)
145
+ FIELD(CFRM_IMR0, WRITE_BLK_TYPE_ERROR, 13, 1)
146
+ FIELD(CFRM_IMR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
147
+ FIELD(CFRM_IMR0, MFW_OVERRUN_ERROR, 11, 1)
148
+ FIELD(CFRM_IMR0, FAR_FIFO_UNDERFLOW, 10, 1)
149
+ FIELD(CFRM_IMR0, FAR_FIFO_OVERFLOW, 9, 1)
150
+ FIELD(CFRM_IMR0, PER_FRAME_SEQ_ERROR, 8, 1)
151
+ FIELD(CFRM_IMR0, CRC_ERROR, 7, 1)
152
+ FIELD(CFRM_IMR0, WRITE_OVERRUN_ERROR, 6, 1)
153
+ FIELD(CFRM_IMR0, READ_OVERRUN_ERROR, 5, 1)
154
+ FIELD(CFRM_IMR0, CMD_INTERRUPT_ERROR, 4, 1)
155
+ FIELD(CFRM_IMR0, WRITE_INTERRUPT_ERROR, 3, 1)
156
+ FIELD(CFRM_IMR0, READ_INTERRUPT_ERROR, 2, 1)
157
+ FIELD(CFRM_IMR0, SEU_CRC_ERROR, 1, 1)
158
+ FIELD(CFRM_IMR0, SEU_ECC_ERROR, 0, 1)
159
+REG32(CFRM_IMR1, 0x164)
160
+REG32(CFRM_IMR2, 0x168)
161
+REG32(CFRM_IMR3, 0x16c)
162
+REG32(CFRM_IER0, 0x170)
163
+ FIELD(CFRM_IER0, READ_BROADCAST_ERROR, 21, 1)
164
+ FIELD(CFRM_IER0, CMD_MISSING_ERROR, 20, 1)
165
+ FIELD(CFRM_IER0, RW_ROWOFF_ERROR, 19, 1)
166
+ FIELD(CFRM_IER0, READ_REG_ADDR_ERROR, 18, 1)
167
+ FIELD(CFRM_IER0, READ_BLK_TYPE_ERROR, 17, 1)
168
+ FIELD(CFRM_IER0, READ_FRAME_ADDR_ERROR, 16, 1)
169
+ FIELD(CFRM_IER0, WRITE_REG_ADDR_ERROR, 15, 1)
170
+ FIELD(CFRM_IER0, WRITE_BLK_TYPE_ERROR, 13, 1)
171
+ FIELD(CFRM_IER0, WRITE_FRAME_ADDR_ERROR, 12, 1)
172
+ FIELD(CFRM_IER0, MFW_OVERRUN_ERROR, 11, 1)
173
+ FIELD(CFRM_IER0, FAR_FIFO_UNDERFLOW, 10, 1)
174
+ FIELD(CFRM_IER0, FAR_FIFO_OVERFLOW, 9, 1)
175
+ FIELD(CFRM_IER0, PER_FRAME_SEQ_ERROR, 8, 1)
176
+ FIELD(CFRM_IER0, CRC_ERROR, 7, 1)
177
+ FIELD(CFRM_IER0, WRITE_OVERRUN_ERROR, 6, 1)
178
+ FIELD(CFRM_IER0, READ_OVERRUN_ERROR, 5, 1)
179
+ FIELD(CFRM_IER0, CMD_INTERRUPT_ERROR, 4, 1)
180
+ FIELD(CFRM_IER0, WRITE_INTERRUPT_ERROR, 3, 1)
181
+ FIELD(CFRM_IER0, READ_INTERRUPT_ERROR, 2, 1)
182
+ FIELD(CFRM_IER0, SEU_CRC_ERROR, 1, 1)
183
+ FIELD(CFRM_IER0, SEU_ECC_ERROR, 0, 1)
184
+REG32(CFRM_IER1, 0x174)
185
+REG32(CFRM_IER2, 0x178)
186
+REG32(CFRM_IER3, 0x17c)
187
+REG32(CFRM_IDR0, 0x180)
188
+ FIELD(CFRM_IDR0, READ_BROADCAST_ERROR, 21, 1)
189
+ FIELD(CFRM_IDR0, CMD_MISSING_ERROR, 20, 1)
190
+ FIELD(CFRM_IDR0, RW_ROWOFF_ERROR, 19, 1)
191
+ FIELD(CFRM_IDR0, READ_REG_ADDR_ERROR, 18, 1)
192
+ FIELD(CFRM_IDR0, READ_BLK_TYPE_ERROR, 17, 1)
193
+ FIELD(CFRM_IDR0, READ_FRAME_ADDR_ERROR, 16, 1)
194
+ FIELD(CFRM_IDR0, WRITE_REG_ADDR_ERROR, 15, 1)
195
+ FIELD(CFRM_IDR0, WRITE_BLK_TYPE_ERROR, 13, 1)
196
+ FIELD(CFRM_IDR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
197
+ FIELD(CFRM_IDR0, MFW_OVERRUN_ERROR, 11, 1)
198
+ FIELD(CFRM_IDR0, FAR_FIFO_UNDERFLOW, 10, 1)
199
+ FIELD(CFRM_IDR0, FAR_FIFO_OVERFLOW, 9, 1)
200
+ FIELD(CFRM_IDR0, PER_FRAME_SEQ_ERROR, 8, 1)
201
+ FIELD(CFRM_IDR0, CRC_ERROR, 7, 1)
202
+ FIELD(CFRM_IDR0, WRITE_OVERRUN_ERROR, 6, 1)
203
+ FIELD(CFRM_IDR0, READ_OVERRUN_ERROR, 5, 1)
204
+ FIELD(CFRM_IDR0, CMD_INTERRUPT_ERROR, 4, 1)
205
+ FIELD(CFRM_IDR0, WRITE_INTERRUPT_ERROR, 3, 1)
206
+ FIELD(CFRM_IDR0, READ_INTERRUPT_ERROR, 2, 1)
207
+ FIELD(CFRM_IDR0, SEU_CRC_ERROR, 1, 1)
208
+ FIELD(CFRM_IDR0, SEU_ECC_ERROR, 0, 1)
209
+REG32(CFRM_IDR1, 0x184)
210
+REG32(CFRM_IDR2, 0x188)
211
+REG32(CFRM_IDR3, 0x18c)
212
+REG32(CFRM_ITR0, 0x190)
213
+ FIELD(CFRM_ITR0, READ_BROADCAST_ERROR, 21, 1)
214
+ FIELD(CFRM_ITR0, CMD_MISSING_ERROR, 20, 1)
215
+ FIELD(CFRM_ITR0, RW_ROWOFF_ERROR, 19, 1)
216
+ FIELD(CFRM_ITR0, READ_REG_ADDR_ERROR, 18, 1)
217
+ FIELD(CFRM_ITR0, READ_BLK_TYPE_ERROR, 17, 1)
218
+ FIELD(CFRM_ITR0, READ_FRAME_ADDR_ERROR, 16, 1)
219
+ FIELD(CFRM_ITR0, WRITE_REG_ADDR_ERROR, 15, 1)
220
+ FIELD(CFRM_ITR0, WRITE_BLK_TYPE_ERROR, 13, 1)
221
+ FIELD(CFRM_ITR0, WRITE_FRAME_ADDR_ERROR, 12, 1)
222
+ FIELD(CFRM_ITR0, MFW_OVERRUN_ERROR, 11, 1)
223
+ FIELD(CFRM_ITR0, FAR_FIFO_UNDERFLOW, 10, 1)
224
+ FIELD(CFRM_ITR0, FAR_FIFO_OVERFLOW, 9, 1)
225
+ FIELD(CFRM_ITR0, PER_FRAME_SEQ_ERROR, 8, 1)
226
+ FIELD(CFRM_ITR0, CRC_ERROR, 7, 1)
227
+ FIELD(CFRM_ITR0, WRITE_OVERRUN_ERROR, 6, 1)
228
+ FIELD(CFRM_ITR0, READ_OVERRUN_ERROR, 5, 1)
229
+ FIELD(CFRM_ITR0, CMD_INTERRUPT_ERROR, 4, 1)
230
+ FIELD(CFRM_ITR0, WRITE_INTERRUPT_ERROR, 3, 1)
231
+ FIELD(CFRM_ITR0, READ_INTERRUPT_ERROR, 2, 1)
232
+ FIELD(CFRM_ITR0, SEU_CRC_ERROR, 1, 1)
233
+ FIELD(CFRM_ITR0, SEU_ECC_ERROR, 0, 1)
234
+REG32(CFRM_ITR1, 0x194)
235
+REG32(CFRM_ITR2, 0x198)
236
+REG32(CFRM_ITR3, 0x19c)
237
+REG32(SEU_SYNDRM00, 0x1a0)
238
+REG32(SEU_SYNDRM01, 0x1a4)
239
+REG32(SEU_SYNDRM02, 0x1a8)
240
+REG32(SEU_SYNDRM03, 0x1ac)
241
+REG32(SEU_SYNDRM10, 0x1b0)
242
+REG32(SEU_SYNDRM11, 0x1b4)
243
+REG32(SEU_SYNDRM12, 0x1b8)
244
+REG32(SEU_SYNDRM13, 0x1bc)
245
+REG32(SEU_SYNDRM20, 0x1c0)
246
+REG32(SEU_SYNDRM21, 0x1c4)
247
+REG32(SEU_SYNDRM22, 0x1c8)
248
+REG32(SEU_SYNDRM23, 0x1cc)
249
+REG32(SEU_SYNDRM30, 0x1d0)
250
+REG32(SEU_SYNDRM31, 0x1d4)
251
+REG32(SEU_SYNDRM32, 0x1d8)
252
+REG32(SEU_SYNDRM33, 0x1dc)
253
+REG32(SEU_VIRTUAL_SYNDRM0, 0x1e0)
254
+REG32(SEU_VIRTUAL_SYNDRM1, 0x1e4)
255
+REG32(SEU_VIRTUAL_SYNDRM2, 0x1e8)
256
+REG32(SEU_VIRTUAL_SYNDRM3, 0x1ec)
257
+REG32(SEU_CRC0, 0x1f0)
258
+REG32(SEU_CRC1, 0x1f4)
259
+REG32(SEU_CRC2, 0x1f8)
260
+REG32(SEU_CRC3, 0x1fc)
261
+REG32(CFRAME_FAR_BOT0, 0x200)
262
+REG32(CFRAME_FAR_BOT1, 0x204)
263
+REG32(CFRAME_FAR_BOT2, 0x208)
264
+REG32(CFRAME_FAR_BOT3, 0x20c)
265
+REG32(CFRAME_FAR_TOP0, 0x210)
266
+REG32(CFRAME_FAR_TOP1, 0x214)
267
+REG32(CFRAME_FAR_TOP2, 0x218)
268
+REG32(CFRAME_FAR_TOP3, 0x21c)
269
+REG32(LAST_FRAME_BOT0, 0x220)
270
+ FIELD(LAST_FRAME_BOT0, BLOCKTYPE1_LAST_FRAME_LSB, 20, 12)
271
+ FIELD(LAST_FRAME_BOT0, BLOCKTYPE0_LAST_FRAME, 0, 20)
272
+REG32(LAST_FRAME_BOT1, 0x224)
273
+ FIELD(LAST_FRAME_BOT1, BLOCKTYPE3_LAST_FRAME_LSB, 28, 4)
274
+ FIELD(LAST_FRAME_BOT1, BLOCKTYPE2_LAST_FRAME, 8, 20)
275
+ FIELD(LAST_FRAME_BOT1, BLOCKTYPE1_LAST_FRAME_MSB, 0, 8)
276
+REG32(LAST_FRAME_BOT2, 0x228)
277
+ FIELD(LAST_FRAME_BOT2, BLOCKTYPE3_LAST_FRAME_MSB, 0, 16)
278
+REG32(LAST_FRAME_BOT3, 0x22c)
279
+REG32(LAST_FRAME_TOP0, 0x230)
280
+ FIELD(LAST_FRAME_TOP0, BLOCKTYPE5_LAST_FRAME_LSB, 20, 12)
281
+ FIELD(LAST_FRAME_TOP0, BLOCKTYPE4_LAST_FRAME, 0, 20)
282
+REG32(LAST_FRAME_TOP1, 0x234)
283
+ FIELD(LAST_FRAME_TOP1, BLOCKTYPE6_LAST_FRAME, 8, 20)
284
+ FIELD(LAST_FRAME_TOP1, BLOCKTYPE5_LAST_FRAME_MSB, 0, 8)
285
+REG32(LAST_FRAME_TOP2, 0x238)
286
+REG32(LAST_FRAME_TOP3, 0x23c)
287
+
288
+#define CFRAME_REG_R_MAX (R_LAST_FRAME_TOP3 + 1)
289
+
290
+#define FRAME_NUM_QWORDS 25
291
+#define FRAME_NUM_WORDS (FRAME_NUM_QWORDS * 4) /* 25 * 128 bits */
292
+
293
+typedef struct XlnxCFrame {
294
+ uint32_t data[FRAME_NUM_WORDS];
295
+} XlnxCFrame;
296
+
297
+struct XlnxVersalCFrameReg {
298
+ SysBusDevice parent_obj;
299
+ MemoryRegion iomem;
300
+ MemoryRegion iomem_fdri;
301
+ qemu_irq irq_cfrm_imr;
302
+
303
+ /* 128-bit wfifo. */
304
+ uint32_t wfifo[WFIFO_SZ];
305
+
306
+ uint32_t regs[CFRAME_REG_R_MAX];
307
+ RegisterInfo regs_info[CFRAME_REG_R_MAX];
308
+
309
+ bool rowon;
310
+ bool wcfg;
311
+ bool rcfg;
312
+
313
+ GTree *cframes;
314
+ Fifo32 new_f_data;
315
+
316
+ struct {
317
+ XlnxCfiIf *cfu_fdro;
318
+ uint32_t blktype_num_frames[7];
319
+ } cfg;
320
+ bool row_configured;
88
+};
321
+};
89
+
322
+
90
+typedef struct MSSTimerState {
323
+#endif
91
+ SysBusDevice parent_obj;
324
diff --git a/hw/misc/xlnx-versal-cframe-reg.c b/hw/misc/xlnx-versal-cframe-reg.c
92
+
93
+ MemoryRegion mmio;
94
+ uint32_t freq_hz;
95
+ struct Msf2Timer timers[NUM_TIMERS];
96
+} MSSTimerState;
97
+
98
+#endif /* HW_MSS_TIMER_H */
99
diff --git a/hw/timer/mss-timer.c b/hw/timer/mss-timer.c
100
new file mode 100644
325
new file mode 100644
101
index XXXXXXX..XXXXXXX
326
index XXXXXXX..XXXXXXX
102
--- /dev/null
327
--- /dev/null
103
+++ b/hw/timer/mss-timer.c
328
+++ b/hw/misc/xlnx-versal-cframe-reg.c
104
@@ -XXX,XX +XXX,XX @@
329
@@ -XXX,XX +XXX,XX @@
105
+/*
330
+/*
106
+ * Block model of System timer present in
331
+ * QEMU model of the Configuration Frame Control module
107
+ * Microsemi's SmartFusion2 and SmartFusion SoCs.
108
+ *
332
+ *
109
+ * Copyright (c) 2017 Subbaraya Sundeep <sundeep.lkml@gmail.com>.
333
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
110
+ *
334
+ *
111
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
335
+ * Written by Francisco Iglesias <francisco.iglesias@amd.com>
112
+ * of this software and associated documentation files (the "Software"), to deal
113
+ * in the Software without restriction, including without limitation the rights
114
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
115
+ * copies of the Software, and to permit persons to whom the Software is
116
+ * furnished to do so, subject to the following conditions:
117
+ *
336
+ *
118
+ * The above copyright notice and this permission notice shall be included in
337
+ * SPDX-License-Identifier: GPL-2.0-or-later
119
+ * all copies or substantial portions of the Software.
120
+ *
121
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
122
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
123
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
124
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
125
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
126
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
127
+ * THE SOFTWARE.
128
+ */
338
+ */
129
+
339
+
130
+#include "qemu/osdep.h"
340
+#include "qemu/osdep.h"
131
+#include "qemu/main-loop.h"
341
+#include "hw/sysbus.h"
342
+#include "hw/register.h"
343
+#include "hw/registerfields.h"
344
+#include "qemu/bitops.h"
132
+#include "qemu/log.h"
345
+#include "qemu/log.h"
133
+#include "hw/timer/mss-timer.h"
346
+#include "qemu/units.h"
134
+
347
+#include "qapi/error.h"
135
+#ifndef MSS_TIMER_ERR_DEBUG
348
+#include "hw/qdev-properties.h"
136
+#define MSS_TIMER_ERR_DEBUG 0
349
+#include "migration/vmstate.h"
350
+#include "hw/irq.h"
351
+#include "hw/misc/xlnx-versal-cframe-reg.h"
352
+
353
+#ifndef XLNX_VERSAL_CFRAME_REG_ERR_DEBUG
354
+#define XLNX_VERSAL_CFRAME_REG_ERR_DEBUG 0
137
+#endif
355
+#endif
138
+
356
+
139
+#define DB_PRINT_L(lvl, fmt, args...) do { \
357
+#define KEYHOLE_STREAM_4K (4 * KiB)
140
+ if (MSS_TIMER_ERR_DEBUG >= lvl) { \
358
+#define N_WORDS_128BIT 4
141
+ qemu_log("%s: " fmt "\n", __func__, ## args); \
359
+
142
+ } \
360
+#define MAX_BLOCKTYPE 6
143
+} while (0);
361
+#define MAX_BLOCKTYPE_FRAMES 0xFFFFF
144
+
362
+
145
+#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args)
363
+enum {
146
+
364
+ CFRAME_CMD_WCFG = 1,
147
+#define R_TIM_VAL 0
365
+ CFRAME_CMD_ROWON = 2,
148
+#define R_TIM_LOADVAL 1
366
+ CFRAME_CMD_ROWOFF = 3,
149
+#define R_TIM_BGLOADVAL 2
367
+ CFRAME_CMD_RCFG = 4,
150
+#define R_TIM_CTRL 3
368
+ CFRAME_CMD_DLPARK = 5,
151
+#define R_TIM_RIS 4
369
+};
152
+#define R_TIM_MIS 5
370
+
153
+
371
+static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
154
+#define TIMER_CTRL_ENBL (1 << 0)
372
+{
155
+#define TIMER_CTRL_ONESHOT (1 << 1)
373
+ guint ua = GPOINTER_TO_UINT(a);
156
+#define TIMER_CTRL_INTR (1 << 2)
374
+ guint ub = GPOINTER_TO_UINT(b);
157
+#define TIMER_RIS_ACK (1 << 0)
375
+ return (ua > ub) - (ua < ub);
158
+#define TIMER_RST_CLR (1 << 6)
376
+}
159
+#define TIMER_MODE (1 << 0)
377
+
160
+
378
+static void cfrm_imr_update_irq(XlnxVersalCFrameReg *s)
161
+static void timer_update_irq(struct Msf2Timer *st)
379
+{
162
+{
380
+ bool pending = s->regs[R_CFRM_ISR0] & ~s->regs[R_CFRM_IMR0];
163
+ bool isr, ier;
381
+ qemu_set_irq(s->irq_cfrm_imr, pending);
164
+
382
+}
165
+ isr = !!(st->regs[R_TIM_RIS] & TIMER_RIS_ACK);
383
+
166
+ ier = !!(st->regs[R_TIM_CTRL] & TIMER_CTRL_INTR);
384
+static void cfrm_isr_postw(RegisterInfo *reg, uint64_t val64)
167
+ qemu_set_irq(st->irq, (ier && isr));
385
+{
168
+}
386
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
169
+
387
+ cfrm_imr_update_irq(s);
170
+static void timer_update(struct Msf2Timer *st)
388
+}
171
+{
389
+
172
+ uint64_t count;
390
+static uint64_t cfrm_ier_prew(RegisterInfo *reg, uint64_t val64)
173
+
391
+{
174
+ if (!(st->regs[R_TIM_CTRL] & TIMER_CTRL_ENBL)) {
392
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
175
+ ptimer_stop(st->ptimer);
393
+
394
+ s->regs[R_CFRM_IMR0] &= ~s->regs[R_CFRM_IER0];
395
+ s->regs[R_CFRM_IER0] = 0;
396
+ cfrm_imr_update_irq(s);
397
+ return 0;
398
+}
399
+
400
+static uint64_t cfrm_idr_prew(RegisterInfo *reg, uint64_t val64)
401
+{
402
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
403
+
404
+ s->regs[R_CFRM_IMR0] |= s->regs[R_CFRM_IDR0];
405
+ s->regs[R_CFRM_IDR0] = 0;
406
+ cfrm_imr_update_irq(s);
407
+ return 0;
408
+}
409
+
410
+static uint64_t cfrm_itr_prew(RegisterInfo *reg, uint64_t val64)
411
+{
412
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
413
+
414
+ s->regs[R_CFRM_ISR0] |= s->regs[R_CFRM_ITR0];
415
+ s->regs[R_CFRM_ITR0] = 0;
416
+ cfrm_imr_update_irq(s);
417
+ return 0;
418
+}
419
+
420
+static void cframe_incr_far(XlnxVersalCFrameReg *s)
421
+{
422
+ uint32_t faddr = ARRAY_FIELD_EX32(s->regs, FAR0, FRAME_ADDR);
423
+ uint32_t blktype = ARRAY_FIELD_EX32(s->regs, FAR0, BLOCKTYPE);
424
+
425
+ assert(blktype <= MAX_BLOCKTYPE);
426
+
427
+ faddr++;
428
+ if (faddr > s->cfg.blktype_num_frames[blktype]) {
429
+ /* Restart from 0 and increment block type */
430
+ faddr = 0;
431
+ blktype++;
432
+
433
+ assert(blktype <= MAX_BLOCKTYPE);
434
+
435
+ ARRAY_FIELD_DP32(s->regs, FAR0, BLOCKTYPE, blktype);
436
+ }
437
+
438
+ ARRAY_FIELD_DP32(s->regs, FAR0, FRAME_ADDR, faddr);
439
+}
440
+
441
+static void cfrm_fdri_post_write(RegisterInfo *reg, uint64_t val)
442
+{
443
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
444
+
445
+ if (s->row_configured && s->rowon && s->wcfg) {
446
+
447
+ if (fifo32_num_free(&s->new_f_data) >= N_WORDS_128BIT) {
448
+ fifo32_push(&s->new_f_data, s->regs[R_FDRI0]);
449
+ fifo32_push(&s->new_f_data, s->regs[R_FDRI1]);
450
+ fifo32_push(&s->new_f_data, s->regs[R_FDRI2]);
451
+ fifo32_push(&s->new_f_data, s->regs[R_FDRI3]);
452
+ }
453
+
454
+ if (fifo32_is_full(&s->new_f_data)) {
455
+ uint32_t addr = extract32(s->regs[R_FAR0], 0, 23);
456
+ XlnxCFrame *f = g_new(XlnxCFrame, 1);
457
+
458
+ for (int i = 0; i < FRAME_NUM_WORDS; i++) {
459
+ f->data[i] = fifo32_pop(&s->new_f_data);
460
+ }
461
+
462
+ g_tree_replace(s->cframes, GUINT_TO_POINTER(addr), f);
463
+
464
+ cframe_incr_far(s);
465
+
466
+ fifo32_reset(&s->new_f_data);
467
+ }
468
+ }
469
+}
470
+
471
+static void cfrm_readout_frames(XlnxVersalCFrameReg *s, uint32_t start_addr,
472
+ uint32_t end_addr)
473
+{
474
+ /*
475
+ * NB: when our minimum glib version is at least 2.68 we can improve the
476
+ * performance of the cframe traversal by using g_tree_lookup_node and
477
+ * g_tree_node_next (instead of calling g_tree_lookup for finding each
478
+ * cframe).
479
+ */
480
+ for (uint32_t addr = start_addr; addr < end_addr; addr++) {
481
+ XlnxCFrame *f = g_tree_lookup(s->cframes, GUINT_TO_POINTER(addr));
482
+
483
+ /* Transmit the data if a frame was found */
484
+ if (f) {
485
+ for (int i = 0; i < FRAME_NUM_WORDS; i += 4) {
486
+ XlnxCfiPacket pkt = {};
487
+
488
+ pkt.data[0] = f->data[i];
489
+ pkt.data[1] = f->data[i + 1];
490
+ pkt.data[2] = f->data[i + 2];
491
+ pkt.data[3] = f->data[i + 3];
492
+
493
+ if (s->cfg.cfu_fdro) {
494
+ xlnx_cfi_transfer_packet(s->cfg.cfu_fdro, &pkt);
495
+ }
496
+ }
497
+ }
498
+ }
499
+}
500
+
501
+static void cfrm_frcnt_post_write(RegisterInfo *reg, uint64_t val)
502
+{
503
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
504
+
505
+ if (s->row_configured && s->rowon && s->rcfg) {
506
+ uint32_t start_addr = extract32(s->regs[R_FAR0], 0, 23);
507
+ uint32_t end_addr = start_addr + s->regs[R_FRCNT0] / FRAME_NUM_QWORDS;
508
+
509
+ cfrm_readout_frames(s, start_addr, end_addr);
510
+ }
511
+}
512
+
513
+static void cfrm_cmd_post_write(RegisterInfo *reg, uint64_t val)
514
+{
515
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
516
+
517
+ if (s->row_configured) {
518
+ uint8_t cmd = ARRAY_FIELD_EX32(s->regs, CMD0, CMD);
519
+
520
+ switch (cmd) {
521
+ case CFRAME_CMD_WCFG:
522
+ s->wcfg = true;
523
+ break;
524
+ case CFRAME_CMD_ROWON:
525
+ s->rowon = true;
526
+ break;
527
+ case CFRAME_CMD_ROWOFF:
528
+ s->rowon = false;
529
+ break;
530
+ case CFRAME_CMD_RCFG:
531
+ s->rcfg = true;
532
+ break;
533
+ case CFRAME_CMD_DLPARK:
534
+ s->wcfg = false;
535
+ s->rcfg = false;
536
+ break;
537
+ default:
538
+ break;
539
+ };
540
+ }
541
+}
542
+
543
+static uint64_t cfrm_last_frame_bot_post_read(RegisterInfo *reg,
544
+ uint64_t val64)
545
+{
546
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
547
+ uint64_t val = 0;
548
+
549
+ switch (reg->access->addr) {
550
+ case A_LAST_FRAME_BOT0:
551
+ val = FIELD_DP32(val, LAST_FRAME_BOT0, BLOCKTYPE1_LAST_FRAME_LSB,
552
+ s->cfg.blktype_num_frames[1]);
553
+ val = FIELD_DP32(val, LAST_FRAME_BOT0, BLOCKTYPE0_LAST_FRAME,
554
+ s->cfg.blktype_num_frames[0]);
555
+ break;
556
+ case A_LAST_FRAME_BOT1:
557
+ val = FIELD_DP32(val, LAST_FRAME_BOT1, BLOCKTYPE3_LAST_FRAME_LSB,
558
+ s->cfg.blktype_num_frames[3]);
559
+ val = FIELD_DP32(val, LAST_FRAME_BOT1, BLOCKTYPE2_LAST_FRAME,
560
+ s->cfg.blktype_num_frames[2]);
561
+ val = FIELD_DP32(val, LAST_FRAME_BOT1, BLOCKTYPE1_LAST_FRAME_MSB,
562
+ (s->cfg.blktype_num_frames[1] >> 12));
563
+ break;
564
+ case A_LAST_FRAME_BOT2:
565
+ val = FIELD_DP32(val, LAST_FRAME_BOT2, BLOCKTYPE3_LAST_FRAME_MSB,
566
+ (s->cfg.blktype_num_frames[3] >> 4));
567
+ break;
568
+ case A_LAST_FRAME_BOT3:
569
+ default:
570
+ break;
571
+ }
572
+
573
+ return val;
574
+}
575
+
576
+static uint64_t cfrm_last_frame_top_post_read(RegisterInfo *reg,
577
+ uint64_t val64)
578
+{
579
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
580
+ uint64_t val = 0;
581
+
582
+ switch (reg->access->addr) {
583
+ case A_LAST_FRAME_TOP0:
584
+ val = FIELD_DP32(val, LAST_FRAME_TOP0, BLOCKTYPE5_LAST_FRAME_LSB,
585
+ s->cfg.blktype_num_frames[5]);
586
+ val = FIELD_DP32(val, LAST_FRAME_TOP0, BLOCKTYPE4_LAST_FRAME,
587
+ s->cfg.blktype_num_frames[4]);
588
+ break;
589
+ case A_LAST_FRAME_TOP1:
590
+ val = FIELD_DP32(val, LAST_FRAME_TOP1, BLOCKTYPE6_LAST_FRAME,
591
+ s->cfg.blktype_num_frames[6]);
592
+ val = FIELD_DP32(val, LAST_FRAME_TOP1, BLOCKTYPE5_LAST_FRAME_MSB,
593
+ (s->cfg.blktype_num_frames[5] >> 12));
594
+ break;
595
+ case A_LAST_FRAME_TOP2:
596
+ case A_LAST_FRAME_BOT3:
597
+ default:
598
+ break;
599
+ }
600
+
601
+ return val;
602
+}
603
+
604
+static void cfrm_far_sfr_post_write(RegisterInfo *reg, uint64_t val)
605
+{
606
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(reg->opaque);
607
+
608
+ if (s->row_configured && s->rowon && s->rcfg) {
609
+ uint32_t start_addr = extract32(s->regs[R_FAR_SFR0], 0, 23);
610
+
611
+ /* Readback 1 frame */
612
+ cfrm_readout_frames(s, start_addr, start_addr + 1);
613
+ }
614
+}
615
+
616
+static const RegisterAccessInfo cframe_reg_regs_info[] = {
617
+ { .name = "CRC0", .addr = A_CRC0,
618
+ .rsvd = 0x00000000,
619
+ },{ .name = "CRC1", .addr = A_CRC0,
620
+ .rsvd = 0xffffffff,
621
+ },{ .name = "CRC2", .addr = A_CRC0,
622
+ .rsvd = 0xffffffff,
623
+ },{ .name = "CRC3", .addr = A_CRC0,
624
+ .rsvd = 0xffffffff,
625
+ },{ .name = "FAR0", .addr = A_FAR0,
626
+ .rsvd = 0xfe000000,
627
+ },{ .name = "FAR1", .addr = A_FAR1,
628
+ .rsvd = 0xffffffff,
629
+ },{ .name = "FAR2", .addr = A_FAR2,
630
+ .rsvd = 0xffffffff,
631
+ },{ .name = "FAR3", .addr = A_FAR3,
632
+ .rsvd = 0xffffffff,
633
+ },{ .name = "FAR_SFR0", .addr = A_FAR_SFR0,
634
+ .rsvd = 0xff800000,
635
+ },{ .name = "FAR_SFR1", .addr = A_FAR_SFR1,
636
+ .rsvd = 0xffffffff,
637
+ },{ .name = "FAR_SFR2", .addr = A_FAR_SFR2,
638
+ .rsvd = 0xffffffff,
639
+ },{ .name = "FAR_SFR3", .addr = A_FAR_SFR3,
640
+ .rsvd = 0xffffffff,
641
+ .post_write = cfrm_far_sfr_post_write,
642
+ },{ .name = "FDRI0", .addr = A_FDRI0,
643
+ },{ .name = "FDRI1", .addr = A_FDRI1,
644
+ },{ .name = "FDRI2", .addr = A_FDRI2,
645
+ },{ .name = "FDRI3", .addr = A_FDRI3,
646
+ .post_write = cfrm_fdri_post_write,
647
+ },{ .name = "FRCNT0", .addr = A_FRCNT0,
648
+ .rsvd = 0x00000000,
649
+ },{ .name = "FRCNT1", .addr = A_FRCNT1,
650
+ .rsvd = 0xffffffff,
651
+ },{ .name = "FRCNT2", .addr = A_FRCNT2,
652
+ .rsvd = 0xffffffff,
653
+ },{ .name = "FRCNT3", .addr = A_FRCNT3,
654
+ .rsvd = 0xffffffff,
655
+ .post_write = cfrm_frcnt_post_write
656
+ },{ .name = "CMD0", .addr = A_CMD0,
657
+ .rsvd = 0xffffffe0,
658
+ },{ .name = "CMD1", .addr = A_CMD1,
659
+ .rsvd = 0xffffffff,
660
+ },{ .name = "CMD2", .addr = A_CMD2,
661
+ .rsvd = 0xffffffff,
662
+ },{ .name = "CMD3", .addr = A_CMD3,
663
+ .rsvd = 0xffffffff,
664
+ .post_write = cfrm_cmd_post_write
665
+ },{ .name = "CR_MASK0", .addr = A_CR_MASK0,
666
+ .rsvd = 0x00000000,
667
+ },{ .name = "CR_MASK1", .addr = A_CR_MASK1,
668
+ .rsvd = 0x00000000,
669
+ },{ .name = "CR_MASK2", .addr = A_CR_MASK2,
670
+ .rsvd = 0x00000000,
671
+ },{ .name = "CR_MASK3", .addr = A_CR_MASK3,
672
+ .rsvd = 0xffffffff,
673
+ },{ .name = "CTL0", .addr = A_CTL0,
674
+ .rsvd = 0xfffffff8,
675
+ },{ .name = "CTL1", .addr = A_CTL1,
676
+ .rsvd = 0xffffffff,
677
+ },{ .name = "CTL2", .addr = A_CTL2,
678
+ .rsvd = 0xffffffff,
679
+ },{ .name = "CTL3", .addr = A_CTL3,
680
+ .rsvd = 0xffffffff,
681
+ },{ .name = "CFRM_ISR0", .addr = A_CFRM_ISR0,
682
+ .rsvd = 0xffc04000,
683
+ .w1c = 0x3bfff,
684
+ },{ .name = "CFRM_ISR1", .addr = A_CFRM_ISR1,
685
+ .rsvd = 0xffffffff,
686
+ },{ .name = "CFRM_ISR2", .addr = A_CFRM_ISR2,
687
+ .rsvd = 0xffffffff,
688
+ },{ .name = "CFRM_ISR3", .addr = A_CFRM_ISR3,
689
+ .rsvd = 0xffffffff,
690
+ .post_write = cfrm_isr_postw,
691
+ },{ .name = "CFRM_IMR0", .addr = A_CFRM_IMR0,
692
+ .rsvd = 0xffc04000,
693
+ .ro = 0xfffff,
694
+ .reset = 0x3bfff,
695
+ },{ .name = "CFRM_IMR1", .addr = A_CFRM_IMR1,
696
+ .rsvd = 0xffffffff,
697
+ },{ .name = "CFRM_IMR2", .addr = A_CFRM_IMR2,
698
+ .rsvd = 0xffffffff,
699
+ },{ .name = "CFRM_IMR3", .addr = A_CFRM_IMR3,
700
+ .rsvd = 0xffffffff,
701
+ },{ .name = "CFRM_IER0", .addr = A_CFRM_IER0,
702
+ .rsvd = 0xffc04000,
703
+ },{ .name = "CFRM_IER1", .addr = A_CFRM_IER1,
704
+ .rsvd = 0xffffffff,
705
+ },{ .name = "CFRM_IER2", .addr = A_CFRM_IER2,
706
+ .rsvd = 0xffffffff,
707
+ },{ .name = "CFRM_IER3", .addr = A_CFRM_IER3,
708
+ .rsvd = 0xffffffff,
709
+ .pre_write = cfrm_ier_prew,
710
+ },{ .name = "CFRM_IDR0", .addr = A_CFRM_IDR0,
711
+ .rsvd = 0xffc04000,
712
+ },{ .name = "CFRM_IDR1", .addr = A_CFRM_IDR1,
713
+ .rsvd = 0xffffffff,
714
+ },{ .name = "CFRM_IDR2", .addr = A_CFRM_IDR2,
715
+ .rsvd = 0xffffffff,
716
+ },{ .name = "CFRM_IDR3", .addr = A_CFRM_IDR3,
717
+ .rsvd = 0xffffffff,
718
+ .pre_write = cfrm_idr_prew,
719
+ },{ .name = "CFRM_ITR0", .addr = A_CFRM_ITR0,
720
+ .rsvd = 0xffc04000,
721
+ },{ .name = "CFRM_ITR1", .addr = A_CFRM_ITR1,
722
+ .rsvd = 0xffffffff,
723
+ },{ .name = "CFRM_ITR2", .addr = A_CFRM_ITR2,
724
+ .rsvd = 0xffffffff,
725
+ },{ .name = "CFRM_ITR3", .addr = A_CFRM_ITR3,
726
+ .rsvd = 0xffffffff,
727
+ .pre_write = cfrm_itr_prew,
728
+ },{ .name = "SEU_SYNDRM00", .addr = A_SEU_SYNDRM00,
729
+ },{ .name = "SEU_SYNDRM01", .addr = A_SEU_SYNDRM01,
730
+ },{ .name = "SEU_SYNDRM02", .addr = A_SEU_SYNDRM02,
731
+ },{ .name = "SEU_SYNDRM03", .addr = A_SEU_SYNDRM03,
732
+ },{ .name = "SEU_SYNDRM10", .addr = A_SEU_SYNDRM10,
733
+ },{ .name = "SEU_SYNDRM11", .addr = A_SEU_SYNDRM11,
734
+ },{ .name = "SEU_SYNDRM12", .addr = A_SEU_SYNDRM12,
735
+ },{ .name = "SEU_SYNDRM13", .addr = A_SEU_SYNDRM13,
736
+ },{ .name = "SEU_SYNDRM20", .addr = A_SEU_SYNDRM20,
737
+ },{ .name = "SEU_SYNDRM21", .addr = A_SEU_SYNDRM21,
738
+ },{ .name = "SEU_SYNDRM22", .addr = A_SEU_SYNDRM22,
739
+ },{ .name = "SEU_SYNDRM23", .addr = A_SEU_SYNDRM23,
740
+ },{ .name = "SEU_SYNDRM30", .addr = A_SEU_SYNDRM30,
741
+ },{ .name = "SEU_SYNDRM31", .addr = A_SEU_SYNDRM31,
742
+ },{ .name = "SEU_SYNDRM32", .addr = A_SEU_SYNDRM32,
743
+ },{ .name = "SEU_SYNDRM33", .addr = A_SEU_SYNDRM33,
744
+ },{ .name = "SEU_VIRTUAL_SYNDRM0", .addr = A_SEU_VIRTUAL_SYNDRM0,
745
+ },{ .name = "SEU_VIRTUAL_SYNDRM1", .addr = A_SEU_VIRTUAL_SYNDRM1,
746
+ },{ .name = "SEU_VIRTUAL_SYNDRM2", .addr = A_SEU_VIRTUAL_SYNDRM2,
747
+ },{ .name = "SEU_VIRTUAL_SYNDRM3", .addr = A_SEU_VIRTUAL_SYNDRM3,
748
+ },{ .name = "SEU_CRC0", .addr = A_SEU_CRC0,
749
+ },{ .name = "SEU_CRC1", .addr = A_SEU_CRC1,
750
+ },{ .name = "SEU_CRC2", .addr = A_SEU_CRC2,
751
+ },{ .name = "SEU_CRC3", .addr = A_SEU_CRC3,
752
+ },{ .name = "CFRAME_FAR_BOT0", .addr = A_CFRAME_FAR_BOT0,
753
+ },{ .name = "CFRAME_FAR_BOT1", .addr = A_CFRAME_FAR_BOT1,
754
+ },{ .name = "CFRAME_FAR_BOT2", .addr = A_CFRAME_FAR_BOT2,
755
+ },{ .name = "CFRAME_FAR_BOT3", .addr = A_CFRAME_FAR_BOT3,
756
+ },{ .name = "CFRAME_FAR_TOP0", .addr = A_CFRAME_FAR_TOP0,
757
+ },{ .name = "CFRAME_FAR_TOP1", .addr = A_CFRAME_FAR_TOP1,
758
+ },{ .name = "CFRAME_FAR_TOP2", .addr = A_CFRAME_FAR_TOP2,
759
+ },{ .name = "CFRAME_FAR_TOP3", .addr = A_CFRAME_FAR_TOP3,
760
+ },{ .name = "LAST_FRAME_BOT0", .addr = A_LAST_FRAME_BOT0,
761
+ .ro = 0xffffffff,
762
+ .post_read = cfrm_last_frame_bot_post_read,
763
+ },{ .name = "LAST_FRAME_BOT1", .addr = A_LAST_FRAME_BOT1,
764
+ .ro = 0xffffffff,
765
+ .post_read = cfrm_last_frame_bot_post_read,
766
+ },{ .name = "LAST_FRAME_BOT2", .addr = A_LAST_FRAME_BOT2,
767
+ .ro = 0xffffffff,
768
+ .post_read = cfrm_last_frame_bot_post_read,
769
+ },{ .name = "LAST_FRAME_BOT3", .addr = A_LAST_FRAME_BOT3,
770
+ .ro = 0xffffffff,
771
+ .post_read = cfrm_last_frame_bot_post_read,
772
+ },{ .name = "LAST_FRAME_TOP0", .addr = A_LAST_FRAME_TOP0,
773
+ .ro = 0xffffffff,
774
+ .post_read = cfrm_last_frame_top_post_read,
775
+ },{ .name = "LAST_FRAME_TOP1", .addr = A_LAST_FRAME_TOP1,
776
+ .ro = 0xffffffff,
777
+ .post_read = cfrm_last_frame_top_post_read,
778
+ },{ .name = "LAST_FRAME_TOP2", .addr = A_LAST_FRAME_TOP2,
779
+ .ro = 0xffffffff,
780
+ .post_read = cfrm_last_frame_top_post_read,
781
+ },{ .name = "LAST_FRAME_TOP3", .addr = A_LAST_FRAME_TOP3,
782
+ .ro = 0xffffffff,
783
+ .post_read = cfrm_last_frame_top_post_read,
784
+ }
785
+};
786
+
787
+static void cframe_reg_cfi_transfer_packet(XlnxCfiIf *cfi_if,
788
+ XlnxCfiPacket *pkt)
789
+{
790
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(cfi_if);
791
+ uint64_t we = MAKE_64BIT_MASK(0, 4 * 8);
792
+
793
+ if (!s->row_configured) {
176
+ return;
794
+ return;
177
+ }
795
+ }
178
+
796
+
179
+ count = st->regs[R_TIM_LOADVAL];
797
+ switch (pkt->reg_addr) {
180
+ ptimer_set_limit(st->ptimer, count, 1);
798
+ case CFRAME_FAR:
181
+ ptimer_run(st->ptimer, 1);
799
+ s->regs[R_FAR0] = pkt->data[0];
182
+}
800
+ break;
183
+
801
+ case CFRAME_SFR:
184
+static uint64_t
802
+ s->regs[R_FAR_SFR0] = pkt->data[0];
185
+timer_read(void *opaque, hwaddr offset, unsigned int size)
803
+ register_write(&s->regs_info[R_FAR_SFR3], 0,
186
+{
804
+ we, object_get_typename(OBJECT(s)),
187
+ MSSTimerState *t = opaque;
805
+ XLNX_VERSAL_CFRAME_REG_ERR_DEBUG);
188
+ hwaddr addr;
806
+ break;
189
+ struct Msf2Timer *st;
807
+ case CFRAME_FDRI:
190
+ uint32_t ret = 0;
808
+ s->regs[R_FDRI0] = pkt->data[0];
191
+ int timer = 0;
809
+ s->regs[R_FDRI1] = pkt->data[1];
192
+ int isr;
810
+ s->regs[R_FDRI2] = pkt->data[2];
193
+ int ier;
811
+ register_write(&s->regs_info[R_FDRI3], pkt->data[3],
194
+
812
+ we, object_get_typename(OBJECT(s)),
195
+ addr = offset >> 2;
813
+ XLNX_VERSAL_CFRAME_REG_ERR_DEBUG);
196
+ /*
814
+ break;
197
+ * Two independent timers have the same base address.
815
+ case CFRAME_CMD:
198
+ * Based on the address passed, figure out which timer is being used.
816
+ ARRAY_FIELD_DP32(s->regs, CMD0, CMD, pkt->data[0]);
199
+ */
817
+
200
+ if ((addr >= R_TIM1_MAX) && (addr < NUM_TIMERS * R_TIM1_MAX)) {
818
+ register_write(&s->regs_info[R_CMD3], 0,
201
+ timer = 1;
819
+ we, object_get_typename(OBJECT(s)),
202
+ addr -= R_TIM1_MAX;
820
+ XLNX_VERSAL_CFRAME_REG_ERR_DEBUG);
203
+ }
821
+ break;
204
+
205
+ st = &t->timers[timer];
206
+
207
+ switch (addr) {
208
+ case R_TIM_VAL:
209
+ ret = ptimer_get_count(st->ptimer);
210
+ break;
211
+
212
+ case R_TIM_MIS:
213
+ isr = !!(st->regs[R_TIM_RIS] & TIMER_RIS_ACK);
214
+ ier = !!(st->regs[R_TIM_CTRL] & TIMER_CTRL_INTR);
215
+ ret = ier & isr;
216
+ break;
217
+
218
+ default:
822
+ default:
219
+ if (addr < R_TIM1_MAX) {
823
+ break;
220
+ ret = st->regs[addr];
824
+ }
221
+ } else {
825
+}
222
+ qemu_log_mask(LOG_GUEST_ERROR,
826
+
223
+ TYPE_MSS_TIMER": 64-bit mode not supported\n");
827
+static uint64_t cframe_reg_fdri_read(void *opaque, hwaddr addr, unsigned size)
224
+ return ret;
828
+{
225
+ }
829
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Unsupported read from addr=%"
226
+ break;
830
+ HWADDR_PRIx "\n", __func__, addr);
227
+ }
831
+ return 0;
228
+
832
+}
229
+ DB_PRINT("timer=%d 0x%" HWADDR_PRIx "=0x%" PRIx32, timer, offset,
833
+
230
+ ret);
834
+static void cframe_reg_fdri_write(void *opaque, hwaddr addr, uint64_t value,
231
+ return ret;
835
+ unsigned size)
232
+}
836
+{
233
+
837
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(opaque);
234
+static void
838
+ uint32_t wfifo[WFIFO_SZ];
235
+timer_write(void *opaque, hwaddr offset,
839
+
236
+ uint64_t val64, unsigned int size)
840
+ if (update_wfifo(addr, value, s->wfifo, wfifo)) {
237
+{
841
+ uint64_t we = MAKE_64BIT_MASK(0, 4 * 8);
238
+ MSSTimerState *t = opaque;
842
+
239
+ hwaddr addr;
843
+ s->regs[R_FDRI0] = wfifo[0];
240
+ struct Msf2Timer *st;
844
+ s->regs[R_FDRI1] = wfifo[1];
241
+ int timer = 0;
845
+ s->regs[R_FDRI2] = wfifo[2];
242
+ uint32_t value = val64;
846
+ register_write(&s->regs_info[R_FDRI3], wfifo[3],
243
+
847
+ we, object_get_typename(OBJECT(s)),
244
+ addr = offset >> 2;
848
+ XLNX_VERSAL_CFRAME_REG_ERR_DEBUG);
245
+ /*
849
+ }
246
+ * Two independent timers have the same base address.
850
+}
247
+ * Based on the addr passed, figure out which timer is being used.
851
+
248
+ */
852
+static void cframe_reg_reset_enter(Object *obj, ResetType type)
249
+ if ((addr >= R_TIM1_MAX) && (addr < NUM_TIMERS * R_TIM1_MAX)) {
853
+{
250
+ timer = 1;
854
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(obj);
251
+ addr -= R_TIM1_MAX;
855
+ unsigned int i;
252
+ }
856
+
253
+
857
+ for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
254
+ st = &t->timers[timer];
858
+ register_reset(&s->regs_info[i]);
255
+
859
+ }
256
+ DB_PRINT("addr=0x%" HWADDR_PRIx " val=0x%" PRIx32 " (timer=%d)", offset,
860
+ memset(s->wfifo, 0, WFIFO_SZ * sizeof(uint32_t));
257
+ value, timer);
861
+ fifo32_reset(&s->new_f_data);
258
+
862
+
259
+ switch (addr) {
863
+ if (g_tree_nnodes(s->cframes)) {
260
+ case R_TIM_CTRL:
864
+ /*
261
+ st->regs[R_TIM_CTRL] = value;
865
+ * Take a reference so when g_tree_destroy() unrefs it we keep the
262
+ timer_update(st);
866
+ * GTree and only destroy its contents. NB: when our minimum
263
+ break;
867
+ * glib version is at least 2.70 we could use g_tree_remove_all().
264
+
868
+ */
265
+ case R_TIM_RIS:
869
+ g_tree_ref(s->cframes);
266
+ if (value & TIMER_RIS_ACK) {
870
+ g_tree_destroy(s->cframes);
267
+ st->regs[R_TIM_RIS] &= ~TIMER_RIS_ACK;
871
+ }
268
+ }
872
+}
269
+ break;
873
+
270
+
874
+static void cframe_reg_reset_hold(Object *obj)
271
+ case R_TIM_LOADVAL:
875
+{
272
+ st->regs[R_TIM_LOADVAL] = value;
876
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(obj);
273
+ if (st->regs[R_TIM_CTRL] & TIMER_CTRL_ENBL) {
877
+
274
+ timer_update(st);
878
+ cfrm_imr_update_irq(s);
275
+ }
879
+}
276
+ break;
880
+
277
+
881
+static const MemoryRegionOps cframe_reg_ops = {
278
+ case R_TIM_BGLOADVAL:
882
+ .read = register_read_memory,
279
+ st->regs[R_TIM_BGLOADVAL] = value;
883
+ .write = register_write_memory,
280
+ st->regs[R_TIM_LOADVAL] = value;
884
+ .endianness = DEVICE_LITTLE_ENDIAN,
281
+ break;
885
+ .valid = {
282
+
886
+ .min_access_size = 4,
283
+ case R_TIM_VAL:
887
+ .max_access_size = 4,
284
+ case R_TIM_MIS:
888
+ },
285
+ break;
889
+};
286
+
890
+
287
+ default:
891
+static const MemoryRegionOps cframe_reg_fdri_ops = {
288
+ if (addr < R_TIM1_MAX) {
892
+ .read = cframe_reg_fdri_read,
289
+ st->regs[addr] = value;
893
+ .write = cframe_reg_fdri_write,
290
+ } else {
894
+ .endianness = DEVICE_LITTLE_ENDIAN,
291
+ qemu_log_mask(LOG_GUEST_ERROR,
895
+ .valid = {
292
+ TYPE_MSS_TIMER": 64-bit mode not supported\n");
896
+ .min_access_size = 4,
897
+ .max_access_size = 4,
898
+ },
899
+};
900
+
901
+static void cframe_reg_realize(DeviceState *dev, Error **errp)
902
+{
903
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(dev);
904
+
905
+ for (int i = 0; i < ARRAY_SIZE(s->cfg.blktype_num_frames); i++) {
906
+ if (s->cfg.blktype_num_frames[i] > MAX_BLOCKTYPE_FRAMES) {
907
+ error_setg(errp,
908
+ "blktype-frames%d > 0xFFFFF (max frame per block)",
909
+ i);
293
+ return;
910
+ return;
294
+ }
911
+ }
295
+ break;
912
+ if (s->cfg.blktype_num_frames[i]) {
296
+ }
913
+ s->row_configured = true;
297
+ timer_update_irq(st);
914
+ }
298
+}
915
+ }
299
+
916
+}
300
+static const MemoryRegionOps timer_ops = {
917
+
301
+ .read = timer_read,
918
+static void cframe_reg_init(Object *obj)
302
+ .write = timer_write,
919
+{
303
+ .endianness = DEVICE_NATIVE_ENDIAN,
920
+ XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(obj);
304
+ .valid = {
921
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
305
+ .min_access_size = 1,
922
+ RegisterInfoArray *reg_array;
306
+ .max_access_size = 4
923
+
307
+ }
924
+ memory_region_init(&s->iomem, obj, TYPE_XLNX_VERSAL_CFRAME_REG,
308
+};
925
+ CFRAME_REG_R_MAX * 4);
309
+
926
+ reg_array =
310
+static void timer_hit(void *opaque)
927
+ register_init_block32(DEVICE(obj), cframe_reg_regs_info,
311
+{
928
+ ARRAY_SIZE(cframe_reg_regs_info),
312
+ struct Msf2Timer *st = opaque;
929
+ s->regs_info, s->regs,
313
+
930
+ &cframe_reg_ops,
314
+ st->regs[R_TIM_RIS] |= TIMER_RIS_ACK;
931
+ XLNX_VERSAL_CFRAME_REG_ERR_DEBUG,
315
+
932
+ CFRAME_REG_R_MAX * 4);
316
+ if (!(st->regs[R_TIM_CTRL] & TIMER_CTRL_ONESHOT)) {
933
+ memory_region_add_subregion(&s->iomem,
317
+ timer_update(st);
934
+ 0x0,
318
+ }
935
+ &reg_array->mem);
319
+ timer_update_irq(st);
936
+ sysbus_init_mmio(sbd, &s->iomem);
320
+}
937
+ memory_region_init_io(&s->iomem_fdri, obj, &cframe_reg_fdri_ops, s,
321
+
938
+ TYPE_XLNX_VERSAL_CFRAME_REG "-fdri",
322
+static void mss_timer_init(Object *obj)
939
+ KEYHOLE_STREAM_4K);
323
+{
940
+ sysbus_init_mmio(sbd, &s->iomem_fdri);
324
+ MSSTimerState *t = MSS_TIMER(obj);
941
+ sysbus_init_irq(sbd, &s->irq_cfrm_imr);
325
+ int i;
942
+
326
+
943
+ s->cframes = g_tree_new_full((GCompareDataFunc)int_cmp, NULL,
327
+ /* Init all the ptimers. */
944
+ NULL, (GDestroyNotify)g_free);
328
+ for (i = 0; i < NUM_TIMERS; i++) {
945
+ fifo32_create(&s->new_f_data, FRAME_NUM_WORDS);
329
+ struct Msf2Timer *st = &t->timers[i];
946
+}
330
+
947
+
331
+ st->bh = qemu_bh_new(timer_hit, st);
948
+static const VMStateDescription vmstate_cframe = {
332
+ st->ptimer = ptimer_init(st->bh, PTIMER_POLICY_DEFAULT);
949
+ .name = "cframe",
333
+ ptimer_set_freq(st->ptimer, t->freq_hz);
334
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &st->irq);
335
+ }
336
+
337
+ memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t, TYPE_MSS_TIMER,
338
+ NUM_TIMERS * R_TIM1_MAX * 4);
339
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &t->mmio);
340
+}
341
+
342
+static const VMStateDescription vmstate_timers = {
343
+ .name = "mss-timer-block",
344
+ .version_id = 1,
950
+ .version_id = 1,
345
+ .minimum_version_id = 1,
951
+ .minimum_version_id = 1,
346
+ .fields = (VMStateField[]) {
952
+ .fields = (VMStateField[]) {
347
+ VMSTATE_PTIMER(ptimer, struct Msf2Timer),
953
+ VMSTATE_UINT32_ARRAY(data, XlnxCFrame, FRAME_NUM_WORDS),
348
+ VMSTATE_UINT32_ARRAY(regs, struct Msf2Timer, R_TIM1_MAX),
349
+ VMSTATE_END_OF_LIST()
954
+ VMSTATE_END_OF_LIST()
350
+ }
955
+ }
351
+};
956
+};
352
+
957
+
353
+static const VMStateDescription vmstate_mss_timer = {
958
+static const VMStateDescription vmstate_cframe_reg = {
354
+ .name = TYPE_MSS_TIMER,
959
+ .name = TYPE_XLNX_VERSAL_CFRAME_REG,
355
+ .version_id = 1,
960
+ .version_id = 1,
356
+ .minimum_version_id = 1,
961
+ .minimum_version_id = 1,
357
+ .fields = (VMStateField[]) {
962
+ .fields = (VMStateField[]) {
358
+ VMSTATE_UINT32(freq_hz, MSSTimerState),
963
+ VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFrameReg, 4),
359
+ VMSTATE_STRUCT_ARRAY(timers, MSSTimerState, NUM_TIMERS, 0,
964
+ VMSTATE_UINT32_ARRAY(regs, XlnxVersalCFrameReg, CFRAME_REG_R_MAX),
360
+ vmstate_timers, struct Msf2Timer),
965
+ VMSTATE_BOOL(rowon, XlnxVersalCFrameReg),
361
+ VMSTATE_END_OF_LIST()
966
+ VMSTATE_BOOL(wcfg, XlnxVersalCFrameReg),
967
+ VMSTATE_BOOL(rcfg, XlnxVersalCFrameReg),
968
+ VMSTATE_GTREE_DIRECT_KEY_V(cframes, XlnxVersalCFrameReg, 1,
969
+ &vmstate_cframe, XlnxCFrame),
970
+ VMSTATE_FIFO32(new_f_data, XlnxVersalCFrameReg),
971
+ VMSTATE_END_OF_LIST(),
362
+ }
972
+ }
363
+};
973
+};
364
+
974
+
365
+static Property mss_timer_properties[] = {
975
+static Property cframe_regs_props[] = {
366
+ /* Libero GUI shows 100 MHz as default for clocks */
976
+ DEFINE_PROP_LINK("cfu-fdro", XlnxVersalCFrameReg, cfg.cfu_fdro,
367
+ DEFINE_PROP_UINT32("clock-frequency", MSSTimerState, freq_hz,
977
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
368
+ 100 * 1000000),
978
+ DEFINE_PROP_UINT32("blktype0-frames", XlnxVersalCFrameReg,
979
+ cfg.blktype_num_frames[0], 0),
980
+ DEFINE_PROP_UINT32("blktype1-frames", XlnxVersalCFrameReg,
981
+ cfg.blktype_num_frames[1], 0),
982
+ DEFINE_PROP_UINT32("blktype2-frames", XlnxVersalCFrameReg,
983
+ cfg.blktype_num_frames[2], 0),
984
+ DEFINE_PROP_UINT32("blktype3-frames", XlnxVersalCFrameReg,
985
+ cfg.blktype_num_frames[3], 0),
986
+ DEFINE_PROP_UINT32("blktype4-frames", XlnxVersalCFrameReg,
987
+ cfg.blktype_num_frames[4], 0),
988
+ DEFINE_PROP_UINT32("blktype5-frames", XlnxVersalCFrameReg,
989
+ cfg.blktype_num_frames[5], 0),
990
+ DEFINE_PROP_UINT32("blktype6-frames", XlnxVersalCFrameReg,
991
+ cfg.blktype_num_frames[6], 0),
369
+ DEFINE_PROP_END_OF_LIST(),
992
+ DEFINE_PROP_END_OF_LIST(),
370
+};
993
+};
371
+
994
+
372
+static void mss_timer_class_init(ObjectClass *klass, void *data)
995
+static void cframe_reg_class_init(ObjectClass *klass, void *data)
373
+{
996
+{
997
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
374
+ DeviceClass *dc = DEVICE_CLASS(klass);
998
+ DeviceClass *dc = DEVICE_CLASS(klass);
375
+
999
+ XlnxCfiIfClass *xcic = XLNX_CFI_IF_CLASS(klass);
376
+ dc->props = mss_timer_properties;
1000
+
377
+ dc->vmsd = &vmstate_mss_timer;
1001
+ dc->vmsd = &vmstate_cframe_reg;
378
+}
1002
+ dc->realize = cframe_reg_realize;
379
+
1003
+ rc->phases.enter = cframe_reg_reset_enter;
380
+static const TypeInfo mss_timer_info = {
1004
+ rc->phases.hold = cframe_reg_reset_hold;
381
+ .name = TYPE_MSS_TIMER,
1005
+ device_class_set_props(dc, cframe_regs_props);
1006
+ xcic->cfi_transfer_packet = cframe_reg_cfi_transfer_packet;
1007
+}
1008
+
1009
+static const TypeInfo cframe_reg_info = {
1010
+ .name = TYPE_XLNX_VERSAL_CFRAME_REG,
382
+ .parent = TYPE_SYS_BUS_DEVICE,
1011
+ .parent = TYPE_SYS_BUS_DEVICE,
383
+ .instance_size = sizeof(MSSTimerState),
1012
+ .instance_size = sizeof(XlnxVersalCFrameReg),
384
+ .instance_init = mss_timer_init,
1013
+ .class_init = cframe_reg_class_init,
385
+ .class_init = mss_timer_class_init,
1014
+ .instance_init = cframe_reg_init,
1015
+ .interfaces = (InterfaceInfo[]) {
1016
+ { TYPE_XLNX_CFI_IF },
1017
+ { }
1018
+ }
386
+};
1019
+};
387
+
1020
+
388
+static void mss_timer_register_types(void)
1021
+static void cframe_reg_register_types(void)
389
+{
1022
+{
390
+ type_register_static(&mss_timer_info);
1023
+ type_register_static(&cframe_reg_info);
391
+}
1024
+}
392
+
1025
+
393
+type_init(mss_timer_register_types)
1026
+type_init(cframe_reg_register_types)
1027
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
1028
index XXXXXXX..XXXXXXX 100644
1029
--- a/hw/misc/meson.build
1030
+++ b/hw/misc/meson.build
1031
@@ -XXX,XX +XXX,XX @@ system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files(
1032
'xlnx-versal-pmc-iou-slcr.c',
1033
'xlnx-versal-cfu.c',
1034
'xlnx-cfi-if.c',
1035
+ 'xlnx-versal-cframe-reg.c',
1036
))
1037
system_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c'))
1038
system_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c'))
394
--
2.7.4

--
2.34.1
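For orientation, here is a minimal sketch of how a board or SoC model might instantiate the CFRAME_REG device introduced above, using the "cfu-fdro" link and the "blktypeN-frames" properties from cframe_regs_props. The function name, the fdro object, the frame counts and the base addresses are assumptions for illustration; they are not taken from this series.

    #include "qemu/osdep.h"
    #include "hw/qdev-properties.h"
    #include "hw/sysbus.h"
    #include "hw/misc/xlnx-versal-cframe-reg.h"
    #include "qapi/error.h"

    /* Hedged sketch: create one CFRAME_REG instance, link it to a CFU FDRO
     * object and set a couple of per-blocktype frame counts before realize.
     * All names and values outside the patch are illustrative only. */
    static void versal_sketch_create_cframe_reg(Object *fdro, hwaddr reg_base,
                                                hwaddr fdri_base)
    {
        DeviceState *dev = qdev_new(TYPE_XLNX_VERSAL_CFRAME_REG);

        object_property_set_link(OBJECT(dev), "cfu-fdro", fdro, &error_abort);
        qdev_prop_set_uint32(dev, "blktype0-frames", 0x100); /* example value */
        qdev_prop_set_uint32(dev, "blktype1-frames", 0x80);  /* example value */
        sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_abort);
        /* MMIO region 0 is the register interface, region 1 the FDRI keyhole. */
        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, reg_base);
        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, fdri_base);
    }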
1
For the v8M security extension, some exceptions must be banked
1
From: Francisco Iglesias <francisco.iglesias@amd.com>
2
between security states. Add the new vecinfo array which holds
3
the state for the banked exceptions and migrate it if the
4
CPU the NVIC is attached to implements the security extension.
5
2
3
Introduce a model of Xilinx Versal's Configuration Frame broadcast
4
controller (CFRAME_BCAST_REG).
5
6
Signed-off-by: Francisco Iglesias <francisco.iglesias@amd.com>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Message-id: 20230831165701.2016397-7-francisco.iglesias@amd.com
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
---
10
---
9
include/hw/intc/armv7m_nvic.h | 14 ++++++++++++
11
include/hw/misc/xlnx-versal-cframe-reg.h | 17 +++
10
hw/intc/armv7m_nvic.c | 53 ++++++++++++++++++++++++++++++++++++++++++-
12
hw/misc/xlnx-versal-cframe-reg.c | 161 +++++++++++++++++++++++
11
2 files changed, 66 insertions(+), 1 deletion(-)
13
2 files changed, 178 insertions(+)
12
14
13
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
15
diff --git a/include/hw/misc/xlnx-versal-cframe-reg.h b/include/hw/misc/xlnx-versal-cframe-reg.h
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/include/hw/intc/armv7m_nvic.h
17
--- a/include/hw/misc/xlnx-versal-cframe-reg.h
16
+++ b/include/hw/intc/armv7m_nvic.h
18
+++ b/include/hw/misc/xlnx-versal-cframe-reg.h
17
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@
18
20
#define TYPE_XLNX_VERSAL_CFRAME_REG "xlnx,cframe-reg"
19
/* Highest permitted number of exceptions (architectural limit) */
21
OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameReg, XLNX_VERSAL_CFRAME_REG)
20
#define NVIC_MAX_VECTORS 512
22
21
+/* Number of internal exceptions */
23
+#define TYPE_XLNX_VERSAL_CFRAME_BCAST_REG "xlnx.cframe-bcast-reg"
22
+#define NVIC_INTERNAL_VECTORS 16
24
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameBcastReg,
23
25
+ XLNX_VERSAL_CFRAME_BCAST_REG)
24
typedef struct VecInfo {
26
+
25
/* Exception priorities can range from -3 to 255; only the unmodifiable
27
/*
26
@@ -XXX,XX +XXX,XX @@ typedef struct NVICState {
28
* The registers in this module are 128 bits wide but it is ok to write
27
ARMCPU *cpu;
29
* and read them through 4 sequential 32 bit accesses (address[3:2] = 0,
28
30
@@ -XXX,XX +XXX,XX @@ struct XlnxVersalCFrameReg {
29
VecInfo vectors[NVIC_MAX_VECTORS];
31
bool row_configured;
30
+ /* If the v8M security extension is implemented, some of the internal
32
};
31
+ * exceptions are banked between security states (ie there exists both
33
32
+ * a Secure and a NonSecure version of the exception and its state):
34
+struct XlnxVersalCFrameBcastReg {
33
+ * HardFault, MemManage, UsageFault, SVCall, PendSV, SysTick (R_PJHV)
35
+ SysBusDevice parent_obj;
34
+ * The rest (including all the external exceptions) are not banked, though
36
+ MemoryRegion iomem_reg;
35
+ * they may be configurable to target either Secure or NonSecure state.
37
+ MemoryRegion iomem_fdri;
36
+ * We store the secure exception state in sec_vectors[] for the banked
38
+
37
+ * exceptions, and otherwise use only vectors[] (including for exceptions
39
+ /* 128-bit wfifo. */
38
+ * like SecureFault that unconditionally target Secure state).
40
+ uint32_t wfifo[WFIFO_SZ];
39
+ * Entries in sec_vectors[] for non-banked exception numbers are unused.
41
+
40
+ */
42
+ struct {
41
+ VecInfo sec_vectors[NVIC_INTERNAL_VECTORS];
43
+ XlnxCfiIf *cframe[15];
42
uint32_t prigroup;
44
+ } cfg;
43
45
+};
44
/* vectpending and exception_prio are both cached state that can
46
+
45
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
47
#endif
48
diff --git a/hw/misc/xlnx-versal-cframe-reg.c b/hw/misc/xlnx-versal-cframe-reg.c
46
index XXXXXXX..XXXXXXX 100644
49
index XXXXXXX..XXXXXXX 100644
47
--- a/hw/intc/armv7m_nvic.c
50
--- a/hw/misc/xlnx-versal-cframe-reg.c
48
+++ b/hw/intc/armv7m_nvic.c
51
+++ b/hw/misc/xlnx-versal-cframe-reg.c
49
@@ -XXX,XX +XXX,XX @@
52
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps cframe_reg_fdri_ops = {
50
* For historical reasons QEMU tends to use "interrupt" and
53
},
51
* "exception" more or less interchangeably.
54
};
52
*/
55
53
-#define NVIC_FIRST_IRQ 16
56
+static uint64_t cframes_bcast_reg_read(void *opaque, hwaddr addr, unsigned size)
54
+#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
57
+{
55
#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)
58
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Unsupported read from addr=%"
56
59
+ HWADDR_PRIx "\n", __func__, addr);
57
/* Effective running priority of the CPU when no exception is active
60
+ return 0;
58
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_VecInfo = {
61
+}
59
}
62
+
60
};
63
+static void cframes_bcast_write(XlnxVersalCFrameBcastReg *s, uint8_t reg_addr,
61
64
+ uint32_t *wfifo)
62
+static bool nvic_security_needed(void *opaque)
65
+{
63
+{
66
+ XlnxCfiPacket pkt = {
64
+ NVICState *s = opaque;
67
+ .reg_addr = reg_addr,
65
+
68
+ .data[0] = wfifo[0],
66
+ return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
69
+ .data[1] = wfifo[1],
67
+}
70
+ .data[2] = wfifo[2],
68
+
71
+ .data[3] = wfifo[3]
69
+static int nvic_security_post_load(void *opaque, int version_id)
72
+ };
70
+{
73
+
71
+ NVICState *s = opaque;
74
+ for (int i = 0; i < ARRAY_SIZE(s->cfg.cframe); i++) {
72
+ int i;
75
+ if (s->cfg.cframe[i]) {
73
+
76
+ xlnx_cfi_transfer_packet(s->cfg.cframe[i], &pkt);
74
+ /* Check for out of range priority settings */
75
+ if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1) {
76
+ return 1;
77
+ }
78
+ for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
79
+ if (s->sec_vectors[i].prio & ~0xff) {
80
+ return 1;
81
+ }
77
+ }
82
+ }
78
+ }
79
+}
80
+
81
+static void cframes_bcast_reg_write(void *opaque, hwaddr addr, uint64_t value,
82
+ unsigned size)
83
+{
84
+ XlnxVersalCFrameBcastReg *s = XLNX_VERSAL_CFRAME_BCAST_REG(opaque);
85
+ uint32_t wfifo[WFIFO_SZ];
86
+
87
+ if (update_wfifo(addr, value, s->wfifo, wfifo)) {
88
+ uint8_t reg_addr = extract32(addr, 4, 6);
89
+
90
+ cframes_bcast_write(s, reg_addr, wfifo);
91
+ }
92
+}
93
+
94
+static uint64_t cframes_bcast_fdri_read(void *opaque, hwaddr addr,
95
+ unsigned size)
96
+{
97
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Unsupported read from addr=%"
98
+ HWADDR_PRIx "\n", __func__, addr);
83
+ return 0;
99
+ return 0;
84
+}
100
+}
85
+
101
+
86
+static const VMStateDescription vmstate_nvic_security = {
102
+static void cframes_bcast_fdri_write(void *opaque, hwaddr addr, uint64_t value,
87
+ .name = "nvic/m-security",
103
+ unsigned size)
104
+{
105
+ XlnxVersalCFrameBcastReg *s = XLNX_VERSAL_CFRAME_BCAST_REG(opaque);
106
+ uint32_t wfifo[WFIFO_SZ];
107
+
108
+ if (update_wfifo(addr, value, s->wfifo, wfifo)) {
109
+ cframes_bcast_write(s, CFRAME_FDRI, wfifo);
110
+ }
111
+}
112
+
113
+static const MemoryRegionOps cframes_bcast_reg_reg_ops = {
114
+ .read = cframes_bcast_reg_read,
115
+ .write = cframes_bcast_reg_write,
116
+ .endianness = DEVICE_LITTLE_ENDIAN,
117
+ .valid = {
118
+ .min_access_size = 4,
119
+ .max_access_size = 4,
120
+ },
121
+};
122
+
123
+static const MemoryRegionOps cframes_bcast_reg_fdri_ops = {
124
+ .read = cframes_bcast_fdri_read,
125
+ .write = cframes_bcast_fdri_write,
126
+ .endianness = DEVICE_LITTLE_ENDIAN,
127
+ .valid = {
128
+ .min_access_size = 4,
129
+ .max_access_size = 4,
130
+ },
131
+};
132
+
133
static void cframe_reg_realize(DeviceState *dev, Error **errp)
134
{
135
XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(dev);
136
@@ -XXX,XX +XXX,XX @@ static Property cframe_regs_props[] = {
137
DEFINE_PROP_END_OF_LIST(),
138
};
139
140
+static void cframe_bcast_reg_init(Object *obj)
141
+{
142
+ XlnxVersalCFrameBcastReg *s = XLNX_VERSAL_CFRAME_BCAST_REG(obj);
143
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
144
+
145
+ memory_region_init_io(&s->iomem_reg, obj, &cframes_bcast_reg_reg_ops, s,
146
+ TYPE_XLNX_VERSAL_CFRAME_BCAST_REG, KEYHOLE_STREAM_4K);
147
+ memory_region_init_io(&s->iomem_fdri, obj, &cframes_bcast_reg_fdri_ops, s,
148
+ TYPE_XLNX_VERSAL_CFRAME_BCAST_REG "-fdri",
149
+ KEYHOLE_STREAM_4K);
150
+ sysbus_init_mmio(sbd, &s->iomem_reg);
151
+ sysbus_init_mmio(sbd, &s->iomem_fdri);
152
+}
153
+
154
+static void cframe_bcast_reg_reset_enter(Object *obj, ResetType type)
155
+{
156
+ XlnxVersalCFrameBcastReg *s = XLNX_VERSAL_CFRAME_BCAST_REG(obj);
157
+
158
+ memset(s->wfifo, 0, WFIFO_SZ * sizeof(uint32_t));
159
+}
160
+
161
+static const VMStateDescription vmstate_cframe_bcast_reg = {
162
+ .name = TYPE_XLNX_VERSAL_CFRAME_BCAST_REG,
88
+ .version_id = 1,
163
+ .version_id = 1,
89
+ .minimum_version_id = 1,
164
+ .minimum_version_id = 1,
90
+ .needed = nvic_security_needed,
91
+ .post_load = &nvic_security_post_load,
92
+ .fields = (VMStateField[]) {
165
+ .fields = (VMStateField[]) {
93
+ VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
166
+ VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFrameBcastReg, 4),
94
+ vmstate_VecInfo, VecInfo),
167
+ VMSTATE_END_OF_LIST(),
95
+ VMSTATE_END_OF_LIST()
168
+ }
96
+ }
169
+};
97
+};
170
+
98
+
171
+static Property cframe_bcast_regs_props[] = {
99
static const VMStateDescription vmstate_nvic = {
172
+ DEFINE_PROP_LINK("cframe0", XlnxVersalCFrameBcastReg, cfg.cframe[0],
100
.name = "armv7m_nvic",
173
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
101
.version_id = 4,
174
+ DEFINE_PROP_LINK("cframe1", XlnxVersalCFrameBcastReg, cfg.cframe[1],
102
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_nvic = {
175
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
103
vmstate_VecInfo, VecInfo),
176
+ DEFINE_PROP_LINK("cframe2", XlnxVersalCFrameBcastReg, cfg.cframe[2],
104
VMSTATE_UINT32(prigroup, NVICState),
177
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
105
VMSTATE_END_OF_LIST()
178
+ DEFINE_PROP_LINK("cframe3", XlnxVersalCFrameBcastReg, cfg.cframe[3],
106
+ },
179
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
107
+ .subsections = (const VMStateDescription*[]) {
180
+ DEFINE_PROP_LINK("cframe4", XlnxVersalCFrameBcastReg, cfg.cframe[4],
108
+ &vmstate_nvic_security,
181
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
109
+ NULL
182
+ DEFINE_PROP_LINK("cframe5", XlnxVersalCFrameBcastReg, cfg.cframe[5],
183
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
184
+ DEFINE_PROP_LINK("cframe6", XlnxVersalCFrameBcastReg, cfg.cframe[6],
185
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
186
+ DEFINE_PROP_LINK("cframe7", XlnxVersalCFrameBcastReg, cfg.cframe[7],
187
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
188
+ DEFINE_PROP_LINK("cframe8", XlnxVersalCFrameBcastReg, cfg.cframe[8],
189
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
190
+ DEFINE_PROP_LINK("cframe9", XlnxVersalCFrameBcastReg, cfg.cframe[9],
191
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
192
+ DEFINE_PROP_LINK("cframe10", XlnxVersalCFrameBcastReg, cfg.cframe[10],
193
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
194
+ DEFINE_PROP_LINK("cframe11", XlnxVersalCFrameBcastReg, cfg.cframe[11],
195
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
196
+ DEFINE_PROP_LINK("cframe12", XlnxVersalCFrameBcastReg, cfg.cframe[12],
197
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
198
+ DEFINE_PROP_LINK("cframe13", XlnxVersalCFrameBcastReg, cfg.cframe[13],
199
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
200
+ DEFINE_PROP_LINK("cframe14", XlnxVersalCFrameBcastReg, cfg.cframe[14],
201
+ TYPE_XLNX_CFI_IF, XlnxCfiIf *),
202
+ DEFINE_PROP_END_OF_LIST(),
203
+};
204
+
205
static void cframe_reg_class_init(ObjectClass *klass, void *data)
206
{
207
ResettableClass *rc = RESETTABLE_CLASS(klass);
208
@@ -XXX,XX +XXX,XX @@ static void cframe_reg_class_init(ObjectClass *klass, void *data)
209
xcic->cfi_transfer_packet = cframe_reg_cfi_transfer_packet;
210
}
211
212
+static void cframe_bcast_reg_class_init(ObjectClass *klass, void *data)
213
+{
214
+ DeviceClass *dc = DEVICE_CLASS(klass);
215
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
216
+
217
+ dc->vmsd = &vmstate_cframe_bcast_reg;
218
+ device_class_set_props(dc, cframe_bcast_regs_props);
219
+ rc->phases.enter = cframe_bcast_reg_reset_enter;
220
+}
221
+
222
static const TypeInfo cframe_reg_info = {
223
.name = TYPE_XLNX_VERSAL_CFRAME_REG,
224
.parent = TYPE_SYS_BUS_DEVICE,
225
@@ -XXX,XX +XXX,XX @@ static const TypeInfo cframe_reg_info = {
110
}
226
}
111
};
227
};
112
228
113
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_reset(DeviceState *dev)
229
+static const TypeInfo cframe_bcast_reg_info = {
114
s->vectors[ARMV7M_EXCP_NMI].prio = -2;
230
+ .name = TYPE_XLNX_VERSAL_CFRAME_BCAST_REG,
115
s->vectors[ARMV7M_EXCP_HARD].prio = -1;
231
+ .parent = TYPE_SYS_BUS_DEVICE,
116
232
+ .instance_size = sizeof(XlnxVersalCFrameBcastReg),
117
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
233
+ .class_init = cframe_bcast_reg_class_init,
118
+ s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
234
+ .instance_init = cframe_bcast_reg_init,
119
+ s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
235
+};
120
+ s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
236
+
121
+ s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
237
static void cframe_reg_register_types(void)
122
+
238
{
123
+ /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
239
type_register_static(&cframe_reg_info);
124
+ s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
240
+ type_register_static(&cframe_bcast_reg_info);
125
+ }
241
}
126
+
242
127
/* Strictly speaking the reset handler should be enabled.
243
type_init(cframe_reg_register_types)
128
* However, we don't simulate soft resets through the NVIC,
129
* and the reset vector should never be pended.
130
--
244
--
131
2.7.4
245
2.34.1
132
133
1
Don't use old_mmio in the memory region ops struct.
1
From: Francisco Iglesias <francisco.iglesias@amd.com>
2
2
3
Connect the Configuration Frame Unit (CFU_APB, CFU_FDRO and CFU_SFR) to
4
the Versal machine.
5
6
Signed-off-by: Francisco Iglesias <francisco.iglesias@amd.com>
7
Acked-by: Edgar E. Iglesias <edgar@zeroasic.com>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Message-id: 20230831165701.2016397-8-francisco.iglesias@amd.com
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 1505580378-9044-6-git-send-email-peter.maydell@linaro.org
6
---
11
---
7
hw/i2c/omap_i2c.c | 44 ++++++++++++++++++++++++++++++++------------
12
include/hw/arm/xlnx-versal.h | 16 ++++++++++++++
8
1 file changed, 32 insertions(+), 12 deletions(-)
13
hw/arm/xlnx-versal.c | 42 ++++++++++++++++++++++++++++++++++++
14
2 files changed, 58 insertions(+)
9
15
10
diff --git a/hw/i2c/omap_i2c.c b/hw/i2c/omap_i2c.c
16
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
11
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
12
--- a/hw/i2c/omap_i2c.c
18
--- a/include/hw/arm/xlnx-versal.h
13
+++ b/hw/i2c/omap_i2c.c
19
+++ b/include/hw/arm/xlnx-versal.h
14
@@ -XXX,XX +XXX,XX @@ static void omap_i2c_writeb(void *opaque, hwaddr addr,
20
@@ -XXX,XX +XXX,XX @@
15
}
21
#include "hw/misc/xlnx-versal-crl.h"
22
#include "hw/misc/xlnx-versal-pmc-iou-slcr.h"
23
#include "hw/net/xlnx-versal-canfd.h"
24
+#include "hw/misc/xlnx-versal-cfu.h"
25
26
#define TYPE_XLNX_VERSAL "xlnx-versal"
27
OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL)
28
@@ -XXX,XX +XXX,XX @@ struct Versal {
29
XlnxEFuse efuse;
30
XlnxVersalEFuseCtrl efuse_ctrl;
31
XlnxVersalEFuseCache efuse_cache;
32
+ XlnxVersalCFUAPB cfu_apb;
33
+ XlnxVersalCFUFDRO cfu_fdro;
34
+ XlnxVersalCFUSFR cfu_sfr;
35
36
OrIRQState apb_irq_orgate;
37
} pmc;
38
@@ -XXX,XX +XXX,XX @@ struct Versal {
39
#define VERSAL_GEM1_WAKE_IRQ_0 59
40
#define VERSAL_ADMA_IRQ_0 60
41
#define VERSAL_XRAM_IRQ_0 79
42
+#define VERSAL_CFU_IRQ_0 120
43
#define VERSAL_PMC_APB_IRQ 121
44
#define VERSAL_OSPI_IRQ 124
45
#define VERSAL_SD0_IRQ_0 126
46
@@ -XXX,XX +XXX,XX @@ struct Versal {
47
#define MM_PMC_EFUSE_CACHE 0xf1250000
48
#define MM_PMC_EFUSE_CACHE_SIZE 0x00C00
49
50
+#define MM_PMC_CFU_APB 0xf12b0000
51
+#define MM_PMC_CFU_APB_SIZE 0x10000
52
+#define MM_PMC_CFU_STREAM 0xf12c0000
53
+#define MM_PMC_CFU_STREAM_SIZE 0x1000
54
+#define MM_PMC_CFU_SFR 0xf12c1000
55
+#define MM_PMC_CFU_SFR_SIZE 0x1000
56
+#define MM_PMC_CFU_FDRO 0xf12c2000
57
+#define MM_PMC_CFU_FDRO_SIZE 0x1000
58
+#define MM_PMC_CFU_STREAM_2 0xf1f80000
59
+#define MM_PMC_CFU_STREAM_2_SIZE 0x40000
60
+
61
#define MM_PMC_CRP 0xf1260000U
62
#define MM_PMC_CRP_SIZE 0x10000
63
#define MM_PMC_RTC 0xf12a0000
64
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/hw/arm/xlnx-versal.c
67
+++ b/hw/arm/xlnx-versal.c
68
@@ -XXX,XX +XXX,XX @@ static void versal_create_ospi(Versal *s, qemu_irq *pic)
69
qdev_connect_gpio_out(orgate, 0, pic[VERSAL_OSPI_IRQ]);
16
}
70
}
17
71
18
+static uint64_t omap_i2c_readfn(void *opaque, hwaddr addr,
72
+static void versal_create_cfu(Versal *s, qemu_irq *pic)
19
+ unsigned size)
20
+{
73
+{
21
+ switch (size) {
74
+ SysBusDevice *sbd;
22
+ case 2:
75
+
23
+ return omap_i2c_read(opaque, addr);
76
+ /* CFU FDRO */
24
+ default:
77
+ object_initialize_child(OBJECT(s), "cfu-fdro", &s->pmc.cfu_fdro,
25
+ return omap_badwidth_read16(opaque, addr);
78
+ TYPE_XLNX_VERSAL_CFU_FDRO);
26
+ }
79
+ sbd = SYS_BUS_DEVICE(&s->pmc.cfu_fdro);
80
+
81
+ sysbus_realize(sbd, &error_fatal);
82
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_FDRO,
83
+ sysbus_mmio_get_region(sbd, 0));
84
+
85
+ /* CFU APB */
86
+ object_initialize_child(OBJECT(s), "cfu-apb", &s->pmc.cfu_apb,
87
+ TYPE_XLNX_VERSAL_CFU_APB);
88
+ sbd = SYS_BUS_DEVICE(&s->pmc.cfu_apb);
89
+
90
+ sysbus_realize(sbd, &error_fatal);
91
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_APB,
92
+ sysbus_mmio_get_region(sbd, 0));
93
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_STREAM,
94
+ sysbus_mmio_get_region(sbd, 1));
95
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_STREAM_2,
96
+ sysbus_mmio_get_region(sbd, 2));
97
+ sysbus_connect_irq(sbd, 0, pic[VERSAL_CFU_IRQ_0]);
98
+
99
+ /* CFU SFR */
100
+ object_initialize_child(OBJECT(s), "cfu-sfr", &s->pmc.cfu_sfr,
101
+ TYPE_XLNX_VERSAL_CFU_SFR);
102
+
103
+ sbd = SYS_BUS_DEVICE(&s->pmc.cfu_sfr);
104
+
105
+ object_property_set_link(OBJECT(&s->pmc.cfu_sfr),
106
+ "cfu", OBJECT(&s->pmc.cfu_apb), &error_abort);
107
+
108
+ sysbus_realize(sbd, &error_fatal);
109
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_SFR,
110
+ sysbus_mmio_get_region(sbd, 0));
27
+}
111
+}
28
+
112
+
29
+static void omap_i2c_writefn(void *opaque, hwaddr addr,
113
static void versal_create_crl(Versal *s, qemu_irq *pic)
30
+ uint64_t value, unsigned size)
114
{
31
+{
115
SysBusDevice *sbd;
32
+ switch (size) {
116
@@ -XXX,XX +XXX,XX @@ static void versal_realize(DeviceState *dev, Error **errp)
33
+ case 1:
117
versal_create_pmc_iou_slcr(s, pic);
34
+ /* Only the last fifo write can be 8 bit. */
118
versal_create_ospi(s, pic);
35
+ omap_i2c_writeb(opaque, addr, value);
119
versal_create_crl(s, pic);
36
+ break;
120
+ versal_create_cfu(s, pic);
37
+ case 2:
121
versal_map_ddr(s);
38
+ omap_i2c_write(opaque, addr, value);
122
versal_unimp(s);
39
+ break;
40
+ default:
41
+ omap_badwidth_write16(opaque, addr, value);
42
+ break;
43
+ }
44
+}
45
+
46
static const MemoryRegionOps omap_i2c_ops = {
47
- .old_mmio = {
48
- .read = {
49
- omap_badwidth_read16,
50
- omap_i2c_read,
51
- omap_badwidth_read16,
52
- },
53
- .write = {
54
- omap_i2c_writeb, /* Only the last fifo write can be 8 bit. */
55
- omap_i2c_write,
56
- omap_badwidth_write16,
57
- },
58
- },
59
+ .read = omap_i2c_readfn,
60
+ .write = omap_i2c_writefn,
61
+ .valid.min_access_size = 1,
62
+ .valid.max_access_size = 4,
63
.endianness = DEVICE_NATIVE_ENDIAN,
64
};
65
123
66
--
124
--
67
2.7.4
125
2.34.1
68
69
1
Drop the use of old_mmio in the omap2_gpio memory ops.
1
From: Francisco Iglesias <francisco.iglesias@amd.com>
2
2
3
Connect the Configuration Frame controller (CFRAME_REG) and the
4
Configuration Frame broadcast controller (CFRAME_BCAST_REG) to the
5
Versal machine.
6
7
Signed-off-by: Francisco Iglesias <francisco.iglesias@amd.com>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Message-id: 20230831165701.2016397-9-francisco.iglesias@amd.com
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 1505580378-9044-3-git-send-email-peter.maydell@linaro.org
6
---
11
---
7
hw/gpio/omap_gpio.c | 26 ++++++++++++--------------
12
include/hw/arm/xlnx-versal.h | 69 +++++++++++++++++++++
8
1 file changed, 12 insertions(+), 14 deletions(-)
13
hw/arm/xlnx-versal.c | 113 ++++++++++++++++++++++++++++++++++-
14
2 files changed, 181 insertions(+), 1 deletion(-)
9
15
10
diff --git a/hw/gpio/omap_gpio.c b/hw/gpio/omap_gpio.c
16
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
11
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
12
--- a/hw/gpio/omap_gpio.c
18
--- a/include/hw/arm/xlnx-versal.h
13
+++ b/hw/gpio/omap_gpio.c
19
+++ b/include/hw/arm/xlnx-versal.h
14
@@ -XXX,XX +XXX,XX @@ static void omap2_gpio_module_write(void *opaque, hwaddr addr,
20
@@ -XXX,XX +XXX,XX @@
15
}
21
#include "hw/misc/xlnx-versal-pmc-iou-slcr.h"
16
}
22
#include "hw/net/xlnx-versal-canfd.h"
17
23
#include "hw/misc/xlnx-versal-cfu.h"
18
-static uint32_t omap2_gpio_module_readp(void *opaque, hwaddr addr)
24
+#include "hw/misc/xlnx-versal-cframe-reg.h"
19
+static uint64_t omap2_gpio_module_readp(void *opaque, hwaddr addr,
25
20
+ unsigned size)
26
#define TYPE_XLNX_VERSAL "xlnx-versal"
27
OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL)
28
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL)
29
#define XLNX_VERSAL_NR_IRQS 192
30
#define XLNX_VERSAL_NR_CANFD 2
31
#define XLNX_VERSAL_CANFD_REF_CLK (24 * 1000 * 1000)
32
+#define XLNX_VERSAL_NR_CFRAME 15
33
34
struct Versal {
35
/*< private >*/
36
@@ -XXX,XX +XXX,XX @@ struct Versal {
37
XlnxVersalCFUAPB cfu_apb;
38
XlnxVersalCFUFDRO cfu_fdro;
39
XlnxVersalCFUSFR cfu_sfr;
40
+ XlnxVersalCFrameReg cframe[XLNX_VERSAL_NR_CFRAME];
41
+ XlnxVersalCFrameBcastReg cframe_bcast;
42
43
OrIRQState apb_irq_orgate;
44
} pmc;
45
@@ -XXX,XX +XXX,XX @@ struct Versal {
46
#define MM_PMC_CFU_STREAM_2 0xf1f80000
47
#define MM_PMC_CFU_STREAM_2_SIZE 0x40000
48
49
+#define MM_PMC_CFRAME0_REG 0xf12d0000
50
+#define MM_PMC_CFRAME0_REG_SIZE 0x1000
51
+#define MM_PMC_CFRAME0_FDRI 0xf12d1000
52
+#define MM_PMC_CFRAME0_FDRI_SIZE 0x1000
53
+#define MM_PMC_CFRAME1_REG 0xf12d2000
54
+#define MM_PMC_CFRAME1_REG_SIZE 0x1000
55
+#define MM_PMC_CFRAME1_FDRI 0xf12d3000
56
+#define MM_PMC_CFRAME1_FDRI_SIZE 0x1000
57
+#define MM_PMC_CFRAME2_REG 0xf12d4000
58
+#define MM_PMC_CFRAME2_REG_SIZE 0x1000
59
+#define MM_PMC_CFRAME2_FDRI 0xf12d5000
60
+#define MM_PMC_CFRAME2_FDRI_SIZE 0x1000
61
+#define MM_PMC_CFRAME3_REG 0xf12d6000
62
+#define MM_PMC_CFRAME3_REG_SIZE 0x1000
63
+#define MM_PMC_CFRAME3_FDRI 0xf12d7000
64
+#define MM_PMC_CFRAME3_FDRI_SIZE 0x1000
65
+#define MM_PMC_CFRAME4_REG 0xf12d8000
66
+#define MM_PMC_CFRAME4_REG_SIZE 0x1000
67
+#define MM_PMC_CFRAME4_FDRI 0xf12d9000
68
+#define MM_PMC_CFRAME4_FDRI_SIZE 0x1000
69
+#define MM_PMC_CFRAME5_REG 0xf12da000
70
+#define MM_PMC_CFRAME5_REG_SIZE 0x1000
71
+#define MM_PMC_CFRAME5_FDRI 0xf12db000
72
+#define MM_PMC_CFRAME5_FDRI_SIZE 0x1000
73
+#define MM_PMC_CFRAME6_REG 0xf12dc000
74
+#define MM_PMC_CFRAME6_REG_SIZE 0x1000
75
+#define MM_PMC_CFRAME6_FDRI 0xf12dd000
76
+#define MM_PMC_CFRAME6_FDRI_SIZE 0x1000
77
+#define MM_PMC_CFRAME7_REG 0xf12de000
78
+#define MM_PMC_CFRAME7_REG_SIZE 0x1000
79
+#define MM_PMC_CFRAME7_FDRI 0xf12df000
80
+#define MM_PMC_CFRAME7_FDRI_SIZE 0x1000
81
+#define MM_PMC_CFRAME8_REG 0xf12e0000
82
+#define MM_PMC_CFRAME8_REG_SIZE 0x1000
83
+#define MM_PMC_CFRAME8_FDRI 0xf12e1000
84
+#define MM_PMC_CFRAME8_FDRI_SIZE 0x1000
85
+#define MM_PMC_CFRAME9_REG 0xf12e2000
86
+#define MM_PMC_CFRAME9_REG_SIZE 0x1000
87
+#define MM_PMC_CFRAME9_FDRI 0xf12e3000
88
+#define MM_PMC_CFRAME9_FDRI_SIZE 0x1000
89
+#define MM_PMC_CFRAME10_REG 0xf12e4000
90
+#define MM_PMC_CFRAME10_REG_SIZE 0x1000
91
+#define MM_PMC_CFRAME10_FDRI 0xf12e5000
92
+#define MM_PMC_CFRAME10_FDRI_SIZE 0x1000
93
+#define MM_PMC_CFRAME11_REG 0xf12e6000
94
+#define MM_PMC_CFRAME11_REG_SIZE 0x1000
95
+#define MM_PMC_CFRAME11_FDRI 0xf12e7000
96
+#define MM_PMC_CFRAME11_FDRI_SIZE 0x1000
97
+#define MM_PMC_CFRAME12_REG 0xf12e8000
98
+#define MM_PMC_CFRAME12_REG_SIZE 0x1000
99
+#define MM_PMC_CFRAME12_FDRI 0xf12e9000
100
+#define MM_PMC_CFRAME12_FDRI_SIZE 0x1000
101
+#define MM_PMC_CFRAME13_REG 0xf12ea000
102
+#define MM_PMC_CFRAME13_REG_SIZE 0x1000
103
+#define MM_PMC_CFRAME13_FDRI 0xf12eb000
104
+#define MM_PMC_CFRAME13_FDRI_SIZE 0x1000
105
+#define MM_PMC_CFRAME14_REG 0xf12ec000
106
+#define MM_PMC_CFRAME14_REG_SIZE 0x1000
107
+#define MM_PMC_CFRAME14_FDRI 0xf12ed000
108
+#define MM_PMC_CFRAME14_FDRI_SIZE 0x1000
109
+#define MM_PMC_CFRAME_BCAST_REG 0xf12ee000
110
+#define MM_PMC_CFRAME_BCAST_REG_SIZE 0x1000
111
+#define MM_PMC_CFRAME_BCAST_FDRI 0xf12ef000
112
+#define MM_PMC_CFRAME_BCAST_FDRI_SIZE 0x1000
113
+
114
#define MM_PMC_CRP 0xf1260000U
115
#define MM_PMC_CRP_SIZE 0x10000
116
#define MM_PMC_RTC 0xf12a0000
117
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
118
index XXXXXXX..XXXXXXX 100644
119
--- a/hw/arm/xlnx-versal.c
120
+++ b/hw/arm/xlnx-versal.c
121
@@ -XXX,XX +XXX,XX @@
122
#define XLNX_VERSAL_RCPU_TYPE ARM_CPU_TYPE_NAME("cortex-r5f")
123
#define GEM_REVISION 0x40070106
124
125
-#define VERSAL_NUM_PMC_APB_IRQS 3
126
+#define VERSAL_NUM_PMC_APB_IRQS 18
127
#define NUM_OSPI_IRQ_LINES 3
128
129
static void versal_create_apu_cpus(Versal *s)
130
@@ -XXX,XX +XXX,XX @@ static void versal_create_pmc_apb_irq_orgate(Versal *s, qemu_irq *pic)
131
* - RTC
132
* - BBRAM
133
* - PMC SLCR
134
+ * - CFRAME regs (input 3 - 17 to the orgate)
135
*/
136
object_initialize_child(OBJECT(s), "pmc-apb-irq-orgate",
137
&s->pmc.apb_irq_orgate, TYPE_OR_IRQ);
138
@@ -XXX,XX +XXX,XX @@ static void versal_create_ospi(Versal *s, qemu_irq *pic)
139
static void versal_create_cfu(Versal *s, qemu_irq *pic)
21
{
140
{
22
return omap2_gpio_module_read(opaque, addr & ~3) >> ((addr & 3) << 3);
141
SysBusDevice *sbd;
23
}
142
+ DeviceState *dev;
24
143
+ int i;
25
static void omap2_gpio_module_writep(void *opaque, hwaddr addr,
144
+ const struct {
26
- uint32_t value)
145
+ uint64_t reg_base;
27
+ uint64_t value, unsigned size)
146
+ uint64_t fdri_base;
28
{
147
+ } cframe_addr[] = {
29
uint32_t cur = 0;
148
+ { MM_PMC_CFRAME0_REG, MM_PMC_CFRAME0_FDRI },
30
uint32_t mask = 0xffff;
149
+ { MM_PMC_CFRAME1_REG, MM_PMC_CFRAME1_FDRI },
31
150
+ { MM_PMC_CFRAME2_REG, MM_PMC_CFRAME2_FDRI },
32
+ if (size == 4) {
151
+ { MM_PMC_CFRAME3_REG, MM_PMC_CFRAME3_FDRI },
33
+ omap2_gpio_module_write(opaque, addr, value);
152
+ { MM_PMC_CFRAME4_REG, MM_PMC_CFRAME4_FDRI },
34
+ return;
153
+ { MM_PMC_CFRAME5_REG, MM_PMC_CFRAME5_FDRI },
154
+ { MM_PMC_CFRAME6_REG, MM_PMC_CFRAME6_FDRI },
155
+ { MM_PMC_CFRAME7_REG, MM_PMC_CFRAME7_FDRI },
156
+ { MM_PMC_CFRAME8_REG, MM_PMC_CFRAME8_FDRI },
157
+ { MM_PMC_CFRAME9_REG, MM_PMC_CFRAME9_FDRI },
158
+ { MM_PMC_CFRAME10_REG, MM_PMC_CFRAME10_FDRI },
159
+ { MM_PMC_CFRAME11_REG, MM_PMC_CFRAME11_FDRI },
160
+ { MM_PMC_CFRAME12_REG, MM_PMC_CFRAME12_FDRI },
161
+ { MM_PMC_CFRAME13_REG, MM_PMC_CFRAME13_FDRI },
162
+ { MM_PMC_CFRAME14_REG, MM_PMC_CFRAME14_FDRI },
163
+ };
164
+ const struct {
165
+ uint32_t blktype0_frames;
166
+ uint32_t blktype1_frames;
167
+ uint32_t blktype2_frames;
168
+ uint32_t blktype3_frames;
169
+ uint32_t blktype4_frames;
170
+ uint32_t blktype5_frames;
171
+ uint32_t blktype6_frames;
172
+ } cframe_cfg[] = {
173
+ [0] = { 34111, 3528, 12800, 11, 5, 1, 1 },
174
+ [1] = { 38498, 3841, 15361, 13, 7, 3, 1 },
175
+ [2] = { 38498, 3841, 15361, 13, 7, 3, 1 },
176
+ [3] = { 38498, 3841, 15361, 13, 7, 3, 1 },
177
+ };
178
179
/* CFU FDRO */
180
object_initialize_child(OBJECT(s), "cfu-fdro", &s->pmc.cfu_fdro,
181
@@ -XXX,XX +XXX,XX @@ static void versal_create_cfu(Versal *s, qemu_irq *pic)
182
memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_FDRO,
183
sysbus_mmio_get_region(sbd, 0));
184
185
+ /* CFRAME REG */
186
+ for (i = 0; i < ARRAY_SIZE(s->pmc.cframe); i++) {
187
+ g_autofree char *name = g_strdup_printf("cframe%d", i);
188
+
189
+ object_initialize_child(OBJECT(s), name, &s->pmc.cframe[i],
190
+ TYPE_XLNX_VERSAL_CFRAME_REG);
191
+
192
+ sbd = SYS_BUS_DEVICE(&s->pmc.cframe[i]);
193
+ dev = DEVICE(&s->pmc.cframe[i]);
194
+
195
+ if (i < ARRAY_SIZE(cframe_cfg)) {
196
+ object_property_set_int(OBJECT(dev), "blktype0-frames",
197
+ cframe_cfg[i].blktype0_frames,
198
+ &error_abort);
199
+ object_property_set_int(OBJECT(dev), "blktype1-frames",
200
+ cframe_cfg[i].blktype1_frames,
201
+ &error_abort);
202
+ object_property_set_int(OBJECT(dev), "blktype2-frames",
203
+ cframe_cfg[i].blktype2_frames,
204
+ &error_abort);
205
+ object_property_set_int(OBJECT(dev), "blktype3-frames",
206
+ cframe_cfg[i].blktype3_frames,
207
+ &error_abort);
208
+ object_property_set_int(OBJECT(dev), "blktype4-frames",
209
+ cframe_cfg[i].blktype4_frames,
210
+ &error_abort);
211
+ object_property_set_int(OBJECT(dev), "blktype5-frames",
212
+ cframe_cfg[i].blktype5_frames,
213
+ &error_abort);
214
+ object_property_set_int(OBJECT(dev), "blktype6-frames",
215
+ cframe_cfg[i].blktype6_frames,
216
+ &error_abort);
217
+ }
218
+ object_property_set_link(OBJECT(dev), "cfu-fdro",
219
+ OBJECT(&s->pmc.cfu_fdro), &error_fatal);
220
+
221
+ sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal);
222
+
223
+ memory_region_add_subregion(&s->mr_ps, cframe_addr[i].reg_base,
224
+ sysbus_mmio_get_region(sbd, 0));
225
+ memory_region_add_subregion(&s->mr_ps, cframe_addr[i].fdri_base,
226
+ sysbus_mmio_get_region(sbd, 1));
227
+ sysbus_connect_irq(sbd, 0,
228
+ qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate),
229
+ 3 + i));
35
+ }
230
+ }
36
+
231
+
37
switch (addr & ~3) {
232
+ /* CFRAME BCAST */
38
case 0x00:    /* GPIO_REVISION */
233
+ object_initialize_child(OBJECT(s), "cframe_bcast", &s->pmc.cframe_bcast,
39
case 0x14:    /* GPIO_SYSSTATUS */
234
+ TYPE_XLNX_VERSAL_CFRAME_BCAST_REG);
40
@@ -XXX,XX +XXX,XX @@ static void omap2_gpio_module_writep(void *opaque, hwaddr addr,
235
+
41
}
236
+ sbd = SYS_BUS_DEVICE(&s->pmc.cframe_bcast);
42
237
+ dev = DEVICE(&s->pmc.cframe_bcast);
43
static const MemoryRegionOps omap2_gpio_module_ops = {
238
+
44
- .old_mmio = {
239
+ for (i = 0; i < ARRAY_SIZE(s->pmc.cframe); i++) {
45
- .read = {
240
+ g_autofree char *propname = g_strdup_printf("cframe%d", i);
46
- omap2_gpio_module_readp,
241
+ object_property_set_link(OBJECT(dev), propname,
47
- omap2_gpio_module_readp,
242
+ OBJECT(&s->pmc.cframe[i]), &error_fatal);
48
- omap2_gpio_module_read,
243
+ }
49
- },
244
+
50
- .write = {
245
+ sysbus_realize(sbd, &error_fatal);
51
- omap2_gpio_module_writep,
246
+
52
- omap2_gpio_module_writep,
247
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_CFRAME_BCAST_REG,
53
- omap2_gpio_module_write,
248
+ sysbus_mmio_get_region(sbd, 0));
54
- },
249
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_CFRAME_BCAST_FDRI,
55
- },
250
+ sysbus_mmio_get_region(sbd, 1));
56
+ .read = omap2_gpio_module_readp,
251
+
57
+ .write = omap2_gpio_module_writep,
252
/* CFU APB */
58
+ .valid.min_access_size = 1,
253
object_initialize_child(OBJECT(s), "cfu-apb", &s->pmc.cfu_apb,
59
+ .valid.max_access_size = 4,
254
TYPE_XLNX_VERSAL_CFU_APB);
60
.endianness = DEVICE_NATIVE_ENDIAN,
255
sbd = SYS_BUS_DEVICE(&s->pmc.cfu_apb);
61
};
256
+ dev = DEVICE(&s->pmc.cfu_apb);
62
257
+
258
+ for (i = 0; i < ARRAY_SIZE(s->pmc.cframe); i++) {
259
+ g_autofree char *propname = g_strdup_printf("cframe%d", i);
260
+ object_property_set_link(OBJECT(dev), propname,
261
+ OBJECT(&s->pmc.cframe[i]), &error_fatal);
262
+ }
263
264
sysbus_realize(sbd, &error_fatal);
265
memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_APB,
63
--
266
--
64
2.7.4
267
2.34.1
65
66
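
The old_mmio conversions in this queue all follow the same shape: drop the
per-width callback arrays in favour of a single read/write pair that takes
the access size as a parameter, and declare the accepted widths via .valid.
A minimal sketch of that pattern, using a made-up single-register device
(DemoState, demo_read/demo_write and demo_ops are illustrative names, not
taken from any patch above; the includes are just the usual minimal set):

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "exec/memory.h"

/* Illustrative device state: a single 32-bit scratch register. */
typedef struct DemoState {
    uint32_t reg;
} DemoState;

static uint64_t demo_read(void *opaque, hwaddr addr, unsigned size)
{
    DemoState *s = opaque;

    /* 1, 2 and 4 byte reads all return the matching byte lanes. */
    return extract32(s->reg, (addr & 3) * 8, size * 8);
}

static void demo_write(void *opaque, hwaddr addr, uint64_t value,
                       unsigned size)
{
    DemoState *s = opaque;

    s->reg = deposit32(s->reg, (addr & 3) * 8, size * 8, value);
}

static const MemoryRegionOps demo_ops = {
    .read = demo_read,
    .write = demo_write,
    /* Accesses outside 1..4 bytes are rejected by the memory core. */
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

Devices whose narrow accesses are not simple byte-lane updates (like the
omap_i2c "only the last fifo write can be 8 bit" case above) keep a switch
on the size parameter inside the read/write functions instead.
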
1
Update nvic_exec_prio() to support the v8M changes:
1
From: Richard Henderson <richard.henderson@linaro.org>
2
* BASEPRI, FAULTMASK and PRIMASK are all banked
3
* AIRCR.PRIS can affect NS priorities
4
* AIRCR.BFHFNMINS affects FAULTMASK behaviour
5
2
6
These changes mean that it's no longer possible to
3
STGP writes to tag memory, but it does not check it.
7
definitely say that if FAULTMASK is set it overrides
4
This happened to work because we wrote tag memory first
8
PRIMASK, and if PRIMASK is set it overrides BASEPRI
5
so that the check always succeeded.
9
(since if PRIMASK_NS is set and AIRCR.PRIS is set then
10
whether that 0x80 priority should take effect or the
11
priority in BASEPRI_S depends on the value of BASEPRI_S,
12
for instance). So we switch to the same approach used
13
by the pseudocode of working through BASEPRI, PRIMASK
14
and FAULTMASK and overriding the previous values if
15
needed.
16
6
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20230901203103.136408-1-richard.henderson@linaro.org
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
Message-id: 1505240046-11454-16-git-send-email-peter.maydell@linaro.org
20
---
11
---
21
hw/intc/armv7m_nvic.c | 51 ++++++++++++++++++++++++++++++++++++++++++---------
12
target/arm/tcg/translate-a64.c | 41 +++++++++++++---------------------
22
1 file changed, 42 insertions(+), 9 deletions(-)
13
1 file changed, 15 insertions(+), 26 deletions(-)
23
14
24
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
15
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
25
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
26
--- a/hw/intc/armv7m_nvic.c
17
--- a/target/arm/tcg/translate-a64.c
27
+++ b/hw/intc/armv7m_nvic.c
18
+++ b/target/arm/tcg/translate-a64.c
28
@@ -XXX,XX +XXX,XX @@ static void nvic_recompute_state(NVICState *s)
19
@@ -XXX,XX +XXX,XX @@ static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
29
static inline int nvic_exec_prio(NVICState *s)
20
tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
30
{
21
}
31
CPUARMState *env = &s->cpu->env;
22
32
- int running;
23
- if (!s->ata) {
33
+ int running = NVIC_NOEXC_PRIO;
24
- /*
34
25
- * TODO: We could rely on the stores below, at least for
35
- if (env->v7m.faultmask[env->v7m.secure]) {
26
- * system mode, if we arrange to add MO_ALIGN_16.
36
- running = -1;
27
- */
37
- } else if (env->v7m.primask[env->v7m.secure]) {
28
- gen_helper_stg_stub(cpu_env, dirty_addr);
38
+ if (env->v7m.basepri[M_REG_NS] > 0) {
29
- } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
39
+ running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
30
- gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
40
+ }
31
- } else {
41
+
32
- gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
42
+ if (env->v7m.basepri[M_REG_S] > 0) {
33
- }
43
+ int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
34
-
44
+ if (running > basepri) {
35
- mop = finalize_memop(s, MO_64);
45
+ running = basepri;
36
- clean_addr = gen_mte_checkN(s, dirty_addr, true, false, 2 << MO_64, mop);
37
-
38
+ clean_addr = clean_data_tbi(s, dirty_addr);
39
tcg_rt = cpu_reg(s, a->rt);
40
tcg_rt2 = cpu_reg(s, a->rt2);
41
42
/*
43
- * STGP is defined as two 8-byte memory operations and one tag operation.
44
- * We implement it as one single 16-byte memory operation for convenience.
45
- * Rebuild mop as for STP.
46
- * TODO: The atomicity with LSE2 is stronger than required.
47
- * Need a form of MO_ATOM_WITHIN16_PAIR that never requires
48
- * 16-byte atomicity.
49
+ * STGP is defined as two 8-byte memory operations, aligned to TAG_GRANULE,
50
+ * and one tag operation. We implement it as one single aligned 16-byte
51
+ * memory operation for convenience. Note that the alignment ensures
52
+ * MO_ATOM_IFALIGN_PAIR produces 8-byte atomicity for the memory store.
53
*/
54
- mop = MO_128;
55
- if (s->align_mem) {
56
- mop |= MO_ALIGN_8;
57
- }
58
- mop = finalize_memop_pair(s, mop);
59
+ mop = finalize_memop_atom(s, MO_128 | MO_ALIGN, MO_ATOM_IFALIGN_PAIR);
60
61
tmp = tcg_temp_new_i128();
62
if (s->be_data == MO_LE) {
63
@@ -XXX,XX +XXX,XX @@ static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
64
}
65
tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
66
67
+ /* Perform the tag store, if tag access enabled. */
68
+ if (s->ata) {
69
+ if (tb_cflags(s->base.tb) & CF_PARALLEL) {
70
+ gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
71
+ } else {
72
+ gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
46
+ }
73
+ }
47
+ }
74
+ }
48
+
75
+
49
+ if (env->v7m.primask[M_REG_NS]) {
76
op_addr_ldstpair_post(s, a, dirty_addr, offset);
50
+ if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
77
return true;
51
+ if (running > NVIC_NS_PRIO_LIMIT) {
52
+ running = NVIC_NS_PRIO_LIMIT;
53
+ }
54
+ } else {
55
+ running = 0;
56
+ }
57
+ }
58
+
59
+ if (env->v7m.primask[M_REG_S]) {
60
running = 0;
61
- } else if (env->v7m.basepri[env->v7m.secure] > 0) {
62
- running = env->v7m.basepri[env->v7m.secure] &
63
- nvic_gprio_mask(s, env->v7m.secure);
64
- } else {
65
- running = NVIC_NOEXC_PRIO; /* lower than any possible priority */
66
}
67
+
68
+ if (env->v7m.faultmask[M_REG_NS]) {
69
+ if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
70
+ running = -1;
71
+ } else {
72
+ if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
73
+ if (running > NVIC_NS_PRIO_LIMIT) {
74
+ running = NVIC_NS_PRIO_LIMIT;
75
+ }
76
+ } else {
77
+ running = 0;
78
+ }
79
+ }
80
+ }
81
+
82
+ if (env->v7m.faultmask[M_REG_S]) {
83
+ running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
84
+ }
85
+
86
/* consider priority of active handler */
87
return MIN(running, s->exception_prio);
88
}
78
}
89
--
79
--
90
2.7.4
80
2.34.1
91
92
1
Update the static_ops functions to use new-style mmio
1
From: Colton Lewis <coltonlewis@google.com>
2
rather than the legacy old_mmio functions.
3
2
3
Due to recent KVM changes, QEMU is setting a ptimer offset resulting
4
in unintended trap and emulate access and a consequent performance
5
hit. Filter out the PTIMER_CNT register to restore trapless ptimer
6
access.
7
8
Quoting Andrew Jones:
9
10
Simply reading the CNT register and writing back the same value is
11
enough to set an offset, since the timer will have certainly moved
12
past whatever value was read by the time it's written. QEMU
13
frequently saves and restores all registers in the get-reg-list array,
14
unless they've been explicitly filtered out (with Linux commit
15
680232a94c12, KVM_REG_ARM_PTIMER_CNT is now in the array). So, to
16
restore trapless ptimer accesses, we need a QEMU patch to filter out
17
the register.
18
19
See
20
https://lore.kernel.org/kvmarm/gsntttsonus5.fsf@coltonlewis-kvm.c.googlers.com/T/#m0770023762a821db2a3f0dd0a7dc6aa54e0d0da9
21
for additional context.
22
23
Cc: qemu-stable@nongnu.org
24
Signed-off-by: Andrew Jones <andrew.jones@linux.dev>
25
Signed-off-by: Colton Lewis <coltonlewis@google.com>
26
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
27
Tested-by: Colton Lewis <coltonlewis@google.com>
28
Message-id: 20230831190052.129045-1-coltonlewis@google.com
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
29
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 1505580378-9044-2-git-send-email-peter.maydell@linaro.org
7
---
30
---
8
hw/arm/palm.c | 30 ++++++++++--------------------
31
target/arm/kvm64.c | 1 +
9
1 file changed, 10 insertions(+), 20 deletions(-)
32
1 file changed, 1 insertion(+)
10
33
11
diff --git a/hw/arm/palm.c b/hw/arm/palm.c
34
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
12
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
13
--- a/hw/arm/palm.c
36
--- a/target/arm/kvm64.c
14
+++ b/hw/arm/palm.c
37
+++ b/target/arm/kvm64.c
15
@@ -XXX,XX +XXX,XX @@
38
@@ -XXX,XX +XXX,XX @@ typedef struct CPRegStateLevel {
16
#include "exec/address-spaces.h"
39
*/
17
#include "cpu.h"
40
static const CPRegStateLevel non_runtime_cpregs[] = {
18
41
{ KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
19
-static uint32_t static_readb(void *opaque, hwaddr offset)
42
+ { KVM_REG_ARM_PTIMER_CNT, KVM_PUT_FULL_STATE },
20
+static uint64_t static_read(void *opaque, hwaddr offset, unsigned size)
21
{
22
- uint32_t *val = (uint32_t *) opaque;
23
- return *val >> ((offset & 3) << 3);
24
-}
25
+ uint32_t *val = (uint32_t *)opaque;
26
+ uint32_t sizemask = 7 >> size;
27
28
-static uint32_t static_readh(void *opaque, hwaddr offset)
29
-{
30
- uint32_t *val = (uint32_t *) opaque;
31
- return *val >> ((offset & 1) << 3);
32
-}
33
-
34
-static uint32_t static_readw(void *opaque, hwaddr offset)
35
-{
36
- uint32_t *val = (uint32_t *) opaque;
37
- return *val >> ((offset & 0) << 3);
38
+ return *val >> ((offset & sizemask) << 3);
39
}
40
41
-static void static_write(void *opaque, hwaddr offset,
42
- uint32_t value)
43
+static void static_write(void *opaque, hwaddr offset, uint64_t value,
44
+ unsigned size)
45
{
46
#ifdef SPY
47
printf("%s: value %08lx written at " PA_FMT "\n",
48
@@ -XXX,XX +XXX,XX @@ static void static_write(void *opaque, hwaddr offset,
49
}
50
51
static const MemoryRegionOps static_ops = {
52
- .old_mmio = {
53
- .read = { static_readb, static_readh, static_readw, },
54
- .write = { static_write, static_write, static_write, },
55
- },
56
+ .read = static_read,
57
+ .write = static_write,
58
+ .valid.min_access_size = 1,
59
+ .valid.max_access_size = 4,
60
.endianness = DEVICE_NATIVE_ENDIAN,
61
};
43
};
62
44
45
int kvm_arm_cpreg_level(uint64_t regidx)
63
--
46
--
64
2.7.4
47
2.34.1
65
66
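
To make Andrew's point concrete, here is a rough sketch of the problematic
save/restore sequence (conceptual pseudo-code only, not the actual QEMU or
KVM code paths; reg_read/reg_write and PTIMER_CNT_REGID are hypothetical
stand-ins for the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls and the
KVM_REG_ARM_PTIMER_CNT register id):

#include <stdint.h>

extern uint64_t reg_read(uint64_t regid);            /* hypothetical */
extern void reg_write(uint64_t regid, uint64_t val); /* hypothetical */

static const uint64_t PTIMER_CNT_REGID = 0;          /* placeholder id */

static void sync_ptimer_cnt(void)
{
    /* Blanket save/restore, as done for every reg in the get-reg-list: */
    uint64_t cnt = reg_read(PTIMER_CNT_REGID);

    /* ... time passes; the physical counter keeps ticking ... */

    /*
     * Writing the stale value back asks the kernel to make the guest's
     * counter read 'cnt' *now*, i.e. to program a non-zero offset.  With
     * an offset in place, guest physical counter/timer accesses can no
     * longer be passed through untrapped, hence the performance hit.
     */
    reg_write(PTIMER_CNT_REGID, cnt);
}

Marking KVM_REG_ARM_PTIMER_CNT as a full-state-only register, as the patch
does, keeps it out of the routine sync and so avoids setting the offset in
the first place.
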
1
In v8M the MSR and MRS instructions have extra register value
1
From: Richard Henderson <richard.henderson@linaro.org>
2
encodings to allow secure code to access the non-secure banked
3
version of various special registers.
4
2
5
(We don't implement the MSPLIM_NS or PSPLIM_NS aliases, because
3
Provide a stub implementation, as a write is a "request".
6
we don't currently implement the stack limit registers at all.)
7
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20230831232441.66020-2-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 1505240046-11454-2-git-send-email-peter.maydell@linaro.org
11
---
9
---
12
target/arm/helper.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++++++++
10
target/arm/helper.c | 64 +++++++++++++++++++++++++++++----------------
13
1 file changed, 110 insertions(+)
11
1 file changed, 41 insertions(+), 23 deletions(-)
14
12
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
13
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
15
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
16
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
17
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
20
break;
18
};
21
case 20: /* CONTROL */
19
modify_arm_cp_regs(v8_idregs, v8_user_idregs);
22
return env->v7m.control[env->v7m.secure];
20
#endif
23
+ case 0x94: /* CONTROL_NS */
21
- /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
24
+ /* We have to handle this here because unprivileged Secure code
22
+ /*
25
+ * can read the NS CONTROL register.
23
+ * RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL.
24
+ * TODO: For RMR, a write with bit 1 set should do something with
25
+ * cpu_reset(). In the meantime, "the bit is strictly a request",
26
+ * so we are in spec just ignoring writes.
26
+ */
27
+ */
27
+ if (!env->v7m.secure) {
28
if (!arm_feature(env, ARM_FEATURE_EL3) &&
28
+ return 0;
29
!arm_feature(env, ARM_FEATURE_EL2)) {
29
+ }
30
- ARMCPRegInfo rvbar = {
30
+ return env->v7m.control[M_REG_NS];
31
- .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
32
- .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
33
- .access = PL1_R,
34
- .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
35
+ ARMCPRegInfo el1_reset_regs[] = {
36
+ { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
37
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
38
+ .access = PL1_R,
39
+ .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
40
+ { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
41
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
42
+ .access = PL1_RW, .type = ARM_CP_CONST,
43
+ .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
44
};
45
- define_one_arm_cp_reg(cpu, &rvbar);
46
+ define_arm_cp_regs(cpu, el1_reset_regs);
47
}
48
define_arm_cp_regs(cpu, v8_idregs);
49
define_arm_cp_regs(cpu, v8_cp_reginfo);
50
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
51
if (cpu_isar_feature(aa64_sel2, cpu)) {
52
define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
53
}
54
- /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
55
+ /*
56
+ * RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL.
57
+ * See commentary near RMR_EL1.
58
+ */
59
if (!arm_feature(env, ARM_FEATURE_EL3)) {
60
- ARMCPRegInfo rvbar[] = {
61
- {
62
- .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
63
- .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
64
- .access = PL2_R,
65
- .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
66
- },
67
- { .name = "RVBAR", .type = ARM_CP_ALIAS,
68
- .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
69
- .access = PL2_R,
70
- .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
71
- },
72
+ static const ARMCPRegInfo el2_reset_regs[] = {
73
+ { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
74
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
75
+ .access = PL2_R,
76
+ .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
77
+ { .name = "RVBAR", .type = ARM_CP_ALIAS,
78
+ .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
79
+ .access = PL2_R,
80
+ .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
81
+ { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
82
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
83
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
84
};
85
- define_arm_cp_regs(cpu, rvbar);
86
+ define_arm_cp_regs(cpu, el2_reset_regs);
87
}
31
}
88
}
32
89
33
if (el == 0) {
90
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
34
return 0; /* unprivileged reads others as zero */
91
{ .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
35
}
92
.opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
36
93
.access = PL3_R,
37
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
94
- .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
38
+ switch (reg) {
95
- },
39
+ case 0x88: /* MSP_NS */
96
+ .fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
40
+ if (!env->v7m.secure) {
97
+ { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
41
+ return 0;
98
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
42
+ }
99
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
43
+ return env->v7m.other_ss_msp;
100
+ { .name = "RMR", .state = ARM_CP_STATE_AA32,
44
+ case 0x89: /* PSP_NS */
101
+ .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
45
+ if (!env->v7m.secure) {
102
+ .access = PL3_RW, .type = ARM_CP_CONST,
46
+ return 0;
103
+ .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
47
+ }
104
{ .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
48
+ return env->v7m.other_ss_psp;
105
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
49
+ case 0x90: /* PRIMASK_NS */
106
.access = PL3_RW,
50
+ if (!env->v7m.secure) {
51
+ return 0;
52
+ }
53
+ return env->v7m.primask[M_REG_NS];
54
+ case 0x91: /* BASEPRI_NS */
55
+ if (!env->v7m.secure) {
56
+ return 0;
57
+ }
58
+ return env->v7m.basepri[M_REG_NS];
59
+ case 0x93: /* FAULTMASK_NS */
60
+ if (!env->v7m.secure) {
61
+ return 0;
62
+ }
63
+ return env->v7m.faultmask[M_REG_NS];
64
+ case 0x98: /* SP_NS */
65
+ {
66
+ /* This gives the non-secure SP selected based on whether we're
67
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
68
+ */
69
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
70
+
71
+ if (!env->v7m.secure) {
72
+ return 0;
73
+ }
74
+ if (!arm_v7m_is_handler_mode(env) && spsel) {
75
+ return env->v7m.other_ss_psp;
76
+ } else {
77
+ return env->v7m.other_ss_msp;
78
+ }
79
+ }
80
+ default:
81
+ break;
82
+ }
83
+ }
84
+
85
switch (reg) {
86
case 8: /* MSP */
87
return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
88
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
89
return;
90
}
91
92
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
93
+ switch (reg) {
94
+ case 0x88: /* MSP_NS */
95
+ if (!env->v7m.secure) {
96
+ return;
97
+ }
98
+ env->v7m.other_ss_msp = val;
99
+ return;
100
+ case 0x89: /* PSP_NS */
101
+ if (!env->v7m.secure) {
102
+ return;
103
+ }
104
+ env->v7m.other_ss_psp = val;
105
+ return;
106
+ case 0x90: /* PRIMASK_NS */
107
+ if (!env->v7m.secure) {
108
+ return;
109
+ }
110
+ env->v7m.primask[M_REG_NS] = val & 1;
111
+ return;
112
+ case 0x91: /* BASEPRI_NS */
113
+ if (!env->v7m.secure) {
114
+ return;
115
+ }
116
+ env->v7m.basepri[M_REG_NS] = val & 0xff;
117
+ return;
118
+ case 0x93: /* FAULTMASK_NS */
119
+ if (!env->v7m.secure) {
120
+ return;
121
+ }
122
+ env->v7m.faultmask[M_REG_NS] = val & 1;
123
+ return;
124
+ case 0x98: /* SP_NS */
125
+ {
126
+ /* This gives the non-secure SP selected based on whether we're
127
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
128
+ */
129
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
130
+
131
+ if (!env->v7m.secure) {
132
+ return;
133
+ }
134
+ if (!arm_v7m_is_handler_mode(env) && spsel) {
135
+ env->v7m.other_ss_psp = val;
136
+ } else {
137
+ env->v7m.other_ss_msp = val;
138
+ }
139
+ return;
140
+ }
141
+ default:
142
+ break;
143
+ }
144
+ }
145
+
146
switch (reg) {
147
case 0 ... 7: /* xPSR sub-fields */
148
/* only APSR is actually writable */
149
--
107
--
150
2.7.4
108
2.34.1
151
152
Deleted patch
1
With banked exceptions, just the exception number in
2
s->vectpending is no longer sufficient to uniquely identify
3
the pending exception. Add a vectpending_is_s_banked bool
4
which is true if the exception is using the sec_vectors[]
5
array.
6
1
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Message-id: 1505240046-11454-4-git-send-email-peter.maydell@linaro.org
9
---
10
include/hw/intc/armv7m_nvic.h | 11 +++++++++--
11
hw/intc/armv7m_nvic.c | 1 +
12
2 files changed, 10 insertions(+), 2 deletions(-)
13
14
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/hw/intc/armv7m_nvic.h
17
+++ b/include/hw/intc/armv7m_nvic.h
18
@@ -XXX,XX +XXX,XX @@ typedef struct NVICState {
19
VecInfo sec_vectors[NVIC_INTERNAL_VECTORS];
20
uint32_t prigroup;
21
22
- /* vectpending and exception_prio are both cached state that can
23
- * be recalculated from the vectors[] array and the prigroup field.
24
+ /* The following fields are all cached state that can be recalculated
25
+ * from the vectors[] and sec_vectors[] arrays and the prigroup field:
26
+ * - vectpending
27
+ * - vectpending_is_secure
28
+ * - exception_prio
29
*/
30
unsigned int vectpending; /* highest prio pending enabled exception */
31
+ /* true if vectpending is a banked secure exception, ie it is in
32
+ * sec_vectors[] rather than vectors[]
33
+ */
34
+ bool vectpending_is_s_banked;
35
int exception_prio; /* group prio of the highest prio active exception */
36
37
MemoryRegion sysregmem;
38
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/hw/intc/armv7m_nvic.c
41
+++ b/hw/intc/armv7m_nvic.c
42
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_reset(DeviceState *dev)
43
44
s->exception_prio = NVIC_NOEXC_PRIO;
45
s->vectpending = 0;
46
+ s->vectpending_is_s_banked = false;
47
}
48
49
static void nvic_systick_trigger(void *opaque, int n, int level)
50
--
51
2.7.4
52
53
1
Don't use the old_mmio struct in memory region ops.
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The cortex-a710 is a first-generation ARMv9.0-A processor.
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20230831232441.66020-3-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 1505580378-9044-5-git-send-email-peter.maydell@linaro.org
6
---
9
---
7
hw/timer/omap_gptimer.c | 49 +++++++++++++++++++++++++++++++++++++------------
10
docs/system/arm/virt.rst | 1 +
8
1 file changed, 37 insertions(+), 12 deletions(-)
11
hw/arm/virt.c | 1 +
12
target/arm/tcg/cpu64.c | 212 +++++++++++++++++++++++++++++++++++++++
13
3 files changed, 214 insertions(+)
9
14
10
diff --git a/hw/timer/omap_gptimer.c b/hw/timer/omap_gptimer.c
15
diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst
11
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
12
--- a/hw/timer/omap_gptimer.c
17
--- a/docs/system/arm/virt.rst
13
+++ b/hw/timer/omap_gptimer.c
18
+++ b/docs/system/arm/virt.rst
14
@@ -XXX,XX +XXX,XX @@ static void omap_gp_timer_writeh(void *opaque, hwaddr addr,
19
@@ -XXX,XX +XXX,XX @@ Supported guest CPU types:
15
s->writeh = (uint16_t) value;
20
- ``cortex-a57`` (64-bit)
21
- ``cortex-a72`` (64-bit)
22
- ``cortex-a76`` (64-bit)
23
+- ``cortex-a710`` (64-bit)
24
- ``a64fx`` (64-bit)
25
- ``host`` (with KVM only)
26
- ``neoverse-n1`` (64-bit)
27
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/hw/arm/virt.c
30
+++ b/hw/arm/virt.c
31
@@ -XXX,XX +XXX,XX @@ static const char *valid_cpus[] = {
32
ARM_CPU_TYPE_NAME("cortex-a55"),
33
ARM_CPU_TYPE_NAME("cortex-a72"),
34
ARM_CPU_TYPE_NAME("cortex-a76"),
35
+ ARM_CPU_TYPE_NAME("cortex-a710"),
36
ARM_CPU_TYPE_NAME("a64fx"),
37
ARM_CPU_TYPE_NAME("neoverse-n1"),
38
ARM_CPU_TYPE_NAME("neoverse-v1"),
39
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/target/arm/tcg/cpu64.c
42
+++ b/target/arm/tcg/cpu64.c
43
@@ -XXX,XX +XXX,XX @@ static void aarch64_neoverse_v1_initfn(Object *obj)
44
aarch64_add_sve_properties(obj);
16
}
45
}
17
46
18
+static uint64_t omap_gp_timer_readfn(void *opaque, hwaddr addr,
47
+static const ARMCPRegInfo cortex_a710_cp_reginfo[] = {
19
+ unsigned size)
48
+ { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
49
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 0,
50
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
51
+ .accessfn = access_actlr_w },
52
+ { .name = "CPUACTLR2_EL1", .state = ARM_CP_STATE_AA64,
53
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 1,
54
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
55
+ .accessfn = access_actlr_w },
56
+ { .name = "CPUACTLR3_EL1", .state = ARM_CP_STATE_AA64,
57
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 2,
58
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
59
+ .accessfn = access_actlr_w },
60
+ { .name = "CPUACTLR4_EL1", .state = ARM_CP_STATE_AA64,
61
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 3,
62
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
63
+ .accessfn = access_actlr_w },
64
+ { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
65
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 4,
66
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
67
+ .accessfn = access_actlr_w },
68
+ { .name = "CPUECTLR2_EL1", .state = ARM_CP_STATE_AA64,
69
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 5,
70
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
71
+ .accessfn = access_actlr_w },
72
+ { .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64,
73
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 4,
74
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
75
+ { .name = "CPUPWRCTLR_EL1", .state = ARM_CP_STATE_AA64,
76
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7,
77
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
78
+ .accessfn = access_actlr_w },
79
+ { .name = "ATCR_EL1", .state = ARM_CP_STATE_AA64,
80
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 7, .opc2 = 0,
81
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
82
+ { .name = "CPUACTLR5_EL1", .state = ARM_CP_STATE_AA64,
83
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 8, .opc2 = 0,
84
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
85
+ .accessfn = access_actlr_w },
86
+ { .name = "CPUACTLR6_EL1", .state = ARM_CP_STATE_AA64,
87
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 8, .opc2 = 1,
88
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
89
+ .accessfn = access_actlr_w },
90
+ { .name = "CPUACTLR7_EL1", .state = ARM_CP_STATE_AA64,
91
+ .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 8, .opc2 = 2,
92
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
93
+ .accessfn = access_actlr_w },
94
+ { .name = "ATCR_EL2", .state = ARM_CP_STATE_AA64,
95
+ .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 0,
96
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
97
+ { .name = "AVTCR_EL2", .state = ARM_CP_STATE_AA64,
98
+ .opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 1,
99
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
100
+ { .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64,
101
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 0,
102
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
103
+ { .name = "CPUPPMCR2_EL3", .state = ARM_CP_STATE_AA64,
104
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 1,
105
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
106
+ { .name = "CPUPPMCR4_EL3", .state = ARM_CP_STATE_AA64,
107
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 4,
108
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
109
+ { .name = "CPUPPMCR5_EL3", .state = ARM_CP_STATE_AA64,
110
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 5,
111
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
112
+ { .name = "CPUPPMCR6_EL3", .state = ARM_CP_STATE_AA64,
113
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 6,
114
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
115
+ { .name = "CPUACTLR_EL3", .state = ARM_CP_STATE_AA64,
116
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 4, .opc2 = 0,
117
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
118
+ { .name = "ATCR_EL3", .state = ARM_CP_STATE_AA64,
119
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 7, .opc2 = 0,
120
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
121
+ { .name = "CPUPSELR_EL3", .state = ARM_CP_STATE_AA64,
122
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 0,
123
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
124
+ { .name = "CPUPCR_EL3", .state = ARM_CP_STATE_AA64,
125
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 1,
126
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
127
+ { .name = "CPUPOR_EL3", .state = ARM_CP_STATE_AA64,
128
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 2,
129
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
130
+ { .name = "CPUPMR_EL3", .state = ARM_CP_STATE_AA64,
131
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 3,
132
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
133
+ { .name = "CPUPOR2_EL3", .state = ARM_CP_STATE_AA64,
134
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 4,
135
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
136
+ { .name = "CPUPMR2_EL3", .state = ARM_CP_STATE_AA64,
137
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 5,
138
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
139
+ { .name = "CPUPFR_EL3", .state = ARM_CP_STATE_AA64,
140
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 6,
141
+ .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
142
+
143
+ /*
144
+ * Stub RAMINDEX, as we don't actually implement caches, BTB,
145
+ * or anything else with cpu internal memory.
146
+ * "Read" zeros into the IDATA* and DDATA* output registers.
147
+ */
148
+ { .name = "RAMINDEX_EL3", .state = ARM_CP_STATE_AA64,
149
+ .opc0 = 1, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 0,
150
+ .access = PL3_W, .type = ARM_CP_CONST, .resetvalue = 0 },
151
+ { .name = "IDATA0_EL3", .state = ARM_CP_STATE_AA64,
152
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 0,
153
+ .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
154
+ { .name = "IDATA1_EL3", .state = ARM_CP_STATE_AA64,
155
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 1,
156
+ .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
157
+ { .name = "IDATA2_EL3", .state = ARM_CP_STATE_AA64,
158
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 0, .opc2 = 2,
159
+ .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
160
+ { .name = "DDATA0_EL3", .state = ARM_CP_STATE_AA64,
161
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 1, .opc2 = 0,
162
+ .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
163
+ { .name = "DDATA1_EL3", .state = ARM_CP_STATE_AA64,
164
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 1, .opc2 = 1,
165
+ .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
166
+ { .name = "DDATA2_EL3", .state = ARM_CP_STATE_AA64,
167
+ .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 1, .opc2 = 2,
168
+ .access = PL3_R, .type = ARM_CP_CONST, .resetvalue = 0 },
169
+};
170
+
171
+static void aarch64_a710_initfn(Object *obj)
20
+{
172
+{
21
+ switch (size) {
173
+ ARMCPU *cpu = ARM_CPU(obj);
22
+ case 1:
174
+
23
+ return omap_badwidth_read32(opaque, addr);
175
+ cpu->dtb_compatible = "arm,cortex-a710";
24
+ case 2:
176
+ set_feature(&cpu->env, ARM_FEATURE_V8);
25
+ return omap_gp_timer_readh(opaque, addr);
177
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
26
+ case 4:
178
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
27
+ return omap_gp_timer_readw(opaque, addr);
179
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
28
+ default:
180
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
29
+ g_assert_not_reached();
181
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
30
+ }
182
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
183
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
184
+
185
+ /* Ordered by Section B.4: AArch64 registers */
186
+ cpu->midr = 0x412FD471; /* r2p1 */
187
+ cpu->revidr = 0;
188
+ cpu->isar.id_pfr0 = 0x21110131;
189
+ cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
190
+ cpu->isar.id_dfr0 = 0x16011099;
191
+ cpu->id_afr0 = 0;
192
+ cpu->isar.id_mmfr0 = 0x10201105;
193
+ cpu->isar.id_mmfr1 = 0x40000000;
194
+ cpu->isar.id_mmfr2 = 0x01260000;
195
+ cpu->isar.id_mmfr3 = 0x02122211;
196
+ cpu->isar.id_isar0 = 0x02101110;
197
+ cpu->isar.id_isar1 = 0x13112111;
198
+ cpu->isar.id_isar2 = 0x21232042;
199
+ cpu->isar.id_isar3 = 0x01112131;
200
+ cpu->isar.id_isar4 = 0x00010142;
201
+ cpu->isar.id_isar5 = 0x11011121; /* with Crypto */
202
+ cpu->isar.id_mmfr4 = 0x21021110;
203
+ cpu->isar.id_isar6 = 0x01111111;
204
+ cpu->isar.mvfr0 = 0x10110222;
205
+ cpu->isar.mvfr1 = 0x13211111;
206
+ cpu->isar.mvfr2 = 0x00000043;
207
+ cpu->isar.id_pfr2 = 0x00000011;
208
+ cpu->isar.id_aa64pfr0 = 0x1201111120111112ull; /* GIC filled in later */
209
+ cpu->isar.id_aa64pfr1 = 0x0000000000000221ull;
210
+ cpu->isar.id_aa64zfr0 = 0x0000110100110021ull; /* with Crypto */
211
+ cpu->isar.id_aa64dfr0 = 0x000011f010305611ull;
212
+ cpu->isar.id_aa64dfr1 = 0;
213
+ cpu->id_aa64afr0 = 0;
214
+ cpu->id_aa64afr1 = 0;
215
+ cpu->isar.id_aa64isar0 = 0x0221111110212120ull; /* with Crypto */
216
+ cpu->isar.id_aa64isar1 = 0x0010111101211032ull;
217
+ cpu->isar.id_aa64mmfr0 = 0x0000022200101122ull;
218
+ cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
219
+ cpu->isar.id_aa64mmfr2 = 0x1221011110101011ull;
220
+ cpu->clidr = 0x0000001482000023ull;
221
+ cpu->gm_blocksize = 4;
222
+ cpu->ctr = 0x000000049444c004ull;
223
+ cpu->dcz_blocksize = 4;
224
+ /* TODO FEAT_MPAM: mpamidr_el1 = 0x0000_0001_0006_003f */
225
+
226
+ /* Section B.5.2: PMCR_EL0 */
227
+ cpu->isar.reset_pmcr_el0 = 0xa000; /* with 20 counters */
228
+
229
+ /* Section B.6.7: ICH_VTR_EL2 */
230
+ cpu->gic_num_lrs = 4;
231
+ cpu->gic_vpribits = 5;
232
+ cpu->gic_vprebits = 5;
233
+ cpu->gic_pribits = 5;
234
+
235
+ /* Section 14: Scalable Vector Extensions support */
236
+ cpu->sve_vq.supported = 1 << 0; /* 128bit */
237
+
238
+ /*
239
+ * The cortex-a710 TRM does not list CCSIDR values. The layout of
240
+ * the caches are in text in Table 7-1, Table 8-1, and Table 9-1.
241
+ *
242
+ * L1: 4-way set associative 64-byte line size, total either 32K or 64K.
243
+ * L2: 8-way set associative 64 byte line size, total either 256K or 512K.
244
+ */
245
+ cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */
246
+ cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */
247
+ cpu->ccsidr[2] = make_ccsidr64(8, 64, 512 * KiB); /* L2 cache */
248
+
249
+ /* FIXME: Not documented -- copied from neoverse-v1 */
250
+ cpu->reset_sctlr = 0x30c50838;
251
+
252
+ define_arm_cp_regs(cpu, cortex_a710_cp_reginfo);
253
+
254
+ aarch64_add_pauth_properties(obj);
255
+ aarch64_add_sve_properties(obj);
31
+}
256
+}
32
+
257
+
33
+static void omap_gp_timer_writefn(void *opaque, hwaddr addr,
258
/*
34
+ uint64_t value, unsigned size)
259
* -cpu max: a CPU with as many features enabled as our emulation supports.
35
+{
260
* The version of '-cpu max' for qemu-system-arm is defined in cpu32.c;
36
+ switch (size) {
261
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo aarch64_cpus[] = {
37
+ case 1:
262
{ .name = "cortex-a55", .initfn = aarch64_a55_initfn },
38
+ omap_badwidth_write32(opaque, addr, value);
263
{ .name = "cortex-a72", .initfn = aarch64_a72_initfn },
39
+ break;
264
{ .name = "cortex-a76", .initfn = aarch64_a76_initfn },
40
+ case 2:
265
+ { .name = "cortex-a710", .initfn = aarch64_a710_initfn },
41
+ omap_gp_timer_writeh(opaque, addr, value);
266
{ .name = "a64fx", .initfn = aarch64_a64fx_initfn },
42
+ break;
267
{ .name = "neoverse-n1", .initfn = aarch64_neoverse_n1_initfn },
43
+ case 4:
268
{ .name = "neoverse-v1", .initfn = aarch64_neoverse_v1_initfn },
44
+ omap_gp_timer_write(opaque, addr, value);
45
+ break;
46
+ default:
47
+ g_assert_not_reached();
48
+ }
49
+}
50
+
51
static const MemoryRegionOps omap_gp_timer_ops = {
52
- .old_mmio = {
53
- .read = {
54
- omap_badwidth_read32,
55
- omap_gp_timer_readh,
56
- omap_gp_timer_readw,
57
- },
58
- .write = {
59
- omap_badwidth_write32,
60
- omap_gp_timer_writeh,
61
- omap_gp_timer_write,
62
- },
63
- },
64
+ .read = omap_gp_timer_readfn,
65
+ .write = omap_gp_timer_writefn,
66
+ .valid.min_access_size = 1,
67
+ .valid.max_access_size = 4,
68
.endianness = DEVICE_NATIVE_ENDIAN,
69
};
70
71
--
269
--
72
2.7.4
270
2.34.1
73
74
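
A quick aside on the cortex-a710 cache values above: make_ccsidr64() is given
(associativity, line size, total size), so the set count follows from those
three numbers. The standalone sketch below (compute_sets() is a local helper,
not a QEMU function) just checks the geometry quoted in the comment:

/* Standalone sketch: derive the set count for the cache geometries
 * described in the cortex-a710 comment (4-way/64B-line/64KiB L1,
 * 8-way/64B-line/512KiB L2). compute_sets() is a local helper only.
 */
#include <stdio.h>

static unsigned compute_sets(unsigned assoc, unsigned linesize, unsigned size)
{
    return size / (assoc * linesize);
}

int main(void)
{
    printf("L1: %u sets\n", compute_sets(4, 64, 64 * 1024));   /* 256 */
    printf("L2: %u sets\n", compute_sets(8, 64, 512 * 1024));  /* 1024 */
    return 0;
}
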
Update the nvic_recompute_state() code to handle the security
extension and its associated banked registers.

Code that uses the resulting cached state (ie the irq
acknowledge and complete code) will be updated in a later
commit.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-9-git-send-email-peter.maydell@linaro.org
---
hw/intc/armv7m_nvic.c | 151 ++++++++++++++++++++++++++++++++++++++++++++++++--
hw/intc/trace-events | 1 +
2 files changed, 147 insertions(+), 5 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Perform the check for EL2 enabled in the security space and the
TIDCP bit in an out-of-line helper.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230831232441.66020-4-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper.h | 1 +
target/arm/tcg/op_helper.c | 13 +++++++++++++
target/arm/tcg/translate-a64.c | 16 ++++++++++++++--
target/arm/tcg/translate.c | 27 +++++++++++++++++++++++++++
4 files changed, 55 insertions(+), 2 deletions(-)
15
16
16
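
For reference, the AIRCR.PRIS squashing added below in exc_group_prio() masks
off the subpriority and then maps a non-secure priority into the 0x80..0xff
part of the range. A standalone sketch of that arithmetic (the constant mirrors
NVIC_NS_PRIO_LIMIT from the patch; ns_group_prio() is a local stand-in, not the
QEMU function):

/* Standalone sketch of the v8M AIRCR.PRIS mapping used in exc_group_prio():
 * mask off the subpriority, then map NS priorities into 0x80..0xff.
 */
#include <stdio.h>
#include <stdint.h>

#define NVIC_NS_PRIO_LIMIT 0x80

static int ns_group_prio(int rawprio, int prigroup, int pris)
{
    uint32_t mask = ~0U << (prigroup + 1);   /* same as nvic_gprio_mask() */

    rawprio &= mask;
    if (pris) {
        rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
    }
    return rawprio;
}

int main(void)
{
    /* e.g. raw priority 0x60, PRIGROUP=4: group prio 0x60, squashed to 0xb0 */
    printf("0x%x\n", ns_group_prio(0x60, 4, 1));
    return 0;
}
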
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
17
diff --git a/target/arm/helper.h b/target/arm/helper.h
17
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/intc/armv7m_nvic.c
19
--- a/target/arm/helper.h
19
+++ b/hw/intc/armv7m_nvic.c
20
+++ b/target/arm/helper.h
20
@@ -XXX,XX +XXX,XX @@
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
21
* (higher than the highest possible priority value)
22
22
*/
23
DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
23
#define NVIC_NOEXC_PRIO 0x100
24
DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32)
24
+/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
25
+DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32)
25
+#define NVIC_NS_PRIO_LIMIT 0x80
26
DEF_HELPER_3(set_cp_reg, void, env, cptr, i32)
26
27
DEF_HELPER_2(get_cp_reg, i32, env, cptr)
27
static const uint8_t nvic_id[] = {
28
DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64)
28
0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
29
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
29
@@ -XXX,XX +XXX,XX @@ static bool nvic_isrpending(NVICState *s)
30
index XXXXXXX..XXXXXXX 100644
30
return false;
31
--- a/target/arm/tcg/op_helper.c
32
+++ b/target/arm/tcg/op_helper.c
33
@@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key)
34
return ri;
31
}
35
}
32
36
33
+static bool exc_is_banked(int exc)
37
+/*
38
+ * Test for HCR_EL2.TIDCP at EL1.
39
+ * Since implementation defined registers are rare, and within QEMU
40
+ * most of them are no-op, do not waste HFLAGS space for this and
41
+ * always use a helper.
42
+ */
43
+void HELPER(tidcp_el1)(CPUARMState *env, uint32_t syndrome)
34
+{
44
+{
35
+ /* Return true if this is one of the limited set of exceptions which
45
+ if (arm_hcr_el2_eff(env) & HCR_TIDCP) {
36
+ * are banked (and thus have state in sec_vectors[])
46
+ raise_exception_ra(env, EXCP_UDEF, syndrome, 2, GETPC());
37
+ */
38
+ return exc == ARMV7M_EXCP_HARD ||
39
+ exc == ARMV7M_EXCP_MEM ||
40
+ exc == ARMV7M_EXCP_USAGE ||
41
+ exc == ARMV7M_EXCP_SVC ||
42
+ exc == ARMV7M_EXCP_PENDSV ||
43
+ exc == ARMV7M_EXCP_SYSTICK;
44
+}
45
+
46
/* Return a mask word which clears the subpriority bits from
47
* a priority value for an M-profile exception, leaving only
48
* the group priority.
49
*/
50
-static inline uint32_t nvic_gprio_mask(NVICState *s)
51
+static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
52
+{
53
+ return ~0U << (s->prigroup[secure] + 1);
54
+}
55
+
56
+static bool exc_targets_secure(NVICState *s, int exc)
57
+{
58
+ /* Return true if this non-banked exception targets Secure state. */
59
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
60
+ return false;
61
+ }
62
+
63
+ if (exc >= NVIC_FIRST_IRQ) {
64
+ return !s->itns[exc];
65
+ }
66
+
67
+ /* Function shouldn't be called for banked exceptions. */
68
+ assert(!exc_is_banked(exc));
69
+
70
+ switch (exc) {
71
+ case ARMV7M_EXCP_NMI:
72
+ case ARMV7M_EXCP_BUS:
73
+ return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
74
+ case ARMV7M_EXCP_SECURE:
75
+ return true;
76
+ case ARMV7M_EXCP_DEBUG:
77
+ /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
78
+ return false;
79
+ default:
80
+ /* reset, and reserved (unused) low exception numbers.
81
+ * We'll get called by code that loops through all the exception
82
+ * numbers, but it doesn't matter what we return here as these
83
+ * non-existent exceptions will never be pended or active.
84
+ */
85
+ return true;
86
+ }
47
+ }
87
+}
48
+}
88
+
49
+
89
+static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
50
void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
51
{
52
const ARMCPRegInfo *ri = rip;
53
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/arm/tcg/translate-a64.c
56
+++ b/target/arm/tcg/translate-a64.c
57
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, bool isread,
58
bool need_exit_tb = false;
59
TCGv_ptr tcg_ri = NULL;
60
TCGv_i64 tcg_rt;
61
+ uint32_t syndrome;
62
+
63
+ if (crn == 11 || crn == 15) {
64
+ /*
65
+ * Check for TIDCP trap, which must take precedence over
66
+ * the UNDEF for "no such register" etc.
67
+ */
68
+ syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
69
+ switch (s->current_el) {
70
+ case 1:
71
+ gen_helper_tidcp_el1(cpu_env, tcg_constant_i32(syndrome));
72
+ break;
73
+ }
74
+ }
75
76
if (!ri) {
77
/* Unknown register; this might be a guest error or a QEMU
78
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, bool isread,
79
/* Emit code to perform further access permissions checks at
80
* runtime; this may result in an exception.
81
*/
82
- uint32_t syndrome;
83
-
84
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
85
gen_a64_update_pc(s, 0);
86
tcg_ri = tcg_temp_new_ptr();
87
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
88
index XXXXXXX..XXXXXXX 100644
89
--- a/target/arm/tcg/translate.c
90
+++ b/target/arm/tcg/translate.c
91
@@ -XXX,XX +XXX,XX @@ void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
92
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
93
}
94
95
+static bool aa32_cpreg_encoding_in_impdef_space(uint8_t crn, uint8_t crm)
90
+{
96
+{
91
+ /* Return the group priority for this exception, given its raw
97
+ static const uint16_t mask[3] = {
92
+ * (group-and-subgroup) priority value and whether it is targeting
98
+ 0b0000000111100111, /* crn == 9, crm == {c0-c2, c5-c8} */
93
+ * secure state or not.
99
+ 0b0000000100010011, /* crn == 10, crm == {c0, c1, c4, c8} */
94
+ */
100
+ 0b1000000111111111, /* crn == 11, crm == {c0-c8, c15} */
95
+ if (rawprio < 0) {
101
+ };
96
+ return rawprio;
102
+
103
+ if (crn >= 9 && crn <= 11) {
104
+ return (mask[crn - 9] >> crm) & 1;
97
+ }
105
+ }
98
+ rawprio &= nvic_gprio_mask(s, targets_secure);
106
+ return false;
99
+ /* AIRCR.PRIS causes us to squash all NS priorities into the
100
+ * lower half of the total range
101
+ */
102
+ if (!targets_secure &&
103
+ (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
104
+ rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
105
+ }
106
+ return rawprio;
107
+}
107
+}
108
+
108
+
109
+/* Recompute vectpending and exception_prio for a CPU which implements
109
static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
110
+ * the Security extension
110
int opc1, int crn, int crm, int opc2,
111
+ */
111
bool isread, int rt, int rt2)
112
+static void nvic_recompute_state_secure(NVICState *s)
112
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
113
{
113
}
114
- return ~0U << (s->prigroup[M_REG_NS] + 1);
114
}
115
+ int i, bank;
115
116
+ int pend_prio = NVIC_NOEXC_PRIO;
116
+ if (cpnum == 15 && aa32_cpreg_encoding_in_impdef_space(crn, crm)) {
117
+ int active_prio = NVIC_NOEXC_PRIO;
117
+ /*
118
+ int pend_irq = 0;
118
+ * Check for TIDCP trap, which must take precedence over the UNDEF
119
+ bool pending_is_s_banked = false;
119
+ * for "no such register" etc. It shares precedence with HSTR,
120
+
120
+ * but raises the same exception, so order doesn't matter.
121
+ /* R_CQRV: precedence is by:
121
+ */
122
+ * - lowest group priority; if both the same then
122
+ switch (s->current_el) {
123
+ * - lowest subpriority; if both the same then
123
+ case 1:
124
+ * - lowest exception number; if both the same (ie banked) then
124
+ gen_helper_tidcp_el1(cpu_env, tcg_constant_i32(syndrome));
125
+ * - secure exception takes precedence
125
+ break;
126
+ * Compare pseudocode RawExecutionPriority.
127
+ * Annoyingly, now we have two prigroup values (for S and NS)
128
+ * we can't do the loop comparison on raw priority values.
129
+ */
130
+ for (i = 1; i < s->num_irq; i++) {
131
+ for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
132
+ VecInfo *vec;
133
+ int prio;
134
+ bool targets_secure;
135
+
136
+ if (bank == M_REG_S) {
137
+ if (!exc_is_banked(i)) {
138
+ continue;
139
+ }
140
+ vec = &s->sec_vectors[i];
141
+ targets_secure = true;
142
+ } else {
143
+ vec = &s->vectors[i];
144
+ targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
145
+ }
146
+
147
+ prio = exc_group_prio(s, vec->prio, targets_secure);
148
+ if (vec->enabled && vec->pending && prio < pend_prio) {
149
+ pend_prio = prio;
150
+ pend_irq = i;
151
+ pending_is_s_banked = (bank == M_REG_S);
152
+ }
153
+ if (vec->active && prio < active_prio) {
154
+ active_prio = prio;
155
+ }
156
+ }
126
+ }
157
+ }
127
+ }
158
+
128
+
159
+ s->vectpending_is_s_banked = pending_is_s_banked;
129
if (!ri) {
160
+ s->vectpending = pend_irq;
130
/*
161
+ s->vectpending_prio = pend_prio;
131
* Unknown register; this might be a guest error or a QEMU
162
+ s->exception_prio = active_prio;
163
+
164
+ trace_nvic_recompute_state_secure(s->vectpending,
165
+ s->vectpending_is_s_banked,
166
+ s->vectpending_prio,
167
+ s->exception_prio);
168
}
169
170
/* Recompute vectpending and exception_prio */
171
@@ -XXX,XX +XXX,XX @@ static void nvic_recompute_state(NVICState *s)
172
int active_prio = NVIC_NOEXC_PRIO;
173
int pend_irq = 0;
174
175
+ /* In theory we could write one function that handled both
176
+ * the "security extension present" and "not present"; however
177
+ * the security related changes significantly complicate the
178
+ * recomputation just by themselves and mixing both cases together
179
+ * would be even worse, so we retain a separate non-secure-only
180
+ * version for CPUs which don't implement the security extension.
181
+ */
182
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
183
+ nvic_recompute_state_secure(s);
184
+ return;
185
+ }
186
+
187
for (i = 1; i < s->num_irq; i++) {
188
VecInfo *vec = &s->vectors[i];
189
190
@@ -XXX,XX +XXX,XX @@ static void nvic_recompute_state(NVICState *s)
191
}
192
193
if (active_prio > 0) {
194
- active_prio &= nvic_gprio_mask(s);
195
+ active_prio &= nvic_gprio_mask(s, false);
196
}
197
198
if (pend_prio > 0) {
199
- pend_prio &= nvic_gprio_mask(s);
200
+ pend_prio &= nvic_gprio_mask(s, false);
201
}
202
203
s->vectpending = pend_irq;
204
@@ -XXX,XX +XXX,XX @@ static inline int nvic_exec_prio(NVICState *s)
205
} else if (env->v7m.primask[env->v7m.secure]) {
206
running = 0;
207
} else if (env->v7m.basepri[env->v7m.secure] > 0) {
208
- running = env->v7m.basepri[env->v7m.secure] & nvic_gprio_mask(s);
209
+ running = env->v7m.basepri[env->v7m.secure] &
210
+ nvic_gprio_mask(s, env->v7m.secure);
211
} else {
212
running = NVIC_NOEXC_PRIO; /* lower than any possible priority */
213
}
214
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
215
index XXXXXXX..XXXXXXX 100644
216
--- a/hw/intc/trace-events
217
+++ b/hw/intc/trace-events
218
@@ -XXX,XX +XXX,XX @@ gicv3_redist_send_sgi(uint32_t cpu, int irq) "GICv3 redistributor 0x%x pending S
219
220
# hw/intc/armv7m_nvic.c
221
nvic_recompute_state(int vectpending, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d vectpending_prio %d exception_prio %d"
222
+nvic_recompute_state_secure(int vectpending, bool vectpending_is_s_banked, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d is_s_banked %d vectpending_prio %d exception_prio %d"
223
nvic_set_prio(int irq, uint8_t prio) "NVIC set irq %d priority %d"
224
nvic_irq_update(int vectpending, int pendprio, int exception_prio, int level) "NVIC vectpending %d pending prio %d exception_prio %d: setting irq line to %d"
225
nvic_escalate_prio(int irq, int irqprio, int runprio) "NVIC escalating irq %d to HardFault: insufficient priority %d >= %d"
226
--
132
--
227
2.7.4
133
2.34.1
228
229
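
The aa32_cpreg_encoding_in_impdef_space() check above packs the cp15 crn/crm
IMPDEF encodings into three 16-bit masks. A standalone sketch of the same
lookup (in_impdef_space() is a local copy for illustration, with the masks
written in hex):

/* Standalone sketch of the crn/crm lookup in
 * aa32_cpreg_encoding_in_impdef_space(): bit <crm> of mask[crn - 9]
 * says whether the cp15 encoding is in the IMPDEF space.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

static bool in_impdef_space(uint8_t crn, uint8_t crm)
{
    static const uint16_t mask[3] = {
        0x01e7, /* crn == 9:  crm == {c0-c2, c5-c8} */
        0x0113, /* crn == 10: crm == {c0, c1, c4, c8} */
        0x81ff, /* crn == 11: crm == {c0-c8, c15} */
    };

    if (crn >= 9 && crn <= 11) {
        return (mask[crn - 9] >> crm) & 1;
    }
    return false;
}

int main(void)
{
    printf("crn=11 crm=15 -> %d\n", in_impdef_space(11, 15)); /* 1 */
    printf("crn=10 crm=2  -> %d\n", in_impdef_space(10, 2));  /* 0 */
    return 0;
}
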
Make the set_prio() function take a bool indicating
whether to pend the secure or non-secure version of a banked
interrupt, and use this to implement the correct banking
semantics for the SHPR registers.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-11-git-send-email-peter.maydell@linaro.org
---
hw/intc/armv7m_nvic.c | 96 ++++++++++++++++++++++++++++++++++++++++-----
hw/intc/trace-events | 2 +-
2 files changed, 88 insertions(+), 10 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230831232441.66020-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
docs/system/arm/emulation.rst | 1 +
target/arm/cpu.h | 5 +++++
target/arm/helper.h | 1 +
target/arm/tcg/cpu64.c | 1 +
target/arm/tcg/op_helper.c | 20 ++++++++++++++++++++
target/arm/tcg/translate-a64.c | 5 +++++
target/arm/tcg/translate.c | 6 ++++++
7 files changed, 39 insertions(+)
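
The SHPR changes below index the per-handler priority bytes as
hdlidx = (offset - 0xd14) + i. A standalone sketch of that mapping
(shpr_handler_index() is a local helper, not QEMU code):

/* Standalone sketch: which system handler a given SHPR byte configures,
 * following the "hdlidx = (offset - 0xd14) + i" calculation in the patch.
 * SHPR1 is at 0xd18, so its first byte is exception 4 (MemManage).
 */
#include <stdio.h>
#include <stdint.h>

static unsigned shpr_handler_index(uint32_t offset, unsigned byte)
{
    return (offset - 0xd14) + byte;
}

int main(void)
{
    printf("SHPR1 byte 0 -> exception %u\n", shpr_handler_index(0xd18, 0)); /* 4 */
    printf("SHPR3 byte 3 -> exception %u\n", shpr_handler_index(0xd20, 3)); /* 15, SysTick */
    return 0;
}
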
13
16
14
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
17
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
15
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/intc/armv7m_nvic.c
19
--- a/docs/system/arm/emulation.rst
17
+++ b/hw/intc/armv7m_nvic.c
20
+++ b/docs/system/arm/emulation.rst
18
@@ -XXX,XX +XXX,XX @@ int armv7m_nvic_raw_execution_priority(void *opaque)
21
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
19
return s->exception_prio;
22
- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions)
23
- FEAT_SPECRES (Speculation restriction instructions)
24
- FEAT_SSBS (Speculative Store Bypass Safe)
25
+- FEAT_TIDCP1 (EL0 use of IMPLEMENTATION DEFINED functionality)
26
- FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain)
27
- FEAT_TLBIRANGE (TLB invalidate range instructions)
28
- FEAT_TTCNP (Translation table Common not private translations)
29
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/cpu.h
32
+++ b/target/arm/cpu.h
33
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id)
34
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0;
20
}
35
}
21
36
22
-/* caller must call nvic_irq_update() after this */
37
+static inline bool isar_feature_aa64_tidcp1(const ARMISARegisters *id)
23
-static void set_prio(NVICState *s, unsigned irq, uint8_t prio)
38
+{
24
+/* caller must call nvic_irq_update() after this.
39
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, TIDCP1) != 0;
25
+ * secure indicates the bank to use for banked exceptions (we assert if
26
+ * we are passed secure=true for a non-banked exception).
27
+ */
28
+static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
29
{
30
assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
31
assert(irq < s->num_irq);
32
33
- s->vectors[irq].prio = prio;
34
+ if (secure) {
35
+ assert(exc_is_banked(irq));
36
+ s->sec_vectors[irq].prio = prio;
37
+ } else {
38
+ s->vectors[irq].prio = prio;
39
+ }
40
+
41
+ trace_nvic_set_prio(irq, secure, prio);
42
+}
40
+}
43
+
41
+
44
+/* Return the current raw priority register value.
42
static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
45
+ * secure indicates the bank to use for banked exceptions (we assert if
43
{
46
+ * we are passed secure=true for a non-banked exception).
44
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;
47
+ */
45
diff --git a/target/arm/helper.h b/target/arm/helper.h
48
+static int get_prio(NVICState *s, unsigned irq, bool secure)
46
index XXXXXXX..XXXXXXX 100644
49
+{
47
--- a/target/arm/helper.h
50
+ assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
48
+++ b/target/arm/helper.h
51
+ assert(irq < s->num_irq);
49
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
52
50
53
- trace_nvic_set_prio(irq, prio);
51
DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
54
+ if (secure) {
52
DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32)
55
+ assert(exc_is_banked(irq));
53
+DEF_HELPER_FLAGS_2(tidcp_el0, TCG_CALL_NO_WG, void, env, i32)
56
+ return s->sec_vectors[irq].prio;
54
DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32)
57
+ } else {
55
DEF_HELPER_3(set_cp_reg, void, env, cptr, i32)
58
+ return s->vectors[irq].prio;
56
DEF_HELPER_2(get_cp_reg, i32, env, cptr)
59
+ }
57
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
60
}
58
index XXXXXXX..XXXXXXX 100644
61
59
--- a/target/arm/tcg/cpu64.c
62
/* Recompute state and assert irq line accordingly.
60
+++ b/target/arm/tcg/cpu64.c
63
@@ -XXX,XX +XXX,XX @@ static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
61
@@ -XXX,XX +XXX,XX @@ void aarch64_max_tcg_initfn(Object *obj)
62
t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
63
t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 1); /* FEAT_ETS */
64
t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */
65
+ t = FIELD_DP64(t, ID_AA64MMFR1, TIDCP1, 1); /* FEAT_TIDCP1 */
66
cpu->isar.id_aa64mmfr1 = t;
67
68
t = cpu->isar.id_aa64mmfr2;
69
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/arm/tcg/op_helper.c
72
+++ b/target/arm/tcg/op_helper.c
73
@@ -XXX,XX +XXX,XX @@ void HELPER(tidcp_el1)(CPUARMState *env, uint32_t syndrome)
64
}
74
}
65
}
75
}
66
76
67
+static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
77
+/*
78
+ * Similarly, for FEAT_TIDCP1 at EL0.
79
+ * We have already checked for the presence of the feature.
80
+ */
81
+void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome)
68
+{
82
+{
69
+ /* Behaviour for the SHPR register field for this exception:
83
+ /* See arm_sctlr(), but we also need the sctlr el. */
70
+ * return M_REG_NS to use the nonsecure vector (including for
84
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
71
+ * non-banked exceptions), M_REG_S for the secure version of
85
+ int target_el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
72
+ * a banked exception, and -1 if this field should RAZ/WI.
86
+
87
+ /*
88
+ * The bit is not valid unless the target el is aa64, but since the
89
+ * bit test is simpler perform that first and check validity after.
73
+ */
90
+ */
74
+ switch (exc) {
91
+ if ((env->cp15.sctlr_el[target_el] & SCTLR_TIDCP)
75
+ case ARMV7M_EXCP_MEM:
92
+ && arm_el_is_aa64(env, target_el)) {
76
+ case ARMV7M_EXCP_USAGE:
93
+ raise_exception_ra(env, EXCP_UDEF, syndrome, target_el, GETPC());
77
+ case ARMV7M_EXCP_SVC:
78
+ case ARMV7M_EXCP_PENDSV:
79
+ case ARMV7M_EXCP_SYSTICK:
80
+ /* Banked exceptions */
81
+ return attrs.secure;
82
+ case ARMV7M_EXCP_BUS:
83
+ /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
84
+ if (!attrs.secure &&
85
+ !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
86
+ return -1;
87
+ }
88
+ return M_REG_NS;
89
+ case ARMV7M_EXCP_SECURE:
90
+ /* Not banked, RAZ/WI from nonsecure */
91
+ if (!attrs.secure) {
92
+ return -1;
93
+ }
94
+ return M_REG_NS;
95
+ case ARMV7M_EXCP_DEBUG:
96
+ /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
97
+ return M_REG_NS;
98
+ case 8 ... 10:
99
+ case 13:
100
+ /* RES0 */
101
+ return -1;
102
+ default:
103
+ /* Not reachable due to decode of SHPR register addresses */
104
+ g_assert_not_reached();
105
+ }
94
+ }
106
+}
95
+}
107
+
96
+
108
static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
97
void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
109
uint64_t *data, unsigned size,
98
{
110
MemTxAttrs attrs)
99
const ARMCPRegInfo *ri = rip;
111
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
100
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
112
}
101
index XXXXXXX..XXXXXXX 100644
113
}
102
--- a/target/arm/tcg/translate-a64.c
114
break;
103
+++ b/target/arm/tcg/translate-a64.c
115
- case 0xd18 ... 0xd23: /* System Handler Priority. */
104
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, bool isread,
116
+ case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
105
*/
117
val = 0;
106
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
118
for (i = 0; i < size; i++) {
107
switch (s->current_el) {
119
- val |= s->vectors[(offset - 0xd14) + i].prio << (i * 8);
108
+ case 0:
120
+ unsigned hdlidx = (offset - 0xd14) + i;
109
+ if (dc_isar_feature(aa64_tidcp1, s)) {
121
+ int sbank = shpr_bank(s, hdlidx, attrs);
110
+ gen_helper_tidcp_el0(cpu_env, tcg_constant_i32(syndrome));
122
+
123
+ if (sbank < 0) {
124
+ continue;
125
+ }
111
+ }
126
+ val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
112
+ break;
127
}
113
case 1:
128
break;
114
gen_helper_tidcp_el1(cpu_env, tcg_constant_i32(syndrome));
129
case 0xfe0 ... 0xfff: /* ID. */
115
break;
130
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
116
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
131
117
index XXXXXXX..XXXXXXX 100644
132
for (i = 0; i < size && startvec + i < s->num_irq; i++) {
118
--- a/target/arm/tcg/translate.c
133
if (attrs.secure || s->itns[startvec + i]) {
119
+++ b/target/arm/tcg/translate.c
134
- set_prio(s, startvec + i, (value >> (i * 8)) & 0xff);
120
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
135
+ set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
121
* but raises the same exception, so order doesn't matter.
136
}
122
*/
137
}
123
switch (s->current_el) {
138
nvic_irq_update(s);
124
+ case 0:
139
return MEMTX_OK;
125
+ if (arm_dc_feature(s, ARM_FEATURE_AARCH64)
140
- case 0xd18 ... 0xd23: /* System Handler Priority. */
126
+ && dc_isar_feature(aa64_tidcp1, s)) {
141
+ case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
127
+ gen_helper_tidcp_el0(cpu_env, tcg_constant_i32(syndrome));
142
for (i = 0; i < size; i++) {
143
unsigned hdlidx = (offset - 0xd14) + i;
144
- set_prio(s, hdlidx, (value >> (i * 8)) & 0xff);
145
+ int newprio = extract32(value, i * 8, 8);
146
+ int sbank = shpr_bank(s, hdlidx, attrs);
147
+
148
+ if (sbank < 0) {
149
+ continue;
150
+ }
128
+ }
151
+ set_prio(s, hdlidx, sbank, newprio);
129
+ break;
152
}
130
case 1:
153
nvic_irq_update(s);
131
gen_helper_tidcp_el1(cpu_env, tcg_constant_i32(syndrome));
154
return MEMTX_OK;
132
break;
155
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
156
index XXXXXXX..XXXXXXX 100644
157
--- a/hw/intc/trace-events
158
+++ b/hw/intc/trace-events
159
@@ -XXX,XX +XXX,XX @@ gicv3_redist_send_sgi(uint32_t cpu, int irq) "GICv3 redistributor 0x%x pending S
160
# hw/intc/armv7m_nvic.c
161
nvic_recompute_state(int vectpending, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d vectpending_prio %d exception_prio %d"
162
nvic_recompute_state_secure(int vectpending, bool vectpending_is_s_banked, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d is_s_banked %d vectpending_prio %d exception_prio %d"
163
-nvic_set_prio(int irq, uint8_t prio) "NVIC set irq %d priority %d"
164
+nvic_set_prio(int irq, bool secure, uint8_t prio) "NVIC set irq %d secure-bank %d priority %d"
165
nvic_irq_update(int vectpending, int pendprio, int exception_prio, int level) "NVIC vectpending %d pending prio %d exception_prio %d: setting irq line to %d"
166
nvic_escalate_prio(int irq, int irqprio, int runprio) "NVIC escalating irq %d to HardFault: insufficient priority %d >= %d"
167
nvic_escalate_disabled(int irq) "NVIC escalating irq %d to HardFault: disabled"
168
--
133
--
169
2.7.4
134
2.34.1
170
171
The Application Interrupt and Reset Control Register has some changes
for v8M:
* new bits SYSRESETREQS, BFHFNMINS and PRIS: these all have
  real state if the security extension is implemented and otherwise
  are constant
* the PRIGROUP field is banked between security states
* non-secure code can be blocked from using the SYSRESET bit
  to reset the system if SYSRESETREQS is set

Implement the new state and the changes to register read and write.
For the moment we ignore the effects of the secure PRIGROUP.
We will implement the effects of PRIS and BFHFNMINS later.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-6-git-send-email-peter.maydell@linaro.org
---
include/hw/intc/armv7m_nvic.h | 3 ++-
target/arm/cpu.h | 12 +++++++++++
hw/intc/armv7m_nvic.c | 49 +++++++++++++++++++++++++++++++++----------
target/arm/cpu.c | 7 +++++++
4 files changed, 59 insertions(+), 12 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

The Linux kernel detects and enables this bit. Once trapped,
EC_SYSTEMREGISTERTRAP is treated like EC_UNCATEGORIZED, so
no changes required within linux-user/aarch64/cpu_loop.c.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230831232441.66020-6-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/cpu.c | 4 ++++
1 file changed, 4 insertions(+)
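
The AIRCR write path below only acts when VECTKEY reads back 0x05fa and then
pulls PRIGROUP out of bits [10:8]. A standalone sketch of those field accesses
(extract32() here is a local re-implementation of the QEMU helper, for
illustration only):

/* Standalone sketch of the AIRCR fields used in the patch: writes are only
 * accepted when VECTKEY (bits [31:16]) is 0x05fa, and PRIGROUP is bits [10:8].
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0U >> (32 - length));
}

int main(void)
{
    uint32_t value = 0x05fa0304;                 /* example guest write */
    bool key_ok = (value >> 16) == 0x05fa;
    uint32_t prigroup = extract32(value, 8, 3);  /* 3 */
    bool sysresetreq = value & (1 << 2);         /* set */

    printf("key_ok=%d prigroup=%u sysresetreq=%d\n", key_ok, prigroup, sysresetreq);
    return 0;
}
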
23
14
24
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/include/hw/intc/armv7m_nvic.h
27
+++ b/include/hw/intc/armv7m_nvic.h
28
@@ -XXX,XX +XXX,XX @@ typedef struct NVICState {
29
* Entries in sec_vectors[] for non-banked exception numbers are unused.
30
*/
31
VecInfo sec_vectors[NVIC_INTERNAL_VECTORS];
32
- uint32_t prigroup;
33
+ /* The PRIGROUP field in AIRCR is banked */
34
+ uint32_t prigroup[M_REG_NUM_BANKS];
35
36
/* The following fields are all cached state that can be recalculated
37
* from the vectors[] and sec_vectors[] arrays and the prigroup field:
38
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/arm/cpu.h
41
+++ b/target/arm/cpu.h
42
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
43
int exception;
44
uint32_t primask[M_REG_NUM_BANKS];
45
uint32_t faultmask[M_REG_NUM_BANKS];
46
+ uint32_t aircr; /* only holds r/w state if security extn implemented */
47
uint32_t secure; /* Is CPU in Secure state? (not guest visible) */
48
} v7m;
49
50
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_CCR, STKALIGN, 9, 1)
51
FIELD(V7M_CCR, DC, 16, 1)
52
FIELD(V7M_CCR, IC, 17, 1)
53
54
+/* V7M AIRCR bits */
55
+FIELD(V7M_AIRCR, VECTRESET, 0, 1)
56
+FIELD(V7M_AIRCR, VECTCLRACTIVE, 1, 1)
57
+FIELD(V7M_AIRCR, SYSRESETREQ, 2, 1)
58
+FIELD(V7M_AIRCR, SYSRESETREQS, 3, 1)
59
+FIELD(V7M_AIRCR, PRIGROUP, 8, 3)
60
+FIELD(V7M_AIRCR, BFHFNMINS, 13, 1)
61
+FIELD(V7M_AIRCR, PRIS, 14, 1)
62
+FIELD(V7M_AIRCR, ENDIANNESS, 15, 1)
63
+FIELD(V7M_AIRCR, VECTKEY, 16, 16)
64
+
65
/* V7M CFSR bits for MMFSR */
66
FIELD(V7M_CFSR, IACCVIOL, 0, 1)
67
FIELD(V7M_CFSR, DACCVIOL, 1, 1)
68
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/hw/intc/armv7m_nvic.c
71
+++ b/hw/intc/armv7m_nvic.c
72
@@ -XXX,XX +XXX,XX @@ static bool nvic_isrpending(NVICState *s)
73
*/
74
static inline uint32_t nvic_gprio_mask(NVICState *s)
75
{
76
- return ~0U << (s->prigroup + 1);
77
+ return ~0U << (s->prigroup[M_REG_NS] + 1);
78
}
79
80
/* Recompute vectpending and exception_prio */
81
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
82
return val;
83
case 0xd08: /* Vector Table Offset. */
84
return cpu->env.v7m.vecbase[attrs.secure];
85
- case 0xd0c: /* Application Interrupt/Reset Control. */
86
- return 0xfa050000 | (s->prigroup << 8);
87
+ case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
88
+ val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
89
+ if (attrs.secure) {
90
+ /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
91
+ val |= cpu->env.v7m.aircr;
92
+ } else {
93
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
94
+ /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
95
+ * security isn't supported then BFHFNMINS is RAO (and
96
+ * the bit in env.v7m.aircr is always set).
97
+ */
98
+ val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
99
+ }
100
+ }
101
+ return val;
102
case 0xd10: /* System Control. */
103
/* TODO: Implement SLEEPONEXIT. */
104
return 0;
105
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
106
case 0xd08: /* Vector Table Offset. */
107
cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
108
break;
109
- case 0xd0c: /* Application Interrupt/Reset Control. */
110
- if ((value >> 16) == 0x05fa) {
111
- if (value & 4) {
112
- qemu_irq_pulse(s->sysresetreq);
113
+ case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
114
+ if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
115
+ if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
116
+ if (attrs.secure ||
117
+ !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
118
+ qemu_irq_pulse(s->sysresetreq);
119
+ }
120
}
121
- if (value & 2) {
122
+ if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
123
qemu_log_mask(LOG_GUEST_ERROR,
124
"Setting VECTCLRACTIVE when not in DEBUG mode "
125
"is UNPREDICTABLE\n");
126
}
127
- if (value & 1) {
128
+ if (value & R_V7M_AIRCR_VECTRESET_MASK) {
129
+ /* NB: this bit is RES0 in v8M */
130
qemu_log_mask(LOG_GUEST_ERROR,
131
"Setting VECTRESET when not in DEBUG mode "
132
"is UNPREDICTABLE\n");
133
}
134
- s->prigroup = extract32(value, 8, 3);
135
+ s->prigroup[attrs.secure] = extract32(value,
136
+ R_V7M_AIRCR_PRIGROUP_SHIFT,
137
+ R_V7M_AIRCR_PRIGROUP_LENGTH);
138
+ if (attrs.secure) {
139
+ /* These bits are only writable by secure */
140
+ cpu->env.v7m.aircr = value &
141
+ (R_V7M_AIRCR_SYSRESETREQS_MASK |
142
+ R_V7M_AIRCR_BFHFNMINS_MASK |
143
+ R_V7M_AIRCR_PRIS_MASK);
144
+ }
145
nvic_irq_update(s);
146
}
147
break;
148
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_nvic_security = {
149
.fields = (VMStateField[]) {
150
VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
151
vmstate_VecInfo, VecInfo),
152
+ VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
153
VMSTATE_END_OF_LIST()
154
}
155
};
156
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_nvic = {
157
.fields = (VMStateField[]) {
158
VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
159
vmstate_VecInfo, VecInfo),
160
- VMSTATE_UINT32(prigroup, NVICState),
161
+ VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
162
VMSTATE_END_OF_LIST()
163
},
164
.subsections = (const VMStateDescription*[]) {
165
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
15
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
166
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
167
--- a/target/arm/cpu.c
17
--- a/target/arm/cpu.c
168
+++ b/target/arm/cpu.c
18
+++ b/target/arm/cpu.c
169
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
19
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset_hold(Object *obj)
170
20
SCTLR_EnDA | SCTLR_EnDB);
171
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
21
/* Trap on btype=3 for PACIxSP. */
172
env->v7m.secure = true;
22
env->cp15.sctlr_el[1] |= SCTLR_BT0;
173
+ } else {
23
+ /* Trap on implementation defined registers. */
174
+ /* This bit resets to 0 if security is supported, but 1 if
24
+ if (cpu_isar_feature(aa64_tidcp1, cpu)) {
175
+ * it is not. The bit is not present in v7M, but we set it
25
+ env->cp15.sctlr_el[1] |= SCTLR_TIDCP;
176
+ * here so we can avoid having to make checks on it conditional
26
+ }
177
+ * on ARM_FEATURE_V8 (we don't let the guest see the bit).
27
/* and to the FP/Neon instructions */
178
+ */
28
env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
179
+ env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
29
CPACR_EL1, FPEN, 3);
180
}
181
182
/* In v7M the reset value of this bit is IMPDEF, but ARM recommends
183
--
30
--
184
2.7.4
31
2.34.1
185
186
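
Taken together, the TIDCP changes in this series trap IMPDEF system register
accesses in two places: at EL1 when HCR_EL2.TIDCP is set, and at EL0
(FEAT_TIDCP1) when the controlling SCTLR_ELx.TIDCP bit is set and that EL is
AArch64. A much-simplified standalone sketch of those two decisions (the
booleans stand in for the real register state; this is not QEMU code):

/* Simplified standalone sketch of the decisions made by helper_tidcp_el1()
 * and helper_tidcp_el0() in the patches above.
 */
#include <stdbool.h>
#include <stdio.h>

/* EL1 access to an IMPDEF register: trapped to EL2 if HCR_EL2.TIDCP is set. */
static bool tidcp_trap_el1(bool hcr_tidcp)
{
    return hcr_tidcp;
}

/* EL0 access (FEAT_TIDCP1): trapped if the controlling SCTLR.TIDCP is set
 * and that controlling EL is AArch64.
 */
static bool tidcp_trap_el0(bool sctlr_tidcp, bool target_el_is_aa64)
{
    return sctlr_tidcp && target_el_is_aa64;
}

int main(void)
{
    printf("EL1, HCR_EL2.TIDCP=1 -> trap=%d\n", tidcp_trap_el1(true));
    printf("EL0, SCTLR.TIDCP=1, AArch64 EL1 -> trap=%d\n",
           tidcp_trap_el0(true, true));
    return 0;
}
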
1
Instead of looking up the pending priority
1
From: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
2
in nvic_pending_prio(), cache it in a new state struct
3
field. The calculation of the pending priority given
4
the interrupt number is more complicated in v8M with
5
the security extension, so the caching will be worthwhile.
6
2
7
This changes nvic_pending_prio() from returning a full
3
Now that we have Eager Page Split support added for ARM in the kernel,
8
(group + subpriority) priority value to returning a group
4
enable it in QEMU. This adds:
9
priority. This doesn't require changes to its callsites
5
-eager-split-size to -accel sub-options to set the eager page split chunk size.
10
because we use it only in comparisons of the form
6
-enable KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE.
11
execution_prio > nvic_pending_prio()
12
and execution priority is always a group priority, so
13
a test (exec prio > full prio) is true if and only if
14
(execprio > group_prio).
15
7
16
(Architecturally the expected comparison is with the
8
The chunk size specifies how many pages to break at a time, using a
17
group priority for this sort of "would we preempt" test;
9
single allocation. The bigger the chunk size, the more pages need to be
18
we were only doing a test with a full priority as an
10
allocated ahead of time.
19
optimisation to avoid the mask, which is possible
20
precisely because the two comparisons always give the
21
same answer.)
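
As a quick standalone check of that equivalence (PRIGROUP = 4 picked
arbitrarily; not part of the patch):

/* Standalone check of the claim above: with exec restricted to group
 * priorities, (exec > full_prio) and (exec > group_prio) always agree.
 * Exhaustive for 8-bit priorities with PRIGROUP = 4 (group bits 0xe0).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint32_t mask = ~0U << (4 + 1);   /* nvic_gprio_mask() for PRIGROUP=4 */
    int mismatches = 0;

    for (int exec = 0; exec < 0x100; exec += 0x20) {   /* group priorities only */
        for (int prio = 0; prio < 0x100; prio++) {
            int group = prio & mask;
            if ((exec > prio) != (exec > group)) {
                mismatches++;
            }
        }
    }
    printf("mismatches: %d\n", mismatches);   /* prints 0 */
    return 0;
}
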
22
11
12
Reviewed-by: Gavin Shan <gshan@redhat.com>
13
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
14
Message-id: 20230905091246.1931-1-shameerali.kolothum.thodi@huawei.com
23
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
24
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
25
Message-id: 1505240046-11454-5-git-send-email-peter.maydell@linaro.org
26
---
16
---
27
include/hw/intc/armv7m_nvic.h | 2 ++
17
include/sysemu/kvm_int.h | 1 +
28
hw/intc/armv7m_nvic.c | 23 +++++++++++++----------
18
accel/kvm/kvm-all.c | 1 +
29
hw/intc/trace-events | 2 +-
19
target/arm/kvm.c | 61 ++++++++++++++++++++++++++++++++++++++++
30
3 files changed, 16 insertions(+), 11 deletions(-)
20
qemu-options.hx | 15 ++++++++++
21
4 files changed, 78 insertions(+)
31
22
32
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
23
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
33
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
34
--- a/include/hw/intc/armv7m_nvic.h
25
--- a/include/sysemu/kvm_int.h
35
+++ b/include/hw/intc/armv7m_nvic.h
26
+++ b/include/sysemu/kvm_int.h
36
@@ -XXX,XX +XXX,XX @@ typedef struct NVICState {
27
@@ -XXX,XX +XXX,XX @@ struct KVMState
37
* - vectpending
28
uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */
38
* - vectpending_is_secure
29
uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */
39
* - exception_prio
30
bool kvm_dirty_ring_with_bitmap;
40
+ * - vectpending_prio
31
+ uint64_t kvm_eager_split_size; /* Eager Page Splitting chunk size */
41
*/
32
struct KVMDirtyRingReaper reaper;
42
unsigned int vectpending; /* highest prio pending enabled exception */
33
NotifyVmexitOption notify_vmexit;
43
/* true if vectpending is a banked secure exception, ie it is in
34
uint32_t notify_window;
44
@@ -XXX,XX +XXX,XX @@ typedef struct NVICState {
35
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
45
*/
46
bool vectpending_is_s_banked;
47
int exception_prio; /* group prio of the highest prio active exception */
48
+ int vectpending_prio; /* group prio of the exeception in vectpending */
49
50
MemoryRegion sysregmem;
51
MemoryRegion sysreg_ns_mem;
52
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
53
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
54
--- a/hw/intc/armv7m_nvic.c
37
--- a/accel/kvm/kvm-all.c
55
+++ b/hw/intc/armv7m_nvic.c
38
+++ b/accel/kvm/kvm-all.c
56
@@ -XXX,XX +XXX,XX @@ static const uint8_t nvic_id[] = {
39
@@ -XXX,XX +XXX,XX @@ static void kvm_accel_instance_init(Object *obj)
57
40
/* KVM dirty ring is by default off */
58
static int nvic_pending_prio(NVICState *s)
41
s->kvm_dirty_ring_size = 0;
59
{
42
s->kvm_dirty_ring_with_bitmap = false;
60
- /* return the priority of the current pending interrupt,
43
+ s->kvm_eager_split_size = 0;
61
+ /* return the group priority of the current pending interrupt,
44
s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN;
62
* or NVIC_NOEXC_PRIO if no interrupt is pending
45
s->notify_window = 0;
63
*/
46
s->xen_version = 0;
64
- return s->vectpending ? s->vectors[s->vectpending].prio : NVIC_NOEXC_PRIO;
47
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
65
+ return s->vectpending_prio;
48
index XXXXXXX..XXXXXXX 100644
66
}
49
--- a/target/arm/kvm.c
67
50
+++ b/target/arm/kvm.c
68
/* Return the value of the ISCR RETTOBASE bit:
51
@@ -XXX,XX +XXX,XX @@
69
@@ -XXX,XX +XXX,XX @@ static void nvic_recompute_state(NVICState *s)
52
#include "exec/address-spaces.h"
70
active_prio &= nvic_gprio_mask(s);
53
#include "hw/boards.h"
54
#include "hw/irq.h"
55
+#include "qapi/visitor.h"
56
#include "qemu/log.h"
57
58
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
59
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init(MachineState *ms, KVMState *s)
60
}
71
}
61
}
72
62
73
+ if (pend_prio > 0) {
63
+ if (s->kvm_eager_split_size) {
74
+ pend_prio &= nvic_gprio_mask(s);
64
+ uint32_t sizes;
65
+
66
+ sizes = kvm_vm_check_extension(s, KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES);
67
+ if (!sizes) {
68
+ s->kvm_eager_split_size = 0;
69
+ warn_report("Eager Page Split support not available");
70
+ } else if (!(s->kvm_eager_split_size & sizes)) {
71
+ error_report("Eager Page Split requested chunk size not valid");
72
+ ret = -EINVAL;
73
+ } else {
74
+ ret = kvm_vm_enable_cap(s, KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE, 0,
75
+ s->kvm_eager_split_size);
76
+ if (ret < 0) {
77
+ error_report("Enabling of Eager Page Split failed: %s",
78
+ strerror(-ret));
79
+ }
80
+ }
75
+ }
81
+ }
76
+
82
+
77
s->vectpending = pend_irq;
83
kvm_arm_init_debug(s);
78
+ s->vectpending_prio = pend_prio;
84
79
s->exception_prio = active_prio;
85
return ret;
80
86
@@ -XXX,XX +XXX,XX @@ bool kvm_arch_cpu_check_are_resettable(void)
81
- trace_nvic_recompute_state(s->vectpending, s->exception_prio);
87
return true;
82
+ trace_nvic_recompute_state(s->vectpending,
83
+ s->vectpending_prio,
84
+ s->exception_prio);
85
}
88
}
86
89
87
/* Return the current execution priority of the CPU
90
+static void kvm_arch_get_eager_split_size(Object *obj, Visitor *v,
88
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_acknowledge_irq(void *opaque)
91
+ const char *name, void *opaque,
89
CPUARMState *env = &s->cpu->env;
92
+ Error **errp)
90
const int pending = s->vectpending;
93
+{
91
const int running = nvic_exec_prio(s);
94
+ KVMState *s = KVM_STATE(obj);
92
- int pendgroupprio;
95
+ uint64_t value = s->kvm_eager_split_size;
93
VecInfo *vec;
96
+
94
97
+ visit_type_size(v, name, &value, errp);
95
assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
98
+}
96
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_acknowledge_irq(void *opaque)
99
+
97
assert(vec->enabled);
100
+static void kvm_arch_set_eager_split_size(Object *obj, Visitor *v,
98
assert(vec->pending);
101
+ const char *name, void *opaque,
99
102
+ Error **errp)
100
- pendgroupprio = vec->prio;
103
+{
101
- if (pendgroupprio > 0) {
104
+ KVMState *s = KVM_STATE(obj);
102
- pendgroupprio &= nvic_gprio_mask(s);
105
+ uint64_t value;
103
- }
106
+
104
- assert(pendgroupprio < running);
107
+ if (s->fd != -1) {
105
+ assert(s->vectpending_prio < running);
108
+ error_setg(errp, "Unable to set early-split-size after KVM has been initialized");
106
109
+ return;
107
- trace_nvic_acknowledge_irq(pending, vec->prio);
110
+ }
108
+ trace_nvic_acknowledge_irq(pending, s->vectpending_prio);
111
+
109
112
+ if (!visit_type_size(v, name, &value, errp)) {
110
vec->active = 1;
113
+ return;
111
vec->pending = 0;
114
+ }
112
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_reset(DeviceState *dev)
115
+
113
s->exception_prio = NVIC_NOEXC_PRIO;
116
+ if (value && !is_power_of_2(value)) {
114
s->vectpending = 0;
117
+ error_setg(errp, "early-split-size must be a power of two");
115
s->vectpending_is_s_banked = false;
118
+ return;
116
+ s->vectpending_prio = NVIC_NOEXC_PRIO;
119
+ }
120
+
121
+ s->kvm_eager_split_size = value;
122
+}
123
+
124
void kvm_arch_accel_class_init(ObjectClass *oc)
125
{
126
+ object_class_property_add(oc, "eager-split-size", "size",
127
+ kvm_arch_get_eager_split_size,
128
+ kvm_arch_set_eager_split_size, NULL, NULL);
129
+
130
+ object_class_property_set_description(oc, "eager-split-size",
131
+ "Eager Page Split chunk size for hugepages. (default: 0, disabled)");
117
}
132
}
118
133
diff --git a/qemu-options.hx b/qemu-options.hx
119
static void nvic_systick_trigger(void *opaque, int n, int level)
120
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
121
index XXXXXXX..XXXXXXX 100644
134
index XXXXXXX..XXXXXXX 100644
122
--- a/hw/intc/trace-events
135
--- a/qemu-options.hx
123
+++ b/hw/intc/trace-events
136
+++ b/qemu-options.hx
124
@@ -XXX,XX +XXX,XX @@ gicv3_redist_set_irq(uint32_t cpu, int irq, int level) "GICv3 redistributor 0x%x
137
@@ -XXX,XX +XXX,XX @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel,
125
gicv3_redist_send_sgi(uint32_t cpu, int irq) "GICv3 redistributor 0x%x pending SGI %d"
138
" split-wx=on|off (enable TCG split w^x mapping)\n"
126
139
" tb-size=n (TCG translation block cache size)\n"
127
# hw/intc/armv7m_nvic.c
140
" dirty-ring-size=n (KVM dirty ring GFN count, default 0)\n"
128
-nvic_recompute_state(int vectpending, int exception_prio) "NVIC state recomputed: vectpending %d exception_prio %d"
141
+ " eager-split-size=n (KVM Eager Page Split chunk size, default 0, disabled. ARM only)\n"
129
+nvic_recompute_state(int vectpending, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d vectpending_prio %d exception_prio %d"
142
" notify-vmexit=run|internal-error|disable,notify-window=n (enable notify VM exit and set notify window, x86 only)\n"
130
nvic_set_prio(int irq, uint8_t prio) "NVIC set irq %d priority %d"
143
" thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL)
131
nvic_irq_update(int vectpending, int pendprio, int exception_prio, int level) "NVIC vectpending %d pending prio %d exception_prio %d: setting irq line to %d"
144
SRST
132
nvic_escalate_prio(int irq, int irqprio, int runprio) "NVIC escalating irq %d to HardFault: insufficient priority %d >= %d"
145
@@ -XXX,XX +XXX,XX @@ SRST
146
is disabled (dirty-ring-size=0). When enabled, KVM will instead
147
record dirty pages in a bitmap.
148
149
+ ``eager-split-size=n``
150
+ KVM implements dirty page logging at the PAGE_SIZE granularity and
151
+ enabling dirty-logging on a huge-page requires breaking it into
152
+ PAGE_SIZE pages in the first place. KVM on ARM does this splitting
153
+ lazily by default. There are performance benefits in doing huge-page
154
+ split eagerly, especially in situations where TLBI costs associated
155
+ with break-before-make sequences are considerable and also if guest
156
+ workloads are read intensive. The size here specifies how many pages
157
+ to break at a time and needs to be a valid block size which is
158
+ 1GB/2MB/4KB, 32MB/16KB and 512MB/64KB for 4KB/16KB/64KB PAGE_SIZE
159
+ respectively. Be wary of specifying a higher size as it will have an
160
+ impact on memory consumption. By default, this feature is disabled
161
+ (eager-split-size=0).
162
+
163
``notify-vmexit=run|internal-error|disable,notify-window=n``
164
Enables or disables notify VM exit support on x86 host and specify
165
the corresponding notify window to trigger the VM exit if enabled.
133
--
166
--
134
2.7.4
167
2.34.1
135
136
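
The eager-split-size handling above accepts only a power-of-two chunk that also
appears in the KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES bitmap. A standalone sketch of
that validation (eager_split_size_ok() and the example 'supported' bitmap are
illustrative, not queried from KVM):

/* Standalone sketch of the eager-split-size checks above: the value must be
 * a power of two and must be one of the block sizes advertised by KVM.
 * 'supported' below is an example bitmap (2MiB and 1GiB), not a real query.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

static bool eager_split_size_ok(uint64_t value, uint64_t supported)
{
    if (value & (value - 1)) {          /* not a power of two */
        return false;
    }
    return value == 0 || (value & supported);   /* 0 means "disabled" */
}

int main(void)
{
    uint64_t supported = (2ULL << 20) | (1ULL << 30);  /* 2MiB | 1GiB */

    printf("2MiB ok: %d\n", eager_split_size_ok(2ULL << 20, supported));  /* 1 */
    printf("4MiB ok: %d\n", eager_split_size_ok(4ULL << 20, supported));  /* 0 */
    return 0;
}
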
Deleted patch
Update the code in nvic_rettobase() so that it checks the
sec_vectors[] array as well as the vectors[] array if needed.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-7-git-send-email-peter.maydell@linaro.org
---
hw/intc/armv7m_nvic.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
10
11
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/hw/intc/armv7m_nvic.c
14
+++ b/hw/intc/armv7m_nvic.c
15
@@ -XXX,XX +XXX,XX @@ static int nvic_pending_prio(NVICState *s)
16
static bool nvic_rettobase(NVICState *s)
17
{
18
int irq, nhand = 0;
19
+ bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
20
21
for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
22
- if (s->vectors[irq].active) {
23
+ if (s->vectors[irq].active ||
24
+ (check_sec && irq < NVIC_INTERNAL_VECTORS &&
25
+ s->sec_vectors[irq].active)) {
26
nhand++;
27
if (nhand == 2) {
28
return 0;
29
--
30
2.7.4
31
32
diff view generated by jsdifflib
Deleted patch
For v8M, the NVIC has a new set of registers per interrupt,
NVIC_ITNS<n>. These determine whether the interrupt targets Secure
or Non-secure state. Implement the register read/write code for
these, and make them cause NVIC_IABR, NVIC_ICER, NVIC_ISER,
NVIC_ICPR, NVIC_IPR and NVIC_ISPR to RAZ/WI for non-secure
accesses to fields corresponding to interrupts which are
configured to target secure state.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-8-git-send-email-peter.maydell@linaro.org
---
include/hw/intc/armv7m_nvic.h | 3 ++-
hw/intc/armv7m_nvic.c | 74 +++++++++++++++++++++++++++++++++----
2 files changed, 70 insertions(+), 7 deletions(-)
16
17
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/hw/intc/armv7m_nvic.h
20
+++ b/include/hw/intc/armv7m_nvic.h
21
@@ -XXX,XX +XXX,XX @@ typedef struct NVICState {
22
/* The PRIGROUP field in AIRCR is banked */
23
uint32_t prigroup[M_REG_NUM_BANKS];
24
25
+ /* v8M NVIC_ITNS state (stored as a bool per bit) */
26
+ bool itns[NVIC_MAX_VECTORS];
27
+
28
/* The following fields are all cached state that can be recalculated
29
* from the vectors[] and sec_vectors[] arrays and the prigroup field:
30
* - vectpending
31
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/hw/intc/armv7m_nvic.c
34
+++ b/hw/intc/armv7m_nvic.c
35
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
36
switch (offset) {
37
case 4: /* Interrupt Control Type. */
38
return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
39
+ case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
40
+ {
41
+ int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ;
42
+ int i;
43
+
44
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
45
+ goto bad_offset;
46
+ }
47
+ if (!attrs.secure) {
48
+ return 0;
49
+ }
50
+ val = 0;
51
+ for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
52
+ if (s->itns[startvec + i]) {
53
+ val |= (1 << i);
54
+ }
55
+ }
56
+ return val;
57
+ }
58
case 0xd00: /* CPUID Base. */
59
return cpu->midr;
60
case 0xd04: /* Interrupt Control State. */
61
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
62
ARMCPU *cpu = s->cpu;
63
64
switch (offset) {
65
+ case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
66
+ {
67
+ int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ;
68
+ int i;
69
+
70
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
71
+ goto bad_offset;
72
+ }
73
+ if (!attrs.secure) {
74
+ break;
75
+ }
76
+ for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
77
+ s->itns[startvec + i] = (value >> i) & 1;
78
+ }
79
+ nvic_irq_update(s);
80
+ break;
81
+ }
82
case 0xd04: /* Interrupt Control State. */
83
if (value & (1 << 31)) {
84
armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI);
85
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
86
startvec = offset - 0x180 + NVIC_FIRST_IRQ; /* vector # */
87
88
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
89
- if (s->vectors[startvec + i].enabled) {
90
+ if (s->vectors[startvec + i].enabled &&
91
+ (attrs.secure || s->itns[startvec + i])) {
92
val |= (1 << i);
93
}
94
}
95
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
96
val = 0;
97
startvec = offset - 0x280 + NVIC_FIRST_IRQ; /* vector # */
98
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
99
- if (s->vectors[startvec + i].pending) {
100
+ if (s->vectors[startvec + i].pending &&
101
+ (attrs.secure || s->itns[startvec + i])) {
102
val |= (1 << i);
103
}
104
}
105
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
106
startvec = offset - 0x300 + NVIC_FIRST_IRQ; /* vector # */
107
108
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
109
- if (s->vectors[startvec + i].active) {
110
+ if (s->vectors[startvec + i].active &&
111
+ (attrs.secure || s->itns[startvec + i])) {
112
val |= (1 << i);
113
}
114
}
115
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
116
startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */
117
118
for (i = 0; i < size && startvec + i < s->num_irq; i++) {
119
- val |= s->vectors[startvec + i].prio << (8 * i);
120
+ if (attrs.secure || s->itns[startvec + i]) {
121
+ val |= s->vectors[startvec + i].prio << (8 * i);
122
+ }
123
}
124
break;
125
case 0xd18 ... 0xd23: /* System Handler Priority. */
126
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
127
startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;
128
129
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
130
- if (value & (1 << i)) {
131
+ if (value & (1 << i) &&
132
+ (attrs.secure || s->itns[startvec + i])) {
133
s->vectors[startvec + i].enabled = setval;
134
}
135
}
136
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
137
startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
138
139
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
140
- if (value & (1 << i)) {
141
+ if (value & (1 << i) &&
142
+ (attrs.secure || s->itns[startvec + i])) {
143
s->vectors[startvec + i].pending = setval;
144
}
145
}
146
@@ -XXX,XX +XXX,XX @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
147
startvec = 8 * (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */
148
149
for (i = 0; i < size && startvec + i < s->num_irq; i++) {
150
- set_prio(s, startvec + i, (value >> (i * 8)) & 0xff);
151
+ if (attrs.secure || s->itns[startvec + i]) {
152
+ set_prio(s, startvec + i, (value >> (i * 8)) & 0xff);
153
+ }
154
}
155
nvic_irq_update(s);
156
return MEMTX_OK;
157
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_nvic_security = {
158
VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
159
vmstate_VecInfo, VecInfo),
160
VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
161
+ VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
162
VMSTATE_END_OF_LIST()
163
}
164
};
165
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_reset(DeviceState *dev)
166
s->vectpending = 0;
167
s->vectpending_is_s_banked = false;
168
s->vectpending_prio = NVIC_NOEXC_PRIO;
169
+
170
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
171
+ memset(s->itns, 0, sizeof(s->itns));
172
+ } else {
173
+ /* This state is constant and not guest accessible in a non-security
174
+ * NVIC; we set the bits to true to avoid having to do a feature
175
+ * bit check in the NVIC enable/pend/etc register accessors.
176
+ */
177
+ int i;
178
+
179
+ for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
180
+ s->itns[i] = true;
181
+ }
182
+ }
183
}
184
185
static void nvic_systick_trigger(void *opaque, int n, int level)
186
--
187
2.7.4
188
189
diff view generated by jsdifflib
Deleted patch
In armv7m_nvic_set_pending() we have to compare the
priority of an exception against the execution priority
to decide whether it needs to be escalated to HardFault.
In the specification this is a comparison against the
exception's group priority; for v7M we implemented it
as a comparison against the raw exception priority
because the two comparisons will always give the
same answer. For v8M the existence of AIRCR.PRIS and
the possibility of different PRIGROUP values for secure
and nonsecure exceptions means we need to explicitly
calculate the vector's group priority for this check.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-12-git-send-email-peter.maydell@linaro.org
---
hw/intc/armv7m_nvic.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
19
20
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/intc/armv7m_nvic.c
23
+++ b/hw/intc/armv7m_nvic.c
24
@@ -XXX,XX +XXX,XX @@ void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
25
int running = nvic_exec_prio(s);
26
bool escalate = false;
27
28
- if (vec->prio >= running) {
29
+ if (exc_group_prio(s, vec->prio, secure) >= running) {
30
trace_nvic_escalate_prio(irq, vec->prio, running);
31
escalate = true;
32
} else if (!vec->enabled) {
33
--
34
2.7.4
35
36
diff view generated by jsdifflib
Deleted patch
The ICSR NVIC register is banked for v8M. This doesn't
require any new state, but it does mean that some bits
are controlled by BFHFNMINS and some bits must work
with the correct banked exception. There is also a new
PENDNMICLR bit in v8M.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 1505240046-11454-18-git-send-email-peter.maydell@linaro.org
---
hw/intc/armv7m_nvic.c | 45 ++++++++++++++++++++++++++++++++-------------
1 file changed, 32 insertions(+), 13 deletions(-)
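
For reference, the ICSR bits tested below, written out as a small standalone
sketch (bit numbers taken from the patch; the ICSR_* names are local, not QEMU
definitions):

/* Standalone sketch of the ICSR bits touched in the patch below. The bit
 * numbers come straight from the diff (1 << 26, 1 << 28, 1 << 30, 1 << 31).
 */
#include <stdio.h>
#include <stdint.h>

#define ICSR_PENDSTSET  (1u << 26)
#define ICSR_PENDSVSET  (1u << 28)
#define ICSR_PENDNMICLR (1u << 30)
#define ICSR_NMIPENDSET (1u << 31)

int main(void)
{
    uint32_t icsr = ICSR_PENDSVSET | ICSR_NMIPENDSET;

    printf("PendSV pending:  %d\n", !!(icsr & ICSR_PENDSVSET));   /* 1 */
    printf("NMI pending:     %d\n", !!(icsr & ICSR_NMIPENDSET));  /* 1 */
    printf("SysTick pending: %d\n", !!(icsr & ICSR_PENDSTSET));   /* 0 */
    return 0;
}
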
13
14
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/intc/armv7m_nvic.c
17
+++ b/hw/intc/armv7m_nvic.c
18
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
19
}
20
case 0xd00: /* CPUID Base. */
21
return cpu->midr;
22
- case 0xd04: /* Interrupt Control State. */
23
+ case 0xd04: /* Interrupt Control State (ICSR) */
24
/* VECTACTIVE */
25
val = cpu->env.v7m.exception;
26
/* VECTPENDING */
27
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
28
if (nvic_rettobase(s)) {
29
val |= (1 << 11);
30
}
31
- /* PENDSTSET */
32
- if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
33
- val |= (1 << 26);
34
- }
35
- /* PENDSVSET */
36
- if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
37
- val |= (1 << 28);
38
+ if (attrs.secure) {
39
+ /* PENDSTSET */
40
+ if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
41
+ val |= (1 << 26);
42
+ }
43
+ /* PENDSVSET */
44
+ if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
45
+ val |= (1 << 28);
46
+ }
47
+ } else {
48
+ /* PENDSTSET */
49
+ if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
50
+ val |= (1 << 26);
51
+ }
52
+ /* PENDSVSET */
53
+ if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
54
+ val |= (1 << 28);
55
+ }
56
}
57
/* NMIPENDSET */
58
- if (s->vectors[ARMV7M_EXCP_NMI].pending) {
59
+ if ((cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
60
+ s->vectors[ARMV7M_EXCP_NMI].pending) {
61
val |= (1 << 31);
62
}
63
- /* ISRPREEMPT not implemented */
64
+ /* ISRPREEMPT: RES0 when halting debug not implemented */
65
+ /* STTNS: RES0 for the Main Extension */
66
return val;
67
case 0xd08: /* Vector Table Offset. */
68
return cpu->env.v7m.vecbase[attrs.secure];
69
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
70
nvic_irq_update(s);
71
break;
72
}
73
- case 0xd04: /* Interrupt Control State. */
74
- if (value & (1 << 31)) {
75
- armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
76
+ case 0xd04: /* Interrupt Control State (ICSR) */
77
+ if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
78
+ if (value & (1 << 31)) {
79
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
80
+ } else if (value & (1 << 30) &&
81
+ arm_feature(&cpu->env, ARM_FEATURE_V8)) {
82
+ /* PENDNMICLR didn't exist in v7M */
83
+ armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
84
+ }
85
}
86
if (value & (1 << 28)) {
87
armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
88
--
89
2.7.4
90
91