First pullreq for 6.0: mostly my v8.1M work, plus some other
bits and pieces.  (I still have a lot of stuff in my to-review
folder, which I may or may not get to before the Christmas break...)

thanks
-- PMM

The following changes since commit 5e7b204dbfae9a562fc73684986f936b97f63877:

  Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging (2020-12-09 20:08:54 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20201210

for you to fetch changes up to 71f916be1c7e9ede0e37d9cabc781b5a9e8638ff:

  hw/arm/armv7m: Correct typo in QOM object name (2020-12-10 11:44:56 +0000)

----------------------------------------------------------------
target-arm queue:
 * hw/arm/smmuv3: Fix up L1STD_SPAN decoding
 * xlnx-zynqmp: Support Xilinx ZynqMP CAN controllers
 * sbsa-ref: allow to use Cortex-A53/57/72 cpus
 * Various minor code cleanups
 * hw/intc/armv7m_nvic: Make all of system PPB range be RAZWI/BusFault
 * Implement more pieces of ARMv8.1M support

----------------------------------------------------------------
Alex Chen (4):
      i.MX25: Fix bad printf format specifiers
      i.MX31: Fix bad printf format specifiers
      i.MX6: Fix bad printf format specifiers
      i.MX6ul: Fix bad printf format specifiers

Havard Skinnemoen (1):
      tests/qtest/npcm7xx_rng-test: dump random data on failure

Kunkun Jiang (1):
      hw/arm/smmuv3: Fix up L1STD_SPAN decoding

Marcin Juszkiewicz (1):
      sbsa-ref: allow to use Cortex-A53/57/72 cpus

Peter Maydell (25):
      hw/intc/armv7m_nvic: Make all of system PPB range be RAZWI/BusFault
      target/arm: Implement v8.1M PXN extension
      target/arm: Don't clobber ID_PFR1.Security on M-profile cores
      target/arm: Implement VSCCLRM insn
      target/arm: Implement CLRM instruction
      target/arm: Enforce M-profile VMRS/VMSR register restrictions
      target/arm: Refactor M-profile VMSR/VMRS handling
      target/arm: Move general-use constant expanders up in translate.c
      target/arm: Implement VLDR/VSTR system register
      target/arm: Implement M-profile FPSCR_nzcvqc
      target/arm: Use new FPCR_NZCV_MASK constant
      target/arm: Factor out preserve-fp-state from full_vfp_access_check()
      target/arm: Implement FPCXT_S fp system register
      hw/intc/armv7m_nvic: Update FPDSCR masking for v8.1M
      target/arm: For v8.1M, always clear R0-R3, R12, APSR, EPSR on exception entry
      target/arm: In v8.1M, don't set HFSR.FORCED on vector table fetch failures
      target/arm: Implement v8.1M REVIDR register
      target/arm: Implement new v8.1M NOCP check for exception return
      target/arm: Implement new v8.1M VLLDM and VLSTM encodings
      hw/intc/armv7m_nvic: Support v8.1M CCR.TRD bit
      target/arm: Implement CCR_S.TRD behaviour for SG insns
      hw/intc/armv7m_nvic: Fix "return from inactive handler" check
      target/arm: Implement M-profile "minimal RAS implementation"
      hw/intc/armv7m_nvic: Implement read/write for RAS register block
      hw/arm/armv7m: Correct typo in QOM object name

Vikram Garhwal (4):
      hw/net/can: Introduce Xilinx ZynqMP CAN controller
      xlnx-zynqmp: Connect Xilinx ZynqMP CAN controllers
      tests/qtest: Introduce tests for Xilinx ZynqMP CAN controller
      MAINTAINERS: Add maintainer entry for Xilinx ZynqMP CAN controller

 meson.build                      |    1 +
 hw/arm/smmuv3-internal.h         |    2 +-
 hw/net/can/trace.h               |    1 +
 include/hw/arm/xlnx-zynqmp.h     |    8 +
 include/hw/intc/armv7m_nvic.h    |    2 +
 include/hw/net/xlnx-zynqmp-can.h |   78 +++
 target/arm/cpu.h                 |   46 ++
 target/arm/m-nocp.decode         |   10 +-
 target/arm/t32.decode            |   10 +-
 target/arm/vfp.decode            |   14 +
 hw/arm/armv7m.c                  |    4 +-
 hw/arm/sbsa-ref.c                |   23 +-
 hw/arm/xlnx-zcu102.c             |   20 +
 hw/arm/xlnx-zynqmp.c             |   34 ++
 hw/intc/armv7m_nvic.c            |  246 ++++--
 hw/misc/imx25_ccm.c              |   12 +-
 hw/misc/imx31_ccm.c              |   14 +-
 hw/misc/imx6_ccm.c               |   20 +-
 hw/misc/imx6_src.c               |    2 +-
 hw/misc/imx6ul_ccm.c             |    4 +-
 hw/misc/imx_ccm.c                |    4 +-
 hw/net/can/xlnx-zynqmp-can.c     | 1161 ++++++++++++++++++++++++++++++++
 target/arm/cpu.c                 |    5 +-
 target/arm/helper.c              |    7 +-
 target/arm/m_helper.c            |  130 ++++-
 target/arm/translate.c           |  105 +++-
 tests/qtest/npcm7xx_rng-test.c   |   12 +
 tests/qtest/xlnx-can-test.c      |  360 ++++++++++++
 MAINTAINERS                      |    8 +
 hw/Kconfig                       |    1 +
 hw/net/can/meson.build           |    1 +
 hw/net/can/trace-events          |    9 +
 target/arm/translate-vfp.c.inc   |  511 ++++++++++++++-
 tests/qtest/meson.build          |    1 +
 34 files changed, 2713 insertions(+), 153 deletions(-)
 create mode 100644 hw/net/can/trace.h
 create mode 100644 include/hw/net/xlnx-zynqmp-can.h
 create mode 100644 hw/net/can/xlnx-zynqmp-can.c
 create mode 100644 tests/qtest/xlnx-can-test.c
 create mode 100644 hw/net/can/trace-events


arm queue: big stuff here is my MVE codegen optimisation,
and Alex's Apple Silicon hvf support.

-- PMM

The following changes since commit 7adb961995a3744f51396502b33ad04a56a317c3:

  Merge remote-tracking branch 'remotes/dgilbert-gitlab/tags/pull-virtiofs-20210916' into staging (2021-09-19 18:53:29 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210920

for you to fetch changes up to 1dc5a60bfe406bc1122d68cbdefda38d23134b27:

  target/arm: Optimize MVE 1op-immediate insns (2021-09-20 14:18:01 +0100)

----------------------------------------------------------------
target-arm queue:
 * Optimize codegen for MVE when predication not active
 * hvf: Add Apple Silicon support
 * hw/intc: Set GIC maintenance interrupt level to only 0 or 1
 * Fix mishandling of MVE FPSCR.LTPSIZE reset for usermode emulator
 * elf2dmp: Fix coverity nits

----------------------------------------------------------------
Alexander Graf (7):
      arm: Move PMC register definitions to internals.h
      hvf: Add execute to dirty log permission bitmap
      hvf: Introduce hvf_arch_init() callback
      hvf: Add Apple Silicon support
      hvf: arm: Implement PSCI handling
      arm: Add Hypervisor.framework build target
      hvf: arm: Add rudimentary PMC support

Peter Collingbourne (1):
      arm/hvf: Add a WFI handler

Peter Maydell (18):
      elf2dmp: Check curl_easy_setopt() return value
      elf2dmp: Fail cleanly if PDB file specifies zero block_size
      target/arm: Don't skip M-profile reset entirely in user mode
      target/arm: Always clear exclusive monitor on reset
      target/arm: Consolidate ifdef blocks in reset
      hvf: arm: Implement -cpu host
      target/arm: Avoid goto_tb if we're trying to exit to the main loop
      target/arm: Enforce that FPDSCR.LTPSIZE is 4 on inbound migration
      target/arm: Add TB flag for "MVE insns not predicated"
      target/arm: Optimize MVE logic ops
      target/arm: Optimize MVE arithmetic ops
      target/arm: Optimize MVE VNEG, VABS
      target/arm: Optimize MVE VDUP
      target/arm: Optimize MVE VMVN
      target/arm: Optimize MVE VSHL, VSHR immediate forms
      target/arm: Optimize MVE VSHLL and VMOVL
      target/arm: Optimize MVE VSLI and VSRI
      target/arm: Optimize MVE 1op-immediate insns

Shashi Mallela (1):
      hw/intc: Set GIC maintenance interrupt level to only 0 or 1

 meson.build                   |    8 +
 include/sysemu/hvf_int.h      |   12 +-
 target/arm/cpu.h              |    6 +-
 target/arm/hvf_arm.h          |   18 +
 target/arm/internals.h        |   44 ++
 target/arm/kvm_arm.h          |    2 -
 target/arm/translate.h        |    2 +
 accel/hvf/hvf-accel-ops.c     |   21 +-
 contrib/elf2dmp/download.c    |   22 +-
 contrib/elf2dmp/pdb.c         |    4 +
 hw/intc/arm_gicv3_cpuif.c     |    5 +-
 target/arm/cpu.c              |   56 +-
 target/arm/helper.c           |   77 ++-
 target/arm/hvf/hvf.c          | 1278 +++++++++++++++++++++++++++++++++++++++++
 target/arm/machine.c          |   13 +
 target/arm/translate-m-nocp.c |    8 +-
 target/arm/translate-mve.c    |  310 +++++++---
 target/arm/translate-vfp.c    |   33 +-
 target/arm/translate.c        |   42 +-
 target/i386/hvf/hvf.c         |   10 +
 MAINTAINERS                   |    5 +
 target/arm/hvf/meson.build    |    3 +
 target/arm/hvf/trace-events   |   11 +
 target/arm/meson.build        |    2 +
 24 files changed, 1824 insertions(+), 168 deletions(-)
 create mode 100644 target/arm/hvf_arm.h
 create mode 100644 target/arm/hvf/hvf.c
 create mode 100644 target/arm/hvf/meson.build
 create mode 100644 target/arm/hvf/trace-events
From: Vikram Garhwal <fnu.vikram@xilinx.com>

Connect CAN0 and CAN1 on the ZynqMP.

Reviewed-by: Francisco Iglesias <francisco.iglesias@xilinx.com>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Vikram Garhwal <fnu.vikram@xilinx.com>
Message-id: 1605728926-352690-3-git-send-email-fnu.vikram@xilinx.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/xlnx-zynqmp.h |  8 ++++++++
 hw/arm/xlnx-zcu102.c         | 20 ++++++++++++++++++++
 hw/arm/xlnx-zynqmp.c         | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+)

diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/xlnx-zynqmp.h
+++ b/include/hw/arm/xlnx-zynqmp.h
@@ -XXX,XX +XXX,XX @@
 #include "hw/intc/arm_gic.h"
 #include "hw/net/cadence_gem.h"
 #include "hw/char/cadence_uart.h"
+#include "hw/net/xlnx-zynqmp-can.h"
 #include "hw/ide/ahci.h"
 #include "hw/sd/sdhci.h"
 #include "hw/ssi/xilinx_spips.h"
@@ -XXX,XX +XXX,XX @@
 #include "hw/cpu/cluster.h"
 #include "target/arm/cpu.h"
 #include "qom/object.h"
+#include "net/can_emu.h"

 #define TYPE_XLNX_ZYNQMP "xlnx,zynqmp"
 OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
 #define XLNX_ZYNQMP_NUM_RPU_CPUS 2
 #define XLNX_ZYNQMP_NUM_GEMS 4
 #define XLNX_ZYNQMP_NUM_UARTS 2
+#define XLNX_ZYNQMP_NUM_CAN 2
+#define XLNX_ZYNQMP_CAN_REF_CLK (24 * 1000 * 1000)
 #define XLNX_ZYNQMP_NUM_SDHCI 2
 #define XLNX_ZYNQMP_NUM_SPIS 2
 #define XLNX_ZYNQMP_NUM_GDMA_CH 8
@@ -XXX,XX +XXX,XX @@ struct XlnxZynqMPState {

     CadenceGEMState gem[XLNX_ZYNQMP_NUM_GEMS];
     CadenceUARTState uart[XLNX_ZYNQMP_NUM_UARTS];
+    XlnxZynqMPCANState can[XLNX_ZYNQMP_NUM_CAN];
     SysbusAHCIState sata;
     SDHCIState sdhci[XLNX_ZYNQMP_NUM_SDHCI];
     XilinxSPIPS spi[XLNX_ZYNQMP_NUM_SPIS];
@@ -XXX,XX +XXX,XX @@ struct XlnxZynqMPState {
     bool virt;
     /* Has the RPU subsystem?  */
     bool has_rpu;
+
+    /* CAN bus. */
+    CanBusState *canbus[XLNX_ZYNQMP_NUM_CAN];
 };

 #endif
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-zcu102.c
+++ b/hw/arm/xlnx-zcu102.c
@@ -XXX,XX +XXX,XX @@
 #include "sysemu/qtest.h"
 #include "sysemu/device_tree.h"
 #include "qom/object.h"
+#include "net/can_emu.h"

 struct XlnxZCU102 {
     MachineState parent_obj;
@@ -XXX,XX +XXX,XX @@ struct XlnxZCU102 {
     bool secure;
     bool virt;

+    CanBusState *canbus[XLNX_ZYNQMP_NUM_CAN];
+
     struct arm_boot_info binfo;
 };

@@ -XXX,XX +XXX,XX @@ static void xlnx_zcu102_init(MachineState *machine)
     object_property_set_bool(OBJECT(&s->soc), "virtualization", s->virt,
                              &error_fatal);

+    for (i = 0; i < XLNX_ZYNQMP_NUM_CAN; i++) {
+        gchar *bus_name = g_strdup_printf("canbus%d", i);
+
+        object_property_set_link(OBJECT(&s->soc), bus_name,
+                                 OBJECT(s->canbus[i]), &error_fatal);
+        g_free(bus_name);
+    }
+
     qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);

     /* Create and plug in the SD cards */
@@ -XXX,XX +XXX,XX @@ static void xlnx_zcu102_machine_instance_init(Object *obj)
     s->secure = false;
     /* Default to virt (EL2) being disabled */
     s->virt = false;
+    object_property_add_link(obj, "xlnx-zcu102.canbus0", TYPE_CAN_BUS,
+                             (Object **)&s->canbus[0],
+                             object_property_allow_set_link,
+                             0);
+
+    object_property_add_link(obj, "xlnx-zcu102.canbus1", TYPE_CAN_BUS,
+                             (Object **)&s->canbus[1],
+                             object_property_allow_set_link,
+                             0);
 }

 static void xlnx_zcu102_machine_class_init(ObjectClass *oc, void *data)
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -XXX,XX +XXX,XX @@ static const int uart_intr[XLNX_ZYNQMP_NUM_UARTS] = {
     21, 22,
 };

+static const uint64_t can_addr[XLNX_ZYNQMP_NUM_CAN] = {
+    0xFF060000, 0xFF070000,
+};
+
+static const int can_intr[XLNX_ZYNQMP_NUM_CAN] = {
+    23, 24,
+};
+
 static const uint64_t sdhci_addr[XLNX_ZYNQMP_NUM_SDHCI] = {
     0xFF160000, 0xFF170000,
 };
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_init(Object *obj)
                                 TYPE_CADENCE_UART);
     }

+    for (i = 0; i < XLNX_ZYNQMP_NUM_CAN; i++) {
+        object_initialize_child(obj, "can[*]", &s->can[i],
+                                TYPE_XLNX_ZYNQMP_CAN);
+    }
+
     object_initialize_child(obj, "sata", &s->sata, TYPE_SYSBUS_AHCI);

     for (i = 0; i < XLNX_ZYNQMP_NUM_SDHCI; i++) {
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
                            gic_spi[uart_intr[i]]);
     }

+    for (i = 0; i < XLNX_ZYNQMP_NUM_CAN; i++) {
+        object_property_set_int(OBJECT(&s->can[i]), "ext_clk_freq",
+                                XLNX_ZYNQMP_CAN_REF_CLK, &error_abort);
+
+        object_property_set_link(OBJECT(&s->can[i]), "canbus",
+                                 OBJECT(s->canbus[i]), &error_fatal);
+
+        sysbus_realize(SYS_BUS_DEVICE(&s->can[i]), &err);
+        if (err) {
+            error_propagate(errp, err);
+            return;
+        }
+        sysbus_mmio_map(SYS_BUS_DEVICE(&s->can[i]), 0, can_addr[i]);
+        sysbus_connect_irq(SYS_BUS_DEVICE(&s->can[i]), 0,
+                           gic_spi[can_intr[i]]);
+    }
+
     object_property_set_int(OBJECT(&s->sata), "num-ports", SATA_NUM_PORTS,
                             &error_abort);
     if (!sysbus_realize(SYS_BUS_DEVICE(&s->sata), errp)) {
@@ -XXX,XX +XXX,XX @@ static Property xlnx_zynqmp_props[] = {
     DEFINE_PROP_BOOL("has_rpu", XlnxZynqMPState, has_rpu, false),
     DEFINE_PROP_LINK("ddr-ram", XlnxZynqMPState, ddr_ram, TYPE_MEMORY_REGION,
                      MemoryRegion *),
+    DEFINE_PROP_LINK("canbus0", XlnxZynqMPState, canbus[0], TYPE_CAN_BUS,
+                     CanBusState *),
+    DEFINE_PROP_LINK("canbus1", XlnxZynqMPState, canbus[1], TYPE_CAN_BUS,
+                     CanBusState *),
     DEFINE_PROP_END_OF_LIST()
 };

--
2.20.1


Coverity points out that we aren't checking the return value
from curl_easy_setopt().

Fixes: Coverity CID 1458895
Inspired-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
Tested-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
Message-id: 20210910170656.366592-2-philmd@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 contrib/elf2dmp/download.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/contrib/elf2dmp/download.c b/contrib/elf2dmp/download.c
index XXXXXXX..XXXXXXX 100644
--- a/contrib/elf2dmp/download.c
+++ b/contrib/elf2dmp/download.c
@@ -XXX,XX +XXX,XX @@ int download_url(const char *name, const char *url)
         goto out_curl;
     }

-    curl_easy_setopt(curl, CURLOPT_URL, url);
-    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL);
-    curl_easy_setopt(curl, CURLOPT_WRITEDATA, file);
-    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
-    curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0);
-
-    if (curl_easy_perform(curl) != CURLE_OK) {
-        err = 1;
-        fclose(file);
+    if (curl_easy_setopt(curl, CURLOPT_URL, url) != CURLE_OK
+            || curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL) != CURLE_OK
+            || curl_easy_setopt(curl, CURLOPT_WRITEDATA, file) != CURLE_OK
+            || curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1) != CURLE_OK
+            || curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK
+            || curl_easy_perform(curl) != CURLE_OK) {
         unlink(name);
-        goto out_curl;
+        fclose(file);
+        err = 1;
+    } else {
+        err = fclose(file);
     }

-    err = fclose(file);
-
 out_curl:
     curl_easy_cleanup(curl);
--
2.20.1
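
For readers who want the pattern from the elf2dmp change above in
isolation, here is a minimal standalone sketch of the ||-chained
curl_easy_setopt() error checking, compilable against plain libcurl.
download_to() and its file handling are illustrative inventions for
this sketch, not code from either patch.

    /* Build with: cc sketch.c -lcurl */
    #include <stdio.h>
    #include <curl/curl.h>

    static int download_to(const char *url, FILE *file)
    {
        CURL *curl = curl_easy_init();
        int err = 0;

        if (!curl) {
            return 1;
        }

        /*
         * Each curl_easy_setopt() can fail, so the calls are
         * ||-chained: the first failure short-circuits past
         * curl_easy_perform().
         */
        if (curl_easy_setopt(curl, CURLOPT_URL, url) != CURLE_OK
                || curl_easy_setopt(curl, CURLOPT_WRITEDATA, file) != CURLE_OK
                || curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L) != CURLE_OK
                || curl_easy_perform(curl) != CURLE_OK) {
            err = 1;
        }

        curl_easy_cleanup(curl);
        return err;
    }

    int main(void)
    {
        FILE *f = fopen("out.bin", "wb");
        int err;

        if (!f) {
            return 1;
        }
        err = download_to("https://example.com/", f);
        /* fclose() can also fail (e.g. full disk), so check it too. */
        if (fclose(f) != 0) {
            err = 1;
        }
        return err;
    }
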
The RAS feature has a block of memory-mapped registers at offset
0x5000 within the PPB.  For a "minimal RAS" implementation we provide
no error records and so the only registers that exist in the block
are ERRIIDR and ERRDEVID.

The "RAZ/WI for privileged, BusFault for nonprivileged" behaviour
of the "nvic-default" region is actually valid for minimal-RAS,
so the main benefit of providing an explicit implementation of
the register block is more accurate LOG_UNIMP messages, and a
framework for where we could add a real RAS implementation later
if necessary.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-27-peter.maydell@linaro.org
---
 include/hw/intc/armv7m_nvic.h |  1 +
 hw/intc/armv7m_nvic.c         | 56 +++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)

diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/intc/armv7m_nvic.h
+++ b/include/hw/intc/armv7m_nvic.h
@@ -XXX,XX +XXX,XX @@ struct NVICState {
     MemoryRegion sysreg_ns_mem;
     MemoryRegion systickmem;
     MemoryRegion systick_ns_mem;
+    MemoryRegion ras_mem;
     MemoryRegion container;
     MemoryRegion defaultmem;

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps nvic_systick_ops = {
     .endianness = DEVICE_NATIVE_ENDIAN,
 };

+
+static MemTxResult ras_read(void *opaque, hwaddr addr,
+                            uint64_t *data, unsigned size,
+                            MemTxAttrs attrs)
+{
+    if (attrs.user) {
+        return MEMTX_ERROR;
+    }
+
+    switch (addr) {
+    case 0xe10: /* ERRIIDR */
+        /* architect field = Arm; product/variant/revision 0 */
+        *data = 0x43b;
+        break;
+    case 0xfc8: /* ERRDEVID */
+        /* Minimal RAS: we implement 0 error record indexes */
+        *data = 0;
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP, "Read RAS register offset 0x%x\n",
+                      (uint32_t)addr);
+        *data = 0;
+        break;
+    }
+    return MEMTX_OK;
+}
+
+static MemTxResult ras_write(void *opaque, hwaddr addr,
+                             uint64_t value, unsigned size,
+                             MemTxAttrs attrs)
+{
+    if (attrs.user) {
+        return MEMTX_ERROR;
+    }
+
+    switch (addr) {
+    default:
+        qemu_log_mask(LOG_UNIMP, "Write to RAS register offset 0x%x\n",
+                      (uint32_t)addr);
+        break;
+    }
+    return MEMTX_OK;
+}
+
+static const MemoryRegionOps ras_ops = {
+    .read_with_attrs = ras_read,
+    .write_with_attrs = ras_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
 /*
  * Unassigned portions of the PPB space are RAZ/WI for privileged
  * accesses, and fault for non-privileged accesses.
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
                                     &s->systick_ns_mem, 1);
     }

+    if (cpu_isar_feature(aa32_ras, s->cpu)) {
+        memory_region_init_io(&s->ras_mem, OBJECT(s),
+                              &ras_ops, s, "nvic_ras", 0x1000);
+        memory_region_add_subregion(&s->container, 0x5000, &s->ras_mem);
+    }
+
     sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
 }

--
2.20.1


Coverity points out that if the PDB file we're trying to read
has a header specifying a block_size of zero then we will
end up trying to divide by zero in pdb_ds_read_file().
Check for this and fail cleanly instead.

Fixes: Coverity CID 1458869
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Tested-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
Message-id: 20210910170656.366592-3-philmd@redhat.com
Message-Id: <20210901143910.17112-3-peter.maydell@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 contrib/elf2dmp/pdb.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/contrib/elf2dmp/pdb.c b/contrib/elf2dmp/pdb.c
index XXXXXXX..XXXXXXX 100644
--- a/contrib/elf2dmp/pdb.c
+++ b/contrib/elf2dmp/pdb.c
@@ -XXX,XX +XXX,XX @@ out_symbols:

 static int pdb_reader_ds_init(struct pdb_reader *r, PDB_DS_HEADER *hdr)
 {
+    if (hdr->block_size == 0) {
+        return 1;
+    }
+
     memset(r->file_used, 0, sizeof(r->file_used));
     r->ds.header = hdr;
     r->ds.toc = pdb_ds_read(hdr, (uint32_t *)((uint8_t *)hdr +
--
2.20.1
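
A small standalone model of the failure the block_size check prevents:
the PDB reader later divides by block_size when computing block counts,
so a zero from a corrupt header would be a divide-by-zero.  nr_blocks()
below is a hypothetical stand-in for the arithmetic in
pdb_ds_read_file(), not the real function.

    #include <stdint.h>
    #include <stdio.h>

    static int nr_blocks(uint32_t file_size, uint32_t block_size,
                         uint32_t *out)
    {
        if (block_size == 0) {
            return 1;        /* fail cleanly, as the patch does */
        }
        *out = (file_size + block_size - 1) / block_size; /* round up */
        return 0;
    }

    int main(void)
    {
        uint32_t n;

        if (nr_blocks(4096, 0, &n)) {
            puts("corrupt header: block_size is zero");
            return 1;
        }
        printf("%u blocks\n", n);
        return 0;
    }
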
The FPDSCR register has a similar layout to the FPSCR.  In v8.1M it
gains new fields FZ16 (if half-precision floating point is supported)
and LTPSIZE (always reads as 4).  Update the reset value and the code
that handles writes to this register accordingly.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-16-peter.maydell@linaro.org
---
 target/arm/cpu.h      | 5 +++++
 hw/intc/armv7m_nvic.c | 9 ++++++++-
 target/arm/cpu.c      | 3 +++
 3 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val);
 #define FPCR_IXE (1 << 12) /* Inexact exception trap enable */
 #define FPCR_IDE (1 << 15) /* Input Denormal exception trap enable */
 #define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */
+#define FPCR_RMODE_MASK (3 << 22) /* Rounding mode */
 #define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */
 #define FPCR_DN (1 << 25) /* Default NaN enable bit */
+#define FPCR_AHP (1 << 26) /* Alternative half-precision */
 #define FPCR_QC (1 << 27) /* Cumulative saturation bit */
 #define FPCR_V (1 << 28) /* FP overflow flag */
 #define FPCR_C (1 << 29) /* FP carry flag */
 #define FPCR_Z (1 << 30) /* FP zero flag */
 #define FPCR_N (1 << 31) /* FP negative flag */

+#define FPCR_LTPSIZE_SHIFT 16 /* LTPSIZE, M-profile only */
+#define FPCR_LTPSIZE_MASK (7 << FPCR_LTPSIZE_SHIFT)
+
 #define FPCR_NZCV_MASK (FPCR_N | FPCR_Z | FPCR_C | FPCR_V)
 #define FPCR_NZCVQC_MASK (FPCR_NZCV_MASK | FPCR_QC)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
         break;
     case 0xf3c: /* FPDSCR */
         if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
-            value &= 0x07c00000;
+            uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK;
+            if (cpu_isar_feature(any_fp16, cpu)) {
+                mask |= FPCR_FZ16;
+            }
+            value &= mask;
+            if (cpu_isar_feature(aa32_lob, cpu)) {
+                value |= 4 << FPCR_LTPSIZE_SHIFT;
+            }
             cpu->env.v7m.fpdscr[attrs.secure] = value;
         }
         break;
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
              * always reset to 4.
              */
             env->v7m.ltpsize = 4;
+            /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
+            env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
+            env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
         }

--
2.20.1


Currently all of the M-profile specific code in arm_cpu_reset() is
inside a !defined(CONFIG_USER_ONLY) ifdef block.  This is
unintentional: it happened because originally the only
M-profile-specific handling was the setup of the initial SP and PC
from the vector table, which is system-emulation only.  But then we
added a lot of other M-profile setup to the same "if (ARM_FEATURE_M)"
code block without noticing that it was all inside a not-user-mode
ifdef.  This has generally been harmless, but with the addition of
v8.1M low-overhead-loop support we ran into a problem: the reset of
FPSCR.LTPSIZE to 4 was only being done for system emulation mode, so
if a user-mode guest tried to execute the LE instruction it would
incorrectly take a UsageFault.

Adjust the ifdefs so only the really system-emulation specific parts
are covered.  Because this means we now run some reset code that sets
up initial values in the FPCCR and similar FPU related registers,
explicitly set up the registers controlling FPU context handling in
user-emulation mode so that the FPU works by design and not by
chance.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/613
Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210914120725.24992-2-peter.maydell@linaro.org
---
 target/arm/cpu.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
         env->uncached_cpsr = ARM_CPU_MODE_SVC;
     }
     env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
+#endif

     if (arm_feature(env, ARM_FEATURE_M)) {
+#ifndef CONFIG_USER_ONLY
         uint32_t initial_msp; /* Loaded from 0x0 */
         uint32_t initial_pc; /* Loaded from 0x4 */
         uint8_t *rom;
         uint32_t vecbase;
+#endif

         if (cpu_isar_feature(aa32_lob, cpu)) {
             /*
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
             env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
                 R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
         }
+
         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+#ifndef CONFIG_USER_ONLY
             /* Unlike A/R profile, M profile defines the reset LR value */
             env->regs[14] = 0xffffffff;

@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
         env->regs[13] = initial_msp & 0xFFFFFFFC;
         env->regs[15] = initial_pc & ~1;
         env->thumb = initial_pc & 1;
+#else
+        /*
+         * For user mode we run non-secure and with access to the FPU.
+         * The FPU context is active (ie does not need further setup)
+         * and is owned by non-secure.
+         */
+        env->v7m.secure = false;
+        env->v7m.nsacr = 0xcff;
+        env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
+        env->v7m.fpccr[M_REG_S] &=
+            ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
+        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
+#endif
     }

+#ifndef CONFIG_USER_ONLY
     /* AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
      * executing as AArch32 then check if highvecs are enabled and
      * adjust the PC accordingly.
--
2.20.1
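
To make the FPDSCR write semantics above concrete, here is a standalone
model of the masking: only AHP/DN/FZ/RMODE (plus FZ16 when FP16 is
present) are writable, and LTPSIZE reads back as 4 on a CPU with the
low-overhead-loop extension.  The constants mirror the patch; the two
bool parameters are illustrative stand-ins for the cpu_isar_feature()
tests.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FPCR_FZ16           (1u << 19)
    #define FPCR_RMODE_MASK     (3u << 22)
    #define FPCR_FZ             (1u << 24)
    #define FPCR_DN             (1u << 25)
    #define FPCR_AHP            (1u << 26)
    #define FPCR_LTPSIZE_SHIFT  16

    static uint32_t fpdscr_write(uint32_t value, bool have_fp16,
                                 bool have_lob)
    {
        uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK;

        if (have_fp16) {
            mask |= FPCR_FZ16;
        }
        value &= mask;
        if (have_lob) {
            value |= 4u << FPCR_LTPSIZE_SHIFT; /* LTPSIZE is constant 4 */
        }
        return value;
    }

    int main(void)
    {
        /* A guest writing all-ones still reads LTPSIZE == 4 back. */
        uint32_t v = fpdscr_write(0xffffffff, true, true);

        printf("FPDSCR = 0x%08x, LTPSIZE = %u\n",
               v, (v >> FPCR_LTPSIZE_SHIFT) & 7);
        return 0;
    }
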
In arm_cpu_realizefn() we check whether the board code disabled EL3
via the has_el3 CPU object property, which we create if the CPU
starts with the ARM_FEATURE_EL3 feature bit.  If it is disabled, then
we turn off ARM_FEATURE_EL3 and also zero out the relevant fields in
the ID_PFR1 and ID_AA64PFR0 registers.

This codepath was incorrectly being taken for M-profile CPUs, which
do not have an EL3 and don't set ARM_FEATURE_EL3, but which may have
the M-profile Security extension and so should have non-zero values
in the ID_PFR1.Security field.

Restrict the handling of the feature flag to A/R-profile cores.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-4-peter.maydell@linaro.org
---
 target/arm/cpu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
         }
     }

-    if (!cpu->has_el3) {
+    if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) {
         /* If the has_el3 CPU property is disabled then we need to disable the
          * feature.
          */
--
2.20.1


There's no particular reason why the exclusive monitor should
be only cleared on reset in system emulation mode.  It doesn't
hurt if it isn't cleared in user mode, but we might as well
reduce the amount of code we have that's inside an ifdef.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210914120725.24992-3-peter.maydell@linaro.org
---
 target/arm/cpu.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
         env->regs[15] = 0xFFFF0000;
     }

+    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
+#endif
+
     /* M profile requires that reset clears the exclusive monitor;
      * A profile does not, but clearing it makes more sense than having it
      * set with an exclusive access on address zero.
      */
     arm_clear_exclusive(env);

-    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
-#endif
-
     if (arm_feature(env, ARM_FEATURE_PMSA)) {
         if (cpu->pmsav7_dregion > 0) {
             if (arm_feature(env, ARM_FEATURE_V8)) {
--
2.20.1
In commit 077d7449100d824a4 we added code to handle the v8M
requirement that returns from NMI or HardFault forcibly deactivate
those exceptions regardless of what interrupt the guest is trying to
deactivate.  Unfortunately this broke the handling of the "illegal
exception return because the returning exception number is not
active" check for those cases.  In the pseudocode this test is done
on the exception the guest asks to return from, but because our
implementation was doing this in armv7m_nvic_complete_irq() after the
new "deactivate NMI/HardFault regardless" code we ended up doing the
test on the VecInfo for that exception instead, which usually meant
failing to raise the illegal exception return fault.

In the case for "configurable exception targeting the opposite
security state" we detected the illegal-return case but went ahead
and deactivated the VecInfo anyway, which is wrong because that is
the VecInfo for the other security state.

Rearrange the code so that we first identify the illegal return
cases, then see if we really need to deactivate NMI or HardFault
instead, and finally do the deactivation.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-25-peter.maydell@linaro.org
---
 hw/intc/armv7m_nvic.c | 59 +++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 27 deletions(-)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
 {
     NVICState *s = (NVICState *)opaque;
     VecInfo *vec = NULL;
-    int ret;
+    int ret = 0;

     assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

+    trace_nvic_complete_irq(irq, secure);
+
+    if (secure && exc_is_banked(irq)) {
+        vec = &s->sec_vectors[irq];
+    } else {
+        vec = &s->vectors[irq];
+    }
+
+    /*
+     * Identify illegal exception return cases. We can't immediately
+     * return at this point because we still need to deactivate
+     * (either this exception or NMI/HardFault) first.
+     */
+    if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
+        /*
+         * Return from a configurable exception targeting the opposite
+         * security state from the one we're trying to complete it for.
+         * Clear vec because it's not really the VecInfo for this
+         * (irq, secstate) so we mustn't deactivate it.
+         */
+        ret = -1;
+        vec = NULL;
+    } else if (!vec->active) {
+        /* Return from an inactive interrupt */
+        ret = -1;
+    } else {
+        /* Legal return, we will return the RETTOBASE bit value to the caller */
+        ret = nvic_rettobase(s);
+    }
+
     /*
      * For negative priorities, v8M will forcibly deactivate the appropriate
      * NMI or HardFault regardless of what interrupt we're being asked to
@@ -XXX,XX +XXX,XX @@ int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
     }

     if (!vec) {
-        if (secure && exc_is_banked(irq)) {
-            vec = &s->sec_vectors[irq];
-        } else {
-            vec = &s->vectors[irq];
-        }
-    }
-
-    trace_nvic_complete_irq(irq, secure);
-
-    if (!vec->active) {
-        /* Tell the caller this was an illegal exception return */
-        return -1;
-    }
-
-    /*
-     * If this is a configurable exception and it is currently
-     * targeting the opposite security state from the one we're trying
-     * to complete it for, this counts as an illegal exception return.
-     * We still need to deactivate whatever vector the logic above has
-     * selected, though, as it might not be the same as the one for the
-     * requested exception number.
-     */
-    if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
-        ret = -1;
-    } else {
-        ret = nvic_rettobase(s);
+        return ret;
     }

     vec->active = 0;
--
2.20.1


Move an ifndef CONFIG_USER_ONLY code block up in arm_cpu_reset() so
it can be merged with another earlier one.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210914120725.24992-4-peter.maydell@linaro.org
---
 target/arm/cpu.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
         env->uncached_cpsr = ARM_CPU_MODE_SVC;
     }
     env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
+
+    /* AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
+     * executing as AArch32 then check if highvecs are enabled and
+     * adjust the PC accordingly.
+     */
+    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
+        env->regs[15] = 0xFFFF0000;
+    }
+
+    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
 #endif

     if (arm_feature(env, ARM_FEATURE_M)) {
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
 #endif
     }

-#ifndef CONFIG_USER_ONLY
-    /* AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
-     * executing as AArch32 then check if highvecs are enabled and
-     * adjust the PC accordingly.
-     */
-    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
-        env->regs[15] = 0xFFFF0000;
-    }
-
-    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
-#endif
-
     /* M profile requires that reset clears the exclusive monitor;
      * A profile does not, but clearing it makes more sense than having it
      * set with an exclusive access on address zero.
--
2.20.1
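
The ordering change in armv7m_nvic_complete_irq() boils down to
"classify first, deactivate second".  The following toy model uses
simplified types and a single banked_ok flag rather than the real NVIC
structures, but shows that shape:

    #include <stdbool.h>
    #include <stdio.h>

    struct vec { bool active; };

    static int complete_irq(struct vec *v, bool banked_ok)
    {
        int ret;

        if (!banked_ok) {
            /* Wrong security state: illegal, and v must not be touched. */
            ret = -1;
            v = NULL;
        } else if (!v->active) {
            ret = -1;          /* return from an inactive handler */
        } else {
            ret = 0;           /* legal return */
        }

        /* ...forcible NMI/HardFault deactivation would go here... */

        if (v) {
            v->active = false; /* deactivate only the correct vector */
        }
        return ret;
    }

    int main(void)
    {
        struct vec v = { .active = true };

        printf("legal: %d\n", complete_irq(&v, true));    /* 0 */
        printf("inactive: %d\n", complete_irq(&v, true)); /* -1 */
        return 0;
    }
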
From: Alex Chen <alex.chen@huawei.com>

We should use printf format specifier "%u" instead of "%d" for
argument of type "unsigned int".

Reported-by: Euler Robot <euler.robot@huawei.com>
Signed-off-by: Alex Chen <alex.chen@huawei.com>
Message-id: 20201126111109.112238-2-alex.chen@huawei.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/misc/imx25_ccm.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/hw/misc/imx25_ccm.c b/hw/misc/imx25_ccm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/imx25_ccm.c
+++ b/hw/misc/imx25_ccm.c
@@ -XXX,XX +XXX,XX @@ static const char *imx25_ccm_reg_name(uint32_t reg)
     case IMX25_CCM_LPIMR1_REG:
         return "lpimr1";
     default:
-        sprintf(unknown, "[%d ?]", reg);
+        sprintf(unknown, "[%u ?]", reg);
         return unknown;
     }
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx25_ccm_get_mpll_clk(IMXCCMState *dev)
         freq = imx_ccm_calc_pll(s->reg[IMX25_CCM_MPCTL_REG], CKIH_FREQ);
     }

-    DPRINTF("freq = %d\n", freq);
+    DPRINTF("freq = %u\n", freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx25_ccm_get_mcu_clk(IMXCCMState *dev)

     freq = freq / (1 + EXTRACT(s->reg[IMX25_CCM_CCTL_REG], ARM_CLK_DIV));

-    DPRINTF("freq = %d\n", freq);
+    DPRINTF("freq = %u\n", freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx25_ccm_get_ahb_clk(IMXCCMState *dev)
     freq = imx25_ccm_get_mcu_clk(dev)
            / (1 + EXTRACT(s->reg[IMX25_CCM_CCTL_REG], AHB_CLK_DIV));

-    DPRINTF("freq = %d\n", freq);
+    DPRINTF("freq = %u\n", freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx25_ccm_get_ipg_clk(IMXCCMState *dev)

     freq = imx25_ccm_get_ahb_clk(dev) / 2;

-    DPRINTF("freq = %d\n", freq);
+    DPRINTF("freq = %u\n", freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx25_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
         break;
     }

-    DPRINTF("Clock = %d) = %d\n", clock, freq);
+    DPRINTF("Clock = %d) = %u\n", clock, freq);

     return freq;
 }
--
2.20.1


From: Shashi Mallela <shashi.mallela@linaro.org>

During sbsa acs level 3 testing, it is seen that the GIC maintenance
interrupts are not triggered and the related test cases fail.  This
is because we were incorrectly passing the value of the MISR register
(from maintenance_interrupt_state()) to qemu_set_irq() as the level
argument, whereas the device on the other end of this irq line
expects a 0/1 value.

Fix the logic to pass a 0/1 level indication, rather than a
0/not-0 value.

Fixes: c5fc89b36c0 ("hw/intc/arm_gicv3: Implement gicv3_cpuif_virt_update()")
Signed-off-by: Shashi Mallela <shashi.mallela@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210915205809.59068-1-shashi.mallela@linaro.org
[PMM: tweaked commit message; collapsed nested if()s into one]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/intc/arm_gicv3_cpuif.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -XXX,XX +XXX,XX @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
         }
     }

-    if (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) {
-        maintlevel = maintenance_interrupt_state(cs);
+    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
+        maintenance_interrupt_state(cs) != 0) {
+        maintlevel = 1;
     }

     trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
--
2.20.1
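
A toy model of the level-vs-status confusion fixed above: the consumer
of an irq line expects a 0/1 level, so handing it a raw multi-bit MISR
value misbehaves.  set_irq() below is an illustrative stand-in for
qemu_set_irq(), not the real API.

    #include <stdio.h>

    static void set_irq(int level)
    {
        /* The consumer is written for exact 0/1 transitions. */
        if (level == 1) {
            puts("asserted");
        } else if (level == 0) {
            puts("deasserted");
        } else {
            puts("unexpected level: neither 0 nor 1");
        }
    }

    int main(void)
    {
        int misr = 0x6;          /* two pending maintenance sources */

        set_irq(misr);           /* buggy: raw status value as level */
        set_irq(misr != 0);      /* fixed: 0/1 level indication */
        return 0;
    }
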
In v8.1M the PXN architecture extension adds a new PXN bit to the
MPU_RLAR registers, which forbids execution of code in the region
from a privileged mode.

This is another feature which is just in the generic "in v8.1M" set
and has no ID register field indicating its presence.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-3-peter.maydell@linaro.org
---
 target/arm/helper.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
     } else {
         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
+        bool pxn = false;
+
+        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
+            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
+        }

         if (m_is_system_region(env, address)) {
             /* System space is always execute never */
@@ -XXX,XX +XXX,XX @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
         }

         *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
-        if (*prot && !xn) {
+        if (*prot && !xn && !(pxn && !is_user)) {
             *prot |= PAGE_EXEC;
         }
         /* We don't need to look the attribute up in the MAIR0/MAIR1
--
2.20.1


From: Alexander Graf <agraf@csgraf.de>

We will need PMC register definitions in accel specific code later.
Move all constant definitions to common arm headers so we can reuse
them.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210916155404.86958-2-agraf@csgraf.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h | 44 ++++++++++++++++++++++++++++++++++++++++++
 target/arm/helper.c    | 44 ------------------------------------------
 2 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ enum MVEECIState {
     /* All other values reserved */
 };

+/* Definitions for the PMU registers */
+#define PMCRN_MASK  0xf800
+#define PMCRN_SHIFT 11
+#define PMCRLC  0x40
+#define PMCRDP  0x20
+#define PMCRX   0x10
+#define PMCRD   0x8
+#define PMCRC   0x4
+#define PMCRP   0x2
+#define PMCRE   0x1
+/*
+ * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
+ * which can be written as 1 to trigger behaviour but which stay RAZ).
+ */
+#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
+
+#define PMXEVTYPER_P          0x80000000
+#define PMXEVTYPER_U          0x40000000
+#define PMXEVTYPER_NSK        0x20000000
+#define PMXEVTYPER_NSU        0x10000000
+#define PMXEVTYPER_NSH        0x08000000
+#define PMXEVTYPER_M          0x04000000
+#define PMXEVTYPER_MT         0x02000000
+#define PMXEVTYPER_EVTCOUNT   0x0000ffff
+#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
+                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
+                               PMXEVTYPER_M | PMXEVTYPER_MT | \
+                               PMXEVTYPER_EVTCOUNT)
+
+#define PMCCFILTR             0xf8000000
+#define PMCCFILTR_M           PMXEVTYPER_M
+#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
+
+static inline uint32_t pmu_num_counters(CPUARMState *env)
+{
+    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
+}
+
+/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
+static inline uint64_t pmu_counter_mask(CPUARMState *env)
+{
+    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
+}
+
 #endif
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
     REGINFO_SENTINEL
 };

-/* Definitions for the PMU registers */
-#define PMCRN_MASK  0xf800
-#define PMCRN_SHIFT 11
-#define PMCRLC  0x40
-#define PMCRDP  0x20
-#define PMCRX   0x10
-#define PMCRD   0x8
-#define PMCRC   0x4
-#define PMCRP   0x2
-#define PMCRE   0x1
-/*
- * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
- * which can be written as 1 to trigger behaviour but which stay RAZ).
- */
-#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
-
-#define PMXEVTYPER_P          0x80000000
-#define PMXEVTYPER_U          0x40000000
-#define PMXEVTYPER_NSK        0x20000000
-#define PMXEVTYPER_NSU        0x10000000
-#define PMXEVTYPER_NSH        0x08000000
-#define PMXEVTYPER_M          0x04000000
-#define PMXEVTYPER_MT         0x02000000
-#define PMXEVTYPER_EVTCOUNT   0x0000ffff
-#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
-                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
-                               PMXEVTYPER_M | PMXEVTYPER_MT | \
-                               PMXEVTYPER_EVTCOUNT)
-
-#define PMCCFILTR             0xf8000000
-#define PMCCFILTR_M           PMXEVTYPER_M
-#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
-
-static inline uint32_t pmu_num_counters(CPUARMState *env)
-{
-    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
-}
-
-/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
-static inline uint64_t pmu_counter_mask(CPUARMState *env)
-{
-    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
-}
-
 typedef struct pm_event {
     uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
     /* If the event is supported on this CPU (used to generate PMCEID[01]) */
--
2.20.1
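
As a quick illustration of the PMCR.N handling behind
pmu_num_counters() and pmu_counter_mask() above (same constants as the
patch, but a made-up PMCR value):

    #include <stdint.h>
    #include <stdio.h>

    #define PMCRN_MASK  0xf800
    #define PMCRN_SHIFT 11

    int main(void)
    {
        uint32_t pmcr = 4 << PMCRN_SHIFT;   /* PMCR.N = 4 event counters */
        uint32_t n = (pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
        /* Bit 31 is the cycle counter; bits [n-1:0] the event counters. */
        uint64_t mask = (1u << 31) | ((1u << n) - 1);

        printf("counters = %u, PMCNTEN mask = 0x%08llx\n",
               n, (unsigned long long)mask);
        return 0;
    }
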
From: Alex Chen <alex.chen@huawei.com>

We should use printf format specifier "%u" instead of "%d" for
argument of type "unsigned int".

Reported-by: Euler Robot <euler.robot@huawei.com>
Signed-off-by: Alex Chen <alex.chen@huawei.com>
Message-id: 20201126111109.112238-5-alex.chen@huawei.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/misc/imx6ul_ccm.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/misc/imx6ul_ccm.c b/hw/misc/imx6ul_ccm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/imx6ul_ccm.c
+++ b/hw/misc/imx6ul_ccm.c
@@ -XXX,XX +XXX,XX @@ static const char *imx6ul_ccm_reg_name(uint32_t reg)
     case CCM_CMEOR:
         return "CMEOR";
     default:
-        sprintf(unknown, "%d ?", reg);
+        sprintf(unknown, "%u ?", reg);
         return unknown;
     }
 }
@@ -XXX,XX +XXX,XX @@ static const char *imx6ul_analog_reg_name(uint32_t reg)
     case USB_ANALOG_DIGPROG:
         return "USB_ANALOG_DIGPROG";
     default:
-        sprintf(unknown, "%d ?", reg);
+        sprintf(unknown, "%u ?", reg);
         return unknown;
     }
 }
--
2.20.1


From: Alexander Graf <agraf@csgraf.de>

Hvf's permission bitmap during and after dirty logging does not include
the HV_MEMORY_EXEC permission.  At least on Apple Silicon, this leads to
instruction faults once dirty logging was enabled.

Add the bit to make it work properly.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210916155404.86958-3-agraf@csgraf.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 accel/hvf/hvf-accel-ops.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -XXX,XX +XXX,XX @@ static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
     if (on) {
         slot->flags |= HVF_SLOT_LOG;
         hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
-                      HV_MEMORY_READ);
+                      HV_MEMORY_READ | HV_MEMORY_EXEC);
     /* stop tracking region*/
     } else {
         slot->flags &= ~HVF_SLOT_LOG;
         hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
-                      HV_MEMORY_READ | HV_MEMORY_WRITE);
+                      HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
     }
 }
--
2.20.1
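
A toy model of the dirty-logging regression: write-protecting a slot
must drop only the write permission, not execute.  The flag values
below are illustrative, not the actual Hypervisor.framework constants.

    #include <stdio.h>

    #define MEM_READ  (1 << 0)
    #define MEM_WRITE (1 << 1)
    #define MEM_EXEC  (1 << 2)

    /* Permissions applied when dirty logging starts. */
    static int log_perms_buggy(void) { return MEM_READ; }
    static int log_perms_fixed(void) { return MEM_READ | MEM_EXEC; }

    int main(void)
    {
        printf("buggy: guest can exec? %s\n",
               (log_perms_buggy() & MEM_EXEC) ? "yes" : "no (faults)");
        printf("fixed: guest can exec? %s\n",
               (log_perms_fixed() & MEM_EXEC) ? "yes" : "no (faults)");
        return 0;
    }
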
From: Kunkun Jiang <jiangkunkun@huawei.com>

According to the SMMUv3 spec, the SPAN field of Level1 Stream Table
Descriptor is 5 bits([4:0]).

Fixes: 9bde7f0674f(hw/arm/smmuv3: Implement translate callback)
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Message-id: 20201124023711.1184-1-jiangkunkun@huawei.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Acked-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/smmuv3-internal.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/smmuv3-internal.h
+++ b/hw/arm/smmuv3-internal.h
@@ -XXX,XX +XXX,XX @@ static inline uint64_t l1std_l2ptr(STEDesc *desc)
     return hi << 32 | lo;
 }

-#define L1STD_SPAN(stm) (extract32((stm)->word[0], 0, 4))
+#define L1STD_SPAN(stm) (extract32((stm)->word[0], 0, 5))

 #endif
--
2.20.1


From: Alexander Graf <agraf@csgraf.de>

We will need to install a migration helper for the ARM hvf backend.
Let's introduce an arch callback for the overall hvf init chain to
do so.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210916155404.86958-4-agraf@csgraf.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/sysemu/hvf_int.h  | 1 +
 accel/hvf/hvf-accel-ops.c | 3 ++-
 target/i386/hvf/hvf.c     | 5 +++++
 3 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/hvf_int.h
+++ b/include/sysemu/hvf_int.h
@@ -XXX,XX +XXX,XX @@ struct hvf_vcpu_state {
 };

 void assert_hvf_ok(hv_return_t ret);
+int hvf_arch_init(void);
 int hvf_arch_init_vcpu(CPUState *cpu);
 void hvf_arch_vcpu_destroy(CPUState *cpu);
 int hvf_vcpu_exec(CPUState *);
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -XXX,XX +XXX,XX @@ static int hvf_accel_init(MachineState *ms)

     hvf_state = s;
     memory_listener_register(&hvf_memory_listener, &address_space_memory);
-    return 0;
+
+    return hvf_arch_init();
 }

 static void hvf_accel_class_init(ObjectClass *oc, void *data)
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@ static inline bool apic_bus_freq_is_known(CPUX86State *env)
     return env->apic_bus_freq != 0;
 }

+int hvf_arch_init(void)
+{
+    return 0;
+}
+
 int hvf_arch_init_vcpu(CPUState *cpu)
 {
     X86CPU *x86cpu = X86_CPU(cpu);
--
2.20.1
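
To see why the field width in the SMMUv3 fix matters: with only 4 bits
extracted, any SPAN value of 16 or more is silently truncated to its
low bits.  extract32() is reimplemented inline below with the same
semantics as QEMU's qemu/bitops.h helper so the snippet stands alone.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0U >> (32 - length));
    }

    int main(void)
    {
        uint32_t word0 = 0x10;                 /* L1 STE with SPAN = 16 */

        printf("4-bit SPAN: %u\n", extract32(word0, 0, 4)); /* 0: wrong */
        printf("5-bit SPAN: %u\n", extract32(word0, 0, 5)); /* 16: right */
        return 0;
    }
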
From: Vikram Garhwal <fnu.vikram@xilinx.com>

The QTests perform five tests on the Xilinx ZynqMP CAN controller:
    Tests the CAN controller in loopback, sleep and snoop mode.
    Tests filtering of incoming CAN messages.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Francisco Iglesias <francisco.iglesias@xilinx.com>
Signed-off-by: Vikram Garhwal <fnu.vikram@xilinx.com>
Message-id: 1605728926-352690-4-git-send-email-fnu.vikram@xilinx.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/qtest/xlnx-can-test.c | 360 ++++++++++++++++++++++++++++++++++++
 tests/qtest/meson.build     |   1 +
 2 files changed, 361 insertions(+)
 create mode 100644 tests/qtest/xlnx-can-test.c

diff --git a/tests/qtest/xlnx-can-test.c b/tests/qtest/xlnx-can-test.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/qtest/xlnx-can-test.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * QTests for the Xilinx ZynqMP CAN controller.
+ *
+ * Copyright (c) 2020 Xilinx Inc.
+ *
+ * Written-by: Vikram Garhwal<fnu.vikram@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "libqos/libqtest.h"
+
+/* Base address. */
+#define CAN0_BASE_ADDR 0xFF060000
+#define CAN1_BASE_ADDR 0xFF070000
+
+/* Register addresses. */
+#define R_SRR_OFFSET 0x00
+#define R_MSR_OFFSET 0x04
+#define R_SR_OFFSET 0x18
+#define R_ISR_OFFSET 0x1C
+#define R_ICR_OFFSET 0x24
+#define R_TXID_OFFSET 0x30
+#define R_TXDLC_OFFSET 0x34
+#define R_TXDATA1_OFFSET 0x38
+#define R_TXDATA2_OFFSET 0x3C
+#define R_RXID_OFFSET 0x50
+#define R_RXDLC_OFFSET 0x54
+#define R_RXDATA1_OFFSET 0x58
+#define R_RXDATA2_OFFSET 0x5C
+#define R_AFR 0x60
+#define R_AFMR1 0x64
+#define R_AFIR1 0x68
+#define R_AFMR2 0x6C
+#define R_AFIR2 0x70
+#define R_AFMR3 0x74
+#define R_AFIR3 0x78
+#define R_AFMR4 0x7C
+#define R_AFIR4 0x80
+
+/* CAN modes. */
+#define CONFIG_MODE 0x00
+#define NORMAL_MODE 0x00
+#define LOOPBACK_MODE 0x02
+#define SNOOP_MODE 0x04
+#define SLEEP_MODE 0x01
+#define ENABLE_CAN (1 << 1)
+#define STATUS_NORMAL_MODE (1 << 3)
+#define STATUS_LOOPBACK_MODE (1 << 1)
+#define STATUS_SNOOP_MODE (1 << 12)
+#define STATUS_SLEEP_MODE (1 << 2)
+#define ISR_TXOK (1 << 1)
+#define ISR_RXOK (1 << 4)
+
+static void match_rx_tx_data(const uint32_t *buf_tx, const uint32_t *buf_rx,
+                             uint8_t can_timestamp)
+{
+    uint16_t size = 0;
+    uint8_t len = 4;
+
+    while (size < len) {
+        if (R_RXID_OFFSET + 4 * size == R_RXDLC_OFFSET) {


From: Alexander Graf <agraf@csgraf.de>

With Apple Silicon available to the masses, it's a good time to add support
for driving its virtualization extensions from QEMU.

This patch adds all necessary architecture specific code to get basic VMs
working, including save/restore.

Known limitations:

- WFI handling is missing (follows in later patch)
- No watchpoint/breakpoint support

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210916155404.86958-5-agraf@csgraf.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 meson.build                 |   1 +
 include/sysemu/hvf_int.h    |  10 +-
 accel/hvf/hvf-accel-ops.c   |   9 +
 target/arm/hvf/hvf.c        | 794 ++++++++++++++++++++++++++++++++++++
 target/i386/hvf/hvf.c       |   5 +
 MAINTAINERS                 |   5 +
 target/arm/hvf/trace-events |  10 +
 7 files changed, 833 insertions(+), 1 deletion(-)
 create mode 100644 target/arm/hvf/hvf.c
 create mode 100644 target/arm/hvf/trace-events

diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ if have_system or have_user
   'accel/tcg',
   'hw/core',
   'target/arm',
+  'target/arm/hvf',
   'target/hppa',
   'target/i386',
   'target/i386/kvm',
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/hvf_int.h
+++ b/include/sysemu/hvf_int.h
@@ -XXX,XX +XXX,XX @@
 #ifndef HVF_INT_H
 #define HVF_INT_H

+#ifdef __aarch64__
+#include <Hypervisor/Hypervisor.h>
+#else
 #include <Hypervisor/hv.h>
+#endif

 /* hvf_slot flags */
 #define HVF_SLOT_LOG (1 << 0)
@@ -XXX,XX +XXX,XX @@ struct HVFState {
     int num_slots;

     hvf_vcpu_caps *hvf_caps;
+    uint64_t vtimer_offset;
 };
 extern HVFState *hvf_state;

 struct hvf_vcpu_state {
-    int fd;
+    uint64_t fd;
+    void *exit;
+    bool vtimer_masked;
 };

 void assert_hvf_ok(hv_return_t ret);
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *);
 hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
 int hvf_put_registers(CPUState *);
 int hvf_get_registers(CPUState *);
+void hvf_kick_vcpu_thread(CPUState *cpu);

 #endif
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -XXX,XX +XXX,XX @@

 HVFState *hvf_state;

+#ifdef __aarch64__
+#define HV_VM_DEFAULT NULL
+#endif
+
 /* Memory slots */

 hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
@@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu)
     pthread_sigmask(SIG_BLOCK, NULL, &set);
     sigdelset(&set, SIG_IPI);

+#ifdef __aarch64__
+    r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
+#else
     r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
+#endif
     cpu->vcpu_dirty = 1;
     assert_hvf_ok(r);

@@ -XXX,XX +XXX,XX @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
     AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

     ops->create_vcpu_thread = hvf_start_vcpu_thread;
+    ops->kick_vcpu_thread = hvf_kick_vcpu_thread;

     ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
     ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * QEMU Hypervisor.framework support for Apple Silicon
+
+ * Copyright 2020 Alexander Graf <agraf@csgraf.de>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+
+#include "sysemu/runstate.h"
+#include "sysemu/hvf.h"
+#include "sysemu/hvf_int.h"
+#include "sysemu/hw_accel.h"
+
+#include <mach/mach_time.h>
+
+#include "exec/address-spaces.h"
+#include "hw/irq.h"
+#include "qemu/main-loop.h"
+#include "sysemu/cpus.h"
+#include "target/arm/cpu.h"
+#include "target/arm/internals.h"
+#include "trace/trace-target_arm_hvf.h"
+#include "migration/vmstate.h"
+
+#define HVF_SYSREG(crn, crm, op0, op1, op2) \
+        ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
+#define PL1_WRITE_MASK 0x4
+
+#define SYSREG(op0, op1, crn, crm, op2) \
+    ((op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (crm << 1))
+#define SYSREG_MASK           SYSREG(0x3, 0x7, 0xf, 0xf, 0x7)
+#define SYSREG_OSLAR_EL1      SYSREG(2, 0, 1, 0, 4)
+#define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
+#define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
+#define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
+
+#define WFX_IS_WFE (1 << 0)
+
+#define TMR_CTL_ENABLE  (1 << 0)
+#define TMR_CTL_IMASK   (1 << 1)
+#define TMR_CTL_ISTATUS (1 << 2)
+
+typedef struct HVFVTimer {
+    /* Vtimer value during migration and paused state */
+    uint64_t vtimer_val;
+} HVFVTimer;
+
+static HVFVTimer vtimer;
+
+struct hvf_reg_match {
+    int reg;
+    uint64_t offset;
+};
+
+static const struct hvf_reg_match hvf_reg_match[] = {
+    { HV_REG_X0,   offsetof(CPUARMState, xregs[0]) },
+    { HV_REG_X1,   offsetof(CPUARMState, xregs[1]) },
103
+ g_assert_cmpint(buf_rx[size], ==, buf_tx[size] + can_timestamp);
187
+ { HV_REG_X2, offsetof(CPUARMState, xregs[2]) },
188
+ { HV_REG_X3, offsetof(CPUARMState, xregs[3]) },
189
+ { HV_REG_X4, offsetof(CPUARMState, xregs[4]) },
190
+ { HV_REG_X5, offsetof(CPUARMState, xregs[5]) },
191
+ { HV_REG_X6, offsetof(CPUARMState, xregs[6]) },
192
+ { HV_REG_X7, offsetof(CPUARMState, xregs[7]) },
193
+ { HV_REG_X8, offsetof(CPUARMState, xregs[8]) },
194
+ { HV_REG_X9, offsetof(CPUARMState, xregs[9]) },
195
+ { HV_REG_X10, offsetof(CPUARMState, xregs[10]) },
196
+ { HV_REG_X11, offsetof(CPUARMState, xregs[11]) },
197
+ { HV_REG_X12, offsetof(CPUARMState, xregs[12]) },
198
+ { HV_REG_X13, offsetof(CPUARMState, xregs[13]) },
199
+ { HV_REG_X14, offsetof(CPUARMState, xregs[14]) },
200
+ { HV_REG_X15, offsetof(CPUARMState, xregs[15]) },
201
+ { HV_REG_X16, offsetof(CPUARMState, xregs[16]) },
202
+ { HV_REG_X17, offsetof(CPUARMState, xregs[17]) },
203
+ { HV_REG_X18, offsetof(CPUARMState, xregs[18]) },
204
+ { HV_REG_X19, offsetof(CPUARMState, xregs[19]) },
205
+ { HV_REG_X20, offsetof(CPUARMState, xregs[20]) },
206
+ { HV_REG_X21, offsetof(CPUARMState, xregs[21]) },
207
+ { HV_REG_X22, offsetof(CPUARMState, xregs[22]) },
208
+ { HV_REG_X23, offsetof(CPUARMState, xregs[23]) },
209
+ { HV_REG_X24, offsetof(CPUARMState, xregs[24]) },
210
+ { HV_REG_X25, offsetof(CPUARMState, xregs[25]) },
211
+ { HV_REG_X26, offsetof(CPUARMState, xregs[26]) },
212
+ { HV_REG_X27, offsetof(CPUARMState, xregs[27]) },
213
+ { HV_REG_X28, offsetof(CPUARMState, xregs[28]) },
214
+ { HV_REG_X29, offsetof(CPUARMState, xregs[29]) },
215
+ { HV_REG_X30, offsetof(CPUARMState, xregs[30]) },
216
+ { HV_REG_PC, offsetof(CPUARMState, pc) },
217
+};
218
+
219
+static const struct hvf_reg_match hvf_fpreg_match[] = {
220
+ { HV_SIMD_FP_REG_Q0, offsetof(CPUARMState, vfp.zregs[0]) },
221
+ { HV_SIMD_FP_REG_Q1, offsetof(CPUARMState, vfp.zregs[1]) },
222
+ { HV_SIMD_FP_REG_Q2, offsetof(CPUARMState, vfp.zregs[2]) },
223
+ { HV_SIMD_FP_REG_Q3, offsetof(CPUARMState, vfp.zregs[3]) },
224
+ { HV_SIMD_FP_REG_Q4, offsetof(CPUARMState, vfp.zregs[4]) },
225
+ { HV_SIMD_FP_REG_Q5, offsetof(CPUARMState, vfp.zregs[5]) },
226
+ { HV_SIMD_FP_REG_Q6, offsetof(CPUARMState, vfp.zregs[6]) },
227
+ { HV_SIMD_FP_REG_Q7, offsetof(CPUARMState, vfp.zregs[7]) },
228
+ { HV_SIMD_FP_REG_Q8, offsetof(CPUARMState, vfp.zregs[8]) },
229
+ { HV_SIMD_FP_REG_Q9, offsetof(CPUARMState, vfp.zregs[9]) },
230
+ { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
231
+ { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
232
+ { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
233
+ { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
234
+ { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
235
+ { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
236
+ { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
237
+ { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
238
+ { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
239
+ { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
240
+ { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
241
+ { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
242
+ { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
243
+ { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
244
+ { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
245
+ { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
246
+ { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
247
+ { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
248
+ { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
249
+ { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
250
+ { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
251
+ { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
252
+};
253
+
254
+struct hvf_sreg_match {
255
+ int reg;
256
+ uint32_t key;
257
+ uint32_t cp_idx;
258
+};
259
+
260
+static struct hvf_sreg_match hvf_sreg_match[] = {
261
+ { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) },
262
+ { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) },
263
+ { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) },
264
+ { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) },
265
+
266
+ { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) },
267
+ { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) },
268
+ { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) },
269
+ { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) },
270
+
271
+ { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) },
272
+ { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) },
273
+ { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) },
274
+ { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) },
275
+
276
+ { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) },
277
+ { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) },
278
+ { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) },
279
+ { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) },
280
+
281
+ { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) },
282
+ { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) },
283
+ { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) },
284
+ { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) },
285
+
286
+ { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) },
287
+ { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) },
288
+ { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) },
289
+ { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) },
290
+
291
+ { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) },
292
+ { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) },
293
+ { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) },
294
+ { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) },
295
+
296
+ { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 4) },
297
+ { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) },
298
+ { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) },
299
+ { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) },
300
+
301
+ { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) },
302
+ { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) },
303
+ { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) },
304
+ { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) },
305
+
306
+ { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) },
307
+ { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) },
308
+ { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) },
309
+ { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) },
310
+
311
+ { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) },
312
+ { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) },
313
+ { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) },
314
+ { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) },
315
+
316
+ { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) },
317
+ { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) },
318
+ { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) },
319
+ { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) },
320
+
321
+ { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) },
322
+ { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) },
323
+ { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) },
324
+ { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) },
325
+
326
+ { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) },
327
+ { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) },
328
+ { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) },
329
+ { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) },
330
+
331
+ { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) },
332
+ { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) },
333
+ { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) },
334
+ { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) },
335
+
336
+ { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) },
337
+ { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) },
338
+ { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) },
339
+ { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) },
340
+
341
+#ifdef SYNC_NO_RAW_REGS
342
+ /*
343
+ * The registers below are manually synced on init because they are
344
+ * marked as NO_RAW. We still list them to make number space sync easier.
345
+ */
346
+ { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
347
+ { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
348
+ { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
349
+ { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
350
+#endif
351
+ { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) },
352
+ { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
353
+ { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
354
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
355
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
356
+#ifdef SYNC_NO_MMFR0
357
+ /* We keep the hardware MMFR0 around. HW limits are there anyway */
358
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
359
+#endif
360
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
361
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
362
+
363
+ { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
364
+ { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
365
+ { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
366
+ { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
367
+ { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
368
+ { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },
369
+
370
+ { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
371
+ { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
372
+ { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
373
+ { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
374
+ { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
375
+ { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
376
+ { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
377
+ { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
378
+ { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
379
+ { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },
380
+
381
+ { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
382
+ { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
383
+ { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
384
+ { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
385
+ { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
386
+ { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
387
+ { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
388
+ { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
389
+ { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
390
+ { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
391
+ { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
392
+ { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
393
+ { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
394
+ { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
395
+ { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
396
+ { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
397
+ { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
398
+ { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
399
+ { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
400
+ { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
401
+};
402
+
403
+int hvf_get_registers(CPUState *cpu)
404
+{
405
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
406
+ CPUARMState *env = &arm_cpu->env;
407
+ hv_return_t ret;
408
+ uint64_t val;
409
+ hv_simd_fp_uchar16_t fpval;
410
+ int i;
411
+
412
+ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
413
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
414
+ *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
415
+ assert_hvf_ok(ret);
416
+ }
417
+
418
+ for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
419
+ ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
420
+ &fpval);
421
+ memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
422
+ assert_hvf_ok(ret);
423
+ }
424
+
425
+ val = 0;
426
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
427
+ assert_hvf_ok(ret);
428
+ vfp_set_fpcr(env, val);
429
+
430
+ val = 0;
431
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
432
+ assert_hvf_ok(ret);
433
+ vfp_set_fpsr(env, val);
434
+
435
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
436
+ assert_hvf_ok(ret);
437
+ pstate_write(env, val);
438
+
439
+ for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
440
+ if (hvf_sreg_match[i].cp_idx == -1) {
441
+ continue;
442
+ }
443
+
444
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
445
+ assert_hvf_ok(ret);
446
+
447
+ arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
448
+ }
449
+ assert(write_list_to_cpustate(arm_cpu));
450
+
451
+ aarch64_restore_sp(env, arm_current_el(env));
452
+
453
+ return 0;
454
+}
455
+
456
+int hvf_put_registers(CPUState *cpu)
457
+{
458
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
459
+ CPUARMState *env = &arm_cpu->env;
460
+ hv_return_t ret;
461
+ uint64_t val;
462
+ hv_simd_fp_uchar16_t fpval;
463
+ int i;
464
+
465
+ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
466
+ val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
467
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
468
+ assert_hvf_ok(ret);
469
+ }
470
+
471
+ for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
472
+ memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
473
+ ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
474
+ fpval);
475
+ assert_hvf_ok(ret);
476
+ }
477
+
478
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
479
+ assert_hvf_ok(ret);
480
+
481
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
482
+ assert_hvf_ok(ret);
483
+
484
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
485
+ assert_hvf_ok(ret);
486
+
487
+ aarch64_save_sp(env, arm_current_el(env));
488
+
489
+ assert(write_cpustate_to_list(arm_cpu, false));
490
+ for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
491
+ if (hvf_sreg_match[i].cp_idx == -1) {
492
+ continue;
493
+ }
494
+
495
+ val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
496
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
497
+ assert_hvf_ok(ret);
498
+ }
499
+
500
+ ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset);
501
+ assert_hvf_ok(ret);
502
+
503
+ return 0;
504
+}
505
+
506
+static void flush_cpu_state(CPUState *cpu)
507
+{
508
+ if (cpu->vcpu_dirty) {
509
+ hvf_put_registers(cpu);
510
+ cpu->vcpu_dirty = false;
511
+ }
512
+}
513
+
514
+static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
515
+{
516
+ hv_return_t r;
517
+
518
+ flush_cpu_state(cpu);
519
+
520
+ if (rt < 31) {
521
+ r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
522
+ assert_hvf_ok(r);
523
+ }
524
+}
525
+
526
+static uint64_t hvf_get_reg(CPUState *cpu, int rt)
527
+{
528
+ uint64_t val = 0;
529
+ hv_return_t r;
530
+
531
+ flush_cpu_state(cpu);
532
+
533
+ if (rt < 31) {
534
+ r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
535
+ assert_hvf_ok(r);
536
+ }
537
+
538
+ return val;
539
+}
540
+
541
+void hvf_arch_vcpu_destroy(CPUState *cpu)
542
+{
543
+}
544
+
545
+int hvf_arch_init_vcpu(CPUState *cpu)
546
+{
547
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
548
+ CPUARMState *env = &arm_cpu->env;
549
+ uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
550
+ uint32_t sregs_cnt = 0;
551
+ uint64_t pfr;
552
+ hv_return_t ret;
553
+ int i;
554
+
555
+ env->aarch64 = 1;
556
+ asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));
557
+
558
+ /* Allocate enough space for our sysreg sync */
559
+ arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
560
+ sregs_match_len);
561
+ arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
562
+ sregs_match_len);
563
+ arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
564
+ arm_cpu->cpreg_vmstate_indexes,
565
+ sregs_match_len);
566
+ arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
567
+ arm_cpu->cpreg_vmstate_values,
568
+ sregs_match_len);
569
+
570
+ memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));
571
+
572
+ /* Populate cp list for all known sysregs */
573
+ for (i = 0; i < sregs_match_len; i++) {
574
+ const ARMCPRegInfo *ri;
575
+ uint32_t key = hvf_sreg_match[i].key;
576
+
577
+ ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
578
+ if (ri) {
579
+ assert(!(ri->type & ARM_CP_NO_RAW));
580
+ hvf_sreg_match[i].cp_idx = sregs_cnt;
581
+ arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
104
+ } else {
582
+ } else {
105
+ g_assert_cmpint(buf_rx[size], ==, buf_tx[size]);
583
+ hvf_sreg_match[i].cp_idx = -1;
106
+ }
584
+ }
107
+
585
+ }
108
+ size++;
586
+ arm_cpu->cpreg_array_len = sregs_cnt;
109
+ }
587
+ arm_cpu->cpreg_vmstate_array_len = sregs_cnt;
110
+}
588
+
111
+
589
+ assert(write_cpustate_to_list(arm_cpu, false));
112
+static void read_data(QTestState *qts, uint64_t can_base_addr, uint32_t *buf_rx)
590
+
113
+{
591
+ /* Set CP_NO_RAW system registers on init */
114
+ uint32_t int_status;
592
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
115
+
593
+ arm_cpu->midr);
116
+ /* Read the interrupt on CAN rx. */
594
+ assert_hvf_ok(ret);
117
+ int_status = qtest_readl(qts, can_base_addr + R_ISR_OFFSET) & ISR_RXOK;
595
+
118
+
596
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
119
+ g_assert_cmpint(int_status, ==, ISR_RXOK);
597
+ arm_cpu->mp_affinity);
120
+
598
+ assert_hvf_ok(ret);
121
+ /* Read the RX register data for CAN. */
599
+
122
+ buf_rx[0] = qtest_readl(qts, can_base_addr + R_RXID_OFFSET);
600
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
123
+ buf_rx[1] = qtest_readl(qts, can_base_addr + R_RXDLC_OFFSET);
601
+ assert_hvf_ok(ret);
124
+ buf_rx[2] = qtest_readl(qts, can_base_addr + R_RXDATA1_OFFSET);
602
+ pfr |= env->gicv3state ? (1 << 24) : 0;
125
+ buf_rx[3] = qtest_readl(qts, can_base_addr + R_RXDATA2_OFFSET);
603
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
126
+
604
+ assert_hvf_ok(ret);
127
+ /* Clear the RX interrupt. */
605
+
128
+ qtest_writel(qts, CAN1_BASE_ADDR + R_ICR_OFFSET, ISR_RXOK);
606
+ /* We're limited to underlying hardware caps, override internal versions */
129
+}
607
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
130
+
608
+ &arm_cpu->isar.id_aa64mmfr0);
131
+static void send_data(QTestState *qts, uint64_t can_base_addr,
609
+ assert_hvf_ok(ret);
132
+ const uint32_t *buf_tx)
610
+
133
+{
611
+ return 0;
134
+ uint32_t int_status;
612
+}
135
+
613
+
136
+ /* Write the TX register data for CAN. */
614
+void hvf_kick_vcpu_thread(CPUState *cpu)
137
+ qtest_writel(qts, can_base_addr + R_TXID_OFFSET, buf_tx[0]);
615
+{
138
+ qtest_writel(qts, can_base_addr + R_TXDLC_OFFSET, buf_tx[1]);
616
+ hv_vcpus_exit(&cpu->hvf->fd, 1);
139
+ qtest_writel(qts, can_base_addr + R_TXDATA1_OFFSET, buf_tx[2]);
617
+}
140
+ qtest_writel(qts, can_base_addr + R_TXDATA2_OFFSET, buf_tx[3]);
618
+
141
+
619
+static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
142
+ /* Read the interrupt on CAN for tx. */
620
+ uint32_t syndrome)
143
+ int_status = qtest_readl(qts, can_base_addr + R_ISR_OFFSET) & ISR_TXOK;
621
+{
144
+
622
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
145
+ g_assert_cmpint(int_status, ==, ISR_TXOK);
623
+ CPUARMState *env = &arm_cpu->env;
146
+
624
+
147
+ /* Clear the interrupt for tx. */
625
+ cpu->exception_index = excp;
148
+ qtest_writel(qts, CAN0_BASE_ADDR + R_ICR_OFFSET, ISR_TXOK);
626
+ env->exception.target_el = 1;
149
+}
627
+ env->exception.syndrome = syndrome;
150
+
628
+
151
+/*
629
+ arm_cpu_do_interrupt(cpu);
152
+ * This test will be transferring data from CAN0 and CAN1 through canbus. CAN0
630
+}
153
+ * initiate the data transfer to can-bus, CAN1 receives the data. Test compares
631
+
154
+ * the data sent from CAN0 with received on CAN1.
632
+static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
155
+ */
633
+{
156
+static void test_can_bus(void)
634
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
157
+{
635
+ CPUARMState *env = &arm_cpu->env;
158
+ const uint32_t buf_tx[4] = { 0xFF, 0x80000000, 0x12345678, 0x87654321 };
636
+ uint64_t val = 0;
159
+ uint32_t buf_rx[4] = { 0x00, 0x00, 0x00, 0x00 };
637
+
160
+ uint32_t status = 0;
638
+ switch (reg) {
161
+ uint8_t can_timestamp = 1;
639
+ case SYSREG_CNTPCT_EL0:
162
+
640
+ val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
163
+ QTestState *qts = qtest_init("-machine xlnx-zcu102"
641
+ gt_cntfrq_period_ns(arm_cpu);
164
+ " -object can-bus,id=canbus0"
642
+ break;
165
+ " -machine xlnx-zcu102.canbus0=canbus0"
643
+ case SYSREG_OSLSR_EL1:
166
+ " -machine xlnx-zcu102.canbus1=canbus0"
644
+ val = env->cp15.oslsr_el1;
167
+ );
645
+ break;
168
+
646
+ case SYSREG_OSDLR_EL1:
169
+ /* Configure the CAN0 and CAN1. */
647
+ /* Dummy register */
170
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
648
+ break;
171
+ qtest_writel(qts, CAN0_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
649
+ default:
172
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
650
+ cpu_synchronize_state(cpu);
173
+ qtest_writel(qts, CAN1_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
651
+ trace_hvf_unhandled_sysreg_read(env->pc, reg,
174
+
652
+ (reg >> 20) & 0x3,
175
+ /* Check here if CAN0 and CAN1 are in normal mode. */
653
+ (reg >> 14) & 0x7,
176
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
654
+ (reg >> 10) & 0xf,
177
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
655
+ (reg >> 1) & 0xf,
178
+
656
+ (reg >> 17) & 0x7);
179
+ status = qtest_readl(qts, CAN1_BASE_ADDR + R_SR_OFFSET);
657
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
180
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
658
+ return 1;
181
+
659
+ }
182
+ send_data(qts, CAN0_BASE_ADDR, buf_tx);
660
+
183
+
661
+ trace_hvf_sysreg_read(reg,
184
+ read_data(qts, CAN1_BASE_ADDR, buf_rx);
662
+ (reg >> 20) & 0x3,
185
+ match_rx_tx_data(buf_tx, buf_rx, can_timestamp);
663
+ (reg >> 14) & 0x7,
186
+
664
+ (reg >> 10) & 0xf,
187
+ qtest_quit(qts);
665
+ (reg >> 1) & 0xf,
188
+}
666
+ (reg >> 17) & 0x7,
189
+
667
+ val);
190
+/*
668
+ hvf_set_reg(cpu, rt, val);
191
+ * This test is performing loopback mode on CAN0 and CAN1. Data sent from TX of
669
+
192
+ * each CAN0 and CAN1 are compared with RX register data for respective CAN.
670
+ return 0;
193
+ */
671
+}
194
+static void test_can_loopback(void)
672
+
195
+{
673
+static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
196
+ uint32_t buf_tx[4] = { 0xFF, 0x80000000, 0x12345678, 0x87654321 };
674
+{
197
+ uint32_t buf_rx[4] = { 0x00, 0x00, 0x00, 0x00 };
675
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
198
+ uint32_t status = 0;
676
+ CPUARMState *env = &arm_cpu->env;
199
+
677
+
200
+ QTestState *qts = qtest_init("-machine xlnx-zcu102"
678
+ trace_hvf_sysreg_write(reg,
201
+ " -object can-bus,id=canbus0"
679
+ (reg >> 20) & 0x3,
202
+ " -machine xlnx-zcu102.canbus0=canbus0"
680
+ (reg >> 14) & 0x7,
203
+ " -machine xlnx-zcu102.canbus1=canbus0"
681
+ (reg >> 10) & 0xf,
204
+ );
682
+ (reg >> 1) & 0xf,
205
+
683
+ (reg >> 17) & 0x7,
206
+ /* Configure the CAN0 in loopback mode. */
684
+ val);
207
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, CONFIG_MODE);
685
+
208
+ qtest_writel(qts, CAN0_BASE_ADDR + R_MSR_OFFSET, LOOPBACK_MODE);
686
+ switch (reg) {
209
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
687
+ case SYSREG_OSLAR_EL1:
210
+
688
+ env->cp15.oslsr_el1 = val & 1;
211
+ /* Check here if CAN0 is set in loopback mode. */
689
+ break;
212
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
690
+ case SYSREG_OSDLR_EL1:
213
+
691
+ /* Dummy register */
214
+ g_assert_cmpint(status, ==, STATUS_LOOPBACK_MODE);
692
+ break;
215
+
693
+ default:
216
+ send_data(qts, CAN0_BASE_ADDR, buf_tx);
694
+ cpu_synchronize_state(cpu);
217
+ read_data(qts, CAN0_BASE_ADDR, buf_rx);
695
+ trace_hvf_unhandled_sysreg_write(env->pc, reg,
218
+ match_rx_tx_data(buf_tx, buf_rx, 0);
696
+ (reg >> 20) & 0x3,
219
+
697
+ (reg >> 14) & 0x7,
220
+ /* Configure the CAN1 in loopback mode. */
698
+ (reg >> 10) & 0xf,
221
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, CONFIG_MODE);
699
+ (reg >> 1) & 0xf,
222
+ qtest_writel(qts, CAN1_BASE_ADDR + R_MSR_OFFSET, LOOPBACK_MODE);
700
+ (reg >> 17) & 0x7);
223
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
701
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
224
+
702
+ return 1;
225
+ /* Check here if CAN1 is set in loopback mode. */
703
+ }
226
+ status = qtest_readl(qts, CAN1_BASE_ADDR + R_SR_OFFSET);
704
+
227
+
705
+ return 0;
228
+ g_assert_cmpint(status, ==, STATUS_LOOPBACK_MODE);
706
+}
229
+
707
+
230
+ send_data(qts, CAN1_BASE_ADDR, buf_tx);
708
+static int hvf_inject_interrupts(CPUState *cpu)
231
+ read_data(qts, CAN1_BASE_ADDR, buf_rx);
709
+{
232
+ match_rx_tx_data(buf_tx, buf_rx, 0);
710
+ if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
233
+
711
+ trace_hvf_inject_fiq();
234
+ qtest_quit(qts);
712
+ hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ,
235
+}
713
+ true);
236
+
714
+ }
237
+/*
715
+
238
+ * Enable filters for CAN1. This will filter incoming messages with ID. In this
716
+ if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
239
+ * test message will pass through filter 2.
717
+ trace_hvf_inject_irq();
240
+ */
718
+ hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ,
241
+static void test_can_filter(void)
719
+ true);
242
+{
720
+ }
243
+ uint32_t buf_tx[4] = { 0x14, 0x80000000, 0x12345678, 0x87654321 };
721
+
244
+ uint32_t buf_rx[4] = { 0x00, 0x00, 0x00, 0x00 };
722
+ return 0;
245
+ uint32_t status = 0;
723
+}
246
+ uint8_t can_timestamp = 1;
724
+
247
+
725
+static uint64_t hvf_vtimer_val_raw(void)
248
+ QTestState *qts = qtest_init("-machine xlnx-zcu102"
726
+{
249
+ " -object can-bus,id=canbus0"
250
+ " -machine xlnx-zcu102.canbus0=canbus0"
251
+ " -machine xlnx-zcu102.canbus1=canbus0"
252
+ );
253
+
254
+ /* Configure the CAN0 and CAN1. */
255
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
256
+ qtest_writel(qts, CAN0_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
257
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
258
+ qtest_writel(qts, CAN1_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
259
+
260
+ /* Check here if CAN0 and CAN1 are in normal mode. */
261
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
262
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
263
+
264
+ status = qtest_readl(qts, CAN1_BASE_ADDR + R_SR_OFFSET);
265
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
266
+
267
+ /* Set filter for CAN1 for incoming messages. */
268
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFR, 0x0);
269
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFMR1, 0xF7);
270
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFIR1, 0x121F);
271
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFMR2, 0x5431);
272
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFIR2, 0x14);
273
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFMR3, 0x1234);
274
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFIR3, 0x5431);
275
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFMR4, 0xFFF);
276
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFIR4, 0x1234);
277
+
278
+ qtest_writel(qts, CAN1_BASE_ADDR + R_AFR, 0xF);
279
+
280
+ send_data(qts, CAN0_BASE_ADDR, buf_tx);
281
+
282
+ read_data(qts, CAN1_BASE_ADDR, buf_rx);
283
+ match_rx_tx_data(buf_tx, buf_rx, can_timestamp);
284
+
285
+ qtest_quit(qts);
286
+}
287
+
288
+/* Testing sleep mode on CAN0 while CAN1 is in normal mode. */
289
+static void test_can_sleepmode(void)
290
+{
291
+ uint32_t buf_tx[4] = { 0x14, 0x80000000, 0x12345678, 0x87654321 };
292
+ uint32_t buf_rx[4] = { 0x00, 0x00, 0x00, 0x00 };
293
+ uint32_t status = 0;
294
+ uint8_t can_timestamp = 1;
295
+
296
+ QTestState *qts = qtest_init("-machine xlnx-zcu102"
297
+ " -object can-bus,id=canbus0"
298
+ " -machine xlnx-zcu102.canbus0=canbus0"
299
+ " -machine xlnx-zcu102.canbus1=canbus0"
300
+ );
301
+
302
+ /* Configure the CAN0. */
303
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, CONFIG_MODE);
304
+ qtest_writel(qts, CAN0_BASE_ADDR + R_MSR_OFFSET, SLEEP_MODE);
305
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
306
+
307
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
308
+ qtest_writel(qts, CAN1_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
309
+
310
+ /* Check here if CAN0 is in SLEEP mode and CAN1 in normal mode. */
311
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
312
+ g_assert_cmpint(status, ==, STATUS_SLEEP_MODE);
313
+
314
+ status = qtest_readl(qts, CAN1_BASE_ADDR + R_SR_OFFSET);
315
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
316
+
317
+ send_data(qts, CAN1_BASE_ADDR, buf_tx);
318
+
319
+ /*
727
+ /*
320
+ * Once CAN1 sends data on can-bus. CAN0 should exit sleep mode.
728
+ * mach_absolute_time() returns the vtimer value without the VM
321
+ * Check the CAN0 status now. It should exit the sleep mode and receive the
729
+ * offset that we define. Add our own offset on top.
322
+ * incoming data.
323
+ */
730
+ */
324
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
731
+ return mach_absolute_time() - hvf_state->vtimer_offset;
325
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
732
+}
326
+
733
+
327
+ read_data(qts, CAN0_BASE_ADDR, buf_rx);
734
+static void hvf_sync_vtimer(CPUState *cpu)
328
+
735
+{
329
+ match_rx_tx_data(buf_tx, buf_rx, can_timestamp);
736
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
330
+
737
+ hv_return_t r;
331
+ qtest_quit(qts);
738
+ uint64_t ctl;
332
+}
739
+ bool irq_state;
333
+
740
+
334
+/* Testing Snoop mode on CAN0 while CAN1 is in normal mode. */
741
+ if (!cpu->hvf->vtimer_masked) {
335
+static void test_can_snoopmode(void)
742
+ /* We will get notified on vtimer changes by hvf, nothing to do */
336
+{
743
+ return;
337
+ uint32_t buf_tx[4] = { 0x14, 0x80000000, 0x12345678, 0x87654321 };
744
+ }
338
+ uint32_t buf_rx[4] = { 0x00, 0x00, 0x00, 0x00 };
745
+
339
+ uint32_t status = 0;
746
+ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
340
+ uint8_t can_timestamp = 1;
747
+ assert_hvf_ok(r);
341
+
748
+
342
+ QTestState *qts = qtest_init("-machine xlnx-zcu102"
749
+ irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
343
+ " -object can-bus,id=canbus0"
750
+ (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
344
+ " -machine xlnx-zcu102.canbus0=canbus0"
751
+ qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);
345
+ " -machine xlnx-zcu102.canbus1=canbus0"
752
+
346
+ );
753
+ if (!irq_state) {
347
+
754
+ /* Timer no longer asserting, we can unmask it */
348
+ /* Configure the CAN0. */
755
+ hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
349
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, CONFIG_MODE);
756
+ cpu->hvf->vtimer_masked = false;
350
+ qtest_writel(qts, CAN0_BASE_ADDR + R_MSR_OFFSET, SNOOP_MODE);
757
+ }
351
+ qtest_writel(qts, CAN0_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
758
+}
352
+
759
+
353
+ qtest_writel(qts, CAN1_BASE_ADDR + R_SRR_OFFSET, ENABLE_CAN);
760
+int hvf_vcpu_exec(CPUState *cpu)
354
+ qtest_writel(qts, CAN1_BASE_ADDR + R_MSR_OFFSET, NORMAL_MODE);
761
+{
355
+
762
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
356
+ /* Check here if CAN0 is in SNOOP mode and CAN1 in normal mode. */
763
+ CPUARMState *env = &arm_cpu->env;
357
+ status = qtest_readl(qts, CAN0_BASE_ADDR + R_SR_OFFSET);
764
+ hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
358
+ g_assert_cmpint(status, ==, STATUS_SNOOP_MODE);
765
+ hv_return_t r;
359
+
766
+ bool advance_pc = false;
360
+ status = qtest_readl(qts, CAN1_BASE_ADDR + R_SR_OFFSET);
767
+
361
+ g_assert_cmpint(status, ==, STATUS_NORMAL_MODE);
768
+ if (hvf_inject_interrupts(cpu)) {
362
+
769
+ return EXCP_INTERRUPT;
363
+ send_data(qts, CAN1_BASE_ADDR, buf_tx);
770
+ }
364
+
771
+
365
+ read_data(qts, CAN0_BASE_ADDR, buf_rx);
772
+ if (cpu->halted) {
366
+
773
+ return EXCP_HLT;
367
+ match_rx_tx_data(buf_tx, buf_rx, can_timestamp);
774
+ }
368
+
775
+
369
+ qtest_quit(qts);
776
+ flush_cpu_state(cpu);
370
+}
777
+
371
+
778
+ qemu_mutex_unlock_iothread();
372
+int main(int argc, char **argv)
779
+ assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));
373
+{
780
+
374
+ g_test_init(&argc, &argv, NULL);
781
+ /* handle VMEXIT */
375
+
782
+ uint64_t exit_reason = hvf_exit->reason;
376
+ qtest_add_func("/net/can/can_bus", test_can_bus);
783
+ uint64_t syndrome = hvf_exit->exception.syndrome;
377
+ qtest_add_func("/net/can/can_loopback", test_can_loopback);
784
+ uint32_t ec = syn_get_ec(syndrome);
378
+ qtest_add_func("/net/can/can_filter", test_can_filter);
785
+
379
+ qtest_add_func("/net/can/can_test_snoopmode", test_can_snoopmode);
786
+ qemu_mutex_lock_iothread();
380
+ qtest_add_func("/net/can/can_test_sleepmode", test_can_sleepmode);
787
+ switch (exit_reason) {
381
+
788
+ case HV_EXIT_REASON_EXCEPTION:
382
+ return g_test_run();
789
+ /* This is the main one, handle below. */
383
+}
790
+ break;
384
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
791
+ case HV_EXIT_REASON_VTIMER_ACTIVATED:
792
+ qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
793
+ cpu->hvf->vtimer_masked = true;
794
+ return 0;
795
+ case HV_EXIT_REASON_CANCELED:
796
+ /* we got kicked, no exit to process */
797
+ return 0;
798
+ default:
799
+ assert(0);
800
+ }
801
+
802
+ hvf_sync_vtimer(cpu);
803
+
804
+ switch (ec) {
805
+ case EC_DATAABORT: {
806
+ bool isv = syndrome & ARM_EL_ISV;
807
+ bool iswrite = (syndrome >> 6) & 1;
808
+ bool s1ptw = (syndrome >> 7) & 1;
809
+ uint32_t sas = (syndrome >> 22) & 3;
810
+ uint32_t len = 1 << sas;
811
+ uint32_t srt = (syndrome >> 16) & 0x1f;
812
+ uint64_t val = 0;
813
+
814
+ trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
815
+ hvf_exit->exception.physical_address, isv,
816
+ iswrite, s1ptw, len, srt);
817
+
818
+ assert(isv);
819
+
820
+ if (iswrite) {
821
+ val = hvf_get_reg(cpu, srt);
822
+ address_space_write(&address_space_memory,
823
+ hvf_exit->exception.physical_address,
824
+ MEMTXATTRS_UNSPECIFIED, &val, len);
825
+ } else {
826
+ address_space_read(&address_space_memory,
827
+ hvf_exit->exception.physical_address,
828
+ MEMTXATTRS_UNSPECIFIED, &val, len);
829
+ hvf_set_reg(cpu, srt, val);
830
+ }
831
+
832
+ advance_pc = true;
833
+ break;
834
+ }
835
+ case EC_SYSTEMREGISTERTRAP: {
836
+ bool isread = (syndrome >> 0) & 1;
837
+ uint32_t rt = (syndrome >> 5) & 0x1f;
838
+ uint32_t reg = syndrome & SYSREG_MASK;
839
+ uint64_t val;
840
+ int ret = 0;
841
+
842
+ if (isread) {
843
+ ret = hvf_sysreg_read(cpu, reg, rt);
844
+ } else {
845
+ val = hvf_get_reg(cpu, rt);
846
+ ret = hvf_sysreg_write(cpu, reg, val);
847
+ }
848
+
849
+ advance_pc = !ret;
850
+ break;
851
+ }
852
+ case EC_WFX_TRAP:
853
+ advance_pc = true;
854
+ break;
855
+ case EC_AA64_HVC:
856
+ cpu_synchronize_state(cpu);
857
+ trace_hvf_unknown_hvc(env->xregs[0]);
858
+ /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
859
+ env->xregs[0] = -1;
860
+ break;
861
+ case EC_AA64_SMC:
862
+ cpu_synchronize_state(cpu);
863
+ trace_hvf_unknown_smc(env->xregs[0]);
864
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
865
+ break;
866
+ default:
867
+ cpu_synchronize_state(cpu);
868
+ trace_hvf_exit(syndrome, ec, env->pc);
869
+ error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
870
+ }
871
+
872
+ if (advance_pc) {
873
+ uint64_t pc;
874
+
875
+ flush_cpu_state(cpu);
876
+
877
+ r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
878
+ assert_hvf_ok(r);
879
+ pc += 4;
880
+ r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
881
+ assert_hvf_ok(r);
882
+ }
883
+
884
+ return 0;
885
+}
886
+
887
+static const VMStateDescription vmstate_hvf_vtimer = {
888
+ .name = "hvf-vtimer",
889
+ .version_id = 1,
890
+ .minimum_version_id = 1,
891
+ .fields = (VMStateField[]) {
892
+ VMSTATE_UINT64(vtimer_val, HVFVTimer),
893
+ VMSTATE_END_OF_LIST()
894
+ },
895
+};
896
+
897
+static void hvf_vm_state_change(void *opaque, bool running, RunState state)
898
+{
899
+ HVFVTimer *s = opaque;
900
+
901
+ if (running) {
902
+ /* Update vtimer offset on all CPUs */
903
+ hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
904
+ cpu_synchronize_all_states();
905
+ } else {
906
+ /* Remember vtimer value on every pause */
907
+ s->vtimer_val = hvf_vtimer_val_raw();
908
+ }
909
+}
910
+
911
+int hvf_arch_init(void)
912
+{
913
+ hvf_state->vtimer_offset = mach_absolute_time();
914
+ vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
915
+ qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);
916
+ return 0;
917
+}
918
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
385
index XXXXXXX..XXXXXXX 100644
919
index XXXXXXX..XXXXXXX 100644
386
--- a/tests/qtest/meson.build
920
--- a/target/i386/hvf/hvf.c
387
+++ b/tests/qtest/meson.build
921
+++ b/target/i386/hvf/hvf.c
388
@@ -XXX,XX +XXX,XX @@ qtests_aarch64 = \
922
@@ -XXX,XX +XXX,XX @@ static inline bool apic_bus_freq_is_known(CPUX86State *env)
389
['arm-cpu-features',
923
return env->apic_bus_freq != 0;
390
'numa-test',
924
}
391
'boot-serial-test',
925
392
+ 'xlnx-can-test',
926
+void hvf_kick_vcpu_thread(CPUState *cpu)
393
'migration-test']
927
+{
394
928
+ cpus_kick_thread(cpu);
395
qtests_s390x = \
929
+}
930
+
931
int hvf_arch_init(void)
932
{
933
return 0;
934
diff --git a/MAINTAINERS b/MAINTAINERS
935
index XXXXXXX..XXXXXXX 100644
936
--- a/MAINTAINERS
937
+++ b/MAINTAINERS
938
@@ -XXX,XX +XXX,XX @@ F: accel/accel-*.c
939
F: accel/Makefile.objs
940
F: accel/stubs/Makefile.objs
941
942
+Apple Silicon HVF CPUs
943
+M: Alexander Graf <agraf@csgraf.de>
944
+S: Maintained
945
+F: target/arm/hvf/
946
+
947
X86 HVF CPUs
948
M: Cameron Esfahani <dirty@apple.com>
949
M: Roman Bolshakov <r.bolshakov@yadro.com>
950
diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events
951
new file mode 100644
952
index XXXXXXX..XXXXXXX
953
--- /dev/null
954
+++ b/target/arm/hvf/trace-events
955
@@ -XXX,XX +XXX,XX @@
956
+hvf_unhandled_sysreg_read(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg read at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)"
957
+hvf_unhandled_sysreg_write(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg write at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)"
958
+hvf_inject_fiq(void) "injecting FIQ"
959
+hvf_inject_irq(void) "injecting IRQ"
960
+hvf_data_abort(uint64_t pc, uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [pc=0x%"PRIx64" va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]"
961
+hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64
962
+hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d, val=0x%016"PRIx64")"
963
+hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64
964
+hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64
965
+hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]"
396
--
966
--
397
2.20.1
967
2.20.1
398
968
399
969
From: Peter Collingbourne <pcc@google.com>

Sleep on WFI until the VTIMER is due but allow ourselves to be woken
up on IPI.

In this implementation IPI is blocked on the CPU thread at startup and
pselect() is used to atomically unblock the signal and begin sleeping.
The signal is sent unconditionally so there's no need to worry about
races between actually sleeping and the "we think we're sleeping"
state. It may lead to an extra wakeup but that's better than missing
it entirely.
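
To make the trick concrete, here is a minimal standalone sketch of the
same unblock-and-wait pattern (illustrative only: the helper names are
made up, and plain SIGUSR1 stands in for QEMU's SIG_IPI):

    #include <pthread.h>
    #include <signal.h>
    #include <sys/select.h>

    static sigset_t wait_mask;  /* the thread's signal mask minus the IPI */

    static void ipi_handler(int sig)
    {
        /* No work here; delivery alone interrupts pselect(). */
    }

    static void ipi_setup(void)
    {
        /* Assumes SIGUSR1 was blocked when the thread was created. */
        struct sigaction sa = { .sa_handler = ipi_handler };

        sigaction(SIGUSR1, &sa, NULL);
        pthread_sigmask(SIG_BLOCK, NULL, &wait_mask);
        sigdelset(&wait_mask, SIGUSR1); /* only unblocked while waiting */
    }

    static void wait_for_ipi(const struct timespec *timeout)
    {
        /*
         * pselect() installs wait_mask and begins waiting in one atomic
         * step, so a signal sent just before this call is not lost: it
         * is delivered as soon as the mask opens and ends the wait.
         */
        pselect(0, NULL, NULL, NULL, timeout, &wait_mask);
    }

A plain sigprocmask()+select() pair would leave a window between
unblocking the signal and going to sleep in which an IPI could be
consumed without waking anyone; pselect() closes exactly that window.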
Signed-off-by: Peter Collingbourne <pcc@google.com>
Signed-off-by: Alexander Graf <agraf@csgraf.de>
Acked-by: Roman Bolshakov <r.bolshakov@yadro.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Message-id: 20210916155404.86958-6-agraf@csgraf.de
[agraf: Remove unused 'set' variable, always advance PC on WFX trap,
support vm stop / continue operations and cntv offsets]
Signed-off-by: Alexander Graf <agraf@csgraf.de>
Acked-by: Roman Bolshakov <r.bolshakov@yadro.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/sysemu/hvf_int.h  |  1 +
 accel/hvf/hvf-accel-ops.c |  5 +--
 target/arm/hvf/hvf.c      | 79 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 82 insertions(+), 3 deletions(-)

diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/hvf_int.h
+++ b/include/sysemu/hvf_int.h
@@ -XXX,XX +XXX,XX @@ struct hvf_vcpu_state {
     uint64_t fd;
     void *exit;
     bool vtimer_masked;
+    sigset_t unblock_ipi_mask;
 };
 
 void assert_hvf_ok(hv_return_t ret);
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu)
     cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
 
     /* init cpu signals */
-    sigset_t set;
     struct sigaction sigact;
 
     memset(&sigact, 0, sizeof(sigact));
     sigact.sa_handler = dummy_signal;
     sigaction(SIG_IPI, &sigact, NULL);
 
-    pthread_sigmask(SIG_BLOCK, NULL, &set);
-    sigdelset(&set, SIG_IPI);
+    pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
+    sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
 
 #ifdef __aarch64__
     r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@
  * QEMU Hypervisor.framework support for Apple Silicon
 
  * Copyright 2020 Alexander Graf <agraf@csgraf.de>
+ * Copyright 2020 Google LLC
  *
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.
@@ -XXX,XX +XXX,XX @@ int hvf_arch_init_vcpu(CPUState *cpu)
 
 void hvf_kick_vcpu_thread(CPUState *cpu)
 {
+    cpus_kick_thread(cpu);
     hv_vcpus_exit(&cpu->hvf->fd, 1);
 }
 
@@ -XXX,XX +XXX,XX @@ static uint64_t hvf_vtimer_val_raw(void)
     return mach_absolute_time() - hvf_state->vtimer_offset;
 }
 
+static uint64_t hvf_vtimer_val(void)
+{
+    if (!runstate_is_running()) {
+        /* VM is paused, the vtimer value is in vtimer.vtimer_val */
+        return vtimer.vtimer_val;
+    }
+
+    return hvf_vtimer_val_raw();
+}
+
+static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
+{
+    /*
+     * Use pselect to sleep so that other threads can IPI us while we're
+     * sleeping.
+     */
+    qatomic_mb_set(&cpu->thread_kicked, false);
+    qemu_mutex_unlock_iothread();
+    pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
+    qemu_mutex_lock_iothread();
+}
+
+static void hvf_wfi(CPUState *cpu)
+{
+    ARMCPU *arm_cpu = ARM_CPU(cpu);
+    struct timespec ts;
+    hv_return_t r;
+    uint64_t ctl;
+    uint64_t cval;
+    int64_t ticks_to_sleep;
+    uint64_t seconds;
+    uint64_t nanos;
+    uint32_t cntfrq;
+
+    if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
+        /* Interrupt pending, no need to wait */
+        return;
+    }
+
+    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
+    assert_hvf_ok(r);
+
+    if (!(ctl & 1) || (ctl & 2)) {
+        /* Timer disabled or masked, just wait for an IPI. */
+        hvf_wait_for_ipi(cpu, NULL);
+        return;
+    }
+
+    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
+    assert_hvf_ok(r);
+
+    ticks_to_sleep = cval - hvf_vtimer_val();
+    if (ticks_to_sleep < 0) {
+        return;
+    }
+
+    cntfrq = gt_cntfrq_period_ns(arm_cpu);
+    seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
+    ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
+    nanos = ticks_to_sleep * cntfrq;
+
+    /*
+     * Don't sleep for less than the time a context switch would take,
+     * so that we can satisfy fast timer requests on the same CPU.
+     * Measurements on M1 show the sweet spot to be ~2ms.
+     */
+    if (!seconds && nanos < (2 * SCALE_MS)) {
+        return;
+    }
+
+    ts = (struct timespec) { seconds, nanos };
+    hvf_wait_for_ipi(cpu, &ts);
+}
+
 static void hvf_sync_vtimer(CPUState *cpu)
 {
     ARMCPU *arm_cpu = ARM_CPU(cpu);
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
     }
     case EC_WFX_TRAP:
         advance_pc = true;
+        if (!(syndrome & WFX_IS_WFE)) {
+            hvf_wfi(cpu);
+        }
         break;
     case EC_AA64_HVC:
         cpu_synchronize_state(cpu);
--
2.20.1

Now that we have working system register sync, we push more target CPU
properties into the virtual machine. That might be useful in some
situations, but is not the typical case that users want.

So let's add a -cpu host option that allows them to explicitly pass all
CPU capabilities of their host CPU into the guest.
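
A guest could then be started with the host CPU model, for example (an
illustrative invocation only; everything apart from "-accel hvf -cpu
host" is a placeholder the user would adapt):

    qemu-system-aarch64 -machine virt -accel hvf -cpu host \
        -m 1G -drive file=guest.img,if=virtio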
7
Example for using single CAN:
7
8
-object can-bus,id=canbus0 \
8
Signed-off-by: Alexander Graf <agraf@csgraf.de>
9
-machine xlnx-zcu102.canbus0=canbus0 \
9
Acked-by: Roman Bolshakov <r.bolshakov@yadro.com>
10
-object can-host-socketcan,id=socketcan0,if=vcan0,canbus=canbus0
10
Reviewed-by: Sergio Lopez <slp@redhat.com>
11
12
Example for connecting both CAN to same virtual CAN on host machine:
13
-object can-bus,id=canbus0 -object can-bus,id=canbus1 \
14
-machine xlnx-zcu102.canbus0=canbus0 \
15
-machine xlnx-zcu102.canbus1=canbus1 \
16
-object can-host-socketcan,id=socketcan0,if=vcan0,canbus=canbus0 \
17
-object can-host-socketcan,id=socketcan1,if=vcan0,canbus=canbus1
18
19
To create virtual CAN on the host machine, please check the QEMU CAN docs:
20
https://github.com/qemu/qemu/blob/master/docs/can.txt
21
22
Signed-off-by: Vikram Garhwal <fnu.vikram@xilinx.com>
23
Message-id: 1605728926-352690-2-git-send-email-fnu.vikram@xilinx.com
24
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Message-id: 20210916155404.86958-7-agraf@csgraf.de
13
[PMM: drop unnecessary #include line from .h file]
25
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
26
---
15
---
27
meson.build | 1 +
16
target/arm/cpu.h | 2 +
28
hw/net/can/trace.h | 1 +
17
target/arm/hvf_arm.h | 18 +++++++++
29
include/hw/net/xlnx-zynqmp-can.h | 78 ++
18
target/arm/kvm_arm.h | 2 -
30
hw/net/can/xlnx-zynqmp-can.c | 1161 ++++++++++++++++++++++++++++++
19
target/arm/cpu.c | 13 ++++--
31
hw/Kconfig | 1 +
20
target/arm/hvf/hvf.c | 95 ++++++++++++++++++++++++++++++++++++++++++++
32
hw/net/can/meson.build | 1 +
21
5 files changed, 124 insertions(+), 6 deletions(-)
33
hw/net/can/trace-events | 9 +
22
create mode 100644 target/arm/hvf_arm.h
34
7 files changed, 1252 insertions(+)
23
35
create mode 100644 hw/net/can/trace.h
24
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
36
create mode 100644 include/hw/net/xlnx-zynqmp-can.h
25
index XXXXXXX..XXXXXXX 100644
37
create mode 100644 hw/net/can/xlnx-zynqmp-can.c
26
--- a/target/arm/cpu.h
38
create mode 100644 hw/net/can/trace-events
27
+++ b/target/arm/cpu.h
39
28
@@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_ARM_CPU

+#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
+
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list

diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ if have_system
'hw/misc',
'hw/misc/macio',
'hw/net',
+ 'hw/net/can',
'hw/nvram',
'hw/pci',
'hw/pci-host',
diff --git a/hw/net/can/trace.h b/hw/net/can/trace.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/net/can/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_net_can.h"
diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/hvf_arm.h
diff --git a/include/hw/net/xlnx-zynqmp-can.h b/include/hw/net/xlnx-zynqmp-can.h
60
new file mode 100644
61
index XXXXXXX..XXXXXXX
62
--- /dev/null
63
+++ b/include/hw/net/xlnx-zynqmp-can.h
64
@@ -XXX,XX +XXX,XX @@
42
@@ -XXX,XX +XXX,XX @@
65
+/*
43
+/*
66
+ * QEMU model of the Xilinx ZynqMP CAN controller.
44
+ * QEMU Hypervisor.framework (HVF) support -- ARM specifics
67
+ *
45
+ *
68
+ * Copyright (c) 2020 Xilinx Inc.
46
+ * Copyright (c) 2021 Alexander Graf
69
+ *
47
+ *
70
+ * Written-by: Vikram Garhwal <fnu.vikram@xilinx.com>
48
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
49
+ * See the COPYING file in the top-level directory.
71
+ *
50
+ *
72
+ * Based on QEMU CAN Device emulation implemented by Jin Yang, Deniz Eren and
73
+ * Pavel Pisa.
74
+ *
75
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
76
+ * of this software and associated documentation files (the "Software"), to deal
77
+ * in the Software without restriction, including without limitation the rights
78
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
79
+ * copies of the Software, and to permit persons to whom the Software is
80
+ * furnished to do so, subject to the following conditions:
81
+ *
82
+ * The above copyright notice and this permission notice shall be included in
83
+ * all copies or substantial portions of the Software.
84
+ *
85
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
86
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
87
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
88
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
89
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
90
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
91
+ * THE SOFTWARE.
92
+ */
51
+ */
93
+
52
+
94
+#ifndef XLNX_ZYNQMP_CAN_H
53
+#ifndef QEMU_HVF_ARM_H
95
+#define XLNX_ZYNQMP_CAN_H
54
+#define QEMU_HVF_ARM_H
96
+
55
+
97
+#include "hw/register.h"
56
+#include "cpu.h"
98
+#include "net/can_emu.h"
57
+
99
+#include "net/can_host.h"
58
+void hvf_arm_set_cpu_features_from_host(struct ARMCPU *cpu);
100
+#include "qemu/fifo32.h"
101
+#include "hw/ptimer.h"
102
+#include "hw/qdev-clock.h"
103
+
104
+#define TYPE_XLNX_ZYNQMP_CAN "xlnx.zynqmp-can"
105
+
106
+#define XLNX_ZYNQMP_CAN(obj) \
107
+ OBJECT_CHECK(XlnxZynqMPCANState, (obj), TYPE_XLNX_ZYNQMP_CAN)
108
+
109
+#define MAX_CAN_CTRLS 2
110
+#define XLNX_ZYNQMP_CAN_R_MAX (0x84 / 4)
111
+#define MAILBOX_CAPACITY 64
112
+#define CAN_TIMER_MAX 0XFFFFUL
113
+#define CAN_DEFAULT_CLOCK (24 * 1000 * 1000)
114
+
115
+/* Each CAN_FRAME will have 4 * 32bit size. */
116
+#define CAN_FRAME_SIZE 4
117
+#define RXFIFO_SIZE (MAILBOX_CAPACITY * CAN_FRAME_SIZE)
118
+
119
+typedef struct XlnxZynqMPCANState {
120
+ SysBusDevice parent_obj;
121
+ MemoryRegion iomem;
122
+
123
+ qemu_irq irq;
124
+
125
+ CanBusClientState bus_client;
126
+ CanBusState *canbus;
127
+
128
+ struct {
129
+ uint32_t ext_clk_freq;
130
+ } cfg;
131
+
132
+ RegisterInfo reg_info[XLNX_ZYNQMP_CAN_R_MAX];
133
+ uint32_t regs[XLNX_ZYNQMP_CAN_R_MAX];
134
+
135
+ Fifo32 rx_fifo;
136
+ Fifo32 tx_fifo;
137
+ Fifo32 txhpb_fifo;
138
+
139
+ ptimer_state *can_timer;
140
+} XlnxZynqMPCANState;
141
+
59
+
142
+#endif
60
+#endif
143
diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c
61
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
144
new file mode 100644
62
index XXXXXXX..XXXXXXX 100644
145
index XXXXXXX..XXXXXXX
63
--- a/target/arm/kvm_arm.h
146
--- /dev/null
64
+++ b/target/arm/kvm_arm.h
147
+++ b/hw/net/can/xlnx-zynqmp-can.c
65
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
66
*/
67
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray);
68
69
-#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
70
-
71
/**
72
* ARMHostCPUFeatures: information about the host CPU (identified
73
* by asking the host kernel)
74
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/cpu.c
77
+++ b/target/arm/cpu.c
148
@@ -XXX,XX +XXX,XX @@
78
@@ -XXX,XX +XXX,XX @@
149
+/*
79
#include "sysemu/tcg.h"
150
+ * QEMU model of the Xilinx ZynqMP CAN controller.
80
#include "sysemu/hw_accel.h"
151
+ * This implementation is based on the following datasheet:
81
#include "kvm_arm.h"
152
+ * https://www.xilinx.com/support/documentation/user_guides/ug1085-zynq-ultrascale-trm.pdf
82
+#include "hvf_arm.h"
153
+ *
83
#include "disas/capstone.h"
154
+ * Copyright (c) 2020 Xilinx Inc.
84
#include "fpu/softfloat.h"
155
+ *
85
156
+ * Written-by: Vikram Garhwal <fnu.vikram@xilinx.com>
86
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
157
+ *
87
* this is the first point where we can report it.
158
+ * Based on QEMU CAN Device emulation implemented by Jin Yang, Deniz Eren and
88
*/
159
+ * Pavel Pisa
89
if (cpu->host_cpu_probe_failed) {
160
+ *
90
- if (!kvm_enabled()) {
161
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
91
- error_setg(errp, "The 'host' CPU type can only be used with KVM");
162
+ * of this software and associated documentation files (the "Software"), to deal
92
+ if (!kvm_enabled() && !hvf_enabled()) {
163
+ * in the Software without restriction, including without limitation the rights
93
+ error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF");
164
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
94
} else {
165
+ * copies of the Software, and to permit persons to whom the Software is
95
error_setg(errp, "Failed to retrieve host CPU features");
166
+ * furnished to do so, subject to the following conditions:
96
}
167
+ *
97
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
168
+ * The above copyright notice and this permission notice shall be included in
98
#endif /* CONFIG_TCG */
169
+ * all copies or substantial portions of the Software.
99
}
170
+ *
100
171
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
101
-#ifdef CONFIG_KVM
172
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
102
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
173
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
103
static void arm_host_initfn(Object *obj)
174
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
104
{
175
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
105
ARMCPU *cpu = ARM_CPU(obj);
176
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
106
177
+ * THE SOFTWARE.
107
+#ifdef CONFIG_KVM
178
+ */
108
kvm_arm_set_cpu_features_from_host(cpu);
179
+
109
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
180
+#include "qemu/osdep.h"
110
aarch64_add_sve_properties(obj);
181
+#include "hw/sysbus.h"
111
}
182
+#include "hw/register.h"
112
+#else
183
+#include "hw/irq.h"
113
+ hvf_arm_set_cpu_features_from_host(cpu);
184
+#include "qapi/error.h"
185
+#include "qemu/bitops.h"
186
+#include "qemu/log.h"
187
+#include "qemu/cutils.h"
188
+#include "sysemu/sysemu.h"
189
+#include "migration/vmstate.h"
190
+#include "hw/qdev-properties.h"
191
+#include "net/can_emu.h"
192
+#include "net/can_host.h"
193
+#include "qemu/event_notifier.h"
194
+#include "qom/object_interfaces.h"
195
+#include "hw/net/xlnx-zynqmp-can.h"
196
+#include "trace.h"
197
+
198
+#ifndef XLNX_ZYNQMP_CAN_ERR_DEBUG
199
+#define XLNX_ZYNQMP_CAN_ERR_DEBUG 0
200
+#endif
114
+#endif
201
+
115
arm_cpu_post_init(obj);
202
+#define MAX_DLC 8
116
}
203
+#undef ERROR
117
204
+
118
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_register_types(void)
205
+REG32(SOFTWARE_RESET_REGISTER, 0x0)
119
{
206
+ FIELD(SOFTWARE_RESET_REGISTER, CEN, 1, 1)
120
type_register_static(&arm_cpu_type_info);
207
+ FIELD(SOFTWARE_RESET_REGISTER, SRST, 0, 1)
121
208
+REG32(MODE_SELECT_REGISTER, 0x4)
122
-#ifdef CONFIG_KVM
209
+ FIELD(MODE_SELECT_REGISTER, SNOOP, 2, 1)
123
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
210
+ FIELD(MODE_SELECT_REGISTER, LBACK, 1, 1)
124
type_register_static(&host_arm_cpu_type_info);
211
+ FIELD(MODE_SELECT_REGISTER, SLEEP, 0, 1)
125
#endif
212
+REG32(ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, 0x8)
126
}
213
+ FIELD(ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, BRP, 0, 8)
127
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
214
+REG32(ARBITRATION_PHASE_BIT_TIMING_REGISTER, 0xc)
128
index XXXXXXX..XXXXXXX 100644
215
+ FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, SJW, 7, 2)
129
--- a/target/arm/hvf/hvf.c
216
+ FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, TS2, 4, 3)
130
+++ b/target/arm/hvf/hvf.c
217
+ FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, TS1, 0, 4)
131
@@ -XXX,XX +XXX,XX @@
218
+REG32(ERROR_COUNTER_REGISTER, 0x10)
132
#include "sysemu/hvf.h"
219
+ FIELD(ERROR_COUNTER_REGISTER, REC, 8, 8)
133
#include "sysemu/hvf_int.h"
220
+ FIELD(ERROR_COUNTER_REGISTER, TEC, 0, 8)
134
#include "sysemu/hw_accel.h"
221
+REG32(ERROR_STATUS_REGISTER, 0x14)
135
+#include "hvf_arm.h"
222
+ FIELD(ERROR_STATUS_REGISTER, ACKER, 4, 1)
136
223
+ FIELD(ERROR_STATUS_REGISTER, BERR, 3, 1)
137
#include <mach/mach_time.h>
224
+ FIELD(ERROR_STATUS_REGISTER, STER, 2, 1)
138
225
+ FIELD(ERROR_STATUS_REGISTER, FMER, 1, 1)
139
@@ -XXX,XX +XXX,XX @@ typedef struct HVFVTimer {
226
+ FIELD(ERROR_STATUS_REGISTER, CRCER, 0, 1)
140
227
+REG32(STATUS_REGISTER, 0x18)
141
static HVFVTimer vtimer;
228
+ FIELD(STATUS_REGISTER, SNOOP, 12, 1)
142
229
+ FIELD(STATUS_REGISTER, ACFBSY, 11, 1)
143
+typedef struct ARMHostCPUFeatures {
230
+ FIELD(STATUS_REGISTER, TXFLL, 10, 1)
144
+ ARMISARegisters isar;
231
+ FIELD(STATUS_REGISTER, TXBFLL, 9, 1)
145
+ uint64_t features;
232
+ FIELD(STATUS_REGISTER, ESTAT, 7, 2)
146
+ uint64_t midr;
233
+ FIELD(STATUS_REGISTER, ERRWRN, 6, 1)
147
+ uint32_t reset_sctlr;
234
+ FIELD(STATUS_REGISTER, BBSY, 5, 1)
148
+ const char *dtb_compatible;
235
+ FIELD(STATUS_REGISTER, BIDLE, 4, 1)
149
+} ARMHostCPUFeatures;
236
+ FIELD(STATUS_REGISTER, NORMAL, 3, 1)
150
+
237
+ FIELD(STATUS_REGISTER, SLEEP, 2, 1)
151
+static ARMHostCPUFeatures arm_host_cpu_features;
238
+ FIELD(STATUS_REGISTER, LBACK, 1, 1)
152
+
239
+ FIELD(STATUS_REGISTER, CONFIG, 0, 1)
153
struct hvf_reg_match {
240
+REG32(INTERRUPT_STATUS_REGISTER, 0x1c)
154
int reg;
241
+ FIELD(INTERRUPT_STATUS_REGISTER, TXFEMP, 14, 1)
155
uint64_t offset;
242
+ FIELD(INTERRUPT_STATUS_REGISTER, TXFWMEMP, 13, 1)
156
@@ -XXX,XX +XXX,XX @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
243
+ FIELD(INTERRUPT_STATUS_REGISTER, RXFWMFLL, 12, 1)
157
return val;
244
+ FIELD(INTERRUPT_STATUS_REGISTER, WKUP, 11, 1)
158
}
245
+ FIELD(INTERRUPT_STATUS_REGISTER, SLP, 10, 1)
159
246
+ FIELD(INTERRUPT_STATUS_REGISTER, BSOFF, 9, 1)
160
+static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
247
+ FIELD(INTERRUPT_STATUS_REGISTER, ERROR, 8, 1)
248
+ FIELD(INTERRUPT_STATUS_REGISTER, RXNEMP, 7, 1)
249
+ FIELD(INTERRUPT_STATUS_REGISTER, RXOFLW, 6, 1)
250
+ FIELD(INTERRUPT_STATUS_REGISTER, RXUFLW, 5, 1)
251
+ FIELD(INTERRUPT_STATUS_REGISTER, RXOK, 4, 1)
252
+ FIELD(INTERRUPT_STATUS_REGISTER, TXBFLL, 3, 1)
253
+ FIELD(INTERRUPT_STATUS_REGISTER, TXFLL, 2, 1)
254
+ FIELD(INTERRUPT_STATUS_REGISTER, TXOK, 1, 1)
255
+ FIELD(INTERRUPT_STATUS_REGISTER, ARBLST, 0, 1)
256
+REG32(INTERRUPT_ENABLE_REGISTER, 0x20)
257
+ FIELD(INTERRUPT_ENABLE_REGISTER, ETXFEMP, 14, 1)
258
+ FIELD(INTERRUPT_ENABLE_REGISTER, ETXFWMEMP, 13, 1)
259
+ FIELD(INTERRUPT_ENABLE_REGISTER, ERXFWMFLL, 12, 1)
260
+ FIELD(INTERRUPT_ENABLE_REGISTER, EWKUP, 11, 1)
261
+ FIELD(INTERRUPT_ENABLE_REGISTER, ESLP, 10, 1)
262
+ FIELD(INTERRUPT_ENABLE_REGISTER, EBSOFF, 9, 1)
263
+ FIELD(INTERRUPT_ENABLE_REGISTER, EERROR, 8, 1)
264
+ FIELD(INTERRUPT_ENABLE_REGISTER, ERXNEMP, 7, 1)
265
+ FIELD(INTERRUPT_ENABLE_REGISTER, ERXOFLW, 6, 1)
266
+ FIELD(INTERRUPT_ENABLE_REGISTER, ERXUFLW, 5, 1)
267
+ FIELD(INTERRUPT_ENABLE_REGISTER, ERXOK, 4, 1)
268
+ FIELD(INTERRUPT_ENABLE_REGISTER, ETXBFLL, 3, 1)
269
+ FIELD(INTERRUPT_ENABLE_REGISTER, ETXFLL, 2, 1)
270
+ FIELD(INTERRUPT_ENABLE_REGISTER, ETXOK, 1, 1)
271
+ FIELD(INTERRUPT_ENABLE_REGISTER, EARBLST, 0, 1)
272
+REG32(INTERRUPT_CLEAR_REGISTER, 0x24)
273
+ FIELD(INTERRUPT_CLEAR_REGISTER, CTXFEMP, 14, 1)
274
+ FIELD(INTERRUPT_CLEAR_REGISTER, CTXFWMEMP, 13, 1)
275
+ FIELD(INTERRUPT_CLEAR_REGISTER, CRXFWMFLL, 12, 1)
276
+ FIELD(INTERRUPT_CLEAR_REGISTER, CWKUP, 11, 1)
277
+ FIELD(INTERRUPT_CLEAR_REGISTER, CSLP, 10, 1)
278
+ FIELD(INTERRUPT_CLEAR_REGISTER, CBSOFF, 9, 1)
279
+ FIELD(INTERRUPT_CLEAR_REGISTER, CERROR, 8, 1)
280
+ FIELD(INTERRUPT_CLEAR_REGISTER, CRXNEMP, 7, 1)
281
+ FIELD(INTERRUPT_CLEAR_REGISTER, CRXOFLW, 6, 1)
282
+ FIELD(INTERRUPT_CLEAR_REGISTER, CRXUFLW, 5, 1)
283
+ FIELD(INTERRUPT_CLEAR_REGISTER, CRXOK, 4, 1)
284
+ FIELD(INTERRUPT_CLEAR_REGISTER, CTXBFLL, 3, 1)
285
+ FIELD(INTERRUPT_CLEAR_REGISTER, CTXFLL, 2, 1)
286
+ FIELD(INTERRUPT_CLEAR_REGISTER, CTXOK, 1, 1)
287
+ FIELD(INTERRUPT_CLEAR_REGISTER, CARBLST, 0, 1)
288
+REG32(TIMESTAMP_REGISTER, 0x28)
289
+ FIELD(TIMESTAMP_REGISTER, CTS, 0, 1)
290
+REG32(WIR, 0x2c)
291
+ FIELD(WIR, EW, 8, 8)
292
+ FIELD(WIR, FW, 0, 8)
293
+REG32(TXFIFO_ID, 0x30)
294
+ FIELD(TXFIFO_ID, IDH, 21, 11)
295
+ FIELD(TXFIFO_ID, SRRRTR, 20, 1)
296
+ FIELD(TXFIFO_ID, IDE, 19, 1)
297
+ FIELD(TXFIFO_ID, IDL, 1, 18)
298
+ FIELD(TXFIFO_ID, RTR, 0, 1)
299
+REG32(TXFIFO_DLC, 0x34)
300
+ FIELD(TXFIFO_DLC, DLC, 28, 4)
301
+REG32(TXFIFO_DATA1, 0x38)
302
+ FIELD(TXFIFO_DATA1, DB0, 24, 8)
303
+ FIELD(TXFIFO_DATA1, DB1, 16, 8)
304
+ FIELD(TXFIFO_DATA1, DB2, 8, 8)
305
+ FIELD(TXFIFO_DATA1, DB3, 0, 8)
306
+REG32(TXFIFO_DATA2, 0x3c)
307
+ FIELD(TXFIFO_DATA2, DB4, 24, 8)
308
+ FIELD(TXFIFO_DATA2, DB5, 16, 8)
309
+ FIELD(TXFIFO_DATA2, DB6, 8, 8)
310
+ FIELD(TXFIFO_DATA2, DB7, 0, 8)
311
+REG32(TXHPB_ID, 0x40)
312
+ FIELD(TXHPB_ID, IDH, 21, 11)
313
+ FIELD(TXHPB_ID, SRRRTR, 20, 1)
314
+ FIELD(TXHPB_ID, IDE, 19, 1)
315
+ FIELD(TXHPB_ID, IDL, 1, 18)
316
+ FIELD(TXHPB_ID, RTR, 0, 1)
317
+REG32(TXHPB_DLC, 0x44)
318
+ FIELD(TXHPB_DLC, DLC, 28, 4)
319
+REG32(TXHPB_DATA1, 0x48)
320
+ FIELD(TXHPB_DATA1, DB0, 24, 8)
321
+ FIELD(TXHPB_DATA1, DB1, 16, 8)
322
+ FIELD(TXHPB_DATA1, DB2, 8, 8)
323
+ FIELD(TXHPB_DATA1, DB3, 0, 8)
324
+REG32(TXHPB_DATA2, 0x4c)
325
+ FIELD(TXHPB_DATA2, DB4, 24, 8)
326
+ FIELD(TXHPB_DATA2, DB5, 16, 8)
327
+ FIELD(TXHPB_DATA2, DB6, 8, 8)
328
+ FIELD(TXHPB_DATA2, DB7, 0, 8)
329
+REG32(RXFIFO_ID, 0x50)
330
+ FIELD(RXFIFO_ID, IDH, 21, 11)
331
+ FIELD(RXFIFO_ID, SRRRTR, 20, 1)
332
+ FIELD(RXFIFO_ID, IDE, 19, 1)
333
+ FIELD(RXFIFO_ID, IDL, 1, 18)
334
+ FIELD(RXFIFO_ID, RTR, 0, 1)
335
+REG32(RXFIFO_DLC, 0x54)
336
+ FIELD(RXFIFO_DLC, DLC, 28, 4)
337
+ FIELD(RXFIFO_DLC, RXT, 0, 16)
338
+REG32(RXFIFO_DATA1, 0x58)
339
+ FIELD(RXFIFO_DATA1, DB0, 24, 8)
340
+ FIELD(RXFIFO_DATA1, DB1, 16, 8)
341
+ FIELD(RXFIFO_DATA1, DB2, 8, 8)
342
+ FIELD(RXFIFO_DATA1, DB3, 0, 8)
343
+REG32(RXFIFO_DATA2, 0x5c)
344
+ FIELD(RXFIFO_DATA2, DB4, 24, 8)
345
+ FIELD(RXFIFO_DATA2, DB5, 16, 8)
346
+ FIELD(RXFIFO_DATA2, DB6, 8, 8)
347
+ FIELD(RXFIFO_DATA2, DB7, 0, 8)
348
+REG32(AFR, 0x60)
349
+ FIELD(AFR, UAF4, 3, 1)
350
+ FIELD(AFR, UAF3, 2, 1)
351
+ FIELD(AFR, UAF2, 1, 1)
352
+ FIELD(AFR, UAF1, 0, 1)
353
+REG32(AFMR1, 0x64)
354
+ FIELD(AFMR1, AMIDH, 21, 11)
355
+ FIELD(AFMR1, AMSRR, 20, 1)
356
+ FIELD(AFMR1, AMIDE, 19, 1)
357
+ FIELD(AFMR1, AMIDL, 1, 18)
358
+ FIELD(AFMR1, AMRTR, 0, 1)
359
+REG32(AFIR1, 0x68)
360
+ FIELD(AFIR1, AIIDH, 21, 11)
361
+ FIELD(AFIR1, AISRR, 20, 1)
362
+ FIELD(AFIR1, AIIDE, 19, 1)
363
+ FIELD(AFIR1, AIIDL, 1, 18)
364
+ FIELD(AFIR1, AIRTR, 0, 1)
365
+REG32(AFMR2, 0x6c)
366
+ FIELD(AFMR2, AMIDH, 21, 11)
367
+ FIELD(AFMR2, AMSRR, 20, 1)
368
+ FIELD(AFMR2, AMIDE, 19, 1)
369
+ FIELD(AFMR2, AMIDL, 1, 18)
370
+ FIELD(AFMR2, AMRTR, 0, 1)
371
+REG32(AFIR2, 0x70)
372
+ FIELD(AFIR2, AIIDH, 21, 11)
373
+ FIELD(AFIR2, AISRR, 20, 1)
374
+ FIELD(AFIR2, AIIDE, 19, 1)
375
+ FIELD(AFIR2, AIIDL, 1, 18)
376
+ FIELD(AFIR2, AIRTR, 0, 1)
377
+REG32(AFMR3, 0x74)
378
+ FIELD(AFMR3, AMIDH, 21, 11)
379
+ FIELD(AFMR3, AMSRR, 20, 1)
380
+ FIELD(AFMR3, AMIDE, 19, 1)
381
+ FIELD(AFMR3, AMIDL, 1, 18)
382
+ FIELD(AFMR3, AMRTR, 0, 1)
383
+REG32(AFIR3, 0x78)
384
+ FIELD(AFIR3, AIIDH, 21, 11)
385
+ FIELD(AFIR3, AISRR, 20, 1)
386
+ FIELD(AFIR3, AIIDE, 19, 1)
387
+ FIELD(AFIR3, AIIDL, 1, 18)
388
+ FIELD(AFIR3, AIRTR, 0, 1)
389
+REG32(AFMR4, 0x7c)
390
+ FIELD(AFMR4, AMIDH, 21, 11)
391
+ FIELD(AFMR4, AMSRR, 20, 1)
392
+ FIELD(AFMR4, AMIDE, 19, 1)
393
+ FIELD(AFMR4, AMIDL, 1, 18)
394
+ FIELD(AFMR4, AMRTR, 0, 1)
395
+REG32(AFIR4, 0x80)
396
+ FIELD(AFIR4, AIIDH, 21, 11)
397
+ FIELD(AFIR4, AISRR, 20, 1)
398
+ FIELD(AFIR4, AIIDE, 19, 1)
399
+ FIELD(AFIR4, AIIDL, 1, 18)
400
+ FIELD(AFIR4, AIRTR, 0, 1)
401
+
402
+static void can_update_irq(XlnxZynqMPCANState *s)
403
+{
161
+{
404
+ uint32_t irq;
162
+ ARMISARegisters host_isar = {};
405
+
163
+ const struct isar_regs {
406
+ /* Watermark register interrupts. */
164
+ int reg;
407
+ if ((fifo32_num_free(&s->tx_fifo) / CAN_FRAME_SIZE) >
165
+ uint64_t *val;
408
+ ARRAY_FIELD_EX32(s->regs, WIR, EW)) {
166
+ } regs[] = {
409
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXFWMEMP, 1);
167
+ { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
410
+ }
168
+ { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
411
+
169
+ { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
412
+ if ((fifo32_num_used(&s->rx_fifo) / CAN_FRAME_SIZE) >
170
+ { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
413
+ ARRAY_FIELD_EX32(s->regs, WIR, FW)) {
171
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
414
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFWMFLL, 1);
172
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
415
+ }
173
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
416
+
174
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
417
+ /* RX Interrupts. */
175
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
418
+ if (fifo32_num_used(&s->rx_fifo) >= CAN_FRAME_SIZE) {
176
+ };
419
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXNEMP, 1);
177
+ hv_vcpu_t fd;
420
+ }
178
+ hv_return_t r = HV_SUCCESS;
421
+
179
+ hv_vcpu_exit_t *exit;
422
+ /* TX interrupts. */
180
+ int i;
423
+ if (fifo32_is_empty(&s->tx_fifo)) {
181
+
424
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXFEMP, 1);
182
+ ahcf->dtb_compatible = "arm,arm-v8";
425
+ }
183
+ ahcf->features = (1ULL << ARM_FEATURE_V8) |
426
+
184
+ (1ULL << ARM_FEATURE_NEON) |
427
+ if (fifo32_is_full(&s->tx_fifo)) {
185
+ (1ULL << ARM_FEATURE_AARCH64) |
428
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXFLL, 1);
186
+ (1ULL << ARM_FEATURE_PMU) |
429
+ }
187
+ (1ULL << ARM_FEATURE_GENERIC_TIMER);
430
+
188
+
431
+ if (fifo32_is_full(&s->txhpb_fifo)) {
189
+ /* We set up a small vcpu to extract host registers */
432
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXBFLL, 1);
190
+
433
+ }
191
+ if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
434
+
192
+ return false;
435
+ irq = s->regs[R_INTERRUPT_STATUS_REGISTER];
193
+ }
436
+ irq &= s->regs[R_INTERRUPT_ENABLE_REGISTER];
194
+
437
+
195
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
438
+ trace_xlnx_can_update_irq(s->regs[R_INTERRUPT_STATUS_REGISTER],
196
+ r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
439
+ s->regs[R_INTERRUPT_ENABLE_REGISTER], irq);
197
+ }
440
+ qemu_set_irq(s->irq, irq);
198
+ r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
199
+ r |= hv_vcpu_destroy(fd);
200
+
201
+ ahcf->isar = host_isar;
202
+
203
+ /*
204
+ * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
205
+ * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
206
+ */
207
+ ahcf->reset_sctlr = 0x30100180;
208
+ /*
209
+ * SPAN is disabled by default when SCTLR.SPAN=1. To improve compatibility,
210
+ * let's disable it on boot and then allow guest software to turn it on by
211
+ * setting it to 0.
212
+ */
213
+ ahcf->reset_sctlr |= 0x00800000;
214
+
215
+ /* Make sure we don't advertise AArch32 support for EL0/EL1 */
216
+ if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
217
+ return false;
218
+ }
219
+
220
+ return r == HV_SUCCESS;
441
+}
221
+}
442
+
222
+
443
+static void can_ier_post_write(RegisterInfo *reg, uint64_t val)
223
+void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
444
+{
224
+{
445
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
225
+ if (!arm_host_cpu_features.dtb_compatible) {
446
+
226
+ if (!hvf_enabled() ||
447
+ can_update_irq(s);
227
+ !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
448
+}
449
+
450
+static uint64_t can_icr_pre_write(RegisterInfo *reg, uint64_t val)
451
+{
452
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
453
+
454
+ s->regs[R_INTERRUPT_STATUS_REGISTER] &= ~val;
455
+ can_update_irq(s);
456
+
457
+ return 0;
458
+}
459
+
460
+static void can_config_reset(XlnxZynqMPCANState *s)
461
+{
462
+ /* Reset all the configuration registers. */
463
+ register_reset(&s->reg_info[R_SOFTWARE_RESET_REGISTER]);
464
+ register_reset(&s->reg_info[R_MODE_SELECT_REGISTER]);
465
+ register_reset(
466
+ &s->reg_info[R_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER]);
467
+ register_reset(&s->reg_info[R_ARBITRATION_PHASE_BIT_TIMING_REGISTER]);
468
+ register_reset(&s->reg_info[R_STATUS_REGISTER]);
469
+ register_reset(&s->reg_info[R_INTERRUPT_STATUS_REGISTER]);
470
+ register_reset(&s->reg_info[R_INTERRUPT_ENABLE_REGISTER]);
471
+ register_reset(&s->reg_info[R_INTERRUPT_CLEAR_REGISTER]);
472
+ register_reset(&s->reg_info[R_WIR]);
473
+}
474
+
475
+static void can_config_mode(XlnxZynqMPCANState *s)
476
+{
477
+ register_reset(&s->reg_info[R_ERROR_COUNTER_REGISTER]);
478
+ register_reset(&s->reg_info[R_ERROR_STATUS_REGISTER]);
479
+
480
+ /* Put XlnxZynqMPCAN in configuration mode. */
481
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, CONFIG, 1);
482
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, WKUP, 0);
483
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, SLP, 0);
484
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, BSOFF, 0);
485
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, ERROR, 0);
486
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOFLW, 0);
487
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 0);
488
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXOK, 0);
489
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, ARBLST, 0);
490
+
491
+ can_update_irq(s);
492
+}
493
+
494
+static void update_status_register_mode_bits(XlnxZynqMPCANState *s)
495
+{
496
+ bool sleep_status = ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP);
497
+ bool sleep_mode = ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SLEEP);
498
+ /* Wake up interrupt bit. */
499
+ bool wakeup_irq_val = sleep_status && (sleep_mode == 0);
500
+ /* Sleep interrupt bit. */
501
+ bool sleep_irq_val = sleep_mode && (sleep_status == 0);
502
+
503
+ /* Clear previous core mode status bits. */
504
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, LBACK, 0);
505
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SLEEP, 0);
506
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SNOOP, 0);
507
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, NORMAL, 0);
508
+
509
+ /* set current mode bit and generate irqs accordingly. */
510
+ if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, LBACK)) {
511
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, LBACK, 1);
512
+ } else if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SLEEP)) {
513
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SLEEP, 1);
514
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, SLP,
515
+ sleep_irq_val);
516
+ } else if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SNOOP)) {
517
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SNOOP, 1);
518
+ } else {
519
+ /*
520
+ * If all bits are zero then XlnxZynqMPCAN is set in normal mode.
521
+ */
522
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, NORMAL, 1);
523
+ /* Set wakeup interrupt bit. */
524
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, WKUP,
525
+ wakeup_irq_val);
526
+ }
527
+
528
+ can_update_irq(s);
529
+}
530
+
531
+static void can_exit_sleep_mode(XlnxZynqMPCANState *s)
532
+{
533
+ ARRAY_FIELD_DP32(s->regs, MODE_SELECT_REGISTER, SLEEP, 0);
534
+ update_status_register_mode_bits(s);
535
+}
536
+
537
+static void generate_frame(qemu_can_frame *frame, uint32_t *data)
538
+{
539
+ frame->can_id = data[0];
540
+ frame->can_dlc = FIELD_EX32(data[1], TXFIFO_DLC, DLC);
541
+
542
+ frame->data[0] = FIELD_EX32(data[2], TXFIFO_DATA1, DB3);
543
+ frame->data[1] = FIELD_EX32(data[2], TXFIFO_DATA1, DB2);
544
+ frame->data[2] = FIELD_EX32(data[2], TXFIFO_DATA1, DB1);
545
+ frame->data[3] = FIELD_EX32(data[2], TXFIFO_DATA1, DB0);
546
+
547
+ frame->data[4] = FIELD_EX32(data[3], TXFIFO_DATA2, DB7);
548
+ frame->data[5] = FIELD_EX32(data[3], TXFIFO_DATA2, DB6);
549
+ frame->data[6] = FIELD_EX32(data[3], TXFIFO_DATA2, DB5);
550
+ frame->data[7] = FIELD_EX32(data[3], TXFIFO_DATA2, DB4);
551
+}
552
+
553
+static bool tx_ready_check(XlnxZynqMPCANState *s)
554
+{
555
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, SRST)) {
556
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
557
+
558
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer data while"
559
+ " data while controller is in reset mode.\n",
560
+ path);
561
+ return false;
562
+ }
563
+
564
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) {
565
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
566
+
567
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer"
568
+ " data while controller is in configuration mode. Reset"
569
+ " the core so operations can start fresh.\n",
570
+ path);
571
+ return false;
572
+ }
573
+
574
+ if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SNOOP)) {
575
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
576
+
577
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer"
578
+ " data while controller is in SNOOP MODE.\n",
579
+ path);
580
+ return false;
581
+ }
582
+
583
+ return true;
584
+}
585
+
586
+static void transfer_fifo(XlnxZynqMPCANState *s, Fifo32 *fifo)
587
+{
588
+ qemu_can_frame frame;
589
+ uint32_t data[CAN_FRAME_SIZE];
590
+ int i;
591
+ bool can_tx = tx_ready_check(s);
592
+
593
+ if (!can_tx) {
594
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
595
+
596
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is not enabled for data"
597
+ " transfer.\n", path);
598
+ can_update_irq(s);
599
+ return;
600
+ }
601
+
602
+ while (!fifo32_is_empty(fifo)) {
603
+ for (i = 0; i < CAN_FRAME_SIZE; i++) {
604
+ data[i] = fifo32_pop(fifo);
605
+ }
606
+
607
+ if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, LBACK)) {
608
+ /*
228
+ /*
609
+ * Controller is in loopback. In Loopback mode, the CAN core
229
+ * We can't report this error yet, so flag that we need to
610
+ * transmits a recessive bitstream on to the XlnxZynqMPCAN Bus.
230
+ * in arm_cpu_realizefn().
611
+ * Any message transmitted is looped back to the RX line and
612
+ * acknowledged. The XlnxZynqMPCAN core receives any message
613
+ * that it transmits.
614
+ */
231
+ */
615
+ if (fifo32_is_full(&s->rx_fifo)) {
232
+ cpu->host_cpu_probe_failed = true;
616
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOFLW, 1);
617
+ } else {
618
+ for (i = 0; i < CAN_FRAME_SIZE; i++) {
619
+ fifo32_push(&s->rx_fifo, data[i]);
620
+ }
621
+
622
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1);
623
+ }
624
+ } else {
625
+ /* Normal mode Tx. */
626
+ generate_frame(&frame, data);
627
+
628
+ trace_xlnx_can_tx_data(frame.can_id, frame.can_dlc,
629
+ frame.data[0], frame.data[1],
630
+ frame.data[2], frame.data[3],
631
+ frame.data[4], frame.data[5],
632
+ frame.data[6], frame.data[7]);
633
+ can_bus_client_send(&s->bus_client, &frame, 1);
634
+ }
635
+ }
636
+
637
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXOK, 1);
638
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, TXBFLL, 0);
639
+
640
+ if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP)) {
641
+ can_exit_sleep_mode(s);
642
+ }
643
+
644
+ can_update_irq(s);
645
+}
646
+
647
+static uint64_t can_srr_pre_write(RegisterInfo *reg, uint64_t val)
648
+{
649
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
650
+
651
+ ARRAY_FIELD_DP32(s->regs, SOFTWARE_RESET_REGISTER, CEN,
652
+ FIELD_EX32(val, SOFTWARE_RESET_REGISTER, CEN));
653
+
654
+ if (FIELD_EX32(val, SOFTWARE_RESET_REGISTER, SRST)) {
655
+ trace_xlnx_can_reset(val);
656
+
657
+ /* First, core will do software reset then will enter in config mode. */
658
+ can_config_reset(s);
659
+ }
660
+
661
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) {
662
+ can_config_mode(s);
663
+ } else {
664
+ /*
665
+ * Leave config mode. Now XlnxZynqMPCAN core will enter normal,
666
+ * sleep, snoop or loopback mode depending upon LBACK, SLEEP, SNOOP
667
+ * register states.
668
+ */
669
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, CONFIG, 0);
670
+
671
+ ptimer_transaction_begin(s->can_timer);
672
+ ptimer_set_count(s->can_timer, 0);
673
+ ptimer_transaction_commit(s->can_timer);
674
+
675
+ /* XlnxZynqMPCAN is out of config mode. It will send pending data. */
676
+ transfer_fifo(s, &s->txhpb_fifo);
677
+ transfer_fifo(s, &s->tx_fifo);
678
+ }
679
+
680
+ update_status_register_mode_bits(s);
681
+
682
+ return s->regs[R_SOFTWARE_RESET_REGISTER];
683
+}
684
+
685
+static uint64_t can_msr_pre_write(RegisterInfo *reg, uint64_t val)
686
+{
687
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
688
+ uint8_t multi_mode;
689
+
690
+ /*
691
+ * Multiple mode set check. This is done to make sure user doesn't set
692
+ * multiple modes.
693
+ */
694
+ multi_mode = FIELD_EX32(val, MODE_SELECT_REGISTER, LBACK) +
695
+ FIELD_EX32(val, MODE_SELECT_REGISTER, SLEEP) +
696
+ FIELD_EX32(val, MODE_SELECT_REGISTER, SNOOP);
697
+
698
+ if (multi_mode > 1) {
699
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
700
+
701
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to config"
702
+ " several modes simultaneously. One mode will be selected"
703
+ " according to their priority: LBACK > SLEEP > SNOOP.\n",
704
+ path);
705
+ }
706
+
707
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) {
708
+ /* We are in configuration mode, any mode can be selected. */
709
+ s->regs[R_MODE_SELECT_REGISTER] = val;
710
+ } else {
711
+ bool sleep_mode_bit = FIELD_EX32(val, MODE_SELECT_REGISTER, SLEEP);
712
+
713
+ ARRAY_FIELD_DP32(s->regs, MODE_SELECT_REGISTER, SLEEP, sleep_mode_bit);
714
+
715
+ if (FIELD_EX32(val, MODE_SELECT_REGISTER, LBACK)) {
716
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
717
+
718
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to set"
719
+ " LBACK mode without setting CEN bit as 0.\n",
720
+ path);
721
+ } else if (FIELD_EX32(val, MODE_SELECT_REGISTER, SNOOP)) {
722
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
723
+
724
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to set"
725
+ " SNOOP mode without setting CEN bit as 0.\n",
726
+ path);
727
+ }
728
+
729
+ update_status_register_mode_bits(s);
730
+ }
731
+
732
+ return s->regs[R_MODE_SELECT_REGISTER];
733
+}
734
+
735
+static uint64_t can_brpr_pre_write(RegisterInfo *reg, uint64_t val)
736
+{
737
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
738
+
739
+ /* Only allow writes when in config mode. */
740
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) {
741
+ return s->regs[R_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER];
742
+ }
743
+
744
+ return val;
745
+}
746
+
747
+static uint64_t can_btr_pre_write(RegisterInfo *reg, uint64_t val)
748
+{
749
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
750
+
751
+ /* Only allow writes when in config mode. */
752
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) {
753
+ return s->regs[R_ARBITRATION_PHASE_BIT_TIMING_REGISTER];
754
+ }
755
+
756
+ return val;
757
+}
758
+
759
+static uint64_t can_tcr_pre_write(RegisterInfo *reg, uint64_t val)
760
+{
761
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
762
+
763
+ if (FIELD_EX32(val, TIMESTAMP_REGISTER, CTS)) {
764
+ ptimer_transaction_begin(s->can_timer);
765
+ ptimer_set_count(s->can_timer, 0);
766
+ ptimer_transaction_commit(s->can_timer);
767
+ }
768
+
769
+ return 0;
770
+}
771
+
772
+static void update_rx_fifo(XlnxZynqMPCANState *s, const qemu_can_frame *frame)
773
+{
774
+ bool filter_pass = false;
775
+ uint16_t timestamp = 0;
776
+
777
+ /* If no filter is enabled. Message will be stored in FIFO. */
778
+ if (!((ARRAY_FIELD_EX32(s->regs, AFR, UAF1)) |
779
+ (ARRAY_FIELD_EX32(s->regs, AFR, UAF2)) |
780
+ (ARRAY_FIELD_EX32(s->regs, AFR, UAF3)) |
781
+ (ARRAY_FIELD_EX32(s->regs, AFR, UAF4)))) {
782
+ filter_pass = true;
783
+ }
784
+
785
+ /*
786
+ * Messages that pass any of the acceptance filters will be stored in
787
+ * the RX FIFO.
788
+ */
789
+ if (ARRAY_FIELD_EX32(s->regs, AFR, UAF1)) {
790
+ uint32_t id_masked = s->regs[R_AFMR1] & frame->can_id;
791
+ uint32_t filter_id_masked = s->regs[R_AFMR1] & s->regs[R_AFIR1];
792
+
793
+ if (filter_id_masked == id_masked) {
794
+ filter_pass = true;
795
+ }
796
+ }
797
+
798
+ if (ARRAY_FIELD_EX32(s->regs, AFR, UAF2)) {
799
+ uint32_t id_masked = s->regs[R_AFMR2] & frame->can_id;
800
+ uint32_t filter_id_masked = s->regs[R_AFMR2] & s->regs[R_AFIR2];
801
+
802
+ if (filter_id_masked == id_masked) {
803
+ filter_pass = true;
804
+ }
805
+ }
806
+
807
+ if (ARRAY_FIELD_EX32(s->regs, AFR, UAF3)) {
808
+ uint32_t id_masked = s->regs[R_AFMR3] & frame->can_id;
809
+ uint32_t filter_id_masked = s->regs[R_AFMR3] & s->regs[R_AFIR3];
810
+
811
+ if (filter_id_masked == id_masked) {
812
+ filter_pass = true;
813
+ }
814
+ }
815
+
816
+ if (ARRAY_FIELD_EX32(s->regs, AFR, UAF4)) {
817
+ uint32_t id_masked = s->regs[R_AFMR4] & frame->can_id;
818
+ uint32_t filter_id_masked = s->regs[R_AFMR4] & s->regs[R_AFIR4];
819
+
820
+ if (filter_id_masked == id_masked) {
821
+ filter_pass = true;
822
+ }
823
+ }
824
+
825
+ if (!filter_pass) {
826
+ trace_xlnx_can_rx_fifo_filter_reject(frame->can_id, frame->can_dlc);
827
+ return;
828
+ }
829
+
830
+ /* Store the message in fifo if it passed through any of the filters. */
831
+ if (filter_pass && frame->can_dlc <= MAX_DLC) {
832
+
833
+ if (fifo32_is_full(&s->rx_fifo)) {
834
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOFLW, 1);
835
+ } else {
836
+ timestamp = CAN_TIMER_MAX - ptimer_get_count(s->can_timer);
837
+
838
+ fifo32_push(&s->rx_fifo, frame->can_id);
839
+
840
+ fifo32_push(&s->rx_fifo, deposit32(0, R_RXFIFO_DLC_DLC_SHIFT,
841
+ R_RXFIFO_DLC_DLC_LENGTH,
842
+ frame->can_dlc) |
843
+ deposit32(0, R_RXFIFO_DLC_RXT_SHIFT,
844
+ R_RXFIFO_DLC_RXT_LENGTH,
845
+ timestamp));
846
+
847
+ /* First 32 bit of the data. */
848
+ fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA1_DB3_SHIFT,
849
+ R_TXFIFO_DATA1_DB3_LENGTH,
850
+ frame->data[0]) |
851
+ deposit32(0, R_TXFIFO_DATA1_DB2_SHIFT,
852
+ R_TXFIFO_DATA1_DB2_LENGTH,
853
+ frame->data[1]) |
854
+ deposit32(0, R_TXFIFO_DATA1_DB1_SHIFT,
855
+ R_TXFIFO_DATA1_DB1_LENGTH,
856
+ frame->data[2]) |
857
+ deposit32(0, R_TXFIFO_DATA1_DB0_SHIFT,
858
+ R_TXFIFO_DATA1_DB0_LENGTH,
859
+ frame->data[3]));
860
+ /* Last 32 bit of the data. */
861
+ fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA2_DB7_SHIFT,
862
+ R_TXFIFO_DATA2_DB7_LENGTH,
863
+ frame->data[4]) |
864
+ deposit32(0, R_TXFIFO_DATA2_DB6_SHIFT,
865
+ R_TXFIFO_DATA2_DB6_LENGTH,
866
+ frame->data[5]) |
867
+ deposit32(0, R_TXFIFO_DATA2_DB5_SHIFT,
868
+ R_TXFIFO_DATA2_DB5_LENGTH,
869
+ frame->data[6]) |
870
+ deposit32(0, R_TXFIFO_DATA2_DB4_SHIFT,
871
+ R_TXFIFO_DATA2_DB4_LENGTH,
872
+ frame->data[7]));
873
+
874
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1);
875
+ trace_xlnx_can_rx_data(frame->can_id, frame->can_dlc,
876
+ frame->data[0], frame->data[1],
877
+ frame->data[2], frame->data[3],
878
+ frame->data[4], frame->data[5],
879
+ frame->data[6], frame->data[7]);
880
+ }
881
+
882
+ can_update_irq(s);
883
+ }
884
+}
885
+
886
+static uint64_t can_rxfifo_pre_read(RegisterInfo *reg, uint64_t val)
887
+{
888
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
889
+
890
+ if (!fifo32_is_empty(&s->rx_fifo)) {
891
+ val = fifo32_pop(&s->rx_fifo);
892
+ } else {
893
+ ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXUFLW, 1);
894
+ }
895
+
896
+ can_update_irq(s);
897
+ return val;
898
+}
899
+
900
+static void can_filter_enable_post_write(RegisterInfo *reg, uint64_t val)
901
+{
902
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
903
+
904
+ if (ARRAY_FIELD_EX32(s->regs, AFR, UAF1) &&
905
+ ARRAY_FIELD_EX32(s->regs, AFR, UAF2) &&
906
+ ARRAY_FIELD_EX32(s->regs, AFR, UAF3) &&
907
+ ARRAY_FIELD_EX32(s->regs, AFR, UAF4)) {
908
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, ACFBSY, 1);
909
+ } else {
910
+ ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, ACFBSY, 0);
911
+ }
912
+}
913
+
914
+static uint64_t can_filter_mask_pre_write(RegisterInfo *reg, uint64_t val)
915
+{
916
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
917
+ uint32_t reg_idx = (reg->access->addr) / 4;
918
+ uint32_t filter_number = (reg_idx - R_AFMR1) / 2;
919
+
920
+ /* modify an acceptance filter, the corresponding UAF bit should be '0'. */
921
+ if (!(s->regs[R_AFR] & (1 << filter_number))) {
922
+ s->regs[reg_idx] = val;
923
+
924
+ trace_xlnx_can_filter_mask_pre_write(filter_number, s->regs[reg_idx]);
925
+ } else {
926
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
927
+
928
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d"
929
+ " mask is not set as corresponding UAF bit is not 0.\n",
930
+ path, filter_number + 1);
931
+ }
932
+
933
+ return s->regs[reg_idx];
934
+}
935
+
936
+static uint64_t can_filter_id_pre_write(RegisterInfo *reg, uint64_t val)
937
+{
938
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
939
+ uint32_t reg_idx = (reg->access->addr) / 4;
940
+ uint32_t filter_number = (reg_idx - R_AFIR1) / 2;
941
+
942
+ if (!(s->regs[R_AFR] & (1 << filter_number))) {
943
+ s->regs[reg_idx] = val;
944
+
945
+ trace_xlnx_can_filter_id_pre_write(filter_number, s->regs[reg_idx]);
946
+ } else {
947
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
948
+
949
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d"
950
+ " id is not set as corresponding UAF bit is not 0.\n",
951
+ path, filter_number + 1);
952
+ }
953
+
954
+ return s->regs[reg_idx];
955
+}
956
+
957
+static void can_tx_post_write(RegisterInfo *reg, uint64_t val)
958
+{
959
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque);
960
+
961
+ bool is_txhpb = reg->access->addr > A_TXFIFO_DATA2;
962
+
963
+ bool initiate_transfer = (reg->access->addr == A_TXFIFO_DATA2) ||
964
+ (reg->access->addr == A_TXHPB_DATA2);
965
+
966
+ Fifo32 *f = is_txhpb ? &s->txhpb_fifo : &s->tx_fifo;
967
+
968
+ if (!fifo32_is_full(f)) {
969
+ fifo32_push(f, val);
970
+ } else {
971
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
972
+
973
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: TX FIFO is full.\n", path);
974
+ }
975
+
976
+ /* Initiate the message send if TX register is written. */
977
+ if (initiate_transfer &&
978
+ ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) {
979
+ transfer_fifo(s, f);
980
+ }
981
+
982
+ can_update_irq(s);
983
+}
984
+
985
+static const RegisterAccessInfo can_regs_info[] = {
986
+ { .name = "SOFTWARE_RESET_REGISTER",
987
+ .addr = A_SOFTWARE_RESET_REGISTER,
988
+ .rsvd = 0xfffffffc,
989
+ .pre_write = can_srr_pre_write,
990
+ },{ .name = "MODE_SELECT_REGISTER",
991
+ .addr = A_MODE_SELECT_REGISTER,
992
+ .rsvd = 0xfffffff8,
993
+ .pre_write = can_msr_pre_write,
994
+ },{ .name = "ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER",
995
+ .addr = A_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER,
996
+ .rsvd = 0xffffff00,
997
+ .pre_write = can_brpr_pre_write,
998
+ },{ .name = "ARBITRATION_PHASE_BIT_TIMING_REGISTER",
999
+ .addr = A_ARBITRATION_PHASE_BIT_TIMING_REGISTER,
1000
+ .rsvd = 0xfffffe00,
1001
+ .pre_write = can_btr_pre_write,
1002
+ },{ .name = "ERROR_COUNTER_REGISTER",
1003
+ .addr = A_ERROR_COUNTER_REGISTER,
1004
+ .rsvd = 0xffff0000,
1005
+ .ro = 0xffffffff,
1006
+ },{ .name = "ERROR_STATUS_REGISTER",
1007
+ .addr = A_ERROR_STATUS_REGISTER,
1008
+ .rsvd = 0xffffffe0,
1009
+ .w1c = 0x1f,
1010
+ },{ .name = "STATUS_REGISTER", .addr = A_STATUS_REGISTER,
1011
+ .reset = 0x1,
1012
+ .rsvd = 0xffffe000,
1013
+ .ro = 0x1fff,
1014
+ },{ .name = "INTERRUPT_STATUS_REGISTER",
1015
+ .addr = A_INTERRUPT_STATUS_REGISTER,
1016
+ .reset = 0x6000,
1017
+ .rsvd = 0xffff8000,
1018
+ .ro = 0x7fff,
1019
+ },{ .name = "INTERRUPT_ENABLE_REGISTER",
1020
+ .addr = A_INTERRUPT_ENABLE_REGISTER,
1021
+ .rsvd = 0xffff8000,
1022
+ .post_write = can_ier_post_write,
1023
+ },{ .name = "INTERRUPT_CLEAR_REGISTER",
1024
+ .addr = A_INTERRUPT_CLEAR_REGISTER,
1025
+ .rsvd = 0xffff8000,
1026
+ .pre_write = can_icr_pre_write,
1027
+ },{ .name = "TIMESTAMP_REGISTER",
1028
+ .addr = A_TIMESTAMP_REGISTER,
1029
+ .rsvd = 0xfffffffe,
1030
+ .pre_write = can_tcr_pre_write,
1031
+ },{ .name = "WIR", .addr = A_WIR,
1032
+ .reset = 0x3f3f,
1033
+ .rsvd = 0xffff0000,
1034
+ },{ .name = "TXFIFO_ID", .addr = A_TXFIFO_ID,
1035
+ .post_write = can_tx_post_write,
1036
+ },{ .name = "TXFIFO_DLC", .addr = A_TXFIFO_DLC,
1037
+ .rsvd = 0xfffffff,
1038
+ .post_write = can_tx_post_write,
1039
+ },{ .name = "TXFIFO_DATA1", .addr = A_TXFIFO_DATA1,
1040
+ .post_write = can_tx_post_write,
1041
+ },{ .name = "TXFIFO_DATA2", .addr = A_TXFIFO_DATA2,
1042
+ .post_write = can_tx_post_write,
1043
+ },{ .name = "TXHPB_ID", .addr = A_TXHPB_ID,
1044
+ .post_write = can_tx_post_write,
1045
+ },{ .name = "TXHPB_DLC", .addr = A_TXHPB_DLC,
1046
+ .rsvd = 0xfffffff,
1047
+ .post_write = can_tx_post_write,
1048
+ },{ .name = "TXHPB_DATA1", .addr = A_TXHPB_DATA1,
1049
+ .post_write = can_tx_post_write,
1050
+ },{ .name = "TXHPB_DATA2", .addr = A_TXHPB_DATA2,
1051
+ .post_write = can_tx_post_write,
1052
+ },{ .name = "RXFIFO_ID", .addr = A_RXFIFO_ID,
1053
+ .ro = 0xffffffff,
1054
+ .post_read = can_rxfifo_pre_read,
1055
+ },{ .name = "RXFIFO_DLC", .addr = A_RXFIFO_DLC,
1056
+ .rsvd = 0xfff0000,
1057
+ .post_read = can_rxfifo_pre_read,
1058
+ },{ .name = "RXFIFO_DATA1", .addr = A_RXFIFO_DATA1,
1059
+ .post_read = can_rxfifo_pre_read,
1060
+ },{ .name = "RXFIFO_DATA2", .addr = A_RXFIFO_DATA2,
1061
+ .post_read = can_rxfifo_pre_read,
1062
+ },{ .name = "AFR", .addr = A_AFR,
1063
+ .rsvd = 0xfffffff0,
1064
+ .post_write = can_filter_enable_post_write,
1065
+ },{ .name = "AFMR1", .addr = A_AFMR1,
1066
+ .pre_write = can_filter_mask_pre_write,
1067
+ },{ .name = "AFIR1", .addr = A_AFIR1,
1068
+ .pre_write = can_filter_id_pre_write,
1069
+ },{ .name = "AFMR2", .addr = A_AFMR2,
1070
+ .pre_write = can_filter_mask_pre_write,
1071
+ },{ .name = "AFIR2", .addr = A_AFIR2,
1072
+ .pre_write = can_filter_id_pre_write,
1073
+ },{ .name = "AFMR3", .addr = A_AFMR3,
1074
+ .pre_write = can_filter_mask_pre_write,
1075
+ },{ .name = "AFIR3", .addr = A_AFIR3,
1076
+ .pre_write = can_filter_id_pre_write,
1077
+ },{ .name = "AFMR4", .addr = A_AFMR4,
1078
+ .pre_write = can_filter_mask_pre_write,
1079
+ },{ .name = "AFIR4", .addr = A_AFIR4,
1080
+ .pre_write = can_filter_id_pre_write,
1081
+ }
1082
+};
1083
+
1084
+static void xlnx_zynqmp_can_ptimer_cb(void *opaque)
1085
+{
1086
+ /* No action required on the timer rollover. */
1087
+}
1088
+
1089
+static const MemoryRegionOps can_ops = {
1090
+ .read = register_read_memory,
1091
+ .write = register_write_memory,
1092
+ .endianness = DEVICE_LITTLE_ENDIAN,
1093
+ .valid = {
1094
+ .min_access_size = 4,
1095
+ .max_access_size = 4,
1096
+ },
1097
+};
1098
+
1099
+static void xlnx_zynqmp_can_reset_init(Object *obj, ResetType type)
1100
+{
1101
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(obj);
1102
+ unsigned int i;
1103
+
1104
+ for (i = R_RXFIFO_ID; i < ARRAY_SIZE(s->reg_info); ++i) {
1105
+ register_reset(&s->reg_info[i]);
1106
+ }
1107
+
1108
+ ptimer_transaction_begin(s->can_timer);
1109
+ ptimer_set_count(s->can_timer, 0);
1110
+ ptimer_transaction_commit(s->can_timer);
1111
+}
1112
+
1113
+static void xlnx_zynqmp_can_reset_hold(Object *obj)
1114
+{
1115
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(obj);
1116
+ unsigned int i;
1117
+
1118
+ for (i = 0; i < R_RXFIFO_ID; ++i) {
1119
+ register_reset(&s->reg_info[i]);
1120
+ }
1121
+
1122
+ /*
1123
+ * Reset FIFOs when CAN model is reset. This will clear the fifo writes
1124
+ * done by post_write which gets called from register_reset function,
1125
+ * post_write handle will not be able to trigger tx because CAN will be
1126
+ * disabled when software_reset_register is cleared first.
1127
+ */
1128
+ fifo32_reset(&s->rx_fifo);
1129
+ fifo32_reset(&s->tx_fifo);
1130
+ fifo32_reset(&s->txhpb_fifo);
1131
+}
1132
+
1133
+static bool xlnx_zynqmp_can_can_receive(CanBusClientState *client)
1134
+{
1135
+ XlnxZynqMPCANState *s = container_of(client, XlnxZynqMPCANState,
1136
+ bus_client);
1137
+
1138
+ if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, SRST)) {
1139
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
1140
+
1141
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is in reset state.\n",
1142
+ path);
1143
+ return false;
1144
+ }
1145
+
1146
+ if ((ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) == 0) {
1147
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
1148
+
1149
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is disabled. Incoming"
1150
+ " messages will be discarded.\n", path);
1151
+ return false;
1152
+ }
1153
+
1154
+ return true;
1155
+}
1156
+
1157
+static ssize_t xlnx_zynqmp_can_receive(CanBusClientState *client,
1158
+ const qemu_can_frame *buf, size_t buf_size) {
1159
+ XlnxZynqMPCANState *s = container_of(client, XlnxZynqMPCANState,
1160
+ bus_client);
1161
+ const qemu_can_frame *frame = buf;
1162
+
1163
+ if (buf_size <= 0) {
1164
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
1165
+
1166
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Error in the data received.\n",
1167
+ path);
1168
+ return 0;
1169
+ }
1170
+
1171
+ if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SNOOP)) {
1172
+ /* Snoop Mode: Just keep the data. no response back. */
1173
+ update_rx_fifo(s, frame);
1174
+ } else if ((ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP))) {
1175
+ /*
1176
+ * XlnxZynqMPCAN is in sleep mode. Any data on bus will bring it to wake
1177
+ * up state.
1178
+ */
1179
+ can_exit_sleep_mode(s);
1180
+ update_rx_fifo(s, frame);
1181
+ } else if ((ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP)) == 0) {
1182
+ update_rx_fifo(s, frame);
1183
+ } else {
1184
+ /*
1185
+ * XlnxZynqMPCAN will not participate in normal bus communication
1186
+ * and will not receive any messages transmitted by other CAN nodes.
1187
+ */
1188
+ trace_xlnx_can_rx_discard(s->regs[R_STATUS_REGISTER]);
1189
+ }
1190
+
1191
+ return 1;
1192
+}
1193
+
1194
+static CanBusClientInfo can_xilinx_bus_client_info = {
1195
+ .can_receive = xlnx_zynqmp_can_can_receive,
1196
+ .receive = xlnx_zynqmp_can_receive,
1197
+};
1198
+
1199
+static int xlnx_zynqmp_can_connect_to_bus(XlnxZynqMPCANState *s,
1200
+ CanBusState *bus)
1201
+{
1202
+ s->bus_client.info = &can_xilinx_bus_client_info;
1203
+
1204
+ if (can_bus_insert_client(bus, &s->bus_client) < 0) {
1205
+ return -1;
1206
+ }
1207
+ return 0;
1208
+}
1209
+
1210
+static void xlnx_zynqmp_can_realize(DeviceState *dev, Error **errp)
1211
+{
1212
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(dev);
1213
+
1214
+ if (s->canbus) {
1215
+ if (xlnx_zynqmp_can_connect_to_bus(s, s->canbus) < 0) {
1216
+ g_autofree char *path = object_get_canonical_path(OBJECT(s));
1217
+
1218
+ error_setg(errp, "%s: xlnx_zynqmp_can_connect_to_bus"
1219
+ " failed.", path);
1220
+ return;
233
+ return;
1221
+ }
234
+ }
1222
+ }
235
+ }
1223
+
236
+
1224
+ /* Create RX FIFO, TXFIFO, TXHPB storage. */
237
+ cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
1225
+ fifo32_create(&s->rx_fifo, RXFIFO_SIZE);
238
+ cpu->isar = arm_host_cpu_features.isar;
1226
+ fifo32_create(&s->tx_fifo, RXFIFO_SIZE);
239
+ cpu->env.features = arm_host_cpu_features.features;
1227
+ fifo32_create(&s->txhpb_fifo, CAN_FRAME_SIZE);
240
+ cpu->midr = arm_host_cpu_features.midr;
1228
+
241
+ cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
1229
+ /* Allocate a new timer. */
1230
+ s->can_timer = ptimer_init(xlnx_zynqmp_can_ptimer_cb, s,
1231
+ PTIMER_POLICY_DEFAULT);
1232
+
1233
+ ptimer_transaction_begin(s->can_timer);
1234
+
1235
+ ptimer_set_freq(s->can_timer, s->cfg.ext_clk_freq);
1236
+ ptimer_set_limit(s->can_timer, CAN_TIMER_MAX, 1);
1237
+ ptimer_run(s->can_timer, 0);
1238
+ ptimer_transaction_commit(s->can_timer);
1239
+}
242
+}
1240
+
243
+
1241
+static void xlnx_zynqmp_can_init(Object *obj)
244
void hvf_arch_vcpu_destroy(CPUState *cpu)
1242
+{
245
{
1243
+ XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(obj);
246
}
1244
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
1245
+
1246
+ RegisterInfoArray *reg_array;
1247
+
1248
+ memory_region_init(&s->iomem, obj, TYPE_XLNX_ZYNQMP_CAN,
1249
+ XLNX_ZYNQMP_CAN_R_MAX * 4);
1250
+ reg_array = register_init_block32(DEVICE(obj), can_regs_info,
1251
+ ARRAY_SIZE(can_regs_info),
1252
+ s->reg_info, s->regs,
1253
+ &can_ops,
1254
+ XLNX_ZYNQMP_CAN_ERR_DEBUG,
1255
+ XLNX_ZYNQMP_CAN_R_MAX * 4);
1256
+
1257
+ memory_region_add_subregion(&s->iomem, 0x00, &reg_array->mem);
1258
+ sysbus_init_mmio(sbd, &s->iomem);
1259
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
1260
+}
1261
+
1262
+static const VMStateDescription vmstate_can = {
1263
+ .name = TYPE_XLNX_ZYNQMP_CAN,
1264
+ .version_id = 1,
1265
+ .minimum_version_id = 1,
1266
+ .fields = (VMStateField[]) {
1267
+ VMSTATE_FIFO32(rx_fifo, XlnxZynqMPCANState),
1268
+ VMSTATE_FIFO32(tx_fifo, XlnxZynqMPCANState),
1269
+ VMSTATE_FIFO32(txhpb_fifo, XlnxZynqMPCANState),
1270
+ VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPCANState, XLNX_ZYNQMP_CAN_R_MAX),
1271
+ VMSTATE_PTIMER(can_timer, XlnxZynqMPCANState),
1272
+ VMSTATE_END_OF_LIST(),
1273
+ }
1274
+};
1275
+
1276
+static Property xlnx_zynqmp_can_properties[] = {
1277
+ DEFINE_PROP_UINT32("ext_clk_freq", XlnxZynqMPCANState, cfg.ext_clk_freq,
1278
+ CAN_DEFAULT_CLOCK),
1279
+ DEFINE_PROP_LINK("canbus", XlnxZynqMPCANState, canbus, TYPE_CAN_BUS,
1280
+ CanBusState *),
1281
+ DEFINE_PROP_END_OF_LIST(),
1282
+};
1283
+
1284
+static void xlnx_zynqmp_can_class_init(ObjectClass *klass, void *data)
1285
+{
1286
+ DeviceClass *dc = DEVICE_CLASS(klass);
1287
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
1288
+
1289
+ rc->phases.enter = xlnx_zynqmp_can_reset_init;
1290
+ rc->phases.hold = xlnx_zynqmp_can_reset_hold;
1291
+ dc->realize = xlnx_zynqmp_can_realize;
1292
+ device_class_set_props(dc, xlnx_zynqmp_can_properties);
1293
+ dc->vmsd = &vmstate_can;
1294
+}
1295
+
1296
+static const TypeInfo can_info = {
1297
+ .name = TYPE_XLNX_ZYNQMP_CAN,
1298
+ .parent = TYPE_SYS_BUS_DEVICE,
1299
+ .instance_size = sizeof(XlnxZynqMPCANState),
1300
+ .class_init = xlnx_zynqmp_can_class_init,
1301
+ .instance_init = xlnx_zynqmp_can_init,
1302
+};
1303
+
1304
+static void can_register_types(void)
1305
+{
1306
+ type_register_static(&can_info);
1307
+}
1308
+
1309
+type_init(can_register_types)
1310
diff --git a/hw/Kconfig b/hw/Kconfig
index XXXXXXX..XXXXXXX 100644
--- a/hw/Kconfig
+++ b/hw/Kconfig
@@ -XXX,XX +XXX,XX @@ config XILINX_AXI
config XLNX_ZYNQMP
bool
select REGISTER
+ select CAN_BUS
diff --git a/hw/net/can/meson.build b/hw/net/can/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/can/meson.build
+++ b/hw/net/can/meson.build
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_pcm3680_pci.c'))
softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_mioe3680_pci.c'))
softmmu_ss.add(when: 'CONFIG_CAN_CTUCANFD', if_true: files('ctucan_core.c'))
softmmu_ss.add(when: 'CONFIG_CAN_CTUCANFD_PCI', if_true: files('ctucan_pci.c'))
+softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-can.c'))
diff --git a/hw/net/can/trace-events b/hw/net/can/trace-events
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/net/can/trace-events
@@ -XXX,XX +XXX,XX @@
+# xlnx-zynqmp-can.c
+xlnx_can_update_irq(uint32_t isr, uint32_t ier, uint32_t irq) "ISR: 0x%08x IER: 0x%08x IRQ: 0x%08x"
+xlnx_can_reset(uint32_t val) "Resetting controller with value = 0x%08x"
+xlnx_can_rx_fifo_filter_reject(uint32_t id, uint8_t dlc) "Frame: ID: 0x%08x DLC: 0x%02x"
+xlnx_can_filter_id_pre_write(uint8_t filter_num, uint32_t value) "Filter%d ID: 0x%08x"
+xlnx_can_filter_mask_pre_write(uint8_t filter_num, uint32_t value) "Filter%d MASK: 0x%08x"
+xlnx_can_tx_data(uint32_t id, uint8_t dlc, uint8_t db0, uint8_t db1, uint8_t db2, uint8_t db3, uint8_t db4, uint8_t db5, uint8_t db6, uint8_t db7) "Frame: ID: 0x%08x DLC: 0x%02x DATA: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x"
+xlnx_can_rx_data(uint32_t id, uint32_t dlc, uint8_t db0, uint8_t db1, uint8_t db2, uint8_t db3, uint8_t db4, uint8_t db5, uint8_t db6, uint8_t db7) "Frame: ID: 0x%08x DLC: 0x%02x DATA: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x"
+xlnx_can_rx_discard(uint32_t status) "Controller is not enabled for bus communication. Status Register: 0x%08x"
--
2.20.1

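A quick way to exercise the CAN model above, assuming can-utils on the
host and the canbus0/vcan0 wiring from the commit message, is to
generate traffic on the host side:

    $ cangen vcan0 -v

while running QEMU with -trace "xlnx_can_*" to watch the trace events
defined in this patch fire on the receive path.
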
From: Vikram Garhwal <fnu.vikram@xilinx.com>

Reviewed-by: Francisco Iglesias <francisco.iglesias@xilinx.com>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Vikram Garhwal <fnu.vikram@xilinx.com>
Message-id: 1605728926-352690-5-git-send-email-fnu.vikram@xilinx.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
MAINTAINERS | 8 ++++++++
1 file changed, 8 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: hw/net/opencores_eth.c

Devices
-------
+Xilinx CAN
+M: Vikram Garhwal <fnu.vikram@xilinx.com>
+M: Francisco Iglesias <francisco.iglesias@xilinx.com>
+S: Maintained
+F: hw/net/can/xlnx-*
+F: include/hw/net/xlnx-*
+F: tests/qtest/xlnx-can-test*
+
EDU
M: Jiri Slaby <jslaby@suse.cz>
S: Maintained
--
2.20.1

In v8.1M the new CLRM instruction allows zeroing an arbitrary set of
the general-purpose registers and APSR. Implement this.

The encoding is a subset of the LDMIA T2 encoding, using what would
be Rn=0b1111 (which UNDEFs for LDMIA).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-6-peter.maydell@linaro.org
---
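
Illustration (register list chosen for the example, not taken from the
patch): "CLRM {R1, R2, APSR}" zeroes R1 and R2 and clears the APSR, so
software can scrub an arbitrary subset of the register file in a single
instruction.
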
From: Alexander Graf <agraf@csgraf.de>

We need to handle PSCI calls. Most of the TCG code works for us,
but we can simplify it to only handle aa64 mode, and we need to
handle SUSPEND differently.

This patch takes the TCG code as a template and duplicates it in HVF.

To tell the guest that we support PSCI 0.2 now, update the check in
arm_cpu_initfn() as well.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210916155404.86958-8-agraf@csgraf.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
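
For context (this is the standard PSCI calling convention rather than
something added here): the guest reaches this code by executing an HVC
or SMC with the PSCI function ID in x0 and arguments in x1-x3; the
handler added below reads them from env->xregs[0..3] and returns the
result in x0.
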
target/arm/t32.decode | 6 +++++-
target/arm/translate.c | 38 ++++++++++++++++++++++++++++++++++++++
2 files changed, 43 insertions(+), 1 deletion(-)

target/arm/cpu.c | 4 +-
target/arm/hvf/hvf.c | 141 ++++++++++++++++++++++++++++++++++--
target/arm/hvf/trace-events | 1 +
3 files changed, 139 insertions(+), 7 deletions(-)

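
In the decode change below, note how CLRM claims the encoding space: the
line "CLRM 1110 1000 1001 1111 list:16" fixes what would be LDMIA's Rn
field to 0b1111 (an UNDEF case for LDMIA), with the low 16 bits giving
the register list.
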
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
22
23
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
16
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/t32.decode
25
--- a/target/arm/cpu.c
18
+++ b/target/arm/t32.decode
26
+++ b/target/arm/cpu.c
19
@@ -XXX,XX +XXX,XX @@ UXTAB 1111 1010 0101 .... 1111 .... 10.. .... @rrr_rot
27
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_initfn(Object *obj)
20
28
cpu->psci_version = 1; /* By default assume PSCI v0.1 */
21
STM_t32 1110 1000 10.0 .... ................ @ldstm i=1 b=0
29
cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
22
STM_t32 1110 1001 00.0 .... ................ @ldstm i=0 b=1
30
23
-LDM_t32 1110 1000 10.1 .... ................ @ldstm i=1 b=0
31
- if (tcg_enabled()) {
32
- cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
33
+ if (tcg_enabled() || hvf_enabled()) {
34
+ cpu->psci_version = 2; /* TCG and HVF implement PSCI 0.2 */
35
}
36
}
37
38
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/arm/hvf/hvf.c
41
+++ b/target/arm/hvf/hvf.c
42
@@ -XXX,XX +XXX,XX @@
43
#include "hw/irq.h"
44
#include "qemu/main-loop.h"
45
#include "sysemu/cpus.h"
46
+#include "arm-powerctl.h"
47
#include "target/arm/cpu.h"
48
#include "target/arm/internals.h"
49
#include "trace/trace-target_arm_hvf.h"
50
@@ -XXX,XX +XXX,XX @@
51
#define TMR_CTL_IMASK (1 << 1)
52
#define TMR_CTL_ISTATUS (1 << 2)
53
54
+static void hvf_wfi(CPUState *cpu);
55
+
56
typedef struct HVFVTimer {
57
/* Vtimer value during migration and paused state */
58
uint64_t vtimer_val;
59
@@ -XXX,XX +XXX,XX @@ static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
60
arm_cpu_do_interrupt(cpu);
61
}
62
63
+static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
24
+{
64
+{
25
+ # Rn=15 UNDEFs for LDM; M-profile CLRM uses that encoding
65
+ int32_t ret = arm_set_cpu_off(arm_cpu->mp_affinity);
26
+ CLRM 1110 1000 1001 1111 list:16
66
+ assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
27
+ LDM_t32 1110 1000 10.1 .... ................ @ldstm i=1 b=0
28
+}
67
+}
29
LDM_t32 1110 1001 00.1 .... ................ @ldstm i=0 b=1
68
+
30
69
+/*
31
&rfe !extern rn w pu
70
+ * Handle a PSCI call.
32
diff --git a/target/arm/translate.c b/target/arm/translate.c
71
+ *
33
index XXXXXXX..XXXXXXX 100644
72
+ * Returns 0 on success
34
--- a/target/arm/translate.c
73
+ * -1 when the PSCI call is unknown,
35
+++ b/target/arm/translate.c
74
+ */
36
@@ -XXX,XX +XXX,XX @@ static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
75
+static bool hvf_handle_psci_call(CPUState *cpu)
37
return do_ldm(s, a, 1);
38
}
39
40
+static bool trans_CLRM(DisasContext *s, arg_CLRM *a)
41
+{
76
+{
42
+ int i;
77
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
43
+ TCGv_i32 zero;
78
+ CPUARMState *env = &arm_cpu->env;
44
+
79
+ uint64_t param[4] = {
45
+ if (!dc_isar_feature(aa32_m_sec_state, s)) {
80
+ env->xregs[0],
81
+ env->xregs[1],
82
+ env->xregs[2],
83
+ env->xregs[3]
84
+ };
85
+ uint64_t context_id, mpidr;
86
+ bool target_aarch64 = true;
87
+ CPUState *target_cpu_state;
88
+ ARMCPU *target_cpu;
89
+ target_ulong entry;
90
+ int target_el = 1;
91
+ int32_t ret = 0;
92
+
93
+ trace_hvf_psci_call(param[0], param[1], param[2], param[3],
94
+ arm_cpu->mp_affinity);
95
+
96
+ switch (param[0]) {
97
+ case QEMU_PSCI_0_2_FN_PSCI_VERSION:
98
+ ret = QEMU_PSCI_0_2_RET_VERSION_0_2;
99
+ break;
100
+ case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
101
+ ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
102
+ break;
103
+ case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
104
+ case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
105
+ mpidr = param[1];
106
+
107
+ switch (param[2]) {
108
+ case 0:
109
+ target_cpu_state = arm_get_cpu_by_id(mpidr);
110
+ if (!target_cpu_state) {
111
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
112
+ break;
113
+ }
114
+ target_cpu = ARM_CPU(target_cpu_state);
115
+
116
+ ret = target_cpu->power_state;
117
+ break;
118
+ default:
119
+ /* Everything above affinity level 0 is always on. */
120
+ ret = 0;
121
+ }
122
+ break;
123
+ case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
124
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
125
+ /*
126
+ * QEMU reset and shutdown are async requests, but PSCI
127
+ * mandates that we never return from the reset/shutdown
128
+ * call, so power the CPU off now so it doesn't execute
129
+ * anything further.
130
+ */
131
+ hvf_psci_cpu_off(arm_cpu);
132
+ break;
133
+ case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
134
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
135
+ hvf_psci_cpu_off(arm_cpu);
136
+ break;
137
+ case QEMU_PSCI_0_1_FN_CPU_ON:
138
+ case QEMU_PSCI_0_2_FN_CPU_ON:
139
+ case QEMU_PSCI_0_2_FN64_CPU_ON:
140
+ mpidr = param[1];
141
+ entry = param[2];
142
+ context_id = param[3];
143
+ ret = arm_set_cpu_on(mpidr, entry, context_id,
144
+ target_el, target_aarch64);
145
+ break;
146
+ case QEMU_PSCI_0_1_FN_CPU_OFF:
147
+ case QEMU_PSCI_0_2_FN_CPU_OFF:
148
+ hvf_psci_cpu_off(arm_cpu);
149
+ break;
150
+ case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
151
+ case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
152
+ case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
153
+ /* Affinity levels are not supported in QEMU */
154
+ if (param[1] & 0xfffe0000) {
155
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
156
+ break;
157
+ }
158
+ /* Powerdown is not supported, we always go into WFI */
159
+ env->xregs[0] = 0;
160
+ hvf_wfi(cpu);
161
+ break;
162
+ case QEMU_PSCI_0_1_FN_MIGRATE:
163
+ case QEMU_PSCI_0_2_FN_MIGRATE:
164
+ ret = QEMU_PSCI_RET_NOT_SUPPORTED;
165
+ break;
166
+ default:
46
+ return false;
167
+ return false;
47
+ }
168
+ }
48
+
169
+
49
+ if (extract32(a->list, 13, 1)) {
170
+ env->xregs[0] = ret;
50
+ return false;
51
+ }
52
+
53
+ if (!a->list) {
54
+ /* UNPREDICTABLE; we choose to UNDEF */
55
+ return false;
56
+ }
57
+
58
+ zero = tcg_const_i32(0);
59
+ for (i = 0; i < 15; i++) {
60
+ if (extract32(a->list, i, 1)) {
61
+ /* Clear R[i] */
62
+ tcg_gen_mov_i32(cpu_R[i], zero);
63
+ }
64
+ }
65
+ if (extract32(a->list, 15, 1)) {
66
+ /*
67
+ * Clear APSR (by calling the MSR helper with the same argument
68
+ * as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0)
69
+ */
70
+ TCGv_i32 maskreg = tcg_const_i32(0xc << 8);
71
+ gen_helper_v7m_msr(cpu_env, maskreg, zero);
72
+ tcg_temp_free_i32(maskreg);
73
+ }
74
+ tcg_temp_free_i32(zero);
75
+ return true;
171
+ return true;
76
+}
172
+}
77
+
173
+
78
/*
174
static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
79
* Branch, branch with link
175
{
80
*/
176
ARMCPU *arm_cpu = ARM_CPU(cpu);
177
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
178
break;
179
case EC_AA64_HVC:
180
cpu_synchronize_state(cpu);
181
- trace_hvf_unknown_hvc(env->xregs[0]);
182
- /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
183
- env->xregs[0] = -1;
184
+ if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
185
+ if (!hvf_handle_psci_call(cpu)) {
186
+ trace_hvf_unknown_hvc(env->xregs[0]);
187
+ /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
188
+ env->xregs[0] = -1;
189
+ }
190
+ } else {
191
+ trace_hvf_unknown_hvc(env->xregs[0]);
192
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
193
+ }
194
break;
195
case EC_AA64_SMC:
196
cpu_synchronize_state(cpu);
197
- trace_hvf_unknown_smc(env->xregs[0]);
198
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
199
+ if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
200
+ advance_pc = true;
201
+
202
+ if (!hvf_handle_psci_call(cpu)) {
203
+ trace_hvf_unknown_smc(env->xregs[0]);
204
+ /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
205
+ env->xregs[0] = -1;
206
+ }
207
+ } else {
208
+ trace_hvf_unknown_smc(env->xregs[0]);
209
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
210
+ }
211
break;
212
default:
213
cpu_synchronize_state(cpu);
214
diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events
215
index XXXXXXX..XXXXXXX 100644
216
--- a/target/arm/hvf/trace-events
217
+++ b/target/arm/hvf/trace-events
218
@@ -XXX,XX +XXX,XX @@ hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_
219
hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64
220
hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64
221
hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]"
222
+hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpu=0x%x"
81
--
223
--
82
2.20.1
224
2.20.1
83
225
84
226
diff view generated by jsdifflib
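Aside, not part of the patch: the conduit being handled here is just
an HVC (or SMC) with the SMCCC function ID in x0 and the result
returned in x0. A minimal sketch of the guest side, assuming an
AArch64 EL1 guest using the HVC conduit; 0x84000000 is PSCI_VERSION
per the PSCI spec, everything else is illustrative:

    #include <stdint.h>

    static inline uint64_t psci_call(uint64_t fnid, uint64_t a1,
                                     uint64_t a2, uint64_t a3)
    {
        register uint64_t x0 __asm__("x0") = fnid;
        register uint64_t x1 __asm__("x1") = a1;
        register uint64_t x2 __asm__("x2") = a2;
        register uint64_t x3 __asm__("x3") = a3;

        /* hvf_vcpu_exec() sees this as an EC_AA64_HVC exit and routes
         * it to hvf_handle_psci_call(); unknown IDs come back as -1. */
        __asm__ volatile("hvc #0"
                         : "+r"(x0)
                         : "r"(x1), "r"(x2), "r"(x3)
                         : "memory");
        return x0;
    }

    /* e.g. psci_call(0x84000000, 0, 0, 0) returns 2 for PSCI 0.2 */
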
From: Alex Chen <alex.chen@huawei.com>

We should use printf format specifier "%u" instead of "%d" for
argument of type "unsigned int".

Reported-by: Euler Robot <euler.robot@huawei.com>
Signed-off-by: Alex Chen <alex.chen@huawei.com>
Message-id: 20201126111109.112238-4-alex.chen@huawei.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/misc/imx6_ccm.c | 20 ++++++++++----------
 hw/misc/imx6_src.c |  2 +-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/hw/misc/imx6_ccm.c b/hw/misc/imx6_ccm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/imx6_ccm.c
+++ b/hw/misc/imx6_ccm.c
@@ -XXX,XX +XXX,XX @@ static const char *imx6_ccm_reg_name(uint32_t reg)
     case CCM_CMEOR:
         return "CMEOR";
     default:
-        sprintf(unknown, "%d ?", reg);
+        sprintf(unknown, "%u ?", reg);
         return unknown;
     }
 }
@@ -XXX,XX +XXX,XX @@ static const char *imx6_analog_reg_name(uint32_t reg)
     case USB_ANALOG_DIGPROG:
         return "USB_ANALOG_DIGPROG";
     default:
-        sprintf(unknown, "%d ?", reg);
+        sprintf(unknown, "%u ?", reg);
         return unknown;
     }
 }
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_analog_get_pll2_clk(IMX6CCMState *dev)
         freq *= 20;
     }

-    DPRINTF("freq = %d\n", (uint32_t)freq);
+    DPRINTF("freq = %u\n", (uint32_t)freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_analog_get_pll2_pfd0_clk(IMX6CCMState *dev)
     freq = imx6_analog_get_pll2_clk(dev) * 18
            / EXTRACT(dev->analog[CCM_ANALOG_PFD_528], PFD0_FRAC);

-    DPRINTF("freq = %d\n", (uint32_t)freq);
+    DPRINTF("freq = %u\n", (uint32_t)freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_analog_get_pll2_pfd2_clk(IMX6CCMState *dev)
     freq = imx6_analog_get_pll2_clk(dev) * 18
            / EXTRACT(dev->analog[CCM_ANALOG_PFD_528], PFD2_FRAC);

-    DPRINTF("freq = %d\n", (uint32_t)freq);
+    DPRINTF("freq = %u\n", (uint32_t)freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_analog_get_periph_clk(IMX6CCMState *dev)
         break;
     }

-    DPRINTF("freq = %d\n", (uint32_t)freq);
+    DPRINTF("freq = %u\n", (uint32_t)freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_ccm_get_ahb_clk(IMX6CCMState *dev)
     freq = imx6_analog_get_periph_clk(dev)
            / (1 + EXTRACT(dev->ccm[CCM_CBCDR], AHB_PODF));

-    DPRINTF("freq = %d\n", (uint32_t)freq);
+    DPRINTF("freq = %u\n", (uint32_t)freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_ccm_get_ipg_clk(IMX6CCMState *dev)
     freq = imx6_ccm_get_ahb_clk(dev)
            / (1 + EXTRACT(dev->ccm[CCM_CBCDR], IPG_PODF));

-    DPRINTF("freq = %d\n", (uint32_t)freq);
+    DPRINTF("freq = %u\n", (uint32_t)freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint64_t imx6_ccm_get_per_clk(IMX6CCMState *dev)
     freq = imx6_ccm_get_ipg_clk(dev)
            / (1 + EXTRACT(dev->ccm[CCM_CSCMR1], PERCLK_PODF));

-    DPRINTF("freq = %d\n", (uint32_t)freq);
+    DPRINTF("freq = %u\n", (uint32_t)freq);

     return freq;
 }
@@ -XXX,XX +XXX,XX @@ static uint32_t imx6_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
         break;
     }

-    DPRINTF("Clock = %d) = %d\n", clock, freq);
+    DPRINTF("Clock = %d) = %u\n", clock, freq);

     return freq;
 }
diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/imx6_src.c
+++ b/hw/misc/imx6_src.c
@@ -XXX,XX +XXX,XX @@ static const char *imx6_src_reg_name(uint32_t reg)
     case SRC_GPR10:
         return "SRC_GPR10";
     default:
-        sprintf(unknown, "%d ?", reg);
+        sprintf(unknown, "%u ?", reg);
         return unknown;
     }
 }
--
2.20.1

From: Alexander Graf <agraf@csgraf.de>

Now that we have all logic in place that we need to handle Hypervisor.framework
on Apple Silicon systems, let's add CONFIG_HVF for aarch64 as well so that we
can build it.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com> (x86 only)
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Message-id: 20210916155404.86958-9-agraf@csgraf.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 meson.build                | 7 +++++++
 target/arm/hvf/meson.build | 3 +++
 target/arm/meson.build     | 2 ++
 3 files changed, 12 insertions(+)
 create mode 100644 target/arm/hvf/meson.build

diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ else
 endif

 accelerator_targets = { 'CONFIG_KVM': kvm_targets }
+
+if cpu in ['aarch64']
+  accelerator_targets += {
+    'CONFIG_HVF': ['aarch64-softmmu']
+  }
+endif
+
 if cpu in ['x86', 'x86_64', 'arm', 'aarch64']
   # i386 emulator provides xenpv machine type for multiple architectures
   accelerator_targets += {
diff --git a/target/arm/hvf/meson.build b/target/arm/hvf/meson.build
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/target/arm/hvf/meson.build
@@ -XXX,XX +XXX,XX @@
+arm_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
+  'hvf.c',
+))
diff --git a/target/arm/meson.build b/target/arm/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -XXX,XX +XXX,XX @@ arm_softmmu_ss.add(files(
   'psci.c',
 ))

+subdir('hvf')
+
 target_arch += {'arm': arm_ss}
 target_softmmu_arch += {'arm': arm_softmmu_ss}
--
2.20.1

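Aside, not part of the patch: a standalone illustration of the bug
class being fixed, runnable outside QEMU. Passing an unsigned int to
"%d" is undefined behaviour once the value exceeds INT_MAX:

    #include <stdio.h>

    int main(void)
    {
        unsigned int reg = 0x80000004u;

        printf("%d ?\n", reg);  /* UB; typically prints -2147483644 */
        printf("%u ?\n", reg);  /* 2147483652, as intended */
        return 0;
    }
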
From: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>

Trusted Firmware now supports A72 on sbsa-ref by default [1] so enable
it for QEMU as well. A53 was already enabled there.

1. https://review.trustedfirmware.org/c/TF-A/trusted-firmware-a/+/7117

Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201120141705.246690-1-marcin.juszkiewicz@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/sbsa-ref.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -XXX,XX +XXX,XX @@ static const int sbsa_ref_irqmap[] = {
     [SBSA_GWDT] = 16,
 };

+static const char * const valid_cpus[] = {
+    ARM_CPU_TYPE_NAME("cortex-a53"),
+    ARM_CPU_TYPE_NAME("cortex-a57"),
+    ARM_CPU_TYPE_NAME("cortex-a72"),
+};
+
+static bool cpu_type_valid(const char *cpu)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) {
+        if (strcmp(cpu, valid_cpus[i]) == 0) {
+            return true;
+        }
+    }
+    return false;
+}
+
 static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx)
 {
     uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER;
@@ -XXX,XX +XXX,XX @@ static void sbsa_ref_init(MachineState *machine)
     const CPUArchIdList *possible_cpus;
     int n, sbsa_max_cpus;

-    if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a57"))) {
-        error_report("sbsa-ref: CPU type other than the built-in "
-                     "cortex-a57 not supported");
+    if (!cpu_type_valid(machine->cpu_type)) {
+        error_report("mach-virt: CPU type %s not supported", machine->cpu_type);
         exit(1);
     }

--
2.20.1

From: Alexander Graf <agraf@csgraf.de>

We can expose cycle counters on the PMU easily. To be as compatible as
possible, let's do so, but make sure we don't expose any other architectural
counters that we can not model yet.

This allows OSs to work that require PMU support.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210916155404.86958-10-agraf@csgraf.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/hvf/hvf.c | 179 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 179 insertions(+)

diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@
 #define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
 #define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
 #define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
+#define SYSREG_PMCR_EL0       SYSREG(3, 3, 9, 12, 0)
+#define SYSREG_PMUSERENR_EL0  SYSREG(3, 3, 9, 14, 0)
+#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
+#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
+#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
+#define SYSREG_PMOVSCLR_EL0   SYSREG(3, 3, 9, 12, 3)
+#define SYSREG_PMSWINC_EL0    SYSREG(3, 3, 9, 12, 4)
+#define SYSREG_PMSELR_EL0     SYSREG(3, 3, 9, 12, 5)
+#define SYSREG_PMCEID0_EL0    SYSREG(3, 3, 9, 12, 6)
+#define SYSREG_PMCEID1_EL0    SYSREG(3, 3, 9, 12, 7)
+#define SYSREG_PMCCNTR_EL0    SYSREG(3, 3, 9, 13, 0)
+#define SYSREG_PMCCFILTR_EL0  SYSREG(3, 3, 14, 15, 7)

 #define WFX_IS_WFE (1 << 0)

@@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
         val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
               gt_cntfrq_period_ns(arm_cpu);
         break;
+    case SYSREG_PMCR_EL0:
+        val = env->cp15.c9_pmcr;
+        break;
+    case SYSREG_PMCCNTR_EL0:
+        pmu_op_start(env);
+        val = env->cp15.c15_ccnt;
+        pmu_op_finish(env);
+        break;
+    case SYSREG_PMCNTENCLR_EL0:
+        val = env->cp15.c9_pmcnten;
+        break;
+    case SYSREG_PMOVSCLR_EL0:
+        val = env->cp15.c9_pmovsr;
+        break;
+    case SYSREG_PMSELR_EL0:
+        val = env->cp15.c9_pmselr;
+        break;
+    case SYSREG_PMINTENCLR_EL1:
+        val = env->cp15.c9_pminten;
+        break;
+    case SYSREG_PMCCFILTR_EL0:
+        val = env->cp15.pmccfiltr_el0;
+        break;
+    case SYSREG_PMCNTENSET_EL0:
+        val = env->cp15.c9_pmcnten;
+        break;
+    case SYSREG_PMUSERENR_EL0:
+        val = env->cp15.c9_pmuserenr;
+        break;
+    case SYSREG_PMCEID0_EL0:
+    case SYSREG_PMCEID1_EL0:
+        /* We can't really count anything yet, declare all events invalid */
+        val = 0;
+        break;
     case SYSREG_OSLSR_EL1:
         val = env->cp15.oslsr_el1;
         break;
@@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
     return 0;
 }

+static void pmu_update_irq(CPUARMState *env)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
+                 (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
+}
+
+static bool pmu_event_supported(uint16_t number)
+{
+    return false;
+}
+
+/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
+ * the current EL, security state, and register configuration.
+ */
+static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
+{
+    uint64_t filter;
+    bool enabled, filtered = true;
+    int el = arm_current_el(env);
+
+    enabled = (env->cp15.c9_pmcr & PMCRE) &&
+              (env->cp15.c9_pmcnten & (1 << counter));
+
+    if (counter == 31) {
+        filter = env->cp15.pmccfiltr_el0;
+    } else {
+        filter = env->cp15.c14_pmevtyper[counter];
+    }
+
+    if (el == 0) {
+        filtered = filter & PMXEVTYPER_U;
+    } else if (el == 1) {
+        filtered = filter & PMXEVTYPER_P;
+    }
+
+    if (counter != 31) {
+        /*
+         * If not checking PMCCNTR, ensure the counter is setup to an event we
+         * support
+         */
+        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
+        if (!pmu_event_supported(event)) {
+            return false;
+        }
+    }
+
+    return enabled && !filtered;
+}
+
+static void pmswinc_write(CPUARMState *env, uint64_t value)
+{
+    unsigned int i;
+    for (i = 0; i < pmu_num_counters(env); i++) {
+        /* Increment a counter's count iff: */
+        if ((value & (1 << i)) && /* counter's bit is set */
+            /* counter is enabled and not filtered */
+            pmu_counter_enabled(env, i) &&
+            /* counter is SW_INCR */
+            (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
+            /*
+             * Detect if this write causes an overflow since we can't predict
+             * PMSWINC overflows like we can for other events
+             */
+            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+
+            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
+                env->cp15.c9_pmovsr |= (1 << i);
+                pmu_update_irq(env);
+            }
+
+            env->cp15.c14_pmevcntr[i] = new_pmswinc;
+        }
+    }
+}
+
 static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
 {
     ARMCPU *arm_cpu = ARM_CPU(cpu);
@@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
                             val);

     switch (reg) {
+    case SYSREG_PMCCNTR_EL0:
+        pmu_op_start(env);
+        env->cp15.c15_ccnt = val;
+        pmu_op_finish(env);
+        break;
+    case SYSREG_PMCR_EL0:
+        pmu_op_start(env);
+
+        if (val & PMCRC) {
+            /* The counter has been reset */
+            env->cp15.c15_ccnt = 0;
+        }
+
+        if (val & PMCRP) {
+            unsigned int i;
+            for (i = 0; i < pmu_num_counters(env); i++) {
+                env->cp15.c14_pmevcntr[i] = 0;
+            }
+        }
+
+        env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
+        env->cp15.c9_pmcr |= (val & PMCR_WRITEABLE_MASK);
+
+        pmu_op_finish(env);
+        break;
+    case SYSREG_PMUSERENR_EL0:
+        env->cp15.c9_pmuserenr = val & 0xf;
+        break;
+    case SYSREG_PMCNTENSET_EL0:
+        env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
+        break;
+    case SYSREG_PMCNTENCLR_EL0:
+        env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
+        break;
+    case SYSREG_PMINTENCLR_EL1:
+        pmu_op_start(env);
+        env->cp15.c9_pminten |= val;
+        pmu_op_finish(env);
+        break;
+    case SYSREG_PMOVSCLR_EL0:
+        pmu_op_start(env);
+        env->cp15.c9_pmovsr &= ~val;
+        pmu_op_finish(env);
+        break;
+    case SYSREG_PMSWINC_EL0:
+        pmu_op_start(env);
+        pmswinc_write(env, val);
+        pmu_op_finish(env);
+        break;
+    case SYSREG_PMSELR_EL0:
+        env->cp15.c9_pmselr = val & 0x1f;
+        break;
+    case SYSREG_PMCCFILTR_EL0:
+        pmu_op_start(env);
+        env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
+        pmu_op_finish(env);
+        break;
     case SYSREG_OSLAR_EL1:
         env->cp15.oslsr_el1 = val & 1;
         break;
--
2.20.1

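Aside, not part of the patch: the overflow test in pmswinc_write()
above, old & ~new & INT32_MIN, fires exactly when the increment clears
bit 31, i.e. when the 32-bit counter wraps. A standalone sketch:

    #include <assert.h>
    #include <stdint.h>

    static int wrapped(uint32_t oldval, uint32_t newval)
    {
        /* bit 31 was set before the increment and clear after it */
        return (oldval & ~newval & 0x80000000u) != 0;
    }

    int main(void)
    {
        assert(wrapped(0xffffffffu, 0x00000000u));   /* true wrap */
        assert(!wrapped(0x7fffffffu, 0x80000000u));  /* carry into bit 31 */
        return 0;
    }
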
The constant-expander functions like negate, plus_2, etc, are
generally useful; move them up in translate.c so we can use them in
the VFP/Neon decoders as well as in the A32/T32/T16 decoders.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-9-peter.maydell@linaro.org
---
 target/arm/translate.c | 46 +++++++++++++++++++++++-------------------
 1 file changed, 25 insertions(+), 21 deletions(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void arm_gen_condlabel(DisasContext *s)
     }
 }

+/*
+ * Constant expanders for the decoders.
+ */
+
+static int negate(DisasContext *s, int x)
+{
+    return -x;
+}
+
+static int plus_2(DisasContext *s, int x)
+{
+    return x + 2;
+}
+
+static int times_2(DisasContext *s, int x)
+{
+    return x * 2;
+}
+
+static int times_4(DisasContext *s, int x)
+{
+    return x * 4;
+}
+
 /* Flags for the disas_set_da_iss info argument:
  * lower bits hold the Rt register number, higher bits are flags.
  */
@@ -XXX,XX +XXX,XX @@ static void arm_skip_unless(DisasContext *s, uint32_t cond)


 /*
- * Constant expanders for the decoders.
+ * Constant expanders used by T16/T32 decode
  */

-static int negate(DisasContext *s, int x)
-{
-    return -x;
-}
-
-static int plus_2(DisasContext *s, int x)
-{
-    return x + 2;
-}
-
-static int times_2(DisasContext *s, int x)
-{
-    return x * 2;
-}
-
-static int times_4(DisasContext *s, int x)
-{
-    return x * 4;
-}
-
 /* Return only the rotation part of T32ExpandImm. */
 static int t32_expandimm_rot(DisasContext *s, int x)
 {
--
2.20.1

Currently gen_jmp_tb() assumes that if it is called then the jump it
is handling is the only reason that we might be trying to end the TB,
so it will use goto_tb if it can. This is usually the case: mostly
"we did something that means we must end the TB" happens on a
non-branch instruction. However, there are cases where we decide
early in handling an instruction that we need to end the TB and
return to the main loop, and then the insn is a complex one that
involves gen_jmp_tb(). For instance, for M-profile FP instructions,
in gen_preserve_fp_state() which is called from vfp_access_check() we
want to force an exit to the main loop if lazy state preservation is
active and we are in icount mode.

Make gen_jmp_tb() look at the current value of is_jmp, and only use
goto_tb if the previous is_jmp was DISAS_NEXT or DISAS_TOO_MANY.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-2-peter.maydell@linaro.org
---
 target/arm/translate.c | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)

diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
         /* An indirect jump so that we still trigger the debug exception. */
         gen_set_pc_im(s, dest);
         s->base.is_jmp = DISAS_JUMP;
-    } else {
+        return;
+    }
+    switch (s->base.is_jmp) {
+    case DISAS_NEXT:
+    case DISAS_TOO_MANY:
+    case DISAS_NORETURN:
+        /*
+         * The normal case: just go to the destination TB.
+         * NB: NORETURN happens if we generate code like
+         *    gen_brcondi(l);
+         *    gen_jmp();
+         *    gen_set_label(l);
+         *    gen_jmp();
+         * on the second call to gen_jmp().
+         */
         gen_goto_tb(s, tbno, dest);
+        break;
+    case DISAS_UPDATE_NOCHAIN:
+    case DISAS_UPDATE_EXIT:
+        /*
+         * We already decided we're leaving the TB for some other reason.
+         * Avoid using goto_tb so we really do exit back to the main loop
+         * and don't chain to another TB.
+         */
+        gen_set_pc_im(s, dest);
+        gen_goto_ptr();
+        s->base.is_jmp = DISAS_NORETURN;
+        break;
+    default:
+        /*
+         * We shouldn't be emitting code for a jump and also have
+         * is_jmp set to one of the special cases like DISAS_SWI.
+         */
+        g_assert_not_reached();
     }
 }

--
2.20.1

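Aside, not part of the patch: the relocated expanders are referenced
from the decodetree files via !function=. A hedged sketch in
decodetree syntax; the field and pattern names here are made up:

    # scale an 8-bit immediate by 4 via the times_4() expander
    %imm8x4      0:8 !function=times_4
    LDR_sp_ri    10011 rt:3 ........ imm=%imm8x4
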
v8.1M adds new encodings of VLLDM and VLSTM (where bit 7 is set).
The only difference is that:
 * the old T1 encodings UNDEF if the implementation implements 32
   Dregs (this is currently architecturally impossible for M-profile)
 * the new T2 encodings have the implementation-defined option to
   read from memory (discarding the data) or write UNKNOWN values to
   memory for the stack slots that would be D16-D31

We choose not to make those accesses, so for us the two
instructions behave identically assuming they don't UNDEF.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-21-peter.maydell@linaro.org
---
 target/arm/m-nocp.decode       |  2 +-
 target/arm/translate-vfp.c.inc | 25 +++++++++++++++++++++++++
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/target/arm/m-nocp.decode b/target/arm/m-nocp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m-nocp.decode
+++ b/target/arm/m-nocp.decode
@@ -XXX,XX +XXX,XX @@

 {
   # Special cases which do not take an early NOCP: VLLDM and VLSTM
-  VLLDM_VLSTM  1110 1100 001 l:1 rn:4 0000 1010 0000 0000
+  VLLDM_VLSTM  1110 1100 001 l:1 rn:4 0000 1010 op:1 000 0000
   # VSCCLRM (new in v8.1M) is similar:
   VSCCLRM      1110 1100 1.01 1111 .... 1011 imm:7 0 vd=%vd_dp size=3
   VSCCLRM      1110 1100 1.01 1111 .... 1010 imm:8 vd=%vd_sp size=2
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
         !arm_dc_feature(s, ARM_FEATURE_V8)) {
         return false;
     }
+
+    if (a->op) {
+        /*
+         * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not
+         * to take the IMPDEF option to make memory accesses to the stack
+         * slots that correspond to the D16-D31 registers (discarding
+         * read data and writing UNKNOWN values), so for us the T2
+         * encoding behaves identically to the T1 encoding.
+         */
+        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+            return false;
+        }
+    } else {
+        /*
+         * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs.
+         * This is currently architecturally impossible, but we add the
+         * check to stay in line with the pseudocode. Note that we must
+         * emit code for the UNDEF so it takes precedence over the NOCP.
+         */
+        if (dc_isar_feature(aa32_simd_r32, s)) {
+            unallocated_encoding(s);
+            return true;
+        }
+    }
+
     /*
      * If not secure, UNDEF. We must emit code for this
      * rather than returning false so that this takes
--
2.20.1

Architecturally, for an M-profile CPU with the LOB feature the
LTPSIZE field in FPDSCR is always constant 4. QEMU's implementation
enforces this everywhere, except that we don't check that it is true
in incoming migration data.

We're going to add code in gen_update_fp_context() which relies on
the "always 4" property. Since this is TCG-only, we don't actually
need to be robust to bogus incoming migration data, and the effect of
it being wrong would be wrong code generation rather than a QEMU
crash; but if it did ever happen somehow it would be very difficult
to track down the cause. Add a check so that we fail the inbound
migration if the FPDSCR.LTPSIZE value is incorrect.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-3-peter.maydell@linaro.org
---
 target/arm/machine.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/target/arm/machine.c b/target/arm/machine.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -XXX,XX +XXX,XX @@ static int cpu_post_load(void *opaque, int version_id)
     hw_breakpoint_update_all(cpu);
     hw_watchpoint_update_all(cpu);

+    /*
+     * TCG gen_update_fp_context() relies on the invariant that
+     * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
+     * forbid bogus incoming data with some other value.
+     */
+    if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
+        if (extract32(env->v7m.fpdscr[M_REG_NS],
+                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
+            extract32(env->v7m.fpdscr[M_REG_S],
+                      FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
+            return -1;
+        }
+    }
+
     if (!kvm_enabled()) {
         pmu_op_finish(&cpu->env);
     }
--
2.20.1

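Aside, not part of the patch: what the new cpu_post_load() check is
computing. extract32(v, start, len) returns bits [start, start+len);
the concrete shift and length below assume LTPSIZE sits at FPDSCR
bits [18:16], which is what FPCR_LTPSIZE_SHIFT/LENGTH expand to in
current QEMU. A standalone sketch:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    int main(void)
    {
        uint32_t fpdscr_good = 4u << 16;  /* LTPSIZE == 4: accepted */
        uint32_t fpdscr_bad  = 2u << 16;  /* anything else fails the load */

        assert(extract32(fpdscr_good, 16, 3) == 4);
        assert(extract32(fpdscr_bad, 16, 3) != 4);
        return 0;
    }
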
Implement the v8.1M VSCCLRM insn, which zeros floating point
registers if there is an active floating point context.
This requires support in write_neon_element32() for the MO_32
element size, so add it.

Because we want to use arm_gen_condlabel(), we need to move
the definition of that function up in translate.c so it is
before the #include of translate-vfp.c.inc.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-5-peter.maydell@linaro.org
---
 target/arm/cpu.h               |  9 ++++
 target/arm/m-nocp.decode       |  8 +++-
 target/arm/translate.c         | 21 +++++----
 target/arm/translate-vfp.c.inc | 84 ++++++++++++++++++++++++++++++++++
 4 files changed, 111 insertions(+), 11 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id)
     return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0;
 }

+static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id)
+{
+    /*
+     * Return true if M-profile state handling insns
+     * (VSCCLRM, CLRM, FPCTX access insns) are implemented
+     */
+    return FIELD_EX32(id->id_pfr1, ID_PFR1, SECURITY) >= 3;
+}
+
 static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
 {
     /* Sadly this is encoded differently for A-profile and M-profile */
diff --git a/target/arm/m-nocp.decode b/target/arm/m-nocp.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m-nocp.decode
+++ b/target/arm/m-nocp.decode
@@ -XXX,XX +XXX,XX @@
 # If the coprocessor is not present or disabled then we will generate
 # the NOCP exception; otherwise we let the insn through to the main decode.

+%vd_dp  22:1 12:4
+%vd_sp  12:4 22:1
+
 &nocp cp

 {
   # Special cases which do not take an early NOCP: VLLDM and VLSTM
   VLLDM_VLSTM  1110 1100 001 l:1 rn:4 0000 1010 0000 0000
-  # TODO: VSCCLRM (new in v8.1M) is similar:
-  #VSCCLRM     1110 1100 1-01 1111 ---- 1011 ---- ---0
+  # VSCCLRM (new in v8.1M) is similar:
+  VSCCLRM      1110 1100 1.01 1111 .... 1011 imm:7 0 vd=%vd_dp size=3
+  VSCCLRM      1110 1100 1.01 1111 .... 1010 imm:8 vd=%vd_sp size=2

   NOCP 111- 1110 ---- ---- ---- cp:4 ---- ---- &nocp
   NOCP 111- 110- ---- ---- ---- cp:4 ---- ---- &nocp
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ void arm_translate_init(void)
     a64_translate_init();
 }

+/* Generate a label used for skipping this instruction */
+static void arm_gen_condlabel(DisasContext *s)
+{
+    if (!s->condjmp) {
+        s->condlabel = gen_new_label();
+        s->condjmp = 1;
+    }
+}
+
 /* Flags for the disas_set_da_iss info argument:
  * lower bits hold the Rt register number, higher bits are flags.
  */
@@ -XXX,XX +XXX,XX @@ static void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
     long off = neon_element_offset(reg, ele, memop);

     switch (memop) {
+    case MO_32:
+        tcg_gen_st32_i64(src, cpu_env, off);
+        break;
     case MO_64:
         tcg_gen_st_i64(src, cpu_env, off);
         break;
@@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s,
     s->base.is_jmp = DISAS_UPDATE_EXIT;
 }

-/* Generate a label used for skipping this instruction */
-static void arm_gen_condlabel(DisasContext *s)
-{
-    if (!s->condjmp) {
-        s->condlabel = gen_new_label();
-        s->condjmp = 1;
-    }
-}
-
 /* Skip this instruction if the ARM condition is false */
 static void arm_skip_unless(DisasContext *s, uint32_t cond)
 {
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
     return true;
 }

+static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
+{
+    int btmreg, topreg;
+    TCGv_i64 zero;
+    TCGv_i32 aspen, sfpa;
+
+    if (!dc_isar_feature(aa32_m_sec_state, s)) {
+        /* Before v8.1M, fall through in decode to NOCP check */
+        return false;
+    }
+
+    /* Explicitly UNDEF because this takes precedence over NOCP */
+    if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
+        unallocated_encoding(s);
+        return true;
+    }
+
+    if (!dc_isar_feature(aa32_vfp_simd, s)) {
+        /* NOP if we have neither FP nor MVE */
+        return true;
+    }
+
+    /*
+     * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
+     * active floating point context so we must NOP (without doing
+     * any lazy state preservation or the NOCP check).
+     */
+    aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
+    sfpa = load_cpu_field(v7m.control[M_REG_S]);
+    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
+    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
+    tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
+    tcg_gen_or_i32(sfpa, sfpa, aspen);
+    arm_gen_condlabel(s);
+    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);
+
+    if (s->fp_excp_el != 0) {
+        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+                           syn_uncategorized(), s->fp_excp_el);
+        return true;
+    }
+
+    topreg = a->vd + a->imm - 1;
+    btmreg = a->vd;
+
+    /* Convert to Sreg numbers if the insn specified in Dregs */
+    if (a->size == 3) {
+        topreg = topreg * 2 + 1;
+        btmreg *= 2;
+    }
+
+    if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
+        /* UNPREDICTABLE: we choose to undef */
+        unallocated_encoding(s);
+        return true;
+    }
+
+    /* Silently ignore requests to clear D16-D31 if they don't exist */
+    if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
+        topreg = 31;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    /* Zero the Sregs from btmreg to topreg inclusive. */
+    zero = tcg_const_i64(0);
+    if (btmreg & 1) {
+        write_neon_element64(zero, btmreg >> 1, 1, MO_32);
+        btmreg++;
+    }
+    for (; btmreg + 1 <= topreg; btmreg += 2) {
+        write_neon_element64(zero, btmreg >> 1, 0, MO_64);
+    }
+    if (btmreg == topreg) {
+        write_neon_element64(zero, btmreg >> 1, 0, MO_32);
+        btmreg++;
+    }
+    assert(btmreg == topreg + 1);
+    /* TODO: when MVE is implemented, zero VPR here */
+    return true;
+}
+
 static bool trans_NOCP(DisasContext *s, arg_nocp *a)
 {
     /*
--
2.20.1

Our current codegen for MVE always calls out to helper functions,
because some byte lanes might be predicated. The common case is that
in fact there is no predication active and all lanes should be
updated together, so we can produce better code by detecting that and
using the TCG generic vector infrastructure.

Add a TB flag that is set when we can guarantee that there is no
active MVE predication, and a bool in the DisasContext. Subsequent
patches will use this flag to generate improved code for some
instructions.

In most cases when the predication state changes we simply end the TB
after that instruction. For the code called from vfp_access_check()
that handles lazy state preservation and creating a new FP context,
we can usually avoid having to try to end the TB because luckily the
new value of the flag following the register changes in those
sequences doesn't depend on any runtime decisions. We do have to end
the TB if the guest has enabled lazy FP state preservation but not
automatic state preservation, but this is an odd corner case that is
not going to be common in real-world code.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-4-peter.maydell@linaro.org
---
 target/arm/cpu.h              |  4 +++-
 target/arm/translate.h        |  2 ++
 target/arm/helper.c           | 33 +++++++++++++++++++++++++++++++++
 target/arm/translate-m-nocp.c |  8 +++++++-
 target/arm/translate-mve.c    | 13 ++++++++++++-
 target/arm/translate-vfp.c    | 33 +++++++++++++++++++++++++++------
 target/arm/translate.c        |  8 ++++++++
 7 files changed, 92 insertions(+), 9 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU;
  * |  TBFLAG_AM32  |   +-----+----------+
  * |               |   |TBFLAG_M32|
  * +-------------+----------------+----------+
- * 31         23                5 4        0
+ * 31         23                6 5        0
  *
  * Unless otherwise noted, these bits are cached in env->hflags.
  */
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_M32, LSPACT, 2, 1)                 /* Not cached. */
 FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1)     /* Not cached. */
 /* Set if FPCCR.S does not match current security state */
 FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1)          /* Not cached. */
+/* Set if MVE insns are definitely not predicated by VPR or LTPSIZE */
+FIELD(TBFLAG_M32, MVE_NO_PRED, 5, 1)            /* Not cached. */

 /*
  * Bit usage when in AArch64 state
diff --git a/target/arm/translate.h b/target/arm/translate.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
     bool align_mem;
     /* True if PSTATE.IL is set */
     bool pstate_il;
+    /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
+    bool mve_no_pred;
     /*
      * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
      * < 0, set by the current instruction.
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
 #endif
 }

+static bool mve_no_pred(CPUARMState *env)
+{
+    /*
+     * Return true if there is definitely no predication of MVE
+     * instructions by VPR or LTPSIZE. (Returning false even if there
+     * isn't any predication is OK; generated code will just be
+     * a little worse.)
+     * If the CPU does not implement MVE then this TB flag is always 0.
+     *
+     * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
+     * logic in gen_update_fp_context() needs to be updated to match.
+     *
+     * We do not include the effect of the ECI bits here -- they are
+     * tracked in other TB flags. This simplifies the logic for
+     * "when did we emit code that changes the MVE_NO_PRED TB flag
+     * and thus need to end the TB?".
+     */
+    if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
+        return false;
+    }
+    if (env->v7m.vpr) {
+        return false;
+    }
+    if (env->v7m.ltpsize < 4) {
+        return false;
+    }
+    return true;
+}
+
 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                           target_ulong *cs_base, uint32_t *pflags)
 {
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
         if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
             DP_TBFLAG_M32(flags, LSPACT, 1);
         }
+
+        if (mve_no_pred(env)) {
+            DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
+        }
     } else {
         /*
          * Note that XSCALE_CPAR shares bits with VECSTRIDE.
diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-m-nocp.c
+++ b/target/arm/translate-m-nocp.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)

     clear_eci_state(s);

-    /* End the TB, because we have updated FP control bits */
+    /*
+     * End the TB, because we have updated FP control bits,
+     * and possibly VPR or LTPSIZE.
+     */
     s->base.is_jmp = DISAS_UPDATE_EXIT;
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
         store_cpu_field(control, v7m.control[M_REG_S]);
         tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
         gen_helper_vfp_set_fpscr(cpu_env, tmp);
+        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
         tcg_temp_free_i32(tmp);
         tcg_temp_free_i32(sfpa);
         break;
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
         }
         tmp = loadfn(s, opaque, true);
         store_cpu_field(tmp, v7m.vpr);
+        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
         break;
     case ARM_VFP_P0:
     {
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
         tcg_gen_deposit_i32(vpr, vpr, tmp,
                             R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
         store_cpu_field(vpr, v7m.vpr);
+        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
         tcg_temp_free_i32(tmp);
         break;
     }
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_LOGIC(VORR, gen_helper_mve_vorr)
 DO_LOGIC(VORN, gen_helper_mve_vorn)
 DO_LOGIC(VEOR, gen_helper_mve_veor)

-DO_LOGIC(VPSEL, gen_helper_mve_vpsel)
+static bool trans_VPSEL(DisasContext *s, arg_2op *a)
+{
+    /* This insn updates predication bits */
+    s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+    return do_2op(s, a, gen_helper_mve_vpsel);
+}

 #define DO_2OP(INSN, FN) \
     static bool trans_##INSN(DisasContext *s, arg_2op *a) \
@@ -XXX,XX +XXX,XX @@ static bool trans_VPNOT(DisasContext *s, arg_VPNOT *a)
     }

     gen_helper_mve_vpnot(cpu_env);
+    /* This insn updates predication bits */
+    s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
     mve_update_eci(s);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn)
         /* VPT */
         gen_vpst(s, a->mask);
     }
+    /* This insn updates predication bits */
+    s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
     mve_update_eci(s);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool do_vcmp_scalar(DisasContext *s, arg_vcmp_scalar *a,
         /* VPT */
         gen_vpst(s, a->mask);
     }
+    /* This insn updates predication bits */
+    s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
     mve_update_eci(s);
     return true;
 }
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -XXX,XX +XXX,XX @@ static inline long vfp_f16_offset(unsigned reg, bool top)
  * Generate code for M-profile lazy FP state preservation if needed;
  * this corresponds to the pseudocode PreserveFPState() function.
  */
-static void gen_preserve_fp_state(DisasContext *s)
+static void gen_preserve_fp_state(DisasContext *s, bool skip_context_update)
 {
     if (s->v7m_lspact) {
         /*
@@ -XXX,XX +XXX,XX @@ static void gen_preserve_fp_state(DisasContext *s)
          * any further FP insns in this TB.
          */
         s->v7m_lspact = false;
+        /*
+         * The helper might have zeroed VPR, so we do not know the
+         * correct value for the MVE_NO_PRED TB flag any more.
+         * If we're about to create a new fp context then that
+         * will precisely determine the MVE_NO_PRED value (see
+         * gen_update_fp_context()). Otherwise, we must:
+         *  - set s->mve_no_pred to false, so this instruction
+         *    is generated to use helper functions
+         *  - end the TB now, without chaining to the next TB
+         */
+        if (skip_context_update || !s->v7m_new_fp_ctxt_needed) {
+            s->mve_no_pred = false;
+            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+        }
     }
 }

@@ -XXX,XX +XXX,XX @@ static void gen_update_fp_context(DisasContext *s)
             TCGv_i32 z32 = tcg_const_i32(0);
             store_cpu_field(z32, v7m.vpr);
         }
-
         /*
-         * We don't need to arrange to end the TB, because the only
-         * parts of FPSCR which we cache in the TB flags are the VECLEN
-         * and VECSTRIDE, and those don't exist for M-profile.
+         * We just updated the FPSCR and VPR. Some of this state is cached
+         * in the MVE_NO_PRED TB flag. We want to avoid having to end the
+         * TB here, which means we need the new value of the MVE_NO_PRED
+         * flag to be exactly known here and the same for all executions.
+         * Luckily FPDSCR.LTPSIZE is always constant 4 and the VPR is
+         * always set to 0, so the new MVE_NO_PRED flag is always 1
+         * if and only if we have MVE.
+         *
+         * (The other FPSCR state cached in TB flags is VECLEN and VECSTRIDE,
+         * but those do not exist for M-profile, so are not relevant here.)
          */
+        s->mve_no_pred = dc_isar_feature(aa32_mve, s);

         if (s->v8m_secure) {
             bits |= R_V7M_CONTROL_SFPA_MASK;
@@ -XXX,XX +XXX,XX @@ bool vfp_access_check_m(DisasContext *s, bool skip_context_update)
     /* Handle M-profile lazy FP state mechanics */

     /* Trigger lazy-state preservation if necessary */
-    gen_preserve_fp_state(s);
+    gen_preserve_fp_state(s, skip_context_update);

     if (!skip_context_update) {
         /* Update ownership of FP context and create new FP context if needed */
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_DLS(DisasContext *s, arg_DLS *a)
         /* DLSTP: set FPSCR.LTPSIZE */
         tmp = tcg_const_i32(a->size);
         store_cpu_field(tmp, v7m.ltpsize);
+        s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
     }
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool trans_WLS(DisasContext *s, arg_WLS *a)
         assert(ok);
         tmp = tcg_const_i32(a->size);
         store_cpu_field(tmp, v7m.ltpsize);
+        /*
+         * LTPSIZE updated, but MVE_NO_PRED will always be the same thing (0)
+         * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK.
+         */
     }
     gen_jmp_tb(s, s->base.pc_next, 1);

@@ -XXX,XX +XXX,XX @@ static bool trans_VCTP(DisasContext *s, arg_VCTP *a)
     gen_helper_mve_vctp(cpu_env, masklen);
     tcg_temp_free_i32(masklen);
     tcg_temp_free_i32(rn_shifted);
+    /* This insn updates predication bits */
+    s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
     mve_update_eci(s);
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
         dc->v7m_new_fp_ctxt_needed =
             EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED);
         dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT);
+        dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED);
     } else {
         dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);
         dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B);
--
2.20.1

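Aside, not part of the patch: a worked example of the D-to-S register
range arithmetic in trans_VSCCLRM() above, with illustrative values:

    #include <assert.h>

    int main(void)
    {
        /* VSCCLRM {D2-D4}: vd = 2, imm = 3, size == 3 (D registers) */
        int vd = 2, imm = 3;
        int btmreg = vd * 2;                 /* lowest S reg: S4  */
        int topreg = (vd + imm - 1) * 2 + 1; /* highest S reg: S9 */

        assert(btmreg == 4 && topreg == 9);
        /* the zeroing loop then clears S4..S9, i.e. exactly D2..D4 */
        return 0;
    }
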
From: Havard Skinnemoen <hskinnemoen@google.com>

Dump the collected random data after a randomness test failure.

Note that this relies on the test having called
g_test_set_nonfatal_assertions() so we don't abort immediately on the
assertion failure.

Signed-off-by: Havard Skinnemoen <hskinnemoen@google.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: minor commit message tweak]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/qtest/npcm7xx_rng-test.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/tests/qtest/npcm7xx_rng-test.c b/tests/qtest/npcm7xx_rng-test.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/qtest/npcm7xx_rng-test.c
+++ b/tests/qtest/npcm7xx_rng-test.c
@@ -XXX,XX +XXX,XX @@

 #include "libqtest-single.h"
 #include "qemu/bitops.h"
+#include "qemu-common.h"

 #define RNG_BASE_ADDR 0xf000b000

@@ -XXX,XX +XXX,XX @@
 /* Number of bits to collect for randomness tests. */
 #define TEST_INPUT_BITS (128)

+static void dump_buf_if_failed(const uint8_t *buf, size_t size)
+{
+    if (g_test_failed()) {
+        qemu_hexdump(stderr, "", buf, size);
+    }
+}
+
 static void rng_writeb(unsigned int offset, uint8_t value)
 {
     writeb(RNG_BASE_ADDR + offset, value);
@@ -XXX,XX +XXX,XX @@ static void test_continuous_monobit(void)
     }

     g_assert_cmpfloat(calc_monobit_p(buf, sizeof(buf)), >, 0.01);
+    dump_buf_if_failed(buf, sizeof(buf));
 }

 /*
@@ -XXX,XX +XXX,XX @@ static void test_continuous_runs(void)
     }

     g_assert_cmpfloat(calc_runs_p(buf.l, sizeof(buf) * BITS_PER_BYTE), >, 0.01);
+    dump_buf_if_failed(buf.c, sizeof(buf));
 }

 /*
@@ -XXX,XX +XXX,XX @@ static void test_first_byte_monobit(void)
     }

     g_assert_cmpfloat(calc_monobit_p(buf, sizeof(buf)), >, 0.01);
+    dump_buf_if_failed(buf, sizeof(buf));
 }

 /*
@@ -XXX,XX +XXX,XX @@ static void test_first_byte_runs(void)
     }

     g_assert_cmpfloat(calc_runs_p(buf.l, sizeof(buf) * BITS_PER_BYTE), >, 0.01);
+    dump_buf_if_failed(buf.c, sizeof(buf));
 }

 int main(int argc, char **argv)
--
2.20.1

When not predicating, implement the MVE bitwise logical insns
directly using TCG vector operations.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-5-peter.maydell@linaro.org
---
 target/arm/translate-mve.c | 51 +++++++++++++++++++++++++++-----------
 1 file changed, 36 insertions(+), 15 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr mve_qreg_ptr(unsigned reg)
     return ret;
 }

+static bool mve_no_predication(DisasContext *s)
+{
+    /*
+     * Return true if we are executing the entire MVE instruction
+     * with no predication or partial-execution, and so we can safely
+     * use an inline TCG vector implementation.
+     */
+    return s->eci == 0 && s->mve_no_pred;
+}
+
 static bool mve_check_qreg_bank(DisasContext *s, int qmask)
 {
     /*
@@ -XXX,XX +XXX,XX @@ static bool trans_VNEG_fp(DisasContext *s, arg_1op *a)
     return do_1op(s, a, fns[a->size]);
 }

-static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn)
+static bool do_2op_vec(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn,
+                       GVecGen3Fn *vecfn)
 {
     TCGv_ptr qd, qn, qm;

@@ -XXX,XX +XXX,XX @@ static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn)
         return true;
     }

-    qd = mve_qreg_ptr(a->qd);
-    qn = mve_qreg_ptr(a->qn);
-    qm = mve_qreg_ptr(a->qm);
-    fn(cpu_env, qd, qn, qm);
-    tcg_temp_free_ptr(qd);
-    tcg_temp_free_ptr(qn);
-    tcg_temp_free_ptr(qm);
+    if (vecfn && mve_no_predication(s)) {
+        vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qn),
+              mve_qreg_offset(a->qm), 16, 16);
+    } else {
+        qd = mve_qreg_ptr(a->qd);
+        qn = mve_qreg_ptr(a->qn);
+        qm = mve_qreg_ptr(a->qm);
+        fn(cpu_env, qd, qn, qm);
+        tcg_temp_free_ptr(qd);
+        tcg_temp_free_ptr(qn);
+        tcg_temp_free_ptr(qm);
+    }
     mve_update_eci(s);
     return true;
 }

-#define DO_LOGIC(INSN, HELPER) \
+static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn *fn)
+{
+    return do_2op_vec(s, a, fn, NULL);
+}
+
+#define DO_LOGIC(INSN, HELPER, VECFN) \
     static bool trans_##INSN(DisasContext *s, arg_2op *a) \
     { \
-        return do_2op(s, a, HELPER); \
+        return do_2op_vec(s, a, HELPER, VECFN); \
     }

-DO_LOGIC(VAND, gen_helper_mve_vand)
-DO_LOGIC(VBIC, gen_helper_mve_vbic)
-DO_LOGIC(VORR, gen_helper_mve_vorr)
-DO_LOGIC(VORN, gen_helper_mve_vorn)
-DO_LOGIC(VEOR, gen_helper_mve_veor)
+DO_LOGIC(VAND, gen_helper_mve_vand, tcg_gen_gvec_and)
+DO_LOGIC(VBIC, gen_helper_mve_vbic, tcg_gen_gvec_andc)
+DO_LOGIC(VORR, gen_helper_mve_vorr, tcg_gen_gvec_or)
+DO_LOGIC(VORN, gen_helper_mve_vorn, tcg_gen_gvec_orc)
+DO_LOGIC(VEOR, gen_helper_mve_veor, tcg_gen_gvec_xor)

 static bool trans_VPSEL(DisasContext *s, arg_2op *a)
 {
--
2.20.1

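Aside, not part of the patch: the GLib pattern the commit message
relies on, as a self-contained sketch. With nonfatal assertions a
failing g_assert_cmp* marks the test failed rather than aborting, so
a later dump helper still runs:

    #include <glib.h>

    static void test_example(void)
    {
        int p = 0;

        g_assert_cmpint(p, >, 0);   /* fails, but does not abort... */
        if (g_test_failed()) {      /* ...so we can emit diagnostics */
            g_printerr("p was %d\n", p);
        }
    }

    int main(int argc, char **argv)
    {
        g_test_init(&argc, &argv, NULL);
        g_test_set_nonfatal_assertions();
        g_test_add_func("/example/dump-on-failure", test_example);
        return g_test_run();
    }
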
Deleted patch
From: Alex Chen <alex.chen@huawei.com>

We should use printf format specifier "%u" instead of "%d" for
argument of type "unsigned int".

Reported-by: Euler Robot <euler.robot@huawei.com>
Signed-off-by: Alex Chen <alex.chen@huawei.com>
Message-id: 20201126111109.112238-3-alex.chen@huawei.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/misc/imx31_ccm.c | 14 +++++++-------
hw/misc/imx_ccm.c | 4 ++--
2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/hw/misc/imx31_ccm.c b/hw/misc/imx31_ccm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/imx31_ccm.c
+++ b/hw/misc/imx31_ccm.c
@@ -XXX,XX +XXX,XX @@ static const char *imx31_ccm_reg_name(uint32_t reg)
case IMX31_CCM_PDR2_REG:
return "PDR2";
default:
- sprintf(unknown, "[%d ?]", reg);
+ sprintf(unknown, "[%u ?]", reg);
return unknown;
}
}
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_pll_ref_clk(IMXCCMState *dev)
freq = CKIH_FREQ;
}

- DPRINTF("freq = %d\n", freq);
+ DPRINTF("freq = %u\n", freq);

return freq;
}
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_mpll_clk(IMXCCMState *dev)
freq = imx_ccm_calc_pll(s->reg[IMX31_CCM_MPCTL_REG],
imx31_ccm_get_pll_ref_clk(dev));

- DPRINTF("freq = %d\n", freq);
+ DPRINTF("freq = %u\n", freq);

return freq;
}
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_mcu_main_clk(IMXCCMState *dev)
freq = imx31_ccm_get_mpll_clk(dev);
}

- DPRINTF("freq = %d\n", freq);
+ DPRINTF("freq = %u\n", freq);

return freq;
}
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_hclk_clk(IMXCCMState *dev)
freq = imx31_ccm_get_mcu_main_clk(dev)
/ (1 + EXTRACT(s->reg[IMX31_CCM_PDR0_REG], MAX));

- DPRINTF("freq = %d\n", freq);
+ DPRINTF("freq = %u\n", freq);

return freq;
}
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_ipg_clk(IMXCCMState *dev)
freq = imx31_ccm_get_hclk_clk(dev)
/ (1 + EXTRACT(s->reg[IMX31_CCM_PDR0_REG], IPG));

- DPRINTF("freq = %d\n", freq);
+ DPRINTF("freq = %u\n", freq);

return freq;
}
@@ -XXX,XX +XXX,XX @@ static uint32_t imx31_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
break;
}

- DPRINTF("Clock = %d) = %d\n", clock, freq);
+ DPRINTF("Clock = %d) = %u\n", clock, freq);

return freq;
}
diff --git a/hw/misc/imx_ccm.c b/hw/misc/imx_ccm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/misc/imx_ccm.c
+++ b/hw/misc/imx_ccm.c
@@ -XXX,XX +XXX,XX @@ uint32_t imx_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
freq = klass->get_clock_frequency(dev, clock);
}

- DPRINTF("(clock = %d) = %d\n", clock, freq);
+ DPRINTF("(clock = %d) = %u\n", clock, freq);

return freq;
}
@@ -XXX,XX +XXX,XX @@ uint32_t imx_ccm_calc_pll(uint32_t pllreg, uint32_t base_freq)
freq = ((2 * (base_freq >> 10) * (mfi * mfd + mfn)) /
(mfd * pd)) << 10;

- DPRINTF("(pllreg = 0x%08x, base_freq = %d) = %d\n", pllreg, base_freq,
+ DPRINTF("(pllreg = 0x%08x, base_freq = %u) = %d\n", pllreg, base_freq,
freq);

return freq;
--
2.20.1
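The practical effect of the %d/%u mismatch fixed above is easy to demonstrate; a minimal standalone illustration (not part of the patch):

#include <stdio.h>
#include <limits.h>

int main(void)
{
    unsigned int freq = UINT_MAX;
    /* "%d" reinterprets the bits as signed: typically prints -1 */
    printf("freq = %d\n", freq);
    /* "%u" matches the argument type: prints 4294967295 */
    printf("freq = %u\n", freq);
    return 0;
}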
Correct a typo in the name we give the NVIC object.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-28-peter.maydell@linaro.org
---
hw/arm/armv7m.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -XXX,XX +XXX,XX @@ static void armv7m_instance_init(Object *obj)

memory_region_init(&s->container, obj, "armv7m-container", UINT64_MAX);

- object_initialize_child(obj, "nvnic", &s->nvic, TYPE_NVIC);
+ object_initialize_child(obj, "nvic", &s->nvic, TYPE_NVIC);
object_property_add_alias(obj, "num-irq",
OBJECT(&s->nvic), "num-irq");

--
2.20.1

Optimize MVE arithmetic ops when we have a TCG
vector operation we can use.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-6-peter.maydell@linaro.org
---
target/arm/translate-mve.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VPSEL(DisasContext *s, arg_2op *a)
return do_2op(s, a, gen_helper_mve_vpsel);
}

-#define DO_2OP(INSN, FN) \
+#define DO_2OP_VEC(INSN, FN, VECFN) \
static bool trans_##INSN(DisasContext *s, arg_2op *a) \
{ \
static MVEGenTwoOpFn * const fns[] = { \
@@ -XXX,XX +XXX,XX @@ static bool trans_VPSEL(DisasContext *s, arg_2op *a)
gen_helper_mve_##FN##w, \
NULL, \
}; \
- return do_2op(s, a, fns[a->size]); \
+ return do_2op_vec(s, a, fns[a->size], VECFN); \
}

-DO_2OP(VADD, vadd)
-DO_2OP(VSUB, vsub)
-DO_2OP(VMUL, vmul)
+#define DO_2OP(INSN, FN) DO_2OP_VEC(INSN, FN, NULL)
+
+DO_2OP_VEC(VADD, vadd, tcg_gen_gvec_add)
+DO_2OP_VEC(VSUB, vsub, tcg_gen_gvec_sub)
+DO_2OP_VEC(VMUL, vmul, tcg_gen_gvec_mul)
DO_2OP(VMULH_S, vmulhs)
DO_2OP(VMULH_U, vmulhu)
DO_2OP(VRMULH_S, vrmulhs)
DO_2OP(VRMULH_U, vrmulhu)
-DO_2OP(VMAX_S, vmaxs)
-DO_2OP(VMAX_U, vmaxu)
-DO_2OP(VMIN_S, vmins)
-DO_2OP(VMIN_U, vminu)
+DO_2OP_VEC(VMAX_S, vmaxs, tcg_gen_gvec_smax)
+DO_2OP_VEC(VMAX_U, vmaxu, tcg_gen_gvec_umax)
+DO_2OP_VEC(VMIN_S, vmins, tcg_gen_gvec_smin)
+DO_2OP_VEC(VMIN_U, vminu, tcg_gen_gvec_umin)
DO_2OP(VABD_S, vabds)
DO_2OP(VABD_U, vabdu)
DO_2OP(VHADD_S, vhadds)
--
2.20.1
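For reference, a hand-expanded view of what one DO_2OP_VEC() instantiation in the patch above produces (a sketch of the macro expansion, not literal patch content). Insns with a direct gvec equivalent pass it through; everything else (VMULH, VABD, VHADD, ...) keeps the NULL vecfn and always takes the out-of-line helper path:

/* Roughly what DO_2OP_VEC(VADD, vadd, tcg_gen_gvec_add) expands to: */
static bool trans_VADD(DisasContext *s, arg_2op *a)
{
    static MVEGenTwoOpFn * const fns[] = {
        gen_helper_mve_vaddb,
        gen_helper_mve_vaddh,
        gen_helper_mve_vaddw,
        NULL,
    };
    return do_2op_vec(s, a, fns[a->size], tcg_gen_gvec_add);
}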
For v8.1M the architecture mandates that CPUs must provide at
least the "minimal RAS implementation" from the Reliability,
Availability and Serviceability extension. This consists of:
* an ESB instruction which is a NOP
-- since it is in the HINT space we need only add a comment
* an RFSR register which will RAZ/WI
* a RAZ/WI AIRCR.IESB bit
-- the code which handles writes to AIRCR does not allow setting
of RES0 bits, so we already treat this as RAZ/WI; add a comment
noting that this is deliberate
* minimal implementation of the RAS register block at 0xe0005000
-- this will be in a subsequent commit
* setting the ID_PFR0.RAS field to 0b0010
-- we will do this when we add the Cortex-M55 CPU model

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-26-peter.maydell@linaro.org
---
target/arm/cpu.h | 14 ++++++++++++++
target/arm/t32.decode | 4 ++++
hw/intc/armv7m_nvic.c | 13 +++++++++++++
3 files changed, 31 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(ID_MMFR4, LSM, 20, 4)
FIELD(ID_MMFR4, CCIDX, 24, 4)
FIELD(ID_MMFR4, EVT, 28, 4)

+FIELD(ID_PFR0, STATE0, 0, 4)
+FIELD(ID_PFR0, STATE1, 4, 4)
+FIELD(ID_PFR0, STATE2, 8, 4)
+FIELD(ID_PFR0, STATE3, 12, 4)
+FIELD(ID_PFR0, CSV2, 16, 4)
+FIELD(ID_PFR0, AMU, 20, 4)
+FIELD(ID_PFR0, DIT, 24, 4)
+FIELD(ID_PFR0, RAS, 28, 4)
+
FIELD(ID_PFR1, PROGMOD, 0, 4)
FIELD(ID_PFR1, SECURITY, 4, 4)
FIELD(ID_PFR1, MPROGMOD, 8, 4)
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0;
}

+static inline bool isar_feature_aa32_ras(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0;
+}
+
static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id)
{
return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0;
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/t32.decode
+++ b/target/arm/t32.decode
@@ -XXX,XX +XXX,XX @@ CLZ 1111 1010 1011 ---- 1111 .... 1000 .... @rdm
# SEV 1111 0011 1010 1111 1000 0000 0000 0100
# SEVL 1111 0011 1010 1111 1000 0000 0000 0101

+ # For M-profile minimal-RAS ESB can be a NOP, which is the
+ # default behaviour since it is in the hint space.
+ # ESB 1111 0011 1010 1111 1000 0000 0001 0000
+
# The canonical nop ends in 0000 0000, but the whole rest
# of the space is "reserved hint, behaves as nop".
NOP 1111 0011 1010 1111 1000 0000 ---- ----
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
return 0;
}
return cpu->env.v7m.sfar;
+ case 0xf04: /* RFSR */
+ if (!cpu_isar_feature(aa32_ras, cpu)) {
+ goto bad_offset;
+ }
+ /* We provide minimal-RAS only: RFSR is RAZ/WI */
+ return 0;
case 0xf34: /* FPCCR */
if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
return 0;
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
R_V7M_AIRCR_PRIGROUP_SHIFT,
R_V7M_AIRCR_PRIGROUP_LENGTH);
}
+ /* AIRCR.IESB is RAZ/WI because we implement only minimal RAS */
if (attrs.secure) {
/* These bits are only writable by secure */
cpu->env.v7m.aircr = value &
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
}
break;
}
+ case 0xf04: /* RFSR */
+ if (!cpu_isar_feature(aa32_ras, cpu)) {
+ goto bad_offset;
+ }
+ /* We provide minimal-RAS only: RFSR is RAZ/WI */
+ break;
case 0xf34: /* FPCCR */
if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
/* Not all bits here are banked. */
--
2.20.1

Optimize the MVE VNEG and VABS insns by using TCG
vector ops when possible.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-7-peter.maydell@linaro.org
---
target/arm/translate-mve.c | 32 ++++++++++++++++++++++----------
1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
return true;
}

-static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
+static bool do_1op_vec(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn,
+ GVecGen2Fn vecfn)
{
TCGv_ptr qd, qm;

@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
return true;
}

- qd = mve_qreg_ptr(a->qd);
- qm = mve_qreg_ptr(a->qm);
- fn(cpu_env, qd, qm);
- tcg_temp_free_ptr(qd);
- tcg_temp_free_ptr(qm);
+ if (vecfn && mve_no_predication(s)) {
+ vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qm), 16, 16);
+ } else {
+ qd = mve_qreg_ptr(a->qd);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qm);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qm);
+ }
mve_update_eci(s);
return true;
}

-#define DO_1OP(INSN, FN) \
+static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
+{
+ return do_1op_vec(s, a, fn, NULL);
+}
+
+#define DO_1OP_VEC(INSN, FN, VECFN) \
static bool trans_##INSN(DisasContext *s, arg_1op *a) \
{ \
static MVEGenOneOpFn * const fns[] = { \
@@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
gen_helper_mve_##FN##w, \
NULL, \
}; \
- return do_1op(s, a, fns[a->size]); \
+ return do_1op_vec(s, a, fns[a->size], VECFN); \
}

+#define DO_1OP(INSN, FN) DO_1OP_VEC(INSN, FN, NULL)
+
DO_1OP(VCLZ, vclz)
DO_1OP(VCLS, vcls)
-DO_1OP(VABS, vabs)
-DO_1OP(VNEG, vneg)
+DO_1OP_VEC(VABS, vabs, tcg_gen_gvec_abs)
+DO_1OP_VEC(VNEG, vneg, tcg_gen_gvec_neg)
DO_1OP(VQABS, vqabs)
DO_1OP(VQNEG, vqneg)
DO_1OP(VMAXA, vmaxa)
--
2.20.1
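In guest-visible terms, the minimal-RAS RFSR handling added above means a privileged M-profile guest can probe RFSR without faulting. A hypothetical guest-side fragment (illustrative only; the absolute address is derived from the decode above: sysreg offset 0xf04 within the SCS at 0xe000e000 gives 0xe000ef04):

#include <stdint.h>

static volatile uint32_t *const rfsr = (uint32_t *)0xe000ef04;

void probe_rfsr(void)
{
    uint32_t v = *rfsr;   /* privileged read: returns 0 (RAZ) */
    (void)v;
    *rfsr = 0xffffffff;   /* privileged write: ignored (WI) */
    /* an unprivileged access to the same address would BusFault */
}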
v8.1M introduces a new TRD flag in the CCR register, which enables
checking for stack frame integrity signatures on SG instructions.
This bit is not banked, and is always RAZ/WI to Non-secure code.
Adjust the code for handling CCR reads and writes to handle this.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-23-peter.maydell@linaro.org
---
target/arm/cpu.h | 2 ++
hw/intc/armv7m_nvic.c | 26 ++++++++++++++++++--------
2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
FIELD(V7M_CCR, DC, 16, 1)
FIELD(V7M_CCR, IC, 17, 1)
FIELD(V7M_CCR, BP, 18, 1)
+FIELD(V7M_CCR, LOB, 19, 1)
+FIELD(V7M_CCR, TRD, 20, 1)

/* V7M SCR bits */
FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
}
return cpu->env.v7m.scr[attrs.secure];
case 0xd14: /* Configuration Control. */
- /* The BFHFNMIGN bit is the only non-banked bit; we
- * keep it in the non-secure copy of the register.
+ /*
+ * Non-banked bits: BFHFNMIGN (stored in the NS copy of the register)
+ * and TRD (stored in the S copy of the register)
*/
val = cpu->env.v7m.ccr[attrs.secure];
val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
cpu->env.v7m.scr[attrs.secure] = value;
break;
case 0xd14: /* Configuration Control. */
+ {
+ uint32_t mask;
+
if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
goto bad_offset;
}

/* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
- value &= (R_V7M_CCR_STKALIGN_MASK |
- R_V7M_CCR_BFHFNMIGN_MASK |
- R_V7M_CCR_DIV_0_TRP_MASK |
- R_V7M_CCR_UNALIGN_TRP_MASK |
- R_V7M_CCR_USERSETMPEND_MASK |
- R_V7M_CCR_NONBASETHRDENA_MASK);
+ mask = R_V7M_CCR_STKALIGN_MASK |
+ R_V7M_CCR_BFHFNMIGN_MASK |
+ R_V7M_CCR_DIV_0_TRP_MASK |
+ R_V7M_CCR_UNALIGN_TRP_MASK |
+ R_V7M_CCR_USERSETMPEND_MASK |
+ R_V7M_CCR_NONBASETHRDENA_MASK;
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && attrs.secure) {
+ /* TRD is always RAZ/WI from NS */
+ mask |= R_V7M_CCR_TRD_MASK;
+ }
+ value &= mask;

if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
/* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
@@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,

cpu->env.v7m.ccr[attrs.secure] = value;
break;
+ }
case 0xd24: /* System Handler Control and State (SHCSR) */
if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
goto bad_offset;
--
2.20.1

Optimize the MVE VDUP insns by using TCG vector ops when possible.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-8-peter.maydell@linaro.org
---
target/arm/translate-mve.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
return true;
}

- qd = mve_qreg_ptr(a->qd);
rt = load_reg(s, a->rt);
- tcg_gen_dup_i32(a->size, rt, rt);
- gen_helper_mve_vdup(cpu_env, qd, rt);
- tcg_temp_free_ptr(qd);
+ if (mve_no_predication(s)) {
+ tcg_gen_gvec_dup_i32(a->size, mve_qreg_offset(a->qd), 16, 16, rt);
+ } else {
+ qd = mve_qreg_ptr(a->qd);
+ tcg_gen_dup_i32(a->size, rt, rt);
+ gen_helper_mve_vdup(cpu_env, qd, rt);
+ tcg_temp_free_ptr(qd);
+ }
tcg_temp_free_i32(rt);
mve_update_eci(s);
return true;
--
2.20.1
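One line of the VDUP fast path above is worth spelling out: tcg_gen_gvec_dup_i32() broadcasts the 32-bit source across the vector while honouring the element size, so the per-lane duplication done by the helper is no longer needed. A minimal annotated sketch (comments are editorial, not from the patch):

/* Broadcast rt into every lane of Qd. For a->size == MO_16 each
 * 16-bit lane receives the low 16 bits of rt, and similarly for
 * MO_8 and MO_32. oprsz == maxsz == 16 bytes: one 128-bit Q register.
 */
tcg_gen_gvec_dup_i32(a->size, mve_qreg_offset(a->qd), 16, 16, rt);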
Factor out the code which handles M-profile lazy FP state preservation
from full_vfp_access_check(); accesses to the FPCXT_NS register are
a special case which needs to do just this part (corresponding in the
pseudocode to the PreserveFPState() function), and not the full
set of actions matching the pseudocode ExecuteFPCheck() which
normal FP instructions need to do.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20201119215617.29887-13-peter.maydell@linaro.org
---
target/arm/translate-vfp.c.inc | 45 ++++++++++++++++++++--------------
1 file changed, 27 insertions(+), 18 deletions(-)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static inline long vfp_f16_offset(unsigned reg, bool top)
return offs;
}

+/*
+ * Generate code for M-profile lazy FP state preservation if needed;
+ * this corresponds to the pseudocode PreserveFPState() function.
+ */
+static void gen_preserve_fp_state(DisasContext *s)
+{
+ if (s->v7m_lspact) {
+ /*
+ * Lazy state saving affects external memory and also the NVIC,
+ * so we must mark it as an IO operation for icount (and cause
+ * this to be the last insn in the TB).
+ */
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
+ gen_io_start();
+ }
+ gen_helper_v7m_preserve_fp_state(cpu_env);
+ /*
+ * If the preserve_fp_state helper doesn't throw an exception
+ * then it will clear LSPACT; we don't need to repeat this for
+ * any further FP insns in this TB.
+ */
+ s->v7m_lspact = false;
+ }
+}
+
/*
* Check that VFP access is enabled. If it is, do the necessary
* M-profile lazy-FP handling and then return true.
@@ -XXX,XX +XXX,XX @@ static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
/* Handle M-profile lazy FP state mechanics */

/* Trigger lazy-state preservation if necessary */
- if (s->v7m_lspact) {
- /*
- * Lazy state saving affects external memory and also the NVIC,
- * so we must mark it as an IO operation for icount (and cause
- * this to be the last insn in the TB).
- */
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- s->base.is_jmp = DISAS_UPDATE_EXIT;
- gen_io_start();
- }
- gen_helper_v7m_preserve_fp_state(cpu_env);
- /*
- * If the preserve_fp_state helper doesn't throw an exception
- * then it will clear LSPACT; we don't need to repeat this for
- * any further FP insns in this TB.
- */
- s->v7m_lspact = false;
- }
+ gen_preserve_fp_state(s);

/* Update ownership of FP context: set FPCCR.S to match current state */
if (s->v8m_fpccr_s_wrong) {
--
2.20.1

Optimize the MVE VMVN insn by using TCG vector ops when possible.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-9-peter.maydell@linaro.org
---
target/arm/translate-mve.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_1op *a)

static bool trans_VMVN(DisasContext *s, arg_1op *a)
{
- return do_1op(s, a, gen_helper_mve_vmvn);
+ return do_1op_vec(s, a, gen_helper_mve_vmvn, tcg_gen_gvec_not);
}

static bool trans_VABS_fp(DisasContext *s, arg_1op *a)
--
2.20.1
For M-profile CPUs, the range from 0xe0000000 to 0xe00fffff is the
Private Peripheral Bus range, which includes all of the memory mapped
devices and registers that are part of the CPU itself, including the
NVIC, systick timer, and debug and trace components like the Data
Watchpoint and Trace unit (DWT). Within this large region, the range
0xe000e000 to 0xe000efff is the System Control Space (NVIC, system
registers, systick) and 0xe002e000 to 0xe002efff is its Non-secure
alias.

The architecture is clear that within the SCS unimplemented registers
should be RES0 for privileged accesses and generate BusFault for
unprivileged accesses, and we currently implement this.

It is less clear about how to handle accesses to unimplemented
regions of the wider PPB. Unprivileged accesses should definitely
cause BusFaults (R_DQQS), but the behaviour of privileged accesses is
not given as a general rule. However, the register definitions of
individual registers for components like the DWT all state that they
are RES0 if the relevant component is not implemented, so the
simplest way to provide that is to provide RAZ/WI for the whole range
for privileged accesses. (The v7M Arm ARM does say that reserved
registers should be UNK/SBZP.)

Expand the container MemoryRegion that the NVIC exposes so that
it covers the whole PPB space. This means:
* moving the address that the ARMV7M device maps it to down by
0xe000 bytes
* moving the offsets within the container of all the
subregions forward by 0xe000 bytes
* adding a new default MemoryRegion that covers the whole container
at a lower priority than anything else and which provides the
RAZWI/BusFault behaviour

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-2-peter.maydell@linaro.org
---
include/hw/intc/armv7m_nvic.h | 1 +
hw/arm/armv7m.c | 2 +-
hw/intc/armv7m_nvic.c | 78 ++++++++++++++++++++++++++++++-----
3 files changed, 69 insertions(+), 12 deletions(-)

diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/intc/armv7m_nvic.h
+++ b/include/hw/intc/armv7m_nvic.h
@@ -XXX,XX +XXX,XX @@ struct NVICState {
MemoryRegion systickmem;
MemoryRegion systick_ns_mem;
MemoryRegion container;
+ MemoryRegion defaultmem;

uint32_t num_irq;
qemu_irq excpout;
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -XXX,XX +XXX,XX @@ static void armv7m_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(sbd, 0,
qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ));

- memory_region_add_subregion(&s->container, 0xe000e000,
+ memory_region_add_subregion(&s->container, 0xe0000000,
sysbus_mmio_get_region(sbd, 0));

for (i = 0; i < ARRAY_SIZE(s->bitband); i++) {
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps nvic_systick_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};

+/*
+ * Unassigned portions of the PPB space are RAZ/WI for privileged
+ * accesses, and fault for non-privileged accesses.
+ */
+static MemTxResult ppb_default_read(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ qemu_log_mask(LOG_UNIMP, "Read of unassigned area of PPB: offset 0x%x\n",
+ (uint32_t)addr);
+ if (attrs.user) {
+ return MEMTX_ERROR;
+ }
+ *data = 0;
+ return MEMTX_OK;
+}
+
+static MemTxResult ppb_default_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ qemu_log_mask(LOG_UNIMP, "Write of unassigned area of PPB: offset 0x%x\n",
+ (uint32_t)addr);
+ if (attrs.user) {
+ return MEMTX_ERROR;
+ }
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps ppb_default_ops = {
+ .read_with_attrs = ppb_default_read,
+ .write_with_attrs = ppb_default_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 8,
+};
+
static int nvic_post_load(void *opaque, int version_id)
{
NVICState *s = opaque;
@@ -XXX,XX +XXX,XX @@ static void nvic_systick_trigger(void *opaque, int n, int level)
static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
NVICState *s = NVIC(dev);
- int regionlen;

/* The armv7m container object will have set our CPU pointer */
if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
M_REG_S));
}

- /* The NVIC and System Control Space (SCS) starts at 0xe000e000
+ /*
+ * This device provides a single sysbus memory region which
+ * represents the whole of the "System PPB" space. This is the
+ * range from 0xe0000000 to 0xe00fffff and includes the NVIC,
+ * the System Control Space (system registers), the systick timer,
+ * and for CPUs with the Security extension an NS banked version
+ * of all of these.
+ *
+ * The default behaviour for unimplemented registers/ranges
+ * (for instance the Data Watchpoint and Trace unit at 0xe0001000)
+ * is to RAZ/WI for privileged access and BusFault for non-privileged
+ * access.
+ *
+ * The NVIC and System Control Space (SCS) starts at 0xe000e000
* and looks like this:
* 0x004 - ICTR
* 0x010 - 0xff - systick
@@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
* generally code determining which banked register to use should
* use attrs.secure; code determining actual behaviour of the system
* should use env->v7m.secure.
+ *
+ * The container covers the whole PPB space. Within it the priority
+ * of overlapping regions is:
+ * - default region (for RAZ/WI and BusFault) : -1
+ * - system register regions : 0
+ * - systick : 1
+ * This is because the systick device is a small block of registers
+ * in the middle of the other system control registers.
*/
- regionlen = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? 0x21000 : 0x1000;
- memory_region_init(&s->container, OBJECT(s), "nvic", regionlen);
- /* The system register region goes at the bottom of the priority
- * stack as it covers the whole page.
- */
+ memory_region_init(&s->container, OBJECT(s), "nvic", 0x100000);
+ memory_region_init_io(&s->defaultmem, OBJECT(s), &ppb_default_ops, s,
+ "nvic-default", 0x100000);
+ memory_region_add_subregion_overlap(&s->container, 0, &s->defaultmem, -1);
memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
"nvic_sysregs", 0x1000);
- memory_region_add_subregion(&s->container, 0, &s->sysregmem);
+ memory_region_add_subregion(&s->container, 0xe000, &s->sysregmem);

memory_region_init_io(&s->systickmem, OBJECT(s),
&nvic_systick_ops, s,
"nvic_systick", 0xe0);

- memory_region_add_subregion_overlap(&s->container, 0x10,
+ memory_region_add_subregion_overlap(&s->container, 0xe010,
&s->systickmem, 1);

if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
&nvic_sysreg_ns_ops, &s->sysregmem,
"nvic_sysregs_ns", 0x1000);
- memory_region_add_subregion(&s->container, 0x20000, &s->sysreg_ns_mem);
+ memory_region_add_subregion(&s->container, 0x2e000, &s->sysreg_ns_mem);
memory_region_init_io(&s->systick_ns_mem, OBJECT(s),
&nvic_sysreg_ns_ops, &s->systickmem,
"nvic_systick_ns", 0xe0);
- memory_region_add_subregion_overlap(&s->container, 0x20010,
+ memory_region_add_subregion_overlap(&s->container, 0x2e010,
&s->systick_ns_mem, 1);
}

--
2.20.1

Optimize the MVE VSHL and VSHR immediate forms by using TCG vector
ops when possible.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-10-peter.maydell@linaro.org
---
target/arm/translate-mve.c | 83 +++++++++++++++++++++++++++++---------
1 file changed, 63 insertions(+), 20 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
return do_1imm(s, a, fn);
}

-static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
- bool negateshift)
+static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
+ bool negateshift, GVecGen2iFn vecfn)
{
TCGv_ptr qd, qm;
int shift = a->shift;
@@ -XXX,XX +XXX,XX @@ static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
shift = -shift;
}

- qd = mve_qreg_ptr(a->qd);
- qm = mve_qreg_ptr(a->qm);
- fn(cpu_env, qd, qm, tcg_constant_i32(shift));
- tcg_temp_free_ptr(qd);
- tcg_temp_free_ptr(qm);
+ if (vecfn && mve_no_predication(s)) {
+ vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qm),
+ shift, 16, 16);
+ } else {
+ qd = mve_qreg_ptr(a->qd);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qm, tcg_constant_i32(shift));
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qm);
+ }
mve_update_eci(s);
return true;
}

-#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \
- static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
- { \
- static MVEGenTwoOpShiftFn * const fns[] = { \
- gen_helper_mve_##FN##b, \
- gen_helper_mve_##FN##h, \
- gen_helper_mve_##FN##w, \
- NULL, \
- }; \
- return do_2shift(s, a, fns[a->size], NEGATESHIFT); \
+static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
+ bool negateshift)
+{
+ return do_2shift_vec(s, a, fn, negateshift, NULL);
+}
+
+#define DO_2SHIFT_VEC(INSN, FN, NEGATESHIFT, VECFN) \
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+ { \
+ static MVEGenTwoOpShiftFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_2shift_vec(s, a, fns[a->size], NEGATESHIFT, VECFN); \
}

-DO_2SHIFT(VSHLI, vshli_u, false)
+#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \
+ DO_2SHIFT_VEC(INSN, FN, NEGATESHIFT, NULL)
+
+static void do_gvec_shri_s(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ /*
+ * We get here with a negated shift count, and we must handle
+ * shifts by the element size, which tcg_gen_gvec_sari() does not do.
+ */
+ shift = -shift;
+ if (shift == (8 << vece)) {
+ shift--;
+ }
+ tcg_gen_gvec_sari(vece, dofs, aofs, shift, oprsz, maxsz);
+}
+
+static void do_gvec_shri_u(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ /*
+ * We get here with a negated shift count, and we must handle
+ * shifts by the element size, which tcg_gen_gvec_shri() does not do.
+ */
+ shift = -shift;
+ if (shift == (8 << vece)) {
+ tcg_gen_gvec_dup_imm(vece, dofs, oprsz, maxsz, 0);
+ } else {
+ tcg_gen_gvec_shri(vece, dofs, aofs, shift, oprsz, maxsz);
+ }
+}
+
+DO_2SHIFT_VEC(VSHLI, vshli_u, false, tcg_gen_gvec_shli)
DO_2SHIFT(VQSHLI_S, vqshli_s, false)
DO_2SHIFT(VQSHLI_U, vqshli_u, false)
DO_2SHIFT(VQSHLUI, vqshlui_s, false)
/* These right shifts use a left-shift helper with negated shift count */
-DO_2SHIFT(VSHRI_S, vshli_s, true)
-DO_2SHIFT(VSHRI_U, vshli_u, true)
+DO_2SHIFT_VEC(VSHRI_S, vshli_s, true, do_gvec_shri_s)
+DO_2SHIFT_VEC(VSHRI_U, vshli_u, true, do_gvec_shri_u)
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
DO_2SHIFT(VRSHRI_U, vrshli_u, true)
--
2.20.1
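The element-size special cases in do_gvec_shri_s()/do_gvec_shri_u() above deserve a worked example (illustrative arithmetic only): MVE permits an immediate right shift equal to the element width, which the generic gvec shifts reject.

/* VSHRI.U8 Qd, Qm, #8: every byte lane must become 0, but
 * tcg_gen_gvec_shri() requires shift < element size, hence the
 * tcg_gen_gvec_dup_imm(..., 0) path for shift == (8 << vece).
 *
 * VSHRI.S8 Qd, Qm, #8: each lane becomes all-sign-bits; a signed
 * shift by 7 produces the identical result, hence the "shift--":
 *   (int8_t)0x80 >> 7 == (int8_t)0xff   and   (int8_t)0x7f >> 7 == 0
 */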
Deleted patch
For M-profile before v8.1M, the only valid register for VMSR/VMRS is
the FPSCR. We have a comment that states this, but the actual logic
to forbid accesses for any other register value is missing, so we
would end up with A-profile style behaviour. Add the missing check.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-7-peter.maydell@linaro.org
---
target/arm/translate-vfp.c.inc | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
* Accesses to R15 are UNPREDICTABLE; we choose to undef.
* (FPSCR -> r15 is a special case which writes to the PSR flags.)
*/
- if (a->rt == 15 && (!a->l || a->reg != ARM_VFP_FPSCR)) {
+ if (a->reg != ARM_VFP_FPSCR) {
+ return false;
+ }
+ if (a->rt == 15 && !a->l) {
return false;
}
}
--
2.20.1
Currently M-profile borrows the A-profile code for VMSR and VMRS
(access to the FP system registers), because all it needs to support
is the FPSCR. In v8.1M things become significantly more complicated
in two ways:

* there are several new FP system registers; some have side effects
on read, and one (FPCXT_NS) needs to avoid the usual
vfp_access_check() and the "only if FPU implemented" check

* all sysregs are now accessible both by VMRS/VMSR (which
reads/writes a general purpose register) and also by VLDR/VSTR
(which reads/writes them directly to memory)

Refactor the structure of how we handle VMSR/VMRS to cope with this:

* keep the M-profile code entirely separate from the A-profile code

* abstract out the "read or write the general purpose register" part
of the code into a loadfn or storefn function pointer, so we can
reuse it for VLDR/VSTR.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-8-peter.maydell@linaro.org
---
target/arm/cpu.h | 3 +
target/arm/translate-vfp.c.inc | 182 ++++++++++++++++++++++++++++++---
2 files changed, 171 insertions(+), 14 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ enum arm_cpu_mode {
#define ARM_VFP_FPINST 9
#define ARM_VFP_FPINST2 10

+/* QEMU-internal value meaning "FPSCR, but we care only about NZCV" */
+#define QEMU_VFP_FPSCR_NZCV 0xffff
+
/* iwMMXt coprocessor control registers. */
#define ARM_IWMMXT_wCID 0
#define ARM_IWMMXT_wCon 1
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
return true;
}

+/*
+ * M-profile provides two different sets of instructions that can
+ * access floating point system registers: VMSR/VMRS (which move
+ * to/from a general purpose register) and VLDR/VSTR sysreg (which
+ * move directly to/from memory). In some cases there are also side
+ * effects which must happen after any write to memory (which could
+ * cause an exception). So we implement the common logic for the
+ * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
+ * which take pointers to callback functions which will perform the
+ * actual "read/write general purpose register" and "read/write
+ * memory" operations.
+ */
+
+/*
+ * Emit code to store the sysreg to its final destination; frees the
+ * TCG temp 'value' it is passed.
+ */
+typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
+/*
+ * Emit code to load the value to be copied to the sysreg; returns
+ * a new TCG temporary
+ */
+typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);
+
+/* Common decode/access checks for fp sysreg read/write */
+typedef enum FPSysRegCheckResult {
+ FPSysRegCheckFailed, /* caller should return false */
+ FPSysRegCheckDone, /* caller should return true */
+ FPSysRegCheckContinue, /* caller should continue generating code */
+} FPSysRegCheckResult;
+
+static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
+{
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+ return FPSysRegCheckFailed;
+ }
+
+ switch (regno) {
+ case ARM_VFP_FPSCR:
+ case QEMU_VFP_FPSCR_NZCV:
+ break;
+ default:
+ return FPSysRegCheckFailed;
+ }
+
+ if (!vfp_access_check(s)) {
+ return FPSysRegCheckDone;
+ }
+
+ return FPSysRegCheckContinue;
+}
+
+static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
+ fp_sysreg_loadfn *loadfn,
+ void *opaque)
+{
+ /* Do a write to an M-profile floating point system register */
+ TCGv_i32 tmp;
+
+ switch (fp_sysreg_checks(s, regno)) {
+ case FPSysRegCheckFailed:
+ return false;
+ case FPSysRegCheckDone:
+ return true;
+ case FPSysRegCheckContinue:
+ break;
+ }
+
+ switch (regno) {
+ case ARM_VFP_FPSCR:
+ tmp = loadfn(s, opaque);
+ gen_helper_vfp_set_fpscr(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_lookup_tb(s);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return true;
+}
+
+static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
+ fp_sysreg_storefn *storefn,
+ void *opaque)
+{
+ /* Do a read from an M-profile floating point system register */
+ TCGv_i32 tmp;
+
+ switch (fp_sysreg_checks(s, regno)) {
+ case FPSysRegCheckFailed:
+ return false;
+ case FPSysRegCheckDone:
+ return true;
+ case FPSysRegCheckContinue:
+ break;
+ }
+
+ switch (regno) {
+ case ARM_VFP_FPSCR:
+ tmp = tcg_temp_new_i32();
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ storefn(s, opaque, tmp);
+ break;
+ case QEMU_VFP_FPSCR_NZCV:
+ /*
+ * Read just NZCV; this is a special case to avoid the
+ * helper call for the "VMRS to CPSR.NZCV" insn.
+ */
+ tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+ tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
+ storefn(s, opaque, tmp);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return true;
+}
+
+static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
+{
+ arg_VMSR_VMRS *a = opaque;
+
+ if (a->rt == 15) {
+ /* Set the 4 flag bits in the CPSR */
+ gen_set_nzcv(value);
+ tcg_temp_free_i32(value);
+ } else {
+ store_reg(s, a->rt, value);
+ }
+}
+
+static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
+{
+ arg_VMSR_VMRS *a = opaque;
+
+ return load_reg(s, a->rt);
+}
+
+static bool gen_M_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
+{
+ /*
+ * Accesses to R15 are UNPREDICTABLE; we choose to undef.
+ * FPSCR -> r15 is a special case which writes to the PSR flags;
+ * set a->reg to a special value to tell gen_M_fp_sysreg_read()
+ * we only care about the top 4 bits of FPSCR there.
+ */
+ if (a->rt == 15) {
+ if (a->l && a->reg == ARM_VFP_FPSCR) {
+ a->reg = QEMU_VFP_FPSCR_NZCV;
+ } else {
+ return false;
+ }
+ }
+
+ if (a->l) {
+ /* VMRS, move FP system register to gp register */
+ return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
+ } else {
+ /* VMSR, move gp register to FP system register */
+ return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
+ }
+}
+
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
TCGv_i32 tmp;
bool ignore_vfp_enabled = false;

- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
- return false;
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ return gen_M_VMSR_VMRS(s, a);
}

- if (arm_dc_feature(s, ARM_FEATURE_M)) {
- /*
- * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
- * Accesses to R15 are UNPREDICTABLE; we choose to undef.
- * (FPSCR -> r15 is a special case which writes to the PSR flags.)
- */
- if (a->reg != ARM_VFP_FPSCR) {
- return false;
- }
- if (a->rt == 15 && !a->l) {
- return false;
- }
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+ return false;
}

switch (a->reg) {
--
2.20.1

Optimize the MVE VSHLL insns by using TCG vector ops when possible.
This includes the VMOVL insn, which we handle in mve.decode as "VSHLL
with zero shift count".

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-11-peter.maydell@linaro.org
---
target/arm/translate-mve.c | 67 +++++++++++++++++++++++++++++++++-----
1 file changed, 59 insertions(+), 8 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SCALAR(VQSHL_U_scalar, vqshli_u)
DO_2SHIFT_SCALAR(VQRSHL_S_scalar, vqrshli_s)
DO_2SHIFT_SCALAR(VQRSHL_U_scalar, vqrshli_u)

-#define DO_VSHLL(INSN, FN) \
- static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
- { \
- static MVEGenTwoOpShiftFn * const fns[] = { \
- gen_helper_mve_##FN##b, \
- gen_helper_mve_##FN##h, \
- }; \
- return do_2shift(s, a, fns[a->size], false); \
+#define DO_VSHLL(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+ { \
+ static MVEGenTwoOpShiftFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ }; \
+ return do_2shift_vec(s, a, fns[a->size], false, do_gvec_##FN); \
}

+/*
+ * For the VSHLL vector helpers, the vece is the size of the input
+ * (ie MO_8 or MO_16); the helpers want to work in the output size.
+ * The shift count can be 0..<input size>, inclusive. (0 is VMOVL.)
+ */
+static void do_gvec_vshllbs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ unsigned ovece = vece + 1;
+ unsigned ibits = vece == MO_8 ? 8 : 16;
+ tcg_gen_gvec_shli(ovece, dofs, aofs, ibits, oprsz, maxsz);
+ tcg_gen_gvec_sari(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
+}
+
+static void do_gvec_vshllbu(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ unsigned ovece = vece + 1;
+ tcg_gen_gvec_andi(ovece, dofs, aofs,
+ ovece == MO_16 ? 0xff : 0xffff, oprsz, maxsz);
+ tcg_gen_gvec_shli(ovece, dofs, dofs, shift, oprsz, maxsz);
+}
+
+static void do_gvec_vshllts(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ unsigned ovece = vece + 1;
+ unsigned ibits = vece == MO_8 ? 8 : 16;
+ if (shift == 0) {
+ tcg_gen_gvec_sari(ovece, dofs, aofs, ibits, oprsz, maxsz);
+ } else {
+ tcg_gen_gvec_andi(ovece, dofs, aofs,
+ ovece == MO_16 ? 0xff00 : 0xffff0000, oprsz, maxsz);
+ tcg_gen_gvec_sari(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
+ }
+}
+
+static void do_gvec_vshlltu(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ unsigned ovece = vece + 1;
+ unsigned ibits = vece == MO_8 ? 8 : 16;
+ if (shift == 0) {
+ tcg_gen_gvec_shri(ovece, dofs, aofs, ibits, oprsz, maxsz);
+ } else {
+ tcg_gen_gvec_andi(ovece, dofs, aofs,
+ ovece == MO_16 ? 0xff00 : 0xffff0000, oprsz, maxsz);
+ tcg_gen_gvec_shri(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
+ }
+}
+
DO_VSHLL(VSHLL_BS, vshllbs)
DO_VSHLL(VSHLL_BU, vshllbu)
DO_VSHLL(VSHLL_TS, vshllts)
--
2.20.1
Deleted patch
1
v8.1M defines a new FP system register FPSCR_nzcvqc; this behaves
2
like the existing FPSCR, except that it reads and writes only bits
3
[31:27] of the FPSCR (the N, Z, C, V and QC flag bits). (Unlike the
4
FPSCR, the special case for Rt=15 of writing the CPSR.NZCV is not
5
permitted.)
6
1
7
Implement the register. Since we don't yet implement MVE, we handle
8
the QC bit as RES0, with todo comments for where we will need to add
9
support later.
10
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20201119215617.29887-11-peter.maydell@linaro.org
14
---
15
target/arm/cpu.h | 13 +++++++++++++
16
target/arm/translate-vfp.c.inc | 27 +++++++++++++++++++++++++++
17
2 files changed, 40 insertions(+)
18
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
23
@@ -XXX,XX +XXX,XX @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val);
24
#define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */
25
#define FPCR_DN (1 << 25) /* Default NaN enable bit */
26
#define FPCR_QC (1 << 27) /* Cumulative saturation bit */
27
+#define FPCR_V (1 << 28) /* FP overflow flag */
28
+#define FPCR_C (1 << 29) /* FP carry flag */
29
+#define FPCR_Z (1 << 30) /* FP zero flag */
30
+#define FPCR_N (1 << 31) /* FP negative flag */
31
+
32
+#define FPCR_NZCV_MASK (FPCR_N | FPCR_Z | FPCR_C | FPCR_V)
33
+#define FPCR_NZCVQC_MASK (FPCR_NZCV_MASK | FPCR_QC)
34
35
static inline uint32_t vfp_get_fpsr(CPUARMState *env)
36
{
37
@@ -XXX,XX +XXX,XX @@ enum arm_cpu_mode {
38
#define ARM_VFP_FPEXC 8
39
#define ARM_VFP_FPINST 9
40
#define ARM_VFP_FPINST2 10
41
+/* These ones are M-profile only */
42
+#define ARM_VFP_FPSCR_NZCVQC 2
43
+#define ARM_VFP_VPR 12
44
+#define ARM_VFP_P0 13
45
+#define ARM_VFP_FPCXT_NS 14
46
+#define ARM_VFP_FPCXT_S 15
47
48
/* QEMU-internal value meaning "FPSCR, but we care only about NZCV" */
49
#define QEMU_VFP_FPSCR_NZCV 0xffff
50
diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
51
index XXXXXXX..XXXXXXX 100644
52
--- a/target/arm/translate-vfp.c.inc
53
+++ b/target/arm/translate-vfp.c.inc
54
@@ -XXX,XX +XXX,XX @@ static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
55
case ARM_VFP_FPSCR:
56
case QEMU_VFP_FPSCR_NZCV:
57
break;
58
+ case ARM_VFP_FPSCR_NZCVQC:
59
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
60
+ return false;
61
+ }
62
+ break;
63
default:
64
return FPSysRegCheckFailed;
65
}
66
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
67
tcg_temp_free_i32(tmp);
68
gen_lookup_tb(s);
69
break;
70
+ case ARM_VFP_FPSCR_NZCVQC:
71
+ {
72
+ TCGv_i32 fpscr;
73
+ tmp = loadfn(s, opaque);
74
+ /*
75
+ * TODO: when we implement MVE, write the QC bit.
76
+ * For non-MVE, QC is RES0.
77
+ */
78
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
79
+ fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
80
+ tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
81
+ tcg_gen_or_i32(fpscr, fpscr, tmp);
82
+ store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
83
+ tcg_temp_free_i32(tmp);
84
+ break;
85
+ }
86
default:
87
g_assert_not_reached();
88
}
89
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
90
gen_helper_vfp_get_fpscr(tmp, cpu_env);
91
storefn(s, opaque, tmp);
92
break;
93
+ case ARM_VFP_FPSCR_NZCVQC:
94
+ /*
95
+ * TODO: MVE has a QC bit, which we probably won't store
96
+ * in the xregs[] field. For non-MVE, where QC is RES0,
97
+ * we can just fall through to the FPSCR_NZCV case.
98
+ */
99
case QEMU_VFP_FPSCR_NZCV:
100
/*
101
* Read just NZCV; this is a special case to avoid the
102
--
103
2.20.1
104
105
We defined a constant name for the mask of NZCV bits in the FPCR/FPSCR
in the previous commit; use it in a couple of places in existing code,
where we're masking out everything except NZCV for the "load to Rt=15
sets CPSR.NZCV" special case.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-12-peter.maydell@linaro.org
---
target/arm/translate-vfp.c.inc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
* helper call for the "VMRS to CPSR.NZCV" insn.
*/
tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
- tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
storefn(s, opaque, tmp);
break;
default:
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
case ARM_VFP_FPSCR:
if (a->rt == 15) {
tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
- tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
} else {
tmp = tcg_temp_new_i32();
gen_helper_vfp_get_fpscr(tmp, cpu_env);
--
2.20.1

Optimize the MVE shift-and-insert insns by using TCG
vector ops when possible.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-12-peter.maydell@linaro.org
---
target/arm/translate-mve.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ DO_2SHIFT_VEC(VSHRI_U, vshli_u, true, do_gvec_shri_u)
DO_2SHIFT(VRSHRI_S, vrshli_s, true)
DO_2SHIFT(VRSHRI_U, vrshli_u, true)

-DO_2SHIFT(VSRI, vsri, false)
-DO_2SHIFT(VSLI, vsli, false)
+DO_2SHIFT_VEC(VSRI, vsri, false, gen_gvec_sri)
+DO_2SHIFT_VEC(VSLI, vsli, false, gen_gvec_sli)

#define DO_2SHIFT_FP(INSN, FN) \
static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
--
2.20.1
Deleted patch
Implement the new-in-v8.1M FPCXT_S floating point system register.
This is for saving and restoring the secure floating point context,
and it reads and writes bits [27:0] from the FPSCR and the
CONTROL.SFPA bit in bit [31].

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-14-peter.maydell@linaro.org
---
target/arm/translate-vfp.c.inc | 58 ++++++++++++++++++++++++++++++++++
1 file changed, 58 insertions(+)

diff --git a/target/arm/translate-vfp.c.inc b/target/arm/translate-vfp.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-vfp.c.inc
+++ b/target/arm/translate-vfp.c.inc
@@ -XXX,XX +XXX,XX @@ static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
return false;
}
break;
+ case ARM_VFP_FPCXT_S:
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return false;
+ }
+ if (!s->v8m_secure) {
+ return false;
+ }
+ break;
default:
return FPSysRegCheckFailed;
}
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
tcg_temp_free_i32(tmp);
break;
}
+ case ARM_VFP_FPCXT_S:
+ {
+ TCGv_i32 sfpa, control, fpscr;
+ /* Set FPSCR[27:0] and CONTROL.SFPA from value */
+ tmp = loadfn(s, opaque);
+ sfpa = tcg_temp_new_i32();
+ tcg_gen_shri_i32(sfpa, tmp, 31);
+ control = load_cpu_field(v7m.control[M_REG_S]);
+ tcg_gen_deposit_i32(control, control, sfpa,
+ R_V7M_CONTROL_SFPA_SHIFT, 1);
+ store_cpu_field(control, v7m.control[M_REG_S]);
+ fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+ tcg_gen_andi_i32(fpscr, fpscr, FPCR_NZCV_MASK);
+ tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
+ tcg_gen_or_i32(fpscr, fpscr, tmp);
+ store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(sfpa);
+ break;
+ }
default:
g_assert_not_reached();
}
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
storefn(s, opaque, tmp);
break;
+ case ARM_VFP_FPCXT_S:
+ {
+ TCGv_i32 control, sfpa, fpscr;
+ /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
+ tmp = tcg_temp_new_i32();
+ sfpa = tcg_temp_new_i32();
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
+ control = load_cpu_field(v7m.control[M_REG_S]);
+ tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
+ tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
+ tcg_gen_or_i32(tmp, tmp, sfpa);
+ tcg_temp_free_i32(sfpa);
+ /*
+ * Store result before updating FPSCR etc, in case
+ * it is a memory write which causes an exception.
+ */
+ storefn(s, opaque, tmp);
+ /*
+ * Now we must reset FPSCR from FPDSCR_NS, and clear
+ * CONTROL.SFPA; so we'll end the TB here.
+ */
+ tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
+ store_cpu_field(control, v7m.control[M_REG_S]);
+ fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+ tcg_temp_free_i32(fpscr);
+ gen_lookup_tb(s);
+ break;
+ }
default:
g_assert_not_reached();
}
--
2.20.1
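The FPCXT_S read sequence in the deleted patch above assembles its result from two places; as plain C, a sketch of the semantics the TCG ops implement (illustrative only, assuming the FPCR_* masks and CONTROL layout defined earlier in the series):

/* FPCXT_S on read: FPSCR[27:0] in the low bits, CONTROL.SFPA in
 * bit 31; bits 30:28 read as zero. 'sfpa' here is a local standing
 * in for the CONTROL.SFPA bit the TCG code extracts.
 */
bool sfpa = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK;
uint32_t val = (vfp_get_fpscr(env) & ~FPCR_NZCV_MASK)
               | ((uint32_t)sfpa << 31);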
Deleted patch
In v8.0M, on exception entry the registers R0-R3, R12, APSR and EPSR
are zeroed for an exception taken to Non-secure state; for an
exception taken to Secure state they become UNKNOWN, and we chose to
leave them at their previous values.

In v8.1M the behaviour is specified more tightly and these registers
are always zeroed regardless of the security state that the exception
targets (see rule R_KPZV). Implement this.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-17-peter.maydell@linaro.org
---
target/arm/m_helper.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
* Clear registers if necessary to prevent non-secure exception
* code being able to see register values from secure code.
* Where register values become architecturally UNKNOWN we leave
- * them with their previous values.
+ * them with their previous values. v8.1M is tighter than v8.0M
+ * here and always zeroes the caller-saved registers regardless
+ * of the security state the exception is targeting.
*/
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- if (!targets_secure) {
+ if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
/*
* Always clear the caller-saved registers (they have been
* pushed to the stack earlier in v7m_push_stack()).
@@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
* v7m_push_callee_stack()).
*/
int i;
+ /*
+ * r4..r11 are callee-saves, zero only if background
+ * state was Secure (EXCRET.S == 1) and exception
+ * targets Non-secure state
+ */
+ bool zero_callee_saves = !targets_secure &&
+ (lr & R_V7M_EXCRET_S_MASK);

for (i = 0; i < 13; i++) {
- /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
- if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
+ if (i < 4 || i > 11 || zero_callee_saves) {
env->regs[i] = 0;
}
}
--
2.20.1
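Pulled out of the diff, the zeroing decision reads as below. This is a
stand-alone sketch under the assumption that the Security Extension is
implemented; the function and parameter names are invented for
illustration, not QEMU's:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Which of r0-r12 are zeroed on exception entry.
     * targets_secure: exception targets Secure state;
     * excret_s: EXCRET.S, i.e. the background state was Secure.
     */
    static void zero_gprs_on_exception_entry(uint32_t regs[13],
                                             bool targets_secure,
                                             bool excret_s, bool is_v8_1m)
    {
        if (!targets_secure || is_v8_1m) {   /* v8.1M: zero unconditionally */
            /* r4-r11 only when Secure code took an exception to Non-secure */
            bool zero_callee_saves = !targets_secure && excret_s;
            for (int i = 0; i < 13; i++) {
                if (i < 4 || i > 11 || zero_callee_saves) {
                    regs[i] = 0;   /* r0-r3 and r12 are always zeroed here */
                }
            }
        }
    }
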
Deleted patch
In v8.1M, vector table fetch failures don't set HFSR.FORCED (see rule
R_LLRP). (In previous versions of the architecture this was either
required or IMPDEF.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-18-peter.maydell@linaro.org
---
target/arm/m_helper.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ load_fail:
* The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
* secure); otherwise it targets the same security state as the
* underlying exception.
+ * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
*/
if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
exc_secure = true;
}
- env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
+ env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
+ if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
+ env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
+ }
armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
return false;
}
--
2.20.1

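The resulting HFSR update, sketched stand-alone (illustrative only; the
two macros restate the architectural HFSR bit positions rather than
QEMU's R_V7M_* masks):

    #include <stdbool.h>
    #include <stdint.h>

    #define HFSR_VECTTBL (1u << 1)    /* HardFault on vector table read */
    #define HFSR_FORCED  (1u << 30)   /* HardFault from escalated fault */

    static uint32_t hfsr_after_vector_fetch_failure(uint32_t hfsr,
                                                    bool is_v8_1m)
    {
        hfsr |= HFSR_VECTTBL;
        if (!is_v8_1m) {
            /* pre-v8.1M behaviour; v8.1M (R_LLRP) leaves FORCED clear */
            hfsr |= HFSR_FORCED;
        }
        return hfsr;
    }
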
Deleted patch
In v8.1M a REVIDR register is defined, which is at address 0xe000ecfc
and is a read-only IMPDEF register providing implementation-specific
minor revision information, like the v8A REVIDR_EL1. Implement this.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-19-peter.maydell@linaro.org
---
hw/intc/armv7m_nvic.c | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
}
return val;
}
+ case 0xcfc:
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8_1M)) {
+ goto bad_offset;
+ }
+ return cpu->revidr;
case 0xd00: /* CPUID Base. */
return cpu->midr;
case 0xd04: /* Interrupt Control State (ICSR) */
--
2.20.1

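From the guest's point of view the new register is simply a word in the
System Control Space. A hypothetical probe, assuming the usual
0xe000e000 SCS base plus the 0xcfc offset handled above (on a core
without v8.1M this offset takes the bad_offset path instead):

    #include <stdint.h>

    #define REVIDR_ADDR 0xe000ecfcu   /* SCS base 0xe000e000 + offset 0xcfc */

    /* Returns the IMPDEF minor-revision value (QEMU returns cpu->revidr) */
    static inline uint32_t read_revidr(void)
    {
        return *(volatile const uint32_t *)REVIDR_ADDR;
    }
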
Deleted patch
In v8.1M a new exception return check is added which may cause a NOCP
UsageFault (see rule R_XLTP): before we clear s0..s15 and the FPSCR
we must check whether access to CP10 from the Security state of the
returning exception is disabled; if it is then we must take a fault.

(Note that for our implementation CPPWR is always RAZ/WI and so can
never cause CP10 accesses to fail.)

The other v8.1M change to this register-clearing code is that if MVE
is implemented VPR must also be cleared, so add a TODO comment to
that effect.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-20-peter.maydell@linaro.org
---
target/arm/m_helper.c | 22 +++++++++++++++++++++-
1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu)
v7m_exception_taken(cpu, excret, true, false);
return;
} else {
- /* Clear s0..s15 and FPSCR */
+ if (arm_feature(env, ARM_FEATURE_V8_1M)) {
+ /* v8.1M adds this NOCP check */
+ bool nsacr_pass = exc_secure ||
+ extract32(env->v7m.nsacr, 10, 1);
+ bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
+ if (!nsacr_pass) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: NSACR prevents clearing FPU registers\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ } else if (!cpacr_pass) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ exc_secure);
+ env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: CPACR prevents clearing FPU registers\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ }
+ }
+ /* Clear s0..s15 and FPSCR; TODO also VPR when MVE is implemented */
int i;

for (i = 0; i < 16; i += 2) {
--
2.20.1

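The gating logic of that new check, restated as a stand-alone sketch
(the helper and parameter names are invented for this example;
cpacr_allows_cp10 stands in for the real v7m_cpacr_pass() check):

    #include <stdbool.h>

    /*
     * v8.1M exception-return NOCP check: may we clear the FP registers?
     * exc_secure: security state of the returning exception;
     * nsacr_cp10: NSACR.CP10 (only relevant for Non-secure returns).
     * Either failing check pends a NOCP UsageFault instead of clearing.
     */
    static bool fp_clear_allowed(bool exc_secure, bool nsacr_cp10,
                                 bool cpacr_allows_cp10)
    {
        bool nsacr_pass = exc_secure || nsacr_cp10;
        bool cpacr_pass = cpacr_allows_cp10;
        return nsacr_pass && cpacr_pass;
    }
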
v8.1M introduces a new TRD flag in the CCR register, which enables
checking for stack frame integrity signatures on SG instructions.
Add the code in the SG insn implementation for the new behaviour.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20201119215617.29887-24-peter.maydell@linaro.org
---
target/arm/m_helper.c | 86 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 86 insertions(+)

diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
return true;
}

+static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+ uint32_t addr, uint32_t *spdata)
+{
+ /*
+ * Read a word of data from the stack for the SG instruction,
+ * writing the value into *spdata. If the load succeeds, return
+ * true; otherwise pend an appropriate exception and return false.
+ * (We can't use data load helpers here that throw an exception
+ * because of the context we're called in, which is halfway through
+ * arm_v7m_cpu_do_interrupt().)
+ */
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ MemTxResult txres;
+ target_ulong page_size;
+ hwaddr physaddr;
+ int prot;
+ ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
+ uint32_t value;
+
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
+ &attrs, &prot, &page_size, &fi, &cacheattrs)) {
+ /* MPU/SAU lookup failed */
+ if (fi.type == ARMFault_QEMU_SFault) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...SecureFault during stack word read\n");
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
+ env->v7m.sfar = addr;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "...MemManageFault during stack word read\n");
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
+ R_V7M_CFSR_MMARVALID_MASK;
+ env->v7m.mmfar[M_REG_S] = addr;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
+ }
+ return false;
+ }
+ value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
+ attrs, &txres);
+ if (txres != MEMTX_OK) {
+ /* BusFault trying to read the data */
+ qemu_log_mask(CPU_LOG_INT,
+ "...BusFault during stack word read\n");
+ env->v7m.cfsr[M_REG_NS] |=
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+ env->v7m.bfar = addr;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+ return false;
+ }
+
+ *spdata = value;
+ return true;
+}
+
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
/*
@@ -XXX,XX +XXX,XX @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
*/
qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
", executing it\n", env->regs[15]);
+
+ if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
+ !arm_v7m_is_handler_mode(env)) {
+ /*
+ * v8.1M exception stack frame integrity check. Note that we
+ * must perform the memory access even if CCR_S.TRD is zero
+ * and we aren't going to check what the data loaded is.
+ */
+ uint32_t spdata, sp;
+
+ /*
+ * We know we are currently NS, so the S stack pointers must be
+ * in other_ss_{psp,msp}, not in regs[13]/other_sp.
+ */
+ sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
+ if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
+ /* Stack access failed and an exception has been pended */
+ return false;
+ }
+
+ if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
+ if (((spdata & ~1) == 0xfefa125a) ||
+ !(env->v7m.control[M_REG_S] & 1)) {
+ goto gen_invep;
+ }
+ }
+ }
+
env->regs[14] &= ~1;
env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
switch_v7m_security_state(env, true);
--
2.20.1

Optimize the MVE 1op-immediate insns (VORR, VBIC, VMOV) to
use TCG vector ops when possible.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210913095440.13462-13-peter.maydell@linaro.org
---
target/arm/translate-mve.c | 26 +++++++++++++++++++++-----
1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -XXX,XX +XXX,XX @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a)
return true;
}

-static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)
+static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn,
+ GVecGen2iFn *vecfn)
{
TCGv_ptr qd;
uint64_t imm;
@@ -XXX,XX +XXX,XX @@ static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn)

imm = asimd_imm_const(a->imm, a->cmode, a->op);

- qd = mve_qreg_ptr(a->qd);
- fn(cpu_env, qd, tcg_constant_i64(imm));
- tcg_temp_free_ptr(qd);
+ if (vecfn && mve_no_predication(s)) {
+ vecfn(MO_64, mve_qreg_offset(a->qd), mve_qreg_offset(a->qd),
+ imm, 16, 16);
+ } else {
+ qd = mve_qreg_ptr(a->qd);
+ fn(cpu_env, qd, tcg_constant_i64(imm));
+ tcg_temp_free_ptr(qd);
+ }
mve_update_eci(s);
return true;
}

+static void gen_gvec_vmovi(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_dup_imm(vece, dofs, oprsz, maxsz, c);
+}
+
static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
{
/* Handle decode of cmode/op here between VORR/VBIC/VMOV */
MVEGenOneOpImmFn *fn;
+ GVecGen2iFn *vecfn;

if ((a->cmode & 1) && a->cmode < 12) {
if (a->op) {
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
* so the VBIC becomes a logical AND operation.
*/
fn = gen_helper_mve_vandi;
+ vecfn = tcg_gen_gvec_andi;
} else {
fn = gen_helper_mve_vorri;
+ vecfn = tcg_gen_gvec_ori;
}
} else {
/* There is one unallocated cmode/op combination in this space */
@@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
}
/* asimd_imm_const() sorts out VMVNI vs VMOVI for us */
fn = gen_helper_mve_vmovi;
+ vecfn = gen_gvec_vmovi;
}
- return do_1imm(s, a, fn);
+ return do_1imm(s, a, fn, vecfn);
}

static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
--
2.20.1
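A note on the magic constant in the SG patch above: 0xfefa125a is the
v8.1M stack frame integrity signature, and the comparison ignores bit
[0]. As a stand-alone sketch of that predicate (names invented here):

    #include <stdbool.h>
    #include <stdint.h>

    /* v8.1M stack frame integrity signature, low bit ignored by the check */
    #define V8_1M_INTEGRITY_SIG 0xfefa125au

    /* True if the word read from the Secure SP looks like a signature */
    static bool looks_like_integrity_sig(uint32_t spdata)
    {
        return (spdata & ~1u) == V8_1M_INTEGRITY_SIG;
    }

When CCR_S.TRD is set, a match (or a privileged Secure thread
background state) makes the SG take the INVEP path instead of
completing the security state switch.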
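For the MVE patch above, what the gvec fast path amounts to at runtime
can be sketched in plain C: with no predication active, VMOV
(immediate) just splats the 64-bit expanded immediate across the
16-byte Q register, with no per-beat helper call (the VORR/VBIC paths
do the same walk but OR/AND the immediate into each lane). Illustrative
only; qreg stands in for the vfp.zregs[] backing store:

    #include <stdint.h>
    #include <string.h>

    /* Unpredicated VMOV (immediate) on one Q register: two MO_64 lanes */
    static void vmovi_unpredicated(uint8_t qreg[16], uint64_t imm)
    {
        for (int i = 0; i < 16; i += 8) {
            memcpy(&qreg[i], &imm, 8);   /* splat the expanded immediate */
        }
    }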