target-arm queue: the big stuff here is the final part of
rth's patches for Cortex-A76 and Neoverse-N1 support;
also present are Gavin's NUMA series and a few other things.

thanks
-- PMM

The following changes since commit 554623226f800acf48a2ed568900c1c968ec9a8b:

  Merge tag 'qemu-sparc-20220508' of https://github.com/mcayland/qemu into staging (2022-05-08 17:03:26 -0500)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220509

for you to fetch changes up to ae9141d4a3265553503bf07d3574b40f84615a34:

  hw/acpi/aml-build: Use existing CPU topology to build PPTT table (2022-05-09 11:47:55 +0100)

----------------------------------------------------------------
target-arm queue:
 * MAINTAINERS/.mailmap: update email for Leif Lindholm
 * hw/arm: add version information to sbsa-ref machine DT
 * Enable new features for -cpu max:
   FEAT_Debugv8p2, FEAT_Debugv8p4, FEAT_RAS (minimal version only),
   FEAT_IESB, FEAT_CSV2, FEAT_CSV2_2, FEAT_CSV3, FEAT_DGH
 * Emulate Cortex-A76
 * Emulate Neoverse-N1
 * Fix the virt board default NUMA topology

----------------------------------------------------------------
Gavin Shan (6):
      qapi/machine.json: Add cluster-id
      qtest/numa-test: Specify CPU topology in aarch64_numa_cpu()
      hw/arm/virt: Consider SMP configuration in CPU topology
      qtest/numa-test: Correct CPU and NUMA association in aarch64_numa_cpu()
      hw/arm/virt: Fix CPU's default NUMA node ID
      hw/acpi/aml-build: Use existing CPU topology to build PPTT table

Leif Lindholm (2):
      MAINTAINERS/.mailmap: update email for Leif Lindholm
      hw/arm: add versioning to sbsa-ref machine DT

Richard Henderson (24):
      target/arm: Handle cpreg registration for missing EL
      target/arm: Drop EL3 no EL2 fallbacks
      target/arm: Merge zcr reginfo
      target/arm: Adjust definition of CONTEXTIDR_EL2
      target/arm: Move cortex impdef sysregs to cpu_tcg.c
      target/arm: Update qemu-system-arm -cpu max to cortex-a57
      target/arm: Set ID_DFR0.PerfMon for qemu-system-arm -cpu max
      target/arm: Split out aa32_max_features
      target/arm: Annotate arm_max_initfn with FEAT identifiers
      target/arm: Use field names for manipulating EL2 and EL3 modes
      target/arm: Enable FEAT_Debugv8p2 for -cpu max
      target/arm: Enable FEAT_Debugv8p4 for -cpu max
      target/arm: Add minimal RAS registers
      target/arm: Enable SCR and HCR bits for RAS
      target/arm: Implement virtual SError exceptions
      target/arm: Implement ESB instruction
      target/arm: Enable FEAT_RAS for -cpu max
      target/arm: Enable FEAT_IESB for -cpu max
      target/arm: Enable FEAT_CSV2 for -cpu max
      target/arm: Enable FEAT_CSV2_2 for -cpu max
      target/arm: Enable FEAT_CSV3 for -cpu max
      target/arm: Enable FEAT_DGH for -cpu max
      target/arm: Define cortex-a76
      target/arm: Define neoverse-n1

 docs/system/arm/emulation.rst | 10 +
 docs/system/arm/virt.rst | 2 +
 qapi/machine.json | 6 +-
 target/arm/cpregs.h | 11 +
 target/arm/cpu.h | 23 ++
 target/arm/helper.h | 1 +
 target/arm/internals.h | 16 ++
 target/arm/syndrome.h | 5 +
 target/arm/a32.decode | 16 +-
 target/arm/t32.decode | 18 +-
 hw/acpi/aml-build.c | 111 ++++----
 hw/arm/sbsa-ref.c | 16 ++
 hw/arm/virt.c | 21 +-
 hw/core/machine-hmp-cmds.c | 4 +
 hw/core/machine.c | 16 ++
 target/arm/cpu.c | 66 ++++-
 target/arm/cpu64.c | 353 ++++++++++++++-----------
 target/arm/cpu_tcg.c | 227 +++++++++++-----
 target/arm/helper.c | 600 +++++++++++++++++++++++++-----------------
 target/arm/op_helper.c | 43 +++
 target/arm/translate-a64.c | 18 ++
 target/arm/translate.c | 23 ++
 tests/qtest/numa-test.c | 19 +-
 .mailmap | 3 +-
 MAINTAINERS | 2 +-
 25 files changed, 1068 insertions(+), 562 deletions(-)

From: Leif Lindholm <quic_llindhol@quicinc.com>

NUVIA was acquired by Qualcomm in March 2021, but kept functioning on
separate infrastructure for a transitional period. We've now switched
over to contributing as Qualcomm Innovation Center (quicinc), so update
my email address to reflect this.

Signed-off-by: Leif Lindholm <quic_llindhol@quicinc.com>
Message-id: 20220505113740.75565-1-quic_llindhol@quicinc.com
Cc: Leif Lindholm <leif@nuviainc.com>
Cc: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
[Fixed commit message typo]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 .mailmap | 3 ++-
 MAINTAINERS | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.mailmap b/.mailmap
index XXXXXXX..XXXXXXX 100644
--- a/.mailmap
+++ b/.mailmap
@@ -XXX,XX +XXX,XX @@ Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
-Leif Lindholm <leif@nuviainc.com> <leif.lindholm@linaro.org>
+Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
+Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: include/hw/ssi/imx_spi.h
SBSA-REF
M: Radoslaw Biernacki <rad@semihalf.com>
M: Peter Maydell <peter.maydell@linaro.org>
-R: Leif Lindholm <leif@nuviainc.com>
+R: Leif Lindholm <quic_llindhol@quicinc.com>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/sbsa-ref.c
--
2.25.1

From: Richard Henderson <richard.henderson@linaro.org>

More gracefully handle cpregs when EL2 and/or EL3 are missing.
If the reg is entirely inaccessible, do not register it at all.
If the reg is for EL2, and EL3 is present but EL2 is not,
either discard, squash to res0, const, or keep unchanged.

Per rule RJFFP, mark the 4 aarch32 hypervisor access registers
with ARM_CP_EL3_NO_EL2_KEEP, and mark all of the EL2 address
translation and tlb invalidation "regs" ARM_CP_EL3_NO_EL2_UNDEF.
Mark the 2 virtualization processor id regs ARM_CP_EL3_NO_EL2_C_NZ.

This will simplify cpreg registration for conditional arm features.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220506180242.216785-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpregs.h | 11 +
 target/arm/helper.c | 178 ++++++++++++++++++++++++++++++--------------
 2 files changed, 133 insertions(+), 56 deletions(-)

diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -XXX,XX +XXX,XX @@ enum {
ARM_CP_SVE = 1 << 14,
/* Flag: Do not expose in gdb sysreg xml. */
ARM_CP_NO_GDB = 1 << 15,
+ /*
+ * Flags: If EL3 but not EL2...
+ * - UNDEF: discard the cpreg,
+ * - KEEP: retain the cpreg as is,
+ * - C_NZ: set const on the cpreg, but retain resetvalue,
+ * - else: set const on the cpreg, zero resetvalue, aka RES0.
+ * See rule RJFFP in section D1.1.3 of DDI0487H.a.
+ */
+ ARM_CP_EL3_NO_EL2_UNDEF = 1 << 16,
+ ARM_CP_EL3_NO_EL2_KEEP = 1 << 17,
+ ARM_CP_EL3_NO_EL2_C_NZ = 1 << 18,
};

/*
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
{ .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_FPU,
+ .access = PL2_RW,
+ .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
.fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
{ .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
- .access = PL2_RW, .resetvalue = 0,
+ .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
.writefn = dacr_write, .raw_writefn = raw_write,
.fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
{ .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
- .access = PL2_RW, .resetvalue = 0,
+ .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
.fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
{ .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.writefn = tlbimva_hyp_is_write },
{ .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_alle2_write },
{ .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_vae2_write },
{ .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_vae2_write },
{ .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_alle2is_write },
{ .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
- .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_vae2is_write },
{ .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
/* Unlike the other EL2-related AT operations, these must
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
.access = PL2_W, .accessfn = at_s1e2_access,
- .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = ats_write64 },
{ .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
.access = PL2_W, .accessfn = at_s1e2_access,
- .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = ats_write64 },
/* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
* if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
* with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
{ .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL2_RW, .accessfn = access_tda,
- .type = ARM_CP_NOP },
+ .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
/* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
* Channel but Linux may try to access this register. The 32-bit
* alias is DBGDCCINT.
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
.access = PL2_W, .type = ARM_CP_NOP },
{ .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_rvae2is_write },
{ .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_rvae2is_write },
{ .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbirange_reginfo[] = {
.access = PL2_W, .type = ARM_CP_NOP },
{ .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_rvae2is_write },
{ .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_rvae2is_write },
{ .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_rvae2_write },
{ .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_rvae2_write },
{ .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbios_reginfo[] = {
.writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_alle2is_write },
{ .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_vae2is_write },
{ .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo tlbios_reginfo[] = {
.writefn = tlbi_aa64_alle1is_write },
{ .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
.writefn = tlbi_aa64_vae2is_write },
{ .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "VPIDR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
.access = PL2_RW, .accessfn = access_el3_aa32ns,
- .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
+ .resetvalue = cpu->midr,
+ .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
.fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
{ .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
.access = PL2_RW, .resetvalue = cpu->midr,
+ .type = ARM_CP_EL3_NO_EL2_C_NZ,
.fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
{ .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
.access = PL2_RW, .accessfn = access_el3_aa32ns,
- .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
+ .resetvalue = vmpidr_def,
+ .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
.fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
{ .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
- .access = PL2_RW,
- .resetvalue = vmpidr_def,
+ .access = PL2_RW, .resetvalue = vmpidr_def,
+ .type = ARM_CP_EL3_NO_EL2_C_NZ,
.fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
};
define_arm_cp_regs(cpu, vpidr_regs);
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
int crm, int opc1, int opc2,
const char *name)
{
+ CPUARMState *env = &cpu->env;
uint32_t key;
ARMCPRegInfo *r2;
bool is64 = r->type & ARM_CP_64BIT;
bool ns = secstate & ARM_CP_SECSTATE_NS;
int cp = r->cp;
- bool isbanked;
size_t name_len;
+ bool make_const;

switch (state) {
case ARM_CP_STATE_AA32:
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
}
}

+ /*
+ * Eliminate registers that are not present because the EL is missing.
+ * Doing this here makes it easier to put all registers for a given
+ * feature into the same ARMCPRegInfo array and define them all at once.
+ */
+ make_const = false;
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /*
+ * An EL2 register without EL2 but with EL3 is (usually) RES0.
+ * See rule RJFFP in section D1.1.3 of DDI0487H.a.
+ */
+ int min_el = ctz32(r->access) / 2;
+ if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
+ if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
+ return;
+ }
+ make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
+ }
+ } else {
+ CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
+ ? PL2_RW : PL1_RW);
+ if ((r->access & max_el) == 0) {
+ return;
+ }
+ }
+
/* Combine cpreg and name into one allocation. */
name_len = strlen(name) + 1;
r2 = g_malloc(sizeof(*r2) + name_len);
@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
r2->opaque = opaque;
}

- isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
- if (isbanked) {
+ if (make_const) {
+ /* This should not have been a very special register to begin. */
+ int old_special = r2->type & ARM_CP_SPECIAL_MASK;
+ assert(old_special == 0 || old_special == ARM_CP_NOP);
/*
- * Register is banked (using both entries in array).
- * Overwriting fieldoffset as the array is only used to define
- * banked registers but later only fieldoffset is used.
+ * Set the special function to CONST, retaining the other flags.
+ * This is important for e.g. ARM_CP_SVE so that we still
+ * take the SVE trap if CPTR_EL3.EZ == 0.
*/
- r2->fieldoffset = r->bank_fieldoffsets[ns];
- }
+ r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
+ /*
+ * Usually, these registers become RES0, but there are a few
+ * special cases like VPIDR_EL2 which have a constant non-zero
+ * value with writes ignored.
+ */
+ if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
+ r2->resetvalue = 0;
+ }
+ /*
+ * ARM_CP_CONST has precedence, so removing the callbacks and
+ * offsets are not strictly necessary, but it is potentially
+ * less confusing to debug later.
+ */
+ r2->readfn = NULL;
+ r2->writefn = NULL;
+ r2->raw_readfn = NULL;
+ r2->raw_writefn = NULL;
+ r2->resetfn = NULL;
+ r2->fieldoffset = 0;
+ r2->bank_fieldoffsets[0] = 0;
+ r2->bank_fieldoffsets[1] = 0;
+ } else {
+ bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];

- if (state == ARM_CP_STATE_AA32) {
if (isbanked) {
/*
- * If the register is banked then we don't need to migrate or
- * reset the 32-bit instance in certain cases:
- *
- * 1) If the register has both 32-bit and 64-bit instances then we
- * can count on the 64-bit instance taking care of the
- * non-secure bank.
- * 2) If ARMv8 is enabled then we can count on a 64-bit version
- * taking care of the secure bank. This requires that separate
- * 32 and 64-bit definitions are provided.
+ * Register is banked (using both entries in array).
+ * Overwriting fieldoffset as the array is only used to define
+ * banked registers but later only fieldoffset is used.
*/
- if ((r->state == ARM_CP_STATE_BOTH && ns) ||
- (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
+ r2->fieldoffset = r->bank_fieldoffsets[ns];
+ }
+ if (state == ARM_CP_STATE_AA32) {
+ if (isbanked) {
+ /*
+ * If the register is banked then we don't need to migrate or
+ * reset the 32-bit instance in certain cases:
+ *
+ * 1) If the register has both 32-bit and 64-bit instances
+ * then we can count on the 64-bit instance taking care
+ * of the non-secure bank.
+ * 2) If ARMv8 is enabled then we can count on a 64-bit
+ * version taking care of the secure bank. This requires
+ * that separate 32 and 64-bit definitions are provided.
+ */
+ if ((r->state == ARM_CP_STATE_BOTH && ns) ||
+ (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
+ r2->type |= ARM_CP_ALIAS;
+ }
+ } else if ((secstate != r->secure) && !ns) {
+ /*
+ * The register is not banked so we only want to allow
+ * migration of the non-secure instance.
+ */
r2->type |= ARM_CP_ALIAS;
}
- } else if ((secstate != r->secure) && !ns) {
- /*
- * The register is not banked so we only want to allow migration
- * of the non-secure instance.
- */
- r2->type |= ARM_CP_ALIAS;
- }

- if (HOST_BIG_ENDIAN &&
- r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
- r2->fieldoffset += sizeof(uint32_t);
+ if (HOST_BIG_ENDIAN &&
+ r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
+ r2->fieldoffset += sizeof(uint32_t);
+ }
}
}

@@ -XXX,XX +XXX,XX @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
* multiple times. Special registers (ie NOP/WFI) are
* never migratable and not even raw-accessible.
*/
- if (r->type & ARM_CP_SPECIAL_MASK) {
+ if (r2->type & ARM_CP_SPECIAL_MASK) {
r2->type |= ARM_CP_NO_RAW;
}
if (((r->crm == CP_ANY) && crm != 0) ||
--
2.25.1

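For readers skimming the series, the decision the new registration code makes for an EL2 register on a CPU that has EL3 but no EL2 can be summarised as a small, self-contained sketch. This is illustrative only: the flag values are stand-ins, and the authoritative logic is the add_cpreg_to_hashtable() hunk above.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag values for the sketch; the real ones live in cpregs.h. */
enum {
    ARM_CP_EL3_NO_EL2_UNDEF = 1 << 16,
    ARM_CP_EL3_NO_EL2_KEEP  = 1 << 17,
    ARM_CP_EL3_NO_EL2_C_NZ  = 1 << 18,
};

enum outcome { REG_DISCARDED, REG_KEPT, REG_CONST_RESETVALUE, REG_RES0 };

/* What happens to an EL2-only register when EL3 is present but EL2 is not. */
static enum outcome el3_no_el2_outcome(int type)
{
    if (type & ARM_CP_EL3_NO_EL2_UNDEF) {
        return REG_DISCARDED;          /* not registered; accesses UNDEF */
    }
    if (type & ARM_CP_EL3_NO_EL2_KEEP) {
        return REG_KEPT;               /* registered unchanged */
    }
    if (type & ARM_CP_EL3_NO_EL2_C_NZ) {
        return REG_CONST_RESETVALUE;   /* constant, resetvalue preserved */
    }
    return REG_RES0;                   /* default: constant zero */
}

int main(void)
{
    printf("TLBI_ALLE2:  %d (discarded)\n",
           el3_no_el2_outcome(ARM_CP_EL3_NO_EL2_UNDEF));
    printf("FPEXC32_EL2: %d (kept)\n",
           el3_no_el2_outcome(ARM_CP_EL3_NO_EL2_KEEP));
    printf("VPIDR_EL2:   %d (const, keeps the MIDR reset value)\n",
           el3_no_el2_outcome(ARM_CP_EL3_NO_EL2_C_NZ));
    printf("HCR_EL2:     %d (RES0)\n", el3_no_el2_outcome(0));
    return 0;
}

The register names in main() correspond to the flags assigned by the hunks above; building and running the sketch simply prints the four possible outcomes.
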
From: Richard Henderson <richard.henderson@linaro.org>

Drop el3_no_el2_cp_reginfo, el3_no_el2_v8_cp_reginfo, and the local
vpidr_regs definition, and rely on the squashing to ARM_CP_CONST
while registering for v8.

This is a behavior change for v7 cpus with Security Extensions and
without Virtualization Extensions, in that the virtualization cpregs
are now correctly not present. This would be a migration compatibility
break, except that we have an existing bug in which migration of 32-bit
cpus with Security Extensions enabled does not work.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220506180242.216785-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 158 ++++----------------------------------------
 1 file changed, 13 insertions(+), 145 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
};

-/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
-static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
- { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
- .access = PL2_RW,
- .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
- { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
- .access = PL2_RW,
- .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
- .access = PL2_RW,
- .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_CONST,
- .resetvalue = 0 },
- { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_CONST,
- .resetvalue = 0 },
- { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
- .access = PL2_RW, .type = ARM_CP_CONST,
- .resetvalue = 0 },
- { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_CONST,
- .resetvalue = 0 },
- { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
- .access = PL2_RW, .type = ARM_CP_CONST,
- .resetvalue = 0 },
- { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
- .access = PL2_RW, .accessfn = access_el3_aa32ns,
- .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 6, .crm = 2,
- .access = PL2_RW, .accessfn = access_el3_aa32ns,
- .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
- { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
- .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
- .resetvalue = 0 },
- { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
- .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
- .resetvalue = 0 },
- { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
- .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
- .resetvalue = 0 },
- { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
- .access = PL2_RW, .accessfn = access_tda,
- .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
- .access = PL2_RW, .accessfn = access_el3_aa32ns,
- .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
- .type = ARM_CP_CONST,
- .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
- .access = PL2_RW, .resetvalue = 0 },
-};
-
-/* Ditto, but for registers which exist in ARMv8 but not v7 */
-static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
- { .name = "HCR2", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
- .access = PL2_RW,
- .type = ARM_CP_CONST, .resetvalue = 0 },
-};
-
static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
ARMCPU *cpu = env_archcpu(env);
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, v8_idregs);
define_arm_cp_regs(cpu, v8_cp_reginfo);
}
- if (arm_feature(env, ARM_FEATURE_EL2)) {
+
+ /*
+ * Register the base EL2 cpregs.
+ * Pre v8, these registers are implemented only as part of the
+ * Virtualization Extensions (EL2 present). Beginning with v8,
+ * if EL2 is missing but EL3 is enabled, mostly these become
+ * RES0 from EL3, with some specific exceptions.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL2)
+ || (arm_feature(env, ARM_FEATURE_EL3)
+ && arm_feature(env, ARM_FEATURE_V8))) {
uint64_t vmpidr_def = mpidr_read_val(env);
ARMCPRegInfo vpidr_regs[] = {
{ .name = "VPIDR", .state = ARM_CP_STATE_AA32,
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
};
define_one_arm_cp_reg(cpu, &rvbar);
}
- } else {
- /* If EL2 is missing but higher ELs are enabled, we need to
- * register the no_el2 reginfos.
- */
- if (arm_feature(env, ARM_FEATURE_EL3)) {
- /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
- * of MIDR_EL1 and MPIDR_EL1.
- */
- ARMCPRegInfo vpidr_regs[] = {
- { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
- .access = PL2_RW, .accessfn = access_el3_aa32ns,
- .type = ARM_CP_CONST, .resetvalue = cpu->midr,
- .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
- { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
- .access = PL2_RW, .accessfn = access_el3_aa32ns,
- .type = ARM_CP_NO_RAW,
- .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
- };
- define_arm_cp_regs(cpu, vpidr_regs);
- define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
- if (arm_feature(env, ARM_FEATURE_V8)) {
- define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
- }
- }
}
+
+ /* Register the base EL3 cpregs. */
if (arm_feature(env, ARM_FEATURE_EL3)) {
define_arm_cp_regs(cpu, el3_cp_reginfo);
ARMCPRegInfo el3_regs[] = {
--
2.25.1

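The registration condition this patch introduces can be modelled compactly. The following is an illustrative, self-contained sketch rather than QEMU code; it only mirrors the new if-condition and the behaviour-change note in the commit message.

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the new condition in register_cp_regs_for_features(). */
static bool register_base_el2_cpregs(bool have_el2, bool have_el3, bool is_v8)
{
    /*
     * Pre-v8: only with the Virtualization Extensions (EL2 present).
     * v8: also when EL3 is present, so the registers exist but are
     * mostly squashed to RES0 by the previous patch's machinery.
     */
    return have_el2 || (have_el3 && is_v8);
}

int main(void)
{
    /* v7 with Security Extensions but without Virtualization Extensions:
     * previously the no_el2 stub regs were registered, now nothing is. */
    printf("v7, EL3 only: %d\n", register_base_el2_cpregs(false, true, false));

    /* v8 with EL3 but without EL2: registered, then mostly RES0. */
    printf("v8, EL3 only: %d\n", register_base_el2_cpregs(false, true, true));
    return 0;
}

The next patch applies the same idea to the ZCR registers, folding three separate reginfo structs into one array registered with a single define_arm_cp_regs() call.
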
From: Richard Henderson <richard.henderson@linaro.org>

Drop zcr_no_el2_reginfo and merge the 3 registers into one array,
now that ZCR_EL2 can be squashed to RES0 and ZCR_EL3 dropped
while registering.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220506180242.216785-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 55 ++++++++++++++-------------------------------
 1 file changed, 17 insertions(+), 38 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}

-static const ARMCPRegInfo zcr_el1_reginfo = {
- .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_SVE,
- .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
- .writefn = zcr_write, .raw_writefn = raw_write
-};
-
-static const ARMCPRegInfo zcr_el2_reginfo = {
- .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_SVE,
- .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
- .writefn = zcr_write, .raw_writefn = raw_write
-};
-
-static const ARMCPRegInfo zcr_no_el2_reginfo = {
- .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
- .access = PL2_RW, .type = ARM_CP_SVE,
- .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
-};
-
-static const ARMCPRegInfo zcr_el3_reginfo = {
- .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
- .access = PL3_RW, .type = ARM_CP_SVE,
- .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
- .writefn = zcr_write, .raw_writefn = raw_write
+static const ARMCPRegInfo zcr_reginfo[] = {
+ { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_SVE,
+ .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
+ .writefn = zcr_write, .raw_writefn = raw_write },
+ { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_SVE,
+ .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
+ .writefn = zcr_write, .raw_writefn = raw_write },
+ { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_SVE,
+ .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
+ .writefn = zcr_write, .raw_writefn = raw_write },
};

void hw_watchpoint_update(ARMCPU *cpu, int n)
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
}

if (cpu_isar_feature(aa64_sve, cpu)) {
- define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
- if (arm_feature(env, ARM_FEATURE_EL2)) {
- define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
- } else {
- define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
- }
- if (arm_feature(env, ARM_FEATURE_EL3)) {
- define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
- }
+ define_arm_cp_regs(cpu, zcr_reginfo);
}

#ifdef TARGET_AARCH64
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
86
env->tpr_access_type);
87
}
88
--
88
--
89
2.20.1
89
2.25.1
90
91
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
This register is present for either VHE or Debugv8p2.
2
4
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 20210525225817.400336-3-richard.henderson@linaro.org
7
Message-id: 20220506180242.216785-5-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
9
---
8
target/arm/translate-a64.c | 15 ++++++---------
10
target/arm/helper.c | 15 +++++++++++----
9
1 file changed, 6 insertions(+), 9 deletions(-)
11
1 file changed, 11 insertions(+), 4 deletions(-)
10
12
11
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
13
diff --git a/target/arm/helper.c b/target/arm/helper.c
12
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate-a64.c
15
--- a/target/arm/helper.c
14
+++ b/target/arm/translate-a64.c
16
+++ b/target/arm/helper.c
15
@@ -XXX,XX +XXX,XX @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
17
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo jazelle_regs[] = {
16
int rd = extract32(insn, 0, 5);
18
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
17
19
};
18
if (mos) {
20
19
- unallocated_encoding(s);
21
+static const ARMCPRegInfo contextidr_el2 = {
20
- return;
22
+ .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
21
+ goto do_unallocated;
23
+ .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
24
+ .access = PL2_RW,
25
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
26
+};
27
+
28
static const ARMCPRegInfo vhe_reginfo[] = {
29
- { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
30
- .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
31
- .access = PL2_RW,
32
- .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
33
{ .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
34
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
35
.access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
36
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
37
define_one_arm_cp_reg(cpu, &ssbs_reginfo);
22
}
38
}
23
39
24
switch (opcode) {
40
+ if (cpu_isar_feature(aa64_vh, cpu) ||
25
@@ -XXX,XX +XXX,XX @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
41
+ cpu_isar_feature(aa64_debugv8p2, cpu)) {
26
/* FCVT between half, single and double precision */
42
+ define_one_arm_cp_reg(cpu, &contextidr_el2);
27
int dtype = extract32(opcode, 0, 2);
43
+ }
28
if (type == 2 || dtype == type) {
44
if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
29
- unallocated_encoding(s);
45
define_arm_cp_regs(cpu, vhe_reginfo);
30
- return;
31
+ goto do_unallocated;
32
}
33
if (!fp_access_check(s)) {
34
return;
35
@@ -XXX,XX +XXX,XX @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
36
37
case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
38
if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
39
- unallocated_encoding(s);
40
- return;
41
+ goto do_unallocated;
42
}
43
/* fall through */
44
case 0x0 ... 0x3:
45
@@ -XXX,XX +XXX,XX @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
46
break;
47
case 3:
48
if (!dc_isar_feature(aa64_fp16, s)) {
49
- unallocated_encoding(s);
50
- return;
51
+ goto do_unallocated;
52
}
53
54
if (!fp_access_check(s)) {
55
@@ -XXX,XX +XXX,XX @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
56
handle_fp_1src_half(s, opcode, rd, rn);
57
break;
58
default:
59
- unallocated_encoding(s);
60
+ goto do_unallocated;
61
}
62
break;
63
64
default:
65
+ do_unallocated:
66
unallocated_encoding(s);
67
break;
68
}
46
}
69
--
47
--
70
2.20.1
48
2.25.1
71
72
1
From: Alexander Graf <agraf@csgraf.de>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Until now, Hypervisor.framework has only been available on x86_64 systems.
3
Previously we were defining some of these in user-only mode,
4
With Apple Silicon shipping now, it extends its reach to aarch64. To
4
but none of them are accessible from user-only, therefore
5
prepare for supporting multiple architectures, let's start moving common
5
define them only in system mode.
6
code out into its own accel directory.
6
7
7
This will shortly be used from cpu_tcg.c also.
8
This patch moves CPU and memory operations over. While at it, make sure
8
9
the code is usable on non-i386 systems.
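
Among the memory operations being moved is the slot lookup; the overlap
check in the hvf_find_overlap_slot() hunk below is the usual test that two
half-open ranges intersect, roughly:

    /* Two ranges [a, a+a_size) and [b, b+b_size) overlap iff each one
     * starts before the other ends; sketch of the test used below. */
    static bool ranges_overlap_sketch(uint64_t a, uint64_t a_size,
                                      uint64_t b, uint64_t b_size)
    {
        return a < b + b_size && b < a + a_size;
    }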
10
11
Signed-off-by: Alexander Graf <agraf@csgraf.de>
12
Reviewed-by: Sergio Lopez <slp@redhat.com>
13
Message-id: 20210519202253.76782-4-agraf@csgraf.de
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20220506180242.216785-6-richard.henderson@linaro.org
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
13
---
17
include/sysemu/hvf_int.h | 4 +
14
target/arm/internals.h | 6 ++++
18
target/i386/hvf/hvf-i386.h | 2 -
15
target/arm/cpu64.c | 64 +++---------------------------------------
19
target/i386/hvf/x86hvf.h | 2 -
16
target/arm/cpu_tcg.c | 59 ++++++++++++++++++++++++++++++++++++++
20
accel/hvf/hvf-accel-ops.c | 308 ++++++++++++++++++++++++++++++++++++-
17
3 files changed, 69 insertions(+), 60 deletions(-)
21
target/i386/hvf/hvf.c | 302 ------------------------------------
18
22
5 files changed, 311 insertions(+), 307 deletions(-)
19
diff --git a/target/arm/internals.h b/target/arm/internals.h
23
24
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
25
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
26
--- a/include/sysemu/hvf_int.h
21
--- a/target/arm/internals.h
27
+++ b/include/sysemu/hvf_int.h
22
+++ b/target/arm/internals.h
23
@@ -XXX,XX +XXX,XX @@ int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
24
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
25
#endif
26
27
+#ifdef CONFIG_USER_ONLY
28
+static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
29
+#else
30
+void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
31
+#endif
32
+
33
#endif
34
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/cpu64.c
37
+++ b/target/arm/cpu64.c
28
@@ -XXX,XX +XXX,XX @@
38
@@ -XXX,XX +XXX,XX @@
29
39
#include "hvf_arm.h"
30
#include <Hypervisor/hv.h>
40
#include "qapi/visitor.h"
31
41
#include "hw/qdev-properties.h"
32
+void hvf_set_phys_mem(MemoryRegionSection *, bool);
42
-#include "cpregs.h"
33
void assert_hvf_ok(hv_return_t ret);
43
+#include "internals.h"
34
+hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
44
35
+int hvf_put_registers(CPUState *);
45
36
+int hvf_get_registers(CPUState *);
46
-#ifndef CONFIG_USER_ONLY
37
47
-static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
38
#endif
48
-{
39
diff --git a/target/i386/hvf/hvf-i386.h b/target/i386/hvf/hvf-i386.h
49
- ARMCPU *cpu = env_archcpu(env);
40
index XXXXXXX..XXXXXXX 100644
41
--- a/target/i386/hvf/hvf-i386.h
42
+++ b/target/i386/hvf/hvf-i386.h
43
@@ -XXX,XX +XXX,XX @@ struct HVFState {
44
};
45
extern HVFState *hvf_state;
46
47
-void hvf_set_phys_mem(MemoryRegionSection *, bool);
48
void hvf_handle_io(CPUArchState *, uint16_t, void *, int, int, int);
49
-hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
50
51
#ifdef NEED_CPU_H
52
/* Functions exported to host specific mode */
53
diff --git a/target/i386/hvf/x86hvf.h b/target/i386/hvf/x86hvf.h
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/i386/hvf/x86hvf.h
56
+++ b/target/i386/hvf/x86hvf.h
57
@@ -XXX,XX +XXX,XX @@
58
#include "x86_descr.h"
59
60
int hvf_process_events(CPUState *);
61
-int hvf_put_registers(CPUState *);
62
-int hvf_get_registers(CPUState *);
63
bool hvf_inject_interrupts(CPUState *);
64
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
65
SegmentCache *qseg, bool is_tr);
66
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/accel/hvf/hvf-accel-ops.c
69
+++ b/accel/hvf/hvf-accel-ops.c
70
@@ -XXX,XX +XXX,XX @@
71
#include "qemu/osdep.h"
72
#include "qemu/error-report.h"
73
#include "qemu/main-loop.h"
74
+#include "exec/address-spaces.h"
75
+#include "exec/exec-all.h"
76
+#include "sysemu/cpus.h"
77
#include "sysemu/hvf.h"
78
+#include "sysemu/hvf_int.h"
79
#include "sysemu/runstate.h"
80
-#include "target/i386/cpu.h"
81
#include "qemu/guest-random.h"
82
83
#include "hvf-accel-ops.h"
84
85
+HVFState *hvf_state;
86
+
87
+/* Memory slots */
88
+
89
+hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
90
+{
91
+ hvf_slot *slot;
92
+ int x;
93
+ for (x = 0; x < hvf_state->num_slots; ++x) {
94
+ slot = &hvf_state->slots[x];
95
+ if (slot->size && start < (slot->start + slot->size) &&
96
+ (start + size) > slot->start) {
97
+ return slot;
98
+ }
99
+ }
100
+ return NULL;
101
+}
102
+
103
+struct mac_slot {
104
+ int present;
105
+ uint64_t size;
106
+ uint64_t gpa_start;
107
+ uint64_t gva;
108
+};
109
+
110
+struct mac_slot mac_slots[32];
111
+
112
+static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
113
+{
114
+ struct mac_slot *macslot;
115
+ hv_return_t ret;
116
+
117
+ macslot = &mac_slots[slot->slot_id];
118
+
119
+ if (macslot->present) {
120
+ if (macslot->size != slot->size) {
121
+ macslot->present = 0;
122
+ ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
123
+ assert_hvf_ok(ret);
124
+ }
125
+ }
126
+
127
+ if (!slot->size) {
128
+ return 0;
129
+ }
130
+
131
+ macslot->present = 1;
132
+ macslot->gpa_start = slot->start;
133
+ macslot->size = slot->size;
134
+ ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);
135
+ assert_hvf_ok(ret);
136
+ return 0;
137
+}
138
+
139
+void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
140
+{
141
+ hvf_slot *mem;
142
+ MemoryRegion *area = section->mr;
143
+ bool writeable = !area->readonly && !area->rom_device;
144
+ hv_memory_flags_t flags;
145
+
146
+ if (!memory_region_is_ram(area)) {
147
+ if (writeable) {
148
+ return;
149
+ } else if (!memory_region_is_romd(area)) {
150
+ /*
151
+ * If the memory device is not in romd_mode, then we actually want
152
+ * to remove the hvf memory slot so all accesses will trap.
153
+ */
154
+ add = false;
155
+ }
156
+ }
157
+
158
+ mem = hvf_find_overlap_slot(
159
+ section->offset_within_address_space,
160
+ int128_get64(section->size));
161
+
162
+ if (mem && add) {
163
+ if (mem->size == int128_get64(section->size) &&
164
+ mem->start == section->offset_within_address_space &&
165
+ mem->mem == (memory_region_get_ram_ptr(area) +
166
+ section->offset_within_region)) {
167
+ return; /* Same region was attempted to register, go away. */
168
+ }
169
+ }
170
+
171
+ /* Region needs to be reset. set the size to 0 and remap it. */
172
+ if (mem) {
173
+ mem->size = 0;
174
+ if (do_hvf_set_memory(mem, 0)) {
175
+ error_report("Failed to reset overlapping slot");
176
+ abort();
177
+ }
178
+ }
179
+
180
+ if (!add) {
181
+ return;
182
+ }
183
+
184
+ if (area->readonly ||
185
+ (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
186
+ flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
187
+ } else {
188
+ flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
189
+ }
190
+
191
+ /* Now make a new slot. */
192
+ int x;
193
+
194
+ for (x = 0; x < hvf_state->num_slots; ++x) {
195
+ mem = &hvf_state->slots[x];
196
+ if (!mem->size) {
197
+ break;
198
+ }
199
+ }
200
+
201
+ if (x == hvf_state->num_slots) {
202
+ error_report("No free slots");
203
+ abort();
204
+ }
205
+
206
+ mem->size = int128_get64(section->size);
207
+ mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
208
+ mem->start = section->offset_within_address_space;
209
+ mem->region = area;
210
+
211
+ if (do_hvf_set_memory(mem, flags)) {
212
+ error_report("Error registering new memory slot");
213
+ abort();
214
+ }
215
+}
216
+
217
+static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
218
+{
219
+ if (!cpu->vcpu_dirty) {
220
+ hvf_get_registers(cpu);
221
+ cpu->vcpu_dirty = true;
222
+ }
223
+}
224
+
225
+void hvf_cpu_synchronize_state(CPUState *cpu)
226
+{
227
+ if (!cpu->vcpu_dirty) {
228
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
229
+ }
230
+}
231
+
232
+static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
233
+ run_on_cpu_data arg)
234
+{
235
+ hvf_put_registers(cpu);
236
+ cpu->vcpu_dirty = false;
237
+}
238
+
239
+void hvf_cpu_synchronize_post_reset(CPUState *cpu)
240
+{
241
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
242
+}
243
+
244
+static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
245
+ run_on_cpu_data arg)
246
+{
247
+ hvf_put_registers(cpu);
248
+ cpu->vcpu_dirty = false;
249
+}
250
+
251
+void hvf_cpu_synchronize_post_init(CPUState *cpu)
252
+{
253
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
254
+}
255
+
256
+static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
257
+ run_on_cpu_data arg)
258
+{
259
+ cpu->vcpu_dirty = true;
260
+}
261
+
262
+void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
263
+{
264
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
265
+}
266
+
267
+static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
268
+{
269
+ hvf_slot *slot;
270
+
271
+ slot = hvf_find_overlap_slot(
272
+ section->offset_within_address_space,
273
+ int128_get64(section->size));
274
+
275
+ /* protect region against writes; begin tracking it */
276
+ if (on) {
277
+ slot->flags |= HVF_SLOT_LOG;
278
+ hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
279
+ HV_MEMORY_READ);
280
+ /* stop tracking region*/
281
+ } else {
282
+ slot->flags &= ~HVF_SLOT_LOG;
283
+ hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
284
+ HV_MEMORY_READ | HV_MEMORY_WRITE);
285
+ }
286
+}
287
+
288
+static void hvf_log_start(MemoryListener *listener,
289
+ MemoryRegionSection *section, int old, int new)
290
+{
291
+ if (old != 0) {
292
+ return;
293
+ }
294
+
295
+ hvf_set_dirty_tracking(section, 1);
296
+}
297
+
298
+static void hvf_log_stop(MemoryListener *listener,
299
+ MemoryRegionSection *section, int old, int new)
300
+{
301
+ if (new != 0) {
302
+ return;
303
+ }
304
+
305
+ hvf_set_dirty_tracking(section, 0);
306
+}
307
+
308
+static void hvf_log_sync(MemoryListener *listener,
309
+ MemoryRegionSection *section)
310
+{
311
+ /*
312
+ * sync of dirty pages is handled elsewhere; just make sure we keep
313
+ * tracking the region.
314
+ */
315
+ hvf_set_dirty_tracking(section, 1);
316
+}
317
+
318
+static void hvf_region_add(MemoryListener *listener,
319
+ MemoryRegionSection *section)
320
+{
321
+ hvf_set_phys_mem(section, true);
322
+}
323
+
324
+static void hvf_region_del(MemoryListener *listener,
325
+ MemoryRegionSection *section)
326
+{
327
+ hvf_set_phys_mem(section, false);
328
+}
329
+
330
+static MemoryListener hvf_memory_listener = {
331
+ .priority = 10,
332
+ .region_add = hvf_region_add,
333
+ .region_del = hvf_region_del,
334
+ .log_start = hvf_log_start,
335
+ .log_stop = hvf_log_stop,
336
+ .log_sync = hvf_log_sync,
337
+};
338
+
339
+static void dummy_signal(int sig)
340
+{
341
+}
342
+
343
+bool hvf_allowed;
344
+
345
+static int hvf_accel_init(MachineState *ms)
346
+{
347
+ int x;
348
+ hv_return_t ret;
349
+ HVFState *s;
350
+
351
+ ret = hv_vm_create(HV_VM_DEFAULT);
352
+ assert_hvf_ok(ret);
353
+
354
+ s = g_new0(HVFState, 1);
355
+
356
+ s->num_slots = 32;
357
+ for (x = 0; x < s->num_slots; ++x) {
358
+ s->slots[x].size = 0;
359
+ s->slots[x].slot_id = x;
360
+ }
361
+
362
+ hvf_state = s;
363
+ memory_listener_register(&hvf_memory_listener, &address_space_memory);
364
+ return 0;
365
+}
366
+
367
+static void hvf_accel_class_init(ObjectClass *oc, void *data)
368
+{
369
+ AccelClass *ac = ACCEL_CLASS(oc);
370
+ ac->name = "HVF";
371
+ ac->init_machine = hvf_accel_init;
372
+ ac->allowed = &hvf_allowed;
373
+}
374
+
375
+static const TypeInfo hvf_accel_type = {
376
+ .name = TYPE_HVF_ACCEL,
377
+ .parent = TYPE_ACCEL,
378
+ .class_init = hvf_accel_class_init,
379
+};
380
+
381
+static void hvf_type_init(void)
382
+{
383
+ type_register_static(&hvf_accel_type);
384
+}
385
+
386
+type_init(hvf_type_init);
387
+
388
/*
389
* The HVF-specific vCPU thread function. This one should only run when the host
390
* CPU supports the VMX "unrestricted guest" feature.
391
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
392
index XXXXXXX..XXXXXXX 100644
393
--- a/target/i386/hvf/hvf.c
394
+++ b/target/i386/hvf/hvf.c
395
@@ -XXX,XX +XXX,XX @@
396
397
#include "hvf-accel-ops.h"
398
399
-HVFState *hvf_state;
400
-
50
-
401
-/* Memory slots */
51
- /* Number of cores is in [25:24]; otherwise we RAZ */
402
-hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
52
- return (cpu->core_count - 1) << 24;
403
-{
404
- hvf_slot *slot;
405
- int x;
406
- for (x = 0; x < hvf_state->num_slots; ++x) {
407
- slot = &hvf_state->slots[x];
408
- if (slot->size && start < (slot->start + slot->size) &&
409
- (start + size) > slot->start) {
410
- return slot;
411
- }
412
- }
413
- return NULL;
414
-}
53
-}
54
-#endif
415
-
55
-
416
-struct mac_slot {
56
-static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
417
- int present;
57
-#ifndef CONFIG_USER_ONLY
418
- uint64_t size;
58
- { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
419
- uint64_t gpa_start;
59
- .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
420
- uint64_t gva;
60
- .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
61
- .writefn = arm_cp_write_ignore },
62
- { .name = "L2CTLR",
63
- .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
64
- .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
65
- .writefn = arm_cp_write_ignore },
66
-#endif
67
- { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
68
- .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
69
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
70
- { .name = "L2ECTLR",
71
- .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
72
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
73
- { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
74
- .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
75
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
76
- { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
77
- .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
78
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
79
- { .name = "CPUACTLR",
80
- .cp = 15, .opc1 = 0, .crm = 15,
81
- .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
82
- { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
83
- .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
84
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
85
- { .name = "CPUECTLR",
86
- .cp = 15, .opc1 = 1, .crm = 15,
87
- .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
88
- { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
89
- .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
90
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
91
- { .name = "CPUMERRSR",
92
- .cp = 15, .opc1 = 2, .crm = 15,
93
- .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
94
- { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
95
- .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
96
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
97
- { .name = "L2MERRSR",
98
- .cp = 15, .opc1 = 3, .crm = 15,
99
- .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
421
-};
100
-};
422
-
101
-
423
-struct mac_slot mac_slots[32];
102
static void aarch64_a57_initfn(Object *obj)
424
-
425
-static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
426
-{
427
- struct mac_slot *macslot;
428
- hv_return_t ret;
429
-
430
- macslot = &mac_slots[slot->slot_id];
431
-
432
- if (macslot->present) {
433
- if (macslot->size != slot->size) {
434
- macslot->present = 0;
435
- ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
436
- assert_hvf_ok(ret);
437
- }
438
- }
439
-
440
- if (!slot->size) {
441
- return 0;
442
- }
443
-
444
- macslot->present = 1;
445
- macslot->gpa_start = slot->start;
446
- macslot->size = slot->size;
447
- ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);
448
- assert_hvf_ok(ret);
449
- return 0;
450
-}
451
-
452
-void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
453
-{
454
- hvf_slot *mem;
455
- MemoryRegion *area = section->mr;
456
- bool writeable = !area->readonly && !area->rom_device;
457
- hv_memory_flags_t flags;
458
-
459
- if (!memory_region_is_ram(area)) {
460
- if (writeable) {
461
- return;
462
- } else if (!memory_region_is_romd(area)) {
463
- /*
464
- * If the memory device is not in romd_mode, then we actually want
465
- * to remove the hvf memory slot so all accesses will trap.
466
- */
467
- add = false;
468
- }
469
- }
470
-
471
- mem = hvf_find_overlap_slot(
472
- section->offset_within_address_space,
473
- int128_get64(section->size));
474
-
475
- if (mem && add) {
476
- if (mem->size == int128_get64(section->size) &&
477
- mem->start == section->offset_within_address_space &&
478
- mem->mem == (memory_region_get_ram_ptr(area) +
479
- section->offset_within_region)) {
480
- return; /* Same region was attempted to register, go away. */
481
- }
482
- }
483
-
484
- /* Region needs to be reset. set the size to 0 and remap it. */
485
- if (mem) {
486
- mem->size = 0;
487
- if (do_hvf_set_memory(mem, 0)) {
488
- error_report("Failed to reset overlapping slot");
489
- abort();
490
- }
491
- }
492
-
493
- if (!add) {
494
- return;
495
- }
496
-
497
- if (area->readonly ||
498
- (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
499
- flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
500
- } else {
501
- flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
502
- }
503
-
504
- /* Now make a new slot. */
505
- int x;
506
-
507
- for (x = 0; x < hvf_state->num_slots; ++x) {
508
- mem = &hvf_state->slots[x];
509
- if (!mem->size) {
510
- break;
511
- }
512
- }
513
-
514
- if (x == hvf_state->num_slots) {
515
- error_report("No free slots");
516
- abort();
517
- }
518
-
519
- mem->size = int128_get64(section->size);
520
- mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
521
- mem->start = section->offset_within_address_space;
522
- mem->region = area;
523
-
524
- if (do_hvf_set_memory(mem, flags)) {
525
- error_report("Error registering new memory slot");
526
- abort();
527
- }
528
-}
529
-
530
void vmx_update_tpr(CPUState *cpu)
531
{
103
{
532
/* TODO: need integrate APIC handling */
104
ARMCPU *cpu = ARM_CPU(obj);
533
@@ -XXX,XX +XXX,XX @@ void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
105
@@ -XXX,XX +XXX,XX @@ static void aarch64_a57_initfn(Object *obj)
534
}
106
cpu->gic_num_lrs = 4;
107
cpu->gic_vpribits = 5;
108
cpu->gic_vprebits = 5;
109
- define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
110
+ define_cortex_a72_a57_a53_cp_reginfo(cpu);
535
}
111
}
536
112
537
-static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
113
static void aarch64_a53_initfn(Object *obj)
538
-{
114
@@ -XXX,XX +XXX,XX @@ static void aarch64_a53_initfn(Object *obj)
539
- if (!cpu->vcpu_dirty) {
115
cpu->gic_num_lrs = 4;
540
- hvf_get_registers(cpu);
116
cpu->gic_vpribits = 5;
541
- cpu->vcpu_dirty = true;
117
cpu->gic_vprebits = 5;
542
- }
118
- define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
543
-}
119
+ define_cortex_a72_a57_a53_cp_reginfo(cpu);
544
-
545
-void hvf_cpu_synchronize_state(CPUState *cpu)
546
-{
547
- if (!cpu->vcpu_dirty) {
548
- run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
549
- }
550
-}
551
-
552
-static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
553
- run_on_cpu_data arg)
554
-{
555
- hvf_put_registers(cpu);
556
- cpu->vcpu_dirty = false;
557
-}
558
-
559
-void hvf_cpu_synchronize_post_reset(CPUState *cpu)
560
-{
561
- run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
562
-}
563
-
564
-static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
565
- run_on_cpu_data arg)
566
-{
567
- hvf_put_registers(cpu);
568
- cpu->vcpu_dirty = false;
569
-}
570
-
571
-void hvf_cpu_synchronize_post_init(CPUState *cpu)
572
-{
573
- run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
574
-}
575
-
576
-static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
577
- run_on_cpu_data arg)
578
-{
579
- cpu->vcpu_dirty = true;
580
-}
581
-
582
-void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
583
-{
584
- run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
585
-}
586
-
587
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
588
{
589
int read, write;
590
@@ -XXX,XX +XXX,XX @@ static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
591
return false;
592
}
120
}
593
121
594
-static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
122
static void aarch64_a72_initfn(Object *obj)
595
-{
123
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
596
- hvf_slot *slot;
124
cpu->gic_num_lrs = 4;
597
-
125
cpu->gic_vpribits = 5;
598
- slot = hvf_find_overlap_slot(
126
cpu->gic_vprebits = 5;
599
- section->offset_within_address_space,
127
- define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
600
- int128_get64(section->size));
128
+ define_cortex_a72_a57_a53_cp_reginfo(cpu);
601
-
602
- /* protect region against writes; begin tracking it */
603
- if (on) {
604
- slot->flags |= HVF_SLOT_LOG;
605
- hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
606
- HV_MEMORY_READ);
607
- /* stop tracking region*/
608
- } else {
609
- slot->flags &= ~HVF_SLOT_LOG;
610
- hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
611
- HV_MEMORY_READ | HV_MEMORY_WRITE);
612
- }
613
-}
614
-
615
-static void hvf_log_start(MemoryListener *listener,
616
- MemoryRegionSection *section, int old, int new)
617
-{
618
- if (old != 0) {
619
- return;
620
- }
621
-
622
- hvf_set_dirty_tracking(section, 1);
623
-}
624
-
625
-static void hvf_log_stop(MemoryListener *listener,
626
- MemoryRegionSection *section, int old, int new)
627
-{
628
- if (new != 0) {
629
- return;
630
- }
631
-
632
- hvf_set_dirty_tracking(section, 0);
633
-}
634
-
635
-static void hvf_log_sync(MemoryListener *listener,
636
- MemoryRegionSection *section)
637
-{
638
- /*
639
- * sync of dirty pages is handled elsewhere; just make sure we keep
640
- * tracking the region.
641
- */
642
- hvf_set_dirty_tracking(section, 1);
643
-}
644
-
645
-static void hvf_region_add(MemoryListener *listener,
646
- MemoryRegionSection *section)
647
-{
648
- hvf_set_phys_mem(section, true);
649
-}
650
-
651
-static void hvf_region_del(MemoryListener *listener,
652
- MemoryRegionSection *section)
653
-{
654
- hvf_set_phys_mem(section, false);
655
-}
656
-
657
-static MemoryListener hvf_memory_listener = {
658
- .priority = 10,
659
- .region_add = hvf_region_add,
660
- .region_del = hvf_region_del,
661
- .log_start = hvf_log_start,
662
- .log_stop = hvf_log_stop,
663
- .log_sync = hvf_log_sync,
664
-};
665
-
666
void hvf_vcpu_destroy(CPUState *cpu)
667
{
668
X86CPU *x86_cpu = X86_CPU(cpu);
669
@@ -XXX,XX +XXX,XX @@ void hvf_vcpu_destroy(CPUState *cpu)
670
assert_hvf_ok(ret);
671
}
129
}
672
130
673
-static void dummy_signal(int sig)
131
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
674
-{
132
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
675
-}
133
index XXXXXXX..XXXXXXX 100644
676
-
134
--- a/target/arm/cpu_tcg.c
677
static void init_tsc_freq(CPUX86State *env)
135
+++ b/target/arm/cpu_tcg.c
678
{
136
@@ -XXX,XX +XXX,XX @@
679
size_t length;
137
#endif
680
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
138
#include "cpregs.h"
681
139
682
return ret;
140
+#ifndef CONFIG_USER_ONLY
683
}
141
+static uint64_t l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
684
-
142
+{
685
-bool hvf_allowed;
143
+ ARMCPU *cpu = env_archcpu(env);
686
-
144
+
687
-static int hvf_accel_init(MachineState *ms)
145
+ /* Number of cores is in [25:24]; otherwise we RAZ */
688
-{
146
+ return (cpu->core_count - 1) << 24;
689
- int x;
147
+}
690
- hv_return_t ret;
148
+
691
- HVFState *s;
149
+static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
692
-
150
+ { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
693
- ret = hv_vm_create(HV_VM_DEFAULT);
151
+ .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
694
- assert_hvf_ok(ret);
152
+ .access = PL1_RW, .readfn = l2ctlr_read,
695
-
153
+ .writefn = arm_cp_write_ignore },
696
- s = g_new0(HVFState, 1);
154
+ { .name = "L2CTLR",
697
-
155
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
698
- s->num_slots = 32;
156
+ .access = PL1_RW, .readfn = l2ctlr_read,
699
- for (x = 0; x < s->num_slots; ++x) {
157
+ .writefn = arm_cp_write_ignore },
700
- s->slots[x].size = 0;
158
+ { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
701
- s->slots[x].slot_id = x;
159
+ .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
702
- }
160
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
703
-
161
+ { .name = "L2ECTLR",
704
- hvf_state = s;
162
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
705
- memory_listener_register(&hvf_memory_listener, &address_space_memory);
163
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
706
- return 0;
164
+ { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
707
-}
165
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
708
-
166
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
709
-static void hvf_accel_class_init(ObjectClass *oc, void *data)
167
+ { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
710
-{
168
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
711
- AccelClass *ac = ACCEL_CLASS(oc);
169
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
712
- ac->name = "HVF";
170
+ { .name = "CPUACTLR",
713
- ac->init_machine = hvf_accel_init;
171
+ .cp = 15, .opc1 = 0, .crm = 15,
714
- ac->allowed = &hvf_allowed;
172
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
715
-}
173
+ { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
716
-
174
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
717
-static const TypeInfo hvf_accel_type = {
175
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
718
- .name = TYPE_HVF_ACCEL,
176
+ { .name = "CPUECTLR",
719
- .parent = TYPE_ACCEL,
177
+ .cp = 15, .opc1 = 1, .crm = 15,
720
- .class_init = hvf_accel_class_init,
178
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
721
-};
179
+ { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
722
-
180
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
723
-static void hvf_type_init(void)
181
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
724
-{
182
+ { .name = "CPUMERRSR",
725
- type_register_static(&hvf_accel_type);
183
+ .cp = 15, .opc1 = 2, .crm = 15,
726
-}
184
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
727
-
185
+ { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
728
-type_init(hvf_type_init);
186
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
187
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
188
+ { .name = "L2MERRSR",
189
+ .cp = 15, .opc1 = 3, .crm = 15,
190
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
191
+};
192
+
193
+void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu)
194
+{
195
+ define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
196
+}
197
+#endif /* !CONFIG_USER_ONLY */
198
+
199
/* CPU models. These are not needed for the AArch64 linux-user build. */
200
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
201
729
--
202
--
730
2.20.1
203
2.25.1
731
732
1
From: Damien Goutte-Gattat <dgouttegattat@incenp.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The 4.x branch of Sphinx introduces a breaking change, as generated man
3
Instead of starting with cortex-a15 and adding v8 features to
4
pages are now written to subdirectories corresponding to the manual
4
a v7 cpu, begin with a v8 cpu stripped of its aarch64 features.
5
section they belong to. This results in `make install` erroring out when
5
This fixes the long-standing to-do where we only enabled v8
6
attempting to install the man pages, because they are not where it
6
features for user-only.
7
expects to find them.
8
7
9
This patch restores the behavior of Sphinx 3.x regarding man pages.
10
11
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/256
12
Signed-off-by: Damien Goutte-Gattat <dgouttegattat@incenp.org>
13
Message-id: 20210503161422.15028-1-dgouttegattat@incenp.org
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20220506180242.216785-7-richard.henderson@linaro.org
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
12
---
17
docs/conf.py | 1 +
13
target/arm/cpu_tcg.c | 151 ++++++++++++++++++++++++++-----------------
18
1 file changed, 1 insertion(+)
14
1 file changed, 92 insertions(+), 59 deletions(-)
19
15
20
diff --git a/docs/conf.py b/docs/conf.py
16
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
21
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
22
--- a/docs/conf.py
18
--- a/target/arm/cpu_tcg.c
23
+++ b/docs/conf.py
19
+++ b/target/arm/cpu_tcg.c
24
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ static void arm_v7m_class_init(ObjectClass *oc, void *data)
25
['Stefan Hajnoczi <stefanha@redhat.com>',
21
static void arm_max_initfn(Object *obj)
26
'Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>'], 1),
22
{
27
]
23
ARMCPU *cpu = ARM_CPU(obj);
28
+man_make_section_directory = False
24
+ uint32_t t;
29
25
30
# -- Options for Texinfo output -------------------------------------------
26
- cortex_a15_initfn(obj);
27
+ /* aarch64_a57_initfn, advertising none of the aarch64 features */
28
+ cpu->dtb_compatible = "arm,cortex-a57";
29
+ set_feature(&cpu->env, ARM_FEATURE_V8);
30
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
31
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
32
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
33
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
34
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
35
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
36
+ cpu->midr = 0x411fd070;
37
+ cpu->revidr = 0x00000000;
38
+ cpu->reset_fpsid = 0x41034070;
39
+ cpu->isar.mvfr0 = 0x10110222;
40
+ cpu->isar.mvfr1 = 0x12111111;
41
+ cpu->isar.mvfr2 = 0x00000043;
42
+ cpu->ctr = 0x8444c004;
43
+ cpu->reset_sctlr = 0x00c50838;
44
+ cpu->isar.id_pfr0 = 0x00000131;
45
+ cpu->isar.id_pfr1 = 0x00011011;
46
+ cpu->isar.id_dfr0 = 0x03010066;
47
+ cpu->id_afr0 = 0x00000000;
48
+ cpu->isar.id_mmfr0 = 0x10101105;
49
+ cpu->isar.id_mmfr1 = 0x40000000;
50
+ cpu->isar.id_mmfr2 = 0x01260000;
51
+ cpu->isar.id_mmfr3 = 0x02102211;
52
+ cpu->isar.id_isar0 = 0x02101110;
53
+ cpu->isar.id_isar1 = 0x13112111;
54
+ cpu->isar.id_isar2 = 0x21232042;
55
+ cpu->isar.id_isar3 = 0x01112131;
56
+ cpu->isar.id_isar4 = 0x00011142;
57
+ cpu->isar.id_isar5 = 0x00011121;
58
+ cpu->isar.id_isar6 = 0;
59
+ cpu->isar.dbgdidr = 0x3516d000;
60
+ cpu->clidr = 0x0a200023;
61
+ cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
62
+ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
63
+ cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
64
+ define_cortex_a72_a57_a53_cp_reginfo(cpu);
65
66
- /* old-style VFP short-vector support */
67
- cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
68
+ /* Add additional features supported by QEMU */
69
+ t = cpu->isar.id_isar5;
70
+ t = FIELD_DP32(t, ID_ISAR5, AES, 2);
71
+ t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
72
+ t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
73
+ t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
74
+ t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
75
+ t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
76
+ cpu->isar.id_isar5 = t;
77
+
78
+ t = cpu->isar.id_isar6;
79
+ t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
80
+ t = FIELD_DP32(t, ID_ISAR6, DP, 1);
81
+ t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
82
+ t = FIELD_DP32(t, ID_ISAR6, SB, 1);
83
+ t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
84
+ t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
85
+ t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
86
+ cpu->isar.id_isar6 = t;
87
+
88
+ t = cpu->isar.mvfr1;
89
+ t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
90
+ t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
91
+ cpu->isar.mvfr1 = t;
92
+
93
+ t = cpu->isar.mvfr2;
94
+ t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
95
+ t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
96
+ cpu->isar.mvfr2 = t;
97
+
98
+ t = cpu->isar.id_mmfr3;
99
+ t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
100
+ cpu->isar.id_mmfr3 = t;
101
+
102
+ t = cpu->isar.id_mmfr4;
103
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
104
+ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
105
+ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
106
+ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
107
+ cpu->isar.id_mmfr4 = t;
108
+
109
+ t = cpu->isar.id_pfr0;
110
+ t = FIELD_DP32(t, ID_PFR0, DIT, 1);
111
+ cpu->isar.id_pfr0 = t;
112
+
113
+ t = cpu->isar.id_pfr2;
114
+ t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
115
+ cpu->isar.id_pfr2 = t;
116
117
#ifdef CONFIG_USER_ONLY
118
/*
119
- * We don't set these in system emulation mode for the moment,
120
- * since we don't correctly set (all of) the ID registers to
121
- * advertise them.
122
+ * Break with true ARMv8 and add back old-style VFP short-vector support.
123
+ * Only do this for user-mode, where -cpu max is the default, so that
124
+ * older v6 and v7 programs are more likely to work without adjustment.
125
*/
126
- set_feature(&cpu->env, ARM_FEATURE_V8);
127
- {
128
- uint32_t t;
129
-
130
- t = cpu->isar.id_isar5;
131
- t = FIELD_DP32(t, ID_ISAR5, AES, 2);
132
- t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
133
- t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
134
- t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
135
- t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
136
- t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
137
- cpu->isar.id_isar5 = t;
138
-
139
- t = cpu->isar.id_isar6;
140
- t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
141
- t = FIELD_DP32(t, ID_ISAR6, DP, 1);
142
- t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
143
- t = FIELD_DP32(t, ID_ISAR6, SB, 1);
144
- t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
145
- t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
146
- t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
147
- cpu->isar.id_isar6 = t;
148
-
149
- t = cpu->isar.mvfr1;
150
- t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
151
- t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
152
- cpu->isar.mvfr1 = t;
153
-
154
- t = cpu->isar.mvfr2;
155
- t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
156
- t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
157
- cpu->isar.mvfr2 = t;
158
-
159
- t = cpu->isar.id_mmfr3;
160
- t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
161
- cpu->isar.id_mmfr3 = t;
162
-
163
- t = cpu->isar.id_mmfr4;
164
- t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
165
- t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
166
- t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
167
- t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
168
- cpu->isar.id_mmfr4 = t;
169
-
170
- t = cpu->isar.id_pfr0;
171
- t = FIELD_DP32(t, ID_PFR0, DIT, 1);
172
- cpu->isar.id_pfr0 = t;
173
-
174
- t = cpu->isar.id_pfr2;
175
- t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
176
- cpu->isar.id_pfr2 = t;
177
- }
178
-#endif /* CONFIG_USER_ONLY */
179
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
180
+#endif
181
}
182
#endif /* !TARGET_AARCH64 */
31
183
32
--
184
--
33
2.20.1
185
2.25.1
34
35
1
From: Alexander Graf <agraf@csgraf.de>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Until now, Hypervisor.framework has only been available on x86_64 systems.
3
We set this for qemu-system-aarch64, but failed to do so
4
With Apple Silicon shipping now, it extends its reach to aarch64. To
4
for the strictly 32-bit emulation.
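
For reference, ID_DFR0.PerfMon sits in bits [27:24], so the added
FIELD_DP32(..., PERFMON, 5) deposits 5 there (0x05000000), advertising the
v8.4 PMU to AArch32 guests just as ID_AA64DFR0.PMUVER == 5 already does on
the 64-bit side.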
5
prepare for supporting multiple architectures, let's start moving common
6
code out into its own accel directory.
7
5
8
This patch moves a few internal struct and constant defines over.
6
Fixes: 3bec78447a9 ("target/arm: Provide ARMv8.4-PMU in '-cpu max'")
9
10
Signed-off-by: Alexander Graf <agraf@csgraf.de>
11
Reviewed-by: Sergio Lopez <slp@redhat.com>
12
Message-id: 20210519202253.76782-5-agraf@csgraf.de
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220506180242.216785-8-richard.henderson@linaro.org
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
11
---
16
include/sysemu/hvf_int.h | 30 ++++++++++++++++++++++++++++++
12
target/arm/cpu_tcg.c | 4 ++++
17
target/i386/hvf/hvf-i386.h | 31 +------------------------------
13
1 file changed, 4 insertions(+)
18
2 files changed, 31 insertions(+), 30 deletions(-)
19
14
20
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
15
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
21
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
22
--- a/include/sysemu/hvf_int.h
17
--- a/target/arm/cpu_tcg.c
23
+++ b/include/sysemu/hvf_int.h
18
+++ b/target/arm/cpu_tcg.c
24
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
25
20
t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
26
#include <Hypervisor/hv.h>
21
cpu->isar.id_pfr2 = t;
27
22
28
+/* hvf_slot flags */
23
+ t = cpu->isar.id_dfr0;
29
+#define HVF_SLOT_LOG (1 << 0)
24
+ t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
25
+ cpu->isar.id_dfr0 = t;
30
+
26
+
31
+typedef struct hvf_slot {
27
#ifdef CONFIG_USER_ONLY
32
+ uint64_t start;
28
/*
33
+ uint64_t size;
29
* Break with true ARMv8 and add back old-style VFP short-vector support.
34
+ uint8_t *mem;
35
+ int slot_id;
36
+ uint32_t flags;
37
+ MemoryRegion *region;
38
+} hvf_slot;
39
+
40
+typedef struct hvf_vcpu_caps {
41
+ uint64_t vmx_cap_pinbased;
42
+ uint64_t vmx_cap_procbased;
43
+ uint64_t vmx_cap_procbased2;
44
+ uint64_t vmx_cap_entry;
45
+ uint64_t vmx_cap_exit;
46
+ uint64_t vmx_cap_preemption_timer;
47
+} hvf_vcpu_caps;
48
+
49
+struct HVFState {
50
+ AccelState parent;
51
+ hvf_slot slots[32];
52
+ int num_slots;
53
+
54
+ hvf_vcpu_caps *hvf_caps;
55
+};
56
+extern HVFState *hvf_state;
57
+
58
void hvf_set_phys_mem(MemoryRegionSection *, bool);
59
void assert_hvf_ok(hv_return_t ret);
60
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
61
diff --git a/target/i386/hvf/hvf-i386.h b/target/i386/hvf/hvf-i386.h
62
index XXXXXXX..XXXXXXX 100644
63
--- a/target/i386/hvf/hvf-i386.h
64
+++ b/target/i386/hvf/hvf-i386.h
65
@@ -XXX,XX +XXX,XX @@
66
67
#include "qemu/accel.h"
68
#include "sysemu/hvf.h"
69
+#include "sysemu/hvf_int.h"
70
#include "cpu.h"
71
#include "x86.h"
72
73
-/* hvf_slot flags */
74
-#define HVF_SLOT_LOG (1 << 0)
75
-
76
-typedef struct hvf_slot {
77
- uint64_t start;
78
- uint64_t size;
79
- uint8_t *mem;
80
- int slot_id;
81
- uint32_t flags;
82
- MemoryRegion *region;
83
-} hvf_slot;
84
-
85
-typedef struct hvf_vcpu_caps {
86
- uint64_t vmx_cap_pinbased;
87
- uint64_t vmx_cap_procbased;
88
- uint64_t vmx_cap_procbased2;
89
- uint64_t vmx_cap_entry;
90
- uint64_t vmx_cap_exit;
91
- uint64_t vmx_cap_preemption_timer;
92
-} hvf_vcpu_caps;
93
-
94
-struct HVFState {
95
- AccelState parent;
96
- hvf_slot slots[32];
97
- int num_slots;
98
-
99
- hvf_vcpu_caps *hvf_caps;
100
-};
101
-extern HVFState *hvf_state;
102
-
103
void hvf_handle_io(CPUArchState *, uint16_t, void *, int, int, int);
104
105
#ifdef NEED_CPU_H
106
--
30
--
107
2.20.1
31
2.25.1
108
109
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This is BFDOT for both AArch64 AdvSIMD and SVE,
3
Share the code to set AArch32 max features so that we no
4
and VDOT.BF16 for AArch32 NEON.
4
longer have code drift between qemu{-system,}-{arm,aarch64}.
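
Most of what gets shared below is a run of FIELD_DP32() updates to the
32-bit ID registers. FIELD_DP32() from the registerfields helpers is
essentially a bitfield deposit; a rough sketch of what each call does (the
real macro derives shift and length from the named register field, and the
helper name here is illustrative only):

    /* Deposit val into reg at [shift, shift+length); assumes length < 32. */
    static inline uint32_t field_dp32_sketch(uint32_t reg, unsigned shift,
                                             unsigned length, uint32_t val)
    {
        uint32_t mask = ((1u << length) - 1) << shift;
        return (reg & ~mask) | ((val << shift) & mask);
    }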
5
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210525225817.400336-8-richard.henderson@linaro.org
8
Message-id: 20220506180242.216785-9-richard.henderson@linaro.org
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
target/arm/helper.h | 2 ++
11
target/arm/internals.h | 2 +
12
target/arm/neon-shared.decode | 2 ++
12
target/arm/cpu64.c | 50 +-----------------
13
target/arm/sve.decode | 3 +++
13
target/arm/cpu_tcg.c | 114 ++++++++++++++++++++++-------------------
14
target/arm/translate-a64.c | 41 +++++++++++++++++++++++++++--------
14
3 files changed, 65 insertions(+), 101 deletions(-)
15
target/arm/translate-neon.c | 9 ++++++++
16
target/arm/translate-sve.c | 12 ++++++++++
17
target/arm/vec_helper.c | 20 +++++++++++++++++
18
7 files changed, 80 insertions(+), 9 deletions(-)
19
15
20
diff --git a/target/arm/helper.h b/target/arm/helper.h
16
diff --git a/target/arm/internals.h b/target/arm/internals.h
21
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/helper.h
18
--- a/target/arm/internals.h
23
+++ b/target/arm/helper.h
19
+++ b/target/arm/internals.h
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
20
@@ -XXX,XX +XXX,XX @@ static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
25
21
void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
26
DEF_HELPER_FLAGS_5(gvec_bfdot, TCG_CALL_NO_RWG,
22
#endif
27
void, ptr, ptr, ptr, ptr, i32)
23
28
+DEF_HELPER_FLAGS_5(gvec_bfdot_idx, TCG_CALL_NO_RWG,
24
+void aa32_max_features(ARMCPU *cpu);
29
+ void, ptr, ptr, ptr, ptr, i32)
25
+
30
26
#endif
31
#ifdef TARGET_AARCH64
27
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
32
#include "helper-a64.h"
33
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
34
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/neon-shared.decode
29
--- a/target/arm/cpu64.c
36
+++ b/target/arm/neon-shared.decode
30
+++ b/target/arm/cpu64.c
37
@@ -XXX,XX +XXX,XX @@ VUSDOT_scalar 1111 1110 1 . 00 .... .... 1101 . q:1 index:1 0 vm:4 \
31
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
38
vn=%vn_dp vd=%vd_dp
32
{
39
VSUDOT_scalar 1111 1110 1 . 00 .... .... 1101 . q:1 index:1 1 vm:4 \
33
ARMCPU *cpu = ARM_CPU(obj);
40
vn=%vn_dp vd=%vd_dp
34
uint64_t t;
41
+VDOT_b16_scal 1111 1110 0 . 00 .... .... 1101 . q:1 index:1 0 vm:4 \
35
- uint32_t u;
42
+ vn=%vn_dp vd=%vd_dp
36
43
37
if (kvm_enabled() || hvf_enabled()) {
44
%vfml_scalar_q0_rm 0:3 5:1
38
/* With KVM or HVF, '-cpu max' is identical to '-cpu host' */
45
%vfml_scalar_q1_index 5:1 3:1
39
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
46
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
40
t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);
41
cpu->isar.id_aa64zfr0 = t;
42
43
- /* Replicate the same data to the 32-bit id registers. */
44
- u = cpu->isar.id_isar5;
45
- u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
46
- u = FIELD_DP32(u, ID_ISAR5, SHA1, 1);
47
- u = FIELD_DP32(u, ID_ISAR5, SHA2, 1);
48
- u = FIELD_DP32(u, ID_ISAR5, CRC32, 1);
49
- u = FIELD_DP32(u, ID_ISAR5, RDM, 1);
50
- u = FIELD_DP32(u, ID_ISAR5, VCMA, 1);
51
- cpu->isar.id_isar5 = u;
52
-
53
- u = cpu->isar.id_isar6;
54
- u = FIELD_DP32(u, ID_ISAR6, JSCVT, 1);
55
- u = FIELD_DP32(u, ID_ISAR6, DP, 1);
56
- u = FIELD_DP32(u, ID_ISAR6, FHM, 1);
57
- u = FIELD_DP32(u, ID_ISAR6, SB, 1);
58
- u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
59
- u = FIELD_DP32(u, ID_ISAR6, BF16, 1);
60
- u = FIELD_DP32(u, ID_ISAR6, I8MM, 1);
61
- cpu->isar.id_isar6 = u;
62
-
63
- u = cpu->isar.id_pfr0;
64
- u = FIELD_DP32(u, ID_PFR0, DIT, 1);
65
- cpu->isar.id_pfr0 = u;
66
-
67
- u = cpu->isar.id_pfr2;
68
- u = FIELD_DP32(u, ID_PFR2, SSBS, 1);
69
- cpu->isar.id_pfr2 = u;
70
-
71
- u = cpu->isar.id_mmfr3;
72
- u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
73
- cpu->isar.id_mmfr3 = u;
74
-
75
- u = cpu->isar.id_mmfr4;
76
- u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */
77
- u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
78
- u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */
79
- u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
80
- cpu->isar.id_mmfr4 = u;
81
-
82
t = cpu->isar.id_aa64dfr0;
83
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
84
cpu->isar.id_aa64dfr0 = t;
85
86
- u = cpu->isar.id_dfr0;
87
- u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
88
- cpu->isar.id_dfr0 = u;
89
-
90
- u = cpu->isar.mvfr1;
91
- u = FIELD_DP32(u, MVFR1, FPHP, 3); /* v8.2-FP16 */
92
- u = FIELD_DP32(u, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
93
- cpu->isar.mvfr1 = u;
94
+ /* Replicate the same data to the 32-bit id registers. */
95
+ aa32_max_features(cpu);
96
97
#ifdef CONFIG_USER_ONLY
98
/*
99
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
47
index XXXXXXX..XXXXXXX 100644
100
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/sve.decode
101
--- a/target/arm/cpu_tcg.c
49
+++ b/target/arm/sve.decode
102
+++ b/target/arm/cpu_tcg.c
50
@@ -XXX,XX +XXX,XX @@ FMLALB_zzxw 01100100 10 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2
103
@@ -XXX,XX +XXX,XX @@
51
FMLALT_zzxw 01100100 10 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
104
#endif
52
FMLSLB_zzxw 01100100 10 1 ..... 0110.0 ..... ..... @rrxr_3a esz=2
105
#include "cpregs.h"
53
FMLSLT_zzxw 01100100 10 1 ..... 0110.1 ..... ..... @rrxr_3a esz=2
106
54
+
107
+
55
+### SVE2 floating-point bfloat16 dot-product (indexed)
108
+/* Share AArch32 -cpu max features with AArch64. */
56
+BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2
109
+void aa32_max_features(ARMCPU *cpu)
57
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/translate-a64.c
60
+++ b/target/arm/translate-a64.c
61
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
62
return;
63
}
64
break;
65
- case 0x0f: /* SUDOT, USDOT */
66
- if (is_scalar || (size & 1) || !dc_isar_feature(aa64_i8mm, s)) {
67
+ case 0x0f:
68
+ switch (size) {
69
+ case 0: /* SUDOT */
70
+ case 2: /* USDOT */
71
+ if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
72
+ unallocated_encoding(s);
73
+ return;
74
+ }
75
+ break;
76
+ case 1: /* BFDOT */
77
+ if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
78
+ unallocated_encoding(s);
79
+ return;
80
+ }
81
+ break;
82
+ default:
83
unallocated_encoding(s);
84
return;
85
}
86
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
87
u ? gen_helper_gvec_udot_idx_b
88
: gen_helper_gvec_sdot_idx_b);
89
return;
90
- case 0x0f: /* SUDOT, USDOT */
91
- gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
92
- extract32(insn, 23, 1)
93
- ? gen_helper_gvec_usdot_idx_b
94
- : gen_helper_gvec_sudot_idx_b);
95
- return;
96
-
97
+ case 0x0f:
98
+ switch (extract32(insn, 22, 2)) {
99
+ case 0: /* SUDOT */
100
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
101
+ gen_helper_gvec_sudot_idx_b);
102
+ return;
103
+ case 1: /* BFDOT */
104
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
105
+ gen_helper_gvec_bfdot_idx);
106
+ return;
107
+ case 2: /* USDOT */
108
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
109
+ gen_helper_gvec_usdot_idx_b);
110
+ return;
111
+ }
112
+ g_assert_not_reached();
113
case 0x11: /* FCMLA #0 */
114
case 0x13: /* FCMLA #90 */
115
case 0x15: /* FCMLA #180 */
116
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
117
index XXXXXXX..XXXXXXX 100644
118
--- a/target/arm/translate-neon.c
119
+++ b/target/arm/translate-neon.c
120
@@ -XXX,XX +XXX,XX @@ static bool trans_VSUDOT_scalar(DisasContext *s, arg_VSUDOT_scalar *a)
121
gen_helper_gvec_sudot_idx_b);
122
}
123
124
+static bool trans_VDOT_b16_scal(DisasContext *s, arg_VDOT_b16_scal *a)
125
+{
110
+{
126
+ if (!dc_isar_feature(aa32_bf16, s)) {
111
+ uint32_t t;
127
+ return false;
112
+
128
+ }
113
+ /* Add additional features supported by QEMU */
129
+ return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
114
+ t = cpu->isar.id_isar5;
130
+ gen_helper_gvec_bfdot_idx);
115
+ t = FIELD_DP32(t, ID_ISAR5, AES, 2);
116
+ t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
117
+ t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
118
+ t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
119
+ t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
120
+ t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
121
+ cpu->isar.id_isar5 = t;
122
+
123
+ t = cpu->isar.id_isar6;
124
+ t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
125
+ t = FIELD_DP32(t, ID_ISAR6, DP, 1);
126
+ t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
127
+ t = FIELD_DP32(t, ID_ISAR6, SB, 1);
128
+ t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
129
+ t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
130
+ t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
131
+ cpu->isar.id_isar6 = t;
132
+
133
+ t = cpu->isar.mvfr1;
134
+ t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
135
+ t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
136
+ cpu->isar.mvfr1 = t;
137
+
138
+ t = cpu->isar.mvfr2;
139
+ t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
140
+ t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
141
+ cpu->isar.mvfr2 = t;
142
+
143
+ t = cpu->isar.id_mmfr3;
144
+ t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
145
+ cpu->isar.id_mmfr3 = t;
146
+
147
+ t = cpu->isar.id_mmfr4;
148
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
149
+ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
150
+ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
151
+ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
152
+ cpu->isar.id_mmfr4 = t;
153
+
154
+ t = cpu->isar.id_pfr0;
155
+ t = FIELD_DP32(t, ID_PFR0, DIT, 1);
156
+ cpu->isar.id_pfr0 = t;
157
+
158
+ t = cpu->isar.id_pfr2;
159
+ t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
160
+ cpu->isar.id_pfr2 = t;
161
+
162
+ t = cpu->isar.id_dfr0;
163
+ t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
164
+ cpu->isar.id_dfr0 = t;
131
+}
165
+}
132
+
166
+
133
static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
167
#ifndef CONFIG_USER_ONLY
168
static uint64_t l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
134
{
169
{
135
int opr_sz;
170
@@ -XXX,XX +XXX,XX @@ static void arm_v7m_class_init(ObjectClass *oc, void *data)
136
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
171
static void arm_max_initfn(Object *obj)
137
index XXXXXXX..XXXXXXX 100644
172
{
138
--- a/target/arm/translate-sve.c
173
ARMCPU *cpu = ARM_CPU(obj);
139
+++ b/target/arm/translate-sve.c
174
- uint32_t t;
140
@@ -XXX,XX +XXX,XX @@ static bool trans_BFDOT_zzzz(DisasContext *s, arg_rrrr_esz *a)
175
141
}
176
/* aarch64_a57_initfn, advertising none of the aarch64 features */
142
return true;
177
cpu->dtb_compatible = "arm,cortex-a57";
143
}
178
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
144
+
179
cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
145
+static bool trans_BFDOT_zzxz(DisasContext *s, arg_rrxr_esz *a)
180
define_cortex_a72_a57_a53_cp_reginfo(cpu);
146
+{
181
147
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
182
- /* Add additional features supported by QEMU */
148
+ return false;
183
- t = cpu->isar.id_isar5;
149
+ }
184
- t = FIELD_DP32(t, ID_ISAR5, AES, 2);
150
+ if (sve_access_check(s)) {
185
- t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
151
+ gen_gvec_ool_zzzz(s, gen_helper_gvec_bfdot_idx,
186
- t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
152
+ a->rd, a->rn, a->rm, a->ra, a->index);
187
- t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
153
+ }
188
- t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
154
+ return true;
189
- t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
155
+}
190
- cpu->isar.id_isar5 = t;
156
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
191
-
157
index XXXXXXX..XXXXXXX 100644
192
- t = cpu->isar.id_isar6;
158
--- a/target/arm/vec_helper.c
193
- t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
159
+++ b/target/arm/vec_helper.c
194
- t = FIELD_DP32(t, ID_ISAR6, DP, 1);
160
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bfdot)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
195
- t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
161
}
196
- t = FIELD_DP32(t, ID_ISAR6, SB, 1);
162
clear_tail(d, opr_sz, simd_maxsz(desc));
197
- t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
163
}
198
- t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
164
+
199
- t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
165
+void HELPER(gvec_bfdot_idx)(void *vd, void *vn, void *vm,
200
- cpu->isar.id_isar6 = t;
166
+ void *va, uint32_t desc)
201
-
167
+{
202
- t = cpu->isar.mvfr1;
168
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
203
- t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
169
+ intptr_t index = simd_data(desc);
204
- t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
170
+ intptr_t elements = opr_sz / 4;
205
- cpu->isar.mvfr1 = t;
171
+ intptr_t eltspersegment = MIN(16 / 4, elements);
206
-
172
+ float32 *d = vd, *a = va;
207
- t = cpu->isar.mvfr2;
173
+ uint32_t *n = vn, *m = vm;
208
- t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
174
+
209
- t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
175
+ for (i = 0; i < elements; i += eltspersegment) {
210
- cpu->isar.mvfr2 = t;
176
+ uint32_t m_idx = m[i + H4(index)];
211
-
177
+
212
- t = cpu->isar.id_mmfr3;
178
+ for (j = i; j < i + eltspersegment; j++) {
213
- t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
179
+ d[j] = bfdotadd(a[j], n[j], m_idx);
214
- cpu->isar.id_mmfr3 = t;
180
+ }
215
-
181
+ }
216
- t = cpu->isar.id_mmfr4;
182
+ clear_tail(d, opr_sz, simd_maxsz(desc));
217
- t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
183
+}
218
- t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
219
- t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
220
- t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
221
- cpu->isar.id_mmfr4 = t;
222
-
223
- t = cpu->isar.id_pfr0;
224
- t = FIELD_DP32(t, ID_PFR0, DIT, 1);
225
- cpu->isar.id_pfr0 = t;
226
-
227
- t = cpu->isar.id_pfr2;
228
- t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
229
- cpu->isar.id_pfr2 = t;
230
-
231
- t = cpu->isar.id_dfr0;
232
- t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
233
- cpu->isar.id_dfr0 = t;
234
+ aa32_max_features(cpu);
235
236
#ifdef CONFIG_USER_ONLY
237
/*
184
--
238
--
185
2.20.1
239
2.25.1
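
The ID-register plumbing above is built almost entirely out of FIELD_DP32()/FIELD_DP64() calls. As a rough standalone illustration of what such a call boils down to (a masked deposit into a named field), here is a minimal sketch; field_dp32() and the BF16 shift/length constants are stand-ins written for this note, not the real hw/registerfields.h definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the ID_ISAR6.BF16 field description (bits [23:20]). */
#define ID_ISAR6_BF16_SHIFT   20
#define ID_ISAR6_BF16_LENGTH   4

/* Stand-in for FIELD_DP32(): deposit 'val' into the field at shift/len. */
static uint32_t field_dp32(uint32_t reg, unsigned shift, unsigned len, uint32_t val)
{
    uint32_t mask = ((1u << len) - 1) << shift;
    return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
    uint32_t id_isar6 = 0x00000011;
    /* Advertise BF16 support, leaving every other field untouched. */
    id_isar6 = field_dp32(id_isar6, ID_ISAR6_BF16_SHIFT,
                          ID_ISAR6_BF16_LENGTH, 1);
    printf("ID_ISAR6 = 0x%08x\n", id_isar6);   /* prints 0x00100011 */
    return 0;
}

The same read-modify-write shape (load the register into t, deposit each field, store t back) is what aa32_max_features() and aarch64_max_initfn() repeat for every ID register in the hunks above.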
186
187
1
From: Jamie Iles <jamie@nuviainc.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Now that there are no other users of do_raise_exception, fold it into
3
Update the legacy feature names to the current names.
4
raise_exception.
4
Provide feature names for id changes that were not marked.
5
Sort the field updates into increasing bitfield order.
5
6
6
Cc: Richard Henderson <richard.henderson@linaro.org>
7
Cc: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Jamie Iles <jamie@nuviainc.com>
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220506180242.216785-10-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
11
---
12
target/arm/op_helper.c | 12 ++----------
12
target/arm/cpu64.c | 100 +++++++++++++++++++++----------------------
13
1 file changed, 2 insertions(+), 10 deletions(-)
13
target/arm/cpu_tcg.c | 48 ++++++++++-----------
14
2 files changed, 74 insertions(+), 74 deletions(-)
14
15
15
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
16
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
16
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/op_helper.c
18
--- a/target/arm/cpu64.c
18
+++ b/target/arm/op_helper.c
19
+++ b/target/arm/cpu64.c
19
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
20
#define SIGNBIT (uint32_t)0x80000000
21
cpu->midr = t;
21
#define SIGNBIT64 ((uint64_t)1 << 63)
22
22
23
t = cpu->isar.id_aa64isar0;
23
-static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
24
- t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
24
- uint32_t syndrome, uint32_t target_el)
25
- t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
25
+void raise_exception(CPUARMState *env, uint32_t excp,
26
- t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
26
+ uint32_t syndrome, uint32_t target_el)
27
+ t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */
27
{
28
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */
28
CPUState *cs = env_cpu(env);
29
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */
29
30
t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
30
@@ -XXX,XX +XXX,XX @@ static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
31
- t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);
31
cs->exception_index = excp;
32
- t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);
32
env->exception.syndrome = syndrome;
33
- t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);
33
env->exception.target_el = target_el;
34
- t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);
34
-
35
- t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
35
- return cs;
36
- t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
36
-}
37
- t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
37
-
38
- t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
38
-void raise_exception(CPUARMState *env, uint32_t excp,
39
- t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
39
- uint32_t syndrome, uint32_t target_el)
40
- t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);
40
-{
41
+ t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); /* FEAT_LSE */
41
- CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
42
+ t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); /* FEAT_RDM */
42
cpu_loop_exit(cs);
43
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); /* FEAT_SHA3 */
44
+ t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1); /* FEAT_SM3 */
45
+ t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1); /* FEAT_SM4 */
46
+ t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1); /* FEAT_DotProd */
47
+ t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1); /* FEAT_FHM */
48
+ t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* FEAT_FlagM2 */
49
+ t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
50
+ t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); /* FEAT_RNG */
51
cpu->isar.id_aa64isar0 = t;
52
53
t = cpu->isar.id_aa64isar1;
54
- t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);
55
- t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);
56
- t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
57
- t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
58
- t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
59
- t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);
60
- t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
61
- t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */
62
- t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);
63
+ t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */
64
+ t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); /* FEAT_JSCVT */
65
+ t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); /* FEAT_FCMA */
66
+ t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* FEAT_LRCPC2 */
67
+ t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1); /* FEAT_FRINTTS */
68
+ t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); /* FEAT_SB */
69
+ t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); /* FEAT_SPECRES */
70
+ t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1); /* FEAT_BF16 */
71
+ t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */
72
cpu->isar.id_aa64isar1 = t;
73
74
t = cpu->isar.id_aa64pfr0;
75
+ t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */
76
+ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */
77
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
78
- t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
79
- t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
80
- t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);
81
- t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);
82
+ t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */
83
+ t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
84
cpu->isar.id_aa64pfr0 = t;
85
86
t = cpu->isar.id_aa64pfr1;
87
- t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);
88
- t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);
89
+ t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); /* FEAT_BTI */
90
+ t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2); /* FEAT_SSBS2 */
91
/*
92
* Begin with full support for MTE. This will be downgraded to MTE=0
93
* during realize if the board provides no tag memory, much like
94
* we do for EL2 with the virtualization=on property.
95
*/
96
- t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);
97
+ t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */
98
cpu->isar.id_aa64pfr1 = t;
99
100
t = cpu->isar.id_aa64mmfr0;
101
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
102
cpu->isar.id_aa64mmfr0 = t;
103
104
t = cpu->isar.id_aa64mmfr1;
105
- t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
106
- t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
107
- t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
108
- t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
109
- t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */
110
- t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */
111
+ t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
112
+ t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */
113
+ t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* FEAT_HPDS */
114
+ t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */
115
+ t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* FEAT_PAN2 */
116
+ t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
117
cpu->isar.id_aa64mmfr1 = t;
118
119
t = cpu->isar.id_aa64mmfr2;
120
- t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);
121
- t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* TTCNP */
122
- t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* TTST */
123
- t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */
124
- t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */
125
- t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */
126
+ t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* FEAT_TTCNP */
127
+ t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */
128
+ t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */
129
+ t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */
130
+ t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */
131
+ t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */
132
cpu->isar.id_aa64mmfr2 = t;
133
134
t = cpu->isar.id_aa64zfr0;
135
t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
136
- t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* PMULL */
137
- t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);
138
- t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);
139
- t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);
140
- t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);
141
- t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);
142
- t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);
143
- t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);
144
+ t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* FEAT_SVE_PMULL128 */
145
+ t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); /* FEAT_SVE_BitPerm */
146
+ t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1); /* FEAT_BF16 */
147
+ t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1); /* FEAT_SVE_SHA3 */
148
+ t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1); /* FEAT_SVE_SM4 */
149
+ t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); /* FEAT_I8MM */
150
+ t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1); /* FEAT_F32MM */
151
+ t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1); /* FEAT_F64MM */
152
cpu->isar.id_aa64zfr0 = t;
153
154
t = cpu->isar.id_aa64dfr0;
155
- t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
156
+ t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */
157
cpu->isar.id_aa64dfr0 = t;
158
159
/* Replicate the same data to the 32-bit id registers. */
160
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/arm/cpu_tcg.c
163
+++ b/target/arm/cpu_tcg.c
164
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
165
166
/* Add additional features supported by QEMU */
167
t = cpu->isar.id_isar5;
168
- t = FIELD_DP32(t, ID_ISAR5, AES, 2);
169
- t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
170
- t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
171
+ t = FIELD_DP32(t, ID_ISAR5, AES, 2); /* FEAT_PMULL */
172
+ t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); /* FEAT_SHA1 */
173
+ t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); /* FEAT_SHA256 */
174
t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
175
- t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
176
- t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
177
+ t = FIELD_DP32(t, ID_ISAR5, RDM, 1); /* FEAT_RDM */
178
+ t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); /* FEAT_FCMA */
179
cpu->isar.id_isar5 = t;
180
181
t = cpu->isar.id_isar6;
182
- t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
183
- t = FIELD_DP32(t, ID_ISAR6, DP, 1);
184
- t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
185
- t = FIELD_DP32(t, ID_ISAR6, SB, 1);
186
- t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
187
- t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
188
- t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
189
+ t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1); /* FEAT_JSCVT */
190
+ t = FIELD_DP32(t, ID_ISAR6, DP, 1); /* Feat_DotProd */
191
+ t = FIELD_DP32(t, ID_ISAR6, FHM, 1); /* FEAT_FHM */
192
+ t = FIELD_DP32(t, ID_ISAR6, SB, 1); /* FEAT_SB */
193
+ t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1); /* FEAT_SPECRES */
194
+ t = FIELD_DP32(t, ID_ISAR6, BF16, 1); /* FEAT_AA32BF16 */
195
+ t = FIELD_DP32(t, ID_ISAR6, I8MM, 1); /* FEAT_AA32I8MM */
196
cpu->isar.id_isar6 = t;
197
198
t = cpu->isar.mvfr1;
199
- t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
200
- t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
201
+ t = FIELD_DP32(t, MVFR1, FPHP, 3); /* FEAT_FP16 */
202
+ t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* FEAT_FP16 */
203
cpu->isar.mvfr1 = t;
204
205
t = cpu->isar.mvfr2;
206
- t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
207
- t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
208
+ t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
209
+ t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
210
cpu->isar.mvfr2 = t;
211
212
t = cpu->isar.id_mmfr3;
213
- t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
214
+ t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* FEAT_PAN2 */
215
cpu->isar.id_mmfr3 = t;
216
217
t = cpu->isar.id_mmfr4;
218
- t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
219
- t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
220
- t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
221
- t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
222
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* FEAT_AA32HPD */
223
+ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
224
+ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */
225
+ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX*/
226
cpu->isar.id_mmfr4 = t;
227
228
t = cpu->isar.id_pfr0;
229
- t = FIELD_DP32(t, ID_PFR0, DIT, 1);
230
+ t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */
231
cpu->isar.id_pfr0 = t;
232
233
t = cpu->isar.id_pfr2;
234
- t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
235
+ t = FIELD_DP32(t, ID_PFR2, SSBS, 1); /* FEAT_SSBS */
236
cpu->isar.id_pfr2 = t;
237
238
t = cpu->isar.id_dfr0;
239
- t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
240
+ t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* FEAT_PMUv3p4 */
241
cpu->isar.id_dfr0 = t;
43
}
242
}
44
243
45
--
244
--
46
2.20.1
245
2.25.1
47
48
1
Some v8M instructions are present if either the floating point
1
From: Richard Henderson <richard.henderson@linaro.org>
2
extension or MVE is implemented. Update our implementation of them
3
to check for MVE as well as for FP.
4
2
5
This is all the insns which use CheckDecodeFaults(ExtType_MveOrFp) or
3
Use FIELD_DP{32,64} to manipulate id_pfr1 and id_aa64pfr0
6
CheckDecodeFaults(ExtType_MveOrDpFp) in their pseudocode, which are
4
during arm_cpu_realizefn.
7
essentially the loads and stores, moves and sysreg accesses, except
8
for VMOV_reg_sp and VMOV_reg_dp, which we handle in subsequent
9
patches because they need a refactor to provide a place to put the
10
new MVE check.
11
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220506180242.216785-11-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20210520152840.24453-3-peter.maydell@linaro.org
15
---
10
---
16
target/arm/translate-vfp.c | 48 +++++++++++++++++++++++---------------
11
target/arm/cpu.c | 22 +++++++++++++---------
17
1 file changed, 29 insertions(+), 19 deletions(-)
12
1 file changed, 13 insertions(+), 9 deletions(-)
18
13
19
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
14
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
20
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/translate-vfp.c
16
--- a/target/arm/cpu.c
22
+++ b/target/arm/translate-vfp.c
17
+++ b/target/arm/cpu.c
23
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
18
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
24
/* VMOV scalar to general purpose register */
19
*/
25
TCGv_i32 tmp;
20
unset_feature(env, ARM_FEATURE_EL3);
26
21
27
- /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
22
- /* Disable the security extension feature bits in the processor feature
28
- if (a->size == MO_32
23
- * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
29
- ? !dc_isar_feature(aa32_fpsp_v2, s)
24
+ /*
30
- : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
25
+ * Disable the security extension feature bits in the processor
31
- return false;
26
+ * feature registers as well.
32
+ /*
27
*/
33
+ * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
28
- cpu->isar.id_pfr1 &= ~0xf0;
34
+ * all sizes, whether the CPU has fp or not.
29
- cpu->isar.id_aa64pfr0 &= ~0xf000;
35
+ */
30
+ cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
36
+ if (!dc_isar_feature(aa32_mve, s)) {
31
+ cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
37
+ if (a->size == MO_32
32
+ ID_AA64PFR0, EL3, 0);
38
+ ? !dc_isar_feature(aa32_fpsp_v2, s)
39
+ : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
40
+ return false;
41
+ }
42
}
33
}
43
34
44
/* UNDEF accesses to D16-D31 if they don't exist */
35
if (!cpu->has_el2) {
45
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
36
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
46
/* VMOV general purpose register to scalar */
47
TCGv_i32 tmp;
48
49
- /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
50
- if (a->size == MO_32
51
- ? !dc_isar_feature(aa32_fpsp_v2, s)
52
- : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
53
- return false;
54
+ /*
55
+ * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
56
+ * all sizes, whether the CPU has fp or not.
57
+ */
58
+ if (!dc_isar_feature(aa32_mve, s)) {
59
+ if (a->size == MO_32
60
+ ? !dc_isar_feature(aa32_fpsp_v2, s)
61
+ : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
62
+ return false;
63
+ }
64
}
37
}
65
38
66
/* UNDEF accesses to D16-D31 if they don't exist */
39
if (!arm_feature(env, ARM_FEATURE_EL2)) {
67
@@ -XXX,XX +XXX,XX @@ typedef enum FPSysRegCheckResult {
40
- /* Disable the hypervisor feature bits in the processor feature
68
41
- * registers if we don't have EL2. These are id_pfr1[15:12] and
69
static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
42
- * id_aa64pfr0_el1[11:8].
70
{
43
+ /*
71
- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
44
+ * Disable the hypervisor feature bits in the processor feature
72
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
45
+ * registers if we don't have EL2.
73
return FPSysRegCheckFailed;
46
*/
47
- cpu->isar.id_aa64pfr0 &= ~0xf00;
48
- cpu->isar.id_pfr1 &= ~0xf000;
49
+ cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
50
+ ID_AA64PFR0, EL2, 0);
51
+ cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
52
+ ID_PFR1, VIRTUALIZATION, 0);
74
}
53
}
75
54
76
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
55
#ifndef CONFIG_USER_ONLY
77
{
78
TCGv_i32 tmp;
79
80
- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
81
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
82
return false;
83
}
84
85
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
86
{
87
TCGv_i32 tmp;
88
89
- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
90
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
91
return false;
92
}
93
94
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
95
* floating point register. Note that this does not require support
96
* for double precision arithmetic.
97
*/
98
- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
99
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
100
return false;
101
}
102
103
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
104
uint32_t offset;
105
TCGv_i32 addr, tmp;
106
107
- if (!dc_isar_feature(aa32_fp16_arith, s)) {
108
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
109
return false;
110
}
111
112
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
113
uint32_t offset;
114
TCGv_i32 addr, tmp;
115
116
- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
117
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
118
return false;
119
}
120
121
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
122
TCGv_i64 tmp;
123
124
/* Note that this does not require support for double arithmetic. */
125
- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
126
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
127
return false;
128
}
129
130
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
131
TCGv_i32 addr, tmp;
132
int i, n;
133
134
- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
135
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
136
return false;
137
}
138
139
@@ -XXX,XX +XXX,XX @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
140
int i, n;
141
142
/* Note that this does not require support for double arithmetic. */
143
- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
144
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
145
return false;
146
}
147
148
--
56
--
149
2.20.1
57
2.25.1
150
151
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Disable BF16 again for !have_neon and !have_vfp during realize.
3
The only portion of FEAT_Debugv8p2 that is relevant to QEMU
4
is CONTEXTIDR_EL2, which is also conditionally implemented
5
with FEAT_VHE. The rest of the debug extension concerns the
6
External debug interface, which is outside the scope of QEMU.
4
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210525225817.400336-13-richard.henderson@linaro.org
10
Message-id: 20220506180242.216785-12-richard.henderson@linaro.org
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
12
---
10
target/arm/cpu.c | 3 +++
13
docs/system/arm/emulation.rst | 1 +
11
target/arm/cpu64.c | 3 +++
14
target/arm/cpu.c | 1 +
12
target/arm/cpu_tcg.c | 1 +
15
target/arm/cpu64.c | 1 +
13
3 files changed, 7 insertions(+)
16
target/arm/cpu_tcg.c | 2 ++
17
4 files changed, 5 insertions(+)
14
18
19
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
20
index XXXXXXX..XXXXXXX 100644
21
--- a/docs/system/arm/emulation.rst
22
+++ b/docs/system/arm/emulation.rst
23
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
24
- FEAT_BTI (Branch Target Identification)
25
- FEAT_DIT (Data Independent Timing instructions)
26
- FEAT_DPB (DC CVAP instruction)
27
+- FEAT_Debugv8p2 (Debug changes for v8.2)
28
- FEAT_DotProd (Advanced SIMD dot product instructions)
29
- FEAT_FCMA (Floating-point complex number instructions)
30
- FEAT_FHM (Floating-point half-precision multiplication instructions)
15
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
31
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
16
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.c
33
--- a/target/arm/cpu.c
18
+++ b/target/arm/cpu.c
34
+++ b/target/arm/cpu.c
19
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
35
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
20
36
* feature registers as well.
21
u = cpu->isar.id_isar6;
37
*/
22
u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
38
cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
23
+ u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
39
+ cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
24
cpu->isar.id_isar6 = u;
40
cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
25
41
ID_AA64PFR0, EL3, 0);
26
u = cpu->isar.mvfr0;
42
}
27
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
28
29
t = cpu->isar.id_aa64isar1;
30
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
31
+ t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
32
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
33
cpu->isar.id_aa64isar1 = t;
34
35
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
36
u = cpu->isar.id_isar6;
37
u = FIELD_DP32(u, ID_ISAR6, DP, 0);
38
u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
39
+ u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
40
u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
41
cpu->isar.id_isar6 = u;
42
43
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
43
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
44
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/arm/cpu64.c
45
--- a/target/arm/cpu64.c
46
+++ b/target/arm/cpu64.c
46
+++ b/target/arm/cpu64.c
47
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
47
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
48
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
48
cpu->isar.id_aa64zfr0 = t;
49
t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
49
50
t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
50
t = cpu->isar.id_aa64dfr0;
51
+ t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);
51
+ t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 8); /* FEAT_Debugv8p2 */
52
t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
52
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */
53
t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */
53
cpu->isar.id_aa64dfr0 = t;
54
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);
55
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
56
t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
57
t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* PMULL */
58
t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);
59
+ t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);
60
t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);
61
t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);
62
t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);
63
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
64
u = FIELD_DP32(u, ID_ISAR6, FHM, 1);
65
u = FIELD_DP32(u, ID_ISAR6, SB, 1);
66
u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
67
+ u = FIELD_DP32(u, ID_ISAR6, BF16, 1);
68
u = FIELD_DP32(u, ID_ISAR6, I8MM, 1);
69
cpu->isar.id_isar6 = u;
70
54
71
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
55
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
72
index XXXXXXX..XXXXXXX 100644
56
index XXXXXXX..XXXXXXX 100644
73
--- a/target/arm/cpu_tcg.c
57
--- a/target/arm/cpu_tcg.c
74
+++ b/target/arm/cpu_tcg.c
58
+++ b/target/arm/cpu_tcg.c
75
@@ -XXX,XX +XXX,XX @@ static void arm_max_initfn(Object *obj)
59
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
76
t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
60
cpu->isar.id_pfr2 = t;
77
t = FIELD_DP32(t, ID_ISAR6, SB, 1);
61
78
t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
62
t = cpu->isar.id_dfr0;
79
+ t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
63
+ t = FIELD_DP32(t, ID_DFR0, COPDBG, 8); /* FEAT_Debugv8p2 */
80
t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
64
+ t = FIELD_DP32(t, ID_DFR0, COPSDBG, 8); /* FEAT_Debugv8p2 */
81
cpu->isar.id_isar6 = t;
65
t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* FEAT_PMUv3p4 */
82
66
cpu->isar.id_dfr0 = t;
67
}
83
--
68
--
84
2.20.1
69
2.25.1
85
86
diff view generated by jsdifflib
1
From: Alexander Graf <agraf@csgraf.de>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The hvf accel synchronize functions are only used as input for local
3
This extension concerns changes to the External Debug interface,
4
callback functions, so we can make them static.
4
with Secure and Non-secure access to the debug registers, and all
5
of it is outside the scope of QEMU. Indicating support for this
6
is mandatory with FEAT_SEL2, which we do implement.
5
7
6
Signed-off-by: Alexander Graf <agraf@csgraf.de>
7
Reviewed-by: Sergio Lopez <slp@redhat.com>
8
Message-id: 20210519202253.76782-10-agraf@csgraf.de
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20220506180242.216785-13-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
---
12
accel/hvf/hvf-accel-ops.h | 3 ---
13
docs/system/arm/emulation.rst | 1 +
13
accel/hvf/hvf-accel-ops.c | 6 +++---
14
target/arm/cpu64.c | 2 +-
14
2 files changed, 3 insertions(+), 6 deletions(-)
15
target/arm/cpu_tcg.c | 4 ++--
16
3 files changed, 4 insertions(+), 3 deletions(-)
15
17
16
diff --git a/accel/hvf/hvf-accel-ops.h b/accel/hvf/hvf-accel-ops.h
18
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
17
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
18
--- a/accel/hvf/hvf-accel-ops.h
20
--- a/docs/system/arm/emulation.rst
19
+++ b/accel/hvf/hvf-accel-ops.h
21
+++ b/docs/system/arm/emulation.rst
20
@@ -XXX,XX +XXX,XX @@
22
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
21
#include "sysemu/cpus.h"
23
- FEAT_DIT (Data Independent Timing instructions)
22
24
- FEAT_DPB (DC CVAP instruction)
23
int hvf_vcpu_exec(CPUState *);
25
- FEAT_Debugv8p2 (Debug changes for v8.2)
24
-void hvf_cpu_synchronize_post_reset(CPUState *);
26
+- FEAT_Debugv8p4 (Debug changes for v8.4)
25
-void hvf_cpu_synchronize_post_init(CPUState *);
27
- FEAT_DotProd (Advanced SIMD dot product instructions)
26
-void hvf_cpu_synchronize_pre_loadvm(CPUState *);
28
- FEAT_FCMA (Floating-point complex number instructions)
27
29
- FEAT_FHM (Floating-point half-precision multiplication instructions)
28
#endif /* HVF_CPUS_H */
30
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
29
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
30
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
31
--- a/accel/hvf/hvf-accel-ops.c
32
--- a/target/arm/cpu64.c
32
+++ b/accel/hvf/hvf-accel-ops.c
33
+++ b/target/arm/cpu64.c
33
@@ -XXX,XX +XXX,XX @@ static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
34
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
34
cpu->vcpu_dirty = false;
35
cpu->isar.id_aa64zfr0 = t;
35
}
36
36
37
t = cpu->isar.id_aa64dfr0;
37
-void hvf_cpu_synchronize_post_reset(CPUState *cpu)
38
- t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 8); /* FEAT_Debugv8p2 */
38
+static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
39
+ t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9); /* FEAT_Debugv8p4 */
39
{
40
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */
40
run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
41
cpu->isar.id_aa64dfr0 = t;
41
}
42
42
@@ -XXX,XX +XXX,XX @@ static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
43
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
43
cpu->vcpu_dirty = false;
44
index XXXXXXX..XXXXXXX 100644
44
}
45
--- a/target/arm/cpu_tcg.c
45
46
+++ b/target/arm/cpu_tcg.c
46
-void hvf_cpu_synchronize_post_init(CPUState *cpu)
47
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
47
+static void hvf_cpu_synchronize_post_init(CPUState *cpu)
48
cpu->isar.id_pfr2 = t;
48
{
49
49
run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
50
t = cpu->isar.id_dfr0;
50
}
51
- t = FIELD_DP32(t, ID_DFR0, COPDBG, 8); /* FEAT_Debugv8p2 */
51
@@ -XXX,XX +XXX,XX @@ static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
52
- t = FIELD_DP32(t, ID_DFR0, COPSDBG, 8); /* FEAT_Debugv8p2 */
52
cpu->vcpu_dirty = true;
53
+ t = FIELD_DP32(t, ID_DFR0, COPDBG, 9); /* FEAT_Debugv8p4 */
53
}
54
+ t = FIELD_DP32(t, ID_DFR0, COPSDBG, 9); /* FEAT_Debugv8p4 */
54
55
t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* FEAT_PMUv3p4 */
55
-void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
56
cpu->isar.id_dfr0 = t;
56
+static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
57
{
58
run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
59
}
57
}
60
--
58
--
61
2.20.1
59
2.25.1
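
For context on the DEBUGVER/COPDBG/COPSDBG values used in this and the previous patch: the magic numbers 8 and 9 are debug architecture revision encodings, not arbitrary QEMU constants. A tiny lookup, reflecting my reading of the Arm ARM (treat the names as informational, not authoritative):

#include <stdio.h>

/* Informal decode of ID_AA64DFR0_EL1.DebugVer / ID_DFR0.CopDbg values. */
static const char *debugver_name(unsigned v)
{
    switch (v) {
    case 6:  return "Armv8.0 debug";
    case 8:  return "FEAT_Debugv8p2";
    case 9:  return "FEAT_Debugv8p4";
    default: return "other / not covered here";
    }
}

int main(void)
{
    printf("DebugVer 8 -> %s, 9 -> %s\n", debugver_name(8), debugver_name(9));
    return 0;
}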
62
63
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Note that the SVE BFLOAT16 support does not require SVE2;
3
Add only the system registers required to implement zero error
4
it is an independent extension.
4
records. This means that all values for ERRSELR are out of range,
5
which means that it and all of the indexed error record registers
6
need not be implemented.
7
8
Add the EL2 registers required for injecting virtual SError.
5
9
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210525225817.400336-2-richard.henderson@linaro.org
12
Message-id: 20220506180242.216785-14-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
14
---
11
target/arm/cpu.h | 15 +++++++++++++++
15
target/arm/cpu.h | 5 +++
12
1 file changed, 15 insertions(+)
16
target/arm/helper.c | 84 +++++++++++++++++++++++++++++++++++++++++++++
17
2 files changed, 89 insertions(+)
13
18
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
19
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
15
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpu.h
21
--- a/target/arm/cpu.h
17
+++ b/target/arm/cpu.h
22
+++ b/target/arm/cpu.h
18
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
23
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
19
return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0;
24
uint64_t tfsr_el[4]; /* tfsre0_el1 is index 0. */
20
}
25
uint64_t gcr_el1;
21
26
uint64_t rgsr_el1;
22
+static inline bool isar_feature_aa32_bf16(const ARMISARegisters *id)
27
+
28
+ /* Minimal RAS registers */
29
+ uint64_t disr_el1;
30
+ uint64_t vdisr_el2;
31
+ uint64_t vsesr_el2;
32
} cp15;
33
34
struct {
35
diff --git a/target/arm/helper.c b/target/arm/helper.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/helper.c
38
+++ b/target/arm/helper.c
39
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
40
.access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
41
};
42
43
+/*
44
+ * Check for traps to RAS registers, which are controlled
45
+ * by HCR_EL2.TERR and SCR_EL3.TERR.
46
+ */
47
+static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
48
+ bool isread)
23
+{
49
+{
24
+ return FIELD_EX32(id->id_isar6, ID_ISAR6, BF16) != 0;
50
+ int el = arm_current_el(env);
51
+
52
+ if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
53
+ return CP_ACCESS_TRAP_EL2;
54
+ }
55
+ if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
56
+ return CP_ACCESS_TRAP_EL3;
57
+ }
58
+ return CP_ACCESS_OK;
25
+}
59
+}
26
+
60
+
27
static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id)
61
+static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
28
{
29
return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0;
30
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id)
31
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2;
32
}
33
34
+static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id)
35
+{
62
+{
36
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0;
63
+ int el = arm_current_el(env);
64
+
65
+ if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
66
+ return env->cp15.vdisr_el2;
67
+ }
68
+ if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
69
+ return 0; /* RAZ/WI */
70
+ }
71
+ return env->cp15.disr_el1;
37
+}
72
+}
38
+
73
+
39
static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id)
74
+static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
40
{
41
/* We always set the AdvSIMD and FP fields identically. */
42
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
43
return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
44
}
45
46
+static inline bool isar_feature_aa64_sve_bf16(const ARMISARegisters *id)
47
+{
75
+{
48
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BFLOAT16) != 0;
76
+ int el = arm_current_el(env);
77
+
78
+ if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
79
+ env->cp15.vdisr_el2 = val;
80
+ return;
81
+ }
82
+ if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
83
+ return; /* RAZ/WI */
84
+ }
85
+ env->cp15.disr_el1 = val;
49
+}
86
+}
50
+
87
+
51
static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id)
88
+/*
52
{
89
+ * Minimal RAS implementation with no Error Records.
53
return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0;
90
+ * Which means that all of the Error Record registers:
91
+ * ERXADDR_EL1
92
+ * ERXCTLR_EL1
93
+ * ERXFR_EL1
94
+ * ERXMISC0_EL1
95
+ * ERXMISC1_EL1
96
+ * ERXMISC2_EL1
97
+ * ERXMISC3_EL1
98
+ * ERXPFGCDN_EL1 (RASv1p1)
99
+ * ERXPFGCTL_EL1 (RASv1p1)
100
+ * ERXPFGF_EL1 (RASv1p1)
101
+ * ERXSTATUS_EL1
102
+ * and
103
+ * ERRSELR_EL1
104
+ * may generate UNDEFINED, which is the effect we get by not
105
+ * listing them at all.
106
+ */
107
+static const ARMCPRegInfo minimal_ras_reginfo[] = {
108
+ { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
109
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
110
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
111
+ .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
112
+ { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
113
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
114
+ .access = PL1_R, .accessfn = access_terr,
115
+ .type = ARM_CP_CONST, .resetvalue = 0 },
116
+ { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
117
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
118
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
119
+ { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
120
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
121
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
122
+};
123
+
124
/* Return the exception level to which exceptions should be taken
125
* via SVEAccessTrap. If an exception should be routed through
126
* AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
127
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
128
if (cpu_isar_feature(aa64_ssbs, cpu)) {
129
define_one_arm_cp_reg(cpu, &ssbs_reginfo);
130
}
131
+ if (cpu_isar_feature(any_ras, cpu)) {
132
+ define_arm_cp_regs(cpu, minimal_ras_reginfo);
133
+ }
134
135
if (cpu_isar_feature(aa64_vh, cpu) ||
136
cpu_isar_feature(aa64_debugv8p2, cpu)) {
54
--
137
--
55
2.20.1
138
2.25.1
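
To see why "zero error records" lets ERRSELR_EL1 and the ERX* registers UNDEF, consider how software enumerates RAS error records: it reads ERRIDR_EL1.NUM and only then selects records through ERRSELR_EL1. A compilable toy model of that probe (the accessor is a stub standing in for an MRS of ERRIDR_EL1, returning zero as QEMU's minimal implementation does):

#include <stdint.h>
#include <stdio.h>

/* Stub for reading ERRIDR_EL1; the minimal RAS implementation has NUM == 0. */
static uint64_t read_erridr_el1(void) { return 0; }

int main(void)
{
    uint64_t num_records = read_erridr_el1() & 0xffff;   /* ERRIDR_EL1.NUM */
    if (num_records == 0) {
        /* Nothing can be selected via ERRSELR_EL1, so the per-record ERX*
         * registers never need to exist -- the behaviour the patch relies on. */
        printf("minimal RAS: no error records to enumerate\n");
    }
    return 0;
}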
56
57
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The operands to tcg_gen_atomic_fetch_s{min,max}_i64 must
3
Enable writes to the TERR and TEA bits when RAS is enabled.
4
be signed, so that the inputs are properly extended.
4
These bits are otherwise RES0.
5
Zero extend the result afterward, as needed.
6
5
7
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/364
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Message-id: 20220506180242.216785-15-richard.henderson@linaro.org
10
Message-id: 20210602020720.47679-1-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
10
---
13
target/arm/translate-a64.c | 13 ++++++++++---
11
target/arm/helper.c | 9 +++++++++
14
1 file changed, 10 insertions(+), 3 deletions(-)
12
1 file changed, 9 insertions(+)
15
13
16
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
17
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate-a64.c
16
--- a/target/arm/helper.c
19
+++ b/target/arm/translate-a64.c
17
+++ b/target/arm/helper.c
20
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
18
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
21
int o3_opc = extract32(insn, 12, 4);
19
}
22
bool r = extract32(insn, 22, 1);
20
valid_mask &= ~SCR_NET;
23
bool a = extract32(insn, 23, 1);
21
24
- TCGv_i64 tcg_rs, clean_addr;
22
+ if (cpu_isar_feature(aa64_ras, cpu)) {
25
+ TCGv_i64 tcg_rs, tcg_rt, clean_addr;
23
+ valid_mask |= SCR_TERR;
26
AtomicThreeOpFn *fn = NULL;
24
+ }
27
+ MemOp mop = s->be_data | size | MO_ALIGN;
25
if (cpu_isar_feature(aa64_lor, cpu)) {
28
26
valid_mask |= SCR_TLOR;
29
if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
27
}
30
unallocated_encoding(s);
28
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
31
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
29
}
32
break;
30
} else {
33
case 004: /* LDSMAX */
31
valid_mask &= ~(SCR_RW | SCR_ST);
34
fn = tcg_gen_atomic_fetch_smax_i64;
32
+ if (cpu_isar_feature(aa32_ras, cpu)) {
35
+ mop |= MO_SIGN;
33
+ valid_mask |= SCR_TERR;
36
break;
34
+ }
37
case 005: /* LDSMIN */
38
fn = tcg_gen_atomic_fetch_smin_i64;
39
+ mop |= MO_SIGN;
40
break;
41
case 006: /* LDUMAX */
42
fn = tcg_gen_atomic_fetch_umax_i64;
43
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
44
}
35
}
45
36
46
tcg_rs = read_cpu_reg(s, rs, true);
37
if (!arm_feature(env, ARM_FEATURE_EL2)) {
47
+ tcg_rt = cpu_reg(s, rt);
38
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
48
39
if (cpu_isar_feature(aa64_vh, cpu)) {
49
if (o3_opc == 1) { /* LDCLR */
40
valid_mask |= HCR_E2H;
50
tcg_gen_not_i64(tcg_rs, tcg_rs);
41
}
51
@@ -XXX,XX +XXX,XX @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
42
+ if (cpu_isar_feature(aa64_ras, cpu)) {
52
/* The tcg atomic primitives are all full barriers. Therefore we
43
+ valid_mask |= HCR_TERR | HCR_TEA;
53
* can ignore the Acquire and Release bits of this instruction.
44
+ }
54
*/
45
if (cpu_isar_feature(aa64_lor, cpu)) {
55
- fn(cpu_reg(s, rt), clean_addr, tcg_rs, get_mem_index(s),
46
valid_mask |= HCR_TLOR;
56
- s->be_data | size | MO_ALIGN);
47
}
57
+ fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);
58
+
59
+ if ((mop & MO_SIGN) && size != MO_64) {
60
+ tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
61
+ }
62
}
63
64
/*
65
--
48
--
66
2.20.1
49
2.25.1
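
The LDSMIN/LDSMAX change above is easier to see with concrete numbers: for the 32-bit forms, 0xffffffff must compare as -1, and the result must still be written back zero-extended into the 64-bit destination register. A small standalone demonstration of both halves of that requirement (plain C, not QEMU code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t mem = 0xffffffffu;   /* -1 when interpreted as int32_t */
    uint32_t reg = 1;

    /* An unsigned comparison wrongly picks 1 as the minimum... */
    uint64_t wrong = (mem < reg) ? mem : reg;

    /* ...the signed comparison picks -1, and the 32-bit result is then
     * zero-extended into the destination X register. */
    int32_t smin = ((int32_t)mem < (int32_t)reg) ? (int32_t)mem : (int32_t)reg;
    uint64_t right = (uint32_t)smin;

    printf("unsigned min = %#llx, signed min zero-extended = %#llx\n",
           (unsigned long long)wrong, (unsigned long long)right);
    return 0;
}

This matches the shape of the fix: MO_SIGN makes the memory operand participate sign-extended, and tcg_gen_ext32u_i64() supplies the final zero-extension for the 32-bit sizes.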
67
68
1
If MVE is implemented for an M-profile CPU then it has a VPR
1
From: Richard Henderson <richard.henderson@linaro.org>
2
register, which tracks predication information.
3
2
4
Implement the read and write handling of this register, and
3
Virtual SError exceptions are raised by setting HCR_EL2.VSE,
5
the migration of its state.
4
and are routed to EL1 just like other virtual exceptions.
6
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220506180242.216785-16-richard.henderson@linaro.org
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210520152840.24453-7-peter.maydell@linaro.org
10
---
10
---
11
target/arm/cpu.h | 6 ++++++
11
target/arm/cpu.h | 2 ++
12
target/arm/machine.c | 19 +++++++++++++++++++
12
target/arm/internals.h | 8 ++++++++
13
target/arm/translate-vfp.c | 38 ++++++++++++++++++++++++++++++++++++++
13
target/arm/syndrome.h | 5 +++++
14
3 files changed, 63 insertions(+)
14
target/arm/cpu.c | 38 +++++++++++++++++++++++++++++++++++++-
15
target/arm/helper.c | 40 +++++++++++++++++++++++++++++++++++++++-
16
5 files changed, 91 insertions(+), 2 deletions(-)
15
17
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
20
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
21
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
22
@@ -XXX,XX +XXX,XX @@
21
uint32_t cpacr[M_REG_NUM_BANKS];
23
#define EXCP_LSERR 21 /* v8M LSERR SecureFault */
22
uint32_t nsacr;
24
#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
23
int ltpsize;
25
#define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */
24
+ uint32_t vpr;
26
+#define EXCP_VSERR 24
25
} v7m;
27
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
26
28
27
/* Information associated with an exception about to be taken:
29
#define ARMV7M_EXCP_RESET 1
28
@@ -XXX,XX +XXX,XX @@ FIELD(V7M_FPCCR, ASPEN, 31, 1)
30
@@ -XXX,XX +XXX,XX @@ enum {
29
R_V7M_FPCCR_UFRDY_MASK | \
31
#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
30
R_V7M_FPCCR_ASPEN_MASK)
32
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
31
33
#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
32
+/* v7M VPR bits */
34
+#define CPU_INTERRUPT_VSERR CPU_INTERRUPT_TGT_INT_0
33
+FIELD(V7M_VPR, P0, 0, 16)
35
34
+FIELD(V7M_VPR, MASK01, 16, 4)
36
/* The usual mapping for an AArch64 system register to its AArch32
35
+FIELD(V7M_VPR, MASK23, 20, 4)
37
* counterpart is for the 32 bit world to have access to the lower
36
+
38
diff --git a/target/arm/internals.h b/target/arm/internals.h
37
/*
39
index XXXXXXX..XXXXXXX 100644
38
* System register ID fields.
40
--- a/target/arm/internals.h
41
+++ b/target/arm/internals.h
42
@@ -XXX,XX +XXX,XX @@ void arm_cpu_update_virq(ARMCPU *cpu);
39
*/
43
*/
40
diff --git a/target/arm/machine.c b/target/arm/machine.c
44
void arm_cpu_update_vfiq(ARMCPU *cpu);
41
index XXXXXXX..XXXXXXX 100644
45
42
--- a/target/arm/machine.c
46
+/**
43
+++ b/target/arm/machine.c
47
+ * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
44
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m_fp = {
48
+ *
45
}
49
+ * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
46
};
50
+ * following a change to the HCR_EL2.VSE bit.
47
51
+ */
48
+static bool mve_needed(void *opaque)
52
+void arm_cpu_update_vserr(ARMCPU *cpu);
53
+
54
/**
55
* arm_mmu_idx_el:
56
* @env: The cpu environment
57
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/syndrome.h
60
+++ b/target/arm/syndrome.h
61
@@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_pcalignment(void)
62
return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;
63
}
64
65
+static inline uint32_t syn_serror(uint32_t extra)
49
+{
66
+{
50
+ ARMCPU *cpu = opaque;
67
+ return (EC_SERROR << ARM_EL_EC_SHIFT) | ARM_EL_IL | extra;
51
+
52
+ return cpu_isar_feature(aa32_mve, cpu);
53
+}
68
+}
54
+
69
+
55
+static const VMStateDescription vmstate_m_mve = {
70
#endif /* TARGET_ARM_SYNDROME_H */
56
+ .name = "cpu/m/mve",
71
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
57
+ .version_id = 1,
72
index XXXXXXX..XXXXXXX 100644
58
+ .minimum_version_id = 1,
73
--- a/target/arm/cpu.c
59
+ .needed = mve_needed,
74
+++ b/target/arm/cpu.c
60
+ .fields = (VMStateField[]) {
75
@@ -XXX,XX +XXX,XX @@ static bool arm_cpu_has_work(CPUState *cs)
61
+ VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
76
return (cpu->power_state != PSCI_OFF)
62
+ VMSTATE_END_OF_LIST()
77
&& cs->interrupt_request &
63
+ },
78
(CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
64
+};
79
- | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
65
+
80
+ | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
66
static const VMStateDescription vmstate_m = {
81
| CPU_INTERRUPT_EXITTB);
67
.name = "cpu/m",
82
}
68
.version_id = 4,
83
69
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m = {
84
@@ -XXX,XX +XXX,XX @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
70
&vmstate_m_other_sp,
85
return false;
71
&vmstate_m_v8m,
72
&vmstate_m_fp,
73
+ &vmstate_m_mve,
74
NULL
75
}
76
};
77
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/target/arm/translate-vfp.c
80
+++ b/target/arm/translate-vfp.c
81
@@ -XXX,XX +XXX,XX @@ static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
82
return FPSysRegCheckFailed;
83
}
86
}
87
return !(env->daif & PSTATE_I);
88
+ case EXCP_VSERR:
89
+ if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
90
+ /* VIRQs are only taken when hypervized. */
91
+ return false;
92
+ }
93
+ return !(env->daif & PSTATE_A);
94
default:
95
g_assert_not_reached();
96
}
97
@@ -XXX,XX +XXX,XX @@ static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
98
goto found;
99
}
100
}
101
+ if (interrupt_request & CPU_INTERRUPT_VSERR) {
102
+ excp_idx = EXCP_VSERR;
103
+ target_el = 1;
104
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
105
+ cur_el, secure, hcr_el2)) {
106
+ /* Taking a virtual abort clears HCR_EL2.VSE */
107
+ env->cp15.hcr_el2 &= ~HCR_VSE;
108
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
109
+ goto found;
110
+ }
111
+ }
112
return false;
113
114
found:
115
@@ -XXX,XX +XXX,XX @@ void arm_cpu_update_vfiq(ARMCPU *cpu)
116
}
117
}
118
119
+void arm_cpu_update_vserr(ARMCPU *cpu)
120
+{
121
+ /*
122
+ * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
123
+ */
124
+ CPUARMState *env = &cpu->env;
125
+ CPUState *cs = CPU(cpu);
126
+
127
+ bool new_state = env->cp15.hcr_el2 & HCR_VSE;
128
+
129
+ if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
130
+ if (new_state) {
131
+ cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
132
+ } else {
133
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
134
+ }
135
+ }
136
+}
137
+
138
#ifndef CONFIG_USER_ONLY
139
static void arm_cpu_set_irq(void *opaque, int irq, int level)
140
{
141
diff --git a/target/arm/helper.c b/target/arm/helper.c
142
index XXXXXXX..XXXXXXX 100644
143
--- a/target/arm/helper.c
144
+++ b/target/arm/helper.c
145
@@ -XXX,XX +XXX,XX @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
146
}
147
}
148
149
- /* External aborts are not possible in QEMU so A bit is always clear */
150
+ if (hcr_el2 & HCR_AMO) {
151
+ if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
152
+ ret |= CPSR_A;
153
+ }
154
+ }
155
+
156
return ret;
157
}
158
159
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
160
g_assert(qemu_mutex_iothread_locked());
161
arm_cpu_update_virq(cpu);
162
arm_cpu_update_vfiq(cpu);
163
+ arm_cpu_update_vserr(cpu);
164
}
165
166
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
167
@@ -XXX,XX +XXX,XX @@ void arm_log_exception(CPUState *cs)
168
[EXCP_LSERR] = "v8M LSERR UsageFault",
169
[EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
170
[EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
171
+ [EXCP_VSERR] = "Virtual SERR",
172
};
173
174
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
175
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
176
mask = CPSR_A | CPSR_I | CPSR_F;
177
offset = 4;
84
break;
178
break;
85
+ case ARM_VFP_VPR:
179
+ case EXCP_VSERR:
86
+ case ARM_VFP_P0:
180
+ {
87
+ if (!dc_isar_feature(aa32_mve, s)) {
181
+ /*
88
+ return FPSysRegCheckFailed;
182
+ * Note that this is reported as a data abort, but the DFAR
89
+ }
183
+ * has an UNKNOWN value. Construct the SError syndrome from
184
+ * AET and ExT fields.
185
+ */
186
+ ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
187
+
188
+ if (extended_addresses_enabled(env)) {
189
+ env->exception.fsr = arm_fi_to_lfsc(&fi);
190
+ } else {
191
+ env->exception.fsr = arm_fi_to_sfsc(&fi);
192
+ }
193
+ env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
194
+ A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
195
+ qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
196
+ env->exception.fsr);
197
+
198
+ new_mode = ARM_CPU_MODE_ABT;
199
+ addr = 0x10;
200
+ mask = CPSR_A | CPSR_I;
201
+ offset = 8;
202
+ }
203
+ break;
204
case EXCP_SMC:
205
new_mode = ARM_CPU_MODE_MON;
206
addr = 0x08;
207
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
208
case EXCP_VFIQ:
209
addr += 0x100;
210
break;
211
+ case EXCP_VSERR:
212
+ addr += 0x180;
213
+ /* Construct the SError syndrome from IDS and ISS fields. */
214
+ env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
215
+ env->cp15.esr_el[new_el] = env->exception.syndrome;
90
+ break;
216
+ break;
91
default:
217
default:
92
return FPSysRegCheckFailed;
218
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
93
}
94
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
95
tcg_temp_free_i32(sfpa);
96
break;
97
}
98
+ case ARM_VFP_VPR:
99
+ /* Behaves as NOP if not privileged */
100
+ if (IS_USER(s)) {
101
+ break;
102
+ }
103
+ tmp = loadfn(s, opaque);
104
+ store_cpu_field(tmp, v7m.vpr);
105
+ break;
106
+ case ARM_VFP_P0:
107
+ {
108
+ TCGv_i32 vpr;
109
+ tmp = loadfn(s, opaque);
110
+ vpr = load_cpu_field(v7m.vpr);
111
+ tcg_gen_deposit_i32(vpr, vpr, tmp,
112
+ R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
113
+ store_cpu_field(vpr, v7m.vpr);
114
+ tcg_temp_free_i32(tmp);
115
+ break;
116
+ }
117
default:
118
g_assert_not_reached();
119
}
120
@@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
121
tcg_temp_free_i32(fpscr);
122
break;
123
}
124
+ case ARM_VFP_VPR:
125
+ /* Behaves as NOP if not privileged */
126
+ if (IS_USER(s)) {
127
+ break;
128
+ }
129
+ tmp = load_cpu_field(v7m.vpr);
130
+ storefn(s, opaque, tmp);
131
+ break;
132
+ case ARM_VFP_P0:
133
+ tmp = load_cpu_field(v7m.vpr);
134
+ tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
135
+ storefn(s, opaque, tmp);
136
+ break;
137
default:
138
g_assert_not_reached();
139
}
219
}
140
--
220
--
141
2.20.1
221
2.25.1
142
143
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This is the 64-bit BFCVT and the 32-bit VCVT{B,T}.BF16.F32.
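As a rough illustration of the conversion itself (not the QEMU implementation, which goes through softfloat's float32_to_bfloat16() so that FPCR rounding, flush-to-zero and exception flags are honoured), a float32 can be narrowed to bfloat16 with round-to-nearest-even like this:

    #include <stdint.h>
    #include <string.h>

    /* Sketch only: ignores FP exception flags and non-default rounding modes. */
    static uint16_t f32_to_bf16(float f)
    {
        uint32_t bits;

        memcpy(&bits, &f, sizeof(bits));
        if ((bits & 0x7fffffff) > 0x7f800000) {
            /* NaN input: keep it a quiet NaN after narrowing. */
            return (bits >> 16) | 0x40;
        }
        /* Round to nearest, ties to even, then keep the top 16 bits. */
        bits += 0x7fff + ((bits >> 16) & 1);
        return bits >> 16;
    }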
3
Check for and defer any pending virtual SError.
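The enable/mask rule this implements is small enough to state directly; the following is a standalone sketch of the decision (HCR_EL2 bit positions as architected), not the QEMU code itself:

    #include <stdbool.h>
    #include <stdint.h>

    #define HCR_AMO (1ULL << 5)   /* SError exceptions routed to EL2 */
    #define HCR_VSE (1ULL << 8)   /* virtual SError pending */
    #define HCR_TGE (1ULL << 27)  /* trap general exceptions to EL2 */

    /*
     * A virtual SError is pending only when the hypervisor has set
     * HCR_EL2.VSE with AMO set and TGE clear; it is taken when PSTATE.A
     * does not mask it, and otherwise stays deferred.
     */
    static bool virtual_serror_taken(uint64_t hcr_el2, bool pstate_a)
    {
        bool enabled = (hcr_el2 & HCR_AMO) && !(hcr_el2 & HCR_TGE);
        bool pending = enabled && (hcr_el2 & HCR_VSE);

        return pending && !pstate_a;
    }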
4
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20210525225817.400336-4-richard.henderson@linaro.org
7
Message-id: 20220506180242.216785-17-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
9
---
10
target/arm/helper.h | 1 +
10
target/arm/helper.h | 1 +
11
target/arm/vfp.decode | 2 ++
11
target/arm/a32.decode | 16 ++++++++------
12
target/arm/translate-a64.c | 19 +++++++++++++++++++
12
target/arm/t32.decode | 18 ++++++++--------
13
target/arm/translate-vfp.c | 24 ++++++++++++++++++++++++
13
target/arm/op_helper.c | 43 ++++++++++++++++++++++++++++++++++++++
14
target/arm/vfp_helper.c | 5 +++++
14
target/arm/translate-a64.c | 17 +++++++++++++++
15
5 files changed, 51 insertions(+)
15
target/arm/translate.c | 23 ++++++++++++++++++++
16
6 files changed, 103 insertions(+), 15 deletions(-)
16
17
17
diff --git a/target/arm/helper.h b/target/arm/helper.h
18
diff --git a/target/arm/helper.h b/target/arm/helper.h
18
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper.h
20
--- a/target/arm/helper.h
20
+++ b/target/arm/helper.h
21
+++ b/target/arm/helper.h
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
22
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(wfe, void, env)
22
23
DEF_HELPER_1(yield, void, env)
23
DEF_HELPER_2(vfp_fcvtds, f64, f32, env)
24
DEF_HELPER_1(pre_hvc, void, env)
24
DEF_HELPER_2(vfp_fcvtsd, f32, f64, env)
25
DEF_HELPER_2(pre_smc, void, env, i32)
25
+DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, ptr)
26
+DEF_HELPER_1(vesb, void, env)
26
27
27
DEF_HELPER_2(vfp_uitoh, f16, i32, ptr)
28
DEF_HELPER_3(cpsr_write, void, env, i32, i32)
28
DEF_HELPER_2(vfp_uitos, f32, i32, ptr)
29
DEF_HELPER_2(cpsr_write_eret, void, env, i32)
29
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
30
diff --git a/target/arm/a32.decode b/target/arm/a32.decode
30
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/vfp.decode
32
--- a/target/arm/a32.decode
32
+++ b/target/arm/vfp.decode
33
+++ b/target/arm/a32.decode
33
@@ -XXX,XX +XXX,XX @@ VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 .... \
34
@@ -XXX,XX +XXX,XX @@ SMULTT .... 0001 0110 .... 0000 .... 1110 .... @rd0mn
34
35
35
# VCVTB and VCVTT to f16: Vd format is always vd_sp;
36
{
36
# Vm format depends on size bit
37
{
37
+VCVT_b16_f32 ---- 1110 1.11 0011 .... 1001 t:1 1.0 .... \
38
- YIELD ---- 0011 0010 0000 1111 ---- 0000 0001
38
+ vd=%vd_sp vm=%vm_sp
39
- WFE ---- 0011 0010 0000 1111 ---- 0000 0010
39
VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \
40
- WFI ---- 0011 0010 0000 1111 ---- 0000 0011
40
vd=%vd_sp vm=%vm_sp
41
+ [
41
VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \
42
+ YIELD ---- 0011 0010 0000 1111 ---- 0000 0001
43
+ WFE ---- 0011 0010 0000 1111 ---- 0000 0010
44
+ WFI ---- 0011 0010 0000 1111 ---- 0000 0011
45
46
- # TODO: Implement SEV, SEVL; may help SMP performance.
47
- # SEV ---- 0011 0010 0000 1111 ---- 0000 0100
48
- # SEVL ---- 0011 0010 0000 1111 ---- 0000 0101
49
+ # TODO: Implement SEV, SEVL; may help SMP performance.
50
+ # SEV ---- 0011 0010 0000 1111 ---- 0000 0100
51
+ # SEVL ---- 0011 0010 0000 1111 ---- 0000 0101
52
+
53
+ ESB ---- 0011 0010 0000 1111 ---- 0001 0000
54
+ ]
55
56
# The canonical nop ends in 00000000, but the whole of the
57
# rest of the space executes as nop if otherwise unsupported.
58
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/arm/t32.decode
61
+++ b/target/arm/t32.decode
62
@@ -XXX,XX +XXX,XX @@ CLZ 1111 1010 1011 ---- 1111 .... 1000 .... @rdm
63
[
64
# Hints, and CPS
65
{
66
- YIELD 1111 0011 1010 1111 1000 0000 0000 0001
67
- WFE 1111 0011 1010 1111 1000 0000 0000 0010
68
- WFI 1111 0011 1010 1111 1000 0000 0000 0011
69
+ [
70
+ YIELD 1111 0011 1010 1111 1000 0000 0000 0001
71
+ WFE 1111 0011 1010 1111 1000 0000 0000 0010
72
+ WFI 1111 0011 1010 1111 1000 0000 0000 0011
73
74
- # TODO: Implement SEV, SEVL; may help SMP performance.
75
- # SEV 1111 0011 1010 1111 1000 0000 0000 0100
76
- # SEVL 1111 0011 1010 1111 1000 0000 0000 0101
77
+ # TODO: Implement SEV, SEVL; may help SMP performance.
78
+ # SEV 1111 0011 1010 1111 1000 0000 0000 0100
79
+ # SEVL 1111 0011 1010 1111 1000 0000 0000 0101
80
81
- # For M-profile minimal-RAS ESB can be a NOP, which is the
82
- # default behaviour since it is in the hint space.
83
- # ESB 1111 0011 1010 1111 1000 0000 0001 0000
84
+ ESB 1111 0011 1010 1111 1000 0000 0001 0000
85
+ ]
86
87
# The canonical nop ends in 0000 0000, but the whole rest
88
# of the space is "reserved hint, behaves as nop".
89
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/target/arm/op_helper.c
92
+++ b/target/arm/op_helper.c
93
@@ -XXX,XX +XXX,XX @@ void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
94
access_type, mmu_idx, ra);
95
}
96
}
97
+
98
+/*
99
+ * This function corresponds to AArch64.vESBOperation().
100
+ * Note that the AArch32 version is not functionally different.
101
+ */
102
+void HELPER(vesb)(CPUARMState *env)
103
+{
104
+ /*
105
+ * The EL2Enabled() check is done inside arm_hcr_el2_eff,
106
+ * and will return HCR_EL2.VSE == 0, so nothing happens.
107
+ */
108
+ uint64_t hcr = arm_hcr_el2_eff(env);
109
+ bool enabled = !(hcr & HCR_TGE) && (hcr & HCR_AMO);
110
+ bool pending = enabled && (hcr & HCR_VSE);
111
+ bool masked = (env->daif & PSTATE_A);
112
+
113
+ /* If VSE pending and masked, defer the exception. */
114
+ if (pending && masked) {
115
+ uint32_t syndrome;
116
+
117
+ if (arm_el_is_aa64(env, 1)) {
118
+ /* Copy across IDS and ISS from VSESR. */
119
+ syndrome = env->cp15.vsesr_el2 & 0x1ffffff;
120
+ } else {
121
+ ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal };
122
+
123
+ if (extended_addresses_enabled(env)) {
124
+ syndrome = arm_fi_to_lfsc(&fi);
125
+ } else {
126
+ syndrome = arm_fi_to_sfsc(&fi);
127
+ }
128
+ /* Copy across AET and ExT from VSESR. */
129
+ syndrome |= env->cp15.vsesr_el2 & 0xd000;
130
+ }
131
+
132
+ /* Set VDISR_EL2.A along with the syndrome. */
133
+ env->cp15.vdisr_el2 = syndrome | (1u << 31);
134
+
135
+ /* Clear pending virtual SError */
136
+ env->cp15.hcr_el2 &= ~HCR_VSE;
137
+ cpu_reset_interrupt(env_cpu(env), CPU_INTERRUPT_VSERR);
138
+ }
139
+}
42
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
140
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
43
index XXXXXXX..XXXXXXX 100644
141
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/translate-a64.c
142
--- a/target/arm/translate-a64.c
45
+++ b/target/arm/translate-a64.c
143
+++ b/target/arm/translate-a64.c
46
@@ -XXX,XX +XXX,XX @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
144
@@ -XXX,XX +XXX,XX @@ static void handle_hint(DisasContext *s, uint32_t insn,
47
case 0x3: /* FSQRT */
145
gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
48
gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
49
goto done;
50
+ case 0x6: /* BFCVT */
51
+ gen_fpst = gen_helper_bfcvt;
52
+ break;
53
case 0x8: /* FRINTN */
54
case 0x9: /* FRINTP */
55
case 0xa: /* FRINTM */
56
@@ -XXX,XX +XXX,XX @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
57
}
146
}
58
break;
147
break;
59
148
+ case 0b10000: /* ESB */
60
+ case 0x6:
149
+ /* Without RAS, we must implement this as NOP. */
61
+ switch (type) {
150
+ if (dc_isar_feature(aa64_ras, s)) {
62
+ case 1: /* BFCVT */
151
+ /*
63
+ if (!dc_isar_feature(aa64_bf16, s)) {
152
+ * QEMU does not have a source of physical SErrors,
64
+ goto do_unallocated;
153
+ * so we are only concerned with virtual SErrors.
154
+ * The pseudocode in the ARM for this case is
155
+ * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
156
+ * AArch64.vESBOperation();
157
+ * Most of the condition can be evaluated at translation time.
158
+ * Test for EL2 present, and defer test for SEL2 to runtime.
159
+ */
160
+ if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
161
+ gen_helper_vesb(cpu_env);
65
+ }
162
+ }
66
+ if (!fp_access_check(s)) {
67
+ return;
68
+ }
69
+ handle_fp_1src_single(s, opcode, rd, rn);
70
+ break;
71
+ default:
72
+ goto do_unallocated;
73
+ }
163
+ }
74
+ break;
164
+ break;
75
+
165
case 0b11000: /* PACIAZ */
76
default:
166
if (s->pauth_active) {
77
do_unallocated:
167
gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
78
unallocated_encoding(s);
168
diff --git a/target/arm/translate.c b/target/arm/translate.c
79
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
169
index XXXXXXX..XXXXXXX 100644
80
index XXXXXXX..XXXXXXX 100644
170
--- a/target/arm/translate.c
81
--- a/target/arm/translate-vfp.c
171
+++ b/target/arm/translate.c
82
+++ b/target/arm/translate-vfp.c
172
@@ -XXX,XX +XXX,XX @@ static bool trans_WFI(DisasContext *s, arg_WFI *a)
83
@@ -XXX,XX +XXX,XX @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
84
return true;
173
return true;
85
}
174
}
86
175
87
+static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a)
176
+static bool trans_ESB(DisasContext *s, arg_ESB *a)
88
+{
177
+{
89
+ TCGv_ptr fpst;
178
+ /*
90
+ TCGv_i32 tmp;
179
+ * For M-profile, minimal-RAS ESB can be a NOP.
91
+
180
+ * Without RAS, we must implement this as NOP.
92
+ if (!dc_isar_feature(aa32_bf16, s)) {
181
+ */
93
+ return false;
182
+ if (!arm_dc_feature(s, ARM_FEATURE_M) && dc_isar_feature(aa32_ras, s)) {
183
+ /*
184
+ * QEMU does not have a source of physical SErrors,
185
+ * so we are only concerned with virtual SErrors.
186
+ * The pseudocode in the ARM for this case is
187
+ * if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
188
+ * AArch32.vESBOperation();
189
+ * Most of the condition can be evaluated at translation time.
190
+ * Test for EL2 present, and defer test for SEL2 to runtime.
191
+ */
192
+ if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
193
+ gen_helper_vesb(cpu_env);
194
+ }
94
+ }
195
+ }
95
+
96
+ if (!vfp_access_check(s)) {
97
+ return true;
98
+ }
99
+
100
+ fpst = fpstatus_ptr(FPST_FPCR);
101
+ tmp = tcg_temp_new_i32();
102
+
103
+ vfp_load_reg32(tmp, a->vm);
104
+ gen_helper_bfcvt(tmp, tmp, fpst);
105
+ tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
106
+ tcg_temp_free_ptr(fpst);
107
+ tcg_temp_free_i32(tmp);
108
+ return true;
196
+ return true;
109
+}
197
+}
110
+
198
+
111
static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
199
static bool trans_NOP(DisasContext *s, arg_NOP *a)
112
{
200
{
113
TCGv_ptr fpst;
201
return true;
114
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
115
index XXXXXXX..XXXXXXX 100644
116
--- a/target/arm/vfp_helper.c
117
+++ b/target/arm/vfp_helper.c
118
@@ -XXX,XX +XXX,XX @@ float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
119
return float64_to_float32(x, &env->vfp.fp_status);
120
}
121
122
+uint32_t HELPER(bfcvt)(float32 x, void *status)
123
+{
124
+ return float32_to_bfloat16(x, status);
125
+}
126
+
127
/*
128
* VFP3 fixed point conversion. The AArch32 versions of fix-to-float
129
* must always round-to-nearest; the AArch64 ones honour the FPSCR
130
--
202
--
131
2.20.1
203
2.25.1
132
133
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Message-id: 20210525225817.400336-12-richard.henderson@linaro.org
5
Message-id: 20220506180242.216785-18-richard.henderson@linaro.org
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
---
7
---
8
linux-user/elfload.c | 2 ++
8
docs/system/arm/emulation.rst | 1 +
9
1 file changed, 2 insertions(+)
9
target/arm/cpu64.c | 1 +
10
target/arm/cpu_tcg.c | 1 +
11
3 files changed, 3 insertions(+)
10
12
11
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
13
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
12
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
13
--- a/linux-user/elfload.c
15
--- a/docs/system/arm/emulation.rst
14
+++ b/linux-user/elfload.c
16
+++ b/docs/system/arm/emulation.rst
15
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap2(void)
17
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
16
GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
18
- FEAT_PMULL (PMULL, PMULL2 instructions)
17
GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
19
- FEAT_PMUv3p1 (PMU Extensions v3.1)
18
GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
20
- FEAT_PMUv3p4 (PMU Extensions v3.4)
19
+ GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
21
+- FEAT_RAS (Reliability, availability, and serviceability)
20
GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
22
- FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions)
21
+ GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
23
- FEAT_RNG (Random number generator)
22
GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
24
- FEAT_SB (Speculation Barrier)
23
GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
25
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
24
GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/cpu64.c
28
+++ b/target/arm/cpu64.c
29
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
30
t = cpu->isar.id_aa64pfr0;
31
t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */
32
t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */
33
+ t = FIELD_DP64(t, ID_AA64PFR0, RAS, 1); /* FEAT_RAS */
34
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
35
t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */
36
t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
37
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/cpu_tcg.c
40
+++ b/target/arm/cpu_tcg.c
41
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
42
43
t = cpu->isar.id_pfr0;
44
t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */
45
+ t = FIELD_DP32(t, ID_PFR0, RAS, 1); /* FEAT_RAS */
46
cpu->isar.id_pfr0 = t;
47
48
t = cpu->isar.id_pfr2;
25
--
49
--
26
2.20.1
50
2.25.1
27
28
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This is BFMLAL{B,T} for both AArch64 AdvSIMD and SVE,
3
This feature is AArch64 only, and applies to physical SErrors,
4
and VFMA{B,T}.BF16 for AArch32 NEON.
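In scalar terms each lane does a widening multiply-accumulate: the selected bfloat16 inputs are widened to float32 by moving them into the top half of a 32-bit word, multiplied, and added into a float32 accumulator. A per-element sketch (plain C arithmetic; the real helpers use softfloat's float32_muladd() with the guest FP status, so this sketch double-rounds where the helper does not):

    #include <stdint.h>

    static inline float bf16_to_f32(uint16_t h)
    {
        union { uint32_t i; float f; } u = { .i = (uint32_t)h << 16 };
        return u.f;
    }

    /* One BFMLAL element: acc += widen(n) * widen(m). */
    static float bfmlal_element(float acc, uint16_t n, uint16_t m)
    {
        return acc + bf16_to_f32(n) * bf16_to_f32(m);
    }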
4
which QEMU does not implement, thus the feature is a nop.
5
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210525225817.400336-11-richard.henderson@linaro.org
8
Message-id: 20220506180242.216785-19-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
target/arm/helper.h | 2 ++
11
docs/system/arm/emulation.rst | 1 +
12
target/arm/neon-shared.decode | 2 ++
12
target/arm/cpu64.c | 1 +
13
target/arm/sve.decode | 2 ++
13
2 files changed, 2 insertions(+)
14
target/arm/translate-a64.c | 15 ++++++++++++++-
15
target/arm/translate-neon.c | 10 ++++++++++
16
target/arm/translate-sve.c | 30 ++++++++++++++++++++++++++++++
17
target/arm/vec_helper.c | 22 ++++++++++++++++++++++
18
7 files changed, 82 insertions(+), 1 deletion(-)
19
14
20
diff --git a/target/arm/helper.h b/target/arm/helper.h
15
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
21
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/helper.h
17
--- a/docs/system/arm/emulation.rst
23
+++ b/target/arm/helper.h
18
+++ b/docs/system/arm/emulation.rst
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_bfmmla, TCG_CALL_NO_RWG,
19
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
25
20
- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
26
DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
21
- FEAT_HPDS (Hierarchical permission disables)
27
void, ptr, ptr, ptr, ptr, ptr, i32)
22
- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
28
+DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
23
+- FEAT_IESB (Implicit error synchronization event)
29
+ void, ptr, ptr, ptr, ptr, ptr, i32)
24
- FEAT_JSCVT (JavaScript conversion instructions)
30
25
- FEAT_LOR (Limited ordering regions)
31
#ifdef TARGET_AARCH64
26
- FEAT_LPA (Large Physical Address space)
32
#include "helper-a64.h"
27
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
33
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
34
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/neon-shared.decode
29
--- a/target/arm/cpu64.c
36
+++ b/target/arm/neon-shared.decode
30
+++ b/target/arm/cpu64.c
37
@@ -XXX,XX +XXX,XX @@ VFML_scalar 1111 1110 0 . 0 s:1 .... .... 1000 . 0 . 1 index:1 ... \
31
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
38
rm=%vfml_scalar_q0_rm vn=%vn_sp vd=%vd_dp q=0
32
t = cpu->isar.id_aa64mmfr2;
39
VFML_scalar 1111 1110 0 . 0 s:1 .... .... 1000 . 1 . 1 . rm:3 \
33
t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* FEAT_TTCNP */
40
index=%vfml_scalar_q1_index vn=%vn_dp vd=%vd_dp q=1
34
t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */
41
+VFMA_b16_scal 1111 1110 0.11 .... .... 1000 . q:1 . 1 . vm:3 \
35
+ t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */
42
+ index=%vfml_scalar_q1_index vn=%vn_dp vd=%vd_dp
36
t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */
43
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
37
t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */
44
index XXXXXXX..XXXXXXX 100644
38
t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */
45
--- a/target/arm/sve.decode
46
+++ b/target/arm/sve.decode
47
@@ -XXX,XX +XXX,XX @@ FMLALB_zzxw 01100100 10 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2
48
FMLALT_zzxw 01100100 10 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
49
FMLSLB_zzxw 01100100 10 1 ..... 0110.0 ..... ..... @rrxr_3a esz=2
50
FMLSLT_zzxw 01100100 10 1 ..... 0110.1 ..... ..... @rrxr_3a esz=2
51
+BFMLALB_zzxw 01100100 11 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2
52
+BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
53
54
### SVE2 floating-point bfloat16 dot-product (indexed)
55
BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2
56
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/translate-a64.c
59
+++ b/target/arm/translate-a64.c
60
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
61
unallocated_encoding(s);
62
return;
63
}
64
+ size = MO_32;
65
break;
66
case 1: /* BFDOT */
67
if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
68
unallocated_encoding(s);
69
return;
70
}
71
+ size = MO_32;
72
+ break;
73
+ case 3: /* BFMLAL{B,T} */
74
+ if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
75
+ unallocated_encoding(s);
76
+ return;
77
+ }
78
+ /* can't set is_fp without other incorrect size checks */
79
+ size = MO_16;
80
break;
81
default:
82
unallocated_encoding(s);
83
return;
84
}
85
- size = MO_32;
86
break;
87
case 0x11: /* FCMLA #0 */
88
case 0x13: /* FCMLA #90 */
89
@@ -XXX,XX +XXX,XX @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
90
gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
91
gen_helper_gvec_usdot_idx_b);
92
return;
93
+ case 3: /* BFMLAL{B,T} */
94
+ gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
95
+ gen_helper_gvec_bfmlal_idx);
96
+ return;
97
}
98
g_assert_not_reached();
99
case 0x11: /* FCMLA #0 */
100
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
101
index XXXXXXX..XXXXXXX 100644
102
--- a/target/arm/translate-neon.c
103
+++ b/target/arm/translate-neon.c
104
@@ -XXX,XX +XXX,XX @@ static bool trans_VFMA_b16(DisasContext *s, arg_VFMA_b16 *a)
105
return do_neon_ddda_fpst(s, 7, a->vd, a->vn, a->vm, a->q, FPST_STD,
106
gen_helper_gvec_bfmlal);
107
}
108
+
109
+static bool trans_VFMA_b16_scal(DisasContext *s, arg_VFMA_b16_scal *a)
110
+{
111
+ if (!dc_isar_feature(aa32_bf16, s)) {
112
+ return false;
113
+ }
114
+ return do_neon_ddda_fpst(s, 6, a->vd, a->vn, a->vm,
115
+ (a->index << 1) | a->q, FPST_STD,
116
+ gen_helper_gvec_bfmlal_idx);
117
+}
118
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
119
index XXXXXXX..XXXXXXX 100644
120
--- a/target/arm/translate-sve.c
121
+++ b/target/arm/translate-sve.c
122
@@ -XXX,XX +XXX,XX @@ static bool trans_BFMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
123
{
124
return do_BFMLAL_zzzw(s, a, true);
125
}
126
+
127
+static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
128
+{
129
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
130
+ return false;
131
+ }
132
+ if (sve_access_check(s)) {
133
+ TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
134
+ unsigned vsz = vec_full_reg_size(s);
135
+
136
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
137
+ vec_full_reg_offset(s, a->rn),
138
+ vec_full_reg_offset(s, a->rm),
139
+ vec_full_reg_offset(s, a->ra),
140
+ status, vsz, vsz, (a->index << 1) | sel,
141
+ gen_helper_gvec_bfmlal_idx);
142
+ tcg_temp_free_ptr(status);
143
+ }
144
+ return true;
145
+}
146
+
147
+static bool trans_BFMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
148
+{
149
+ return do_BFMLAL_zzxw(s, a, false);
150
+}
151
+
152
+static bool trans_BFMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
153
+{
154
+ return do_BFMLAL_zzxw(s, a, true);
155
+}
156
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
157
index XXXXXXX..XXXXXXX 100644
158
--- a/target/arm/vec_helper.c
159
+++ b/target/arm/vec_helper.c
160
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bfmlal)(void *vd, void *vn, void *vm, void *va,
161
}
162
clear_tail(d, opr_sz, simd_maxsz(desc));
163
}
164
+
165
+void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm,
166
+ void *va, void *stat, uint32_t desc)
167
+{
168
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
169
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1);
170
+ intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 1, 3);
171
+ intptr_t elements = opr_sz / 4;
172
+ intptr_t eltspersegment = MIN(16 / 4, elements);
173
+ float32 *d = vd, *a = va;
174
+ bfloat16 *n = vn, *m = vm;
175
+
176
+ for (i = 0; i < elements; i += eltspersegment) {
177
+ float32 m_idx = m[H2(2 * i + index)] << 16;
178
+
179
+ for (j = i; j < i + eltspersegment; j++) {
180
+ float32 n_j = n[H2(2 * j + sel)] << 16;
181
+ d[H4(j)] = float32_muladd(n_j, m_idx, a[H4(j)], 0, stat);
182
+ }
183
+ }
184
+ clear_tail(d, opr_sz, simd_maxsz(desc));
185
+}
186
--
39
--
187
2.20.1
40
2.25.1
188
189
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This is BFMLAL{B,T} for both AArch64 AdvSIMD and SVE,
3
This extension concerns branch speculation, which TCG does
4
and VFMA{B,T}.BF16 for AArch32 NEON.
4
not implement. Thus we can trivially enable this feature.
5
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210525225817.400336-10-richard.henderson@linaro.org
8
Message-id: 20220506180242.216785-20-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
10
---
11
target/arm/helper.h | 3 +++
11
docs/system/arm/emulation.rst | 1 +
12
target/arm/neon-shared.decode | 3 +++
12
target/arm/cpu64.c | 1 +
13
target/arm/sve.decode | 3 +++
13
target/arm/cpu_tcg.c | 1 +
14
target/arm/translate-a64.c | 13 +++++++++----
14
3 files changed, 3 insertions(+)
15
target/arm/translate-neon.c | 9 +++++++++
16
target/arm/translate-sve.c | 30 ++++++++++++++++++++++++++++++
17
target/arm/vec_helper.c | 16 ++++++++++++++++
18
7 files changed, 73 insertions(+), 4 deletions(-)
19
15
20
diff --git a/target/arm/helper.h b/target/arm/helper.h
16
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
21
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/helper.h
18
--- a/docs/system/arm/emulation.rst
23
+++ b/target/arm/helper.h
19
+++ b/docs/system/arm/emulation.rst
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_bfdot_idx, TCG_CALL_NO_RWG,
20
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
25
DEF_HELPER_FLAGS_5(gvec_bfmmla, TCG_CALL_NO_RWG,
21
- FEAT_BBM at level 2 (Translation table break-before-make levels)
26
void, ptr, ptr, ptr, ptr, i32)
22
- FEAT_BF16 (AArch64 BFloat16 instructions)
27
23
- FEAT_BTI (Branch Target Identification)
28
+DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
24
+- FEAT_CSV2 (Cache speculation variant 2)
29
+ void, ptr, ptr, ptr, ptr, ptr, i32)
25
- FEAT_DIT (Data Independent Timing instructions)
30
+
26
- FEAT_DPB (DC CVAP instruction)
31
#ifdef TARGET_AARCH64
27
- FEAT_Debugv8p2 (Debug changes for v8.2)
32
#include "helper-a64.h"
28
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
33
#include "helper-sve.h"
34
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
35
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/neon-shared.decode
30
--- a/target/arm/cpu64.c
37
+++ b/target/arm/neon-shared.decode
31
+++ b/target/arm/cpu64.c
38
@@ -XXX,XX +XXX,XX @@ VUSMMLA 1111 1100 1.10 .... .... 1100 .1.0 .... \
32
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
39
VMMLA_b16 1111 1100 0.00 .... .... 1100 .1.0 .... \
33
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
40
vm=%vm_dp vn=%vn_dp vd=%vd_dp
34
t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */
41
35
t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
42
+VFMA_b16 1111 110 0 0.11 .... .... 1000 . q:1 . 1 .... \
36
+ t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 1); /* FEAT_CSV2 */
43
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
37
cpu->isar.id_aa64pfr0 = t;
44
+
38
45
VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
39
t = cpu->isar.id_aa64pfr1;
46
vn=%vn_dp vd=%vd_dp size=1
40
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
47
VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
48
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
49
index XXXXXXX..XXXXXXX 100644
41
index XXXXXXX..XXXXXXX 100644
50
--- a/target/arm/sve.decode
42
--- a/target/arm/cpu_tcg.c
51
+++ b/target/arm/sve.decode
43
+++ b/target/arm/cpu_tcg.c
52
@@ -XXX,XX +XXX,XX @@ FMLALT_zzzw 01100100 10 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0
44
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
53
FMLSLB_zzzw 01100100 10 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm_e0
45
cpu->isar.id_mmfr4 = t;
54
FMLSLT_zzzw 01100100 10 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm_e0
46
55
47
t = cpu->isar.id_pfr0;
56
+BFMLALB_zzzw 01100100 11 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
48
+ t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CVS2 */
57
+BFMLALT_zzzw 01100100 11 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0
49
t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */
58
+
50
t = FIELD_DP32(t, ID_PFR0, RAS, 1); /* FEAT_RAS */
59
### SVE2 floating-point bfloat16 dot-product
51
cpu->isar.id_pfr0 = t;
60
BFDOT_zzzz 01100100 01 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
61
62
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/target/arm/translate-a64.c
65
+++ b/target/arm/translate-a64.c
66
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
67
}
68
feature = dc_isar_feature(aa64_bf16, s);
69
break;
70
- case 0x1f: /* BFDOT */
71
+ case 0x1f:
72
switch (size) {
73
- case 1:
74
+ case 1: /* BFDOT */
75
+ case 3: /* BFMLAL{B,T} */
76
feature = dc_isar_feature(aa64_bf16, s);
77
break;
78
default:
79
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
80
case 0xd: /* BFMMLA */
81
gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
82
return;
83
- case 0xf: /* BFDOT */
84
+ case 0xf:
85
switch (size) {
86
- case 1:
87
+ case 1: /* BFDOT */
88
gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
89
break;
90
+ case 3: /* BFMLAL{B,T} */
91
+ gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
92
+ gen_helper_gvec_bfmlal);
93
+ break;
94
default:
95
g_assert_not_reached();
96
}
97
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/target/arm/translate-neon.c
100
+++ b/target/arm/translate-neon.c
101
@@ -XXX,XX +XXX,XX @@ static bool trans_VMMLA_b16(DisasContext *s, arg_VMMLA_b16 *a)
102
return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
103
gen_helper_gvec_bfmmla);
104
}
105
+
106
+static bool trans_VFMA_b16(DisasContext *s, arg_VFMA_b16 *a)
107
+{
108
+ if (!dc_isar_feature(aa32_bf16, s)) {
109
+ return false;
110
+ }
111
+ return do_neon_ddda_fpst(s, 7, a->vd, a->vn, a->vm, a->q, FPST_STD,
112
+ gen_helper_gvec_bfmlal);
113
+}
114
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
115
index XXXXXXX..XXXXXXX 100644
116
--- a/target/arm/translate-sve.c
117
+++ b/target/arm/translate-sve.c
118
@@ -XXX,XX +XXX,XX @@ static bool trans_BFMMLA(DisasContext *s, arg_rrrr_esz *a)
119
}
120
return true;
121
}
122
+
123
+static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
124
+{
125
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
126
+ return false;
127
+ }
128
+ if (sve_access_check(s)) {
129
+ TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
130
+ unsigned vsz = vec_full_reg_size(s);
131
+
132
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
133
+ vec_full_reg_offset(s, a->rn),
134
+ vec_full_reg_offset(s, a->rm),
135
+ vec_full_reg_offset(s, a->ra),
136
+ status, vsz, vsz, sel,
137
+ gen_helper_gvec_bfmlal);
138
+ tcg_temp_free_ptr(status);
139
+ }
140
+ return true;
141
+}
142
+
143
+static bool trans_BFMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
144
+{
145
+ return do_BFMLAL_zzzw(s, a, false);
146
+}
147
+
148
+static bool trans_BFMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
149
+{
150
+ return do_BFMLAL_zzzw(s, a, true);
151
+}
152
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
153
index XXXXXXX..XXXXXXX 100644
154
--- a/target/arm/vec_helper.c
155
+++ b/target/arm/vec_helper.c
156
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
157
}
158
clear_tail(d, opr_sz, simd_maxsz(desc));
159
}
160
+
161
+void HELPER(gvec_bfmlal)(void *vd, void *vn, void *vm, void *va,
162
+ void *stat, uint32_t desc)
163
+{
164
+ intptr_t i, opr_sz = simd_oprsz(desc);
165
+ intptr_t sel = simd_data(desc);
166
+ float32 *d = vd, *a = va;
167
+ bfloat16 *n = vn, *m = vm;
168
+
169
+ for (i = 0; i < opr_sz / 4; ++i) {
170
+ float32 nn = n[H2(i * 2 + sel)] << 16;
171
+ float32 mm = m[H2(i * 2 + sel)] << 16;
172
+ d[H4(i)] = float32_muladd(nn, mm, a[H4(i)], 0, stat);
173
+ }
174
+ clear_tail(d, opr_sz, simd_maxsz(desc));
175
+}
176
--
52
--
177
2.20.1
53
2.25.1
178
179
1
Currently we allow board models to specify the initial value of the
1
From: Richard Henderson <richard.henderson@linaro.org>
2
Secure VTOR register, using an init-svtor property on the TYPE_ARMV7M
3
object which is plumbed through to the CPU. Allow board models to
4
also specify the initial value of the Non-secure VTOR via a similar
5
init-nsvtor property.
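A hypothetical board model would then set both reset vector table addresses when it creates the armv7m container, for example (the object names and addresses below are made up; only the property names come from this patch):

    /* Hypothetical board code, shown only to illustrate the property. */
    object_initialize_child(OBJECT(machine), "armv7m", &s->armv7m, TYPE_ARMV7M);
    qdev_prop_set_uint32(DEVICE(&s->armv7m), "init-svtor", 0x10000000);
    qdev_prop_set_uint32(DEVICE(&s->armv7m), "init-nsvtor", 0x00000000);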
6
2
3
There is no branch prediction in TCG, therefore there is no
4
need to actually include the context number into the predictor.
5
All that is needed, then, is to add the state for SCXTNUM_ELx.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220506180242.216785-21-richard.henderson@linaro.org
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210520152840.24453-10-peter.maydell@linaro.org
10
---
11
---
11
include/hw/arm/armv7m.h | 2 ++
12
docs/system/arm/emulation.rst | 3 ++
12
target/arm/cpu.h | 2 ++
13
target/arm/cpu.h | 16 +++++++++
13
hw/arm/armv7m.c | 7 +++++++
14
target/arm/cpu.c | 5 +++
14
target/arm/cpu.c | 10 ++++++++++
15
target/arm/cpu64.c | 3 +-
15
4 files changed, 21 insertions(+)
16
target/arm/helper.c | 61 ++++++++++++++++++++++++++++++++++-
17
5 files changed, 86 insertions(+), 2 deletions(-)
16
18
17
diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h
19
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
18
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
19
--- a/include/hw/arm/armv7m.h
21
--- a/docs/system/arm/emulation.rst
20
+++ b/include/hw/arm/armv7m.h
22
+++ b/docs/system/arm/emulation.rst
21
@@ -XXX,XX +XXX,XX @@ OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MState, ARMV7M)
23
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
22
* devices will be automatically layered on top of this view.)
24
- FEAT_BF16 (AArch64 BFloat16 instructions)
23
* + Property "idau": IDAU interface (forwarded to CPU object)
25
- FEAT_BTI (Branch Target Identification)
24
* + Property "init-svtor": secure VTOR reset value (forwarded to CPU object)
26
- FEAT_CSV2 (Cache speculation variant 2)
25
+ * + Property "init-nsvtor": non-secure VTOR reset value (forwarded to CPU object)
27
+- FEAT_CSV2_1p1 (Cache speculation variant 2, version 1.1)
26
* + Property "vfp": enable VFP (forwarded to CPU object)
28
+- FEAT_CSV2_1p2 (Cache speculation variant 2, version 1.2)
27
* + Property "dsp": enable DSP (forwarded to CPU object)
29
+- FEAT_CSV2_2 (Cache speculation variant 2, version 2)
28
* + Property "enable-bitband": expose bitbanded IO
30
- FEAT_DIT (Data Independent Timing instructions)
29
@@ -XXX,XX +XXX,XX @@ struct ARMv7MState {
31
- FEAT_DPB (DC CVAP instruction)
30
MemoryRegion *board_memory;
32
- FEAT_Debugv8p2 (Debug changes for v8.2)
31
Object *idau;
32
uint32_t init_svtor;
33
+ uint32_t init_nsvtor;
34
bool enable_bitband;
35
bool start_powered_off;
36
bool vfp;
37
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
33
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
38
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/cpu.h
35
--- a/target/arm/cpu.h
40
+++ b/target/arm/cpu.h
36
+++ b/target/arm/cpu.h
41
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
37
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
42
38
ARMPACKey apdb;
43
/* For v8M, initial value of the Secure VTOR */
39
ARMPACKey apga;
44
uint32_t init_svtor;
40
} keys;
45
+ /* For v8M, initial value of the Non-secure VTOR */
41
+
46
+ uint32_t init_nsvtor;
42
+ uint64_t scxtnum_el[4];
47
43
#endif
48
/* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
44
49
* QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
45
#if defined(CONFIG_USER_ONLY)
50
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
46
@@ -XXX,XX +XXX,XX @@ void pmu_init(ARMCPU *cpu);
51
index XXXXXXX..XXXXXXX 100644
47
#define SCTLR_WXN (1U << 19)
52
--- a/hw/arm/armv7m.c
48
#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */
53
+++ b/hw/arm/armv7m.c
49
#define SCTLR_UWXN (1U << 20) /* v7 onward, AArch32 only */
54
@@ -XXX,XX +XXX,XX @@ static void armv7m_realize(DeviceState *dev, Error **errp)
50
+#define SCTLR_TSCXT (1U << 20) /* FEAT_CSV2_1p2, AArch64 only */
55
return;
51
#define SCTLR_FI (1U << 21) /* up to v7, v8 RES0 */
56
}
52
#define SCTLR_IESB (1U << 21) /* v8.2-IESB, AArch64 only */
57
}
53
#define SCTLR_U (1U << 22) /* up to v6, RAO in v7 */
58
+ if (object_property_find(OBJECT(s->cpu), "init-nsvtor")) {
54
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_dit(const ARMISARegisters *id)
59
+ if (!object_property_set_uint(OBJECT(s->cpu), "init-nsvtor",
55
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0;
60
+ s->init_nsvtor, errp)) {
56
}
61
+ return;
57
62
+ }
58
+static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id)
63
+ }
59
+{
64
if (object_property_find(OBJECT(s->cpu), "start-powered-off")) {
60
+ int key = FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, CSV2);
65
if (!object_property_set_bool(OBJECT(s->cpu), "start-powered-off",
61
+ if (key >= 2) {
66
s->start_powered_off, errp)) {
62
+ return true; /* FEAT_CSV2_2 */
67
@@ -XXX,XX +XXX,XX @@ static Property armv7m_properties[] = {
63
+ }
68
MemoryRegion *),
64
+ if (key == 1) {
69
DEFINE_PROP_LINK("idau", ARMv7MState, idau, TYPE_IDAU_INTERFACE, Object *),
65
+ key = FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, CSV2_FRAC);
70
DEFINE_PROP_UINT32("init-svtor", ARMv7MState, init_svtor, 0),
66
+ return key >= 2; /* FEAT_CSV2_1p2 */
71
+ DEFINE_PROP_UINT32("init-nsvtor", ARMv7MState, init_nsvtor, 0),
67
+ }
72
DEFINE_PROP_BOOL("enable-bitband", ARMv7MState, enable_bitband, false),
68
+ return false;
73
DEFINE_PROP_BOOL("start-powered-off", ARMv7MState, start_powered_off,
69
+}
74
false),
70
+
71
static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id)
72
{
73
return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0;
75
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
74
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
76
index XXXXXXX..XXXXXXX 100644
75
index XXXXXXX..XXXXXXX 100644
77
--- a/target/arm/cpu.c
76
--- a/target/arm/cpu.c
78
+++ b/target/arm/cpu.c
77
+++ b/target/arm/cpu.c
79
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
78
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
80
env->regs[14] = 0xffffffff;
79
*/
81
80
env->cp15.gcr_el1 = 0x1ffff;
82
env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
81
}
83
+ env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;
82
+ /*
84
83
+ * Disable access to SCXTNUM_EL0 from CSV2_1p2.
85
/* Load the initial SP and PC from offset 0 and 4 in the vector table */
84
+ * This is not yet exposed from the Linux kernel in any way.
86
vecbase = env->v7m.vecbase[env->v7m.secure];
85
+ */
87
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
86
+ env->cp15.sctlr_el[1] |= SCTLR_TSCXT;
88
&cpu->init_svtor,
87
#else
89
OBJ_PROP_FLAG_READWRITE);
88
/* Reset into the highest available EL */
89
if (arm_feature(env, ARM_FEATURE_EL3)) {
90
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
91
index XXXXXXX..XXXXXXX 100644
92
--- a/target/arm/cpu64.c
93
+++ b/target/arm/cpu64.c
94
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
95
t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
96
t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */
97
t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
98
- t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 1); /* FEAT_CSV2 */
99
+ t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 2); /* FEAT_CSV2_2 */
100
cpu->isar.id_aa64pfr0 = t;
101
102
t = cpu->isar.id_aa64pfr1;
103
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
104
* we do for EL2 with the virtualization=on property.
105
*/
106
t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */
107
+ t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */
108
cpu->isar.id_aa64pfr1 = t;
109
110
t = cpu->isar.id_aa64mmfr0;
111
diff --git a/target/arm/helper.c b/target/arm/helper.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/target/arm/helper.c
114
+++ b/target/arm/helper.c
115
@@ -XXX,XX +XXX,XX @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
116
if (cpu_isar_feature(aa64_mte, cpu)) {
117
valid_mask |= SCR_ATA;
118
}
119
+ if (cpu_isar_feature(aa64_scxtnum, cpu)) {
120
+ valid_mask |= SCR_ENSCXT;
121
+ }
122
} else {
123
valid_mask &= ~(SCR_RW | SCR_ST);
124
if (cpu_isar_feature(aa32_ras, cpu)) {
125
@@ -XXX,XX +XXX,XX @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
126
if (cpu_isar_feature(aa64_mte, cpu)) {
127
valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
128
}
129
+ if (cpu_isar_feature(aa64_scxtnum, cpu)) {
130
+ valid_mask |= HCR_ENSCXT;
131
+ }
90
}
132
}
91
+ if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
133
92
+ /*
134
/* Clear RES0 bits. */
93
+ * Initial value of the NS VTOR (for cores without the Security
135
@@ -XXX,XX +XXX,XX @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
94
+ * extension, this is the only VTOR)
136
{ K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
95
+ */
137
"TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
96
+ object_property_add_uint32_ptr(obj, "init-nsvtor",
138
97
+ &cpu->init_nsvtor,
139
+ { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
98
+ OBJ_PROP_FLAG_READWRITE);
140
+ "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
99
+ }
141
+ isar_feature_aa64_scxtnum },
100
142
+
101
qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
143
/* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
102
144
/* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
145
};
146
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
147
},
148
};
149
150
-#endif
151
+static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
152
+ bool isread)
153
+{
154
+ uint64_t hcr = arm_hcr_el2_eff(env);
155
+ int el = arm_current_el(env);
156
+
157
+ if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
158
+ if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
159
+ if (hcr & HCR_TGE) {
160
+ return CP_ACCESS_TRAP_EL2;
161
+ }
162
+ return CP_ACCESS_TRAP;
163
+ }
164
+ } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
165
+ return CP_ACCESS_TRAP_EL2;
166
+ }
167
+ if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
168
+ return CP_ACCESS_TRAP_EL2;
169
+ }
170
+ if (el < 3
171
+ && arm_feature(env, ARM_FEATURE_EL3)
172
+ && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
173
+ return CP_ACCESS_TRAP_EL3;
174
+ }
175
+ return CP_ACCESS_OK;
176
+}
177
+
178
+static const ARMCPRegInfo scxtnum_reginfo[] = {
179
+ { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
180
+ .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
181
+ .access = PL0_RW, .accessfn = access_scxtnum,
182
+ .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
183
+ { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
184
+ .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
185
+ .access = PL1_RW, .accessfn = access_scxtnum,
186
+ .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
187
+ { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
188
+ .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
189
+ .access = PL2_RW, .accessfn = access_scxtnum,
190
+ .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
191
+ { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
192
+ .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
193
+ .access = PL3_RW,
194
+ .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
195
+};
196
+#endif /* TARGET_AARCH64 */
197
198
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
199
bool isread)
200
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
201
define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
202
define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
203
}
204
+
205
+ if (cpu_isar_feature(aa64_scxtnum, cpu)) {
206
+ define_arm_cp_regs(cpu, scxtnum_reginfo);
207
+ }
208
#endif
209
210
if (cpu_isar_feature(any_predinv, cpu)) {
103
--
211
--
104
2.20.1
212
2.25.1
105
106
1
From: Jamie Iles <jamie@nuviainc.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The sequence cpu_restore_state() + raise_exception() is equivalent to
3
This extension concerns cache speculation, which TCG does
4
raise_exception_ra(), so use that instead. (In this case we never
4
not implement. Thus we can trivially enable this feature.
5
cared about the syndrome value, because M-profile doesn't use the
6
syndrome; the old code was just written unnecessarily awkwardly.)
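Schematically, the change just collapses the two calls into one (shown out of context, purely to illustrate the shape of the transformation):

    /* Before: sync guest state by hand, then raise the exception. */
    cpu_restore_state(env_cpu(env), GETPC(), true);
    raise_exception(env, EXCP_STKOF, 0, 1);

    /* After: raise_exception_ra() performs the same unwinding itself. */
    raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());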
7
5
8
Cc: Richard Henderson <richard.henderson@linaro.org>
9
Cc: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Jamie Iles <jamie@nuviainc.com>
11
[PMM: Retain edited version of comment; rewrite commit message]
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220506180242.216785-22-richard.henderson@linaro.org
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
10
---
15
target/arm/m_helper.c | 5 +----
11
docs/system/arm/emulation.rst | 1 +
16
target/arm/op_helper.c | 9 +++------
12
target/arm/cpu64.c | 1 +
17
2 files changed, 4 insertions(+), 10 deletions(-)
13
target/arm/cpu_tcg.c | 1 +
14
3 files changed, 3 insertions(+)
18
15
19
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
16
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
20
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
21
--- a/target/arm/m_helper.c
18
--- a/docs/system/arm/emulation.rst
22
+++ b/target/arm/m_helper.c
19
+++ b/docs/system/arm/emulation.rst
23
@@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
20
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
24
limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
21
- FEAT_CSV2_1p1 (Cache speculation variant 2, version 1.1)
25
22
- FEAT_CSV2_1p2 (Cache speculation variant 2, version 1.2)
26
if (val < limit) {
23
- FEAT_CSV2_2 (Cache speculation variant 2, version 2)
27
- CPUState *cs = env_cpu(env);
24
+- FEAT_CSV3 (Cache speculation variant 3)
28
-
25
- FEAT_DIT (Data Independent Timing instructions)
29
- cpu_restore_state(cs, GETPC(), true);
26
- FEAT_DPB (DC CVAP instruction)
30
- raise_exception(env, EXCP_STKOF, 0, 1);
27
- FEAT_Debugv8p2 (Debug changes for v8.2)
31
+ raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
28
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
32
}
33
34
if (is_psp) {
35
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
36
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/op_helper.c
30
--- a/target/arm/cpu64.c
38
+++ b/target/arm/op_helper.c
31
+++ b/target/arm/cpu64.c
39
@@ -XXX,XX +XXX,XX @@ void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
32
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
40
* raising an exception if the limit is breached.
33
t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); /* FEAT_SEL2 */
41
*/
34
t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
42
if (newvalue < v7m_sp_limit(env)) {
35
t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 2); /* FEAT_CSV2_2 */
43
- CPUState *cs = env_cpu(env);
36
+ t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1); /* FEAT_CSV3 */
44
-
37
cpu->isar.id_aa64pfr0 = t;
45
/*
38
46
* Stack limit exceptions are a rare case, so rather than syncing
39
t = cpu->isar.id_aa64pfr1;
47
- * PC/condbits before the call, we use cpu_restore_state() to
40
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
48
- * get them right before raising the exception.
41
index XXXXXXX..XXXXXXX 100644
49
+ * PC/condbits before the call, we use raise_exception_ra() so
42
--- a/target/arm/cpu_tcg.c
50
+ * that cpu_restore_state() will sort them out.
43
+++ b/target/arm/cpu_tcg.c
51
*/
44
@@ -XXX,XX +XXX,XX @@ void aa32_max_features(ARMCPU *cpu)
52
- cpu_restore_state(cs, GETPC(), true);
45
cpu->isar.id_pfr0 = t;
53
- raise_exception(env, EXCP_STKOF, 0, 1);
46
54
+ raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
47
t = cpu->isar.id_pfr2;
55
}
48
+ t = FIELD_DP32(t, ID_PFR2, CSV3, 1); /* FEAT_CSV3 */
56
}
49
t = FIELD_DP32(t, ID_PFR2, SSBS, 1); /* FEAT_SSBS */
50
cpu->isar.id_pfr2 = t;
57
51
58
--
52
--
59
2.20.1
53
2.25.1
60
61
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This is BFMMLA for both AArch64 AdvSIMD and SVE,
3
This extension concerns the hint not to merge memory accesses, which TCG does
4
and VMMLA.BF16 for AArch32 NEON.
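Each 128-bit segment behaves as a 2x2 float32 matrix accumulating the product of a 2x4 bfloat16 matrix with the transpose of another. In scalar terms (sketch only, plain C arithmetic rather than the softfloat bfdotadd() path used by the helper):

    #include <stdint.h>

    static inline float bf16_as_f32(uint16_t h)
    {
        union { uint32_t i; float f; } u = { .i = (uint32_t)h << 16 };
        return u.f;
    }

    /* One BFMMLA segment: C (2x2) += A (2x4) * transpose(B). */
    static void bfmmla_segment(float c[2][2],
                               const uint16_t a[2][4],
                               const uint16_t b[2][4])
    {
        for (int i = 0; i < 2; i++) {
            for (int j = 0; j < 2; j++) {
                for (int k = 0; k < 4; k++) {
                    c[i][j] += bf16_as_f32(a[i][k]) * bf16_as_f32(b[j][k]);
                }
            }
        }
    }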
4
not implement. Thus we can trivially enable this feature.
5
Add a comment to handle_hint for the DGH instruction, but no code.
5
6
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210525225817.400336-9-richard.henderson@linaro.org
9
Message-id: 20220506180242.216785-23-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
---
11
target/arm/helper.h | 3 +++
12
docs/system/arm/emulation.rst | 1 +
12
target/arm/neon-shared.decode | 2 ++
13
target/arm/cpu64.c | 1 +
13
target/arm/sve.decode | 6 +++--
14
target/arm/translate-a64.c | 1 +
14
target/arm/translate-a64.c | 10 +++++++++
15
3 files changed, 3 insertions(+)
15
target/arm/translate-neon.c | 9 ++++++++
16
target/arm/translate-sve.c | 12 ++++++++++
17
target/arm/vec_helper.c | 42 ++++++++++++++++++++++++++++++++++-
18
7 files changed, 81 insertions(+), 3 deletions(-)
19
16
20
diff --git a/target/arm/helper.h b/target/arm/helper.h
17
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
21
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/helper.h
19
--- a/docs/system/arm/emulation.rst
23
+++ b/target/arm/helper.h
20
+++ b/docs/system/arm/emulation.rst
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(gvec_bfdot, TCG_CALL_NO_RWG,
21
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
25
DEF_HELPER_FLAGS_5(gvec_bfdot_idx, TCG_CALL_NO_RWG,
22
- FEAT_CSV2_1p2 (Cache speculation variant 2, version 1.2)
26
void, ptr, ptr, ptr, ptr, i32)
23
- FEAT_CSV2_2 (Cache speculation variant 2, version 2)
27
24
- FEAT_CSV3 (Cache speculation variant 3)
28
+DEF_HELPER_FLAGS_5(gvec_bfmmla, TCG_CALL_NO_RWG,
25
+- FEAT_DGH (Data gathering hint)
29
+ void, ptr, ptr, ptr, ptr, i32)
26
- FEAT_DIT (Data Independent Timing instructions)
30
+
27
- FEAT_DPB (DC CVAP instruction)
31
#ifdef TARGET_AARCH64
28
- FEAT_Debugv8p2 (Debug changes for v8.2)
32
#include "helper-a64.h"
29
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
33
#include "helper-sve.h"
34
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
35
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/neon-shared.decode
31
--- a/target/arm/cpu64.c
37
+++ b/target/arm/neon-shared.decode
32
+++ b/target/arm/cpu64.c
38
@@ -XXX,XX +XXX,XX @@ VUMMLA 1111 1100 0.10 .... .... 1100 .1.1 .... \
33
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
39
vm=%vm_dp vn=%vn_dp vd=%vd_dp
34
t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); /* FEAT_SB */
40
VUSMMLA 1111 1100 1.10 .... .... 1100 .1.0 .... \
35
t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); /* FEAT_SPECRES */
41
vm=%vm_dp vn=%vn_dp vd=%vd_dp
36
t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1); /* FEAT_BF16 */
42
+VMMLA_b16 1111 1100 0.00 .... .... 1100 .1.0 .... \
37
+ t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1); /* FEAT_DGH */
43
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
38
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */
44
39
cpu->isar.id_aa64isar1 = t;
45
VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
46
vn=%vn_dp vd=%vd_dp size=1
47
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/sve.decode
50
+++ b/target/arm/sve.decode
51
@@ -XXX,XX +XXX,XX @@ SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx
52
USDOT_zzzz 01000100 .. 0 ..... 011 110 ..... ..... @rda_rn_rm
53
54
### SVE2 floating point matrix multiply accumulate
55
-
56
-FMMLA 01100100 .. 1 ..... 111001 ..... ..... @rda_rn_rm
57
+{
58
+ BFMMLA 01100100 01 1 ..... 111 001 ..... ..... @rda_rn_rm_e0
59
+ FMMLA 01100100 .. 1 ..... 111 001 ..... ..... @rda_rn_rm
60
+}
61
62
### SVE2 Memory Gather Load Group
63
40
64
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
41
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
65
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
66
--- a/target/arm/translate-a64.c
43
--- a/target/arm/translate-a64.c
67
+++ b/target/arm/translate-a64.c
44
+++ b/target/arm/translate-a64.c
68
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
45
@@ -XXX,XX +XXX,XX @@ static void handle_hint(DisasContext *s, uint32_t insn,
69
}
70
feature = dc_isar_feature(aa64_fcma, s);
71
break;
46
break;
72
+ case 0x1d: /* BFMMLA */
47
case 0b00100: /* SEV */
73
+ if (size != MO_16 || !is_q) {
48
case 0b00101: /* SEVL */
74
+ unallocated_encoding(s);
49
+ case 0b00110: /* DGH */
75
+ return;
50
/* we treat all as NOP at least for now */
76
+ }
51
break;
77
+ feature = dc_isar_feature(aa64_bf16, s);
52
case 0b00111: /* XPACLRI */
78
+ break;
79
case 0x1f: /* BFDOT */
80
switch (size) {
81
case 1:
82
@@ -XXX,XX +XXX,XX @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
83
}
84
return;
85
86
+ case 0xd: /* BFMMLA */
87
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
88
+ return;
89
case 0xf: /* BFDOT */
90
switch (size) {
91
case 1:
92
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
93
index XXXXXXX..XXXXXXX 100644
94
--- a/target/arm/translate-neon.c
95
+++ b/target/arm/translate-neon.c
96
@@ -XXX,XX +XXX,XX @@ static bool trans_VUSMMLA(DisasContext *s, arg_VUSMMLA *a)
97
return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
98
gen_helper_gvec_usmmla_b);
99
}
100
+
101
+static bool trans_VMMLA_b16(DisasContext *s, arg_VMMLA_b16 *a)
102
+{
103
+ if (!dc_isar_feature(aa32_bf16, s)) {
104
+ return false;
105
+ }
106
+ return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
107
+ gen_helper_gvec_bfmmla);
108
+}
109
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
110
index XXXXXXX..XXXXXXX 100644
111
--- a/target/arm/translate-sve.c
112
+++ b/target/arm/translate-sve.c
113
@@ -XXX,XX +XXX,XX @@ static bool trans_BFDOT_zzxz(DisasContext *s, arg_rrxr_esz *a)
114
}
115
return true;
116
}
117
+
118
+static bool trans_BFMMLA(DisasContext *s, arg_rrrr_esz *a)
119
+{
120
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
121
+ return false;
122
+ }
123
+ if (sve_access_check(s)) {
124
+ gen_gvec_ool_zzzz(s, gen_helper_gvec_bfmmla,
125
+ a->rd, a->rn, a->rm, a->ra, 0);
126
+ }
127
+ return true;
128
+}
129
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
130
index XXXXXXX..XXXXXXX 100644
131
--- a/target/arm/vec_helper.c
132
+++ b/target/arm/vec_helper.c
133
@@ -XXX,XX +XXX,XX @@ static void do_mmla_b(void *vd, void *vn, void *vm, void *va, uint32_t desc,
134
* Process the entire segment at once, writing back the
135
* results only after we've consumed all of the inputs.
136
*
137
- * Key to indicies by column:
138
+ * Key to indices by column:
139
* i j i j
140
*/
141
sum0 = a[H4(0 + 0)];
142
@@ -XXX,XX +XXX,XX @@ void HELPER(gvec_bfdot_idx)(void *vd, void *vn, void *vm,
143
}
144
clear_tail(d, opr_sz, simd_maxsz(desc));
145
}
146
+
147
+void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
148
+{
149
+ intptr_t s, opr_sz = simd_oprsz(desc);
150
+ float32 *d = vd, *a = va;
151
+ uint32_t *n = vn, *m = vm;
152
+
153
+ for (s = 0; s < opr_sz / 4; s += 4) {
154
+ float32 sum00, sum01, sum10, sum11;
155
+
156
+ /*
157
+ * Process the entire segment at once, writing back the
158
+ * results only after we've consumed all of the inputs.
159
+ *
160
+ * Key to indices by column:
161
+ * i j i k j k
162
+ */
163
+ sum00 = a[s + H4(0 + 0)];
164
+ sum00 = bfdotadd(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)]);
165
+ sum00 = bfdotadd(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)]);
166
+
167
+ sum01 = a[s + H4(0 + 1)];
168
+ sum01 = bfdotadd(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)]);
169
+ sum01 = bfdotadd(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)]);
170
+
171
+ sum10 = a[s + H4(2 + 0)];
172
+ sum10 = bfdotadd(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)]);
173
+ sum10 = bfdotadd(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)]);
174
+
175
+ sum11 = a[s + H4(2 + 1)];
176
+ sum11 = bfdotadd(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)]);
177
+ sum11 = bfdotadd(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)]);
178
+
179
+ d[s + H4(0 + 0)] = sum00;
180
+ d[s + H4(0 + 1)] = sum01;
181
+ d[s + H4(2 + 0)] = sum10;
182
+ d[s + H4(2 + 1)] = sum11;
183
+ }
184
+ clear_tail(d, opr_sz, simd_maxsz(desc));
185
+}
186
--
53
--
187
2.20.1
54
2.25.1
188
189
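
As background for the gvec_bfmmla helper in the patch above: per 128-bit
segment, BFMMLA accumulates a [2x4] x [4x2] BFloat16 matrix product into a
2x2 single-precision tile. A minimal standalone sketch of that tile, using
plain float arithmetic in place of the paired bfdotadd() products (so the
rounding behaviour differs from the real helper; all names below are
illustrative, not QEMU code):

#include <stdio.h>

/* d = a + n * transpose(m); n and m are 2x4 tiles, a and d are 2x2 */
static void mmla_tile(float d[2][2], const float a[2][2],
                      const float n[2][4], const float m[2][4])
{
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
            float sum = a[i][j];              /* accumulator input */
            for (int k = 0; k < 4; k++) {
                sum += n[i][k] * m[j][k];     /* row i of n dot row j of m */
            }
            d[i][j] = sum;
        }
    }
}

int main(void)
{
    float a[2][2] = { { 1, 0 }, { 0, 1 } };
    float n[2][4] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
    float m[2][4] = { { 1, 1, 1, 1 }, { 2, 2, 2, 2 } };
    float d[2][2];

    mmla_tile(d, a, n, m);
    printf("%g %g\n%g %g\n", d[0][0], d[0][1], d[1][0], d[1][1]);
    return 0;
}
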
1
Add the isar feature check functions we will need for v8.1M MVE:
1
From: Richard Henderson <richard.henderson@linaro.org>
2
* a check for MVE present: this corresponds to the pseudocode's
3
CheckDecodeFaults(ExtType_Mve)
4
* a check for the optional floating-point part of MVE: this
5
corresponds to CheckDecodeFaults(ExtType_MveFp)
6
2
3
Enable the a76 for virt and sbsa board use.
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220506180242.216785-24-richard.henderson@linaro.org
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210520152840.24453-2-peter.maydell@linaro.org
10
---
9
---
11
target/arm/cpu.h | 22 ++++++++++++++++++++++
10
docs/system/arm/virt.rst | 1 +
12
1 file changed, 22 insertions(+)
11
hw/arm/sbsa-ref.c | 1 +
12
hw/arm/virt.c | 1 +
13
target/arm/cpu64.c | 66 ++++++++++++++++++++++++++++++++++++++++
14
4 files changed, 69 insertions(+)
13
15
14
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpu.h
18
--- a/docs/system/arm/virt.rst
17
+++ b/target/arm/cpu.h
19
+++ b/docs/system/arm/virt.rst
18
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
20
@@ -XXX,XX +XXX,XX @@ Supported guest CPU types:
19
}
21
- ``cortex-a53`` (64-bit)
22
- ``cortex-a57`` (64-bit)
23
- ``cortex-a72`` (64-bit)
24
+- ``cortex-a76`` (64-bit)
25
- ``a64fx`` (64-bit)
26
- ``host`` (with KVM only)
27
- ``max`` (same as ``host`` for KVM; best possible emulation with TCG)
28
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/hw/arm/sbsa-ref.c
31
+++ b/hw/arm/sbsa-ref.c
32
@@ -XXX,XX +XXX,XX @@ static const int sbsa_ref_irqmap[] = {
33
static const char * const valid_cpus[] = {
34
ARM_CPU_TYPE_NAME("cortex-a57"),
35
ARM_CPU_TYPE_NAME("cortex-a72"),
36
+ ARM_CPU_TYPE_NAME("cortex-a76"),
37
ARM_CPU_TYPE_NAME("max"),
38
};
39
40
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/hw/arm/virt.c
43
+++ b/hw/arm/virt.c
44
@@ -XXX,XX +XXX,XX @@ static const char *valid_cpus[] = {
45
ARM_CPU_TYPE_NAME("cortex-a53"),
46
ARM_CPU_TYPE_NAME("cortex-a57"),
47
ARM_CPU_TYPE_NAME("cortex-a72"),
48
+ ARM_CPU_TYPE_NAME("cortex-a76"),
49
ARM_CPU_TYPE_NAME("a64fx"),
50
ARM_CPU_TYPE_NAME("host"),
51
ARM_CPU_TYPE_NAME("max"),
52
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/arm/cpu64.c
55
+++ b/target/arm/cpu64.c
56
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
57
define_cortex_a72_a57_a53_cp_reginfo(cpu);
20
}
58
}
21
59
22
+static inline bool isar_feature_aa32_mve(const ARMISARegisters *id)
60
+static void aarch64_a76_initfn(Object *obj)
23
+{
61
+{
24
+ /*
62
+ ARMCPU *cpu = ARM_CPU(obj);
25
+ * Return true if MVE is supported (either integer or floating point).
63
+
26
+ * We must check for M-profile as the MVFR1 field means something
64
+ cpu->dtb_compatible = "arm,cortex-a76";
27
+ * else for A-profile.
65
+ set_feature(&cpu->env, ARM_FEATURE_V8);
28
+ */
66
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
29
+ return isar_feature_aa32_mprofile(id) &&
67
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
30
+ FIELD_EX32(id->mvfr1, MVFR1, MVE) > 0;
68
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
69
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
70
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
71
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
72
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
73
+
74
+ /* Ordered by B2.4 AArch64 registers by functional group */
75
+ cpu->clidr = 0x82000023;
76
+ cpu->ctr = 0x8444C004;
77
+ cpu->dcz_blocksize = 4;
78
+ cpu->isar.id_aa64dfr0 = 0x0000000010305408ull;
79
+ cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
80
+ cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
81
+ cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull;
82
+ cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
83
+ cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
84
+ cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */
85
+ cpu->isar.id_aa64pfr1 = 0x0000000000000010ull;
86
+ cpu->id_afr0 = 0x00000000;
87
+ cpu->isar.id_dfr0 = 0x04010088;
88
+ cpu->isar.id_isar0 = 0x02101110;
89
+ cpu->isar.id_isar1 = 0x13112111;
90
+ cpu->isar.id_isar2 = 0x21232042;
91
+ cpu->isar.id_isar3 = 0x01112131;
92
+ cpu->isar.id_isar4 = 0x00010142;
93
+ cpu->isar.id_isar5 = 0x01011121;
94
+ cpu->isar.id_isar6 = 0x00000010;
95
+ cpu->isar.id_mmfr0 = 0x10201105;
96
+ cpu->isar.id_mmfr1 = 0x40000000;
97
+ cpu->isar.id_mmfr2 = 0x01260000;
98
+ cpu->isar.id_mmfr3 = 0x02122211;
99
+ cpu->isar.id_mmfr4 = 0x00021110;
100
+ cpu->isar.id_pfr0 = 0x10010131;
101
+ cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
102
+ cpu->isar.id_pfr2 = 0x00000011;
103
+ cpu->midr = 0x414fd0b1; /* r4p1 */
104
+ cpu->revidr = 0;
105
+
106
+ /* From B2.18 CCSIDR_EL1 */
107
+ cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */
108
+ cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */
109
+ cpu->ccsidr[2] = 0x707fe03a; /* 512KB L2 cache */
110
+
111
+ /* From B2.93 SCTLR_EL3 */
112
+ cpu->reset_sctlr = 0x30c50838;
113
+
114
+ /* From B4.23 ICH_VTR_EL2 */
115
+ cpu->gic_num_lrs = 4;
116
+ cpu->gic_vpribits = 5;
117
+ cpu->gic_vprebits = 5;
118
+
119
+ /* From B5.1 AdvSIMD AArch64 register summary */
120
+ cpu->isar.mvfr0 = 0x10110222;
121
+ cpu->isar.mvfr1 = 0x13211111;
122
+ cpu->isar.mvfr2 = 0x00000043;
31
+}
123
+}
32
+
124
+
33
+static inline bool isar_feature_aa32_mve_fp(const ARMISARegisters *id)
125
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
34
+{
35
+ /*
36
+ * Return true if MVE with floating point is supported.
37
+ * We must check for M-profile as the MVFR1 field means something
38
+ * else for A-profile.
39
+ */
40
+ return isar_feature_aa32_mprofile(id) &&
41
+ FIELD_EX32(id->mvfr1, MVFR1, MVE) >= 2;
42
+}
43
+
44
static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id)
45
{
126
{
46
/*
127
/*
128
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo aarch64_cpus[] = {
129
{ .name = "cortex-a57", .initfn = aarch64_a57_initfn },
130
{ .name = "cortex-a53", .initfn = aarch64_a53_initfn },
131
{ .name = "cortex-a72", .initfn = aarch64_a72_initfn },
132
+ { .name = "cortex-a76", .initfn = aarch64_a76_initfn },
133
{ .name = "a64fx", .initfn = aarch64_a64fx_initfn },
134
{ .name = "max", .initfn = aarch64_max_initfn },
135
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
47
--
136
--
48
2.20.1
137
2.25.1
49
50
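
The two isar_feature_aa32_mve checks added above reduce to reading one 4-bit
register field (plus requiring an M-profile CPU): a field value of 0 means no
MVE, 1 means integer-only MVE, and 2 or more means MVE including the
floating-point extension. A toy sketch of that classification (the field
offset below is a placeholder for illustration, not the architectural MVFR1
bit position):

#include <stdio.h>
#include <stdint.h>

#define MVE_FIELD_SHIFT 8      /* placeholder offset, illustration only */
#define MVE_FIELD_MASK  0xf

static const char *mve_level(uint32_t mvfr1)
{
    uint32_t mve = (mvfr1 >> MVE_FIELD_SHIFT) & MVE_FIELD_MASK;

    if (mve >= 2) {
        return "MVE with floating point";
    } else if (mve == 1) {
        return "integer-only MVE";
    }
    return "no MVE";
}

int main(void)
{
    printf("%s\n", mve_level(0x0u << MVE_FIELD_SHIFT));
    printf("%s\n", mve_level(0x1u << MVE_FIELD_SHIFT));
    printf("%s\n", mve_level(0x2u << MVE_FIELD_SHIFT));
    return 0;
}
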
Deleted patch
1
The do_vfp_2op_sp() and do_vfp_2op_dp() functions currently check
2
whether floating point is supported via the aa32_fpdp_v2 and
3
aa32_fpsp_v2 isar checks. For v8.1M MVE support, the VMOV_reg trans
4
functions (but not any of the others) need to update this to also
5
allow the insn if MVE is implemented. Move the check out of the do_
6
function and into its callsites (which are all implemented via the
7
DO_VFP_2OP macro), so we have a place to change the check for the
8
VMOV insns.
9
1
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20210520152840.24453-4-peter.maydell@linaro.org
13
---
14
target/arm/translate-vfp.c | 37 +++++++++++++++++++------------------
15
1 file changed, 19 insertions(+), 18 deletions(-)
16
17
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/translate-vfp.c
20
+++ b/target/arm/translate-vfp.c
21
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
22
int veclen = s->vec_len;
23
TCGv_i32 f0, fd;
24
25
- if (!dc_isar_feature(aa32_fpsp_v2, s)) {
26
- return false;
27
- }
28
+ /* Note that the caller must check the aa32_fpsp_v2 feature. */
29
30
if (!dc_isar_feature(aa32_fpshvec, s) &&
31
(veclen != 0 || s->vec_stride != 0)) {
32
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
33
*/
34
TCGv_i32 f0;
35
36
+ /* Note that the caller must check the aa32_fp16_arith feature */
37
+
38
if (!dc_isar_feature(aa32_fp16_arith, s)) {
39
return false;
40
}
41
@@ -XXX,XX +XXX,XX @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
42
int veclen = s->vec_len;
43
TCGv_i64 f0, fd;
44
45
- if (!dc_isar_feature(aa32_fpdp_v2, s)) {
46
- return false;
47
- }
48
+ /* Note that the caller must check the aa32_fpdp_v2 feature. */
49
50
/* UNDEF accesses to D16-D31 if they don't exist */
51
if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
52
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
53
return true;
54
}
55
56
-#define DO_VFP_2OP(INSN, PREC, FN) \
57
+#define DO_VFP_2OP(INSN, PREC, FN, CHECK) \
58
static bool trans_##INSN##_##PREC(DisasContext *s, \
59
arg_##INSN##_##PREC *a) \
60
{ \
61
+ if (!dc_isar_feature(CHECK, s)) { \
62
+ return false; \
63
+ } \
64
return do_vfp_2op_##PREC(s, FN, a->vd, a->vm); \
65
}
66
67
-DO_VFP_2OP(VMOV_reg, sp, tcg_gen_mov_i32)
68
-DO_VFP_2OP(VMOV_reg, dp, tcg_gen_mov_i64)
69
+DO_VFP_2OP(VMOV_reg, sp, tcg_gen_mov_i32, aa32_fpsp_v2)
70
+DO_VFP_2OP(VMOV_reg, dp, tcg_gen_mov_i64, aa32_fpdp_v2)
71
72
-DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh)
73
-DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss)
74
-DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd)
75
+DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh, aa32_fp16_arith)
76
+DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2)
77
+DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd, aa32_fpdp_v2)
78
79
-DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh)
80
-DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs)
81
-DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd)
82
+DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh, aa32_fp16_arith)
83
+DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs, aa32_fpsp_v2)
84
+DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd, aa32_fpdp_v2)
85
86
static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
87
{
88
@@ -XXX,XX +XXX,XX @@ static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
89
gen_helper_vfp_sqrtd(vd, vm, cpu_env);
90
}
91
92
-DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp)
93
-DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp)
94
-DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp)
95
+DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
96
+DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp, aa32_fpsp_v2)
97
+DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp, aa32_fpdp_v2)
98
99
static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
100
{
101
--
102
2.20.1
103
104
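
To make the macro change above concrete, one instantiation such as
DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2) now expands to
roughly the following (hand-expanded for illustration; it relies on the
usual QEMU decodetree/translator types and is not compilable on its own):

static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
{
    /* The feature check now lives in the per-insn trans function... */
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }
    /* ...so the shared do_vfp_2op_sp() no longer has to repeat it. */
    return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
}
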
Deleted patch
1
Split out the handling of VMOV_reg_sp and VMOV_reg_dp so that we can
2
permit the insns if either FP or MVE are present.
3
1
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210520152840.24453-5-peter.maydell@linaro.org
7
---
8
target/arm/translate-vfp.c | 15 +++++++++++++--
9
1 file changed, 13 insertions(+), 2 deletions(-)
10
11
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/translate-vfp.c
14
+++ b/target/arm/translate-vfp.c
15
@@ -XXX,XX +XXX,XX @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
16
return do_vfp_2op_##PREC(s, FN, a->vd, a->vm); \
17
}
18
19
-DO_VFP_2OP(VMOV_reg, sp, tcg_gen_mov_i32, aa32_fpsp_v2)
20
-DO_VFP_2OP(VMOV_reg, dp, tcg_gen_mov_i64, aa32_fpdp_v2)
21
+#define DO_VFP_VMOV(INSN, PREC, FN) \
22
+ static bool trans_##INSN##_##PREC(DisasContext *s, \
23
+ arg_##INSN##_##PREC *a) \
24
+ { \
25
+ if (!dc_isar_feature(aa32_fp##PREC##_v2, s) && \
26
+ !dc_isar_feature(aa32_mve, s)) { \
27
+ return false; \
28
+ } \
29
+ return do_vfp_2op_##PREC(s, FN, a->vd, a->vm); \
30
+ }
31
+
32
+DO_VFP_VMOV(VMOV_reg, sp, tcg_gen_mov_i32)
33
+DO_VFP_VMOV(VMOV_reg, dp, tcg_gen_mov_i64)
34
35
DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh, aa32_fp16_arith)
36
DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2)
37
--
38
2.20.1
39
40
Deleted patch
1
The fp_sysreg_checks() function is supposed to be returning an
2
FPSysRegCheckResult, which is an enum with three possible values.
3
However, three places in the function "return false" (a hangover from
4
a previous iteration of the design where the function just returned a
5
bool). Make these return FPSysRegCheckFailed instead (for no
6
functional change, since both false and FPSysRegCheckFailed are
7
zero).
8
1
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20210520152840.24453-6-peter.maydell@linaro.org
12
---
13
target/arm/translate-vfp.c | 6 +++---
14
1 file changed, 3 insertions(+), 3 deletions(-)
15
16
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/translate-vfp.c
19
+++ b/target/arm/translate-vfp.c
20
@@ -XXX,XX +XXX,XX @@ static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
21
break;
22
case ARM_VFP_FPSCR_NZCVQC:
23
if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
24
- return false;
25
+ return FPSysRegCheckFailed;
26
}
27
break;
28
case ARM_VFP_FPCXT_S:
29
case ARM_VFP_FPCXT_NS:
30
if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
31
- return false;
32
+ return FPSysRegCheckFailed;
33
}
34
if (!s->v8m_secure) {
35
- return false;
36
+ return FPSysRegCheckFailed;
37
}
38
break;
39
default:
40
--
41
2.20.1
42
43
Deleted patch
1
The M-profile FPSCR has an LTPSIZE field, but if MVE is not
2
implemented it is read-only and always reads as 4; this is how QEMU
3
currently handles it.
4
1
5
Make the field writable when MVE is implemented.
6
7
We can safely add the field to the MVE migration struct because
8
currently no CPUs enable MVE and so the migration struct is never
9
used.
10
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20210520152840.24453-8-peter.maydell@linaro.org
14
---
15
target/arm/cpu.h | 3 ++-
16
target/arm/machine.c | 1 +
17
target/arm/vfp_helper.c | 9 ++++++---
18
3 files changed, 9 insertions(+), 4 deletions(-)
19
20
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/cpu.h
23
+++ b/target/arm/cpu.h
24
@@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState {
25
uint32_t fpdscr[M_REG_NUM_BANKS];
26
uint32_t cpacr[M_REG_NUM_BANKS];
27
uint32_t nsacr;
28
- int ltpsize;
29
+ uint32_t ltpsize;
30
uint32_t vpr;
31
} v7m;
32
33
@@ -XXX,XX +XXX,XX @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val);
34
35
#define FPCR_LTPSIZE_SHIFT 16 /* LTPSIZE, M-profile only */
36
#define FPCR_LTPSIZE_MASK (7 << FPCR_LTPSIZE_SHIFT)
37
+#define FPCR_LTPSIZE_LENGTH 3
38
39
#define FPCR_NZCV_MASK (FPCR_N | FPCR_Z | FPCR_C | FPCR_V)
40
#define FPCR_NZCVQC_MASK (FPCR_NZCV_MASK | FPCR_QC)
41
diff --git a/target/arm/machine.c b/target/arm/machine.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/machine.c
44
+++ b/target/arm/machine.c
45
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m_mve = {
46
.needed = mve_needed,
47
.fields = (VMStateField[]) {
48
VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
49
+ VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
50
VMSTATE_END_OF_LIST()
51
},
52
};
53
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/arm/vfp_helper.c
56
+++ b/target/arm/vfp_helper.c
57
@@ -XXX,XX +XXX,XX @@ uint32_t vfp_get_fpscr(CPUARMState *env)
58
59
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
60
{
61
+ ARMCPU *cpu = env_archcpu(env);
62
+
63
/* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
64
- if (!cpu_isar_feature(any_fp16, env_archcpu(env))) {
65
+ if (!cpu_isar_feature(any_fp16, cpu)) {
66
val &= ~FPCR_FZ16;
67
}
68
69
@@ -XXX,XX +XXX,XX @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
70
* because in v7A no-short-vector-support cores still had to
71
* allow Stride/Len to be written with the only effect that
72
* some insns are required to UNDEF if the guest sets them.
73
- *
74
- * TODO: if M-profile MVE implemented, set LTPSIZE.
75
*/
76
env->vfp.vec_len = extract32(val, 16, 3);
77
env->vfp.vec_stride = extract32(val, 20, 2);
78
+ } else if (cpu_isar_feature(aa32_mve, cpu)) {
79
+ env->v7m.ltpsize = extract32(val, FPCR_LTPSIZE_SHIFT,
80
+ FPCR_LTPSIZE_LENGTH);
81
}
82
83
if (arm_feature(env, ARM_FEATURE_NEON)) {
84
--
85
2.20.1
86
87
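
The new LTPSIZE handling above is a plain 3-bit field extract at bit 16 of
the FPSCR value being written. A standalone sketch of that extraction, with
a local helper standing in for QEMU's extract32():

#include <stdio.h>
#include <stdint.h>

#define FPCR_LTPSIZE_SHIFT  16
#define FPCR_LTPSIZE_LENGTH 3

/* Minimal stand-in for QEMU's extract32() */
static uint32_t extract_bits(uint32_t value, int start, int length)
{
    return (value >> start) & ((1u << length) - 1);
}

int main(void)
{
    uint32_t fpscr = 4u << FPCR_LTPSIZE_SHIFT;   /* LTPSIZE == 4 */

    printf("LTPSIZE = %u\n",
           extract_bits(fpscr, FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH));
    return 0;
}
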
Deleted patch
1
The official punctuation for Arm CPU names uses a hyphen, like
2
"Cortex-A9". We mostly follow this, but in a few places usage
3
without the hyphen has crept in. Fix those so we consistently
4
use the same way of writing the CPU name.
5
1
6
This commit was created with:
7
git grep -z -l 'Cortex ' | xargs -0 sed -i 's/Cortex /Cortex-/'
8
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Message-id: 20210527095152.10968-1-peter.maydell@linaro.org
14
---
15
docs/system/arm/aspeed.rst | 4 ++--
16
docs/system/arm/nuvoton.rst | 6 +++---
17
docs/system/arm/sabrelite.rst | 2 +-
18
include/hw/arm/allwinner-h3.h | 2 +-
19
hw/arm/aspeed.c | 6 +++---
20
hw/arm/mcimx6ul-evk.c | 2 +-
21
hw/arm/mcimx7d-sabre.c | 2 +-
22
hw/arm/npcm7xx_boards.c | 4 ++--
23
hw/arm/sabrelite.c | 2 +-
24
hw/misc/npcm7xx_clk.c | 2 +-
25
10 files changed, 16 insertions(+), 16 deletions(-)
26
27
diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst
28
index XXXXXXX..XXXXXXX 100644
29
--- a/docs/system/arm/aspeed.rst
30
+++ b/docs/system/arm/aspeed.rst
31
@@ -XXX,XX +XXX,XX @@ The QEMU Aspeed machines model BMCs of various OpenPOWER systems and
32
Aspeed evaluation boards. They are based on different releases of the
33
Aspeed SoC : the AST2400 integrating an ARM926EJ-S CPU (400MHz), the
34
AST2500 with an ARM1176JZS CPU (800MHz) and more recently the AST2600
35
-with dual cores ARM Cortex A7 CPUs (1.2GHz).
36
+with dual cores ARM Cortex-A7 CPUs (1.2GHz).
37
38
The SoC comes with RAM, Gigabit ethernet, USB, SD/MMC, USB, SPI, I2C,
39
etc.
40
@@ -XXX,XX +XXX,XX @@ AST2500 SoC based machines :
41
42
AST2600 SoC based machines :
43
44
-- ``ast2600-evb`` Aspeed AST2600 Evaluation board (Cortex A7)
45
+- ``ast2600-evb`` Aspeed AST2600 Evaluation board (Cortex-A7)
46
- ``tacoma-bmc`` OpenPOWER Witherspoon POWER9 AST2600 BMC
47
48
Supported devices
49
diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst
50
index XXXXXXX..XXXXXXX 100644
51
--- a/docs/system/arm/nuvoton.rst
52
+++ b/docs/system/arm/nuvoton.rst
53
@@ -XXX,XX +XXX,XX @@ Nuvoton iBMC boards (``npcm750-evb``, ``quanta-gsj``)
54
55
The `Nuvoton iBMC`_ chips (NPCM7xx) are a family of ARM-based SoCs that are
56
designed to be used as Baseboard Management Controllers (BMCs) in various
57
-servers. They all feature one or two ARM Cortex A9 CPU cores, as well as an
58
+servers. They all feature one or two ARM Cortex-A9 CPU cores, as well as an
59
assortment of peripherals targeted for either Enterprise or Data Center /
60
Hyperscale applications. The former is a superset of the latter, so NPCM750 has
61
all the peripherals of NPCM730 and more.
62
63
.. _Nuvoton iBMC: https://www.nuvoton.com/products/cloud-computing/ibmc/
64
65
-The NPCM750 SoC has two Cortex A9 cores and is targeted for the Enterprise
66
+The NPCM750 SoC has two Cortex-A9 cores and is targeted for the Enterprise
67
segment. The following machines are based on this chip :
68
69
- ``npcm750-evb`` Nuvoton NPCM750 Evaluation board
70
71
-The NPCM730 SoC has two Cortex A9 cores and is targeted for Data Center and
72
+The NPCM730 SoC has two Cortex-A9 cores and is targeted for Data Center and
73
Hyperscale applications. The following machines are based on this chip :
74
75
- ``quanta-gsj`` Quanta GSJ server BMC
76
diff --git a/docs/system/arm/sabrelite.rst b/docs/system/arm/sabrelite.rst
77
index XXXXXXX..XXXXXXX 100644
78
--- a/docs/system/arm/sabrelite.rst
79
+++ b/docs/system/arm/sabrelite.rst
80
@@ -XXX,XX +XXX,XX @@ Supported devices
81
82
The SABRE Lite machine supports the following devices:
83
84
- * Up to 4 Cortex A9 cores
85
+ * Up to 4 Cortex-A9 cores
86
* Generic Interrupt Controller
87
* 1 Clock Controller Module
88
* 1 System Reset Controller
89
diff --git a/include/hw/arm/allwinner-h3.h b/include/hw/arm/allwinner-h3.h
90
index XXXXXXX..XXXXXXX 100644
91
--- a/include/hw/arm/allwinner-h3.h
92
+++ b/include/hw/arm/allwinner-h3.h
93
@@ -XXX,XX +XXX,XX @@
94
*/
95
96
/*
97
- * The Allwinner H3 is a System on Chip containing four ARM Cortex A7
98
+ * The Allwinner H3 is a System on Chip containing four ARM Cortex-A7
99
* processor cores. Features and specifications include DDR2/DDR3 memory,
100
* SD/MMC storage cards, 10/100/1000Mbit Ethernet, USB 2.0, HDMI and
101
* various I/O modules.
102
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
103
index XXXXXXX..XXXXXXX 100644
104
--- a/hw/arm/aspeed.c
105
+++ b/hw/arm/aspeed.c
106
@@ -XXX,XX +XXX,XX @@ static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data)
107
MachineClass *mc = MACHINE_CLASS(oc);
108
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
109
110
- mc->desc = "Aspeed AST2600 EVB (Cortex A7)";
111
+ mc->desc = "Aspeed AST2600 EVB (Cortex-A7)";
112
amc->soc_name = "ast2600-a1";
113
amc->hw_strap1 = AST2600_EVB_HW_STRAP1;
114
amc->hw_strap2 = AST2600_EVB_HW_STRAP2;
115
@@ -XXX,XX +XXX,XX @@ static void aspeed_machine_tacoma_class_init(ObjectClass *oc, void *data)
116
MachineClass *mc = MACHINE_CLASS(oc);
117
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
118
119
- mc->desc = "OpenPOWER Tacoma BMC (Cortex A7)";
120
+ mc->desc = "OpenPOWER Tacoma BMC (Cortex-A7)";
121
amc->soc_name = "ast2600-a1";
122
amc->hw_strap1 = TACOMA_BMC_HW_STRAP1;
123
amc->hw_strap2 = TACOMA_BMC_HW_STRAP2;
124
@@ -XXX,XX +XXX,XX @@ static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data)
125
MachineClass *mc = MACHINE_CLASS(oc);
126
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
127
128
- mc->desc = "IBM Rainier BMC (Cortex A7)";
129
+ mc->desc = "IBM Rainier BMC (Cortex-A7)";
130
amc->soc_name = "ast2600-a1";
131
amc->hw_strap1 = RAINIER_BMC_HW_STRAP1;
132
amc->hw_strap2 = RAINIER_BMC_HW_STRAP2;
133
diff --git a/hw/arm/mcimx6ul-evk.c b/hw/arm/mcimx6ul-evk.c
134
index XXXXXXX..XXXXXXX 100644
135
--- a/hw/arm/mcimx6ul-evk.c
136
+++ b/hw/arm/mcimx6ul-evk.c
137
@@ -XXX,XX +XXX,XX @@ static void mcimx6ul_evk_init(MachineState *machine)
138
139
static void mcimx6ul_evk_machine_init(MachineClass *mc)
140
{
141
- mc->desc = "Freescale i.MX6UL Evaluation Kit (Cortex A7)";
142
+ mc->desc = "Freescale i.MX6UL Evaluation Kit (Cortex-A7)";
143
mc->init = mcimx6ul_evk_init;
144
mc->max_cpus = FSL_IMX6UL_NUM_CPUS;
145
mc->default_ram_id = "mcimx6ul-evk.ram";
146
diff --git a/hw/arm/mcimx7d-sabre.c b/hw/arm/mcimx7d-sabre.c
147
index XXXXXXX..XXXXXXX 100644
148
--- a/hw/arm/mcimx7d-sabre.c
149
+++ b/hw/arm/mcimx7d-sabre.c
150
@@ -XXX,XX +XXX,XX @@ static void mcimx7d_sabre_init(MachineState *machine)
151
152
static void mcimx7d_sabre_machine_init(MachineClass *mc)
153
{
154
- mc->desc = "Freescale i.MX7 DUAL SABRE (Cortex A7)";
155
+ mc->desc = "Freescale i.MX7 DUAL SABRE (Cortex-A7)";
156
mc->init = mcimx7d_sabre_init;
157
mc->max_cpus = FSL_IMX7_NUM_CPUS;
158
mc->default_ram_id = "mcimx7d-sabre.ram";
159
diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/hw/arm/npcm7xx_boards.c
162
+++ b/hw/arm/npcm7xx_boards.c
163
@@ -XXX,XX +XXX,XX @@ static void npcm750_evb_machine_class_init(ObjectClass *oc, void *data)
164
165
npcm7xx_set_soc_type(nmc, TYPE_NPCM750);
166
167
- mc->desc = "Nuvoton NPCM750 Evaluation Board (Cortex A9)";
168
+ mc->desc = "Nuvoton NPCM750 Evaluation Board (Cortex-A9)";
169
mc->init = npcm750_evb_init;
170
mc->default_ram_size = 512 * MiB;
171
};
172
@@ -XXX,XX +XXX,XX @@ static void gsj_machine_class_init(ObjectClass *oc, void *data)
173
174
npcm7xx_set_soc_type(nmc, TYPE_NPCM730);
175
176
- mc->desc = "Quanta GSJ (Cortex A9)";
177
+ mc->desc = "Quanta GSJ (Cortex-A9)";
178
mc->init = quanta_gsj_init;
179
mc->default_ram_size = 512 * MiB;
180
};
181
diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c
182
index XXXXXXX..XXXXXXX 100644
183
--- a/hw/arm/sabrelite.c
184
+++ b/hw/arm/sabrelite.c
185
@@ -XXX,XX +XXX,XX @@ static void sabrelite_init(MachineState *machine)
186
187
static void sabrelite_machine_init(MachineClass *mc)
188
{
189
- mc->desc = "Freescale i.MX6 Quad SABRE Lite Board (Cortex A9)";
190
+ mc->desc = "Freescale i.MX6 Quad SABRE Lite Board (Cortex-A9)";
191
mc->init = sabrelite_init;
192
mc->max_cpus = FSL_IMX6_NUM_CPUS;
193
mc->ignore_memory_transaction_failures = true;
194
diff --git a/hw/misc/npcm7xx_clk.c b/hw/misc/npcm7xx_clk.c
195
index XXXXXXX..XXXXXXX 100644
196
--- a/hw/misc/npcm7xx_clk.c
197
+++ b/hw/misc/npcm7xx_clk.c
198
@@ -XXX,XX +XXX,XX @@
199
#define NPCM7XX_CLOCK_REF_HZ (25000000)
200
201
/* Register Field Definitions */
202
-#define NPCM7XX_CLK_WDRCR_CA9C BIT(0) /* Cortex A9 Cores */
203
+#define NPCM7XX_CLK_WDRCR_CA9C BIT(0) /* Cortex-A9 Cores */
204
205
#define PLLCON_LOKI BIT(31)
206
#define PLLCON_LOKS BIT(30)
207
--
208
2.20.1
209
210
Deleted patch
1
From: Jamie Iles <jamie@nuviainc.com>
2
1
3
The DAIF and PAC checks used raise_exception_ra to raise an exception
4
and unwind CPU state but raise_exception_ra is currently designed for
5
handling data aborts as the syndrome is partially precomputed and
6
encoded in the TB and then merged in merge_syn_data_abort when handling
7
the data abort. Using raise_exception_ra for DAIF and PAC checks
8
results in an empty syndrome being retrieved from data[2] in
9
restore_state_to_opc and setting ESR to 0. This manifested as:
10
11
kvm [571]: Unknown exception class: esr: 0x000000 –
12
Unknown/Uncategorized
13
14
when launching a KVM guest when the host qemu used a CPU supporting
15
EL2+pointer authentication and enabling pointer authentication in the
16
guest.
17
18
Rework raise_exception_ra such that the state is restored before raising
19
the exception so that the exception is not clobbered by
20
restore_state_to_opc.
21
22
Fixes: 0d43e1a2d29a ("target/arm: Add PAuth helpers")
23
Cc: Richard Henderson <richard.henderson@linaro.org>
24
Cc: Peter Maydell <peter.maydell@linaro.org>
25
Signed-off-by: Jamie Iles <jamie@nuviainc.com>
26
[PMM: added comment]
27
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
28
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
29
---
30
target/arm/op_helper.c | 11 +++++++++--
31
1 file changed, 9 insertions(+), 2 deletions(-)
32
33
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/op_helper.c
36
+++ b/target/arm/op_helper.c
37
@@ -XXX,XX +XXX,XX @@ void raise_exception(CPUARMState *env, uint32_t excp,
38
void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
39
uint32_t target_el, uintptr_t ra)
40
{
41
- CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
42
- cpu_loop_exit_restore(cs, ra);
43
+ CPUState *cs = env_cpu(env);
44
+
45
+ /*
46
+ * restore_state_to_opc() will set env->exception.syndrome, so
47
+ * we must restore CPU state here before setting the syndrome
48
+ * the caller passed us, and cannot use cpu_loop_exit_restore().
49
+ */
50
+ cpu_restore_state(cs, ra, true);
51
+ raise_exception(env, excp, syndrome, target_el);
52
}
53
54
uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
55
--
56
2.20.1
57
58
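
The ordering problem described in the patch above can be seen with a toy
model: if the intended syndrome is stored before the state-restore step,
the restore (which rebuilds exception state from data recorded in the TB)
wipes it out. This is only an illustration of the failure mode, not QEMU
code, and the syndrome value is arbitrary:

#include <stdio.h>
#include <stdint.h>

static uint32_t exception_syndrome;

/* Stand-in for restore_state_to_opc(): rebuilds state recorded in the TB,
 * including a syndrome value (empty here, as for the DAIF/PAC checks). */
static void restore_state(void)
{
    exception_syndrome = 0;
}

static void raise_with_syndrome(uint32_t syn, int restore_first)
{
    if (restore_first) {
        restore_state();            /* fixed order: unwind, then set it */
        exception_syndrome = syn;
    } else {
        exception_syndrome = syn;   /* old order: the restore clobbers it */
        restore_state();
    }
    printf("delivered syndrome: 0x%08x\n", exception_syndrome);
}

int main(void)
{
    raise_with_syndrome(0x12345678, 0);   /* old behaviour: prints 0x0 */
    raise_with_syndrome(0x12345678, 1);   /* fixed behaviour: prints the value */
    return 0;
}
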
Deleted patch
1
From: Jamie Iles <jamie@nuviainc.com>
2
1
3
Now that raise_exception_ra restores the state before raising the
4
exception we can use raise_exception_ra to perform the state restore +
5
exception raising without clobbering the syndrome.
6
7
Cc: Richard Henderson <richard.henderson@linaro.org>
8
Cc: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Jamie Iles <jamie@nuviainc.com>
10
[PMM: Keep the one line of the comment that is still relevant]
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
target/arm/mte_helper.c | 12 +++---------
15
1 file changed, 3 insertions(+), 9 deletions(-)
16
17
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/mte_helper.c
20
+++ b/target/arm/mte_helper.c
21
@@ -XXX,XX +XXX,XX @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
22
23
switch (tcf) {
24
case 1:
25
- /*
26
- * Tag check fail causes a synchronous exception.
27
- *
28
- * In restore_state_to_opc, we set the exception syndrome
29
- * for the load or store operation. Unwind first so we
30
- * may overwrite that with the syndrome for the tag check.
31
- */
32
- cpu_restore_state(env_cpu(env), ra, true);
33
+ /* Tag check fail causes a synchronous exception. */
34
env->exception.vaddress = dirty_ptr;
35
36
is_write = FIELD_EX32(desc, MTEDESC, WRITE);
37
syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0,
38
is_write, 0x11);
39
- raise_exception(env, EXCP_DATA_ABORT, syn, exception_target_el(env));
40
+ raise_exception_ra(env, EXCP_DATA_ABORT, syn,
41
+ exception_target_el(env), ra);
42
/* noreturn, but fall through to the assert anyway */
43
44
case 0:
45
--
46
2.20.1
47
48
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
This is BFCVT{N,T} for both AArch64 AdvSIMD and SVE,
3
Enable the n1 for virt and sbsa board use.
4
and VCVT.BF16.F32 for AArch32 NEON.
5
4
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210525225817.400336-5-richard.henderson@linaro.org
7
Message-id: 20220506180242.216785-25-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
9
---
11
target/arm/helper-sve.h | 4 ++++
10
docs/system/arm/virt.rst | 1 +
12
target/arm/helper.h | 1 +
11
hw/arm/sbsa-ref.c | 1 +
13
target/arm/neon-dp.decode | 1 +
12
hw/arm/virt.c | 1 +
14
target/arm/sve.decode | 2 ++
13
target/arm/cpu64.c | 66 ++++++++++++++++++++++++++++++++++++++++
15
target/arm/sve_helper.c | 2 ++
14
4 files changed, 69 insertions(+)
16
target/arm/translate-a64.c | 17 ++++++++++++++
17
target/arm/translate-neon.c | 45 +++++++++++++++++++++++++++++++++++++
18
target/arm/translate-sve.c | 16 +++++++++++++
19
target/arm/vfp_helper.c | 7 ++++++
20
9 files changed, 95 insertions(+)
21
15
22
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
16
diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst
23
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
24
--- a/target/arm/helper-sve.h
18
--- a/docs/system/arm/virt.rst
25
+++ b/target/arm/helper-sve.h
19
+++ b/docs/system/arm/virt.rst
26
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve_fcvt_hd, TCG_CALL_NO_RWG,
20
@@ -XXX,XX +XXX,XX @@ Supported guest CPU types:
27
void, ptr, ptr, ptr, ptr, i32)
21
- ``cortex-a76`` (64-bit)
28
DEF_HELPER_FLAGS_5(sve_fcvt_sd, TCG_CALL_NO_RWG,
22
- ``a64fx`` (64-bit)
29
void, ptr, ptr, ptr, ptr, i32)
23
- ``host`` (with KVM only)
30
+DEF_HELPER_FLAGS_5(sve_bfcvt, TCG_CALL_NO_RWG,
24
+- ``neoverse-n1`` (64-bit)
31
+ void, ptr, ptr, ptr, ptr, i32)
25
- ``max`` (same as ``host`` for KVM; best possible emulation with TCG)
32
26
33
DEF_HELPER_FLAGS_5(sve_fcvtzs_hh, TCG_CALL_NO_RWG,
27
Note that the default is ``cortex-a15``, so for an AArch64 guest you must
34
void, ptr, ptr, ptr, ptr, i32)
28
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
35
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_5(sve2_fcvtnt_sh, TCG_CALL_NO_RWG,
36
void, ptr, ptr, ptr, ptr, i32)
37
DEF_HELPER_FLAGS_5(sve2_fcvtnt_ds, TCG_CALL_NO_RWG,
38
void, ptr, ptr, ptr, ptr, i32)
39
+DEF_HELPER_FLAGS_5(sve_bfcvtnt, TCG_CALL_NO_RWG,
40
+ void, ptr, ptr, ptr, ptr, i32)
41
42
DEF_HELPER_FLAGS_5(sve2_fcvtlt_hs, TCG_CALL_NO_RWG,
43
void, ptr, ptr, ptr, ptr, i32)
44
diff --git a/target/arm/helper.h b/target/arm/helper.h
45
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
46
--- a/target/arm/helper.h
30
--- a/hw/arm/sbsa-ref.c
47
+++ b/target/arm/helper.h
31
+++ b/hw/arm/sbsa-ref.c
48
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
32
@@ -XXX,XX +XXX,XX @@ static const char * const valid_cpus[] = {
49
DEF_HELPER_2(vfp_fcvtds, f64, f32, env)
33
ARM_CPU_TYPE_NAME("cortex-a57"),
50
DEF_HELPER_2(vfp_fcvtsd, f32, f64, env)
34
ARM_CPU_TYPE_NAME("cortex-a72"),
51
DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, ptr)
35
ARM_CPU_TYPE_NAME("cortex-a76"),
52
+DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, ptr)
36
+ ARM_CPU_TYPE_NAME("neoverse-n1"),
53
37
ARM_CPU_TYPE_NAME("max"),
54
DEF_HELPER_2(vfp_uitoh, f16, i32, ptr)
38
};
55
DEF_HELPER_2(vfp_uitos, f32, i32, ptr)
39
56
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
40
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
57
index XXXXXXX..XXXXXXX 100644
41
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/neon-dp.decode
42
--- a/hw/arm/virt.c
59
+++ b/target/arm/neon-dp.decode
43
+++ b/hw/arm/virt.c
60
@@ -XXX,XX +XXX,XX @@ Vimm_1r 1111 001 . 1 . 000 ... .... cmode:4 0 . op:1 1 .... @1reg_imm
44
@@ -XXX,XX +XXX,XX @@ static const char *valid_cpus[] = {
61
VRINTZ 1111 001 11 . 11 .. 10 .... 0 1011 . . 0 .... @2misc
45
ARM_CPU_TYPE_NAME("cortex-a72"),
62
46
ARM_CPU_TYPE_NAME("cortex-a76"),
63
VCVT_F16_F32 1111 001 11 . 11 .. 10 .... 0 1100 0 . 0 .... @2misc_q0
47
ARM_CPU_TYPE_NAME("a64fx"),
64
+ VCVT_B16_F32 1111 001 11 . 11 .. 10 .... 0 1100 1 . 0 .... @2misc_q0
48
+ ARM_CPU_TYPE_NAME("neoverse-n1"),
65
49
ARM_CPU_TYPE_NAME("host"),
66
VRINTM 1111 001 11 . 11 .. 10 .... 0 1101 . . 0 .... @2misc
50
ARM_CPU_TYPE_NAME("max"),
67
51
};
68
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
52
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
69
index XXXXXXX..XXXXXXX 100644
53
index XXXXXXX..XXXXXXX 100644
70
--- a/target/arm/sve.decode
54
--- a/target/arm/cpu64.c
71
+++ b/target/arm/sve.decode
55
+++ b/target/arm/cpu64.c
72
@@ -XXX,XX +XXX,XX @@ FNMLS_zpzzz 01100101 .. 1 ..... 111 ... ..... ..... @rdn_pg_rm_ra
56
@@ -XXX,XX +XXX,XX @@ static void aarch64_a76_initfn(Object *obj)
73
# SVE floating-point convert precision
57
cpu->isar.mvfr2 = 0x00000043;
74
FCVT_sh 01100101 10 0010 00 101 ... ..... ..... @rd_pg_rn_e0
75
FCVT_hs 01100101 10 0010 01 101 ... ..... ..... @rd_pg_rn_e0
76
+BFCVT 01100101 10 0010 10 101 ... ..... ..... @rd_pg_rn_e0
77
FCVT_dh 01100101 11 0010 00 101 ... ..... ..... @rd_pg_rn_e0
78
FCVT_hd 01100101 11 0010 01 101 ... ..... ..... @rd_pg_rn_e0
79
FCVT_ds 01100101 11 0010 10 101 ... ..... ..... @rd_pg_rn_e0
80
@@ -XXX,XX +XXX,XX @@ RAX1 01000101 00 1 ..... 11110 1 ..... ..... @rd_rn_rm_e0
81
FCVTXNT_ds 01100100 00 0010 10 101 ... ..... ..... @rd_pg_rn_e0
82
FCVTX_ds 01100101 00 0010 10 101 ... ..... ..... @rd_pg_rn_e0
83
FCVTNT_sh 01100100 10 0010 00 101 ... ..... ..... @rd_pg_rn_e0
84
+BFCVTNT 01100100 10 0010 10 101 ... ..... ..... @rd_pg_rn_e0
85
FCVTLT_hs 01100100 10 0010 01 101 ... ..... ..... @rd_pg_rn_e0
86
FCVTNT_ds 01100100 11 0010 10 101 ... ..... ..... @rd_pg_rn_e0
87
FCVTLT_sd 01100100 11 0010 11 101 ... ..... ..... @rd_pg_rn_e0
88
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/arm/sve_helper.c
91
+++ b/target/arm/sve_helper.c
92
@@ -XXX,XX +XXX,XX @@ static inline uint64_t vfp_float64_to_uint64_rtz(float64 f, float_status *s)
93
94
DO_ZPZ_FP(sve_fcvt_sh, uint32_t, H1_4, sve_f32_to_f16)
95
DO_ZPZ_FP(sve_fcvt_hs, uint32_t, H1_4, sve_f16_to_f32)
96
+DO_ZPZ_FP(sve_bfcvt, uint32_t, H1_4, float32_to_bfloat16)
97
DO_ZPZ_FP(sve_fcvt_dh, uint64_t, , sve_f64_to_f16)
98
DO_ZPZ_FP(sve_fcvt_hd, uint64_t, , sve_f16_to_f64)
99
DO_ZPZ_FP(sve_fcvt_ds, uint64_t, , float64_to_float32)
100
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
101
} while (i != 0); \
102
}
58
}
103
59
104
+DO_FCVTNT(sve_bfcvtnt, uint32_t, uint16_t, H1_4, H1_2, float32_to_bfloat16)
60
+static void aarch64_neoverse_n1_initfn(Object *obj)
105
DO_FCVTNT(sve2_fcvtnt_sh, uint32_t, uint16_t, H1_4, H1_2, sve_f32_to_f16)
106
DO_FCVTNT(sve2_fcvtnt_ds, uint64_t, uint32_t, , H1_4, float64_to_float32)
107
108
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
109
index XXXXXXX..XXXXXXX 100644
110
--- a/target/arm/translate-a64.c
111
+++ b/target/arm/translate-a64.c
112
@@ -XXX,XX +XXX,XX @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
113
tcg_temp_free_i32(ahp);
114
}
115
break;
116
+ case 0x36: /* BFCVTN, BFCVTN2 */
117
+ {
118
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
119
+ gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
120
+ tcg_temp_free_ptr(fpst);
121
+ }
122
+ break;
123
case 0x56: /* FCVTXN, FCVTXN2 */
124
/* 64 bit to 32 bit float conversion
125
* with von Neumann rounding (round to odd)
126
@@ -XXX,XX +XXX,XX @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
127
}
128
handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
129
return;
130
+ case 0x36: /* BFCVTN, BFCVTN2 */
131
+ if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
132
+ unallocated_encoding(s);
133
+ return;
134
+ }
135
+ if (!fp_access_check(s)) {
136
+ return;
137
+ }
138
+ handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
139
+ return;
140
case 0x17: /* FCVTL, FCVTL2 */
141
if (!fp_access_check(s)) {
142
return;
143
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
144
index XXXXXXX..XXXXXXX 100644
145
--- a/target/arm/translate-neon.c
146
+++ b/target/arm/translate-neon.c
147
@@ -XXX,XX +XXX,XX @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
148
return true;
149
}
150
151
+static bool trans_VCVT_B16_F32(DisasContext *s, arg_2misc *a)
152
+{
61
+{
153
+ TCGv_ptr fpst;
62
+ ARMCPU *cpu = ARM_CPU(obj);
154
+ TCGv_i64 tmp;
155
+ TCGv_i32 dst0, dst1;
156
+
63
+
157
+ if (!dc_isar_feature(aa32_bf16, s)) {
64
+ cpu->dtb_compatible = "arm,neoverse-n1";
158
+ return false;
65
+ set_feature(&cpu->env, ARM_FEATURE_V8);
159
+ }
66
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
67
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
68
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
69
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
70
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
71
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
72
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
160
+
73
+
161
+ /* UNDEF accesses to D16-D31 if they don't exist. */
74
+ /* Ordered by B2.4 AArch64 registers by functional group */
162
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
75
+ cpu->clidr = 0x82000023;
163
+ ((a->vd | a->vm) & 0x10)) {
76
+ cpu->ctr = 0x8444c004;
164
+ return false;
77
+ cpu->dcz_blocksize = 4;
165
+ }
78
+ cpu->isar.id_aa64dfr0 = 0x0000000110305408ull;
79
+ cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
80
+ cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
81
+ cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull;
82
+ cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
83
+ cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
84
+ cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */
85
+ cpu->isar.id_aa64pfr1 = 0x0000000000000020ull;
86
+ cpu->id_afr0 = 0x00000000;
87
+ cpu->isar.id_dfr0 = 0x04010088;
88
+ cpu->isar.id_isar0 = 0x02101110;
89
+ cpu->isar.id_isar1 = 0x13112111;
90
+ cpu->isar.id_isar2 = 0x21232042;
91
+ cpu->isar.id_isar3 = 0x01112131;
92
+ cpu->isar.id_isar4 = 0x00010142;
93
+ cpu->isar.id_isar5 = 0x01011121;
94
+ cpu->isar.id_isar6 = 0x00000010;
95
+ cpu->isar.id_mmfr0 = 0x10201105;
96
+ cpu->isar.id_mmfr1 = 0x40000000;
97
+ cpu->isar.id_mmfr2 = 0x01260000;
98
+ cpu->isar.id_mmfr3 = 0x02122211;
99
+ cpu->isar.id_mmfr4 = 0x00021110;
100
+ cpu->isar.id_pfr0 = 0x10010131;
101
+ cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
102
+ cpu->isar.id_pfr2 = 0x00000011;
103
+ cpu->midr = 0x414fd0c1; /* r4p1 */
104
+ cpu->revidr = 0;
166
+
105
+
167
+ if ((a->vm & 1) || (a->size != 1)) {
106
+ /* From B2.23 CCSIDR_EL1 */
168
+ return false;
107
+ cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */
169
+ }
108
+ cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */
109
+ cpu->ccsidr[2] = 0x70ffe03a; /* 1MB L2 cache */
170
+
110
+
171
+ if (!vfp_access_check(s)) {
111
+ /* From B2.98 SCTLR_EL3 */
172
+ return true;
112
+ cpu->reset_sctlr = 0x30c50838;
173
+ }
174
+
113
+
175
+ fpst = fpstatus_ptr(FPST_STD);
114
+ /* From B4.23 ICH_VTR_EL2 */
176
+ tmp = tcg_temp_new_i64();
115
+ cpu->gic_num_lrs = 4;
177
+ dst0 = tcg_temp_new_i32();
116
+ cpu->gic_vpribits = 5;
178
+ dst1 = tcg_temp_new_i32();
117
+ cpu->gic_vprebits = 5;
179
+
118
+
180
+ read_neon_element64(tmp, a->vm, 0, MO_64);
119
+ /* From B5.1 AdvSIMD AArch64 register summary */
181
+ gen_helper_bfcvt_pair(dst0, tmp, fpst);
120
+ cpu->isar.mvfr0 = 0x10110222;
182
+
121
+ cpu->isar.mvfr1 = 0x13211111;
183
+ read_neon_element64(tmp, a->vm, 1, MO_64);
122
+ cpu->isar.mvfr2 = 0x00000043;
184
+ gen_helper_bfcvt_pair(dst1, tmp, fpst);
185
+
186
+ write_neon_element32(dst0, a->vd, 0, MO_32);
187
+ write_neon_element32(dst1, a->vd, 1, MO_32);
188
+
189
+ tcg_temp_free_i64(tmp);
190
+ tcg_temp_free_i32(dst0);
191
+ tcg_temp_free_i32(dst1);
192
+ tcg_temp_free_ptr(fpst);
193
+ return true;
194
+}
123
+}
195
+
124
+
196
static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a)
125
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
197
{
126
{
198
TCGv_ptr fpst;
127
/*
199
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
128
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo aarch64_cpus[] = {
200
index XXXXXXX..XXXXXXX 100644
129
{ .name = "cortex-a72", .initfn = aarch64_a72_initfn },
201
--- a/target/arm/translate-sve.c
130
{ .name = "cortex-a76", .initfn = aarch64_a76_initfn },
202
+++ b/target/arm/translate-sve.c
131
{ .name = "a64fx", .initfn = aarch64_a64fx_initfn },
203
@@ -XXX,XX +XXX,XX @@ static bool trans_FCVT_hs(DisasContext *s, arg_rpr_esz *a)
132
+ { .name = "neoverse-n1", .initfn = aarch64_neoverse_n1_initfn },
204
return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hs);
133
{ .name = "max", .initfn = aarch64_max_initfn },
205
}
134
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
206
135
{ .name = "host", .initfn = aarch64_host_initfn },
207
+static bool trans_BFCVT(DisasContext *s, arg_rpr_esz *a)
208
+{
209
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
210
+ return false;
211
+ }
212
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvt);
213
+}
214
+
215
static bool trans_FCVT_dh(DisasContext *s, arg_rpr_esz *a)
216
{
217
return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_dh);
218
@@ -XXX,XX +XXX,XX @@ static bool trans_FCVTNT_sh(DisasContext *s, arg_rpr_esz *a)
219
return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_sh);
220
}
221
222
+static bool trans_BFCVTNT(DisasContext *s, arg_rpr_esz *a)
223
+{
224
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
225
+ return false;
226
+ }
227
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvtnt);
228
+}
229
+
230
static bool trans_FCVTNT_ds(DisasContext *s, arg_rpr_esz *a)
231
{
232
if (!dc_isar_feature(aa64_sve2, s)) {
233
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
234
index XXXXXXX..XXXXXXX 100644
235
--- a/target/arm/vfp_helper.c
236
+++ b/target/arm/vfp_helper.c
237
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(bfcvt)(float32 x, void *status)
238
return float32_to_bfloat16(x, status);
239
}
240
241
+uint32_t HELPER(bfcvt_pair)(uint64_t pair, void *status)
242
+{
243
+ bfloat16 lo = float32_to_bfloat16(extract64(pair, 0, 32), status);
244
+ bfloat16 hi = float32_to_bfloat16(extract64(pair, 32, 32), status);
245
+ return deposit32(lo, 16, 16, hi);
246
+}
247
+
248
/*
249
* VFP3 fixed point conversion. The AArch32 versions of fix-to-float
250
* must always round-to-nearest; the AArch64 ones honour the FPSCR
251
--
136
--
252
2.20.1
137
2.25.1
253
254
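
The bfcvt_pair helper in the patch above packs two conversions into one
32-bit result, low input in bits [15:0] and high input in bits [31:16],
matching the deposit32() call. A standalone sketch of that packing which
simply truncates float32 to bfloat16 instead of applying the FPSCR-driven
rounding the real helper gets via its float_status argument (helper names
here are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Truncating float32 -> bfloat16: keep the top 16 bits of the encoding */
static uint16_t f32_to_bf16_trunc(float f)
{
    uint32_t bits;

    memcpy(&bits, &f, sizeof(bits));
    return bits >> 16;
}

/* lo lands in bits [15:0], hi in bits [31:16], as in bfcvt_pair */
static uint32_t bfcvt_pair_trunc(float lo, float hi)
{
    return f32_to_bf16_trunc(lo) | ((uint32_t)f32_to_bf16_trunc(hi) << 16);
}

int main(void)
{
    printf("0x%08x\n", bfcvt_pair_trunc(1.0f, -2.5f));   /* 0xc0203f80 */
    return 0;
}
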
Deleted patch
1
From: Alexander Graf <agraf@csgraf.de>
2
1
3
Until now, Hypervisor.framework has only been available on x86_64 systems.
4
With Apple Silicon shipping now, it extends its reach to aarch64. To
5
prepare for support for multiple architectures, let's start moving common
6
code out into its own accel directory.
7
8
This patch moves the vCPU thread loop over.
9
10
Signed-off-by: Alexander Graf <agraf@csgraf.de>
11
Reviewed-by: Sergio Lopez <slp@redhat.com>
12
Message-id: 20210519202253.76782-3-agraf@csgraf.de
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
16
{target/i386 => accel}/hvf/hvf-accel-ops.h | 0
17
{target/i386 => accel}/hvf/hvf-accel-ops.c | 0
18
target/i386/hvf/x86hvf.c | 2 +-
19
accel/hvf/meson.build | 1 +
20
target/i386/hvf/meson.build | 1 -
21
5 files changed, 2 insertions(+), 2 deletions(-)
22
rename {target/i386 => accel}/hvf/hvf-accel-ops.h (100%)
23
rename {target/i386 => accel}/hvf/hvf-accel-ops.c (100%)
24
25
diff --git a/target/i386/hvf/hvf-accel-ops.h b/accel/hvf/hvf-accel-ops.h
26
similarity index 100%
27
rename from target/i386/hvf/hvf-accel-ops.h
28
rename to accel/hvf/hvf-accel-ops.h
29
diff --git a/target/i386/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
30
similarity index 100%
31
rename from target/i386/hvf/hvf-accel-ops.c
32
rename to accel/hvf/hvf-accel-ops.c
33
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/i386/hvf/x86hvf.c
36
+++ b/target/i386/hvf/x86hvf.c
37
@@ -XXX,XX +XXX,XX @@
38
#include <Hypervisor/hv.h>
39
#include <Hypervisor/hv_vmx.h>
40
41
-#include "hvf-accel-ops.h"
42
+#include "accel/hvf/hvf-accel-ops.h"
43
44
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
45
SegmentCache *qseg, bool is_tr)
46
diff --git a/accel/hvf/meson.build b/accel/hvf/meson.build
47
index XXXXXXX..XXXXXXX 100644
48
--- a/accel/hvf/meson.build
49
+++ b/accel/hvf/meson.build
50
@@ -XXX,XX +XXX,XX @@
51
hvf_ss = ss.source_set()
52
hvf_ss.add(files(
53
'hvf-all.c',
54
+ 'hvf-accel-ops.c',
55
))
56
57
specific_ss.add_all(when: 'CONFIG_HVF', if_true: hvf_ss)
58
diff --git a/target/i386/hvf/meson.build b/target/i386/hvf/meson.build
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/i386/hvf/meson.build
61
+++ b/target/i386/hvf/meson.build
62
@@ -XXX,XX +XXX,XX @@
63
i386_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
64
'hvf.c',
65
- 'hvf-accel-ops.c',
66
'x86.c',
67
'x86_cpuid.c',
68
'x86_decode.c',
69
--
70
2.20.1
71
72
1
From: Alexander Graf <agraf@csgraf.de>
1
From: Leif Lindholm <quic_llindhol@quicinc.com>
2
2
3
We will need more than a single field for hvf going forward. To keep
3
The sbsa-ref machine is continuously evolving. Some of the changes we
4
the global vcpu struct uncluttered, let's allocate a special hvf vcpu
4
want to make in the near future, to align with real components (e.g.
5
struct, similar to how hax does it.
5
the GIC-700), will break compatibility for existing firmware.
6
6
7
Signed-off-by: Alexander Graf <agraf@csgraf.de>
7
Introduce two new properties to the DT generated on machine generation:
8
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
8
- machine-version-major
9
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com>
9
To be incremented when a platform change makes the machine
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
10
incompatible with existing firmware.
11
Reviewed-by: Sergio Lopez <slp@redhat.com>
11
- machine-version-minor
12
Message-id: 20210519202253.76782-12-agraf@csgraf.de
12
To be incremented when functionality is added to the machine
13
without causing incompatibility with existing firmware.
14
to be reset to 0 when machine-version-major is incremented.
15
16
This versioning scheme is *neither*:
17
- A QEMU versioned machine type; a given version of QEMU will emulate
18
a given version of the platform.
19
- A reflection of level of SBSA (now SystemReady SR) support provided.
20
21
The version will increment on guest-visible functional changes only,
22
akin to a revision ID register found on a physical platform.
23
24
These properties are both introduced with the value 0.
25
(Hence, a machine where the DT is lacking these nodes is equivalent
26
to version 0.0.)
27
28
Signed-off-by: Leif Lindholm <quic_llindhol@quicinc.com>
29
Message-id: 20220505113947.75714-1-quic_llindhol@quicinc.com
30
Cc: Peter Maydell <peter.maydell@linaro.org>
31
Cc: Radoslaw Biernacki <rad@semihalf.com>
32
Cc: Cédric Le Goater <clg@kaod.org>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
33
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
34
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
---
35
---
16
include/hw/core/cpu.h | 3 +-
36
hw/arm/sbsa-ref.c | 14 ++++++++++++++
17
include/sysemu/hvf_int.h | 4 +
37
1 file changed, 14 insertions(+)
18
target/i386/hvf/vmx.h | 24 +++--
19
accel/hvf/hvf-accel-ops.c | 8 +-
20
target/i386/hvf/hvf.c | 104 +++++++++---------
21
target/i386/hvf/x86.c | 28 ++---
22
target/i386/hvf/x86_descr.c | 26 ++---
23
target/i386/hvf/x86_emu.c | 62 +++++------
24
target/i386/hvf/x86_mmu.c | 4 +-
25
target/i386/hvf/x86_task.c | 12 +--
26
target/i386/hvf/x86hvf.c | 210 ++++++++++++++++++------------------
27
11 files changed, 248 insertions(+), 237 deletions(-)
28
38
29
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
39
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
30
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
31
--- a/include/hw/core/cpu.h
41
--- a/hw/arm/sbsa-ref.c
32
+++ b/include/hw/core/cpu.h
42
+++ b/hw/arm/sbsa-ref.c
33
@@ -XXX,XX +XXX,XX @@ struct KVMState;
43
@@ -XXX,XX +XXX,XX @@ static void create_fdt(SBSAMachineState *sms)
34
struct kvm_run;
44
qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2);
35
45
qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2);
36
struct hax_vcpu_state;
46
37
+struct hvf_vcpu_state;
47
+ /*
38
48
+ * This versioning scheme is for informing platform fw only. It is neither:
39
#define TB_JMP_CACHE_BITS 12
49
+ * - A QEMU versioned machine type; a given version of QEMU will emulate
40
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
50
+ * a given version of the platform.
41
@@ -XXX,XX +XXX,XX @@ struct CPUState {
51
+ * - A reflection of level of SBSA (now SystemReady SR) support provided.
42
52
+ *
43
struct hax_vcpu_state *hax_vcpu;
53
+ * machine-version-major: updated when changes breaking fw compatibility
44
54
+ * are introduced.
45
- int hvf_fd;
55
+ * machine-version-minor: updated when features are added that don't break
46
+ struct hvf_vcpu_state *hvf;
56
+ * fw compatibility.
47
57
+ */
48
/* track IOMMUs whose translations we've cached in the TCG TLB */
58
+ qemu_fdt_setprop_cell(fdt, "/", "machine-version-major", 0);
49
GArray *iommu_notifiers;
59
+ qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 0);
50
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
51
index XXXXXXX..XXXXXXX 100644
52
--- a/include/sysemu/hvf_int.h
53
+++ b/include/sysemu/hvf_int.h
54
@@ -XXX,XX +XXX,XX @@ struct HVFState {
55
};
56
extern HVFState *hvf_state;
57
58
+struct hvf_vcpu_state {
59
+ int fd;
60
+};
61
+
60
+
62
void assert_hvf_ok(hv_return_t ret);
61
if (ms->numa_state->have_numa_distance) {
63
int hvf_arch_init_vcpu(CPUState *cpu);
62
int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t);
64
void hvf_arch_vcpu_destroy(CPUState *cpu);
63
uint32_t *matrix = g_malloc0(size);
65
diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h
66
index XXXXXXX..XXXXXXX 100644
67
--- a/target/i386/hvf/vmx.h
68
+++ b/target/i386/hvf/vmx.h
69
@@ -XXX,XX +XXX,XX @@
70
#include "vmcs.h"
71
#include "cpu.h"
72
#include "x86.h"
73
+#include "sysemu/hvf.h"
74
+#include "sysemu/hvf_int.h"
75
76
#include "exec/address-spaces.h"
77
78
@@ -XXX,XX +XXX,XX @@ static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
79
uint64_t val;
80
81
/* BUG, should take considering overlap.. */
82
- wreg(cpu->hvf_fd, HV_X86_RIP, rip);
83
+ wreg(cpu->hvf->fd, HV_X86_RIP, rip);
84
env->eip = rip;
85
86
/* after moving forward in rip, we need to clean INTERRUPTABILITY */
87
- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
88
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
89
if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
90
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
91
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
92
- wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
93
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY,
94
val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
95
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
96
}
97
@@ -XXX,XX +XXX,XX @@ static inline void vmx_clear_nmi_blocking(CPUState *cpu)
98
CPUX86State *env = &x86_cpu->env;
99
100
env->hflags2 &= ~HF2_NMI_MASK;
101
- uint32_t gi = (uint32_t) rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
102
+ uint32_t gi = (uint32_t) rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
103
gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
104
- wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
105
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
106
}
107
108
static inline void vmx_set_nmi_blocking(CPUState *cpu)
109
@@ -XXX,XX +XXX,XX @@ static inline void vmx_set_nmi_blocking(CPUState *cpu)
110
CPUX86State *env = &x86_cpu->env;
111
112
env->hflags2 |= HF2_NMI_MASK;
113
- uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
114
+ uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
115
gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
116
- wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
117
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
118
}
119
120
static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
121
{
122
uint64_t val;
123
- val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
124
- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
125
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
126
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
127
VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
128
129
}
130
@@ -XXX,XX +XXX,XX @@ static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
131
{
132
133
uint64_t val;
134
- val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
135
- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
136
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
137
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
138
~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
139
}
140
141
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
142
index XXXXXXX..XXXXXXX 100644
143
--- a/accel/hvf/hvf-accel-ops.c
144
+++ b/accel/hvf/hvf-accel-ops.c
145
@@ -XXX,XX +XXX,XX @@ type_init(hvf_type_init);
146
147
static void hvf_vcpu_destroy(CPUState *cpu)
148
{
149
- hv_return_t ret = hv_vcpu_destroy(cpu->hvf_fd);
150
+ hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
151
assert_hvf_ok(ret);
152
153
hvf_arch_vcpu_destroy(cpu);
154
+ g_free(cpu->hvf);
155
+ cpu->hvf = NULL;
156
}
157
158
static int hvf_init_vcpu(CPUState *cpu)
159
{
160
int r;
161
162
+ cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
163
+
164
/* init cpu signals */
165
sigset_t set;
166
struct sigaction sigact;
167
@@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu)
168
pthread_sigmask(SIG_BLOCK, NULL, &set);
169
sigdelset(&set, SIG_IPI);
170
171
- r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
172
+ r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
173
cpu->vcpu_dirty = 1;
174
assert_hvf_ok(r);
175
176
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
177
index XXXXXXX..XXXXXXX 100644
178
--- a/target/i386/hvf/hvf.c
179
+++ b/target/i386/hvf/hvf.c
180
@@ -XXX,XX +XXX,XX @@ void vmx_update_tpr(CPUState *cpu)
181
int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
182
int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);
183
184
- wreg(cpu->hvf_fd, HV_X86_TPR, tpr);
185
+ wreg(cpu->hvf->fd, HV_X86_TPR, tpr);
186
if (irr == -1) {
187
- wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
188
+ wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
189
} else {
190
- wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
191
+ wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
192
irr >> 4);
193
}
194
}
195
@@ -XXX,XX +XXX,XX @@ void vmx_update_tpr(CPUState *cpu)
196
static void update_apic_tpr(CPUState *cpu)
197
{
198
X86CPU *x86_cpu = X86_CPU(cpu);
199
- int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;
200
+ int tpr = rreg(cpu->hvf->fd, HV_X86_TPR) >> 4;
201
cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
202
}
203
204
@@ -XXX,XX +XXX,XX @@ int hvf_arch_init_vcpu(CPUState *cpu)
205
}
206
207
/* set VMCS control fields */
208
- wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,
209
+ wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS,
210
cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
211
VMCS_PIN_BASED_CTLS_EXTINT |
212
VMCS_PIN_BASED_CTLS_NMI |
213
VMCS_PIN_BASED_CTLS_VNMI));
214
- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
215
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS,
216
cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
217
VMCS_PRI_PROC_BASED_CTLS_HLT |
218
VMCS_PRI_PROC_BASED_CTLS_MWAIT |
219
VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
220
VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
221
VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
222
- wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,
223
+ wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS,
224
cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
225
VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));
226
227
- wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,
228
+ wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,
229
0));
230
- wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */
231
+ wvmcs(cpu->hvf->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */
232
233
- wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
234
+ wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
235
236
x86cpu = X86_CPU(cpu);
237
x86cpu->env.xsave_buf = qemu_memalign(4096, 4096);
238
239
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
240
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
241
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
242
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
243
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
244
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
245
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
246
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
247
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);
248
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
249
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
250
- hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);
251
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1);
252
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1);
253
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1);
254
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FMASK, 1);
255
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FSBASE, 1);
256
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_GSBASE, 1);
257
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_KERNELGSBASE, 1);
258
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_TSC_AUX, 1);
259
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_TSC, 1);
260
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_CS, 1);
261
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_EIP, 1);
262
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_ESP, 1);
263
264
return 0;
265
}
266
@@ -XXX,XX +XXX,XX @@ static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_in
267
}
268
if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
269
env->has_error_code = true;
270
- env->error_code = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERROR);
271
+ env->error_code = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_ERROR);
272
}
273
}
274
- if ((rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
275
+ if ((rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
276
VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
277
env->hflags2 |= HF2_NMI_MASK;
278
} else {
279
env->hflags2 &= ~HF2_NMI_MASK;
280
}
281
- if (rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
282
+ if (rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
283
(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
284
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
285
env->hflags |= HF_INHIBIT_IRQ_MASK;
286
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
287
return EXCP_HLT;
288
}
289
290
- hv_return_t r = hv_vcpu_run(cpu->hvf_fd);
291
+ hv_return_t r = hv_vcpu_run(cpu->hvf->fd);
292
assert_hvf_ok(r);
293
294
/* handle VMEXIT */
295
- uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);
296
- uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);
297
- uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,
298
+ uint64_t exit_reason = rvmcs(cpu->hvf->fd, VMCS_EXIT_REASON);
299
+ uint64_t exit_qual = rvmcs(cpu->hvf->fd, VMCS_EXIT_QUALIFICATION);
300
+ uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf->fd,
301
VMCS_EXIT_INSTRUCTION_LENGTH);
302
303
- uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
304
+ uint64_t idtvec_info = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);
305
306
hvf_store_events(cpu, ins_len, idtvec_info);
307
- rip = rreg(cpu->hvf_fd, HV_X86_RIP);
308
- env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
309
+ rip = rreg(cpu->hvf->fd, HV_X86_RIP);
310
+ env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);
311
312
qemu_mutex_lock_iothread();
313
314
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
315
case EXIT_REASON_EPT_FAULT:
316
{
317
hvf_slot *slot;
318
- uint64_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);
319
+ uint64_t gpa = rvmcs(cpu->hvf->fd, VMCS_GUEST_PHYSICAL_ADDRESS);
320
321
if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
322
((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
323
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
324
store_regs(cpu);
325
break;
326
} else if (!string && !in) {
327
- RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);
328
+ RAX(env) = rreg(cpu->hvf->fd, HV_X86_RAX);
329
hvf_handle_io(env, port, &RAX(env), 1, size, 1);
330
macvm_set_rip(cpu, rip + ins_len);
331
break;
332
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
333
break;
334
}
335
case EXIT_REASON_CPUID: {
336
- uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
337
- uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);
338
- uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
339
- uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);
340
+ uint32_t rax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
341
+ uint32_t rbx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RBX);
342
+ uint32_t rcx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
343
+ uint32_t rdx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);
344
345
if (rax == 1) {
346
/* CPUID1.ecx.OSXSAVE needs to know CR4 */
347
- env->cr[4] = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
348
+ env->cr[4] = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
349
}
350
hvf_cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);
351
352
- wreg(cpu->hvf_fd, HV_X86_RAX, rax);
353
- wreg(cpu->hvf_fd, HV_X86_RBX, rbx);
354
- wreg(cpu->hvf_fd, HV_X86_RCX, rcx);
355
- wreg(cpu->hvf_fd, HV_X86_RDX, rdx);
356
+ wreg(cpu->hvf->fd, HV_X86_RAX, rax);
357
+ wreg(cpu->hvf->fd, HV_X86_RBX, rbx);
358
+ wreg(cpu->hvf->fd, HV_X86_RCX, rcx);
359
+ wreg(cpu->hvf->fd, HV_X86_RDX, rdx);
360
361
macvm_set_rip(cpu, rip + ins_len);
362
break;
363
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
364
case EXIT_REASON_XSETBV: {
365
X86CPU *x86_cpu = X86_CPU(cpu);
366
CPUX86State *env = &x86_cpu->env;
367
- uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
368
- uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
369
- uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);
370
+ uint32_t eax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
371
+ uint32_t ecx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
372
+ uint32_t edx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);
373
374
if (ecx) {
375
macvm_set_rip(cpu, rip + ins_len);
376
break;
377
}
378
env->xcr0 = ((uint64_t)edx << 32) | eax;
379
- wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);
380
+ wreg(cpu->hvf->fd, HV_X86_XCR0, env->xcr0 | 1);
381
macvm_set_rip(cpu, rip + ins_len);
382
break;
383
}
384
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
385
386
switch (cr) {
387
case 0x0: {
388
- macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));
389
+ macvm_set_cr0(cpu->hvf->fd, RRX(env, reg));
390
break;
391
}
392
case 4: {
393
- macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));
394
+ macvm_set_cr4(cpu->hvf->fd, RRX(env, reg));
395
break;
396
}
397
case 8: {
398
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
399
break;
400
}
401
case EXIT_REASON_TASK_SWITCH: {
402
- uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
403
+ uint64_t vinfo = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);
404
x68_segment_selector sel = {.sel = exit_qual & 0xffff};
405
vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
406
vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
407
@@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu)
408
break;
409
}
410
case EXIT_REASON_RDPMC:
411
- wreg(cpu->hvf_fd, HV_X86_RAX, 0);
412
- wreg(cpu->hvf_fd, HV_X86_RDX, 0);
413
+ wreg(cpu->hvf->fd, HV_X86_RAX, 0);
414
+ wreg(cpu->hvf->fd, HV_X86_RDX, 0);
415
macvm_set_rip(cpu, rip + ins_len);
416
break;
417
case VMX_REASON_VMCALL:
418
diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c
419
index XXXXXXX..XXXXXXX 100644
420
--- a/target/i386/hvf/x86.c
421
+++ b/target/i386/hvf/x86.c
422
@@ -XXX,XX +XXX,XX @@ bool x86_read_segment_descriptor(struct CPUState *cpu,
423
}
424
425
if (GDT_SEL == sel.ti) {
426
- base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
427
- limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
428
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
429
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
430
} else {
431
- base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
432
- limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
433
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
434
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
435
}
436
437
if (sel.index * 8 >= limit) {
438
@@ -XXX,XX +XXX,XX @@ bool x86_write_segment_descriptor(struct CPUState *cpu,
439
uint32_t limit;
440
441
if (GDT_SEL == sel.ti) {
442
- base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
443
- limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
444
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
445
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
446
} else {
447
- base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
448
- limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
449
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
450
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
451
}
452
453
if (sel.index * 8 >= limit) {
454
@@ -XXX,XX +XXX,XX @@ bool x86_write_segment_descriptor(struct CPUState *cpu,
455
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
456
int gate)
457
{
458
- target_ulong base = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);
459
- uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
460
+ target_ulong base = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE);
461
+ uint32_t limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
462
463
memset(idt_desc, 0, sizeof(*idt_desc));
464
if (gate * 8 >= limit) {
465
@@ -XXX,XX +XXX,XX @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
466
467
bool x86_is_protected(struct CPUState *cpu)
468
{
469
- uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
470
+ uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
471
return cr0 & CR0_PE;
472
}
473
474
@@ -XXX,XX +XXX,XX @@ bool x86_is_v8086(struct CPUState *cpu)
475
476
bool x86_is_long_mode(struct CPUState *cpu)
477
{
478
- return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
479
+ return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
480
}
481
482
bool x86_is_long64_mode(struct CPUState *cpu)
483
@@ -XXX,XX +XXX,XX @@ bool x86_is_long64_mode(struct CPUState *cpu)
484
485
bool x86_is_paging_mode(struct CPUState *cpu)
486
{
487
- uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
488
+ uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
489
return cr0 & CR0_PG;
490
}
491
492
bool x86_is_pae_enabled(struct CPUState *cpu)
493
{
494
- uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
495
+ uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
496
return cr4 & CR4_PAE;
497
}
498
499
diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c
500
index XXXXXXX..XXXXXXX 100644
501
--- a/target/i386/hvf/x86_descr.c
502
+++ b/target/i386/hvf/x86_descr.c
503
@@ -XXX,XX +XXX,XX @@ static const struct vmx_segment_field {
504
505
uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg)
506
{
507
- return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
508
+ return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit);
509
}
510
511
uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg)
512
{
513
- return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
514
+ return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes);
515
}
516
517
uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg)
518
{
519
- return rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
520
+ return rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base);
521
}
522
523
x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
524
{
525
x68_segment_selector sel;
526
- sel.sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
527
+ sel.sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector);
528
return sel;
529
}
530
531
void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, X86Seg seg)
532
{
533
- wvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector, selector.sel);
534
+ wvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector, selector.sel);
535
}
536
537
void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
538
{
539
- desc->sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
540
- desc->base = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
541
- desc->limit = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
542
- desc->ar = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
543
+ desc->sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector);
544
+ desc->base = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base);
545
+ desc->limit = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit);
546
+ desc->ar = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes);
547
}
548
549
void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
550
{
551
const struct vmx_segment_field *sf = &vmx_segment_fields[seg];
552
553
- wvmcs(cpu->hvf_fd, sf->base, desc->base);
554
- wvmcs(cpu->hvf_fd, sf->limit, desc->limit);
555
- wvmcs(cpu->hvf_fd, sf->selector, desc->sel);
556
- wvmcs(cpu->hvf_fd, sf->ar_bytes, desc->ar);
557
+ wvmcs(cpu->hvf->fd, sf->base, desc->base);
558
+ wvmcs(cpu->hvf->fd, sf->limit, desc->limit);
559
+ wvmcs(cpu->hvf->fd, sf->selector, desc->sel);
560
+ wvmcs(cpu->hvf->fd, sf->ar_bytes, desc->ar);
561
}
562
563
void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc)
564
diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c
565
index XXXXXXX..XXXXXXX 100644
566
--- a/target/i386/hvf/x86_emu.c
567
+++ b/target/i386/hvf/x86_emu.c
568
@@ -XXX,XX +XXX,XX @@ void simulate_rdmsr(struct CPUState *cpu)
569
570
switch (msr) {
571
case MSR_IA32_TSC:
572
- val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);
573
+ val = rdtscp() + rvmcs(cpu->hvf->fd, VMCS_TSC_OFFSET);
574
break;
575
case MSR_IA32_APICBASE:
576
val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
577
@@ -XXX,XX +XXX,XX @@ void simulate_rdmsr(struct CPUState *cpu)
578
val = x86_cpu->ucode_rev;
579
break;
580
case MSR_EFER:
581
- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
582
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER);
583
break;
584
case MSR_FSBASE:
585
- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);
586
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE);
587
break;
588
case MSR_GSBASE:
589
- val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);
590
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE);
591
break;
592
case MSR_KERNELGSBASE:
593
- val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);
594
+ val = rvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE);
595
break;
596
case MSR_STAR:
597
abort();
598
@@ -XXX,XX +XXX,XX @@ void simulate_wrmsr(struct CPUState *cpu)
599
cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
600
break;
601
case MSR_FSBASE:
602
- wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);
603
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE, data);
604
break;
605
case MSR_GSBASE:
606
- wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);
607
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE, data);
608
break;
609
case MSR_KERNELGSBASE:
610
- wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);
611
+ wvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE, data);
612
break;
613
case MSR_STAR:
614
abort();
615
@@ -XXX,XX +XXX,XX @@ void simulate_wrmsr(struct CPUState *cpu)
616
break;
617
case MSR_EFER:
618
/*printf("new efer %llx\n", EFER(cpu));*/
619
- wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);
620
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER, data);
621
if (data & MSR_EFER_NXE) {
622
- hv_vcpu_invalidate_tlb(cpu->hvf_fd);
623
+ hv_vcpu_invalidate_tlb(cpu->hvf->fd);
624
}
625
break;
626
case MSR_MTRRphysBase(0):
627
@@ -XXX,XX +XXX,XX @@ void load_regs(struct CPUState *cpu)
628
CPUX86State *env = &x86_cpu->env;
629
630
int i = 0;
631
- RRX(env, R_EAX) = rreg(cpu->hvf_fd, HV_X86_RAX);
632
- RRX(env, R_EBX) = rreg(cpu->hvf_fd, HV_X86_RBX);
633
- RRX(env, R_ECX) = rreg(cpu->hvf_fd, HV_X86_RCX);
634
- RRX(env, R_EDX) = rreg(cpu->hvf_fd, HV_X86_RDX);
635
- RRX(env, R_ESI) = rreg(cpu->hvf_fd, HV_X86_RSI);
636
- RRX(env, R_EDI) = rreg(cpu->hvf_fd, HV_X86_RDI);
637
- RRX(env, R_ESP) = rreg(cpu->hvf_fd, HV_X86_RSP);
638
- RRX(env, R_EBP) = rreg(cpu->hvf_fd, HV_X86_RBP);
639
+ RRX(env, R_EAX) = rreg(cpu->hvf->fd, HV_X86_RAX);
640
+ RRX(env, R_EBX) = rreg(cpu->hvf->fd, HV_X86_RBX);
641
+ RRX(env, R_ECX) = rreg(cpu->hvf->fd, HV_X86_RCX);
642
+ RRX(env, R_EDX) = rreg(cpu->hvf->fd, HV_X86_RDX);
643
+ RRX(env, R_ESI) = rreg(cpu->hvf->fd, HV_X86_RSI);
644
+ RRX(env, R_EDI) = rreg(cpu->hvf->fd, HV_X86_RDI);
645
+ RRX(env, R_ESP) = rreg(cpu->hvf->fd, HV_X86_RSP);
646
+ RRX(env, R_EBP) = rreg(cpu->hvf->fd, HV_X86_RBP);
647
for (i = 8; i < 16; i++) {
648
- RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);
649
+ RRX(env, i) = rreg(cpu->hvf->fd, HV_X86_RAX + i);
650
}
651
652
- env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
653
+ env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);
654
rflags_to_lflags(env);
655
- env->eip = rreg(cpu->hvf_fd, HV_X86_RIP);
656
+ env->eip = rreg(cpu->hvf->fd, HV_X86_RIP);
657
}
658
659
void store_regs(struct CPUState *cpu)
660
@@ -XXX,XX +XXX,XX @@ void store_regs(struct CPUState *cpu)
661
CPUX86State *env = &x86_cpu->env;
662
663
int i = 0;
664
- wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));
665
- wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));
666
- wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));
667
- wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));
668
- wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));
669
- wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));
670
- wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));
671
- wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));
672
+ wreg(cpu->hvf->fd, HV_X86_RAX, RAX(env));
673
+ wreg(cpu->hvf->fd, HV_X86_RBX, RBX(env));
674
+ wreg(cpu->hvf->fd, HV_X86_RCX, RCX(env));
675
+ wreg(cpu->hvf->fd, HV_X86_RDX, RDX(env));
676
+ wreg(cpu->hvf->fd, HV_X86_RSI, RSI(env));
677
+ wreg(cpu->hvf->fd, HV_X86_RDI, RDI(env));
678
+ wreg(cpu->hvf->fd, HV_X86_RBP, RBP(env));
679
+ wreg(cpu->hvf->fd, HV_X86_RSP, RSP(env));
680
for (i = 8; i < 16; i++) {
681
- wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));
682
+ wreg(cpu->hvf->fd, HV_X86_RAX + i, RRX(env, i));
683
}
684
685
lflags_to_rflags(env);
686
- wreg(cpu->hvf_fd, HV_X86_RFLAGS, env->eflags);
687
+ wreg(cpu->hvf->fd, HV_X86_RFLAGS, env->eflags);
688
macvm_set_rip(cpu, env->eip);
689
}
690
691
diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c
692
index XXXXXXX..XXXXXXX 100644
693
--- a/target/i386/hvf/x86_mmu.c
694
+++ b/target/i386/hvf/x86_mmu.c
695
@@ -XXX,XX +XXX,XX @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
696
pt->err_code |= MMU_PAGE_PT;
697
}
698
699
- uint32_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
700
+ uint32_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
701
/* check protection */
702
if (cr0 & CR0_WP) {
703
if (pt->write_access && !pte_write_access(pte)) {
704
@@ -XXX,XX +XXX,XX @@ static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
705
{
706
int top_level, level;
707
bool is_large = false;
708
- target_ulong cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);
709
+ target_ulong cr3 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR3);
710
uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
711
712
memset(pt, 0, sizeof(*pt));
713
diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c
714
index XXXXXXX..XXXXXXX 100644
715
--- a/target/i386/hvf/x86_task.c
716
+++ b/target/i386/hvf/x86_task.c
717
@@ -XXX,XX +XXX,XX @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
718
X86CPU *x86_cpu = X86_CPU(cpu);
719
CPUX86State *env = &x86_cpu->env;
720
721
- wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);
722
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_CR3, tss->cr3);
723
724
env->eip = tss->eip;
725
env->eflags = tss->eflags | 2;
726
@@ -XXX,XX +XXX,XX @@ static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segme
727
728
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
729
{
730
- uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
731
+ uint64_t rip = rreg(cpu->hvf->fd, HV_X86_RIP);
732
if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
733
gate_type != VMCS_INTR_T_HWINTR &&
734
gate_type != VMCS_INTR_T_NMI)) {
735
- int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
736
+ int ins_len = rvmcs(cpu->hvf->fd, VMCS_EXIT_INSTRUCTION_LENGTH);
737
macvm_set_rip(cpu, rip + ins_len);
738
return;
739
}
740
@@ -XXX,XX +XXX,XX @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
741
//ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
742
VM_PANIC("task_switch_16");
743
744
- macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);
745
+ macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS);
746
x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
747
vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);
748
749
store_regs(cpu);
750
751
- hv_vcpu_invalidate_tlb(cpu->hvf_fd);
752
- hv_vcpu_flush(cpu->hvf_fd);
753
+ hv_vcpu_invalidate_tlb(cpu->hvf->fd);
754
+ hv_vcpu_flush(cpu->hvf->fd);
755
}
756
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
757
index XXXXXXX..XXXXXXX 100644
758
--- a/target/i386/hvf/x86hvf.c
759
+++ b/target/i386/hvf/x86hvf.c
760
@@ -XXX,XX +XXX,XX @@ void hvf_put_xsave(CPUState *cpu_state)
761
762
x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);
763
764
- if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
765
+ if (hv_vcpu_write_fpstate(cpu_state->hvf->fd, (void*)xsave, 4096)) {
766
abort();
767
}
768
}
769
@@ -XXX,XX +XXX,XX @@ void hvf_put_segments(CPUState *cpu_state)
770
CPUX86State *env = &X86_CPU(cpu_state)->env;
771
struct vmx_segment seg;
772
773
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
774
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
775
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
776
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
777
778
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
779
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
780
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
781
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
782
783
- /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
784
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
785
+ /* wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */
786
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3, env->cr[3]);
787
vmx_update_tpr(cpu_state);
788
- wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);
789
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer);
790
791
- macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
792
- macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);
793
+ macvm_set_cr4(cpu_state->hvf->fd, env->cr[4]);
794
+ macvm_set_cr0(cpu_state->hvf->fd, env->cr[0]);
795
796
hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
797
vmx_write_segment_descriptor(cpu_state, &seg, R_CS);
798
@@ -XXX,XX +XXX,XX @@ void hvf_put_segments(CPUState *cpu_state)
799
hvf_set_segment(cpu_state, &seg, &env->ldt, false);
800
vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);
801
802
- hv_vcpu_flush(cpu_state->hvf_fd);
803
+ hv_vcpu_flush(cpu_state->hvf->fd);
804
}
805
806
void hvf_put_msrs(CPUState *cpu_state)
807
{
808
CPUX86State *env = &X86_CPU(cpu_state)->env;
809
810
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
811
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS,
812
env->sysenter_cs);
813
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
814
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP,
815
env->sysenter_esp);
816
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
817
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP,
818
env->sysenter_eip);
819
820
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);
821
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_STAR, env->star);
822
823
#ifdef TARGET_X86_64
824
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
825
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
826
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
827
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
828
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_CSTAR, env->cstar);
829
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase);
830
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FMASK, env->fmask);
831
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_LSTAR, env->lstar);
832
#endif
833
834
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
835
- hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);
836
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_GSBASE, env->segs[R_GS].base);
837
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FSBASE, env->segs[R_FS].base);
838
}
839
840
841
@@ -XXX,XX +XXX,XX @@ void hvf_get_xsave(CPUState *cpu_state)
842
843
xsave = X86_CPU(cpu_state)->env.xsave_buf;
844
845
- if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
846
+ if (hv_vcpu_read_fpstate(cpu_state->hvf->fd, (void*)xsave, 4096)) {
847
abort();
848
}
849
850
@@ -XXX,XX +XXX,XX @@ void hvf_get_segments(CPUState *cpu_state)
851
vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
852
hvf_get_segment(&env->ldt, &seg);
853
854
- env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
855
- env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
856
- env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
857
- env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);
858
+ env->idt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
859
+ env->idt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE);
860
+ env->gdt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
861
+ env->gdt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE);
862
863
- env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
864
+ env->cr[0] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR0);
865
env->cr[2] = 0;
866
- env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
867
- env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);
868
+ env->cr[3] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3);
869
+ env->cr[4] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR4);
870
871
- env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
872
+ env->efer = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER);
873
}
874
875
void hvf_get_msrs(CPUState *cpu_state)
876
@@ -XXX,XX +XXX,XX @@ void hvf_get_msrs(CPUState *cpu_state)
877
CPUX86State *env = &X86_CPU(cpu_state)->env;
878
uint64_t tmp;
879
880
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
881
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp);
882
env->sysenter_cs = tmp;
883
884
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
885
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp);
886
env->sysenter_esp = tmp;
887
888
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
889
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp);
890
env->sysenter_eip = tmp;
891
892
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);
893
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_STAR, &env->star);
894
895
#ifdef TARGET_X86_64
896
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
897
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
898
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
899
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
900
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_CSTAR, &env->cstar);
901
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase);
902
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_FMASK, &env->fmask);
903
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_LSTAR, &env->lstar);
904
#endif
905
906
- hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);
907
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_APICBASE, &tmp);
908
909
- env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
910
+ env->tsc = rdtscp() + rvmcs(cpu_state->hvf->fd, VMCS_TSC_OFFSET);
911
}
912
913
int hvf_put_registers(CPUState *cpu_state)
914
@@ -XXX,XX +XXX,XX @@ int hvf_put_registers(CPUState *cpu_state)
915
X86CPU *x86cpu = X86_CPU(cpu_state);
916
CPUX86State *env = &x86cpu->env;
917
918
- wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
919
- wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
920
- wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
921
- wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
922
- wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
923
- wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
924
- wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
925
- wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
926
- wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
927
- wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
928
- wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
929
- wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
930
- wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
931
- wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
932
- wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
933
- wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
934
- wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
935
- wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);
936
+ wreg(cpu_state->hvf->fd, HV_X86_RAX, env->regs[R_EAX]);
937
+ wreg(cpu_state->hvf->fd, HV_X86_RBX, env->regs[R_EBX]);
938
+ wreg(cpu_state->hvf->fd, HV_X86_RCX, env->regs[R_ECX]);
939
+ wreg(cpu_state->hvf->fd, HV_X86_RDX, env->regs[R_EDX]);
940
+ wreg(cpu_state->hvf->fd, HV_X86_RBP, env->regs[R_EBP]);
941
+ wreg(cpu_state->hvf->fd, HV_X86_RSP, env->regs[R_ESP]);
942
+ wreg(cpu_state->hvf->fd, HV_X86_RSI, env->regs[R_ESI]);
943
+ wreg(cpu_state->hvf->fd, HV_X86_RDI, env->regs[R_EDI]);
944
+ wreg(cpu_state->hvf->fd, HV_X86_R8, env->regs[8]);
945
+ wreg(cpu_state->hvf->fd, HV_X86_R9, env->regs[9]);
946
+ wreg(cpu_state->hvf->fd, HV_X86_R10, env->regs[10]);
947
+ wreg(cpu_state->hvf->fd, HV_X86_R11, env->regs[11]);
948
+ wreg(cpu_state->hvf->fd, HV_X86_R12, env->regs[12]);
949
+ wreg(cpu_state->hvf->fd, HV_X86_R13, env->regs[13]);
950
+ wreg(cpu_state->hvf->fd, HV_X86_R14, env->regs[14]);
951
+ wreg(cpu_state->hvf->fd, HV_X86_R15, env->regs[15]);
952
+ wreg(cpu_state->hvf->fd, HV_X86_RFLAGS, env->eflags);
953
+ wreg(cpu_state->hvf->fd, HV_X86_RIP, env->eip);
954
955
- wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);
956
+ wreg(cpu_state->hvf->fd, HV_X86_XCR0, env->xcr0);
957
958
hvf_put_xsave(cpu_state);
959
960
@@ -XXX,XX +XXX,XX @@ int hvf_put_registers(CPUState *cpu_state)
961
962
hvf_put_msrs(cpu_state);
963
964
- wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
965
- wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
966
- wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
967
- wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
968
- wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
969
- wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
970
- wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
971
- wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);
972
+ wreg(cpu_state->hvf->fd, HV_X86_DR0, env->dr[0]);
973
+ wreg(cpu_state->hvf->fd, HV_X86_DR1, env->dr[1]);
974
+ wreg(cpu_state->hvf->fd, HV_X86_DR2, env->dr[2]);
975
+ wreg(cpu_state->hvf->fd, HV_X86_DR3, env->dr[3]);
976
+ wreg(cpu_state->hvf->fd, HV_X86_DR4, env->dr[4]);
977
+ wreg(cpu_state->hvf->fd, HV_X86_DR5, env->dr[5]);
978
+ wreg(cpu_state->hvf->fd, HV_X86_DR6, env->dr[6]);
979
+ wreg(cpu_state->hvf->fd, HV_X86_DR7, env->dr[7]);
980
981
return 0;
982
}
983
@@ -XXX,XX +XXX,XX @@ int hvf_get_registers(CPUState *cpu_state)
984
X86CPU *x86cpu = X86_CPU(cpu_state);
985
CPUX86State *env = &x86cpu->env;
986
987
- env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
988
- env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
989
- env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
990
- env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
991
- env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
992
- env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
993
- env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
994
- env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
995
- env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
996
- env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
997
- env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
998
- env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
999
- env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
1000
- env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
1001
- env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
1002
- env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);
1003
+ env->regs[R_EAX] = rreg(cpu_state->hvf->fd, HV_X86_RAX);
1004
+ env->regs[R_EBX] = rreg(cpu_state->hvf->fd, HV_X86_RBX);
1005
+ env->regs[R_ECX] = rreg(cpu_state->hvf->fd, HV_X86_RCX);
1006
+ env->regs[R_EDX] = rreg(cpu_state->hvf->fd, HV_X86_RDX);
1007
+ env->regs[R_EBP] = rreg(cpu_state->hvf->fd, HV_X86_RBP);
1008
+ env->regs[R_ESP] = rreg(cpu_state->hvf->fd, HV_X86_RSP);
1009
+ env->regs[R_ESI] = rreg(cpu_state->hvf->fd, HV_X86_RSI);
1010
+ env->regs[R_EDI] = rreg(cpu_state->hvf->fd, HV_X86_RDI);
1011
+ env->regs[8] = rreg(cpu_state->hvf->fd, HV_X86_R8);
1012
+ env->regs[9] = rreg(cpu_state->hvf->fd, HV_X86_R9);
1013
+ env->regs[10] = rreg(cpu_state->hvf->fd, HV_X86_R10);
1014
+ env->regs[11] = rreg(cpu_state->hvf->fd, HV_X86_R11);
1015
+ env->regs[12] = rreg(cpu_state->hvf->fd, HV_X86_R12);
1016
+ env->regs[13] = rreg(cpu_state->hvf->fd, HV_X86_R13);
1017
+ env->regs[14] = rreg(cpu_state->hvf->fd, HV_X86_R14);
1018
+ env->regs[15] = rreg(cpu_state->hvf->fd, HV_X86_R15);
1019
1020
- env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
1021
- env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);
1022
+ env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
1023
+ env->eip = rreg(cpu_state->hvf->fd, HV_X86_RIP);
1024
1025
hvf_get_xsave(cpu_state);
1026
- env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);
1027
+ env->xcr0 = rreg(cpu_state->hvf->fd, HV_X86_XCR0);
1028
1029
hvf_get_segments(cpu_state);
1030
hvf_get_msrs(cpu_state);
1031
1032
- env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
1033
- env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
1034
- env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
1035
- env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
1036
- env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
1037
- env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
1038
- env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
1039
- env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);
1040
+ env->dr[0] = rreg(cpu_state->hvf->fd, HV_X86_DR0);
1041
+ env->dr[1] = rreg(cpu_state->hvf->fd, HV_X86_DR1);
1042
+ env->dr[2] = rreg(cpu_state->hvf->fd, HV_X86_DR2);
1043
+ env->dr[3] = rreg(cpu_state->hvf->fd, HV_X86_DR3);
1044
+ env->dr[4] = rreg(cpu_state->hvf->fd, HV_X86_DR4);
1045
+ env->dr[5] = rreg(cpu_state->hvf->fd, HV_X86_DR5);
1046
+ env->dr[6] = rreg(cpu_state->hvf->fd, HV_X86_DR6);
1047
+ env->dr[7] = rreg(cpu_state->hvf->fd, HV_X86_DR7);
1048
1049
x86_update_hflags(env);
1050
return 0;
1051
@@ -XXX,XX +XXX,XX @@ int hvf_get_registers(CPUState *cpu_state)
1052
static void vmx_set_int_window_exiting(CPUState *cpu)
1053
{
1054
uint64_t val;
1055
- val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
1056
- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
1057
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
1058
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
1059
VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
1060
}
1061
1062
void vmx_clear_int_window_exiting(CPUState *cpu)
1063
{
1064
uint64_t val;
1065
- val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
1066
- wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
1067
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
1068
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
1069
~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
1070
}
1071
1072
@@ -XXX,XX +XXX,XX @@ bool hvf_inject_interrupts(CPUState *cpu_state)
1073
uint64_t info = 0;
1074
if (have_event) {
1075
info = vector | intr_type | VMCS_INTR_VALID;
1076
- uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);
1077
+ uint64_t reason = rvmcs(cpu_state->hvf->fd, VMCS_EXIT_REASON);
1078
if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
1079
vmx_clear_nmi_blocking(cpu_state);
1080
}
1081
@@ -XXX,XX +XXX,XX @@ bool hvf_inject_interrupts(CPUState *cpu_state)
1082
info &= ~(1 << 12); /* clear undefined bit */
1083
if (intr_type == VMCS_INTR_T_SWINTR ||
1084
intr_type == VMCS_INTR_T_SWEXCEPTION) {
1085
- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
1086
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
1087
}
1088
1089
if (env->has_error_code) {
1090
- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,
1091
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR,
1092
env->error_code);
1093
/* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */
1094
info |= VMCS_INTR_DEL_ERRCODE;
1095
}
1096
/*printf("reinject %lx err %d\n", info, err);*/
1097
- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
1098
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);
1099
};
1100
}
1101
1102
@@ -XXX,XX +XXX,XX @@ bool hvf_inject_interrupts(CPUState *cpu_state)
1103
if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
1104
cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
1105
info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
1106
- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
1107
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);
1108
} else {
1109
vmx_set_nmi_window_exiting(cpu_state);
1110
}
1111
@@ -XXX,XX +XXX,XX @@ bool hvf_inject_interrupts(CPUState *cpu_state)
1112
int line = cpu_get_pic_interrupt(&x86cpu->env);
1113
cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
1114
if (line >= 0) {
1115
- wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
1116
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, line |
1117
VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
1118
}
1119
}
1120
@@ -XXX,XX +XXX,XX @@ int hvf_process_events(CPUState *cpu_state)
1121
X86CPU *cpu = X86_CPU(cpu_state);
1122
CPUX86State *env = &cpu->env;
1123
1124
- env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
1125
+ env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
1126
1127
if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
1128
cpu_synchronize_state(cpu_state);
1129
--
64
--
1130
2.20.1
65
2.25.1
1131
66
1132
67
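(Recap, distilled from the hunks above rather than new code: the per-vCPU HVF state now sits behind a pointer on CPUState, so accessors use cpu->hvf->fd where they previously used the cpu->hvf_fd field.)

    struct hvf_vcpu_state {
        int fd;    /* Hypervisor.framework vcpu handle */
    };

    /* hvf_init_vcpu():    cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
     *                     hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
     * hvf_vcpu_destroy(): g_free(cpu->hvf); cpu->hvf = NULL;  */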
1
From: Alexander Graf <agraf@csgraf.de>
1
From: Gavin Shan <gshan@redhat.com>
2
2
3
We can move the definition of hvf_vcpu_exec() into our internal
3
This adds cluster-id to the CPU instance properties, which will be used
4
hvf header, obsoleting the need for hvf-accel-ops.h.
4
by arm/virt machine. Besides, the cluster-id is also verified or
5
dumped in various spots:
5
6
6
Signed-off-by: Alexander Graf <agraf@csgraf.de>
7
* hw/core/machine.c::machine_set_cpu_numa_node() to associate
7
Reviewed-by: Sergio Lopez <slp@redhat.com>
8
CPU with its NUMA node.
8
Message-id: 20210519202253.76782-11-agraf@csgraf.de
9
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
* hw/core/machine.c::machine_numa_finish_cpu_init() to record
11
CPU slots with no NUMA mapping set.
12
13
* hw/core/machine-hmp-cmds.c::hmp_hotpluggable_cpus() to dump
14
cluster-id.
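(Editorial illustration, not from the patch: once a board also populates cluster-id, as arm/virt does later in this series, a CPU slot can be bound to a NUMA node with the full topology path, for example:)

    -numa cpu,node-id=0,socket-id=0,cluster-id=1,core-id=0,thread-id=0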
15
16
Signed-off-by: Gavin Shan <gshan@redhat.com>
17
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
18
Acked-by: Igor Mammedov <imammedo@redhat.com>
19
Message-id: 20220503140304.855514-2-gshan@redhat.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
21
---
12
accel/hvf/hvf-accel-ops.h | 17 -----------------
22
qapi/machine.json | 6 ++++--
13
include/sysemu/hvf_int.h | 1 +
23
hw/core/machine-hmp-cmds.c | 4 ++++
14
accel/hvf/hvf-accel-ops.c | 2 --
24
hw/core/machine.c | 16 ++++++++++++++++
15
target/i386/hvf/hvf.c | 2 --
25
3 files changed, 24 insertions(+), 2 deletions(-)
16
4 files changed, 1 insertion(+), 21 deletions(-)
17
delete mode 100644 accel/hvf/hvf-accel-ops.h
18
26
19
diff --git a/accel/hvf/hvf-accel-ops.h b/accel/hvf/hvf-accel-ops.h
27
diff --git a/qapi/machine.json b/qapi/machine.json
20
deleted file mode 100644
28
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX
29
--- a/qapi/machine.json
22
--- a/accel/hvf/hvf-accel-ops.h
30
+++ b/qapi/machine.json
23
+++ /dev/null
24
@@ -XXX,XX +XXX,XX @@
31
@@ -XXX,XX +XXX,XX @@
25
-/*
32
# @node-id: NUMA node ID the CPU belongs to
26
- * Accelerator CPUS Interface
33
# @socket-id: socket number within node/board the CPU belongs to
27
- *
34
# @die-id: die number within socket the CPU belongs to (since 4.1)
28
- * Copyright 2020 SUSE LLC
35
-# @core-id: core number within die the CPU belongs to
29
- *
36
+# @cluster-id: cluster number within die the CPU belongs to (since 7.1)
30
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
37
+# @core-id: core number within cluster the CPU belongs to
31
- * See the COPYING file in the top-level directory.
38
# @thread-id: thread number within core the CPU belongs to
32
- */
39
#
33
-
40
-# Note: currently there are 5 properties that could be present
34
-#ifndef HVF_CPUS_H
41
+# Note: currently there are 6 properties that could be present
35
-#define HVF_CPUS_H
42
# but management should be prepared to pass through other
36
-
43
# properties with device_add command to allow for future
37
-#include "sysemu/cpus.h"
44
# interface extension. This also requires the filed names to be kept in
38
-
45
@@ -XXX,XX +XXX,XX @@
39
-int hvf_vcpu_exec(CPUState *);
46
'data': { '*node-id': 'int',
40
-
47
'*socket-id': 'int',
41
-#endif /* HVF_CPUS_H */
48
'*die-id': 'int',
42
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
49
+ '*cluster-id': 'int',
50
'*core-id': 'int',
51
'*thread-id': 'int'
52
}
53
diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c
43
index XXXXXXX..XXXXXXX 100644
54
index XXXXXXX..XXXXXXX 100644
44
--- a/include/sysemu/hvf_int.h
55
--- a/hw/core/machine-hmp-cmds.c
45
+++ b/include/sysemu/hvf_int.h
56
+++ b/hw/core/machine-hmp-cmds.c
46
@@ -XXX,XX +XXX,XX @@ extern HVFState *hvf_state;
57
@@ -XXX,XX +XXX,XX @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict)
47
void assert_hvf_ok(hv_return_t ret);
58
if (c->has_die_id) {
48
int hvf_arch_init_vcpu(CPUState *cpu);
59
monitor_printf(mon, " die-id: \"%" PRIu64 "\"\n", c->die_id);
49
void hvf_arch_vcpu_destroy(CPUState *cpu);
60
}
50
+int hvf_vcpu_exec(CPUState *);
61
+ if (c->has_cluster_id) {
51
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
62
+ monitor_printf(mon, " cluster-id: \"%" PRIu64 "\"\n",
52
int hvf_put_registers(CPUState *);
63
+ c->cluster_id);
53
int hvf_get_registers(CPUState *);
64
+ }
54
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
65
if (c->has_core_id) {
66
monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id);
67
}
68
diff --git a/hw/core/machine.c b/hw/core/machine.c
55
index XXXXXXX..XXXXXXX 100644
69
index XXXXXXX..XXXXXXX 100644
56
--- a/accel/hvf/hvf-accel-ops.c
70
--- a/hw/core/machine.c
57
+++ b/accel/hvf/hvf-accel-ops.c
71
+++ b/hw/core/machine.c
58
@@ -XXX,XX +XXX,XX @@
72
@@ -XXX,XX +XXX,XX @@ void machine_set_cpu_numa_node(MachineState *machine,
59
#include "sysemu/runstate.h"
73
return;
60
#include "qemu/guest-random.h"
74
}
61
75
62
-#include "hvf-accel-ops.h"
76
+ if (props->has_cluster_id && !slot->props.has_cluster_id) {
63
-
77
+ error_setg(errp, "cluster-id is not supported");
64
HVFState *hvf_state;
78
+ return;
65
79
+ }
66
/* Memory slots */
80
+
67
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
81
if (props->has_socket_id && !slot->props.has_socket_id) {
68
index XXXXXXX..XXXXXXX 100644
82
error_setg(errp, "socket-id is not supported");
69
--- a/target/i386/hvf/hvf.c
83
return;
70
+++ b/target/i386/hvf/hvf.c
84
@@ -XXX,XX +XXX,XX @@ void machine_set_cpu_numa_node(MachineState *machine,
71
@@ -XXX,XX +XXX,XX @@
85
continue;
72
#include "qemu/accel.h"
86
}
73
#include "target/i386/cpu.h"
87
74
88
+ if (props->has_cluster_id &&
75
-#include "hvf-accel-ops.h"
89
+ props->cluster_id != slot->props.cluster_id) {
76
-
90
+ continue;
77
void vmx_update_tpr(CPUState *cpu)
91
+ }
78
{
92
+
79
/* TODO: need integrate APIC handling */
93
if (props->has_die_id && props->die_id != slot->props.die_id) {
94
continue;
95
}
96
@@ -XXX,XX +XXX,XX @@ static char *cpu_slot_to_string(const CPUArchId *cpu)
97
}
98
g_string_append_printf(s, "die-id: %"PRId64, cpu->props.die_id);
99
}
100
+ if (cpu->props.has_cluster_id) {
101
+ if (s->len) {
102
+ g_string_append_printf(s, ", ");
103
+ }
104
+ g_string_append_printf(s, "cluster-id: %"PRId64, cpu->props.cluster_id);
105
+ }
106
if (cpu->props.has_core_id) {
107
if (s->len) {
108
g_string_append_printf(s, ", ");
80
--
109
--
81
2.20.1
110
2.25.1
82
83
1
The e1000e_send_verify() test calls qemu_recv() but doesn't
1
From: Gavin Shan <gshan@redhat.com>
2
check that the call succeeded, which annoys Coverity. Add
3
an explicit test check for the length of the data.
4
2
5
(This is a test check, not a "we assume this syscall always
3
The CPU topology isn't enabled on arm/virt machine yet, but we're
6
succeeds", so we use g_assert_cmpint() rather than g_assert().)
4
going to do it in the next patch. After the CPU topology is enabled by
5
next patch, "thread-id=1" becomes invalid because the CPU core is
6
preferred on arm/virt machine. It means these two CPUs have 0/1
7
as their core IDs, but their thread IDs are all 0. It will trigger
8
test failure as the following message indicates:
7
9
8
Fixes: Coverity CID 1432324
10
[14/21 qemu:qtest+qtest-aarch64 / qtest-aarch64/numa-test ERROR
11
1.48s killed by signal 6 SIGABRT
12
>>> G_TEST_DBUS_DAEMON=/home/gavin/sandbox/qemu.main/tests/dbus-vmstate-daemon.sh \
13
QTEST_QEMU_STORAGE_DAEMON_BINARY=./storage-daemon/qemu-storage-daemon \
14
QTEST_QEMU_BINARY=./qemu-system-aarch64 \
15
QTEST_QEMU_IMG=./qemu-img MALLOC_PERTURB_=83 \
16
/home/gavin/sandbox/qemu.main/build/tests/qtest/numa-test --tap -k
17
――――――――――――――――――――――――――――――――――――――――――――――
18
stderr:
19
qemu-system-aarch64: -numa cpu,node-id=0,thread-id=1: no match found
20
21
This fixes the issue by providing comprehensive SMP configurations
22
in aarch64_numa_cpu(). The SMP configurations aren't used before
23
the CPU topology is enabled in the next patch.
24
25
Signed-off-by: Gavin Shan <gshan@redhat.com>
26
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
27
Message-id: 20220503140304.855514-3-gshan@redhat.com
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
11
Message-id: 20210525134458.6675-3-peter.maydell@linaro.org
12
---
29
---
13
tests/qtest/e1000e-test.c | 3 ++-
30
tests/qtest/numa-test.c | 3 ++-
14
1 file changed, 2 insertions(+), 1 deletion(-)
31
1 file changed, 2 insertions(+), 1 deletion(-)
15
32
16
diff --git a/tests/qtest/e1000e-test.c b/tests/qtest/e1000e-test.c
33
diff --git a/tests/qtest/numa-test.c b/tests/qtest/numa-test.c
17
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
18
--- a/tests/qtest/e1000e-test.c
35
--- a/tests/qtest/numa-test.c
19
+++ b/tests/qtest/e1000e-test.c
36
+++ b/tests/qtest/numa-test.c
20
@@ -XXX,XX +XXX,XX @@ static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *a
37
@@ -XXX,XX +XXX,XX @@ static void aarch64_numa_cpu(const void *data)
21
/* Check data sent to the backend */
38
QTestState *qts;
22
ret = qemu_recv(test_sockets[0], &recv_len, sizeof(recv_len), 0);
39
g_autofree char *cli = NULL;
23
g_assert_cmpint(ret, == , sizeof(recv_len));
40
24
- qemu_recv(test_sockets[0], buffer, 64, 0);
41
- cli = make_cli(data, "-machine smp.cpus=2 "
25
+ ret = qemu_recv(test_sockets[0], buffer, 64, 0);
42
+ cli = make_cli(data, "-machine "
26
+ g_assert_cmpint(ret, >=, 5);
43
+ "smp.cpus=2,smp.sockets=1,smp.clusters=1,smp.cores=1,smp.threads=2 "
27
g_assert_cmpstr(buffer, == , "TEST");
44
"-numa node,nodeid=0,memdev=ram -numa node,nodeid=1 "
28
45
"-numa cpu,node-id=1,thread-id=0 "
29
/* Free test data buffer */
46
"-numa cpu,node-id=0,thread-id=1");
30
--
47
--
31
2.20.1
48
2.25.1
32
49
33
50
1
From: Alexander Graf <agraf@csgraf.de>
1
From: Gavin Shan <gshan@redhat.com>
2
2
3
The ARM version of Hypervisor.framework no longer defines these two
3
Currently, the SMP configuration isn't considered when the CPU
4
types, so let's just revert to standard ones.
4
topology is populated. In this case, it's impossible to provide
5
the default CPU-to-NUMA mapping or association based on the socket
6
ID of the given CPU.
5
7
6
Signed-off-by: Alexander Graf <agraf@csgraf.de>
8
This takes the SMP configuration into account when the CPU topology
7
Reviewed-by: Sergio Lopez <slp@redhat.com>
9
is populated. The die ID for the given CPU isn't assigned since
8
Message-id: 20210519202253.76782-7-agraf@csgraf.de
10
it's not supported on arm/virt machine. Besides, the used SMP
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
configuration in qtest/numa-test/aarch64_numa_cpu() is corrected
12
to avoid a test failure.
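(Worked example with illustrative numbers, not taken from the patch: with -smp sockets=2,clusters=2,cores=2,threads=2, the derivation added to virt_possible_cpu_arch_ids() below yields, for CPU index n = 5:

    socket_id  = 5 / (2 * 2 * 2)    = 0
    cluster_id = (5 / (2 * 2)) % 2  = 1
    core_id    = (5 / 2) % 2        = 0
    thread_id  = 5 % 2              = 1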
13
14
Signed-off-by: Gavin Shan <gshan@redhat.com>
15
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
16
Acked-by: Igor Mammedov <imammedo@redhat.com>
17
Message-id: 20220503140304.855514-4-gshan@redhat.com
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
19
---
12
accel/hvf/hvf-accel-ops.c | 6 +++---
20
hw/arm/virt.c | 15 ++++++++++++++-
13
1 file changed, 3 insertions(+), 3 deletions(-)
21
1 file changed, 14 insertions(+), 1 deletion(-)
14
22
15
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
23
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
16
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/hvf/hvf-accel-ops.c
25
--- a/hw/arm/virt.c
18
+++ b/accel/hvf/hvf-accel-ops.c
26
+++ b/hw/arm/virt.c
19
@@ -XXX,XX +XXX,XX @@ static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
27
@@ -XXX,XX +XXX,XX @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
20
macslot->present = 1;
28
int n;
21
macslot->gpa_start = slot->start;
29
unsigned int max_cpus = ms->smp.max_cpus;
22
macslot->size = slot->size;
30
VirtMachineState *vms = VIRT_MACHINE(ms);
23
- ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);
31
+ MachineClass *mc = MACHINE_GET_CLASS(vms);
24
+ ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
32
25
assert_hvf_ok(ret);
33
if (ms->possible_cpus) {
26
return 0;
34
assert(ms->possible_cpus->len == max_cpus);
27
}
35
@@ -XXX,XX +XXX,XX @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
28
@@ -XXX,XX +XXX,XX @@ static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
36
ms->possible_cpus->cpus[n].type = ms->cpu_type;
29
/* protect region against writes; begin tracking it */
37
ms->possible_cpus->cpus[n].arch_id =
30
if (on) {
38
virt_cpu_mp_affinity(vms, n);
31
slot->flags |= HVF_SLOT_LOG;
39
+
32
- hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
40
+ assert(!mc->smp_props.dies_supported);
33
+ hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
41
+ ms->possible_cpus->cpus[n].props.has_socket_id = true;
34
HV_MEMORY_READ);
42
+ ms->possible_cpus->cpus[n].props.socket_id =
35
/* stop tracking region*/
43
+ n / (ms->smp.clusters * ms->smp.cores * ms->smp.threads);
36
} else {
44
+ ms->possible_cpus->cpus[n].props.has_cluster_id = true;
37
slot->flags &= ~HVF_SLOT_LOG;
45
+ ms->possible_cpus->cpus[n].props.cluster_id =
38
- hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
46
+ (n / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters;
39
+ hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
47
+ ms->possible_cpus->cpus[n].props.has_core_id = true;
40
HV_MEMORY_READ | HV_MEMORY_WRITE);
48
+ ms->possible_cpus->cpus[n].props.core_id =
49
+ (n / ms->smp.threads) % ms->smp.cores;
50
ms->possible_cpus->cpus[n].props.has_thread_id = true;
51
- ms->possible_cpus->cpus[n].props.thread_id = n;
52
+ ms->possible_cpus->cpus[n].props.thread_id =
53
+ n % ms->smp.threads;
41
}
54
}
55
return ms->possible_cpus;
42
}
56
}
--
2.20.1

--
2.25.1
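As an aside for anyone following the index arithmetic in the hw/arm/virt.c hunk above: the socket/cluster/core/thread IDs are plain div/mod decompositions of the linear CPU index. Below is a minimal standalone C sketch of that arithmetic; it is an illustration only (the struct, the names and the -smp values are made up, this is not QEMU code):

  #include <stdio.h>

  struct smp_cfg { int sockets, clusters, cores, threads; };

  int main(void)
  {
      /* Hypothetical -smp 12,sockets=2,clusters=1,cores=3,threads=2 layout. */
      struct smp_cfg smp = { 2, 1, 3, 2 };
      int max_cpus = smp.sockets * smp.clusters * smp.cores * smp.threads;

      for (int n = 0; n < max_cpus; n++) {
          int socket_id  = n / (smp.clusters * smp.cores * smp.threads);
          int cluster_id = (n / (smp.cores * smp.threads)) % smp.clusters;
          int core_id    = (n / smp.threads) % smp.cores;
          int thread_id  = n % smp.threads;

          /* e.g. CPU#7 -> socket 1, cluster 0, core 0, thread 1 */
          printf("CPU#%-2d -> socket %d cluster %d core %d thread %d\n",
                 n, socket_id, cluster_id, core_id, thread_id);
      }
      return 0;
  }

A die level would add one more divisor between socket and cluster, which is why the hunk asserts that dies are not supported on this machine.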
From: Alexander Graf <agraf@csgraf.de>

The hooks we have that call us after reset, init and loadvm really all
just want to say "The reference of all register state is in the QEMU
vcpu struct, please push it".

We already have a working pushing mechanism though called cpu->vcpu_dirty,
so we can just reuse that for all of the above, syncing state properly the
next time we actually execute a vCPU.

This fixes PSCI resets on ARM, as they modify CPU state even after the
post init call has completed, but before we execute the vCPU again.

To also make the scheme work for x86, we have to make sure we don't
move stale eflags into our env when the vcpu state is dirty.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>
Tested-by: Roman Bolshakov <r.bolshakov@yadro.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Message-id: 20210519202253.76782-13-agraf@csgraf.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 accel/hvf/hvf-accel-ops.c | 27 +++++++--------------------
 target/i386/hvf/x86hvf.c  |  5 ++++-
 2 files changed, 11 insertions(+), 21 deletions(-)

From: Gavin Shan <gshan@redhat.com>

In aarch64_numa_cpu(), the CPU and NUMA association is as shown below:
two threads in the same core/cluster/socket are associated with two
separate NUMA nodes, which is unrealistic, as Igor Mammedov pointed
out. We don't expect the association to cross the socket boundary,
which matches the real world.

  NUMA-node  socket  cluster  core  thread
  -----------------------------------------
      0         0       0       0      0
      1         0       0       0      1

This corrects the topology for the CPUs and their association with
NUMA nodes. After this patch is applied, the CPU and NUMA association
becomes the following, which looks realistic. Besides,
socket/cluster/core/thread IDs are all checked when the NUMA node IDs
are verified, which helps confirm that the CPU topology is properly
populated.

  NUMA-node  socket  cluster  core  thread
  -----------------------------------------
      0         1       0       0      0
      1         0       0       0      0

Suggested-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Gavin Shan <gshan@redhat.com>
Acked-by: Igor Mammedov <imammedo@redhat.com>
Message-id: 20220503140304.855514-5-gshan@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/qtest/numa-test.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

28
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
35
diff --git a/tests/qtest/numa-test.c b/tests/qtest/numa-test.c
29
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
30
--- a/accel/hvf/hvf-accel-ops.c
37
--- a/tests/qtest/numa-test.c
31
+++ b/accel/hvf/hvf-accel-ops.c
38
+++ b/tests/qtest/numa-test.c
32
@@ -XXX,XX +XXX,XX @@ static void hvf_cpu_synchronize_state(CPUState *cpu)
39
@@ -XXX,XX +XXX,XX @@ static void aarch64_numa_cpu(const void *data)
33
}
40
g_autofree char *cli = NULL;
34
}
41
35
42
cli = make_cli(data, "-machine "
36
-static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
43
- "smp.cpus=2,smp.sockets=1,smp.clusters=1,smp.cores=1,smp.threads=2 "
37
- run_on_cpu_data arg)
44
+ "smp.cpus=2,smp.sockets=2,smp.clusters=1,smp.cores=1,smp.threads=1 "
38
+static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
45
"-numa node,nodeid=0,memdev=ram -numa node,nodeid=1 "
39
+ run_on_cpu_data arg)
46
- "-numa cpu,node-id=1,thread-id=0 "
40
{
47
- "-numa cpu,node-id=0,thread-id=1");
41
- hvf_put_registers(cpu);
48
+ "-numa cpu,node-id=0,socket-id=1,cluster-id=0,core-id=0,thread-id=0 "
42
- cpu->vcpu_dirty = false;
49
+ "-numa cpu,node-id=1,socket-id=0,cluster-id=0,core-id=0,thread-id=0");
43
+ /* QEMU state is the reference, push it to HVF now and on next entry */
50
qts = qtest_init(cli);
44
+ cpu->vcpu_dirty = true;
51
cpus = get_cpus(qts, &resp);
45
}
52
g_assert(cpus);
46
53
47
static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
54
while ((e = qlist_pop(cpus))) {
48
{
55
QDict *cpu, *props;
49
- run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
56
- int64_t thread, node;
50
-}
57
+ int64_t socket, cluster, core, thread, node;
51
-
58
52
-static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
59
cpu = qobject_to(QDict, e);
53
- run_on_cpu_data arg)
60
g_assert(qdict_haskey(cpu, "props"));
54
-{
61
@@ -XXX,XX +XXX,XX @@ static void aarch64_numa_cpu(const void *data)
55
- hvf_put_registers(cpu);
62
56
- cpu->vcpu_dirty = false;
63
g_assert(qdict_haskey(props, "node-id"));
57
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
64
node = qdict_get_int(props, "node-id");
58
}
65
+ g_assert(qdict_haskey(props, "socket-id"));
59
66
+ socket = qdict_get_int(props, "socket-id");
60
static void hvf_cpu_synchronize_post_init(CPUState *cpu)
67
+ g_assert(qdict_haskey(props, "cluster-id"));
61
{
68
+ cluster = qdict_get_int(props, "cluster-id");
62
- run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
69
+ g_assert(qdict_haskey(props, "core-id"));
63
-}
70
+ core = qdict_get_int(props, "core-id");
64
-
71
g_assert(qdict_haskey(props, "thread-id"));
65
-static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
72
thread = qdict_get_int(props, "thread-id");
66
- run_on_cpu_data arg)
73
67
-{
74
- if (thread == 0) {
68
- cpu->vcpu_dirty = true;
75
+ if (socket == 0 && cluster == 0 && core == 0 && thread == 0) {
69
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
76
g_assert_cmpint(node, ==, 1);
70
}
77
- } else if (thread == 1) {
71
78
+ } else if (socket == 1 && cluster == 0 && core == 0 && thread == 0) {
72
static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
79
g_assert_cmpint(node, ==, 0);
73
{
80
} else {
74
- run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
81
g_assert(false);
75
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
76
}
77
78
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
79
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
80
index XXXXXXX..XXXXXXX 100644
81
--- a/target/i386/hvf/x86hvf.c
82
+++ b/target/i386/hvf/x86hvf.c
83
@@ -XXX,XX +XXX,XX @@ int hvf_process_events(CPUState *cpu_state)
84
X86CPU *cpu = X86_CPU(cpu_state);
85
CPUX86State *env = &cpu->env;
86
87
- env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
88
+ if (!cpu_state->vcpu_dirty) {
89
+ /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */
90
+ env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
91
+ }
92
93
if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
94
cpu_synchronize_state(cpu_state);
95
--
82
--
96
2.20.1
83
2.25.1
97
98
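As a side note on the hvf change above: the lazy-sync idea is easier to see in isolation. The following toy C sketch (invented names and fields, not QEMU code) shows the "mark the QEMU copy dirty now, push it on the next vCPU entry" pattern that the post-reset/post-init/pre-loadvm hooks now share:

  #include <stdbool.h>
  #include <stdio.h>

  struct toy_vcpu {
      bool dirty;     /* QEMU-side copy is the reference and must be pushed */
      int qemu_regs;  /* stand-in for the register state kept in QEMU */
      int hw_regs;    /* stand-in for the hypervisor's copy */
  };

  /* The post-reset/post-init/pre-loadvm hooks only mark the state dirty... */
  static void synchronize_post_reset(struct toy_vcpu *cpu)
  {
      cpu->dirty = true;
  }

  /* ...and the actual push happens once, just before the vCPU runs. */
  static void vcpu_run(struct toy_vcpu *cpu)
  {
      if (cpu->dirty) {
          cpu->hw_regs = cpu->qemu_regs;  /* hvf_put_registers() in the real code */
          cpu->dirty = false;
      }
      printf("entering guest with regs=%d\n", cpu->hw_regs);
  }

  int main(void)
  {
      struct toy_vcpu cpu = { .dirty = false, .qemu_regs = 1, .hw_regs = 1 };

      synchronize_post_reset(&cpu);  /* reset hook: just set dirty */
      cpu.qemu_regs = 2;             /* e.g. a PSCI reset changes state afterwards */
      vcpu_run(&cpu);                /* pushes 2, not the stale 1 */
      return 0;
  }

The x86 hunk follows from the same idea: while the QEMU copy is dirty, reading eflags back from the hypervisor would overwrite the reference state, so that lightweight sync is skipped.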
From: Alexander Graf <agraf@csgraf.de>

The hvf_set_phys_mem() function is only called within the same file.
Make it static.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Message-id: 20210519202253.76782-6-agraf@csgraf.de
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/sysemu/hvf_int.h  | 1 -
 accel/hvf/hvf-accel-ops.c | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

From: Gavin Shan <gshan@redhat.com>

When the CPU-to-NUMA association isn't explicitly provided by the user,
the default one is given by mc->get_default_cpu_node_id(). However, the
CPU topology isn't fully considered in the default association, and this
causes "CPU topology broken" warnings when booting a Linux guest.

For example, the following warnings are observed when the Linux guest
is booted with this command line:

  /home/gavin/sandbox/qemu.main/build/qemu-system-aarch64 \
  -accel kvm -machine virt,gic-version=host \
  -cpu host \
  -smp 6,sockets=2,cores=3,threads=1 \
  -m 1024M,slots=16,maxmem=64G \
  -object memory-backend-ram,id=mem0,size=128M \
  -object memory-backend-ram,id=mem1,size=128M \
  -object memory-backend-ram,id=mem2,size=128M \
  -object memory-backend-ram,id=mem3,size=128M \
  -object memory-backend-ram,id=mem4,size=128M \
  -object memory-backend-ram,id=mem5,size=384M \
  -numa node,nodeid=0,memdev=mem0 \
  -numa node,nodeid=1,memdev=mem1 \
  -numa node,nodeid=2,memdev=mem2 \
  -numa node,nodeid=3,memdev=mem3 \
  -numa node,nodeid=4,memdev=mem4 \
  -numa node,nodeid=5,memdev=mem5
  :
  alternatives: patching kernel code
  BUG: arch topology borken
  the CLS domain not a subset of the MC domain
  <the above error log repeats>
  BUG: arch topology borken
  the DIE domain not a subset of the NODE domain

With the current implementation of mc->get_default_cpu_node_id(),
CPU#0 to CPU#5 are associated with NODE#0 to NODE#5 respectively.
That's incorrect because CPU#0/1/2 should be associated with the same
NUMA node, since they sit in the same socket.

This fixes the issue by considering the socket ID when the default
CPU-to-NUMA association is provided in virt_possible_cpu_arch_ids().
With this applied, no more CPU topology broken warnings are seen
from the Linux guest. The 6 CPUs are associated with NODE#0/1, and
no CPUs are associated with NODE#2/3/4/5.

Signed-off-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
Message-id: 20220503140304.855514-6-gshan@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/virt.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

16
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
56
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
17
index XXXXXXX..XXXXXXX 100644
57
index XXXXXXX..XXXXXXX 100644
18
--- a/include/sysemu/hvf_int.h
58
--- a/hw/arm/virt.c
19
+++ b/include/sysemu/hvf_int.h
59
+++ b/hw/arm/virt.c
20
@@ -XXX,XX +XXX,XX @@ struct HVFState {
60
@@ -XXX,XX +XXX,XX @@ virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
21
};
61
22
extern HVFState *hvf_state;
62
static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx)
23
63
{
24
-void hvf_set_phys_mem(MemoryRegionSection *, bool);
64
- return idx % ms->numa_state->num_nodes;
25
void assert_hvf_ok(hv_return_t ret);
65
+ int64_t socket_id = ms->possible_cpus->cpus[idx].props.socket_id;
26
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
66
+
27
int hvf_put_registers(CPUState *);
67
+ return socket_id % ms->numa_state->num_nodes;
28
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/accel/hvf/hvf-accel-ops.c
31
+++ b/accel/hvf/hvf-accel-ops.c
32
@@ -XXX,XX +XXX,XX @@ static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
33
return 0;
34
}
68
}
35
69
36
-void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
70
static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
37
+static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
38
{
39
hvf_slot *mem;
40
MemoryRegion *area = section->mr;
41
--
71
--
42
2.20.1
72
2.25.1
43
44
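To make the before/after behaviour concrete, here is a small standalone C sketch (an illustration only, not QEMU code) of the default node assignment for the -smp 6,sockets=2,cores=3,threads=1 example above, assuming six NUMA nodes are defined:

  #include <stdio.h>

  int main(void)
  {
      /* The -smp 6,sockets=2,cores=3,threads=1 example with 6 NUMA nodes. */
      const int num_nodes = 6, cores = 3, threads = 1;
      const int max_cpus = 6;

      for (int idx = 0; idx < max_cpus; idx++) {
          int socket_id = idx / (cores * threads);
          int old_node = idx % num_nodes;        /* previous default mapping */
          int new_node = socket_id % num_nodes;  /* mapping after this patch */

          printf("CPU#%d: socket %d, old default node %d, new default node %d\n",
                 idx, socket_id, old_node, new_node);
      }
      return 0;
  }

With the old index-based mapping each CPU lands on its own node; with the socket-based mapping CPUs 0-2 share node 0 and CPUs 3-5 share node 1, so nodes 2-5 simply end up without CPUs.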
From: Alexander Graf <agraf@csgraf.de>

Until now, Hypervisor.framework has only been available on x86_64 systems.
With Apple Silicon shipping now, it extends its reach to aarch64. To
prepare for support for multiple architectures, let's start moving common
code out into its own accel directory.

This patch splits the vcpu init and destroy functions into a generic and
an architecture specific portion. This also allows us to move the generic
functions into the generic hvf code, removing exported functions.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Message-id: 20210519202253.76782-8-agraf@csgraf.de
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 accel/hvf/hvf-accel-ops.h |  2 --
 include/sysemu/hvf_int.h  |  2 ++
 accel/hvf/hvf-accel-ops.c | 30 ++++++++++++++++++++++++++++++
 target/i386/hvf/hvf.c     | 23 ++---------------------
 4 files changed, 34 insertions(+), 23 deletions(-)

From: Gavin Shan <gshan@redhat.com>

When the PPTT table is built, the CPU topology is recalculated, which
is unnecessary because the CPU topology has already been populated in
virt_possible_cpu_arch_ids() on the arm/virt machine.

This reworks build_pptt() to avoid that by reusing the existing IDs in
ms->possible_cpus. Currently, the only user of build_pptt() is the
arm/virt machine.

Signed-off-by: Gavin Shan <gshan@redhat.com>
Tested-by: Yanan Wang <wangyanan55@huawei.com>
Reviewed-by: Yanan Wang <wangyanan55@huawei.com>
Acked-by: Igor Mammedov <imammedo@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Message-id: 20220503140304.855514-7-gshan@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/acpi/aml-build.c | 111 +++++++++++++++++++-------------------
 1 file changed, 48 insertions(+), 63 deletions(-)

24
diff --git a/accel/hvf/hvf-accel-ops.h b/accel/hvf/hvf-accel-ops.h
22
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
25
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
26
--- a/accel/hvf/hvf-accel-ops.h
24
--- a/hw/acpi/aml-build.c
27
+++ b/accel/hvf/hvf-accel-ops.h
25
+++ b/hw/acpi/aml-build.c
28
@@ -XXX,XX +XXX,XX @@
26
@@ -XXX,XX +XXX,XX @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
29
27
const char *oem_id, const char *oem_table_id)
30
#include "sysemu/cpus.h"
31
32
-int hvf_init_vcpu(CPUState *);
33
int hvf_vcpu_exec(CPUState *);
34
void hvf_cpu_synchronize_state(CPUState *);
35
void hvf_cpu_synchronize_post_reset(CPUState *);
36
void hvf_cpu_synchronize_post_init(CPUState *);
37
void hvf_cpu_synchronize_pre_loadvm(CPUState *);
38
-void hvf_vcpu_destroy(CPUState *);
39
40
#endif /* HVF_CPUS_H */
41
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/include/sysemu/hvf_int.h
44
+++ b/include/sysemu/hvf_int.h
45
@@ -XXX,XX +XXX,XX @@ struct HVFState {
46
extern HVFState *hvf_state;
47
48
void assert_hvf_ok(hv_return_t ret);
49
+int hvf_arch_init_vcpu(CPUState *cpu);
50
+void hvf_arch_vcpu_destroy(CPUState *cpu);
51
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
52
int hvf_put_registers(CPUState *);
53
int hvf_get_registers(CPUState *);
54
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/accel/hvf/hvf-accel-ops.c
57
+++ b/accel/hvf/hvf-accel-ops.c
58
@@ -XXX,XX +XXX,XX @@ static void hvf_type_init(void)
59
60
type_init(hvf_type_init);
61
62
+static void hvf_vcpu_destroy(CPUState *cpu)
63
+{
64
+ hv_return_t ret = hv_vcpu_destroy(cpu->hvf_fd);
65
+ assert_hvf_ok(ret);
66
+
67
+ hvf_arch_vcpu_destroy(cpu);
68
+}
69
+
70
+static int hvf_init_vcpu(CPUState *cpu)
71
+{
72
+ int r;
73
+
74
+ /* init cpu signals */
75
+ sigset_t set;
76
+ struct sigaction sigact;
77
+
78
+ memset(&sigact, 0, sizeof(sigact));
79
+ sigact.sa_handler = dummy_signal;
80
+ sigaction(SIG_IPI, &sigact, NULL);
81
+
82
+ pthread_sigmask(SIG_BLOCK, NULL, &set);
83
+ sigdelset(&set, SIG_IPI);
84
+
85
+ r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
86
+ cpu->vcpu_dirty = 1;
87
+ assert_hvf_ok(r);
88
+
89
+ return hvf_arch_init_vcpu(cpu);
90
+}
91
+
92
/*
93
* The HVF-specific vCPU thread function. This one should only run when the host
94
* CPU supports the VMX "unrestricted guest" feature.
95
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
96
index XXXXXXX..XXXXXXX 100644
97
--- a/target/i386/hvf/hvf.c
98
+++ b/target/i386/hvf/hvf.c
99
@@ -XXX,XX +XXX,XX @@ static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
100
return false;
101
}
102
103
-void hvf_vcpu_destroy(CPUState *cpu)
104
+void hvf_arch_vcpu_destroy(CPUState *cpu)
105
{
28
{
106
X86CPU *x86_cpu = X86_CPU(cpu);
29
MachineClass *mc = MACHINE_GET_CLASS(ms);
107
CPUX86State *env = &x86_cpu->env;
30
- GQueue *list = g_queue_new();
108
31
- guint pptt_start = table_data->len;
109
- hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);
32
- guint parent_offset;
110
g_free(env->hvf_mmio_buf);
33
- guint length, i;
111
- assert_hvf_ok(ret);
34
- int uid = 0;
112
}
35
- int socket;
113
36
+ CPUArchIdList *cpus = ms->possible_cpus;
114
static void init_tsc_freq(CPUX86State *env)
37
+ int64_t socket_id = -1, cluster_id = -1, core_id = -1;
115
@@ -XXX,XX +XXX,XX @@ static inline bool apic_bus_freq_is_known(CPUX86State *env)
38
+ uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0;
116
return env->apic_bus_freq != 0;
39
+ uint32_t pptt_start = table_data->len;
117
}
40
+ int n;
118
41
AcpiTable table = { .sig = "PPTT", .rev = 2,
119
-int hvf_init_vcpu(CPUState *cpu)
42
.oem_id = oem_id, .oem_table_id = oem_table_id };
120
+int hvf_arch_init_vcpu(CPUState *cpu)
43
121
{
44
acpi_table_begin(&table, table_data);
45
46
- for (socket = 0; socket < ms->smp.sockets; socket++) {
47
- g_queue_push_tail(list,
48
- GUINT_TO_POINTER(table_data->len - pptt_start));
49
- build_processor_hierarchy_node(
50
- table_data,
51
- /*
52
- * Physical package - represents the boundary
53
- * of a physical package
54
- */
55
- (1 << 0),
56
- 0, socket, NULL, 0);
57
- }
122
-
58
-
123
X86CPU *x86cpu = X86_CPU(cpu);
59
- if (mc->smp_props.clusters_supported) {
124
CPUX86State *env = &x86cpu->env;
60
- length = g_queue_get_length(list);
125
- int r;
61
- for (i = 0; i < length; i++) {
62
- int cluster;
126
-
63
-
127
- /* init cpu signals */
64
- parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
128
- sigset_t set;
65
- for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
129
- struct sigaction sigact;
66
- g_queue_push_tail(list,
67
- GUINT_TO_POINTER(table_data->len - pptt_start));
68
- build_processor_hierarchy_node(
69
- table_data,
70
- (0 << 0), /* not a physical package */
71
- parent_offset, cluster, NULL, 0);
72
- }
73
+ /*
74
+ * This works with the assumption that cpus[n].props.*_id has been
75
+ * sorted from top to down levels in mc->possible_cpu_arch_ids().
76
+ * Otherwise, the unexpected and duplicated containers will be
77
+ * created.
78
+ */
79
+ for (n = 0; n < cpus->len; n++) {
80
+ if (cpus->cpus[n].props.socket_id != socket_id) {
81
+ assert(cpus->cpus[n].props.socket_id > socket_id);
82
+ socket_id = cpus->cpus[n].props.socket_id;
83
+ cluster_id = -1;
84
+ core_id = -1;
85
+ socket_offset = table_data->len - pptt_start;
86
+ build_processor_hierarchy_node(table_data,
87
+ (1 << 0), /* Physical package */
88
+ 0, socket_id, NULL, 0);
89
}
90
- }
91
92
- length = g_queue_get_length(list);
93
- for (i = 0; i < length; i++) {
94
- int core;
130
-
95
-
131
- memset(&sigact, 0, sizeof(sigact));
96
- parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
132
- sigact.sa_handler = dummy_signal;
97
- for (core = 0; core < ms->smp.cores; core++) {
133
- sigaction(SIG_IPI, &sigact, NULL);
98
- if (ms->smp.threads > 1) {
134
-
99
- g_queue_push_tail(list,
135
- pthread_sigmask(SIG_BLOCK, NULL, &set);
100
- GUINT_TO_POINTER(table_data->len - pptt_start));
136
- sigdelset(&set, SIG_IPI);
101
- build_processor_hierarchy_node(
137
102
- table_data,
138
init_emu();
103
- (0 << 0), /* not a physical package */
139
init_decoder();
104
- parent_offset, core, NULL, 0);
140
@@ -XXX,XX +XXX,XX @@ int hvf_init_vcpu(CPUState *cpu)
105
- } else {
106
- build_processor_hierarchy_node(
107
- table_data,
108
- (1 << 1) | /* ACPI Processor ID valid */
109
- (1 << 3), /* Node is a Leaf */
110
- parent_offset, uid++, NULL, 0);
111
+ if (mc->smp_props.clusters_supported) {
112
+ if (cpus->cpus[n].props.cluster_id != cluster_id) {
113
+ assert(cpus->cpus[n].props.cluster_id > cluster_id);
114
+ cluster_id = cpus->cpus[n].props.cluster_id;
115
+ core_id = -1;
116
+ cluster_offset = table_data->len - pptt_start;
117
+ build_processor_hierarchy_node(table_data,
118
+ (0 << 0), /* Not a physical package */
119
+ socket_offset, cluster_id, NULL, 0);
120
}
121
+ } else {
122
+ cluster_offset = socket_offset;
123
}
124
- }
125
126
- length = g_queue_get_length(list);
127
- for (i = 0; i < length; i++) {
128
- int thread;
129
+ if (ms->smp.threads == 1) {
130
+ build_processor_hierarchy_node(table_data,
131
+ (1 << 1) | /* ACPI Processor ID valid */
132
+ (1 << 3), /* Node is a Leaf */
133
+ cluster_offset, n, NULL, 0);
134
+ } else {
135
+ if (cpus->cpus[n].props.core_id != core_id) {
136
+ assert(cpus->cpus[n].props.core_id > core_id);
137
+ core_id = cpus->cpus[n].props.core_id;
138
+ core_offset = table_data->len - pptt_start;
139
+ build_processor_hierarchy_node(table_data,
140
+ (0 << 0), /* Not a physical package */
141
+ cluster_offset, core_id, NULL, 0);
142
+ }
143
144
- parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
145
- for (thread = 0; thread < ms->smp.threads; thread++) {
146
- build_processor_hierarchy_node(
147
- table_data,
148
+ build_processor_hierarchy_node(table_data,
149
(1 << 1) | /* ACPI Processor ID valid */
150
(1 << 2) | /* Processor is a Thread */
151
(1 << 3), /* Node is a Leaf */
152
- parent_offset, uid++, NULL, 0);
153
+ core_offset, n, NULL, 0);
141
}
154
}
142
}
155
}
143
156
144
- r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
157
- g_queue_free(list);
145
- cpu->vcpu_dirty = 1;
158
acpi_table_end(linker, &table);
146
- assert_hvf_ok(r);
159
}
147
-
160
148
if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
149
&hvf_state->hvf_caps->vmx_cap_pinbased)) {
150
abort();
151
--
161
--
152
2.20.1
162
2.25.1
153
154
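The traversal pattern behind the reworked build_pptt() is worth seeing on its own. The sketch below (illustrative data and names, not QEMU code) walks a pre-sorted CPU list and opens a new socket/cluster/core container only when the corresponding ID changes, then emits one leaf per CPU; the real function additionally honours mc->smp_props.clusters_supported and emits the leaf at the core level when threads == 1, both of which the sketch omits for brevity:

  #include <stdio.h>

  struct cpu_props { int socket_id, cluster_id, core_id; };

  int main(void)
  {
      /* Pre-sorted list for 2 sockets x 1 cluster x 2 cores, 1 thread each. */
      struct cpu_props cpus[] = { {0, 0, 0}, {0, 0, 1}, {1, 0, 0}, {1, 0, 1} };
      int len = (int)(sizeof(cpus) / sizeof(cpus[0]));
      int socket_id = -1, cluster_id = -1, core_id = -1;

      for (int n = 0; n < len; n++) {
          if (cpus[n].socket_id != socket_id) {
              socket_id = cpus[n].socket_id;
              cluster_id = -1;
              core_id = -1;
              printf("socket container %d\n", socket_id);
          }
          if (cpus[n].cluster_id != cluster_id) {
              cluster_id = cpus[n].cluster_id;
              core_id = -1;
              printf("  cluster container %d\n", cluster_id);
          }
          if (cpus[n].core_id != core_id) {
              core_id = cpus[n].core_id;
              printf("    core container %d\n", core_id);
          }
          printf("      leaf node for CPU#%d\n", n);
      }
      return 0;
  }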
Deleted patch
Coverity notes that we don't check for dup2() failing. Add some
assertions so that if it does ever happen we get some indication.
(This is similar to how we handle other "don't expect this syscall to
fail" checks in this test code.)

Fixes: Coverity CID 1432346
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
Message-id: 20210525134458.6675-2-peter.maydell@linaro.org
---
 tests/qtest/bios-tables-test.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
13
14
diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/qtest/bios-tables-test.c
17
+++ b/tests/qtest/bios-tables-test.c
18
@@ -XXX,XX +XXX,XX @@ static void test_acpi_asl(test_data *data)
19
exp_sdt->asl_file, sdt->asl_file);
20
int out = dup(STDOUT_FILENO);
21
int ret G_GNUC_UNUSED;
22
+ int dupret;
23
24
- dup2(STDERR_FILENO, STDOUT_FILENO);
25
+ g_assert(out >= 0);
26
+ dupret = dup2(STDERR_FILENO, STDOUT_FILENO);
27
+ g_assert(dupret >= 0);
28
ret = system(diff) ;
29
- dup2(out, STDOUT_FILENO);
30
+ dupret = dup2(out, STDOUT_FILENO);
31
+ g_assert(dupret >= 0);
32
close(out);
33
g_free(diff);
34
}
35
--
36
2.20.1
37
38
Deleted patch
Coverity notices that the checks against mkstemp() failing in
create_qcow2_with_mbr() are wrong: mkstemp returns -1 on failure but
the check is just "g_assert(fd)". Fix to use "g_assert(fd >= 0)",
matching the correct check in create_test_img().

Fixes: Coverity CID 1432274
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
Message-id: 20210525134458.6675-4-peter.maydell@linaro.org
---
 tests/qtest/hd-geo-test.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
14
15
diff --git a/tests/qtest/hd-geo-test.c b/tests/qtest/hd-geo-test.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/tests/qtest/hd-geo-test.c
18
+++ b/tests/qtest/hd-geo-test.c
19
@@ -XXX,XX +XXX,XX @@ static char *create_qcow2_with_mbr(MBRpartitions mbr, uint64_t sectors)
20
}
21
22
fd = mkstemp(raw_path);
23
- g_assert(fd);
24
+ g_assert(fd >= 0);
25
close(fd);
26
27
fd = open(raw_path, O_WRONLY);
28
@@ -XXX,XX +XXX,XX @@ static char *create_qcow2_with_mbr(MBRpartitions mbr, uint64_t sectors)
29
close(fd);
30
31
fd = mkstemp(qcow2_path);
32
- g_assert(fd);
33
+ g_assert(fd >= 0);
34
close(fd);
35
36
qemu_img_path = getenv("QTEST_QEMU_IMG");
37
--
38
2.20.1
39
40
Deleted patch
Coverity points out that we calculate a 64-bit value using 32-bit
arithmetic; add a cast so that the multiply is done in 64 bits.
(The overflow will never happen with the current test data.)

Fixes: Coverity CID 1432320
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
Message-id: 20210525134458.6675-5-peter.maydell@linaro.org
---
 tests/qtest/pflash-cfi02-test.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
13
14
diff --git a/tests/qtest/pflash-cfi02-test.c b/tests/qtest/pflash-cfi02-test.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/qtest/pflash-cfi02-test.c
17
+++ b/tests/qtest/pflash-cfi02-test.c
18
@@ -XXX,XX +XXX,XX @@ static void test_geometry(const void *opaque)
19
20
for (int region = 0; region < nb_erase_regions; ++region) {
21
for (uint32_t i = 0; i < c->nb_blocs[region]; ++i) {
22
- uint64_t byte_addr = i * c->sector_len[region];
23
+ uint64_t byte_addr = (uint64_t)i * c->sector_len[region];
24
g_assert_cmphex(flash_read(c, byte_addr), ==, bank_mask(c));
25
}
26
}
27
--
28
2.20.1
29
30
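The class of bug fixed in the pflash patch above is easy to reproduce in isolation. A tiny standalone C example (the numbers are made up and larger than the real test data) shows why the cast matters: without it the multiply is performed in 32 bits and wraps before the result is widened.

  #include <inttypes.h>
  #include <stdio.h>

  int main(void)
  {
      uint32_t i = 70000;
      uint32_t sector_len = 65536;

      uint64_t wrong = i * sector_len;            /* 32-bit multiply, wraps */
      uint64_t right = (uint64_t)i * sector_len;  /* widened first, no wrap */

      printf("wrong = %" PRIu64 "\n", wrong);
      printf("right = %" PRIu64 "\n", right);
      return 0;
  }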
Deleted patch
Coverity points out that in tpm_test_swtpm_migration_test() we
assume that src_tpm_addr and dst_tpm_addr are non-NULL (we
pass them to tpm_util_migration_start_qemu() which will
unconditionally dereference them) but then later explicitly
check them for NULL. Remove the pointless checks.

Fixes: Coverity CID 1432367, 1432359

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
Message-id: 20210525134458.6675-6-peter.maydell@linaro.org
---
 tests/qtest/tpm-tests.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)
16
17
diff --git a/tests/qtest/tpm-tests.c b/tests/qtest/tpm-tests.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tests/qtest/tpm-tests.c
20
+++ b/tests/qtest/tpm-tests.c
21
@@ -XXX,XX +XXX,XX @@ void tpm_test_swtpm_migration_test(const char *src_tpm_path,
22
qtest_quit(src_qemu);
23
24
tpm_util_swtpm_kill(dst_tpm_pid);
25
- if (dst_tpm_addr) {
26
- g_unlink(dst_tpm_addr->u.q_unix.path);
27
- qapi_free_SocketAddress(dst_tpm_addr);
28
- }
29
+ g_unlink(dst_tpm_addr->u.q_unix.path);
30
+ qapi_free_SocketAddress(dst_tpm_addr);
31
32
tpm_util_swtpm_kill(src_tpm_pid);
33
- if (src_tpm_addr) {
34
- g_unlink(src_tpm_addr->u.q_unix.path);
35
- qapi_free_SocketAddress(src_tpm_addr);
36
- }
37
+ g_unlink(src_tpm_addr->u.q_unix.path);
38
+ qapi_free_SocketAddress(src_tpm_addr);
39
}
40
--
41
2.20.1
42
43
Deleted patch
Coverity complains that we don't check for failures from dup()
and mkstemp(); add asserts that these syscalls succeeded.

Fixes: Coverity CID 1432516, 1432574
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20210525134458.6675-7-peter.maydell@linaro.org
---
 tests/unit/test-vmstate.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
12
13
diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tests/unit/test-vmstate.c
16
+++ b/tests/unit/test-vmstate.c
17
@@ -XXX,XX +XXX,XX @@ static int temp_fd;
18
/* Duplicate temp_fd and seek to the beginning of the file */
19
static QEMUFile *open_test_file(bool write)
20
{
21
- int fd = dup(temp_fd);
22
+ int fd;
23
QIOChannel *ioc;
24
QEMUFile *f;
25
26
+ fd = dup(temp_fd);
27
+ g_assert(fd >= 0);
28
lseek(fd, 0, SEEK_SET);
29
if (write) {
30
g_assert_cmpint(ftruncate(fd, 0), ==, 0);
31
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
32
g_autofree char *temp_file = g_strdup_printf("%s/vmst.test.XXXXXX",
33
g_get_tmp_dir());
34
temp_fd = mkstemp(temp_file);
35
+ g_assert(temp_fd >= 0);
36
37
module_call_init(MODULE_INIT_QOM);
38
39
--
40
2.20.1
41
42