target-arm queue, mostly SME preliminaries.

In the unlikely event we don't land the rest of SME before freeze
for 7.1 we can revert the docs/property changes included here.

-- PMM

The following changes since commit 097ccbbbaf2681df1e65542e5b7d2b2d0c66e2bc:

  Merge tag 'qemu-sparc-20220626' of https://github.com/mcayland/qemu into staging (2022-06-27 05:21:05 +0530)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220627

for you to fetch changes up to 59e1b8a22ea9f947d038ccac784de1020f266e14:

  target/arm: Check V7VE as well as LPAE in arm_pamax (2022-06-27 11:18:17 +0100)

----------------------------------------------------------------
target-arm queue:
 * sphinx: change default language to 'en'
 * Diagnose attempts to emulate EL3 in hvf as well as kvm
 * More SME groundwork patches
 * virt: Fix calculation of physical address space size
   for v7VE CPUs (eg cortex-a15)

----------------------------------------------------------------
Alexander Graf (2):
      accel: Introduce current_accel_name()
      target/arm: Catch invalid kvm state also for hvf

Martin Liška (1):
      sphinx: change default language to 'en'

Richard Henderson (22):
      target/arm: Implement TPIDR2_EL0
      target/arm: Add SMEEXC_EL to TB flags
      target/arm: Add syn_smetrap
      target/arm: Add ARM_CP_SME
      target/arm: Add SVCR
      target/arm: Add SMCR_ELx
      target/arm: Add SMIDR_EL1, SMPRI_EL1, SMPRIMAP_EL2
      target/arm: Add PSTATE.{SM,ZA} to TB flags
      target/arm: Add the SME ZA storage to CPUARMState
      target/arm: Implement SMSTART, SMSTOP
      target/arm: Move error for sve%d property to arm_cpu_sve_finalize
      target/arm: Create ARMVQMap
      target/arm: Generalize cpu_arm_{get,set}_vq
      target/arm: Generalize cpu_arm_{get, set}_default_vec_len
      target/arm: Move arm_cpu_*_finalize to internals.h
      target/arm: Unexport aarch64_add_*_properties
      target/arm: Add cpu properties for SME
      target/arm: Introduce sve_vqm1_for_el_sm
      target/arm: Add SVL to TB flags
      target/arm: Move pred_{full, gvec}_reg_{offset, size} to translate-a64.h
      target/arm: Extend arm_pamax to more than aarch64
      target/arm: Check V7VE as well as LPAE in arm_pamax

 docs/conf.py | 2 +-
 docs/system/arm/cpu-features.rst | 56 ++++++++++
 include/qemu/accel.h | 1 +
 target/arm/cpregs.h | 5 +
 target/arm/cpu.h | 103 ++++++++++++++-----
 target/arm/helper-sme.h | 21 ++++
 target/arm/helper.h | 1 +
 target/arm/internals.h | 4 +
 target/arm/syndrome.h | 14 +++
 target/arm/translate-a64.h | 38 +++++++
 target/arm/translate.h | 6 ++
 accel/accel-common.c | 8 ++
 hw/arm/virt.c | 10 +-
 softmmu/vl.c | 3 +-
 target/arm/cpu.c | 32 ++++--
 target/arm/cpu64.c | 205 ++++++++++++++++++++++++++++---------
 target/arm/helper.c | 213 +++++++++++++++++++++++++++++++++++++--
 target/arm/kvm64.c | 2 +-
 target/arm/machine.c | 34 +++++++
 target/arm/ptw.c | 26 +++--
 target/arm/sme_helper.c | 61 +++++++++++
 target/arm/translate-a64.c | 46 +++++++++
 target/arm/translate-sve.c | 36 -------
 target/arm/meson.build | 1 +
 24 files changed, 782 insertions(+), 146 deletions(-)
 create mode 100644 target/arm/helper-sme.h
 create mode 100644 target/arm/sme_helper.c

Changes v1->v2 (fixing CI failures in v1, added a couple of
extra patches in an attempt to avoid having to do a last
minute arm pullreq next week):
 * new patch to hopefully fix the build issue with the SVE/SME sysregs test
 * dropped the IC IVAU test case patch
 * new patch: fix over-length shift
 * new patches: define neoverse-v1

thanks
-- PMM

The following changes since commit 2a6ae69154542caa91dd17c40fd3f5ffbec300de:

  Merge tag 'pull-maintainer-ominbus-030723-1' of https://gitlab.com/stsquad/qemu into staging (2023-07-04 08:36:44 +0200)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230706

for you to fetch changes up to c41077235168140cdd4a34fce9bd95c3d30efe9c:

  target/arm: Avoid over-length shift in arm_cpu_sve_finalize() error case (2023-07-06 13:36:51 +0100)

----------------------------------------------------------------
target-arm queue:
 * Add raw_writes ops for register whose write induce TLB maintenance
 * hw/arm/sbsa-ref: use XHCI to replace EHCI
 * Avoid splitting Zregs across lines in dump
 * Dump ZA[] when active
 * Fix SME full tile indexing
 * Handle IC IVAU to improve compatibility with JITs
 * xlnx-canfd-test: Fix code coverity issues
 * gdbstub: Guard M-profile code with CONFIG_TCG
 * allwinner-sramc: Set class_size
 * target/xtensa: Assert that interrupt level is within bounds
 * Avoid over-length shift in arm_cpu_sve_finalize() error case
 * Define new 'neoverse-v1' CPU type

----------------------------------------------------------------
Akihiko Odaki (1):
      hw: arm: allwinner-sramc: Set class_size

Eric Auger (1):
      target/arm: Add raw_writes ops for register whose write induce TLB maintenance

Fabiano Rosas (1):
      target/arm: gdbstub: Guard M-profile code with CONFIG_TCG

John Högberg (1):
      target/arm: Handle IC IVAU to improve compatibility with JITs

Peter Maydell (5):
      tests/tcg/aarch64/sysregs.c: Use S syntax for id_aa64zfr0_el1 and id_aa64smfr0_el1
      target/xtensa: Assert that interrupt level is within bounds
      target/arm: Suppress more TCG unimplemented features in ID registers
      target/arm: Define neoverse-v1
      target/arm: Avoid over-length shift in arm_cpu_sve_finalize() error case

Richard Henderson (3):
      target/arm: Avoid splitting Zregs across lines in dump
      target/arm: Dump ZA[] when active
      target/arm: Fix SME full tile indexing

Vikram Garhwal (1):
      tests/qtest: xlnx-canfd-test: Fix code coverity issues

Yuquan Wang (1):
      hw/arm/sbsa-ref: use XHCI to replace EHCI

 docs/system/arm/sbsa.rst | 5 +-
 docs/system/arm/virt.rst | 1 +
 hw/arm/sbsa-ref.c | 24 ++++---
 hw/arm/virt.c | 1 +
 hw/misc/allwinner-sramc.c | 1 +
 target/arm/cpu.c | 98 +++++++++++++++++++++--------
 target/arm/cpu64.c | 4 +-
 target/arm/gdbstub.c | 4 ++
 target/arm/helper.c | 70 +++++++++++++++++----
 target/arm/tcg/cpu64.c | 128 ++++++++++++++++++++++++++++++++++++++
 target/arm/tcg/translate-sme.c | 24 +++++--
 target/xtensa/exc_helper.c | 3 +
 tests/qtest/xlnx-canfd-test.c | 33 ++++------
 tests/tcg/aarch64/sme-outprod1.c | 83 ++++++++++++++++++++++++
 tests/tcg/aarch64/sysregs.c | 11 ++--
 hw/arm/Kconfig | 2 +-
 tests/tcg/aarch64/Makefile.target | 16 ++---
 17 files changed, 415 insertions(+), 93 deletions(-)
 create mode 100644 tests/tcg/aarch64/sme-outprod1.c
From: Richard Henderson <richard.henderson@linaro.org>

This register is part of SME, but isn't closely related to the
rest of the extension.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220620175235.60881-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 1 +
 target/arm/helper.c | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+)

From: Eric Auger <eric.auger@redhat.com>

Some registers whose 'cooked' writefns induce TLB maintenance do
not have raw_writefn ops defined. If only the writefn op is set
(ie. no raw_writefn is provided), it is assumed that the cooked
writefn also works as the raw one. For those registers it is not
obvious that the tlb_flush works in KVM mode, so it is better/safer
to set the raw write explicitly.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
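As background, a minimal self-contained toy sketch (not QEMU code; the type
and function names are invented for illustration) of the cooked-vs-raw split
the commit message relies on. In QEMU proper the pair maps onto the .writefn
and .raw_writefn hooks of an ARMCPRegInfo entry:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy model only: a "cooked" write has side effects (a TLB flush here),
     * a "raw" write just updates backing state, which is what migration and
     * KVM register sync want. */
    typedef struct {
        uint64_t ttbr0;
        int tlb_flushes;
    } ToyCPU;

    static void cooked_ttbr0_write(ToyCPU *cpu, uint64_t val)
    {
        cpu->ttbr0 = val;
        cpu->tlb_flushes++;          /* side effect the raw path must skip */
    }

    static void raw_ttbr0_write(ToyCPU *cpu, uint64_t val)
    {
        cpu->ttbr0 = val;            /* state update only */
    }

    int main(void)
    {
        ToyCPU cpu = { 0 };

        cooked_ttbr0_write(&cpu, 0x1000);   /* guest write: flush happens */
        raw_ttbr0_write(&cpu, 0x2000);      /* inbound sync: no flush */
        printf("ttbr0=%#" PRIx64 " flushes=%d\n", cpu.ttbr0, cpu.tlb_flushes);
        return 0;
    }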
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
18
+++ b/target/arm/cpu.h
19
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
20
};
21
uint64_t tpidr_el[4];
22
};
23
+ uint64_t tpidr2_el0;
24
/* The secure banks of these registers don't map anywhere */
25
uint64_t tpidrurw_s;
26
uint64_t tpidrprw_s;
27
diff --git a/target/arm/helper.c b/target/arm/helper.c
17
diff --git a/target/arm/helper.c b/target/arm/helper.c
28
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/helper.c
19
--- a/target/arm/helper.c
30
+++ b/target/arm/helper.c
20
+++ b/target/arm/helper.c
31
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo zcr_reginfo[] = {
21
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
32
.writefn = zcr_write, .raw_writefn = raw_write },
22
.opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
23
.access = PL1_RW, .accessfn = access_tvm_trvm,
24
.fgt = FGT_TTBR0_EL1,
25
- .writefn = vmsa_ttbr_write, .resetvalue = 0,
26
+ .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
27
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
28
offsetof(CPUARMState, cp15.ttbr0_ns) } },
29
{ .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
30
.opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
31
.access = PL1_RW, .accessfn = access_tvm_trvm,
32
.fgt = FGT_TTBR1_EL1,
33
- .writefn = vmsa_ttbr_write, .resetvalue = 0,
34
+ .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
35
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
36
offsetof(CPUARMState, cp15.ttbr1_ns) } },
37
{ .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
38
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo lpae_cp_reginfo[] = {
39
.type = ARM_CP_64BIT | ARM_CP_ALIAS,
40
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
41
offsetof(CPUARMState, cp15.ttbr0_ns) },
42
- .writefn = vmsa_ttbr_write, },
43
+ .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
44
{ .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
45
.access = PL1_RW, .accessfn = access_tvm_trvm,
46
.type = ARM_CP_64BIT | ARM_CP_ALIAS,
47
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
48
offsetof(CPUARMState, cp15.ttbr1_ns) },
49
- .writefn = vmsa_ttbr_write, },
50
+ .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
33
};
51
};
34
52
35
+#ifdef TARGET_AARCH64
53
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
36
+static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
54
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
37
+ bool isread)
55
.type = ARM_CP_IO,
38
+{
56
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
39
+ int el = arm_current_el(env);
57
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
40
+
58
- .writefn = hcr_write },
41
+ if (el == 0) {
59
+ .writefn = hcr_write, .raw_writefn = raw_write },
42
+ uint64_t sctlr = arm_sctlr(env, el);
60
{ .name = "HCR", .state = ARM_CP_STATE_AA32,
43
+ if (!(sctlr & SCTLR_EnTP2)) {
61
.type = ARM_CP_ALIAS | ARM_CP_IO,
44
+ return CP_ACCESS_TRAP;
62
.cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
45
+ }
63
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
46
+ }
64
{ .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
47
+ /* TODO: FEAT_FGT */
65
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
48
+ if (el < 3
66
.access = PL2_RW, .writefn = vmsa_tcr_el12_write,
49
+ && arm_feature(env, ARM_FEATURE_EL3)
67
+ .raw_writefn = raw_write,
50
+ && !(env->cp15.scr_el3 & SCR_ENTP2)) {
68
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
51
+ return CP_ACCESS_TRAP_EL3;
69
{ .name = "VTCR", .state = ARM_CP_STATE_AA32,
52
+ }
70
.cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
53
+ return CP_ACCESS_OK;
71
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
54
+}
72
.type = ARM_CP_64BIT | ARM_CP_ALIAS,
55
+
73
.access = PL2_RW, .accessfn = access_el3_aa32ns,
56
+static const ARMCPRegInfo sme_reginfo[] = {
74
.fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
57
+ { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
75
- .writefn = vttbr_write },
58
+ .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
76
+ .writefn = vttbr_write, .raw_writefn = raw_write },
59
+ .access = PL0_RW, .accessfn = access_tpidr2,
77
{ .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
60
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
78
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
61
+};
79
- .access = PL2_RW, .writefn = vttbr_write,
62
+#endif /* TARGET_AARCH64 */
80
+ .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
63
+
81
.fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
64
void hw_watchpoint_update(ARMCPU *cpu, int n)
82
{ .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
65
{
83
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
66
CPUARMState *env = &cpu->env;
84
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
67
@@ -XXX,XX +XXX,XX @@ void register_cp_regs_for_features(ARMCPU *cpu)
85
.fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
68
}
86
{ .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
69
87
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
70
#ifdef TARGET_AARCH64
88
- .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
71
+ if (cpu_isar_feature(aa64_sme, cpu)) {
89
+ .access = PL2_RW, .resetvalue = 0,
72
+ define_arm_cp_regs(cpu, sme_reginfo);
90
+ .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
73
+ }
91
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
74
if (cpu_isar_feature(aa64_pauth, cpu)) {
92
{ .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
75
define_arm_cp_regs(cpu, pauth_reginfo);
93
.access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
76
}
94
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
95
{ .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
96
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
97
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
98
- .resetfn = scr_reset, .writefn = scr_write },
99
+ .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
100
{ .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
101
.cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
102
.access = PL1_RW, .accessfn = access_trap_aa32s_el1,
103
.fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
104
- .writefn = scr_write },
105
+ .writefn = scr_write, .raw_writefn = raw_write },
106
{ .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
107
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
108
.access = PL3_RW, .resetvalue = 0,
109
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo vhe_reginfo[] = {
110
{ .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
111
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
112
.access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
113
+ .raw_writefn = raw_write,
114
.fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
115
#ifndef CONFIG_USER_ONLY
116
{ .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
77
--
117
--
78
2.25.1
118
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

Rename from cpu_arm_{get,set}_sve_default_vec_len,
and take the pointer to default_vq from opaque.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220620175235.60881-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu64.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)

From: Yuquan Wang <wangyuquan1236@phytium.com.cn>

The current sbsa-ref cannot use the EHCI controller, which is only
able to do 32-bit DMA, since sbsa-ref doesn't have RAM below 4GB.
Hence, this uses XHCI to provide a USB controller with 64-bit
DMA capability instead of EHCI.

We bump the platform version to 0.3 with this change. Although the
hardware at the USB controller address changes, the firmware and
Linux can both cope with this -- on an older non-XHCI-aware
firmware/kernel setup the probe routine simply fails and the guest
proceeds without any USB. (This isn't a loss of functionality,
because the old USB controller never worked in the first place.) So
we can call this a backwards-compatible change and only bump the
minor version.

Signed-off-by: Yuquan Wang <wangyuquan1236@phytium.com.cn>
Message-id: 20230621103847.447508-2-wangyuquan1236@phytium.com.cn
[PMM: tweaked commit message; add line to docs about what
 changes in platform version 0.3]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/sbsa.rst | 5 ++++-
 hw/arm/sbsa-ref.c | 23 +++++++++++++----------
 hw/arm/Kconfig | 2 +-
 3 files changed, 18 insertions(+), 12 deletions(-)
14
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
29
diff --git a/docs/system/arm/sbsa.rst b/docs/system/arm/sbsa.rst
15
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpu64.c
31
--- a/docs/system/arm/sbsa.rst
17
+++ b/target/arm/cpu64.c
32
+++ b/docs/system/arm/sbsa.rst
18
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
33
@@ -XXX,XX +XXX,XX @@ The ``sbsa-ref`` board supports:
19
34
- A configurable number of AArch64 CPUs
20
#ifdef CONFIG_USER_ONLY
35
- GIC version 3
21
/* Mirror linux /proc/sys/abi/sve_default_vector_length. */
36
- System bus AHCI controller
22
-static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v,
37
- - System bus EHCI controller
23
- const char *name, void *opaque,
38
+ - System bus XHCI controller
24
- Error **errp)
39
- CDROM and hard disc on AHCI bus
25
+static void cpu_arm_set_default_vec_len(Object *obj, Visitor *v,
40
- E1000E ethernet card on PCIe bus
26
+ const char *name, void *opaque,
41
- Bochs display adapter on PCIe bus
27
+ Error **errp)
42
@@ -XXX,XX +XXX,XX @@ Platform version changes:
43
44
0.2
45
GIC ITS information is present in devicetree.
46
+
47
+0.3
48
+ The USB controller is an XHCI device, not EHCI
49
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/hw/arm/sbsa-ref.c
52
+++ b/hw/arm/sbsa-ref.c
53
@@ -XXX,XX +XXX,XX @@
54
#include "hw/pci-host/gpex.h"
55
#include "hw/qdev-properties.h"
56
#include "hw/usb.h"
57
+#include "hw/usb/xhci.h"
58
#include "hw/char/pl011.h"
59
#include "hw/watchdog/sbsa_gwdt.h"
60
#include "net/net.h"
61
@@ -XXX,XX +XXX,XX @@ enum {
62
SBSA_SECURE_UART_MM,
63
SBSA_SECURE_MEM,
64
SBSA_AHCI,
65
- SBSA_EHCI,
66
+ SBSA_XHCI,
67
};
68
69
struct SBSAMachineState {
70
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry sbsa_ref_memmap[] = {
71
[SBSA_SMMU] = { 0x60050000, 0x00020000 },
72
/* Space here reserved for more SMMUs */
73
[SBSA_AHCI] = { 0x60100000, 0x00010000 },
74
- [SBSA_EHCI] = { 0x60110000, 0x00010000 },
75
+ [SBSA_XHCI] = { 0x60110000, 0x00010000 },
76
/* Space here reserved for other devices */
77
[SBSA_PCIE_PIO] = { 0x7fff0000, 0x00010000 },
78
/* 32-bit address PCIE MMIO space */
79
@@ -XXX,XX +XXX,XX @@ static const int sbsa_ref_irqmap[] = {
80
[SBSA_SECURE_UART] = 8,
81
[SBSA_SECURE_UART_MM] = 9,
82
[SBSA_AHCI] = 10,
83
- [SBSA_EHCI] = 11,
84
+ [SBSA_XHCI] = 11,
85
[SBSA_SMMU] = 12, /* ... to 15 */
86
[SBSA_GWDT_WS0] = 16,
87
};
88
@@ -XXX,XX +XXX,XX @@ static void create_fdt(SBSAMachineState *sms)
89
* fw compatibility.
90
*/
91
qemu_fdt_setprop_cell(fdt, "/", "machine-version-major", 0);
92
- qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 2);
93
+ qemu_fdt_setprop_cell(fdt, "/", "machine-version-minor", 3);
94
95
if (ms->numa_state->have_numa_distance) {
96
int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t);
97
@@ -XXX,XX +XXX,XX @@ static void create_ahci(const SBSAMachineState *sms)
98
}
99
}
100
101
-static void create_ehci(const SBSAMachineState *sms)
102
+static void create_xhci(const SBSAMachineState *sms)
28
{
103
{
29
- ARMCPU *cpu = ARM_CPU(obj);
104
- hwaddr base = sbsa_ref_memmap[SBSA_EHCI].base;
30
+ uint32_t *ptr_default_vq = opaque;
105
- int irq = sbsa_ref_irqmap[SBSA_EHCI];
31
int32_t default_len, default_vq, remainder;
106
+ hwaddr base = sbsa_ref_memmap[SBSA_XHCI].base;
32
107
+ int irq = sbsa_ref_irqmap[SBSA_XHCI];
33
if (!visit_type_int32(v, name, &default_len, errp)) {
108
+ DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS);
34
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v,
109
35
110
- sysbus_create_simple("platform-ehci-usb", base,
36
/* Undocumented, but the kernel allows -1 to indicate "maximum". */
111
- qdev_get_gpio_in(sms->gic, irq));
37
if (default_len == -1) {
112
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
38
- cpu->sve_default_vq = ARM_MAX_VQ;
113
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
39
+ *ptr_default_vq = ARM_MAX_VQ;
114
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(sms->gic, irq));
40
return;
41
}
42
43
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v,
44
return;
45
}
46
47
- cpu->sve_default_vq = default_vq;
48
+ *ptr_default_vq = default_vq;
49
}
115
}
50
116
51
-static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v,
117
static void create_smmu(const SBSAMachineState *sms, PCIBus *bus)
52
- const char *name, void *opaque,
118
@@ -XXX,XX +XXX,XX @@ static void sbsa_ref_init(MachineState *machine)
53
- Error **errp)
119
54
+static void cpu_arm_get_default_vec_len(Object *obj, Visitor *v,
120
create_ahci(sms);
55
+ const char *name, void *opaque,
121
56
+ Error **errp)
122
- create_ehci(sms);
57
{
123
+ create_xhci(sms);
58
- ARMCPU *cpu = ARM_CPU(obj);
124
59
- int32_t value = cpu->sve_default_vq * 16;
125
create_pcie(sms);
60
+ uint32_t *ptr_default_vq = opaque;
126
61
+ int32_t value = *ptr_default_vq * 16;
127
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
62
128
index XXXXXXX..XXXXXXX 100644
63
visit_type_int32(v, name, &value, errp);
129
--- a/hw/arm/Kconfig
64
}
130
+++ b/hw/arm/Kconfig
65
@@ -XXX,XX +XXX,XX @@ void aarch64_add_sve_properties(Object *obj)
131
@@ -XXX,XX +XXX,XX @@ config SBSA_REF
66
#ifdef CONFIG_USER_ONLY
132
select PL011 # UART
67
/* Mirror linux /proc/sys/abi/sve_default_vector_length. */
133
select PL031 # RTC
68
object_property_add(obj, "sve-default-vector-length", "int32",
134
select PL061 # GPIO
69
- cpu_arm_get_sve_default_vec_len,
135
- select USB_EHCI_SYSBUS
70
- cpu_arm_set_sve_default_vec_len, NULL, NULL);
136
+ select USB_XHCI_SYSBUS
71
+ cpu_arm_get_default_vec_len,
137
select WDT_SBSA
72
+ cpu_arm_set_default_vec_len, NULL,
138
select BOCHS_DISPLAY
73
+ &cpu->sve_default_vq);
74
#endif
75
}
76
139
77
--
140
--
78
2.25.1
141
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

In machvirt_init we create a cpu but do not fully initialize it.
Thus the propagation of V7VE to LPAE has not been done, and we
compute the wrong value for some v7 cpus, e.g. cortex-a15.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1078
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reported-by: He Zhe <zhe.he@windriver.com>
Message-id: 20220619001541.131672-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

Some assemblers will complain about attempts to access
id_aa64zfr0_el1 and id_aa64smfr0_el1 by name if the test
binary isn't built for the right processor type:

/tmp/ccASXpLo.s:782: Error: selected processor does not support system register name 'id_aa64zfr0_el1'
/tmp/ccASXpLo.s:829: Error: selected processor does not support system register name 'id_aa64smfr0_el1'

However, these registers are in the ID space and are guaranteed to
read-as-zero on older CPUs, so the access is both safe and sensible.
Switch to using the S syntax, as we already do for ID_AA64ISAR2_EL1
and ID_AA64MMFR2_EL1. This allows us to drop the HAS_ARMV9_SME check
and the makefile machinery to adjust the CFLAGS for this test, so we
don't rely on having a sufficiently new compiler to be able to check
these registers.

This means we're actually testing the SME ID register: no released
GCC yet recognizes -march=armv9-a+sme, so that was always skipped.
It also avoids a future problem if we try to switch the "do we have
SME support in the toolchain" check from "in the compiler" to "in the
assembler" (at which point we would otherwise run into the above
errors).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/tcg/aarch64/sysregs.c | 11 +++++++----
 tests/tcg/aarch64/Makefile.target | 7 +------
 2 files changed, 8 insertions(+), 10 deletions(-)
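As background, a hedged illustration (not part of the patch, AArch64-only)
of what the S syntax buys: ID_AA64ZFR0_EL1 is encoded as op0=3, op1=0,
CRn=0, CRm=4, op2=4, so "S3_0_C0_C4_4" names the same register without the
assembler having to know the symbolic name, which may be gated behind a
-march feature flag:

    #include <stdint.h>

    /* Read ID_AA64ZFR0_EL1 through its generic encoding. */
    static inline uint64_t read_id_aa64zfr0_el1(void)
    {
        uint64_t val;
        asm volatile("mrs %0, S3_0_C0_C4_4" : "=r"(val));
        return val;
    }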
17
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
29
diff --git a/tests/tcg/aarch64/sysregs.c b/tests/tcg/aarch64/sysregs.c
18
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/ptw.c
31
--- a/tests/tcg/aarch64/sysregs.c
20
+++ b/target/arm/ptw.c
32
+++ b/tests/tcg/aarch64/sysregs.c
21
@@ -XXX,XX +XXX,XX @@ unsigned int arm_pamax(ARMCPU *cpu)
33
@@ -XXX,XX +XXX,XX @@
22
assert(parange < ARRAY_SIZE(pamax_map));
34
/*
23
return pamax_map[parange];
35
* Older assemblers don't recognize newer system register names,
24
}
36
* but we can still access them by the Sn_n_Cn_Cn_n syntax.
25
- if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
37
+ * This also means we don't need to specifically request that the
26
+
38
+ * assembler enables whatever architectural features the ID registers
27
+ /*
39
+ * syntax might be gated behind.
28
+ * In machvirt_init, we call arm_pamax on a cpu that is not fully
40
*/
29
+ * initialized, so we can't rely on the propagation done in realize.
41
#define SYS_ID_AA64ISAR2_EL1 S3_0_C0_C6_2
30
+ */
42
#define SYS_ID_AA64MMFR2_EL1 S3_0_C0_C7_2
31
+ if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
43
+#define SYS_ID_AA64ZFR0_EL1 S3_0_C0_C4_4
32
+ arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
44
+#define SYS_ID_AA64SMFR0_EL1 S3_0_C0_C4_5
33
/* v7 with LPAE */
45
34
return 40;
46
int failed_bit_count;
35
}
47
48
@@ -XXX,XX +XXX,XX @@ int main(void)
49
/* all hidden, DebugVer fixed to 0x6 (ARMv8 debug architecture) */
50
get_cpu_reg_check_mask(id_aa64dfr0_el1, _m(0000,0000,0000,0006));
51
get_cpu_reg_check_zero(id_aa64dfr1_el1);
52
- get_cpu_reg_check_mask(id_aa64zfr0_el1, _m(0ff0,ff0f,00ff,00ff));
53
-#ifdef HAS_ARMV9_SME
54
- get_cpu_reg_check_mask(id_aa64smfr0_el1, _m(80f1,00fd,0000,0000));
55
-#endif
56
+ get_cpu_reg_check_mask(SYS_ID_AA64ZFR0_EL1, _m(0ff0,ff0f,00ff,00ff));
57
+ get_cpu_reg_check_mask(SYS_ID_AA64SMFR0_EL1, _m(80f1,00fd,0000,0000));
58
59
get_cpu_reg_check_zero(id_aa64afr0_el1);
60
get_cpu_reg_check_zero(id_aa64afr1_el1);
61
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
62
index XXXXXXX..XXXXXXX 100644
63
--- a/tests/tcg/aarch64/Makefile.target
64
+++ b/tests/tcg/aarch64/Makefile.target
65
@@ -XXX,XX +XXX,XX @@ AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-5 mte-6 mte-7
66
mte-%: CFLAGS += -march=armv8.5-a+memtag
67
endif
68
69
-ifneq ($(CROSS_CC_HAS_SVE),)
70
# System Registers Tests
71
AARCH64_TESTS += sysregs
72
-ifneq ($(CROSS_CC_HAS_ARMV9_SME),)
73
-sysregs: CFLAGS+=-march=armv9-a+sme -DHAS_ARMV9_SME
74
-else
75
-sysregs: CFLAGS+=-march=armv8.1-a+sve
76
-endif
77
78
+ifneq ($(CROSS_CC_HAS_SVE),)
79
# SVE ioctl test
80
AARCH64_TESTS += sve-ioctls
81
sve-ioctls: CFLAGS+=-march=armv8.1-a+sve
36
--
82
--
37
2.25.1
83
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

Mirror the properties for SVE. The main difference is
that any arbitrary set of powers of 2 may be supported,
and not the stricter constraints that apply to SVE.

Include a property to control FEAT_SME_FA64, as failing
to restrict the runtime to the proper subset of insns
could be a major point for bugs.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220620175235.60881-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 docs/system/arm/cpu-features.rst | 56 +++++++++++++++
 target/arm/cpu.h | 2 +
 target/arm/internals.h | 1 +
 target/arm/cpu.c | 14 +++-
 target/arm/cpu64.c | 114 +++++++++++++++++++++++++++++--
 5 files changed, 180 insertions(+), 7 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Allow the line length to extend to 548 columns. While annoyingly wide,
it's still less confusing than the continuations we print. Also, the
default VL used by Linux (and max for A64FX) uses only 140 columns.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230622151201.1578522-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 36 ++++++++++++++----------------------
 1 file changed, 14 insertions(+), 22 deletions(-)
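As background, a small sketch (not part of the patch; the helper name is
invented) of how the sve<N>/sme<N> vector-length bitmaps are interpreted:
bit N set means a vector length of (N + 1) * 128 bits is supported, and
since SME lengths must be powers of two, a CPU offering 128..2048-bit
streaming vector lengths would set bits 0, 1, 3, 7 and 15:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true if the bitmap advertises the given length in bits. */
    static bool vq_map_supports(uint32_t map, unsigned len_bits)
    {
        unsigned vq = len_bits / 128;               /* length in quadwords */
        return vq >= 1 && vq <= 32 && (map & (1u << (vq - 1)));
    }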
23
diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst
24
index XXXXXXX..XXXXXXX 100644
25
--- a/docs/system/arm/cpu-features.rst
26
+++ b/docs/system/arm/cpu-features.rst
27
@@ -XXX,XX +XXX,XX @@ verbose command lines. However, the recommended way to select vector
28
lengths is to explicitly enable each desired length. Therefore only
29
example's (1), (4), and (6) exhibit recommended uses of the properties.
30
31
+SME CPU Property Examples
32
+-------------------------
33
+
34
+ 1) Disable SME::
35
+
36
+ $ qemu-system-aarch64 -M virt -cpu max,sme=off
37
+
38
+ 2) Implicitly enable all vector lengths for the ``max`` CPU type::
39
+
40
+ $ qemu-system-aarch64 -M virt -cpu max
41
+
42
+ 3) Only enable the 256-bit vector length::
43
+
44
+ $ qemu-system-aarch64 -M virt -cpu max,sme256=on
45
+
46
+ 3) Enable the 256-bit and 1024-bit vector lengths::
47
+
48
+ $ qemu-system-aarch64 -M virt -cpu max,sme256=on,sme1024=on
49
+
50
+ 4) Disable the 512-bit vector length. This results in all the other
51
+ lengths supported by ``max`` defaulting to enabled
52
+ (128, 256, 1024 and 2048)::
53
+
54
+ $ qemu-system-aarch64 -M virt -cpu max,sve512=off
55
+
56
SVE User-mode Default Vector Length Property
57
--------------------------------------------
58
59
@@ -XXX,XX +XXX,XX @@ length supported by QEMU is 256.
60
61
If this property is set to ``-1`` then the default vector length
62
is set to the maximum possible length.
63
+
64
+SME CPU Properties
65
+==================
66
+
67
+The SME CPU properties are much like the SVE properties: ``sme`` is
68
+used to enable or disable the entire SME feature, and ``sme<N>`` is
69
+used to enable or disable specific vector lengths. Finally,
70
+``sme_fa64`` is used to enable or disable ``FEAT_SME_FA64``, which
71
+allows execution of the "full a64" instruction set while Streaming
72
+SVE mode is enabled.
73
+
74
+SME is not supported by KVM at this time.
75
+
76
+At least one vector length must be enabled when ``sme`` is enabled,
77
+and all vector lengths must be powers of 2. The maximum vector
78
+length supported by qemu is 2048 bits. Otherwise, there are no
79
+additional constraints on the set of vector lengths supported by SME.
80
+
81
+SME User-mode Default Vector Length Property
82
+--------------------------------------------
83
+
84
+For qemu-aarch64, the cpu propery ``sme-default-vector-length=N`` is
85
+defined to mirror the Linux kernel parameter file
86
+``/proc/sys/abi/sme_default_vector_length``. The default length, ``N``,
87
+is in units of bytes and must be between 16 and 8192.
88
+If not specified, the default vector length is 32.
89
+
90
+As with ``sve-default-vector-length``, if the default length is larger
91
+than the maximum vector length enabled, the actual vector length will
92
+be reduced. If this property is set to ``-1`` then the default vector
93
+length is set to the maximum possible length.
94
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
95
index XXXXXXX..XXXXXXX 100644
96
--- a/target/arm/cpu.h
97
+++ b/target/arm/cpu.h
98
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {
99
#ifdef CONFIG_USER_ONLY
100
/* Used to set the default vector length at process start. */
101
uint32_t sve_default_vq;
102
+ uint32_t sme_default_vq;
103
#endif
104
105
ARMVQMap sve_vq;
106
+ ARMVQMap sme_vq;
107
108
/* Generic timer counter frequency, in Hz */
109
uint64_t gt_cntfrq_hz;
110
diff --git a/target/arm/internals.h b/target/arm/internals.h
111
index XXXXXXX..XXXXXXX 100644
112
--- a/target/arm/internals.h
113
+++ b/target/arm/internals.h
114
@@ -XXX,XX +XXX,XX @@ int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
115
int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
116
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
117
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
118
+void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
119
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
120
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
121
#endif
122
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
15
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
123
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
124
--- a/target/arm/cpu.c
17
--- a/target/arm/cpu.c
125
+++ b/target/arm/cpu.c
18
+++ b/target/arm/cpu.c
126
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_initfn(Object *obj)
19
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
127
#ifdef CONFIG_USER_ONLY
20
ARMCPU *cpu = ARM_CPU(cs);
128
# ifdef TARGET_AARCH64
21
CPUARMState *env = &cpu->env;
129
/*
22
uint32_t psr = pstate_read(env);
130
- * The linux kernel defaults to 512-bit vectors, when sve is supported.
23
- int i;
131
- * See documentation for /proc/sys/abi/sve_default_vector_length, and
24
+ int i, j;
132
- * our corresponding sve-default-vector-length cpu property.
25
int el = arm_current_el(env);
133
+ * The linux kernel defaults to 512-bit for SVE, and 256-bit for SME.
26
const char *ns_status;
134
+ * These values were chosen to fit within the default signal frame.
27
bool sve;
135
+ * See documentation for /proc/sys/abi/{sve,sme}_default_vector_length,
28
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
136
+ * and our corresponding cpu property.
29
}
137
*/
30
138
cpu->sve_default_vq = 4;
31
if (sve) {
139
+ cpu->sme_default_vq = 2;
32
- int j, zcr_len = sve_vqm1_for_el(env, el);
140
# endif
33
+ int zcr_len = sve_vqm1_for_el(env, el);
141
#else
34
142
/* Our inbound IRQ and FIQ lines */
35
for (i = 0; i <= FFR_PRED_NUM; i++) {
143
@@ -XXX,XX +XXX,XX @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
36
bool eol;
144
return;
37
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
38
}
145
}
39
}
146
40
147
+ arm_cpu_sme_finalize(cpu, &local_err);
41
- for (i = 0; i < 32; i++) {
148
+ if (local_err != NULL) {
42
- if (zcr_len == 0) {
149
+ error_propagate(errp, local_err);
43
+ if (zcr_len == 0) {
150
+ return;
44
+ /*
151
+ }
45
+ * With vl=16, there are only 37 columns per register,
152
+
46
+ * so output two registers per line.
153
arm_cpu_pauth_finalize(cpu, &local_err);
47
+ */
154
if (local_err != NULL) {
48
+ for (i = 0; i < 32; i++) {
155
error_propagate(errp, local_err);
49
qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
156
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
50
i, env->vfp.zregs[i].d[1],
157
index XXXXXXX..XXXXXXX 100644
51
env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
158
--- a/target/arm/cpu64.c
52
- } else if (zcr_len == 1) {
159
+++ b/target/arm/cpu64.c
53
- qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
160
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_get_vq(Object *obj, Visitor *v, const char *name,
54
- ":%016" PRIx64 ":%016" PRIx64 "\n",
161
ARMCPU *cpu = ARM_CPU(obj);
55
- i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
162
ARMVQMap *vq_map = opaque;
56
- env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
163
uint32_t vq = atoi(&name[3]) / 128;
57
- } else {
164
+ bool sve = vq_map == &cpu->sve_vq;
58
+ }
165
bool value;
59
+ } else {
166
60
+ for (i = 0; i < 32; i++) {
167
- /* All vector lengths are disabled when SVE is off. */
61
+ qemu_fprintf(f, "Z%02d=", i);
168
- if (!cpu_isar_feature(aa64_sve, cpu)) {
62
for (j = zcr_len; j >= 0; j--) {
169
+ /* All vector lengths are disabled when feature is off. */
63
- bool odd = (zcr_len - j) % 2 != 0;
170
+ if (sve
64
- if (j == zcr_len) {
171
+ ? !cpu_isar_feature(aa64_sve, cpu)
65
- qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
172
+ : !cpu_isar_feature(aa64_sme, cpu)) {
66
- } else if (!odd) {
173
value = false;
67
- if (j > 0) {
174
} else {
68
- qemu_fprintf(f, " [%x-%x]=", j, j - 1);
175
value = extract32(vq_map->map, vq - 1, 1);
69
- } else {
176
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
70
- qemu_fprintf(f, " [%x]=", j);
177
cpu->isar.id_aa64pfr0 = t;
71
- }
178
}
72
- }
179
73
qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
180
+void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
74
env->vfp.zregs[i].d[j * 2 + 1],
181
+{
75
- env->vfp.zregs[i].d[j * 2],
182
+ uint32_t vq_map = cpu->sme_vq.map;
76
- odd || j == 0 ? "\n" : ":");
183
+ uint32_t vq_init = cpu->sme_vq.init;
77
+ env->vfp.zregs[i].d[j * 2 + 0],
184
+ uint32_t vq_supported = cpu->sme_vq.supported;
78
+ j ? ":" : "\n");
185
+ uint32_t vq;
79
}
186
+
80
}
187
+ if (vq_map == 0) {
81
}
188
+ if (!cpu_isar_feature(aa64_sme, cpu)) {
189
+ cpu->isar.id_aa64smfr0 = 0;
190
+ return;
191
+ }
192
+
193
+ /* TODO: KVM will require limitations via SMCR_EL2. */
194
+ vq_map = vq_supported & ~vq_init;
195
+
196
+ if (vq_map == 0) {
197
+ vq = ctz32(vq_supported) + 1;
198
+ error_setg(errp, "cannot disable sme%d", vq * 128);
199
+ error_append_hint(errp, "All SME vector lengths are disabled.\n");
200
+ error_append_hint(errp, "With SME enabled, at least one "
201
+ "vector length must be enabled.\n");
202
+ return;
203
+ }
204
+ } else {
205
+ if (!cpu_isar_feature(aa64_sme, cpu)) {
206
+ vq = 32 - clz32(vq_map);
207
+ error_setg(errp, "cannot enable sme%d", vq * 128);
208
+ error_append_hint(errp, "SME must be enabled to enable "
209
+ "vector lengths.\n");
210
+ error_append_hint(errp, "Add sme=on to the CPU property list.\n");
211
+ return;
212
+ }
213
+ /* TODO: KVM will require limitations via SMCR_EL2. */
214
+ }
215
+
216
+ cpu->sme_vq.map = vq_map;
217
+}
218
+
219
+static bool cpu_arm_get_sme(Object *obj, Error **errp)
220
+{
221
+ ARMCPU *cpu = ARM_CPU(obj);
222
+ return cpu_isar_feature(aa64_sme, cpu);
223
+}
224
+
225
+static void cpu_arm_set_sme(Object *obj, bool value, Error **errp)
226
+{
227
+ ARMCPU *cpu = ARM_CPU(obj);
228
+ uint64_t t;
229
+
230
+ t = cpu->isar.id_aa64pfr1;
231
+ t = FIELD_DP64(t, ID_AA64PFR1, SME, value);
232
+ cpu->isar.id_aa64pfr1 = t;
233
+}
234
+
235
+static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp)
236
+{
237
+ ARMCPU *cpu = ARM_CPU(obj);
238
+ return cpu_isar_feature(aa64_sme, cpu) &&
239
+ cpu_isar_feature(aa64_sme_fa64, cpu);
240
+}
241
+
242
+static void cpu_arm_set_sme_fa64(Object *obj, bool value, Error **errp)
243
+{
244
+ ARMCPU *cpu = ARM_CPU(obj);
245
+ uint64_t t;
246
+
247
+ t = cpu->isar.id_aa64smfr0;
248
+ t = FIELD_DP64(t, ID_AA64SMFR0, FA64, value);
249
+ cpu->isar.id_aa64smfr0 = t;
250
+}
251
+
252
#ifdef CONFIG_USER_ONLY
253
-/* Mirror linux /proc/sys/abi/sve_default_vector_length. */
254
+/* Mirror linux /proc/sys/abi/{sve,sme}_default_vector_length. */
255
static void cpu_arm_set_default_vec_len(Object *obj, Visitor *v,
256
const char *name, void *opaque,
257
Error **errp)
258
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_default_vec_len(Object *obj, Visitor *v,
259
* and is the maximum architectural width of ZCR_ELx.LEN.
260
*/
261
if (remainder || default_vq < 1 || default_vq > 512) {
262
- error_setg(errp, "cannot set sve-default-vector-length");
263
+ ARMCPU *cpu = ARM_CPU(obj);
264
+ const char *which =
265
+ (ptr_default_vq == &cpu->sve_default_vq ? "sve" : "sme");
266
+
267
+ error_setg(errp, "cannot set %s-default-vector-length", which);
268
if (remainder) {
269
error_append_hint(errp, "Vector length not a multiple of 16\n");
270
} else if (default_vq < 1) {
271
@@ -XXX,XX +XXX,XX @@ static void aarch64_add_sve_properties(Object *obj)
272
#endif
273
}
274
275
+static void aarch64_add_sme_properties(Object *obj)
276
+{
277
+ ARMCPU *cpu = ARM_CPU(obj);
278
+ uint32_t vq;
279
+
280
+ object_property_add_bool(obj, "sme", cpu_arm_get_sme, cpu_arm_set_sme);
281
+ object_property_add_bool(obj, "sme_fa64", cpu_arm_get_sme_fa64,
282
+ cpu_arm_set_sme_fa64);
283
+
284
+ for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) {
285
+ char name[8];
286
+ sprintf(name, "sme%d", vq * 128);
287
+ object_property_add(obj, name, "bool", cpu_arm_get_vq,
288
+ cpu_arm_set_vq, NULL, &cpu->sme_vq);
289
+ }
290
+
291
+#ifdef CONFIG_USER_ONLY
292
+ /* Mirror linux /proc/sys/abi/sme_default_vector_length. */
293
+ object_property_add(obj, "sme-default-vector-length", "int32",
294
+ cpu_arm_get_default_vec_len,
295
+ cpu_arm_set_default_vec_len, NULL,
296
+ &cpu->sme_default_vq);
297
+#endif
298
+}
299
+
300
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
301
{
302
int arch_val = 0, impdef_val = 0;
303
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
304
#endif
305
306
cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ);
307
+ cpu->sme_vq.supported = SVE_VQ_POW2_MAP;
308
309
aarch64_add_pauth_properties(obj);
310
aarch64_add_sve_properties(obj);
311
+ aarch64_add_sme_properties(obj);
312
object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
313
cpu_max_set_sve_max_vq, NULL, NULL);
314
qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
315
--
82
--
316
2.25.1
83
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

Drop the aa32-only inline fallbacks,
and just use a couple of ifdefs.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220620175235.60881-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 6 ------
 target/arm/internals.h | 3 +++
 target/arm/cpu.c | 2 ++
 3 files changed, 5 insertions(+), 6 deletions(-)

From: Richard Henderson <richard.henderson@linaro.org>

Always print each matrix row whole, one per line, so that we
get the entire matrix in the proper shape.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230622151201.1578522-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ typedef struct {
21
22
#ifdef TARGET_AARCH64
23
# define ARM_MAX_VQ 16
24
-void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
25
-void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
26
-void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
27
#else
28
# define ARM_MAX_VQ 1
29
-static inline void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { }
30
-static inline void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) { }
31
-static inline void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp) { }
32
#endif
33
34
typedef struct ARMVectorReg {
35
diff --git a/target/arm/internals.h b/target/arm/internals.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/internals.h
38
+++ b/target/arm/internals.h
39
@@ -XXX,XX +XXX,XX @@ int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
40
int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
41
int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
42
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
43
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
44
+void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
45
+void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
46
#endif
47
48
#ifdef CONFIG_USER_ONLY
49
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
14
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
50
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/cpu.c
16
--- a/target/arm/cpu.c
52
+++ b/target/arm/cpu.c
17
+++ b/target/arm/cpu.c
53
@@ -XXX,XX +XXX,XX @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
18
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
54
{
19
i, q[1], q[0], (i & 1 ? "\n" : " "));
55
Error *local_err = NULL;
56
57
+#ifdef TARGET_AARCH64
58
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
59
arm_cpu_sve_finalize(cpu, &local_err);
60
if (local_err != NULL) {
61
@@ -XXX,XX +XXX,XX @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
62
return;
63
}
20
}
64
}
21
}
65
+#endif
22
+
66
23
+ if (cpu_isar_feature(aa64_sme, cpu) &&
67
if (kvm_enabled()) {
24
+ FIELD_EX64(env->svcr, SVCR, ZA) &&
68
kvm_arm_steal_time_finalize(cpu, &local_err);
25
+ sme_exception_el(env, el) == 0) {
26
+ int zcr_len = sve_vqm1_for_el_sm(env, el, true);
27
+ int svl = (zcr_len + 1) * 16;
28
+ int svl_lg10 = svl < 100 ? 2 : 3;
29
+
30
+ for (i = 0; i < svl; i++) {
31
+ qemu_fprintf(f, "ZA[%0*d]=", svl_lg10, i);
32
+ for (j = zcr_len; j >= 0; --j) {
33
+ qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%c",
34
+ env->zarray[i].d[2 * j + 1],
35
+ env->zarray[i].d[2 * j],
36
+ j ? ':' : '\n');
37
+ }
38
+ }
39
+ }
40
}
41
42
#else
69
--
43
--
70
2.25.1
44
2.34.1
From: Richard Henderson <richard.henderson@linaro.org>

This is CheckSMEAccess, which is the basis for a set of
related tests for various SME cpregs and instructions.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220620175235.60881-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h | 2 ++
 target/arm/translate.h | 1 +
 target/arm/helper.c | 52 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-a64.c | 1 +
 4 files changed, 56 insertions(+)

From: Richard Henderson <richard.henderson@linaro.org>

For the outer product set of insns, which take an entire matrix
tile as output, the argument is not a combined tile+column.
Therefore using get_tile_rowcol was incorrect, as we extracted
the tile number from itself.

The test case relies only on assembler support for SME, since
no release of GCC recognizes -march=armv9-a+sme yet.

Cc: qemu-stable@nongnu.org
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1620
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230622151201.1578522-5-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: dropped now-unneeded changes to sysregs CFLAGS]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/tcg/translate-sme.c | 24 ++++++---
 tests/tcg/aarch64/sme-outprod1.c | 83 +++++++++++++++++++++++++++++++
 tests/tcg/aarch64/Makefile.target | 7 ++-
 3 files changed, 107 insertions(+), 7 deletions(-)
 create mode 100644 tests/tcg/aarch64/sme-outprod1.c
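As background, a plain-C sketch (not part of the patch) of the reference
behaviour the new sme-outprod1 test checks: an outer product is
out[i][j] = a[i] * b[j], so a vector of 1.0 multiplied by itself gives a
matrix whose every element is 1.0:

    /* Reference outer product of two n-element vectors into an n x n matrix. */
    void outer_product(const float *a, const float *b, float *out, int n)
    {
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                out[i * n + j] = a[i] * b[j];
            }
        }
    }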
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
25
diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c
18
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
27
--- a/target/arm/tcg/translate-sme.c
20
+++ b/target/arm/cpu.h
28
+++ b/target/arm/tcg/translate-sme.c
21
@@ -XXX,XX +XXX,XX @@ void aarch64_sync_64_to_32(CPUARMState *env);
29
@@ -XXX,XX +XXX,XX @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
22
30
return addr;
23
int fp_exception_el(CPUARMState *env, int cur_el);
24
int sve_exception_el(CPUARMState *env, int cur_el);
25
+int sme_exception_el(CPUARMState *env, int cur_el);
26
27
/**
28
* sve_vqm1_for_el:
29
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, ATA, 15, 1)
30
FIELD(TBFLAG_A64, TCMA, 16, 2)
31
FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1)
32
FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
33
+FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2)
34
35
/*
36
* Helpers for using the above.
37
diff --git a/target/arm/translate.h b/target/arm/translate.h
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/arm/translate.h
40
+++ b/target/arm/translate.h
41
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
42
bool ns; /* Use non-secure CPREG bank on access */
43
int fp_excp_el; /* FP exception EL or 0 if enabled */
44
int sve_excp_el; /* SVE exception EL or 0 if enabled */
45
+ int sme_excp_el; /* SME exception EL or 0 if enabled */
46
int vl; /* current vector length in bytes */
47
bool vfp_enabled; /* FP enabled via FPSCR.EN */
48
int vec_len;
49
diff --git a/target/arm/helper.c b/target/arm/helper.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/helper.c
52
+++ b/target/arm/helper.c
53
@@ -XXX,XX +XXX,XX @@ int sve_exception_el(CPUARMState *env, int el)
54
return 0;
55
}
31
}
56
32
57
+/*
33
+/*
58
+ * Return the exception level to which exceptions should be taken for SME.
34
+ * Resolve tile.size[0] to a host pointer.
59
+ * C.f. the ARM pseudocode function CheckSMEAccess.
35
+ * Used by e.g. outer product insns where we require the entire tile.
60
+ */
36
+ */
61
+int sme_exception_el(CPUARMState *env, int el)
37
+static TCGv_ptr get_tile(DisasContext *s, int esz, int tile)
62
+{
38
+{
63
+#ifndef CONFIG_USER_ONLY
39
+ TCGv_ptr addr = tcg_temp_new_ptr();
64
+ if (el <= 1 && !el_is_in_host(env, el)) {
40
+ int offset;
65
+ switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
41
+
66
+ case 1:
42
+ offset = tile * sizeof(ARMVectorReg) + offsetof(CPUARMState, zarray);
67
+ if (el != 0) {
43
+
68
+ break;
44
+ tcg_gen_addi_ptr(addr, cpu_env, offset);
69
+ }
45
+ return addr;
70
+ /* fall through */
46
+}
71
+ case 0:
47
+
72
+ case 2:
48
static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
73
+ return 1;
49
{
50
if (!dc_isar_feature(aa64_sme, s)) {
51
@@ -XXX,XX +XXX,XX @@ static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz,
52
return true;
53
}
54
55
- /* Sum XZR+zad to find ZAd. */
56
- za = get_tile_rowcol(s, esz, 31, a->zad, false);
57
+ za = get_tile(s, esz, a->zad);
58
zn = vec_full_reg_ptr(s, a->zn);
59
pn = pred_full_reg_ptr(s, a->pn);
60
pm = pred_full_reg_ptr(s, a->pm);
61
@@ -XXX,XX +XXX,XX @@ static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz,
62
return true;
63
}
64
65
- /* Sum XZR+zad to find ZAd. */
66
- za = get_tile_rowcol(s, esz, 31, a->zad, false);
67
+ za = get_tile(s, esz, a->zad);
68
zn = vec_full_reg_ptr(s, a->zn);
69
zm = vec_full_reg_ptr(s, a->zm);
70
pn = pred_full_reg_ptr(s, a->pn);
71
@@ -XXX,XX +XXX,XX @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
72
return true;
73
}
74
75
- /* Sum XZR+zad to find ZAd. */
76
- za = get_tile_rowcol(s, esz, 31, a->zad, false);
77
+ za = get_tile(s, esz, a->zad);
78
zn = vec_full_reg_ptr(s, a->zn);
79
zm = vec_full_reg_ptr(s, a->zm);
80
pn = pred_full_reg_ptr(s, a->pn);
81
diff --git a/tests/tcg/aarch64/sme-outprod1.c b/tests/tcg/aarch64/sme-outprod1.c
82
new file mode 100644
83
index XXXXXXX..XXXXXXX
84
--- /dev/null
85
+++ b/tests/tcg/aarch64/sme-outprod1.c
86
@@ -XXX,XX +XXX,XX @@
87
+/*
88
+ * SME outer product, 1 x 1.
89
+ * SPDX-License-Identifier: GPL-2.0-or-later
90
+ */
91
+
92
+#include <stdio.h>
93
+
94
+extern void foo(float *dst);
95
+
96
+asm(
97
+"    .arch_extension sme\n"
98
+"    .type foo, @function\n"
99
+"foo:\n"
100
+"    stp x29, x30, [sp, -80]!\n"
101
+"    mov x29, sp\n"
102
+"    stp d8, d9, [sp, 16]\n"
103
+"    stp d10, d11, [sp, 32]\n"
104
+"    stp d12, d13, [sp, 48]\n"
105
+"    stp d14, d15, [sp, 64]\n"
106
+"    smstart\n"
107
+"    ptrue p0.s, vl4\n"
108
+"    fmov z0.s, #1.0\n"
109
+/*
110
+ * An outer product of a vector of 1.0 by itself should be a matrix of 1.0.
111
+ * Note that we are using tile 1 here (za1.s) rather than tile 0.
112
+ */
113
+"    zero {za}\n"
114
+"    fmopa za1.s, p0/m, p0/m, z0.s, z0.s\n"
115
+/*
116
+ * Read the first 4x4 sub-matrix of elements from tile 1:
117
+ * Note that za1h should be interchangable here.
118
+ */
119
+"    mov w12, #0\n"
120
+"    mova z0.s, p0/m, za1v.s[w12, #0]\n"
121
+"    mova z1.s, p0/m, za1v.s[w12, #1]\n"
122
+"    mova z2.s, p0/m, za1v.s[w12, #2]\n"
123
+"    mova z3.s, p0/m, za1v.s[w12, #3]\n"
124
+/*
125
+ * And store them to the input pointer (dst in the C code):
126
+ */
127
+"    st1w {z0.s}, p0, [x0]\n"
128
+"    add x0, x0, #16\n"
129
+"    st1w {z1.s}, p0, [x0]\n"
130
+"    add x0, x0, #16\n"
131
+"    st1w {z2.s}, p0, [x0]\n"
132
+"    add x0, x0, #16\n"
133
+"    st1w {z3.s}, p0, [x0]\n"
134
+"    smstop\n"
135
+"    ldp d8, d9, [sp, 16]\n"
136
+"    ldp d10, d11, [sp, 32]\n"
137
+"    ldp d12, d13, [sp, 48]\n"
138
+"    ldp d14, d15, [sp, 64]\n"
139
+"    ldp x29, x30, [sp], 80\n"
140
+"    ret\n"
141
+"    .size foo, . - foo"
142
+);
143
+
144
+int main()
145
+{
146
+ float dst[16];
147
+ int i, j;
148
+
149
+ foo(dst);
150
+
151
+ for (i = 0; i < 16; i++) {
152
+ if (dst[i] != 1.0f) {
153
+ break;
74
+ }
154
+ }
75
+ }
155
+ }
76
+
156
+
77
+ if (el <= 2 && arm_is_el2_enabled(env)) {
157
+ if (i == 16) {
78
+ /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
158
+ return 0; /* success */
79
+ if (env->cp15.hcr_el2 & HCR_E2H) {
80
+ switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
81
+ case 1:
82
+ if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
83
+ break;
84
+ }
85
+ /* fall through */
86
+ case 0:
87
+ case 2:
88
+ return 2;
89
+ }
90
+ } else {
91
+ if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
92
+ return 2;
93
+ }
94
+ }
95
+ }
159
+ }
96
+
160
+
97
+ /* CPTR_EL3. Since ESM is negative we must check for EL3. */
161
+ /* failure */
98
+ if (arm_feature(env, ARM_FEATURE_EL3)
162
+ for (i = 0; i < 4; ++i) {
99
+ && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
163
+ for (j = 0; j < 4; ++j) {
100
+ return 3;
164
+ printf("%f ", (double)dst[i * 4 + j]);
165
+ }
166
+ printf("\n");
101
+ }
167
+ }
102
+#endif
168
+ return 1;
103
+ return 0;
104
+}
169
+}
170
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
171
index XXXXXXX..XXXXXXX 100644
172
--- a/tests/tcg/aarch64/Makefile.target
173
+++ b/tests/tcg/aarch64/Makefile.target
174
@@ -XXX,XX +XXX,XX @@ config-cc.mak: Makefile
175
     $(call cc-option,-march=armv8.5-a, CROSS_CC_HAS_ARMV8_5); \
176
     $(call cc-option,-mbranch-protection=standard, CROSS_CC_HAS_ARMV8_BTI); \
177
     $(call cc-option,-march=armv8.5-a+memtag, CROSS_CC_HAS_ARMV8_MTE); \
178
-     $(call cc-option,-march=armv9-a+sme, CROSS_CC_HAS_ARMV9_SME)) 3> config-cc.mak
179
+     $(call cc-option,-Wa$(COMMA)-march=armv9-a+sme, CROSS_AS_HAS_ARMV9_SME)) 3> config-cc.mak
180
-include config-cc.mak
181
182
ifneq ($(CROSS_CC_HAS_ARMV8_2),)
183
@@ -XXX,XX +XXX,XX @@ AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4 mte-5 mte-6 mte-7
184
mte-%: CFLAGS += -march=armv8.5-a+memtag
185
endif
186
187
+# SME Tests
188
+ifneq ($(CROSS_AS_HAS_ARMV9_SME),)
189
+AARCH64_TESTS += sme-outprod1
190
+endif
105
+
191
+
106
/*
192
# System Registers Tests
107
* Given that SVE is enabled, return the vector length for EL.
193
AARCH64_TESTS += sysregs
108
*/
194
109
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
110
}
111
DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
112
}
113
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
114
+ DP_TBFLAG_A64(flags, SMEEXC_EL, sme_exception_el(env, el));
115
+ }
116
117
sctlr = regime_sctlr(env, stage1);
118
119
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/target/arm/translate-a64.c
122
+++ b/target/arm/translate-a64.c
123
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
124
dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
125
dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
126
dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
127
+ dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
128
dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
129
dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
130
dc->bt = EX_TBFLAG_A64(tb_flags, BT);
131
--
195
--
132
2.25.1
196
2.34.1
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: John Högberg <john.hogberg@ericsson.com>
2
2
3
Pull the three sve_vq_* values into a structure.
3
Unlike architectures with precise self-modifying code semantics
4
This will be reused for SME.
4
(e.g. x86), ARM processors do not maintain coherency for instruction
5
execution and memory, requiring an instruction synchronization
6
barrier on every core that will execute the new code, and on many
7
models also the explicit use of cache management instructions.
5
8
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
While this is required to make JITs work on actual hardware, QEMU
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
has gotten away with not handling this since it does not emulate
8
Message-id: 20220620175235.60881-13-richard.henderson@linaro.org
11
caches, and unconditionally invalidates code whenever the softmmu
12
or the user-mode page protection logic detects that code has been
13
modified.
14
15
Unfortunately the latter does not work in the face of dual-mapped
16
code (a common W^X workaround), where one page is executable and
17
the other is writable: user-mode has no way to connect one with the
18
other as that is only known to the kernel and the emulated
19
application.
20
21
This commit works around the issue by telling software that
22
instruction cache invalidation is required by clearing the
23
CTR_EL0.DIC flag (regardless of whether the emulated processor
24
needs it), and then invalidating code in IC IVAU instructions.
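
(Not part of the patch, just to illustrate the guest-side sequence this
caters for; the function name and the hard-coded line size below are
made up.)  A dual-mapping JIT typically cleans the data cache and then
invalidates the instruction cache over the executable alias after
writing new code, roughly:

#include <stddef.h>

/* Illustration only: publish a freshly written code range.  'rx' is the
 * executable alias of a buffer that was written through a separate
 * writable mapping. */
static void sync_icache_range(const char *rx, size_t len)
{
    const size_t line = 64;   /* assume 64-byte lines; real code reads CTR_EL0 */
    size_t i;

    for (i = 0; i < len; i += line) {
        __asm__ volatile("dc cvau, %0" : : "r"(rx + i) : "memory");
    }
    __asm__ volatile("dsb ish" : : : "memory");
    for (i = 0; i < len; i += line) {
        /* This is the instruction QEMU now traps to invalidate its TBs. */
        __asm__ volatile("ic ivau, %0" : : "r"(rx + i) : "memory");
    }
    __asm__ volatile("dsb ish" : : : "memory");
    __asm__ volatile("isb" : : : "memory");
}

With CTR_EL0.DIC clear, well-behaved software has no reason to skip the
IC IVAU loop, which is exactly the hook the user-mode emulation needs.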
25
26
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1034
27
28
Co-authored-by: Richard Henderson <richard.henderson@linaro.org>
29
Signed-off-by: John Högberg <john.hogberg@ericsson.com>
30
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
31
Message-id: 168778890374.24232.3402138851538068785-1@git.sr.ht
32
[PMM: removed unnecessary AArch64 feature check; moved
33
"clear CTR_EL1.DIC" code up a bit so it's not in the middle
34
of the vfp/neon related tests]
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
35
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
36
---
11
target/arm/cpu.h | 29 ++++++++++++++---------------
37
target/arm/cpu.c | 11 +++++++++++
12
target/arm/cpu64.c | 22 +++++++++++-----------
38
target/arm/helper.c | 47 ++++++++++++++++++++++++++++++++++++++++++---
13
target/arm/helper.c | 2 +-
39
2 files changed, 55 insertions(+), 3 deletions(-)
14
target/arm/kvm64.c | 2 +-
15
4 files changed, 27 insertions(+), 28 deletions(-)
16
40
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
41
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
18
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
43
--- a/target/arm/cpu.c
20
+++ b/target/arm/cpu.h
44
+++ b/target/arm/cpu.c
21
@@ -XXX,XX +XXX,XX @@ typedef enum ARMPSCIState {
45
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
22
23
typedef struct ARMISARegisters ARMISARegisters;
24
25
+/*
26
+ * In map, each set bit is a supported vector length of (bit-number + 1) * 16
27
+ * bytes, i.e. each bit number + 1 is the vector length in quadwords.
28
+ *
29
+ * While processing properties during initialization, corresponding init bits
30
+ * are set for bits in sve_vq_map that have been set by properties.
31
+ *
32
+ * Bits set in supported represent valid vector lengths for the CPU type.
33
+ */
34
+typedef struct {
35
+ uint32_t map, init, supported;
36
+} ARMVQMap;
37
+
38
/**
39
* ARMCPU:
40
* @env: #CPUARMState
41
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {
42
uint32_t sve_default_vq;
43
#endif
44
45
- /*
46
- * In sve_vq_map each set bit is a supported vector length of
47
- * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector
48
- * length in quadwords.
49
- *
50
- * While processing properties during initialization, corresponding
51
- * sve_vq_init bits are set for bits in sve_vq_map that have been
52
- * set by properties.
53
- *
54
- * Bits set in sve_vq_supported represent valid vector lengths for
55
- * the CPU type.
56
- */
57
- uint32_t sve_vq_map;
58
- uint32_t sve_vq_init;
59
- uint32_t sve_vq_supported;
60
+ ARMVQMap sve_vq;
61
62
/* Generic timer counter frequency, in Hz */
63
uint64_t gt_cntfrq_hz;
64
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/target/arm/cpu64.c
67
+++ b/target/arm/cpu64.c
68
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
69
* any of the above. Finally, if SVE is not disabled, then at least one
70
* vector length must be enabled.
71
*/
72
- uint32_t vq_map = cpu->sve_vq_map;
73
- uint32_t vq_init = cpu->sve_vq_init;
74
+ uint32_t vq_map = cpu->sve_vq.map;
75
+ uint32_t vq_init = cpu->sve_vq.init;
76
uint32_t vq_supported;
77
uint32_t vq_mask = 0;
78
uint32_t tmp, vq, max_vq = 0;
79
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
80
*/
81
if (kvm_enabled()) {
82
if (kvm_arm_sve_supported()) {
83
- cpu->sve_vq_supported = kvm_arm_sve_get_vls(CPU(cpu));
84
- vq_supported = cpu->sve_vq_supported;
85
+ cpu->sve_vq.supported = kvm_arm_sve_get_vls(CPU(cpu));
86
+ vq_supported = cpu->sve_vq.supported;
87
} else {
88
assert(!cpu_isar_feature(aa64_sve, cpu));
89
vq_supported = 0;
90
}
91
} else {
92
- vq_supported = cpu->sve_vq_supported;
93
+ vq_supported = cpu->sve_vq.supported;
94
}
95
96
/*
97
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
98
99
/* From now on sve_max_vq is the actual maximum supported length. */
100
cpu->sve_max_vq = max_vq;
101
- cpu->sve_vq_map = vq_map;
102
+ cpu->sve_vq.map = vq_map;
103
}
104
105
static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
106
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
107
if (!cpu_isar_feature(aa64_sve, cpu)) {
108
value = false;
109
} else {
110
- value = extract32(cpu->sve_vq_map, vq - 1, 1);
111
+ value = extract32(cpu->sve_vq.map, vq - 1, 1);
112
}
113
visit_type_bool(v, name, &value, errp);
114
}
115
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
116
return;
46
return;
117
}
47
}
118
48
119
- cpu->sve_vq_map = deposit32(cpu->sve_vq_map, vq - 1, 1, value);
49
+#ifdef CONFIG_USER_ONLY
120
- cpu->sve_vq_init |= 1 << (vq - 1);
50
+ /*
121
+ cpu->sve_vq.map = deposit32(cpu->sve_vq.map, vq - 1, 1, value);
51
+ * User mode relies on IC IVAU instructions to catch modification of
122
+ cpu->sve_vq.init |= 1 << (vq - 1);
52
+ * dual-mapped code.
123
}
53
+ *
124
54
+ * Clear CTR_EL0.DIC to ensure that software that honors these flags uses
125
static bool cpu_arm_get_sve(Object *obj, Error **errp)
55
+ * IC IVAU even if the emulated processor does not normally require it.
126
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
56
+ */
127
cpu->dcz_blocksize = 7; /* 512 bytes */
57
+ cpu->ctr = FIELD_DP64(cpu->ctr, CTR_EL0, DIC, 0);
128
#endif
58
+#endif
129
59
+
130
- cpu->sve_vq_supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ);
60
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
131
+ cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ);
61
cpu->has_vfp != cpu->has_neon) {
132
62
/*
133
aarch64_add_pauth_properties(obj);
134
aarch64_add_sve_properties(obj);
135
@@ -XXX,XX +XXX,XX @@ static void aarch64_a64fx_initfn(Object *obj)
136
137
/* The A64FX supports only 128, 256 and 512 bit vector lengths */
138
aarch64_add_sve_properties(obj);
139
- cpu->sve_vq_supported = (1 << 0) /* 128bit */
140
+ cpu->sve_vq.supported = (1 << 0) /* 128bit */
141
| (1 << 1) /* 256bit */
142
| (1 << 3); /* 512bit */
143
144
diff --git a/target/arm/helper.c b/target/arm/helper.c
63
diff --git a/target/arm/helper.c b/target/arm/helper.c
145
index XXXXXXX..XXXXXXX 100644
64
index XXXXXXX..XXXXXXX 100644
146
--- a/target/arm/helper.c
65
--- a/target/arm/helper.c
147
+++ b/target/arm/helper.c
66
+++ b/target/arm/helper.c
148
@@ -XXX,XX +XXX,XX @@ uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
67
@@ -XXX,XX +XXX,XX @@ static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
149
len = MIN(len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
150
}
68
}
151
152
- len = 31 - clz32(cpu->sve_vq_map & MAKE_64BIT_MASK(0, len + 1));
153
+ len = 31 - clz32(cpu->sve_vq.map & MAKE_64BIT_MASK(0, len + 1));
154
return len;
155
}
69
}
156
70
157
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
71
+#ifdef CONFIG_USER_ONLY
158
index XXXXXXX..XXXXXXX 100644
72
+/*
159
--- a/target/arm/kvm64.c
73
+ * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
160
+++ b/target/arm/kvm64.c
74
+ * code to get around W^X restrictions, where one region is writable and the
161
@@ -XXX,XX +XXX,XX @@ uint32_t kvm_arm_sve_get_vls(CPUState *cs)
75
+ * other is executable.
162
static int kvm_arm_sve_set_vls(CPUState *cs)
76
+ *
163
{
77
+ * Since the executable region is never written to we cannot detect code
164
ARMCPU *cpu = ARM_CPU(cs);
78
+ * changes when running in user mode, and rely on the emulated JIT telling us
165
- uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq_map };
79
+ * that the code has changed by executing this instruction.
166
+ uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };
80
+ */
167
struct kvm_one_reg reg = {
81
+static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
168
.id = KVM_REG_ARM64_SVE_VLS,
82
+ uint64_t value)
169
.addr = (uint64_t)&vls[0],
83
+{
84
+ uint64_t icache_line_mask, start_address, end_address;
85
+ const ARMCPU *cpu;
86
+
87
+ cpu = env_archcpu(env);
88
+
89
+ icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
90
+ start_address = value & ~icache_line_mask;
91
+ end_address = value | icache_line_mask;
92
+
93
+ mmap_lock();
94
+
95
+ tb_invalidate_phys_range(start_address, end_address);
96
+
97
+ mmap_unlock();
98
+}
99
+#endif
100
+
101
static const ARMCPRegInfo v8_cp_reginfo[] = {
102
/*
103
* Minimal set of EL0-visible registers. This will need to be expanded
104
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
105
{ .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
106
.opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
107
.access = PL1_R, .type = ARM_CP_CURRENTEL },
108
- /* Cache ops: all NOPs since we don't emulate caches */
109
+ /*
110
+ * Instruction cache ops. All of these except `IC IVAU` NOP because we
111
+ * don't emulate caches.
112
+ */
113
{ .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
114
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
115
.access = PL1_W, .type = ARM_CP_NOP,
116
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
117
.accessfn = access_tocu },
118
{ .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
119
.opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
120
- .access = PL0_W, .type = ARM_CP_NOP,
121
+ .access = PL0_W,
122
.fgt = FGT_ICIVAU,
123
- .accessfn = access_tocu },
124
+ .accessfn = access_tocu,
125
+#ifdef CONFIG_USER_ONLY
126
+ .type = ARM_CP_NO_RAW,
127
+ .writefn = ic_ivau_write
128
+#else
129
+ .type = ARM_CP_NOP
130
+#endif
131
+ },
132
+ /* Cache ops: all NOPs since we don't emulate caches */
133
{ .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
134
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
135
.access = PL1_W, .accessfn = aa64_cacheop_poc_access,
170
--
136
--
171
2.25.1
137
2.34.1
138
139
1
From: Alexander Graf <agraf@csgraf.de>
1
From: Vikram Garhwal <vikram.garhwal@amd.com>
2
2
3
We need to fetch the name of the current accelerator in flexible error
3
The following changes are made to fix the Coverity issues:
4
messages in more places going forward. Let's create a helper that gives it to us
4
1. Change read_data to fix the CID 1512899: Out-of-bounds access (OVERRUN)
5
without casting in the target code.
5
2. Fix match_rx_tx_data to fix CID 1512900: Logically dead code (DEADCODE)
6
3. Replace rand() in generate_random_data() with g_random_int()
6
7
7
Signed-off-by: Alexander Graf <agraf@csgraf.de>
8
Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20230628202758.16398-1-vikram.garhwal@amd.com
9
Message-id: 20220620192242.70573-1-agraf@csgraf.de
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
---
12
include/qemu/accel.h | 1 +
13
tests/qtest/xlnx-canfd-test.c | 33 +++++++++++----------------------
13
accel/accel-common.c | 8 ++++++++
14
1 file changed, 11 insertions(+), 22 deletions(-)
14
softmmu/vl.c | 3 +--
15
3 files changed, 10 insertions(+), 2 deletions(-)
16
15
17
diff --git a/include/qemu/accel.h b/include/qemu/accel.h
16
diff --git a/tests/qtest/xlnx-canfd-test.c b/tests/qtest/xlnx-canfd-test.c
18
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
19
--- a/include/qemu/accel.h
18
--- a/tests/qtest/xlnx-canfd-test.c
20
+++ b/include/qemu/accel.h
19
+++ b/tests/qtest/xlnx-canfd-test.c
21
@@ -XXX,XX +XXX,XX @@ typedef struct AccelClass {
20
@@ -XXX,XX +XXX,XX @@ static void generate_random_data(uint32_t *buf_tx, bool is_canfd_frame)
22
21
/* Generate random TX data for CANFD frame. */
23
AccelClass *accel_find(const char *opt_name);
22
if (is_canfd_frame) {
24
AccelState *current_accel(void);
23
for (int i = 0; i < CANFD_FRAME_SIZE - 2; i++) {
25
+const char *current_accel_name(void);
24
- buf_tx[2 + i] = rand();
26
25
+ buf_tx[2 + i] = g_random_int();
27
void accel_init_interfaces(AccelClass *ac);
26
}
28
27
} else {
29
diff --git a/accel/accel-common.c b/accel/accel-common.c
28
/* Generate random TX data for CAN frame. */
30
index XXXXXXX..XXXXXXX 100644
29
for (int i = 0; i < CAN_FRAME_SIZE - 2; i++) {
31
--- a/accel/accel-common.c
30
- buf_tx[2 + i] = rand();
32
+++ b/accel/accel-common.c
31
+ buf_tx[2 + i] = g_random_int();
33
@@ -XXX,XX +XXX,XX @@ AccelClass *accel_find(const char *opt_name)
32
}
34
return ac;
33
}
35
}
34
}
36
35
37
+/* Return the name of the current accelerator */
36
-static void read_data(QTestState *qts, uint64_t can_base_addr, uint32_t *buf_rx)
38
+const char *current_accel_name(void)
37
+static void read_data(QTestState *qts, uint64_t can_base_addr, uint32_t *buf_rx,
39
+{
38
+ uint32_t frame_size)
40
+ AccelClass *ac = ACCEL_GET_CLASS(current_accel());
41
+
42
+ return ac->name;
43
+}
44
+
45
static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
46
{
39
{
47
CPUClass *cc = CPU_CLASS(klass);
40
uint32_t int_status;
48
diff --git a/softmmu/vl.c b/softmmu/vl.c
41
uint32_t fifo_status_reg_value;
49
index XXXXXXX..XXXXXXX 100644
42
/* At which RX FIFO the received data is stored. */
50
--- a/softmmu/vl.c
43
uint8_t store_ind = 0;
51
+++ b/softmmu/vl.c
44
- bool is_canfd_frame = false;
52
@@ -XXX,XX +XXX,XX @@ static void configure_accelerators(const char *progname)
45
46
/* Read the interrupt on CANFD rx. */
47
int_status = qtest_readl(qts, can_base_addr + R_ISR_OFFSET) & ISR_RXOK;
48
@@ -XXX,XX +XXX,XX @@ static void read_data(QTestState *qts, uint64_t can_base_addr, uint32_t *buf_rx)
49
buf_rx[0] = qtest_readl(qts, can_base_addr + R_RX0_ID_OFFSET);
50
buf_rx[1] = qtest_readl(qts, can_base_addr + R_RX0_DLC_OFFSET);
51
52
- is_canfd_frame = (buf_rx[1] >> DLC_FD_BIT_SHIFT) & 1;
53
-
54
- if (is_canfd_frame) {
55
- for (int i = 0; i < CANFD_FRAME_SIZE - 2; i++) {
56
- buf_rx[i + 2] = qtest_readl(qts,
57
- can_base_addr + R_RX0_DATA1_OFFSET + 4 * i);
58
- }
59
- } else {
60
- buf_rx[2] = qtest_readl(qts, can_base_addr + R_RX0_DATA1_OFFSET);
61
- buf_rx[3] = qtest_readl(qts, can_base_addr + R_RX0_DATA2_OFFSET);
62
+ for (int i = 0; i < frame_size - 2; i++) {
63
+ buf_rx[i + 2] = qtest_readl(qts,
64
+ can_base_addr + R_RX0_DATA1_OFFSET + 4 * i);
53
}
65
}
54
66
55
if (init_failed && !qtest_chrdev) {
67
/* Clear the RX interrupt. */
56
- AccelClass *ac = ACCEL_GET_CLASS(current_accel());
68
@@ -XXX,XX +XXX,XX @@ static void match_rx_tx_data(const uint32_t *buf_tx, const uint32_t *buf_rx,
57
- error_report("falling back to %s", ac->name);
69
g_assert_cmpint((buf_rx[size] & DLC_FD_BIT_MASK), ==,
58
+ error_report("falling back to %s", current_accel_name());
70
(buf_tx[size] & DLC_FD_BIT_MASK));
59
}
71
} else {
60
72
- if (!is_canfd_frame && size == 4) {
61
if (icount_enabled() && !tcg_enabled()) {
73
- break;
74
- }
75
-
76
g_assert_cmpint(buf_rx[size], ==, buf_tx[size]);
77
}
78
79
@@ -XXX,XX +XXX,XX @@ static void test_can_data_transfer(void)
80
write_data(qts, CANFD0_BASE_ADDR, buf_tx, false);
81
82
send_data(qts, CANFD0_BASE_ADDR);
83
- read_data(qts, CANFD1_BASE_ADDR, buf_rx);
84
+ read_data(qts, CANFD1_BASE_ADDR, buf_rx, CAN_FRAME_SIZE);
85
match_rx_tx_data(buf_tx, buf_rx, false);
86
87
qtest_quit(qts);
88
@@ -XXX,XX +XXX,XX @@ static void test_canfd_data_transfer(void)
89
write_data(qts, CANFD0_BASE_ADDR, buf_tx, true);
90
91
send_data(qts, CANFD0_BASE_ADDR);
92
- read_data(qts, CANFD1_BASE_ADDR, buf_rx);
93
+ read_data(qts, CANFD1_BASE_ADDR, buf_rx, CANFD_FRAME_SIZE);
94
match_rx_tx_data(buf_tx, buf_rx, true);
95
96
qtest_quit(qts);
97
@@ -XXX,XX +XXX,XX @@ static void test_can_loopback(void)
98
write_data(qts, CANFD0_BASE_ADDR, buf_tx, true);
99
100
send_data(qts, CANFD0_BASE_ADDR);
101
- read_data(qts, CANFD0_BASE_ADDR, buf_rx);
102
+ read_data(qts, CANFD0_BASE_ADDR, buf_rx, CANFD_FRAME_SIZE);
103
match_rx_tx_data(buf_tx, buf_rx, true);
104
105
generate_random_data(buf_tx, true);
106
@@ -XXX,XX +XXX,XX @@ static void test_can_loopback(void)
107
write_data(qts, CANFD1_BASE_ADDR, buf_tx, true);
108
109
send_data(qts, CANFD1_BASE_ADDR);
110
- read_data(qts, CANFD1_BASE_ADDR, buf_rx);
111
+ read_data(qts, CANFD1_BASE_ADDR, buf_rx, CANFD_FRAME_SIZE);
112
match_rx_tx_data(buf_tx, buf_rx, true);
113
114
qtest_quit(qts);
62
--
115
--
63
2.25.1
116
2.34.1
diff view generated by jsdifflib
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Fabiano Rosas <farosas@suse.de>
2
2
3
We will need these functions in translate-sme.c.
3
This code is only relevant when TCG is present in the build. Building
4
with --disable-tcg --enable-xen on an x86 host we get:
4
5
6
$ ../configure --target-list=x86_64-softmmu,aarch64-softmmu --disable-tcg --enable-xen
7
$ make -j$(nproc)
8
...
9
libqemu-aarch64-softmmu.fa.p/target_arm_gdbstub.c.o: in function `m_sysreg_ptr':
10
../target/arm/gdbstub.c:358: undefined reference to `arm_v7m_get_sp_ptr'
11
../target/arm/gdbstub.c:361: undefined reference to `arm_v7m_get_sp_ptr'
12
13
libqemu-aarch64-softmmu.fa.p/target_arm_gdbstub.c.o: in function `arm_gdb_get_m_systemreg':
14
../target/arm/gdbstub.c:405: undefined reference to `arm_v7m_mrs_control'
15
16
Signed-off-by: Fabiano Rosas <farosas@suse.de>
17
Message-id: 20230628164821.16771-1-farosas@suse.de
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220620175235.60881-21-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
20
---
10
target/arm/translate-a64.h | 38 ++++++++++++++++++++++++++++++++++++++
21
target/arm/gdbstub.c | 4 ++++
11
target/arm/translate-sve.c | 36 ------------------------------------
22
1 file changed, 4 insertions(+)
12
2 files changed, 38 insertions(+), 36 deletions(-)
13
23
14
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
24
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
15
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/translate-a64.h
26
--- a/target/arm/gdbstub.c
17
+++ b/target/arm/translate-a64.h
27
+++ b/target/arm/gdbstub.c
18
@@ -XXX,XX +XXX,XX @@ static inline int vec_full_reg_size(DisasContext *s)
28
@@ -XXX,XX +XXX,XX @@ static int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg)
19
return s->vl;
29
return cpu->dyn_sysreg_xml.num;
20
}
30
}
21
31
22
+/*
32
+#ifdef CONFIG_TCG
23
+ * Return the offset info CPUARMState of the predicate vector register Pn.
33
typedef enum {
24
+ * Note for this purpose, FFR is P16.
34
M_SYSREG_MSP,
25
+ */
35
M_SYSREG_PSP,
26
+static inline int pred_full_reg_offset(DisasContext *s, int regno)
36
@@ -XXX,XX +XXX,XX @@ static int arm_gen_dynamic_m_secextreg_xml(CPUState *cs, int orig_base_reg)
27
+{
37
return cpu->dyn_m_secextreg_xml.num;
28
+ return offsetof(CPUARMState, vfp.pregs[regno]);
38
}
29
+}
39
#endif
30
+
40
+#endif /* CONFIG_TCG */
31
+/* Return the byte size of the whole predicate register, VL / 64. */
41
32
+static inline int pred_full_reg_size(DisasContext *s)
42
const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
33
+{
43
{
34
+ return s->vl >> 3;
44
@@ -XXX,XX +XXX,XX @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
35
+}
45
arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
36
+
46
"system-registers.xml", 0);
37
+/*
47
38
+ * Round up the size of a register to a size allowed by
48
+#ifdef CONFIG_TCG
39
+ * the tcg vector infrastructure. Any operation which uses this
49
if (arm_feature(env, ARM_FEATURE_M) && tcg_enabled()) {
40
+ * size may assume that the bits above pred_full_reg_size are zero,
50
gdb_register_coprocessor(cs,
41
+ * and must leave them the same way.
51
arm_gdb_get_m_systemreg, arm_gdb_set_m_systemreg,
42
+ *
52
@@ -XXX,XX +XXX,XX @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
43
+ * Note that this is not needed for the vector registers as they
53
}
44
+ * are always properly sized for tcg vectors.
54
#endif
45
+ */
55
}
46
+static inline int size_for_gvec(int size)
56
+#endif /* CONFIG_TCG */
47
+{
57
}
48
+ if (size <= 8) {
49
+ return 8;
50
+ } else {
51
+ return QEMU_ALIGN_UP(size, 16);
52
+ }
53
+}
54
+
55
+static inline int pred_gvec_reg_size(DisasContext *s)
56
+{
57
+ return size_for_gvec(pred_full_reg_size(s));
58
+}
59
+
60
bool disas_sve(DisasContext *, uint32_t);
61
62
void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
63
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
64
index XXXXXXX..XXXXXXX 100644
65
--- a/target/arm/translate-sve.c
66
+++ b/target/arm/translate-sve.c
67
@@ -XXX,XX +XXX,XX @@ static inline int msz_dtype(DisasContext *s, int msz)
68
* Implement all of the translator functions referenced by the decoder.
69
*/
70
71
-/* Return the offset info CPUARMState of the predicate vector register Pn.
72
- * Note for this purpose, FFR is P16.
73
- */
74
-static inline int pred_full_reg_offset(DisasContext *s, int regno)
75
-{
76
- return offsetof(CPUARMState, vfp.pregs[regno]);
77
-}
78
-
79
-/* Return the byte size of the whole predicate register, VL / 64. */
80
-static inline int pred_full_reg_size(DisasContext *s)
81
-{
82
- return s->vl >> 3;
83
-}
84
-
85
-/* Round up the size of a register to a size allowed by
86
- * the tcg vector infrastructure. Any operation which uses this
87
- * size may assume that the bits above pred_full_reg_size are zero,
88
- * and must leave them the same way.
89
- *
90
- * Note that this is not needed for the vector registers as they
91
- * are always properly sized for tcg vectors.
92
- */
93
-static int size_for_gvec(int size)
94
-{
95
- if (size <= 8) {
96
- return 8;
97
- } else {
98
- return QEMU_ALIGN_UP(size, 16);
99
- }
100
-}
101
-
102
-static int pred_gvec_reg_size(DisasContext *s)
103
-{
104
- return size_for_gvec(pred_full_reg_size(s));
105
-}
106
-
107
/* Invoke an out-of-line helper on 2 Zregs. */
108
static bool gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn,
109
int rd, int rn, int data)
110
--
58
--
111
2.25.1
59
2.34.1
1
From: Martin Liška <mliska@suse.cz>
1
From: Akihiko Odaki <akihiko.odaki@daynix.com>
2
2
3
Fixes the following Sphinx warning (treated as error) starting
3
AwSRAMCClass is larger than SysBusDeviceClass so the class size must be
4
with 5.0 release:
4
advertised accordingly.
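
(For readers less familiar with QOM, a generic sketch of the pattern
being fixed; every name here is invented, and only the .class_size line
mirrors the actual one-line change in the diff below.)

#include "qemu/osdep.h"
#include "hw/sysbus.h"

/* Hypothetical device, for illustration only. */
typedef struct MyDeviceState { SysBusDevice parent_obj; } MyDeviceState;

typedef struct MyDeviceClass {
    SysBusDeviceClass parent_class;   /* must come first */
    void (*extra_hook)(void);         /* extra members make the class bigger */
} MyDeviceClass;

static void my_device_class_init(ObjectClass *oc, void *data)
{
    /* Touching members beyond SysBusDeviceClass is only safe because
     * .class_size below tells QOM how much memory to allocate. */
    ((MyDeviceClass *)oc)->extra_hook = NULL;
}

static const TypeInfo my_device_info = {
    .name          = "x-my-device",
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MyDeviceState),
    .class_size    = sizeof(MyDeviceClass),   /* the easily forgotten line */
    .class_init    = my_device_class_init,
};

static void my_device_register_types(void)
{
    type_register_static(&my_device_info);
}
type_init(my_device_register_types)

Without the .class_size entry QOM would allocate only
sizeof(SysBusDeviceClass) for the class object, and class_init would
write past the end of it.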
5
5
6
Warning, treated as error:
6
Fixes: 05def917e1 ("hw: arm: allwinner-sramc: Add SRAM Controller support for R40")
7
Invalid configuration value found: 'language = None'. Update your configuration to a valid langauge code. Falling back to 'en' (English).
7
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
8
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Signed-off-by: Martin Liska <mliska@suse.cz>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: e91e51ee-48ac-437e-6467-98b56ee40042@suse.cz
10
Message-id: 20230628110905.38125-1-akihiko.odaki@daynix.com
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
12
---
14
docs/conf.py | 2 +-
13
hw/misc/allwinner-sramc.c | 1 +
15
1 file changed, 1 insertion(+), 1 deletion(-)
14
1 file changed, 1 insertion(+)
16
15
17
diff --git a/docs/conf.py b/docs/conf.py
16
diff --git a/hw/misc/allwinner-sramc.c b/hw/misc/allwinner-sramc.c
18
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
19
--- a/docs/conf.py
18
--- a/hw/misc/allwinner-sramc.c
20
+++ b/docs/conf.py
19
+++ b/hw/misc/allwinner-sramc.c
21
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ static const TypeInfo allwinner_sramc_info = {
22
#
21
.parent = TYPE_SYS_BUS_DEVICE,
23
# This is also used if you do content translation via gettext catalogs.
22
.instance_init = allwinner_sramc_init,
24
# Usually you set "language" from the command line for these cases.
23
.instance_size = sizeof(AwSRAMCState),
25
-language = None
24
+ .class_size = sizeof(AwSRAMCClass),
26
+language = 'en'
25
.class_init = allwinner_sramc_class_init,
27
26
};
28
# List of patterns, relative to source directory, that match files and
27
29
# directories to ignore when looking for source files.
30
--
28
--
31
2.25.1
29
2.34.1
32
30
33
31
1
From: Richard Henderson <richard.henderson@linaro.org>
1
In handle_interrupt() we use level as an index into the interrupt_vector[]
2
array. This is safe because we have checked it against env->config->nlevel,
3
but Coverity can't see that (and it is only true because each CPU config
4
sets its XCHAL_NUM_INTLEVELS to something less than MAX_NLEVELS), so it
5
complains about a possible array overrun (CID 1507131)
2
6
3
We need SVL separate from VL for RDSVL et al, as well as
7
Add an assert() which will make Coverity happy and catch the unlikely
4
ZA storage loads and stores, which do not require PSTATE.SM.
8
case of a mis-set XCHAL_NUM_INTLEVELS in future.
5
9
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220620175235.60881-20-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
12
Message-id: 20230623154135.1930261-1-peter.maydell@linaro.org
10
---
13
---
11
target/arm/cpu.h | 12 ++++++++++++
14
target/xtensa/exc_helper.c | 3 +++
12
target/arm/translate.h | 1 +
15
1 file changed, 3 insertions(+)
13
target/arm/helper.c | 8 +++++++-
14
target/arm/translate-a64.c | 1 +
15
4 files changed, 21 insertions(+), 1 deletion(-)
16
16
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c
18
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
19
--- a/target/xtensa/exc_helper.c
20
+++ b/target/arm/cpu.h
20
+++ b/target/xtensa/exc_helper.c
21
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
21
@@ -XXX,XX +XXX,XX @@ static void handle_interrupt(CPUXtensaState *env)
22
FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2)
22
CPUState *cs = env_cpu(env);
23
FIELD(TBFLAG_A64, PSTATE_SM, 22, 1)
23
24
FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1)
24
if (level > 1) {
25
+FIELD(TBFLAG_A64, SVL, 24, 4)
25
+ /* env->config->nlevel check should have ensured this */
26
26
+ assert(level < sizeof(env->config->interrupt_vector));
27
/*
28
* Helpers for using the above.
29
@@ -XXX,XX +XXX,XX @@ static inline int sve_vq(CPUARMState *env)
30
return EX_TBFLAG_A64(env->hflags, VL) + 1;
31
}
32
33
+/**
34
+ * sme_vq
35
+ * @env: the cpu context
36
+ *
37
+ * Return the SVL cached within env->hflags, in units of quadwords.
38
+ */
39
+static inline int sme_vq(CPUARMState *env)
40
+{
41
+ return EX_TBFLAG_A64(env->hflags, SVL) + 1;
42
+}
43
+
27
+
44
static inline bool bswap_code(bool sctlr_b)
28
env->sregs[EPC1 + level - 1] = env->pc;
45
{
29
env->sregs[EPS2 + level - 2] = env->sregs[PS];
46
#ifdef CONFIG_USER_ONLY
30
env->sregs[PS] =
47
diff --git a/target/arm/translate.h b/target/arm/translate.h
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/translate.h
50
+++ b/target/arm/translate.h
51
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
52
int sve_excp_el; /* SVE exception EL or 0 if enabled */
53
int sme_excp_el; /* SME exception EL or 0 if enabled */
54
int vl; /* current vector length in bytes */
55
+ int svl; /* current streaming vector length in bytes */
56
bool vfp_enabled; /* FP enabled via FPSCR.EN */
57
int vec_len;
58
int vec_stride;
59
diff --git a/target/arm/helper.c b/target/arm/helper.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/arm/helper.c
62
+++ b/target/arm/helper.c
63
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
64
DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
65
}
66
if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
67
- DP_TBFLAG_A64(flags, SMEEXC_EL, sme_exception_el(env, el));
68
+ int sme_el = sme_exception_el(env, el);
69
+
70
+ DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
71
+ if (sme_el == 0) {
72
+ /* Similarly, do not compute SVL if SME is disabled. */
73
+ DP_TBFLAG_A64(flags, SVL, sve_vqm1_for_el_sm(env, el, true));
74
+ }
75
if (FIELD_EX64(env->svcr, SVCR, SM)) {
76
DP_TBFLAG_A64(flags, PSTATE_SM, 1);
77
}
78
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
79
index XXXXXXX..XXXXXXX 100644
80
--- a/target/arm/translate-a64.c
81
+++ b/target/arm/translate-a64.c
82
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
83
dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
84
dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
85
dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
86
+ dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
87
dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
88
dc->bt = EX_TBFLAG_A64(tb_flags, BT);
89
dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
90
--
31
--
91
2.25.1
32
2.34.1
1
From: Alexander Graf <agraf@csgraf.de>
1
We already squash the ID register field for FEAT_SPE (the Statistical
2
Profiling Extension) because TCG does not implement it and if we
3
advertise it to the guest the guest will crash trying to look at
4
non-existent system registers. Do the same for some other features
5
which a real hardware Neoverse-V1 implements but which TCG doesn't:
6
* FEAT_TRF (Self-hosted Trace Extension)
7
* Trace Macrocell system register access
8
* Memory mapped trace
9
* FEAT_AMU (Activity Monitors Extension)
10
* FEAT_MPAM (Memory Partitioning and Monitoring Extension)
11
* FEAT_NV (Nested Virtualization)
2
12
3
Some features such as running in EL3 or running M profile code are
13
Most of these, like FEAT_SPE, are "introspection/trace" type features
4
incompatible with virtualization as QEMU implements it today. To prevent
14
which QEMU is unlikely to ever implement. The odd-one-out here is
5
users from picking invalid configurations on other virt solutions like
15
FEAT_NV -- we could implement that and at some point we probably
6
Hvf, let's run the same checks there too.
16
will.
7
17
8
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1073
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Alexander Graf <agraf@csgraf.de>
19
Message-id: 20230704130647.2842917-2-peter.maydell@linaro.org
20
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
21
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20220620192242.70573-2-agraf@csgraf.de
12
[PMM: Allow qtest accelerator too; tweak comment]
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
22
---
15
target/arm/cpu.c | 16 ++++++++++++----
23
target/arm/cpu.c | 33 +++++++++++++++++++++++++++++----
16
1 file changed, 12 insertions(+), 4 deletions(-)
24
1 file changed, 29 insertions(+), 4 deletions(-)
17
25
18
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
26
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
19
index XXXXXXX..XXXXXXX 100644
27
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/cpu.c
28
--- a/target/arm/cpu.c
21
+++ b/target/arm/cpu.c
29
+++ b/target/arm/cpu.c
22
@@ -XXX,XX +XXX,XX @@
23
#include "hw/boards.h"
24
#endif
25
#include "sysemu/tcg.h"
26
+#include "sysemu/qtest.h"
27
#include "sysemu/hw_accel.h"
28
#include "kvm_arm.h"
29
#include "disas/capstone.h"
30
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
30
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
31
}
31
32
if (tcg_enabled()) {
33
/*
34
- * Don't report the Statistical Profiling Extension in the ID
35
- * registers, because TCG doesn't implement it yet (not even a
36
- * minimal stub version) and guests will fall over when they
37
- * try to access the non-existent system registers for it.
38
+ * Don't report some architectural features in the ID registers
39
+ * where TCG does not yet implement it (not even a minimal
40
+ * stub version). This avoids guests falling over when they
41
+ * try to access the non-existent system registers for them.
42
*/
43
+ /* FEAT_SPE (Statistical Profiling Extension) */
44
cpu->isar.id_aa64dfr0 =
45
FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
46
+ /* FEAT_TRF (Self-hosted Trace Extension) */
47
+ cpu->isar.id_aa64dfr0 =
48
+ FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
49
+ cpu->isar.id_dfr0 =
50
+ FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, TRACEFILT, 0);
51
+ /* Trace Macrocell system register access */
52
+ cpu->isar.id_aa64dfr0 =
53
+ FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEVER, 0);
54
+ cpu->isar.id_dfr0 =
55
+ FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPTRC, 0);
56
+ /* Memory mapped trace */
57
+ cpu->isar.id_dfr0 =
58
+ FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, MMAPTRC, 0);
59
+ /* FEAT_AMU (Activity Monitors Extension) */
60
+ cpu->isar.id_aa64pfr0 =
61
+ FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, AMU, 0);
62
+ cpu->isar.id_pfr0 =
63
+ FIELD_DP32(cpu->isar.id_pfr0, ID_PFR0, AMU, 0);
64
+ /* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */
65
+ cpu->isar.id_aa64pfr0 =
66
+ FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0);
67
+ /* FEAT_NV (Nested Virtualization) */
68
+ cpu->isar.id_aa64mmfr2 =
69
+ FIELD_DP64(cpu->isar.id_aa64mmfr2, ID_AA64MMFR2, NV, 0);
32
}
70
}
33
71
34
- if (kvm_enabled()) {
72
/* MPU can be configured out of a PMSA CPU either by setting has-mpu
35
+ if (!tcg_enabled() && !qtest_enabled()) {
36
/*
37
+ * We assume that no accelerator except TCG (and the "not really an
38
+ * accelerator" qtest) can handle these features, because Arm hardware
39
+ * virtualization can't virtualize them.
40
+ *
41
* Catch all the cases which might cause us to create more than one
42
* address space for the CPU (otherwise we will assert() later in
43
* cpu_address_space_init()).
44
*/
45
if (arm_feature(env, ARM_FEATURE_M)) {
46
error_setg(errp,
47
- "Cannot enable KVM when using an M-profile guest CPU");
48
+ "Cannot enable %s when using an M-profile guest CPU",
49
+ current_accel_name());
50
return;
51
}
52
if (cpu->has_el3) {
53
error_setg(errp,
54
- "Cannot enable KVM when guest CPU has EL3 enabled");
55
+ "Cannot enable %s when guest CPU has EL3 enabled",
56
+ current_accel_name());
57
return;
58
}
59
if (cpu->tag_memory) {
60
error_setg(errp,
61
- "Cannot enable KVM when guest CPUs has MTE enabled");
62
+ "Cannot enable %s when guest CPUs has MTE enabled",
63
+ current_accel_name());
64
return;
65
}
66
}
67
--
73
--
68
2.25.1
74
2.34.1
75
76
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
This will be used for raising various traps for SME.
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220620175235.60881-4-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
target/arm/syndrome.h | 14 ++++++++++++++
11
1 file changed, 14 insertions(+)
12
13
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/arm/syndrome.h
16
+++ b/target/arm/syndrome.h
17
@@ -XXX,XX +XXX,XX @@ enum arm_exception_class {
18
EC_AA64_SMC = 0x17,
19
EC_SYSTEMREGISTERTRAP = 0x18,
20
EC_SVEACCESSTRAP = 0x19,
21
+ EC_SMETRAP = 0x1d,
22
EC_INSNABORT = 0x20,
23
EC_INSNABORT_SAME_EL = 0x21,
24
EC_PCALIGNMENT = 0x22,
25
@@ -XXX,XX +XXX,XX @@ enum arm_exception_class {
26
EC_AA64_BKPT = 0x3c,
27
};
28
29
+typedef enum {
30
+ SME_ET_AccessTrap,
31
+ SME_ET_Streaming,
32
+ SME_ET_NotStreaming,
33
+ SME_ET_InactiveZA,
34
+} SMEExceptionType;
35
+
36
#define ARM_EL_EC_SHIFT 26
37
#define ARM_EL_IL_SHIFT 25
38
#define ARM_EL_ISV_SHIFT 24
39
@@ -XXX,XX +XXX,XX @@ static inline uint32_t syn_sve_access_trap(void)
40
return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
41
}
42
43
+static inline uint32_t syn_smetrap(SMEExceptionType etype, bool is_16bit)
44
+{
45
+ return (EC_SMETRAP << ARM_EL_EC_SHIFT)
46
+ | (is_16bit ? 0 : ARM_EL_IL) | etype;
47
+}
48
+
49
static inline uint32_t syn_pactrap(void)
50
{
51
return EC_PACTRAP << ARM_EL_EC_SHIFT;
52
--
53
2.25.1
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
This will be used for controlling access to SME cpregs.
4
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220620175235.60881-5-richard.henderson@linaro.org
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
---
10
target/arm/cpregs.h | 5 +++++
11
target/arm/translate-a64.c | 18 ++++++++++++++++++
12
2 files changed, 23 insertions(+)
13
14
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/cpregs.h
17
+++ b/target/arm/cpregs.h
18
@@ -XXX,XX +XXX,XX @@ enum {
19
ARM_CP_EL3_NO_EL2_UNDEF = 1 << 16,
20
ARM_CP_EL3_NO_EL2_KEEP = 1 << 17,
21
ARM_CP_EL3_NO_EL2_C_NZ = 1 << 18,
22
+ /*
23
+ * Flag: Access check for this sysreg is constrained by the
24
+ * ARM pseudocode function CheckSMEAccess().
25
+ */
26
+ ARM_CP_SME = 1 << 19,
27
};
28
29
/*
30
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/translate-a64.c
33
+++ b/target/arm/translate-a64.c
34
@@ -XXX,XX +XXX,XX @@ bool sve_access_check(DisasContext *s)
35
return fp_access_check(s);
36
}
37
38
+/*
39
+ * Check that SME access is enabled, raise an exception if not.
40
+ * Note that this function corresponds to CheckSMEAccess and is
41
+ * only used directly for cpregs.
42
+ */
43
+static bool sme_access_check(DisasContext *s)
44
+{
45
+ if (s->sme_excp_el) {
46
+ gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF,
47
+ syn_smetrap(SME_ET_AccessTrap, false),
48
+ s->sme_excp_el);
49
+ return false;
50
+ }
51
+ return true;
52
+}
53
+
54
/*
55
* This utility function is for doing register extension with an
56
* optional shift. You will likely want to pass a temporary for the
57
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
58
return;
59
} else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
60
return;
61
+ } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
62
+ return;
63
}
64
65
if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
66
--
67
2.25.1
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
This cpreg is used to access two new bits of PSTATE
4
that are not visible via any other mechanism.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220620175235.60881-6-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/cpu.h | 6 ++++++
12
target/arm/helper.c | 13 +++++++++++++
13
2 files changed, 19 insertions(+)
14
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
18
+++ b/target/arm/cpu.h
19
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
20
* nRW (also known as M[4]) is kept, inverted, in env->aarch64
21
* DAIF (exception masks) are kept in env->daif
22
* BTYPE is kept in env->btype
23
+ * SM and ZA are kept in env->svcr
24
* all other bits are stored in their correct places in env->pstate
25
*/
26
uint32_t pstate;
27
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
28
uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
29
uint32_t btype; /* BTI branch type. spsr[11:10]. */
30
uint64_t daif; /* exception masks, in the bits they are in PSTATE */
31
+ uint64_t svcr; /* PSTATE.{SM,ZA} in the bits they are in SVCR */
32
33
uint64_t elr_el[4]; /* AArch64 exception link regs */
34
uint64_t sp_el[4]; /* AArch64 banked stack pointers */
35
@@ -XXX,XX +XXX,XX @@ FIELD(CPTR_EL3, TCPAC, 31, 1)
36
#define PSTATE_MODE_EL1t 4
37
#define PSTATE_MODE_EL0t 0
38
39
+/* PSTATE bits that are accessed via SVCR and not stored in SPSR_ELx. */
40
+FIELD(SVCR, SM, 0, 1)
41
+FIELD(SVCR, ZA, 1, 1)
42
+
43
/* Write a new value to v7m.exception, thus transitioning into or out
44
* of Handler mode; this may result in a change of active stack pointer.
45
*/
46
diff --git a/target/arm/helper.c b/target/arm/helper.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/arm/helper.c
49
+++ b/target/arm/helper.c
50
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
51
return CP_ACCESS_OK;
52
}
53
54
+static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
55
+ uint64_t value)
56
+{
57
+ value &= R_SVCR_SM_MASK | R_SVCR_ZA_MASK;
58
+ /* TODO: Side effects. */
59
+ env->svcr = value;
60
+}
61
+
62
static const ARMCPRegInfo sme_reginfo[] = {
63
{ .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
64
.opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
65
.access = PL0_RW, .accessfn = access_tpidr2,
66
.fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
67
+ { .name = "SVCR", .state = ARM_CP_STATE_AA64,
68
+ .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
69
+ .access = PL0_RW, .type = ARM_CP_SME,
70
+ .fieldoffset = offsetof(CPUARMState, svcr),
71
+ .writefn = svcr_write, .raw_writefn = raw_write },
72
};
73
#endif /* TARGET_AARCH64 */
74
75
--
76
2.25.1
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
These cpregs control the streaming vector length and whether the
4
full a64 instruction set is allowed while in streaming mode.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220620175235.60881-7-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/cpu.h | 8 ++++++--
12
target/arm/helper.c | 41 +++++++++++++++++++++++++++++++++++++++++
13
2 files changed, 47 insertions(+), 2 deletions(-)
14
15
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/cpu.h
18
+++ b/target/arm/cpu.h
19
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
20
float_status standard_fp_status;
21
float_status standard_fp_status_f16;
22
23
- /* ZCR_EL[1-3] */
24
- uint64_t zcr_el[4];
25
+ uint64_t zcr_el[4]; /* ZCR_EL[1-3] */
26
+ uint64_t smcr_el[4]; /* SMCR_EL[1-3] */
27
} vfp;
28
uint64_t exclusive_addr;
29
uint64_t exclusive_val;
30
@@ -XXX,XX +XXX,XX @@ FIELD(CPTR_EL3, TCPAC, 31, 1)
31
FIELD(SVCR, SM, 0, 1)
32
FIELD(SVCR, ZA, 1, 1)
33
34
+/* Fields for SMCR_ELx. */
35
+FIELD(SMCR, LEN, 0, 4)
36
+FIELD(SMCR, FA64, 31, 1)
37
+
38
/* Write a new value to v7m.exception, thus transitioning into or out
39
* of Handler mode; this may result in a change of active stack pointer.
40
*/
41
diff --git a/target/arm/helper.c b/target/arm/helper.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/helper.c
44
+++ b/target/arm/helper.c
45
@@ -XXX,XX +XXX,XX @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
46
*/
47
{ K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
48
"ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
49
+ { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6),
50
+ "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
51
52
{ K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
53
"TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
54
@@ -XXX,XX +XXX,XX @@ static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
55
env->svcr = value;
56
}
57
58
+static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
59
+ uint64_t value)
60
+{
61
+ int cur_el = arm_current_el(env);
62
+ int old_len = sve_vqm1_for_el(env, cur_el);
63
+ int new_len;
64
+
65
+ QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
66
+ value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
67
+ raw_write(env, ri, value);
68
+
69
+ /*
70
+ * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
71
+ * when SVL is widened (old values kept, or zeros). Choose to keep the
72
+ * current values for simplicity. But for QEMU internals, we must still
73
+ * apply the narrower SVL to the Zregs and Pregs -- see the comment
74
+ * above aarch64_sve_narrow_vq.
75
+ */
76
+ new_len = sve_vqm1_for_el(env, cur_el);
77
+ if (new_len < old_len) {
78
+ aarch64_sve_narrow_vq(env, new_len + 1);
79
+ }
80
+}
81
+
82
static const ARMCPRegInfo sme_reginfo[] = {
83
{ .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
84
.opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
85
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo sme_reginfo[] = {
86
.access = PL0_RW, .type = ARM_CP_SME,
87
.fieldoffset = offsetof(CPUARMState, svcr),
88
.writefn = svcr_write, .raw_writefn = raw_write },
89
+ { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
90
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
91
+ .access = PL1_RW, .type = ARM_CP_SME,
92
+ .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
93
+ .writefn = smcr_write, .raw_writefn = raw_write },
94
+ { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
95
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
96
+ .access = PL2_RW, .type = ARM_CP_SME,
97
+ .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
98
+ .writefn = smcr_write, .raw_writefn = raw_write },
99
+ { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
100
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
101
+ .access = PL3_RW, .type = ARM_CP_SME,
102
+ .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
103
+ .writefn = smcr_write, .raw_writefn = raw_write },
104
};
105
#endif /* TARGET_AARCH64 */
106
107
--
108
2.25.1
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
Implement the streaming mode identification register, and the
4
two streaming priority registers. For QEMU, they are all RES0.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220620175235.60881-8-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/helper.c | 33 +++++++++++++++++++++++++++++++++
12
1 file changed, 33 insertions(+)
13
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
17
+++ b/target/arm/helper.c
18
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
19
return CP_ACCESS_OK;
20
}
21
22
+static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri,
23
+ bool isread)
24
+{
25
+ /* TODO: FEAT_FGT for SMPRI_EL1 but not SMPRIMAP_EL2 */
26
+ if (arm_current_el(env) < 3
27
+ && arm_feature(env, ARM_FEATURE_EL3)
28
+ && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
29
+ return CP_ACCESS_TRAP_EL3;
30
+ }
31
+ return CP_ACCESS_OK;
32
+}
33
+
34
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
35
uint64_t value)
36
{
37
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo sme_reginfo[] = {
38
.access = PL3_RW, .type = ARM_CP_SME,
39
.fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
40
.writefn = smcr_write, .raw_writefn = raw_write },
41
+ { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
42
+ .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
43
+ .access = PL1_R, .accessfn = access_aa64_tid1,
44
+ /*
45
+ * IMPLEMENTOR = 0 (software)
46
+ * REVISION = 0 (implementation defined)
47
+ * SMPS = 0 (no streaming execution priority in QEMU)
48
+ * AFFINITY = 0 (streaming sve mode not shared with other PEs)
49
+ */
50
+ .type = ARM_CP_CONST, .resetvalue = 0, },
51
+ /*
52
+ * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES 0.
53
+ */
54
+ { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
55
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
56
+ .access = PL1_RW, .accessfn = access_esm,
57
+ .type = ARM_CP_CONST, .resetvalue = 0 },
58
+ { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
59
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
60
+ .access = PL2_RW, .accessfn = access_esm,
61
+ .type = ARM_CP_CONST, .resetvalue = 0 },
62
};
63
#endif /* TARGET_AARCH64 */
64
65
--
66
2.25.1
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
These are required to determine if various insns
4
are allowed to issue.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220620175235.60881-9-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/cpu.h | 2 ++
12
target/arm/translate.h | 4 ++++
13
target/arm/helper.c | 4 ++++
14
target/arm/translate-a64.c | 2 ++
15
4 files changed, 12 insertions(+)
16
17
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu.h
20
+++ b/target/arm/cpu.h
21
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_A64, TCMA, 16, 2)
22
FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1)
23
FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
24
FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2)
25
+FIELD(TBFLAG_A64, PSTATE_SM, 22, 1)
26
+FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1)
27
28
/*
29
* Helpers for using the above.
30
diff --git a/target/arm/translate.h b/target/arm/translate.h
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/translate.h
33
+++ b/target/arm/translate.h
34
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
35
bool align_mem;
36
/* True if PSTATE.IL is set */
37
bool pstate_il;
38
+ /* True if PSTATE.SM is set. */
39
+ bool pstate_sm;
40
+ /* True if PSTATE.ZA is set. */
41
+ bool pstate_za;
42
/* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
43
bool mve_no_pred;
44
/*
45
diff --git a/target/arm/helper.c b/target/arm/helper.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/arm/helper.c
48
+++ b/target/arm/helper.c
49
@@ -XXX,XX +XXX,XX @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
50
}
51
if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
52
DP_TBFLAG_A64(flags, SMEEXC_EL, sme_exception_el(env, el));
53
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
54
+ DP_TBFLAG_A64(flags, PSTATE_SM, 1);
55
+ }
56
+ DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
57
}
58
59
sctlr = regime_sctlr(env, stage1);
60
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/arm/translate-a64.c
63
+++ b/target/arm/translate-a64.c
64
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
65
dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
66
dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
67
dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
68
+ dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
69
+ dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
70
dc->vec_len = 0;
71
dc->vec_stride = 0;
72
dc->cp_regs = arm_cpu->cp_regs;
73
--
74
2.25.1
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
Place this late in the resettable section of the structure,
4
to keep the most common element offsets from being > 64k.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220620175235.60881-10-richard.henderson@linaro.org
9
[PMM: expanded comment on zarray[] format]
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/cpu.h | 22 ++++++++++++++++++++++
13
target/arm/machine.c | 34 ++++++++++++++++++++++++++++++++++
14
2 files changed, 56 insertions(+)
15
16
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/arm/cpu.h
19
+++ b/target/arm/cpu.h
20
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState {
21
} keys;
22
23
uint64_t scxtnum_el[4];
24
+
25
+ /*
26
+ * SME ZA storage -- 256 x 256 byte array, with bytes in host word order,
27
+ * as we do with vfp.zregs[]. This corresponds to the architectural ZA
28
+ * array, where ZA[N] is in the least-significant bytes of env->zarray[N].
29
+ * When SVL is less than the architectural maximum, the accessible
30
+ * storage is restricted, such that if the SVL is X bytes the guest can
31
+ * see only the bottom X elements of zarray[], and only the least
32
+ * significant X bytes of each element of the array. (In other words,
33
+ * the observable part is always square.)
34
+ *
35
+ * The ZA storage can also be considered as a set of square tiles of
36
+ * elements of different sizes. The mapping from tiles to the ZA array
37
+ * is architecturally defined, such that for tiles of elements of esz
38
+ * bytes, the Nth row (or "horizontal slice") of tile T is in
39
+ * ZA[T + N * esz]. Note that this means that each tile is not contiguous
40
+ * in the ZA storage, because its rows are striped through the ZA array.
41
+ *
42
+ * Because this is so large, keep this toward the end of the reset area,
43
+ * to keep the offsets into the rest of the structure smaller.
44
+ */
45
+ ARMVectorReg zarray[ARM_MAX_VQ * 16];
46
#endif
47
48
#if defined(CONFIG_USER_ONLY)
49
diff --git a/target/arm/machine.c b/target/arm/machine.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/machine.c
52
+++ b/target/arm/machine.c
53
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_sve = {
54
VMSTATE_END_OF_LIST()
55
}
56
};
57
+
58
+static const VMStateDescription vmstate_vreg = {
59
+ .name = "vreg",
60
+ .version_id = 1,
61
+ .minimum_version_id = 1,
62
+ .fields = (VMStateField[]) {
63
+ VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2),
64
+ VMSTATE_END_OF_LIST()
65
+ }
66
+};
67
+
68
+static bool za_needed(void *opaque)
69
+{
70
+ ARMCPU *cpu = opaque;
71
+
72
+ /*
73
+ * When ZA storage is disabled, its contents are discarded.
74
+ * It will be zeroed when ZA storage is re-enabled.
75
+ */
76
+ return FIELD_EX64(cpu->env.svcr, SVCR, ZA);
77
+}
78
+
79
+static const VMStateDescription vmstate_za = {
80
+ .name = "cpu/sme",
81
+ .version_id = 1,
82
+ .minimum_version_id = 1,
83
+ .needed = za_needed,
84
+ .fields = (VMStateField[]) {
85
+ VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0,
86
+ vmstate_vreg, ARMVectorReg),
87
+ VMSTATE_END_OF_LIST()
88
+ }
89
+};
90
#endif /* AARCH64 */
91
92
static bool serror_needed(void *opaque)
93
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_arm_cpu = {
94
&vmstate_m_security,
95
#ifdef TARGET_AARCH64
96
&vmstate_sve,
97
+ &vmstate_za,
98
#endif
99
&vmstate_serror,
100
&vmstate_irq_line_state,
101
--
102
2.25.1
diff view generated by jsdifflib
Deleted patch
1
From: Richard Henderson <richard.henderson@linaro.org>
2
1
3
These two instructions are aliases of MSR (immediate).
4
Use the two helpers to properly implement svcr_write.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220620175235.60881-11-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/cpu.h | 1 +
12
target/arm/helper-sme.h | 21 +++++++++++++
13
target/arm/helper.h | 1 +
14
target/arm/helper.c | 6 ++--
15
target/arm/sme_helper.c | 61 ++++++++++++++++++++++++++++++++++++++
16
target/arm/translate-a64.c | 24 +++++++++++++++
17
target/arm/meson.build | 1 +
18
7 files changed, 112 insertions(+), 3 deletions(-)
19
create mode 100644 target/arm/helper-sme.h
20
create mode 100644 target/arm/sme_helper.c
21
22
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/target/arm/cpu.h
25
+++ b/target/arm/cpu.h
26
@@ -XXX,XX +XXX,XX @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
27
int new_el, bool el0_a64);
28
void aarch64_add_sve_properties(Object *obj);
29
void aarch64_add_pauth_properties(Object *obj);
30
+void arm_reset_sve_state(CPUARMState *env);
31
32
/*
33
* SVE registers are encoded in KVM's memory in an endianness-invariant format.
34
diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h
35
new file mode 100644
36
index XXXXXXX..XXXXXXX
37
--- /dev/null
38
+++ b/target/arm/helper-sme.h
39
@@ -XXX,XX +XXX,XX @@
40
+/*
41
+ * AArch64 SME specific helper definitions
42
+ *
43
+ * Copyright (c) 2022 Linaro, Ltd
44
+ *
45
+ * This library is free software; you can redistribute it and/or
46
+ * modify it under the terms of the GNU Lesser General Public
47
+ * License as published by the Free Software Foundation; either
48
+ * version 2.1 of the License, or (at your option) any later version.
49
+ *
50
+ * This library is distributed in the hope that it will be useful,
51
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
52
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
53
+ * Lesser General Public License for more details.
54
+ *
55
+ * You should have received a copy of the GNU Lesser General Public
56
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
57
+ */
58
+
59
+DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32)
60
+DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32)
61
diff --git a/target/arm/helper.h b/target/arm/helper.h
62
index XXXXXXX..XXXXXXX 100644
63
--- a/target/arm/helper.h
64
+++ b/target/arm/helper.h
65
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
66
#ifdef TARGET_AARCH64
67
#include "helper-a64.h"
68
#include "helper-sve.h"
69
+#include "helper-sme.h"
70
#endif
71
72
#include "helper-mve.h"
73
diff --git a/target/arm/helper.c b/target/arm/helper.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/target/arm/helper.c
76
+++ b/target/arm/helper.c
77
@@ -XXX,XX +XXX,XX @@ static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri,
78
static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
79
uint64_t value)
80
{
81
- value &= R_SVCR_SM_MASK | R_SVCR_ZA_MASK;
82
- /* TODO: Side effects. */
83
- env->svcr = value;
84
+ helper_set_pstate_sm(env, FIELD_EX64(value, SVCR, SM));
85
+ helper_set_pstate_za(env, FIELD_EX64(value, SVCR, ZA));
86
+ arm_rebuild_hflags(env);
87
}
88
89
static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
90
diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c
91
new file mode 100644
92
index XXXXXXX..XXXXXXX
93
--- /dev/null
94
+++ b/target/arm/sme_helper.c
95
@@ -XXX,XX +XXX,XX @@
96
+/*
97
+ * ARM SME Operations
98
+ *
99
+ * Copyright (c) 2022 Linaro, Ltd.
100
+ *
101
+ * This library is free software; you can redistribute it and/or
102
+ * modify it under the terms of the GNU Lesser General Public
103
+ * License as published by the Free Software Foundation; either
104
+ * version 2.1 of the License, or (at your option) any later version.
105
+ *
106
+ * This library is distributed in the hope that it will be useful,
107
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
108
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
109
+ * Lesser General Public License for more details.
110
+ *
111
+ * You should have received a copy of the GNU Lesser General Public
112
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
113
+ */
114
+
115
+#include "qemu/osdep.h"
116
+#include "cpu.h"
117
+#include "internals.h"
118
+#include "exec/helper-proto.h"
119
+
120
+/* ResetSVEState */
121
+void arm_reset_sve_state(CPUARMState *env)
122
+{
123
+ memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
124
+ /* Recall that FFR is stored as pregs[16]. */
125
+ memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
126
+ vfp_set_fpcr(env, 0x0800009f);
127
+}
128
+
129
+void helper_set_pstate_sm(CPUARMState *env, uint32_t i)
130
+{
131
+ if (i == FIELD_EX64(env->svcr, SVCR, SM)) {
132
+ return;
133
+ }
134
+ env->svcr ^= R_SVCR_SM_MASK;
135
+ arm_reset_sve_state(env);
136
+}
137
+
138
+void helper_set_pstate_za(CPUARMState *env, uint32_t i)
139
+{
140
+ if (i == FIELD_EX64(env->svcr, SVCR, ZA)) {
141
+ return;
142
+ }
143
+ env->svcr ^= R_SVCR_ZA_MASK;
144
+
145
+ /*
146
+ * ResetSMEState.
147
+ *
148
+ * SetPSTATE_ZA zeros on enable and disable. We can zero this only
149
+ * on enable: while disabled, the storage is inaccessible and the
150
+ * value does not matter. We're not saving the storage in vmstate
151
+ * when disabled either.
152
+ */
153
+ if (i) {
154
+ memset(env->zarray, 0, sizeof(env->zarray));
155
+ }
156
+}
157
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
158
index XXXXXXX..XXXXXXX 100644
159
--- a/target/arm/translate-a64.c
160
+++ b/target/arm/translate-a64.c
161
@@ -XXX,XX +XXX,XX @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
162
}
163
break;
164
165
+ case 0x1b: /* SVCR* */
166
+ if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) {
167
+ goto do_unallocated;
168
+ }
169
+ if (sme_access_check(s)) {
170
+ bool i = crm & 1;
171
+ bool changed = false;
172
+
173
+ if ((crm & 2) && i != s->pstate_sm) {
174
+ gen_helper_set_pstate_sm(cpu_env, tcg_constant_i32(i));
175
+ changed = true;
176
+ }
177
+ if ((crm & 4) && i != s->pstate_za) {
178
+ gen_helper_set_pstate_za(cpu_env, tcg_constant_i32(i));
179
+ changed = true;
180
+ }
181
+ if (changed) {
182
+ gen_rebuild_hflags(s);
183
+ } else {
184
+ s->base.is_jmp = DISAS_NEXT;
185
+ }
186
+ }
187
+ break;
188
+
189
default:
190
do_unallocated:
191
unallocated_encoding(s);
192
diff --git a/target/arm/meson.build b/target/arm/meson.build
193
index XXXXXXX..XXXXXXX 100644
194
--- a/target/arm/meson.build
195
+++ b/target/arm/meson.build
196
@@ -XXX,XX +XXX,XX @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
197
'mte_helper.c',
198
'pauth_helper.c',
199
'sve_helper.c',
200
+ 'sme_helper.c',
201
'translate-a64.c',
202
'translate-sve.c',
203
))
204
--
205
2.25.1
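
For reference, the CRm decode used in the translate-a64.c hunk above can be
exercised on its own.  This is a standalone sketch (not QEMU source) of the
same mapping: CRm bit 0 is the value being written, bit 1 selects PSTATE.SM
and bit 2 selects PSTATE.ZA, with CRm restricted to 2..7:

    #include <stdbool.h>
    #include <stdio.h>

    /* Decode an MSR-immediate SVCR* alias the way the hunk above does. */
    static void decode_svcr_crm(unsigned crm)
    {
        bool value = crm & 1;

        printf("CRm=%u:%s%s -> %d\n", crm,
               (crm & 2) ? " SM" : "",
               (crm & 4) ? " ZA" : "",
               (int)value);
    }

    int main(void)
    {
        for (unsigned crm = 2; crm <= 7; crm++) {
            decode_svcr_crm(crm);   /* e.g. CRm=7 sets both SM and ZA to 1 */
        }
        return 0;
    }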
diff view generated by jsdifflib
Now that we have implemented support for FEAT_LSE2, we can define
a CPU model for the Neoverse-V1, and enable it for the virt and
sbsa-ref boards.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20230704130647.2842917-3-peter.maydell@linaro.org
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 docs/system/arm/virt.rst |   1 +
 hw/arm/sbsa-ref.c        |   1 +
 hw/arm/virt.c            |   1 +
 target/arm/tcg/cpu64.c   | 128 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 131 insertions(+)

diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/arm/virt.rst
+++ b/docs/system/arm/virt.rst
@@ -XXX,XX +XXX,XX @@ Supported guest CPU types:
 - ``a64fx`` (64-bit)
 - ``host`` (with KVM only)
 - ``neoverse-n1`` (64-bit)
+- ``neoverse-v1`` (64-bit)
 - ``max`` (same as ``host`` for KVM; best possible emulation with TCG)

 Note that the default is ``cortex-a15``, so for an AArch64 guest you must
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -XXX,XX +XXX,XX @@ static const char * const valid_cpus[] = {
     ARM_CPU_TYPE_NAME("cortex-a57"),
     ARM_CPU_TYPE_NAME("cortex-a72"),
     ARM_CPU_TYPE_NAME("neoverse-n1"),
+    ARM_CPU_TYPE_NAME("neoverse-v1"),
     ARM_CPU_TYPE_NAME("max"),
 };

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -XXX,XX +XXX,XX @@ static const char *valid_cpus[] = {
     ARM_CPU_TYPE_NAME("cortex-a76"),
     ARM_CPU_TYPE_NAME("a64fx"),
     ARM_CPU_TYPE_NAME("neoverse-n1"),
+    ARM_CPU_TYPE_NAME("neoverse-v1"),
 #endif
     ARM_CPU_TYPE_NAME("cortex-a53"),
     ARM_CPU_TYPE_NAME("cortex-a57"),
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu)
     define_arm_cp_regs(cpu, neoverse_n1_cp_reginfo);
 }

+static const ARMCPRegInfo neoverse_v1_cp_reginfo[] = {
+    { .name = "CPUECTLR2_EL1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 5,
+      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+    { .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 0,
+      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+    { .name = "CPUPPMCR2_EL3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 1,
+      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+    { .name = "CPUPPMCR3_EL3", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 6,
+      .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+};
+
+static void define_neoverse_v1_cp_reginfo(ARMCPU *cpu)
+{
+    /*
+     * The Neoverse V1 has all of the Neoverse N1's IMPDEF
+     * registers and a few more of its own.
+     */
+    define_arm_cp_regs(cpu, neoverse_n1_cp_reginfo);
+    define_arm_cp_regs(cpu, neoverse_v1_cp_reginfo);
+}
+
 static void aarch64_neoverse_n1_initfn(Object *obj)
 {
     ARMCPU *cpu = ARM_CPU(obj);
@@ -XXX,XX +XXX,XX @@ static void aarch64_neoverse_n1_initfn(Object *obj)
     define_neoverse_n1_cp_reginfo(cpu);
 }

+static void aarch64_neoverse_v1_initfn(Object *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    cpu->dtb_compatible = "arm,neoverse-v1";
+    set_feature(&cpu->env, ARM_FEATURE_V8);
+    set_feature(&cpu->env, ARM_FEATURE_NEON);
+    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+    set_feature(&cpu->env, ARM_FEATURE_EL2);
+    set_feature(&cpu->env, ARM_FEATURE_EL3);
+    set_feature(&cpu->env, ARM_FEATURE_PMU);
+
+    /* Ordered by 3.2.4 AArch64 registers by functional group */
+    cpu->clidr = 0x82000023;
+    cpu->ctr = 0xb444c004; /* With DIC and IDC set */
+    cpu->dcz_blocksize = 4;
+    cpu->id_aa64afr0 = 0x00000000;
+    cpu->id_aa64afr1 = 0x00000000;
+    cpu->isar.id_aa64dfr0 = 0x000001f210305519ull;
+    cpu->isar.id_aa64dfr1 = 0x00000000;
+    cpu->isar.id_aa64isar0 = 0x1011111110212120ull; /* with FEAT_RNG */
+    cpu->isar.id_aa64isar1 = 0x0111000001211032ull;
+    cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull;
+    cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
+    cpu->isar.id_aa64mmfr2 = 0x0220011102101011ull;
+    cpu->isar.id_aa64pfr0 = 0x1101110120111112ull; /* GIC filled in later */
+    cpu->isar.id_aa64pfr1 = 0x0000000000000020ull;
+    cpu->id_afr0 = 0x00000000;
+    cpu->isar.id_dfr0 = 0x15011099;
+    cpu->isar.id_isar0 = 0x02101110;
+    cpu->isar.id_isar1 = 0x13112111;
+    cpu->isar.id_isar2 = 0x21232042;
+    cpu->isar.id_isar3 = 0x01112131;
+    cpu->isar.id_isar4 = 0x00010142;
+    cpu->isar.id_isar5 = 0x11011121;
+    cpu->isar.id_isar6 = 0x01100111;
+    cpu->isar.id_mmfr0 = 0x10201105;
+    cpu->isar.id_mmfr1 = 0x40000000;
+    cpu->isar.id_mmfr2 = 0x01260000;
+    cpu->isar.id_mmfr3 = 0x02122211;
+    cpu->isar.id_mmfr4 = 0x01021110;
+    cpu->isar.id_pfr0 = 0x21110131;
+    cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
+    cpu->isar.id_pfr2 = 0x00000011;
+    cpu->midr = 0x411FD402; /* r1p2 */
+    cpu->revidr = 0;
+
+    /*
+     * The Neoverse-V1 r1p2 TRM lists 32-bit format CCSIDR_EL1 values,
+     * but also says it implements CCIDX, which means they should be
+     * 64-bit format. So we here use values which are based on the textual
+     * information in chapter 2 of the TRM (and on the fact that
+     * sets * associativity * linesize == cachesize).
+     *
+     * The 64-bit CCSIDR_EL1 format is:
+     *  [55:32] number of sets - 1
+     *  [23:3]  associativity - 1
+     *  [2:0]   log2(linesize) - 4
+     *          so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc
+     *
+     * L1: 4-way set associative 64-byte line size, total size 64K,
+     * so sets is 256.
+     *
+     * L2: 8-way set associative, 64 byte line size, either 512K or 1MB.
+     * We pick 1MB, so this has 2048 sets.
+     *
+     * L3: No L3 (this matches the CLIDR_EL1 value).
+     */
+    cpu->ccsidr[0] = 0x000000ff0000001aull; /* 64KB L1 dcache */
+    cpu->ccsidr[1] = 0x000000ff0000001aull; /* 64KB L1 icache */
+    cpu->ccsidr[2] = 0x000007ff0000003aull; /* 1MB L2 cache */
+
+    /* From 3.2.115 SCTLR_EL3 */
+    cpu->reset_sctlr = 0x30c50838;
+
+    /* From 3.4.8 ICC_CTLR_EL3 and 3.4.23 ICH_VTR_EL2 */
+    cpu->gic_num_lrs = 4;
+    cpu->gic_vpribits = 5;
+    cpu->gic_vprebits = 5;
+    cpu->gic_pribits = 5;
+
+    /* From 3.5.1 AdvSIMD AArch64 register summary */
+    cpu->isar.mvfr0 = 0x10110222;
+    cpu->isar.mvfr1 = 0x13211111;
+    cpu->isar.mvfr2 = 0x00000043;
+
+    /* From 3.7.5 ID_AA64ZFR0_EL1 */
+    cpu->isar.id_aa64zfr0 = 0x0000100000100000;
+    cpu->sve_vq.supported = (1 << 0)  /* 128bit */
+                          | (1 << 1); /* 256bit */
+
+    /* From 5.5.1 AArch64 PMU register summary */
+    cpu->isar.reset_pmcr_el0 = 0x41213000;
+
+    define_neoverse_v1_cp_reginfo(cpu);
+
+    aarch64_add_pauth_properties(obj);
+    aarch64_add_sve_properties(obj);
+}
+
 /*
  * -cpu max: a CPU with as many features enabled as our emulation supports.
  * The version of '-cpu max' for qemu-system-arm is defined in cpu32.c;
@@ -XXX,XX +XXX,XX @@ static const ARMCPUInfo aarch64_cpus[] = {
     { .name = "cortex-a76", .initfn = aarch64_a76_initfn },
     { .name = "a64fx", .initfn = aarch64_a64fx_initfn },
     { .name = "neoverse-n1", .initfn = aarch64_neoverse_n1_initfn },
+    { .name = "neoverse-v1", .initfn = aarch64_neoverse_v1_initfn },
 };

 static void aarch64_cpu_register_types(void)
--
2.34.1

From: Richard Henderson <richard.henderson@linaro.org>

Move the code from hw/arm/virt.c that is supposed
to handle v7 into the one function.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reported-by: He Zhe <zhe.he@windriver.com>
Message-id: 20220619001541.131672-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/virt.c    | 10 +---------
 target/arm/ptw.c | 24 ++++++++++++++++--------
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
         cpuobj = object_new(possible_cpus->cpus[0].type);
         armcpu = ARM_CPU(cpuobj);

-        if (object_property_get_bool(cpuobj, "aarch64", NULL)) {
-            pa_bits = arm_pamax(armcpu);
-        } else if (arm_feature(&armcpu->env, ARM_FEATURE_LPAE)) {
-            /* v7 with LPAE */
-            pa_bits = 40;
-        } else {
-            /* Anything else */
-            pa_bits = 32;
-        }
+        pa_bits = arm_pamax(armcpu);

         object_unref(cpuobj);

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static const uint8_t pamax_map[] = {
 /* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
 unsigned int arm_pamax(ARMCPU *cpu)
 {
-    unsigned int parange =
-        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+        unsigned int parange =
+            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

-    /*
-     * id_aa64mmfr0 is a read-only register so values outside of the
-     * supported mappings can be considered an implementation error.
-     */
-    assert(parange < ARRAY_SIZE(pamax_map));
-    return pamax_map[parange];
+        /*
+         * id_aa64mmfr0 is a read-only register so values outside of the
+         * supported mappings can be considered an implementation error.
+         */
+        assert(parange < ARRAY_SIZE(pamax_map));
+        return pamax_map[parange];
+    }
+    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
+        /* v7 with LPAE */
+        return 40;
+    }
+    /* Anything else */
+    return 32;
 }

--
2.25.1
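
The CCSIDR_EL1 constants above follow directly from the field layout quoted
in the comment.  As a standalone check (a sketch, not QEMU code; the helper
name is invented), packing the cache geometry reproduces both values used
for the Neoverse-V1:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 64-bit format CCSIDR_EL1:
     *   [55:32] sets - 1, [23:3] associativity - 1, [2:0] log2(linesize) - 4
     */
    static uint64_t ccsidr64(uint64_t sets, uint64_t assoc, unsigned log2_linesize)
    {
        return ((sets - 1) << 32) | ((assoc - 1) << 3) | (log2_linesize - 4);
    }

    int main(void)
    {
        /* 64KB, 4-way, 64-byte lines -> 256 sets */
        assert(ccsidr64(256, 4, 6) == 0x000000ff0000001aull);
        /* 1MB, 8-way, 64-byte lines -> 2048 sets */
        assert(ccsidr64(2048, 8, 6) == 0x000007ff0000003aull);
        printf("CCSIDR values match the ones used in the patch\n");
        return 0;
    }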
diff view generated by jsdifflib
If you build QEMU with the clang sanitizer enabled, you can see it
fire when running the arm-cpu-features test:

$ QTEST_QEMU_BINARY=./build/arm-clang/qemu-system-aarch64 ./build/arm-clang/tests/qtest/arm-cpu-features
[...]
../../target/arm/cpu64.c:125:19: runtime error: shift exponent 64 is too large for 64-bit type 'unsigned long long'
[...]

This happens because the user can specify some incorrect SVE
properties that result in our calculating a max_vq of 0.  We catch
this and error out, but before we do that we calculate

 vq_mask = MAKE_64BIT_MASK(0, max_vq);

and the MAKE_64BIT_MASK() call is only valid for lengths that are
greater than zero, so we hit the undefined behaviour.

Change the logic so that if max_vq is 0 we specifically set vq_mask
to 0 without going via MAKE_64BIT_MASK().  This lets us drop the
max_vq check from the error-exit logic, because if max_vq is 0 then
vq_map must now be 0.

The UB only happens in the case where the user passed us an incorrect
set of SVE properties, so it's not a big problem in practice.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230704154332.3014896-1-peter.maydell@linaro.org
---
 target/arm/cpu64.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
     vq = ctz32(tmp) + 1;

     max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
-    vq_mask = MAKE_64BIT_MASK(0, max_vq);
+    vq_mask = max_vq > 0 ? MAKE_64BIT_MASK(0, max_vq) : 0;
     vq_map = vq_supported & ~vq_init & vq_mask;

-    if (max_vq == 0 || vq_map == 0) {
+    if (vq_map == 0) {
         error_setg(errp, "cannot disable sve%d", vq * 128);
         error_append_hint(errp, "Disabling sve%d results in all "
                           "vector lengths being disabled.\n",
--
2.34.1

From: Richard Henderson <richard.henderson@linaro.org>

Keep all of the error messages together.  This does mean that
when setting many sve length properties we'll only generate
one error, but we only really need one.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220620175235.60881-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu64.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
                               "using only sve<N> properties.\n");
         } else {
             error_setg(errp, "cannot enable sve%d", vq * 128);
-            error_append_hint(errp, "This CPU does not support "
-                              "the vector length %d-bits.\n", vq * 128);
+            if (vq_supported) {
+                error_append_hint(errp, "This CPU does not support "
+                                  "the vector length %d-bits.\n", vq * 128);
+            } else {
+                error_append_hint(errp, "SVE not supported by KVM "
+                                  "on this host\n");
+            }
         }
         return;
     } else {
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
         return;
     }

-    if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
-        error_setg(errp, "cannot enable %s", name);
-        error_append_hint(errp, "SVE not supported by KVM on this host\n");
-        return;
-    }
-
     cpu->sve_vq_map = deposit32(cpu->sve_vq_map, vq - 1, 1, value);
     cpu->sve_vq_init |= 1 << (vq - 1);
 }
--
2.25.1
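
The guarded form can be illustrated outside QEMU.  In the sketch below,
make_64bit_mask() is a simplified stand-in for the real MAKE_64BIT_MASK()
macro, used only to show why a zero width has to be special-cased as the
fix above does:

    #include <assert.h>
    #include <stdint.h>

    /* Shifting by 64 - length is undefined when length is 0; that is the
     * case the patch above guards against before building vq_mask. */
    static uint64_t make_64bit_mask(unsigned shift, unsigned length)
    {
        assert(length > 0 && length <= 64 - shift);
        return (~0ULL >> (64 - length)) << shift;
    }

    int main(void)
    {
        unsigned max_vq = 0;    /* the bad-property case from the commit message */
        uint64_t vq_mask = max_vq > 0 ? make_64bit_mask(0, max_vq) : 0;

        assert(vq_mask == 0);                    /* guarded, no UB */
        assert(make_64bit_mask(0, 4) == 0xf);    /* normal case unchanged */
        return 0;
    }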
diff view generated by jsdifflib
Deleted patch
=============
From: Richard Henderson <richard.henderson@linaro.org>

Rename from cpu_arm_{get,set}_sve_vq, and take the
ARMVQMap as the opaque parameter.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220620175235.60881-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu64.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
 }

 /*
- * Note that cpu_arm_get/set_sve_vq cannot use the simpler
- * object_property_add_bool interface because they make use
- * of the contents of "name" to determine which bit on which
- * to operate.
+ * Note that cpu_arm_{get,set}_vq cannot use the simpler
+ * object_property_add_bool interface because they make use of the
+ * contents of "name" to determine which bit on which to operate.
  */
-static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
-                               void *opaque, Error **errp)
+static void cpu_arm_get_vq(Object *obj, Visitor *v, const char *name,
+                           void *opaque, Error **errp)
 {
     ARMCPU *cpu = ARM_CPU(obj);
+    ARMVQMap *vq_map = opaque;
     uint32_t vq = atoi(&name[3]) / 128;
     bool value;

@@ -XXX,XX +XXX,XX @@ static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
     if (!cpu_isar_feature(aa64_sve, cpu)) {
         value = false;
     } else {
-        value = extract32(cpu->sve_vq.map, vq - 1, 1);
+        value = extract32(vq_map->map, vq - 1, 1);
     }
     visit_type_bool(v, name, &value, errp);
 }

-static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
-                               void *opaque, Error **errp)
+static void cpu_arm_set_vq(Object *obj, Visitor *v, const char *name,
+                           void *opaque, Error **errp)
 {
-    ARMCPU *cpu = ARM_CPU(obj);
+    ARMVQMap *vq_map = opaque;
     uint32_t vq = atoi(&name[3]) / 128;
     bool value;

@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
         return;
     }

-    cpu->sve_vq.map = deposit32(cpu->sve_vq.map, vq - 1, 1, value);
-    cpu->sve_vq.init |= 1 << (vq - 1);
+    vq_map->map = deposit32(vq_map->map, vq - 1, 1, value);
+    vq_map->init |= 1 << (vq - 1);
 }

 static bool cpu_arm_get_sve(Object *obj, Error **errp)
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v,

 void aarch64_add_sve_properties(Object *obj)
 {
+    ARMCPU *cpu = ARM_CPU(obj);
     uint32_t vq;

     object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);
@@ -XXX,XX +XXX,XX @@ void aarch64_add_sve_properties(Object *obj)
     for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
         char name[8];
         sprintf(name, "sve%d", vq * 128);
-        object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
-                            cpu_arm_set_sve_vq, NULL, NULL);
+        object_property_add(obj, name, "bool", cpu_arm_get_vq,
+                            cpu_arm_set_vq, NULL, &cpu->sve_vq);
     }

 #ifdef CONFIG_USER_ONLY
--
2.25.1
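
The pattern of keying one accessor off the property name while the opaque
pointer selects which bit-map it operates on is what lets the same code
later serve SME as well as SVE lengths.  A standalone sketch of that idea
(types and names invented for the example; this is not the QOM API):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct {
        uint32_t map;
        uint32_t init;
    } VQMap;

    /* One setter for any length map: the name carries the width, the
     * opaque-style pointer carries the map to modify. */
    static void set_vq(VQMap *vq_map, const char *name, bool value)
    {
        uint32_t vq = atoi(&name[3]) / 128;   /* "sve256" -> 2 */

        vq_map->map = (vq_map->map & ~(1u << (vq - 1))) |
                      ((uint32_t)value << (vq - 1));
        vq_map->init |= 1u << (vq - 1);
    }

    int main(void)
    {
        VQMap sve = { 0 }, sme = { 0 };

        set_vq(&sve, "sve256", true);
        set_vq(&sme, "sme512", true);   /* same function, different map */
        assert(sve.map == 0x2 && sme.map == 0x8);
        return 0;
    }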
diff view generated by jsdifflib
Deleted patch
=============
From: Richard Henderson <richard.henderson@linaro.org>

These functions are not used outside cpu64.c,
so make them static.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220620175235.60881-17-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h   | 3 ---
 target/arm/cpu64.c | 4 ++--
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
 void aarch64_sve_change_el(CPUARMState *env, int old_el,
                            int new_el, bool el0_a64);
-void aarch64_add_sve_properties(Object *obj);
-void aarch64_add_pauth_properties(Object *obj);
 void arm_reset_sve_state(CPUARMState *env);

 /*
@@ -XXX,XX +XXX,XX @@ static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
 static inline void aarch64_sve_change_el(CPUARMState *env, int o,
                                          int n, bool a)
 { }
-static inline void aarch64_add_sve_properties(Object *obj) { }
 #endif

 void aarch64_sync_32_to_64(CPUARMState *env);
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_get_default_vec_len(Object *obj, Visitor *v,
 }
 #endif

-void aarch64_add_sve_properties(Object *obj)
+static void aarch64_add_sve_properties(Object *obj)
 {
     ARMCPU *cpu = ARM_CPU(obj);
     uint32_t vq;
@@ -XXX,XX +XXX,XX @@ static Property arm_cpu_pauth_property =
 static Property arm_cpu_pauth_impdef_property =
     DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);

-void aarch64_add_pauth_properties(Object *obj)
+static void aarch64_add_pauth_properties(Object *obj)
 {
     ARMCPU *cpu = ARM_CPU(obj);

--
2.25.1
diff view generated by jsdifflib
Deleted patch
=============
From: Richard Henderson <richard.henderson@linaro.org>

When Streaming SVE mode is enabled, the size is taken from
SMCR_ELx instead of ZCR_ELx.  The format is shared, but the
set of vector lengths is not.  Further, Streaming SVE does
not require any particular length to be supported.

Adjust sve_vqm1_for_el to pass the current value of PSTATE.SM
to the new function.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220620175235.60881-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h    |  9 +++++++--
 target/arm/helper.c | 32 +++++++++++++++++++++++++-------
 2 files changed, 32 insertions(+), 9 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ int sve_exception_el(CPUARMState *env, int cur_el);
 int sme_exception_el(CPUARMState *env, int cur_el);

 /**
- * sve_vqm1_for_el:
+ * sve_vqm1_for_el_sm:
  * @env: CPUARMState
  * @el: exception level
+ * @sm: streaming mode
  *
- * Compute the current SVE vector length for @el, in units of
+ * Compute the current vector length for @el & @sm, in units of
  * Quadwords Minus 1 -- the same scale used for ZCR_ELx.LEN.
+ * If @sm, compute for SVL, otherwise NVL.
  */
+uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm);
+
+/* Likewise, but using @sm = PSTATE.SM. */
 uint32_t sve_vqm1_for_el(CPUARMState *env, int el);

 static inline bool is_a64(CPUARMState *env)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ int sme_exception_el(CPUARMState *env, int el)
 /*
  * Given that SVE is enabled, return the vector length for EL.
  */
-uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
+uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
 {
     ARMCPU *cpu = env_archcpu(env);
-    uint32_t len = cpu->sve_max_vq - 1;
+    uint64_t *cr = env->vfp.zcr_el;
+    uint32_t map = cpu->sve_vq.map;
+    uint32_t len = ARM_MAX_VQ - 1;
+
+    if (sm) {
+        cr = env->vfp.smcr_el;
+        map = cpu->sme_vq.map;
+    }

     if (el <= 1 && !el_is_in_host(env, el)) {
-        len = MIN(len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
+        len = MIN(len, 0xf & (uint32_t)cr[1]);
     }
     if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
-        len = MIN(len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
+        len = MIN(len, 0xf & (uint32_t)cr[2]);
     }
     if (arm_feature(env, ARM_FEATURE_EL3)) {
-        len = MIN(len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
+        len = MIN(len, 0xf & (uint32_t)cr[3]);
     }

-    len = 31 - clz32(cpu->sve_vq.map & MAKE_64BIT_MASK(0, len + 1));
-    return len;
+    map &= MAKE_64BIT_MASK(0, len + 1);
+    if (map != 0) {
+        return 31 - clz32(map);
+    }
+
+    /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
+    assert(sm);
+    return ctz32(cpu->sme_vq.map);
+}
+
+uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
+{
+    return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
+}

 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
--
2.25.1
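
The length selection above can be summarised outside QEMU: clamp the
requested LEN by each controlling EL, pick the highest supported length no
bigger than the clamp, and fall back to the smallest supported streaming
length when nothing at or below the clamp is supported.  A standalone
sketch of that selection step (not QEMU code; it uses GCC/clang builtins
in place of clz32/ctz32, and the example map is made up):

    #include <assert.h>
    #include <stdint.h>

    /* Pick vq-1 from a supported-lengths bitmap and a clamped LEN value. */
    static uint32_t vqm1_for(uint32_t supported_map, uint32_t len_req)
    {
        uint32_t map = supported_map & ((1u << (len_req + 1)) - 1);

        if (map != 0) {
            return 31 - __builtin_clz(map);   /* highest supported <= request */
        }
        /* Nothing supported at or below the request: smallest supported SVL. */
        return __builtin_ctz(supported_map);
    }

    int main(void)
    {
        /* Hypothetical SME map supporting only 256-bit and 512-bit. */
        uint32_t sme_map = (1u << 1) | (1u << 3);

        assert(vqm1_for(sme_map, 0) == 1);   /* ask for 128-bit, get 256-bit */
        assert(vqm1_for(sme_map, 2) == 1);   /* ask for 384-bit, get 256-bit */
        assert(vqm1_for(sme_map, 8) == 3);   /* ask for more, get 512-bit    */
        return 0;
    }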
diff view generated by jsdifflib