1
target-arm queue: two bug fixes, plus the KVM/SVE patchset,
1
The following changes since commit 55ef0b702bc2c90c3c4ed97f97676d8f139e5ca1:
2
which is a new feature but one which was in my pre-softfreeze
3
pullreq (it just had to be dropped due to an unexpected test failure.)
4
2
5
thanks
3
Merge remote-tracking branch 'remotes/lvivier-gitlab/tags/linux-user-for-7.0-pull-request' into staging (2022-02-07 10:48:25 +0000)
6
-- PMM
7
8
The following changes since commit b7c9a7f353c0e260519bf735ff0d4aa01e72784b:
9
10
Merge remote-tracking branch 'remotes/jnsnow/tags/ide-pull-request' into staging (2019-10-31 15:57:30 +0000)
11
4
12
are available in the Git repository at:
5
are available in the Git repository at:
13
6
14
https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20191101-1
7
https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220208
15
8
16
for you to fetch changes up to d9ae7624b659362cb2bb2b04fee53bf50829ca56:
9
for you to fetch changes up to 4fd1ebb10593087d45d2f56f7f3d13447d24802c:
17
10
18
target/arm: Allow reading flags from FPSCR for M-profile (2019-11-01 08:49:10 +0000)
11
hw/sensor: Add lsm303dlhc magnetometer device (2022-02-08 10:56:29 +0000)
19
12
20
----------------------------------------------------------------
13
----------------------------------------------------------------
21
target-arm queue:
14
target-arm queue:
22
* Support SVE in KVM guests
15
* Fix handling of SVE ZCR_LEN when using VHE
23
* Don't UNDEF on M-profile 'vmrs apsr_nzcv, fpscr'
16
* xlnx-zynqmp: 'Or' the QSPI / QSPI DMA IRQs
24
* Update hflags after boot.c modifies CPU state
17
* Don't ever enable PSCI when booting guest in EL3
18
* Adhere to SMCCC 1.3 section 5.2
19
* highbank: Fix issues with booting SMP
20
* midway: Fix issues booting at all
21
* boot: Drop existing dtb /psci node rather than retaining it
22
* versal-virt: Always call arm_load_kernel()
23
* force flag recalculation when messing with DAIF
24
* hw/timer/armv7m_systick: Update clock source before enabling timer
25
* hw/arm/smmuv3: Fix device reset
26
* hw/intc/arm_gicv3_its: refactorings and minor bug fixes
27
* hw/sensor: Add lsm303dlhc magnetometer device
25
28
26
----------------------------------------------------------------
29
----------------------------------------------------------------
27
Andrew Jones (9):
30
Alex Bennée (1):
28
target/arm/monitor: Introduce qmp_query_cpu_model_expansion
31
arm: force flag recalculation when messing with DAIF
29
tests: arm: Introduce cpu feature tests
30
target/arm: Allow SVE to be disabled via a CPU property
31
target/arm/cpu64: max cpu: Introduce sve<N> properties
32
target/arm/kvm64: Add kvm_arch_get/put_sve
33
target/arm/kvm64: max cpu: Enable SVE when available
34
target/arm/kvm: scratch vcpu: Preserve input kvm_vcpu_init features
35
target/arm/cpu64: max cpu: Support sve properties with KVM
36
target/arm/kvm: host cpu: Add support for sve<N> properties
37
38
Christophe Lyon (1):
39
target/arm: Allow reading flags from FPSCR for M-profile
40
32
41
Edgar E. Iglesias (1):
33
Edgar E. Iglesias (1):
42
hw/arm/boot: Rebuild hflags when modifying CPUState at boot
34
hw/arm: versal-virt: Always call arm_load_kernel()
43
35
44
tests/Makefile.include | 5 +-
36
Eric Auger (1):
45
qapi/machine-target.json | 6 +-
37
hw/arm/smmuv3: Fix device reset
46
include/qemu/bitops.h | 1 +
47
target/arm/cpu.h | 21 ++
48
target/arm/kvm_arm.h | 39 +++
49
hw/arm/boot.c | 1 +
50
target/arm/cpu.c | 25 +-
51
target/arm/cpu64.c | 364 +++++++++++++++++++++++++--
52
target/arm/helper.c | 10 +-
53
target/arm/kvm.c | 25 +-
54
target/arm/kvm32.c | 6 +-
55
target/arm/kvm64.c | 325 +++++++++++++++++++++---
56
target/arm/monitor.c | 158 ++++++++++++
57
target/arm/translate-vfp.inc.c | 5 +-
58
tests/arm-cpu-features.c | 551 +++++++++++++++++++++++++++++++++++++++++
59
docs/arm-cpu-features.rst | 317 ++++++++++++++++++++++++
60
16 files changed, 1795 insertions(+), 64 deletions(-)
61
create mode 100644 tests/arm-cpu-features.c
62
create mode 100644 docs/arm-cpu-features.rst
63
38
39
Francisco Iglesias (1):
40
hw/arm/xlnx-zynqmp: 'Or' the QSPI / QSPI DMA IRQs
41
42
Kevin Townsend (1):
43
hw/sensor: Add lsm303dlhc magnetometer device
44
45
Peter Maydell (29):
46
target/arm: make psci-conduit settable after realize
47
cpu.c: Make start-powered-off settable after realize
48
hw/arm/boot: Support setting psci-conduit based on guest EL
49
hw/arm: imx: Don't enable PSCI conduit when booting guest in EL3
50
hw/arm: allwinner: Don't enable PSCI conduit when booting guest in EL3
51
hw/arm/xlnx-zcu102: Don't enable PSCI conduit when booting guest in EL3
52
hw/arm/versal: Let boot.c handle PSCI enablement
53
hw/arm/virt: Let boot.c handle PSCI enablement
54
hw/arm: highbank: For EL3 guests, don't enable PSCI, start all cores
55
arm: tcg: Adhere to SMCCC 1.3 section 5.2
56
hw/arm/highbank: Drop use of secure_board_setup
57
hw/arm/boot: Prevent setting both psci_conduit and secure_board_setup
58
hw/arm/boot: Don't write secondary boot stub if using PSCI
59
hw/arm/highbank: Drop unused secondary boot stub code
60
hw/arm/boot: Drop nb_cpus field from arm_boot_info
61
hw/arm/boot: Drop existing dtb /psci node rather than retaining it
62
hw/intc/arm_gicv3_its: Use address_space_map() to access command queue packets
63
hw/intc/arm_gicv3_its: Keep DTEs as a struct, not a raw uint64_t
64
hw/intc/arm_gicv3_its: Pass DTEntry to update_dte()
65
hw/intc/arm_gicv3_its: Keep CTEs as a struct, not a raw uint64_t
66
hw/intc/arm_gicv3_its: Pass CTEntry to update_cte()
67
hw/intc/arm_gicv3_its: Fix address calculation in get_ite() and update_ite()
68
hw/intc/arm_gicv3_its: Avoid nested ifs in get_ite()
69
hw/intc/arm_gicv3_its: Pass ITE values back from get_ite() via a struct
70
hw/intc/arm_gicv3_its: Make update_ite() use ITEntry
71
hw/intc/arm_gicv3_its: Drop TableDesc and CmdQDesc valid fields
72
hw/intc/arm_gicv3_its: In MAPC with V=0, don't check rdbase field
73
hw/intc/arm_gicv3_its: Don't allow intid 1023 in MAPI/MAPTI
74
hw/intc/arm_gicv3_its: Split error checks
75
76
Richard Henderson (4):
77
target/arm: Fix sve_zcr_len_for_el for VHE mode running
78
target/arm: Tidy sve_exception_el for CPACR_EL1 access
79
target/arm: Fix {fp, sve}_exception_el for VHE mode running
80
target/arm: Use CPTR_TFP with CPTR_EL3 in fp_exception_el
81
82
Richard Petri (1):
83
hw/timer/armv7m_systick: Update clock source before enabling timer
84
85
hw/intc/gicv3_internal.h | 23 +-
86
include/hw/arm/boot.h | 14 +-
87
include/hw/arm/xlnx-versal.h | 1 -
88
include/hw/arm/xlnx-zynqmp.h | 2 +
89
include/hw/intc/arm_gicv3_its_common.h | 2 -
90
cpu.c | 22 +-
91
hw/arm/allwinner-h3.c | 9 +-
92
hw/arm/aspeed.c | 1 -
93
hw/arm/boot.c | 107 ++++-
94
hw/arm/exynos4_boards.c | 1 -
95
hw/arm/fsl-imx6ul.c | 2 -
96
hw/arm/fsl-imx7.c | 8 +-
97
hw/arm/highbank.c | 72 +---
98
hw/arm/imx25_pdk.c | 3 +-
99
hw/arm/kzm.c | 1 -
100
hw/arm/mcimx6ul-evk.c | 2 +-
101
hw/arm/mcimx7d-sabre.c | 2 +-
102
hw/arm/npcm7xx.c | 3 -
103
hw/arm/orangepi.c | 5 +-
104
hw/arm/raspi.c | 1 -
105
hw/arm/realview.c | 1 -
106
hw/arm/sabrelite.c | 1 -
107
hw/arm/sbsa-ref.c | 1 -
108
hw/arm/smmuv3.c | 6 +
109
hw/arm/vexpress.c | 1 -
110
hw/arm/virt.c | 13 +-
111
hw/arm/xilinx_zynq.c | 1 -
112
hw/arm/xlnx-versal-virt.c | 17 +-
113
hw/arm/xlnx-versal.c | 5 +-
114
hw/arm/xlnx-zcu102.c | 1 +
115
hw/arm/xlnx-zynqmp.c | 25 +-
116
hw/intc/arm_gicv3_its.c | 696 +++++++++++++++------------------
117
hw/sensor/lsm303dlhc_mag.c | 556 ++++++++++++++++++++++++++
118
hw/timer/armv7m_systick.c | 8 +-
119
target/arm/cpu.c | 6 +-
120
target/arm/helper-a64.c | 2 +
121
target/arm/helper.c | 118 ++++--
122
target/arm/psci.c | 35 +-
123
tests/qtest/lsm303dlhc-mag-test.c | 148 +++++++
124
hw/sensor/Kconfig | 4 +
125
hw/sensor/meson.build | 1 +
126
tests/qtest/meson.build | 1 +
127
42 files changed, 1308 insertions(+), 620 deletions(-)
128
create mode 100644 hw/sensor/lsm303dlhc_mag.c
129
create mode 100644 tests/qtest/lsm303dlhc-mag-test.c
130
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
When HCR_EL2.{E2H,TGE} == '11', ZCR_EL1 is unused.
4
5
Reported-by: Zenghui Yu <yuzenghui@huawei.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
9
Message-id: 20220127063428.30212-2-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
---
12
target/arm/helper.c | 3 ++-
13
1 file changed, 2 insertions(+), 1 deletion(-)
14
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper.c
18
+++ b/target/arm/helper.c
19
@@ -XXX,XX +XXX,XX @@ uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
20
ARMCPU *cpu = env_archcpu(env);
21
uint32_t zcr_len = cpu->sve_max_vq - 1;
22
23
- if (el <= 1) {
24
+ if (el <= 1 &&
25
+ (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
26
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
27
}
28
if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
29
--
30
2.25.1
31
32
diff view generated by jsdifflib
1
From: Christophe Lyon <christophe.lyon@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
rt==15 is a special case when reading the flags: it means the
3
Extract entire fields for ZEN and FPEN, rather than testing specific bits.
4
destination is APSR. This patch avoids rejecting
4
This makes it easier to follow the code versus the ARM spec.
5
vmrs apsr_nzcv, fpscr
6
as illegal instruction.
7
5
8
Cc: qemu-stable@nongnu.org
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Christophe Lyon <christophe.lyon@linaro.org>
10
Message-id: 20191025095711.10853-1-christophe.lyon@linaro.org
11
[PMM: updated the comment]
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
9
Message-id: 20220127063428.30212-3-richard.henderson@linaro.org
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
---
11
---
15
target/arm/translate-vfp.inc.c | 5 +++--
12
target/arm/helper.c | 36 +++++++++++++++++-------------------
16
1 file changed, 3 insertions(+), 2 deletions(-)
13
1 file changed, 17 insertions(+), 19 deletions(-)
17
14
18
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
19
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
20
--- a/target/arm/translate-vfp.inc.c
17
--- a/target/arm/helper.c
21
+++ b/target/arm/translate-vfp.inc.c
18
+++ b/target/arm/helper.c
22
@@ -XXX,XX +XXX,XX @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
19
@@ -XXX,XX +XXX,XX @@ int sve_exception_el(CPUARMState *env, int el)
23
if (arm_dc_feature(s, ARM_FEATURE_M)) {
20
uint64_t hcr_el2 = arm_hcr_el2_eff(env);
24
/*
21
25
* The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
22
if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
26
- * Writes to R15 are UNPREDICTABLE; we choose to undef.
23
- bool disabled = false;
27
+ * Accesses to R15 are UNPREDICTABLE; we choose to undef.
24
-
28
+ * (FPSCR -> r15 is a special case which writes to the PSR flags.)
25
- /* The CPACR.ZEN controls traps to EL1:
29
*/
26
- * 0, 2 : trap EL0 and EL1 accesses
30
- if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
27
- * 1 : trap only EL0 accesses
31
+ if (a->rt == 15 && (!a->l || a->reg != ARM_VFP_FPSCR)) {
28
- * 3 : trap no accesses
32
return false;
29
- */
30
- if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
31
- disabled = true;
32
- } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
33
- disabled = el == 0;
34
- }
35
- if (disabled) {
36
+ /* Check CPACR.ZEN. */
37
+ switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
38
+ case 1:
39
+ if (el != 0) {
40
+ break;
41
+ }
42
+ /* fall through */
43
+ case 0:
44
+ case 2:
45
/* route_to_el2 */
46
return hcr_el2 & HCR_TGE ? 2 : 1;
47
}
48
49
/* Check CPACR.FPEN. */
50
- if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
51
- disabled = true;
52
- } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
53
- disabled = el == 0;
54
- }
55
- if (disabled) {
56
+ switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
57
+ case 1:
58
+ if (el != 0) {
59
+ break;
60
+ }
61
+ /* fall through */
62
+ case 0:
63
+ case 2:
64
return 0;
33
}
65
}
34
}
66
}
35
--
67
--
36
2.20.1
68
2.25.1
37
69
38
70
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
These are the SVE equivalents to kvm_arch_get/put_fpsimd. Note, the
3
When HCR_EL2.E2H is set, the format of CPTR_EL2 changes to
4
swabbing is different than it is for fpsmid because the vector format
4
look more like CPACR_EL1, with ZEN and FPEN fields instead
5
is a little-endian stream of words.
5
of TZ and TFP fields.
6
6
7
Signed-off-by: Andrew Jones <drjones@redhat.com>
7
Reported-by: Zenghui Yu <yuzenghui@huawei.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Eric Auger <eric.auger@redhat.com>
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
10
Message-id: 20220127063428.30212-4-richard.henderson@linaro.org
11
Message-id: 20191031142734.8590-6-drjones@redhat.com
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
12
---
14
target/arm/kvm64.c | 185 ++++++++++++++++++++++++++++++++++++++-------
13
target/arm/helper.c | 77 +++++++++++++++++++++++++++++++++++----------
15
1 file changed, 156 insertions(+), 29 deletions(-)
14
1 file changed, 60 insertions(+), 17 deletions(-)
16
15
17
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
18
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/kvm64.c
18
--- a/target/arm/helper.c
20
+++ b/target/arm/kvm64.c
19
+++ b/target/arm/helper.c
21
@@ -XXX,XX +XXX,XX @@ int kvm_arch_destroy_vcpu(CPUState *cs)
20
@@ -XXX,XX +XXX,XX @@ int sve_exception_el(CPUARMState *env, int el)
22
bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
23
{
24
/* Return true if the regidx is a register we should synchronize
25
- * via the cpreg_tuples array (ie is not a core reg we sync by
26
- * hand in kvm_arch_get/put_registers())
27
+ * via the cpreg_tuples array (ie is not a core or sve reg that
28
+ * we sync by hand in kvm_arch_get/put_registers())
29
*/
30
switch (regidx & KVM_REG_ARM_COPROC_MASK) {
31
case KVM_REG_ARM_CORE:
32
+ case KVM_REG_ARM64_SVE:
33
return false;
34
default:
35
return true;
36
@@ -XXX,XX +XXX,XX @@ int kvm_arm_cpreg_level(uint64_t regidx)
37
38
static int kvm_arch_put_fpsimd(CPUState *cs)
39
{
40
- ARMCPU *cpu = ARM_CPU(cs);
41
- CPUARMState *env = &cpu->env;
42
+ CPUARMState *env = &ARM_CPU(cs)->env;
43
struct kvm_one_reg reg;
44
- uint32_t fpr;
45
int i, ret;
46
47
for (i = 0; i < 32; i++) {
48
@@ -XXX,XX +XXX,XX @@ static int kvm_arch_put_fpsimd(CPUState *cs)
49
}
21
}
50
}
22
}
51
23
52
- reg.addr = (uintptr_t)(&fpr);
24
- /* CPTR_EL2. Since TZ and TFP are positive,
53
- fpr = vfp_get_fpsr(env);
25
- * they will be zero when EL2 is not present.
54
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
26
+ /*
55
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
27
+ * CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE).
56
- if (ret) {
28
*/
57
- return ret;
29
- if (el <= 2 && arm_is_el2_enabled(env)) {
58
+ return 0;
30
- if (env->cp15.cptr_el[2] & CPTR_TZ) {
59
+}
31
- return 2;
32
- }
33
- if (env->cp15.cptr_el[2] & CPTR_TFP) {
34
- return 0;
35
+ if (el <= 2) {
36
+ if (hcr_el2 & HCR_E2H) {
37
+ /* Check CPTR_EL2.ZEN. */
38
+ switch (extract32(env->cp15.cptr_el[2], 16, 2)) {
39
+ case 1:
40
+ if (el != 0 || !(hcr_el2 & HCR_TGE)) {
41
+ break;
42
+ }
43
+ /* fall through */
44
+ case 0:
45
+ case 2:
46
+ return 2;
47
+ }
60
+
48
+
61
+/*
49
+ /* Check CPTR_EL2.FPEN. */
62
+ * SVE registers are encoded in KVM's memory in an endianness-invariant format.
50
+ switch (extract32(env->cp15.cptr_el[2], 20, 2)) {
63
+ * The byte at offset i from the start of the in-memory representation contains
51
+ case 1:
64
+ * the bits [(7 + 8 * i) : (8 * i)] of the register value. As this means the
52
+ if (el == 2 || !(hcr_el2 & HCR_TGE)) {
65
+ * lowest offsets are stored in the lowest memory addresses, then that nearly
53
+ break;
66
+ * matches QEMU's representation, which is to use an array of host-endian
54
+ }
67
+ * uint64_t's, where the lower offsets are at the lower indices. To complete
55
+ /* fall through */
68
+ * the translation we just need to byte swap the uint64_t's on big-endian hosts.
56
+ case 0:
69
+ */
57
+ case 2:
70
+static uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
58
+ return 0;
71
+{
59
+ }
72
+#ifdef HOST_WORDS_BIGENDIAN
60
+ } else if (arm_is_el2_enabled(env)) {
73
+ int i;
61
+ if (env->cp15.cptr_el[2] & CPTR_TZ) {
74
+
62
+ return 2;
75
+ for (i = 0; i < nr; ++i) {
63
+ }
76
+ dst[i] = bswap64(src[i]);
64
+ if (env->cp15.cptr_el[2] & CPTR_TFP) {
77
}
65
+ return 0;
78
66
+ }
79
- reg.addr = (uintptr_t)(&fpr);
80
- fpr = vfp_get_fpcr(env);
81
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
82
+ return dst;
83
+#else
84
+ return src;
85
+#endif
86
+}
87
+
88
+/*
89
+ * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
90
+ * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
91
+ * code the slice index to zero for now as it's unlikely we'll need more than
92
+ * one slice for quite some time.
93
+ */
94
+static int kvm_arch_put_sve(CPUState *cs)
95
+{
96
+ ARMCPU *cpu = ARM_CPU(cs);
97
+ CPUARMState *env = &cpu->env;
98
+ uint64_t tmp[ARM_MAX_VQ * 2];
99
+ uint64_t *r;
100
+ struct kvm_one_reg reg;
101
+ int n, ret;
102
+
103
+ for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
104
+ r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
105
+ reg.addr = (uintptr_t)r;
106
+ reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
107
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
108
+ if (ret) {
109
+ return ret;
110
+ }
111
+ }
112
+
113
+ for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
114
+ r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
115
+ DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
116
+ reg.addr = (uintptr_t)r;
117
+ reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
118
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
119
+ if (ret) {
120
+ return ret;
121
+ }
122
+ }
123
+
124
+ r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
125
+ DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
126
+ reg.addr = (uintptr_t)r;
127
+ reg.id = KVM_REG_ARM64_SVE_FFR(0);
128
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
129
if (ret) {
130
return ret;
131
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
132
{
133
struct kvm_one_reg reg;
134
uint64_t val;
135
+ uint32_t fpr;
136
int i, ret;
137
unsigned int el;
138
139
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
140
}
67
}
141
}
68
}
142
69
143
- ret = kvm_arch_put_fpsimd(cs);
70
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
144
+ if (cpu_isar_feature(aa64_sve, cpu)) {
71
int fp_exception_el(CPUARMState *env, int cur_el)
145
+ ret = kvm_arch_put_sve(cs);
72
{
146
+ } else {
73
#ifndef CONFIG_USER_ONLY
147
+ ret = kvm_arch_put_fpsimd(cs);
74
+ uint64_t hcr_el2;
148
+ }
149
+ if (ret) {
150
+ return ret;
151
+ }
152
+
75
+
153
+ reg.addr = (uintptr_t)(&fpr);
76
/* CPACR and the CPTR registers don't exist before v6, so FP is
154
+ fpr = vfp_get_fpsr(env);
77
* always accessible
155
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
78
*/
156
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
79
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)
157
+ if (ret) {
80
return 0;
158
+ return ret;
81
}
159
+ }
82
83
+ hcr_el2 = arm_hcr_el2_eff(env);
160
+
84
+
161
+ reg.addr = (uintptr_t)(&fpr);
85
/* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
162
+ fpr = vfp_get_fpcr(env);
86
* 0, 2 : trap EL0 and EL1/PL1 accesses
163
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
87
* 1 : trap only EL0 accesses
164
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
88
* 3 : trap no accesses
165
if (ret) {
89
* This register is ignored if E2H+TGE are both set.
166
return ret;
90
*/
167
}
91
- if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
168
@@ -XXX,XX +XXX,XX @@ int kvm_arch_put_registers(CPUState *cs, int level)
92
+ if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
169
93
int fpen = extract32(env->cp15.cpacr_el1, 20, 2);
170
static int kvm_arch_get_fpsimd(CPUState *cs)
94
171
{
95
switch (fpen) {
172
- ARMCPU *cpu = ARM_CPU(cs);
96
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)
173
- CPUARMState *env = &cpu->env;
174
+ CPUARMState *env = &ARM_CPU(cs)->env;
175
struct kvm_one_reg reg;
176
- uint32_t fpr;
177
int i, ret;
178
179
for (i = 0; i < 32; i++) {
180
@@ -XXX,XX +XXX,XX @@ static int kvm_arch_get_fpsimd(CPUState *cs)
181
}
97
}
182
}
98
}
183
99
184
- reg.addr = (uintptr_t)(&fpr);
100
- /* For the CPTR registers we don't need to guard with an ARM_FEATURE
185
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
101
- * check because zero bits in the registers mean "don't trap".
186
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
102
+ /*
187
- if (ret) {
103
+ * CPTR_EL2 is present in v7VE or v8, and changes format
188
- return ret;
104
+ * with HCR_EL2.E2H (regardless of TGE).
189
- }
105
*/
190
- vfp_set_fpsr(env, fpr);
106
-
191
+ return 0;
107
- /* CPTR_EL2 : present in v7VE or v8 */
192
+}
108
- if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
193
109
- && arm_is_el2_enabled(env)) {
194
- reg.addr = (uintptr_t)(&fpr);
110
- /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
195
- reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
111
- return 2;
196
+/*
112
+ if (cur_el <= 2) {
197
+ * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
113
+ if (hcr_el2 & HCR_E2H) {
198
+ * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
114
+ /* Check CPTR_EL2.FPEN. */
199
+ * code the slice index to zero for now as it's unlikely we'll need more than
115
+ switch (extract32(env->cp15.cptr_el[2], 20, 2)) {
200
+ * one slice for quite some time.
116
+ case 1:
201
+ */
117
+ if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
202
+static int kvm_arch_get_sve(CPUState *cs)
118
+ break;
203
+{
119
+ }
204
+ ARMCPU *cpu = ARM_CPU(cs);
120
+ /* fall through */
205
+ CPUARMState *env = &cpu->env;
121
+ case 0:
206
+ struct kvm_one_reg reg;
122
+ case 2:
207
+ uint64_t *r;
123
+ return 2;
208
+ int n, ret;
124
+ }
209
+
125
+ } else if (arm_is_el2_enabled(env)) {
210
+ for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
126
+ if (env->cp15.cptr_el[2] & CPTR_TFP) {
211
+ r = &env->vfp.zregs[n].d[0];
127
+ return 2;
212
+ reg.addr = (uintptr_t)r;
128
+ }
213
+ reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
214
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
215
+ if (ret) {
216
+ return ret;
217
+ }
129
+ }
218
+ sve_bswap64(r, r, cpu->sve_max_vq * 2);
219
+ }
220
+
221
+ for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
222
+ r = &env->vfp.pregs[n].p[0];
223
+ reg.addr = (uintptr_t)r;
224
+ reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
225
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
226
+ if (ret) {
227
+ return ret;
228
+ }
229
+ sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
230
+ }
231
+
232
+ r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
233
+ reg.addr = (uintptr_t)r;
234
+ reg.id = KVM_REG_ARM64_SVE_FFR(0);
235
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
236
if (ret) {
237
return ret;
238
}
130
}
239
- vfp_set_fpcr(env, fpr);
131
240
+ sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
132
/* CPTR_EL3 : present in v8 */
241
242
return 0;
243
}
244
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
245
struct kvm_one_reg reg;
246
uint64_t val;
247
unsigned int el;
248
+ uint32_t fpr;
249
int i, ret;
250
251
ARMCPU *cpu = ARM_CPU(cs);
252
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
253
env->spsr = env->banked_spsr[i];
254
}
255
256
- ret = kvm_arch_get_fpsimd(cs);
257
+ if (cpu_isar_feature(aa64_sve, cpu)) {
258
+ ret = kvm_arch_get_sve(cs);
259
+ } else {
260
+ ret = kvm_arch_get_fpsimd(cs);
261
+ }
262
if (ret) {
263
return ret;
264
}
265
266
+ reg.addr = (uintptr_t)(&fpr);
267
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
268
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
269
+ if (ret) {
270
+ return ret;
271
+ }
272
+ vfp_set_fpsr(env, fpr);
273
+
274
+ reg.addr = (uintptr_t)(&fpr);
275
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
276
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
277
+ if (ret) {
278
+ return ret;
279
+ }
280
+ vfp_set_fpcr(env, fpr);
281
+
282
ret = kvm_get_vcpu_events(cpu);
283
if (ret) {
284
return ret;
285
--
133
--
286
2.20.1
134
2.25.1
287
135
288
136
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Use the named bit rather than a bare extract32.
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
8
Message-id: 20220127063428.30212-5-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
11
target/arm/helper.c | 2 +-
12
1 file changed, 1 insertion(+), 1 deletion(-)
13
14
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper.c
17
+++ b/target/arm/helper.c
18
@@ -XXX,XX +XXX,XX @@ int fp_exception_el(CPUARMState *env, int cur_el)
19
}
20
21
/* CPTR_EL3 : present in v8 */
22
- if (extract32(env->cp15.cptr_el[3], 10, 1)) {
23
+ if (env->cp15.cptr_el[3] & CPTR_TFP) {
24
/* Trap all FP ops to EL3 */
25
return 3;
26
}
27
--
28
2.25.1
29
30
diff view generated by jsdifflib
New patch
1
From: Francisco Iglesias <francisco.iglesias@xilinx.com>
1
2
3
'Or' the IRQs coming from the QSPI and QSPI DMA models. This is done for
4
avoiding the situation where one of the models incorrectly deasserts an
5
interrupt asserted from the other model (which will result in that the IRQ
6
is lost and will not reach guest SW).
7
8
Signed-off-by: Francisco Iglesias <francisco.iglesias@xilinx.com>
9
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
10
Reviewed-by: Luc Michel <luc@lmichel.fr>
11
Message-id: 20220203151742.1457-1-francisco.iglesias@xilinx.com
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
include/hw/arm/xlnx-zynqmp.h | 2 ++
15
hw/arm/xlnx-zynqmp.c | 14 ++++++++++++--
16
2 files changed, 14 insertions(+), 2 deletions(-)
17
18
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/include/hw/arm/xlnx-zynqmp.h
21
+++ b/include/hw/arm/xlnx-zynqmp.h
22
@@ -XXX,XX +XXX,XX @@
23
#include "hw/dma/xlnx_csu_dma.h"
24
#include "hw/nvram/xlnx-bbram.h"
25
#include "hw/nvram/xlnx-zynqmp-efuse.h"
26
+#include "hw/or-irq.h"
27
28
#define TYPE_XLNX_ZYNQMP "xlnx-zynqmp"
29
OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP)
30
@@ -XXX,XX +XXX,XX @@ struct XlnxZynqMPState {
31
XlnxZDMA gdma[XLNX_ZYNQMP_NUM_GDMA_CH];
32
XlnxZDMA adma[XLNX_ZYNQMP_NUM_ADMA_CH];
33
XlnxCSUDMA qspi_dma;
34
+ qemu_or_irq qspi_irq_orgate;
35
36
char *boot_cpu;
37
ARMCPU *boot_cpu_ptr;
38
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/hw/arm/xlnx-zynqmp.c
41
+++ b/hw/arm/xlnx-zynqmp.c
42
@@ -XXX,XX +XXX,XX @@
43
#define LQSPI_ADDR 0xc0000000
44
#define QSPI_IRQ 15
45
#define QSPI_DMA_ADDR 0xff0f0800
46
+#define NUM_QSPI_IRQ_LINES 2
47
48
#define DP_ADDR 0xfd4a0000
49
#define DP_IRQ 113
50
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_init(Object *obj)
51
}
52
53
object_initialize_child(obj, "qspi-dma", &s->qspi_dma, TYPE_XLNX_CSU_DMA);
54
+ object_initialize_child(obj, "qspi-irq-orgate",
55
+ &s->qspi_irq_orgate, TYPE_OR_IRQ);
56
}
57
58
static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
59
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
60
gic_spi[adma_ch_intr[i]]);
61
}
62
63
+ object_property_set_int(OBJECT(&s->qspi_irq_orgate),
64
+ "num-lines", NUM_QSPI_IRQ_LINES, &error_fatal);
65
+ qdev_realize(DEVICE(&s->qspi_irq_orgate), NULL, &error_fatal);
66
+ qdev_connect_gpio_out(DEVICE(&s->qspi_irq_orgate), 0, gic_spi[QSPI_IRQ]);
67
+
68
if (!object_property_set_link(OBJECT(&s->qspi_dma), "dma",
69
OBJECT(system_memory), errp)) {
70
return;
71
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
72
}
73
74
sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi_dma), 0, QSPI_DMA_ADDR);
75
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi_dma), 0, gic_spi[QSPI_IRQ]);
76
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi_dma), 0,
77
+ qdev_get_gpio_in(DEVICE(&s->qspi_irq_orgate), 0));
78
79
if (!object_property_set_link(OBJECT(&s->qspi), "stream-connected-dma",
80
OBJECT(&s->qspi_dma), errp)) {
81
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
82
}
83
sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 0, QSPI_ADDR);
84
sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 1, LQSPI_ADDR);
85
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi), 0, gic_spi[QSPI_IRQ]);
86
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi), 0,
87
+ qdev_get_gpio_in(DEVICE(&s->qspi_irq_orgate), 1));
88
89
for (i = 0; i < XLNX_ZYNQMP_NUM_QSPI_BUS; i++) {
90
g_autofree gchar *bus_name = g_strdup_printf("qspi%d", i);
91
--
92
2.25.1
93
94
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
We want to allow the psci-conduit property to be set after realize,
2
because the parts of the code which are best placed to decide if it's
3
OK to enable QEMU's builtin PSCI emulation (the board code and the
4
arm_load_kernel() function are distant from the code which creates
5
and realizes CPUs (typically inside an SoC object's init and realize
6
method) and run afterwards.
2
7
3
Since 97a28b0eeac14 ("target/arm: Allow VFP and Neon to be disabled via
8
Since the DEFINE_PROP_* macros don't have support for creating
4
a CPU property") we can disable the 'max' cpu model's VFP and neon
9
properties which can be changed after realize, change the property to
5
features, but there's no way to disable SVE. Add the 'sve=on|off'
10
be created with object_property_add_uint32_ptr(), which is what we
6
property to give it that flexibility. We also rename
11
already use in this function for creating settable-after-realize
7
cpu_max_get/set_sve_vq to cpu_max_get/set_sve_max_vq in order for them
12
properties like init-svtor and init-nsvtor.
8
to follow the typical *_get/set_<property-name> pattern.
9
13
10
Signed-off-by: Andrew Jones <drjones@redhat.com>
14
Note that it doesn't conceptually make sense to change the setting of
15
the property after the machine has been completely initialized,
16
beacuse this would mean that the behaviour of the machine when first
17
started would differ from its behaviour when the system is
18
subsequently reset. (It would also require the underlying state to
19
be migrated, which we don't do.)
20
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
22
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Reviewed-by: Eric Auger <eric.auger@redhat.com>
23
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
13
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
24
Tested-by: Cédric Le Goater <clg@kaod.org>
14
Reviewed-by: Beata Michalska <beata.michalska@linaro.org>
25
Message-id: 20220127154639.2090164-2-peter.maydell@linaro.org
15
Message-id: 20191031142734.8590-4-drjones@redhat.com
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
---
26
---
18
target/arm/cpu.c | 3 ++-
27
target/arm/cpu.c | 6 +++++-
19
target/arm/cpu64.c | 52 ++++++++++++++++++++++++++++++++++------
28
1 file changed, 5 insertions(+), 1 deletion(-)
20
target/arm/monitor.c | 2 +-
21
tests/arm-cpu-features.c | 1 +
22
4 files changed, 49 insertions(+), 9 deletions(-)
23
29
24
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
30
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
25
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/cpu.c
32
--- a/target/arm/cpu.c
27
+++ b/target/arm/cpu.c
33
+++ b/target/arm/cpu.c
28
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s)
34
@@ -XXX,XX +XXX,XX @@ void arm_cpu_post_init(Object *obj)
29
env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
35
OBJ_PROP_FLAG_READWRITE);
30
env->cp15.cptr_el[3] |= CPTR_EZ;
36
}
31
/* with maximum vector length */
37
32
- env->vfp.zcr_el[1] = cpu->sve_max_vq - 1;
38
+ /* Not DEFINE_PROP_UINT32: we want this to be settable after realize */
33
+ env->vfp.zcr_el[1] = cpu_isar_feature(aa64_sve, cpu) ?
39
+ object_property_add_uint32_ptr(obj, "psci-conduit",
34
+ cpu->sve_max_vq - 1 : 0;
40
+ &cpu->psci_conduit,
35
env->vfp.zcr_el[2] = env->vfp.zcr_el[1];
41
+ OBJ_PROP_FLAG_READWRITE);
36
env->vfp.zcr_el[3] = env->vfp.zcr_el[1];
42
+
37
/*
43
qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
38
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
44
39
index XXXXXXX..XXXXXXX 100644
45
if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
40
--- a/target/arm/cpu64.c
46
@@ -XXX,XX +XXX,XX @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
41
+++ b/target/arm/cpu64.c
42
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
43
define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
44
}
47
}
45
48
46
-static void cpu_max_get_sve_vq(Object *obj, Visitor *v, const char *name,
49
static Property arm_cpu_properties[] = {
47
- void *opaque, Error **errp)
50
- DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
48
+static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
51
DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
49
+ void *opaque, Error **errp)
52
DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
50
{
53
mp_affinity, ARM64_AFFINITY_INVALID),
51
ARMCPU *cpu = ARM_CPU(obj);
52
- visit_type_uint32(v, name, &cpu->sve_max_vq, errp);
53
+ uint32_t value;
54
+
55
+ /* All vector lengths are disabled when SVE is off. */
56
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
57
+ value = 0;
58
+ } else {
59
+ value = cpu->sve_max_vq;
60
+ }
61
+ visit_type_uint32(v, name, &value, errp);
62
}
63
64
-static void cpu_max_set_sve_vq(Object *obj, Visitor *v, const char *name,
65
- void *opaque, Error **errp)
66
+static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
67
+ void *opaque, Error **errp)
68
{
69
ARMCPU *cpu = ARM_CPU(obj);
70
Error *err = NULL;
71
@@ -XXX,XX +XXX,XX @@ static void cpu_max_set_sve_vq(Object *obj, Visitor *v, const char *name,
72
error_propagate(errp, err);
73
}
74
75
+static void cpu_arm_get_sve(Object *obj, Visitor *v, const char *name,
76
+ void *opaque, Error **errp)
77
+{
78
+ ARMCPU *cpu = ARM_CPU(obj);
79
+ bool value = cpu_isar_feature(aa64_sve, cpu);
80
+
81
+ visit_type_bool(v, name, &value, errp);
82
+}
83
+
84
+static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
85
+ void *opaque, Error **errp)
86
+{
87
+ ARMCPU *cpu = ARM_CPU(obj);
88
+ Error *err = NULL;
89
+ bool value;
90
+ uint64_t t;
91
+
92
+ visit_type_bool(v, name, &value, &err);
93
+ if (err) {
94
+ error_propagate(errp, err);
95
+ return;
96
+ }
97
+
98
+ t = cpu->isar.id_aa64pfr0;
99
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
100
+ cpu->isar.id_aa64pfr0 = t;
101
+}
102
+
103
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
104
* otherwise, a CPU with as many features enabled as our emulation supports.
105
* The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
106
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
107
#endif
108
109
cpu->sve_max_vq = ARM_MAX_VQ;
110
- object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_vq,
111
- cpu_max_set_sve_vq, NULL, NULL, &error_fatal);
112
+ object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
113
+ cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
114
+ object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
115
+ cpu_arm_set_sve, NULL, NULL, &error_fatal);
116
}
117
}
118
119
diff --git a/target/arm/monitor.c b/target/arm/monitor.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/target/arm/monitor.c
122
+++ b/target/arm/monitor.c
123
@@ -XXX,XX +XXX,XX @@ GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
124
* then the order that considers those dependencies must be used.
125
*/
126
static const char *cpu_model_advertised_features[] = {
127
- "aarch64", "pmu",
128
+ "aarch64", "pmu", "sve",
129
NULL
130
};
131
132
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
133
index XXXXXXX..XXXXXXX 100644
134
--- a/tests/arm-cpu-features.c
135
+++ b/tests/arm-cpu-features.c
136
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion(const void *data)
137
138
if (g_str_equal(qtest_get_arch(), "aarch64")) {
139
assert_has_feature(qts, "max", "aarch64");
140
+ assert_has_feature(qts, "max", "sve");
141
assert_has_feature(qts, "cortex-a57", "pmu");
142
assert_has_feature(qts, "cortex-a57", "aarch64");
143
144
--
54
--
145
2.20.1
55
2.25.1
146
56
147
57
diff view generated by jsdifflib
New patch
1
The CPU object's start-powered-off property is currently only
2
settable before the CPU object is realized. For arm machines this is
3
awkward, because we would like to decide whether the CPU should be
4
powered-off based on how we are booting the guest code, which is
5
something done in the machine model code and in common code called by
6
the machine model, which runs much later and in completely different
7
parts of the codebase from the SoC object code that is responsible
8
for creating and realizing the CPU objects.
1
9
10
Allow start-powered-off to be set after realize. Since this isn't
11
something that's supported by the DEFINE_PROP_* macros, we have to
12
switch the property definition to use the
13
object_class_property_add_bool() function.
14
15
Note that it doesn't conceptually make sense to change the setting of
16
the property after the machine has been completely initialized,
17
beacuse this would mean that the behaviour of the machine when first
18
started would differ from its behaviour when the system is
19
subsequently reset. (It would also require the underlying state to
20
be migrated, which we don't do.)
21
22
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
23
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
24
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
25
Tested-by: Cédric Le Goater <clg@kaod.org>
26
Message-id: 20220127154639.2090164-3-peter.maydell@linaro.org
27
---
28
cpu.c | 22 +++++++++++++++++++++-
29
1 file changed, 21 insertions(+), 1 deletion(-)
30
31
diff --git a/cpu.c b/cpu.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/cpu.c
34
+++ b/cpu.c
35
@@ -XXX,XX +XXX,XX @@ static Property cpu_common_props[] = {
36
DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
37
MemoryRegion *),
38
#endif
39
- DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
40
DEFINE_PROP_END_OF_LIST(),
41
};
42
43
+static bool cpu_get_start_powered_off(Object *obj, Error **errp)
44
+{
45
+ CPUState *cpu = CPU(obj);
46
+ return cpu->start_powered_off;
47
+}
48
+
49
+static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
50
+{
51
+ CPUState *cpu = CPU(obj);
52
+ cpu->start_powered_off = value;
53
+}
54
+
55
void cpu_class_init_props(DeviceClass *dc)
56
{
57
+ ObjectClass *oc = OBJECT_CLASS(dc);
58
+
59
device_class_set_props(dc, cpu_common_props);
60
+ /*
61
+ * We can't use DEFINE_PROP_BOOL in the Property array for this
62
+ * property, because we want this to be settable after realize.
63
+ */
64
+ object_class_property_add_bool(oc, "start-powered-off",
65
+ cpu_get_start_powered_off,
66
+ cpu_set_start_powered_off);
67
}
68
69
void cpu_exec_initfn(CPUState *cpu)
70
--
71
2.25.1
72
73
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
Currently we expect board code to set the psci-conduit property on
2
CPUs and ensure that secondary CPUs are created with the
3
start-powered-off property set to false, if the board wishes to use
4
QEMU's builtin PSCI emulation. This worked OK for the virt board
5
where we first wanted to use it, because the virt board directly
6
creates its CPUs and is in a reasonable position to set those
7
properties. For other boards which model real hardware and use a
8
separate SoC object, however, it is more awkward. Most PSCI-using
9
boards just set the psci-conduit board unconditionally.
2
10
3
Add support for the query-cpu-model-expansion QMP command to Arm. We
11
This was never strictly speaking correct (because you would not be
4
do this selectively, only exposing CPU properties which represent
12
able to run EL3 guest firmware that itself provided the PSCI
5
optional CPU features which the user may want to enable/disable.
13
interface, as the QEMU implementation would overrule it), but mostly
6
Additionally we restrict the list of queryable cpu models to 'max',
14
worked in practice because for non-PSCI SMC calls QEMU would emulate
7
'host', or the current type when KVM is in use. And, finally, we only
15
the SMC instruction as normal (by trapping to guest EL3). However,
8
implement expansion type 'full', as Arm does not yet have a "base"
16
we would like to make our PSCI emulation follow the part of the SMCC
9
CPU type. More details and example queries are described in a new
17
specification that mandates that SMC calls with unknown function
10
document (docs/arm-cpu-features.rst).
18
identifiers return a failure code, which means that all SMC calls
19
will be handled by the PSCI code and the "emulate as normal" path
20
will no longer be taken.
11
21
12
Note, certainly more features may be added to the list of advertised
22
We tried to implement that in commit 9fcd15b9193e81
13
features, e.g. 'vfp' and 'neon'. The only requirement is that we can
23
("arm: tcg: Adhere to SMCCC 1.3 section 5.2"), but this
14
detect invalid configurations and emit failures at QMP query time.
24
regressed attempts to run EL3 guest code on the affected boards:
15
For 'vfp' and 'neon' this will require some refactoring to share a
25
* mcimx6ul-evk, mcimx7d-sabre, orangepi, xlnx-zcu102
16
validation function between the QMP query and the CPU realize
26
* for the case only of EL3 code loaded via -kernel (and
17
functions.
27
not via -bios or -pflash), virt and xlnx-versal-virt
28
so for the 7.0 release we reverted it (in commit 4825eaae4fdd56f).
18
29
19
Signed-off-by: Andrew Jones <drjones@redhat.com>
30
This commit provides a mechanism that boards can use to arrange that
31
psci-conduit is set if running guest code at a low enough EL but not
32
if it would be running at the same EL that the conduit implies that
33
the QEMU PSCI implementation is using. (Later commits will convert
34
individual board models to use this mechanism.)
35
36
We do this by moving the setting of the psci-conduit and
37
start-powered-off properties to arm_load_kernel(). Boards which want
38
to potentially use emulated PSCI must set a psci_conduit field in the
39
arm_boot_info struct to the type of conduit they want to use (SMC or
40
HVC); arm_load_kernel() will then set the CPUs up accordingly if it
41
is not going to start the guest code at the same or higher EL as the
42
fake QEMU firmware would be at.
43
44
Board/SoC code which uses this mechanism should no longer set the CPU
45
psci-conduit property directly. It should only set the
46
start-powered-off property for secondaries if EL3 guest firmware
47
running bare metal expects that rather than the alternative "all CPUs
48
start executing the firmware at once".
49
50
Note that when calculating whether we are going to run guest
51
code at EL3, we ignore the setting of arm_boot_info::secure_board_setup,
52
which might cause us to run a stub bit of guest code at EL3 which
53
does some board-specific setup before dropping to EL2 or EL1 to
54
run the guest kernel. This is OK because only one board that
55
enables PSCI sets secure_board_setup (the highbank board), and
56
the stub code it writes will behave the same way whether the
57
one SMC call it makes is handled by "emulate the SMC" or by
58
"PSCI default returns an error code". So we can leave that stub
59
code in place until after we've changed the PSCI default behaviour;
60
at that point we will remove it.
61
62
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
63
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
21
Reviewed-by: Eric Auger <eric.auger@redhat.com>
64
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
22
Reviewed-by: Beata Michalska <beata.michalska@linaro.org>
65
Tested-by: Cédric Le Goater <clg@kaod.org>
23
Message-id: 20191031142734.8590-2-drjones@redhat.com
66
Message-id: 20220127154639.2090164-4-peter.maydell@linaro.org
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
25
---
67
---
26
qapi/machine-target.json | 6 +-
68
include/hw/arm/boot.h | 10 +++++++++
27
target/arm/monitor.c | 146 ++++++++++++++++++++++++++++++++++++++
69
hw/arm/boot.c | 50 +++++++++++++++++++++++++++++++++++++++++++
28
docs/arm-cpu-features.rst | 137 +++++++++++++++++++++++++++++++++++
70
2 files changed, 60 insertions(+)
29
3 files changed, 286 insertions(+), 3 deletions(-)
30
create mode 100644 docs/arm-cpu-features.rst
31
71
32
diff --git a/qapi/machine-target.json b/qapi/machine-target.json
72
diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h
33
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
34
--- a/qapi/machine-target.json
74
--- a/include/hw/arm/boot.h
35
+++ b/qapi/machine-target.json
75
+++ b/include/hw/arm/boot.h
36
@@ -XXX,XX +XXX,XX @@
76
@@ -XXX,XX +XXX,XX @@ struct arm_boot_info {
37
##
77
* the user it should implement this hook.
38
{ 'struct': 'CpuModelExpansionInfo',
78
*/
39
'data': { 'model': 'CpuModelInfo' },
79
void (*modify_dtb)(const struct arm_boot_info *info, void *fdt);
40
- 'if': 'defined(TARGET_S390X) || defined(TARGET_I386)' }
80
+ /*
41
+ 'if': 'defined(TARGET_S390X) || defined(TARGET_I386) || defined(TARGET_ARM)' }
81
+ * If a board wants to use the QEMU emulated-firmware PSCI support,
42
82
+ * it should set this to QEMU_PSCI_CONDUIT_HVC or QEMU_PSCI_CONDUIT_SMC
43
##
83
+ * as appropriate. arm_load_kernel() will set the psci-conduit and
44
# @query-cpu-model-expansion:
84
+ * start-powered-off properties on the CPUs accordingly.
45
@@ -XXX,XX +XXX,XX @@
85
+ * Note that if the guest image is started at the same exception level
46
# query-cpu-model-expansion while using these is not advised.
86
+ * as the conduit specifies calls should go to (eg guest firmware booted
47
#
87
+ * to EL3) then PSCI will not be enabled.
48
# Some architectures may not support all expansion types. s390x supports
88
+ */
49
-# "full" and "static".
89
+ int psci_conduit;
50
+# "full" and "static". Arm only supports "full".
90
/* Used internally by arm_boot.c */
51
#
91
int is_linux;
52
# Returns: a CpuModelExpansionInfo. Returns an error if expanding CPU models is
92
hwaddr initrd_start;
53
# not supported, if the model cannot be expanded, if the model contains
93
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
54
@@ -XXX,XX +XXX,XX @@
55
'data': { 'type': 'CpuModelExpansionType',
56
'model': 'CpuModelInfo' },
57
'returns': 'CpuModelExpansionInfo',
58
- 'if': 'defined(TARGET_S390X) || defined(TARGET_I386)' }
59
+ 'if': 'defined(TARGET_S390X) || defined(TARGET_I386) || defined(TARGET_ARM)' }
60
61
##
62
# @CpuDefinitionInfo:
63
diff --git a/target/arm/monitor.c b/target/arm/monitor.c
64
index XXXXXXX..XXXXXXX 100644
94
index XXXXXXX..XXXXXXX 100644
65
--- a/target/arm/monitor.c
95
--- a/hw/arm/boot.c
66
+++ b/target/arm/monitor.c
96
+++ b/hw/arm/boot.c
67
@@ -XXX,XX +XXX,XX @@
97
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
68
*/
69
70
#include "qemu/osdep.h"
71
+#include "hw/boards.h"
72
#include "kvm_arm.h"
73
+#include "qapi/error.h"
74
+#include "qapi/visitor.h"
75
+#include "qapi/qobject-input-visitor.h"
76
+#include "qapi/qapi-commands-machine-target.h"
77
#include "qapi/qapi-commands-misc-target.h"
78
+#include "qapi/qmp/qerror.h"
79
+#include "qapi/qmp/qdict.h"
80
+#include "qom/qom-qobject.h"
81
82
static GICCapability *gic_cap_new(int version)
83
{
98
{
84
@@ -XXX,XX +XXX,XX @@ GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
99
CPUState *cs;
85
100
AddressSpace *as = arm_boot_address_space(cpu, info);
86
return head;
101
+ int boot_el;
87
}
102
+ CPUARMState *env = &cpu->env;
103
104
/*
105
* CPU objects (unlike devices) are not automatically reset on system
106
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
107
arm_setup_direct_kernel_boot(cpu, info);
108
}
109
110
+ /*
111
+ * Disable the PSCI conduit if it is set up to target the same
112
+ * or a lower EL than the one we're going to start the guest code in.
113
+ * This logic needs to agree with the code in do_cpu_reset() which
114
+ * decides whether we're going to boot the guest in the highest
115
+ * supported exception level or in a lower one.
116
+ */
88
+
117
+
89
+/*
118
+ /* Boot into highest supported EL ... */
90
+ * These are cpu model features we want to advertise. The order here
119
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
91
+ * matters as this is the order in which qmp_query_cpu_model_expansion
120
+ boot_el = 3;
92
+ * will attempt to set them. If there are dependencies between features,
121
+ } else if (arm_feature(env, ARM_FEATURE_EL2)) {
93
+ * then the order that considers those dependencies must be used.
122
+ boot_el = 2;
94
+ */
123
+ } else {
95
+static const char *cpu_model_advertised_features[] = {
124
+ boot_el = 1;
96
+ "aarch64", "pmu",
125
+ }
97
+ NULL
126
+ /* ...except that if we're booting Linux we adjust the EL we boot into */
98
+};
127
+ if (info->is_linux && !info->secure_boot) {
99
+
128
+ boot_el = arm_feature(env, ARM_FEATURE_EL2) ? 2 : 1;
100
+CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
101
+ CpuModelInfo *model,
102
+ Error **errp)
103
+{
104
+ CpuModelExpansionInfo *expansion_info;
105
+ const QDict *qdict_in = NULL;
106
+ QDict *qdict_out;
107
+ ObjectClass *oc;
108
+ Object *obj;
109
+ const char *name;
110
+ int i;
111
+
112
+ if (type != CPU_MODEL_EXPANSION_TYPE_FULL) {
113
+ error_setg(errp, "The requested expansion type is not supported");
114
+ return NULL;
115
+ }
129
+ }
116
+
130
+
117
+ if (!kvm_enabled() && !strcmp(model->name, "host")) {
131
+ if ((info->psci_conduit == QEMU_PSCI_CONDUIT_HVC && boot_el >= 2) ||
118
+ error_setg(errp, "The CPU type '%s' requires KVM", model->name);
132
+ (info->psci_conduit == QEMU_PSCI_CONDUIT_SMC && boot_el == 3)) {
119
+ return NULL;
133
+ info->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
120
+ }
134
+ }
121
+
135
+
122
+ oc = cpu_class_by_name(TYPE_ARM_CPU, model->name);
136
+ if (info->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) {
123
+ if (!oc) {
137
+ for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
124
+ error_setg(errp, "The CPU type '%s' is not a recognized ARM CPU type",
138
+ Object *cpuobj = OBJECT(cs);
125
+ model->name);
126
+ return NULL;
127
+ }
128
+
139
+
129
+ if (kvm_enabled()) {
140
+ object_property_set_int(cpuobj, "psci-conduit", info->psci_conduit,
130
+ const char *cpu_type = current_machine->cpu_type;
141
+ &error_abort);
131
+ int len = strlen(cpu_type) - strlen(ARM_CPU_TYPE_SUFFIX);
142
+ /*
132
+ bool supported = false;
143
+ * Secondary CPUs start in PSCI powered-down state. Like the
133
+
144
+ * code in do_cpu_reset(), we assume first_cpu is the primary
134
+ if (!strcmp(model->name, "host") || !strcmp(model->name, "max")) {
145
+ * CPU.
135
+ /* These are kvmarm's recommended cpu types */
146
+ */
136
+ supported = true;
147
+ if (cs != first_cpu) {
137
+ } else if (strlen(model->name) == len &&
148
+ object_property_set_bool(cpuobj, "start-powered-off", true,
138
+ !strncmp(model->name, cpu_type, len)) {
149
+ &error_abort);
139
+ /* KVM is enabled and we're using this type, so it works. */
150
+ }
140
+ supported = true;
141
+ }
142
+ if (!supported) {
143
+ error_setg(errp, "We cannot guarantee the CPU type '%s' works "
144
+ "with KVM on this host", model->name);
145
+ return NULL;
146
+ }
151
+ }
147
+ }
152
+ }
148
+
153
+
149
+ if (model->props) {
154
+ /*
150
+ qdict_in = qobject_to(QDict, model->props);
155
+ * arm_load_dtb() may add a PSCI node so it must be called after we have
151
+ if (!qdict_in) {
156
+ * decided whether to enable PSCI and set the psci-conduit CPU properties.
152
+ error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict");
157
+ */
153
+ return NULL;
158
if (!info->skip_dtb_autoload && have_dtb(info)) {
154
+ }
159
if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) {
155
+ }
160
exit(1);
156
+
157
+ obj = object_new(object_class_get_name(oc));
158
+
159
+ if (qdict_in) {
160
+ Visitor *visitor;
161
+ Error *err = NULL;
162
+
163
+ visitor = qobject_input_visitor_new(model->props);
164
+ visit_start_struct(visitor, NULL, NULL, 0, &err);
165
+ if (err) {
166
+ visit_free(visitor);
167
+ object_unref(obj);
168
+ error_propagate(errp, err);
169
+ return NULL;
170
+ }
171
+
172
+ i = 0;
173
+ while ((name = cpu_model_advertised_features[i++]) != NULL) {
174
+ if (qdict_get(qdict_in, name)) {
175
+ object_property_set(obj, visitor, name, &err);
176
+ if (err) {
177
+ break;
178
+ }
179
+ }
180
+ }
181
+
182
+ if (!err) {
183
+ visit_check_struct(visitor, &err);
184
+ }
185
+ visit_end_struct(visitor, NULL);
186
+ visit_free(visitor);
187
+ if (err) {
188
+ object_unref(obj);
189
+ error_propagate(errp, err);
190
+ return NULL;
191
+ }
192
+ }
193
+
194
+ expansion_info = g_new0(CpuModelExpansionInfo, 1);
195
+ expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
196
+ expansion_info->model->name = g_strdup(model->name);
197
+
198
+ qdict_out = qdict_new();
199
+
200
+ i = 0;
201
+ while ((name = cpu_model_advertised_features[i++]) != NULL) {
202
+ ObjectProperty *prop = object_property_find(obj, name, NULL);
203
+ if (prop) {
204
+ Error *err = NULL;
205
+ QObject *value;
206
+
207
+ assert(prop->get);
208
+ value = object_property_get_qobject(obj, name, &err);
209
+ assert(!err);
210
+
211
+ qdict_put_obj(qdict_out, name, value);
212
+ }
213
+ }
214
+
215
+ if (!qdict_size(qdict_out)) {
216
+ qobject_unref(qdict_out);
217
+ } else {
218
+ expansion_info->model->props = QOBJECT(qdict_out);
219
+ expansion_info->model->has_props = true;
220
+ }
221
+
222
+ object_unref(obj);
223
+
224
+ return expansion_info;
225
+}
226
diff --git a/docs/arm-cpu-features.rst b/docs/arm-cpu-features.rst
227
new file mode 100644
228
index XXXXXXX..XXXXXXX
229
--- /dev/null
230
+++ b/docs/arm-cpu-features.rst
231
@@ -XXX,XX +XXX,XX @@
232
+================
233
+ARM CPU Features
234
+================
235
+
236
+Examples of probing and using ARM CPU features
237
+
238
+Introduction
239
+============
240
+
241
+CPU features are optional features that a CPU of supporting type may
242
+choose to implement or not. In QEMU, optional CPU features have
243
+corresponding boolean CPU proprieties that, when enabled, indicate
244
+that the feature is implemented, and, conversely, when disabled,
245
+indicate that it is not implemented. An example of an ARM CPU feature
246
+is the Performance Monitoring Unit (PMU). CPU types such as the
247
+Cortex-A15 and the Cortex-A57, which respectively implement ARM
248
+architecture reference manuals ARMv7-A and ARMv8-A, may both optionally
249
+implement PMUs. For example, if a user wants to use a Cortex-A15 without
250
+a PMU, then the `-cpu` parameter should contain `pmu=off` on the QEMU
251
+command line, i.e. `-cpu cortex-a15,pmu=off`.
252
+
253
+As not all CPU types support all optional CPU features, then whether or
254
+not a CPU property exists depends on the CPU type. For example, CPUs
255
+that implement the ARMv8-A architecture reference manual may optionally
256
+support the AArch32 CPU feature, which may be enabled by disabling the
257
+`aarch64` CPU property. A CPU type such as the Cortex-A15, which does
258
+not implement ARMv8-A, will not have the `aarch64` CPU property.
259
+
260
+QEMU's support may be limited for some CPU features, only partially
261
+supporting the feature or only supporting the feature under certain
262
+configurations. For example, the `aarch64` CPU feature, which, when
263
+disabled, enables the optional AArch32 CPU feature, is only supported
264
+when using the KVM accelerator and when running on a host CPU type that
265
+supports the feature.
266
+
267
+CPU Feature Probing
268
+===================
269
+
270
+Determining which CPU features are available and functional for a given
271
+CPU type is possible with the `query-cpu-model-expansion` QMP command.
272
+Below are some examples where `scripts/qmp/qmp-shell` (see the top comment
273
+block in the script for usage) is used to issue the QMP commands.
274
+
275
+(1) Determine which CPU features are available for the `max` CPU type
276
+ (Note, we started QEMU with qemu-system-aarch64, so `max` is
277
+ implementing the ARMv8-A reference manual in this case)::
278
+
279
+ (QEMU) query-cpu-model-expansion type=full model={"name":"max"}
280
+ { "return": {
281
+ "model": { "name": "max", "props": {
282
+ "pmu": true, "aarch64": true
283
+ }}}}
284
+
285
+We see that the `max` CPU type has the `pmu` and `aarch64` CPU features.
286
+We also see that the CPU features are enabled, as they are all `true`.
287
+
288
+(2) Let's try to disable the PMU::
289
+
290
+ (QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"pmu":false}}
291
+ { "return": {
292
+ "model": { "name": "max", "props": {
293
+ "pmu": false, "aarch64": true
294
+ }}}}
295
+
296
+We see it worked, as `pmu` is now `false`.
297
+
298
+(3) Let's try to disable `aarch64`, which enables the AArch32 CPU feature::
299
+
300
+ (QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"aarch64":false}}
301
+ {"error": {
302
+ "class": "GenericError", "desc":
303
+ "'aarch64' feature cannot be disabled unless KVM is enabled and 32-bit EL1 is supported"
304
+ }}
305
+
306
+It looks like this feature is limited to a configuration we do not
307
+currently have.
308
+
309
+(4) Let's try probing CPU features for the Cortex-A15 CPU type::
310
+
311
+ (QEMU) query-cpu-model-expansion type=full model={"name":"cortex-a15"}
312
+ {"return": {"model": {"name": "cortex-a15", "props": {"pmu": true}}}}
313
+
314
+Only the `pmu` CPU feature is available.
315
+
316
+A note about CPU feature dependencies
317
+-------------------------------------
318
+
319
+It's possible for features to have dependencies on other features. I.e.
320
+it may be possible to change one feature at a time without error, but
321
+when attempting to change all features at once an error could occur
322
+depending on the order they are processed. It's also possible changing
323
+all at once doesn't generate an error, because a feature's dependencies
324
+are satisfied with other features, but the same feature cannot be changed
325
+independently without error. For these reasons callers should always
326
+attempt to make their desired changes all at once in order to ensure the
327
+collection is valid.
328
+
329
+A note about CPU models and KVM
330
+-------------------------------
331
+
332
+Named CPU models generally do not work with KVM. There are a few cases
333
+that do work, e.g. using the named CPU model `cortex-a57` with KVM on a
334
+seattle host, but mostly if KVM is enabled the `host` CPU type must be
335
+used. This means the guest is provided all the same CPU features as the
336
+host CPU type has. And, for this reason, the `host` CPU type should
337
+enable all CPU features that the host has by default. Indeed it's even
338
+a bit strange to allow disabling CPU features that the host has when using
339
+the `host` CPU type, but in the absence of CPU models it's the best we can
340
+do if we want to launch guests without all the host's CPU features enabled.
341
+
342
+Enabling KVM also affects the `query-cpu-model-expansion` QMP command. The
343
+affect is not only limited to specific features, as pointed out in example
344
+(3) of "CPU Feature Probing", but also to which CPU types may be expanded.
345
+When KVM is enabled, only the `max`, `host`, and current CPU type may be
346
+expanded. This restriction is necessary as it's not possible to know all
347
+CPU types that may work with KVM, but it does impose a small risk of users
348
+experiencing unexpected errors. For example on a seattle, as mentioned
349
+above, the `cortex-a57` CPU type is also valid when KVM is enabled.
350
+Therefore a user could use the `host` CPU type for the current type, but
351
+then attempt to query `cortex-a57`, however that query will fail with our
352
+restrictions. This shouldn't be an issue though as management layers and
353
+users have been preferring the `host` CPU type for use with KVM for quite
354
+some time. Additionally, if the KVM-enabled QEMU instance running on a
355
+seattle host is using the `cortex-a57` CPU type, then querying `cortex-a57`
356
+will work.
357
+
358
+Using CPU Features
359
+==================
360
+
361
+After determining which CPU features are available and supported for a
362
+given CPU type, then they may be selectively enabled or disabled on the
363
+QEMU command line with that CPU type::
364
+
365
+ $ qemu-system-aarch64 -M virt -cpu max,pmu=off
366
+
367
+The example above disables the PMU for the `max` CPU type.
368
+
369
--
161
--
370
2.20.1
162
2.25.1
371
163
372
164
diff view generated by jsdifflib
New patch
1
Change the iMX-SoC based boards to use the new boot.c functionality
2
to allow us to enable psci-conduit only if the guest is being booted
3
in EL1 or EL2, so that if the user runs guest EL3 firmware code our
4
PSCI emulation doesn't get in its way.
1
5
6
To do this we stop setting the psci-conduit property on the CPU
7
objects in the SoC code, and instead set the psci_conduit field in
8
the arm_boot_info struct to tell the common boot loader code that
9
we'd like PSCI if the guest is starting at an EL that it makes
10
sense with.
11
12
This affects the mcimx6ul-evk and mcimx7d-sabre boards.
13
14
Note that for the mcimx7d board, this means that when running guest
15
code at EL3 there is currently no way to power on the secondary CPUs,
16
because we do not currently have a model of the system reset
17
controller module which should be used to do that for the imx7 SoC,
18
only for the imx6 SoC. (Previously EL3 code which knew it was
19
running on QEMU could use a PSCI call to do this.) This doesn't
20
affect the imx6ul-evk board because it is uniprocessor.
21
22
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
23
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
24
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
25
Tested-by: Cédric Le Goater <clg@kaod.org>
26
Acked-by: Richard Henderson <richard.henderson@linaro.org>
27
Message-id: 20220127154639.2090164-5-peter.maydell@linaro.org
28
---
29
hw/arm/fsl-imx6ul.c | 2 --
30
hw/arm/fsl-imx7.c | 8 ++++----
31
hw/arm/mcimx6ul-evk.c | 1 +
32
hw/arm/mcimx7d-sabre.c | 1 +
33
4 files changed, 6 insertions(+), 6 deletions(-)
34
35
diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/hw/arm/fsl-imx6ul.c
38
+++ b/hw/arm/fsl-imx6ul.c
39
@@ -XXX,XX +XXX,XX @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp)
40
return;
41
}
42
43
- object_property_set_int(OBJECT(&s->cpu), "psci-conduit",
44
- QEMU_PSCI_CONDUIT_SMC, &error_abort);
45
qdev_realize(DEVICE(&s->cpu), NULL, &error_abort);
46
47
/*
48
diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/hw/arm/fsl-imx7.c
51
+++ b/hw/arm/fsl-imx7.c
52
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
53
for (i = 0; i < smp_cpus; i++) {
54
o = OBJECT(&s->cpu[i]);
55
56
- object_property_set_int(o, "psci-conduit", QEMU_PSCI_CONDUIT_SMC,
57
- &error_abort);
58
-
59
/* On uniprocessor, the CBAR is set to 0 */
60
if (smp_cpus > 1) {
61
object_property_set_int(o, "reset-cbar", FSL_IMX7_A7MPCORE_ADDR,
62
@@ -XXX,XX +XXX,XX @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp)
63
}
64
65
if (i) {
66
- /* Secondary CPUs start in PSCI powered-down state */
67
+ /*
68
+ * Secondary CPUs start in powered-down state (and can be
69
+ * powered up via the SRC system reset controller)
70
+ */
71
object_property_set_bool(o, "start-powered-off", true,
72
&error_abort);
73
}
74
diff --git a/hw/arm/mcimx6ul-evk.c b/hw/arm/mcimx6ul-evk.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/hw/arm/mcimx6ul-evk.c
77
+++ b/hw/arm/mcimx6ul-evk.c
78
@@ -XXX,XX +XXX,XX @@ static void mcimx6ul_evk_init(MachineState *machine)
79
.board_id = -1,
80
.ram_size = machine->ram_size,
81
.nb_cpus = machine->smp.cpus,
82
+ .psci_conduit = QEMU_PSCI_CONDUIT_SMC,
83
};
84
85
s = FSL_IMX6UL(object_new(TYPE_FSL_IMX6UL));
86
diff --git a/hw/arm/mcimx7d-sabre.c b/hw/arm/mcimx7d-sabre.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/hw/arm/mcimx7d-sabre.c
89
+++ b/hw/arm/mcimx7d-sabre.c
90
@@ -XXX,XX +XXX,XX @@ static void mcimx7d_sabre_init(MachineState *machine)
91
.board_id = -1,
92
.ram_size = machine->ram_size,
93
.nb_cpus = machine->smp.cpus,
94
+ .psci_conduit = QEMU_PSCI_CONDUIT_SMC,
95
};
96
97
s = FSL_IMX7(object_new(TYPE_FSL_IMX7));
98
--
99
2.25.1
100
101
diff view generated by jsdifflib
New patch
1
Change the allwinner-h3 based board to use the new boot.c
2
functionality to allow us to enable psci-conduit only if the guest is
3
being booted in EL1 or EL2, so that if the user runs guest EL3
4
firmware code our PSCI emulation doesn't get in its way.
1
5
6
To do this we stop setting the psci-conduit property on the CPU
7
objects in the SoC code, and instead set the psci_conduit field in
8
the arm_boot_info struct to tell the common boot loader code that
9
we'd like PSCI if the guest is starting at an EL at which it makes
10
sense.
11
12
This affects the orangepi-pc board.
13
14
This commit leaves the secondary CPUs in the powered-down state if
15
the guest is booting at EL3, which is the same behaviour as before
16
this commit. The secondaries can no longer be started by that EL3
17
code making a PSCI call but can still be started via the CPU
18
Configuration Module registers (which we model in
19
hw/misc/allwinner-cpucfg.c).
20
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
22
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
23
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
24
Tested-by: Cédric Le Goater <clg@kaod.org>
25
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
26
Message-id: 20220127154639.2090164-6-peter.maydell@linaro.org
27
---
28
hw/arm/allwinner-h3.c | 9 ++++-----
29
hw/arm/orangepi.c | 1 +
30
2 files changed, 5 insertions(+), 5 deletions(-)
31
32
diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/hw/arm/allwinner-h3.c
35
+++ b/hw/arm/allwinner-h3.c
36
@@ -XXX,XX +XXX,XX @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp)
37
/* CPUs */
38
for (i = 0; i < AW_H3_NUM_CPUS; i++) {
39
40
- /* Provide Power State Coordination Interface */
41
- qdev_prop_set_int32(DEVICE(&s->cpus[i]), "psci-conduit",
42
- QEMU_PSCI_CONDUIT_SMC);
43
-
44
- /* Disable secondary CPUs */
45
+ /*
46
+ * Disable secondary CPUs. Guest EL3 firmware will start
47
+ * them via CPU reset control registers.
48
+ */
49
qdev_prop_set_bit(DEVICE(&s->cpus[i]), "start-powered-off",
50
i > 0);
51
52
diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/hw/arm/orangepi.c
55
+++ b/hw/arm/orangepi.c
56
@@ -XXX,XX +XXX,XX @@ static void orangepi_init(MachineState *machine)
57
}
58
orangepi_binfo.loader_start = h3->memmap[AW_H3_DEV_SDRAM];
59
orangepi_binfo.ram_size = machine->ram_size;
60
+ orangepi_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC;
61
arm_load_kernel(ARM_CPU(first_cpu), machine, &orangepi_binfo);
62
}
63
64
--
65
2.25.1
66
67
diff view generated by jsdifflib
New patch
1
Change the Xilinx ZynqMP-based board xlnx-zcu102 to use the new
2
boot.c functionality to allow us to enable psci-conduit only if
3
the guest is being booted in EL1 or EL2, so that if the user runs
4
guest EL3 firmware code our PSCI emulation doesn't get in its
5
way.
1
6
7
To do this we stop setting the psci-conduit property on the CPU
8
objects in the SoC code, and instead set the psci_conduit field in
9
the arm_boot_info struct to tell the common boot loader code that
10
we'd like PSCI if the guest is starting at an EL at which it
11
makes sense.
12
13
Note that this means that EL3 guest code will have no way
14
to power on secondary cores, because we don't model any
15
kind of power controller that does that on this SoC.
16
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
19
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
20
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
21
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
22
Tested-by: Cédric Le Goater <clg@kaod.org>
23
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
24
Acked-by: Richard Henderson <richard.henderson@linaro.org>
25
Message-id: 20220127154639.2090164-7-peter.maydell@linaro.org
26
---
27
hw/arm/xlnx-zcu102.c | 1 +
28
hw/arm/xlnx-zynqmp.c | 11 ++++++-----
29
2 files changed, 7 insertions(+), 5 deletions(-)
30
31
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/hw/arm/xlnx-zcu102.c
34
+++ b/hw/arm/xlnx-zcu102.c
35
@@ -XXX,XX +XXX,XX @@ static void xlnx_zcu102_init(MachineState *machine)
36
s->binfo.ram_size = ram_size;
37
s->binfo.loader_start = 0;
38
s->binfo.modify_dtb = zcu102_modify_dtb;
39
+ s->binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC;
40
arm_load_kernel(s->soc.boot_cpu_ptr, machine, &s->binfo);
41
}
42
43
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/hw/arm/xlnx-zynqmp.c
46
+++ b/hw/arm/xlnx-zynqmp.c
47
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s,
48
49
name = object_get_canonical_path_component(OBJECT(&s->rpu_cpu[i]));
50
if (strcmp(name, boot_cpu)) {
51
- /* Secondary CPUs start in PSCI powered-down state */
52
+ /*
53
+ * Secondary CPUs start in powered-down state.
54
+ */
55
object_property_set_bool(OBJECT(&s->rpu_cpu[i]),
56
"start-powered-off", true, &error_abort);
57
} else {
58
@@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
59
for (i = 0; i < num_apus; i++) {
60
const char *name;
61
62
- object_property_set_int(OBJECT(&s->apu_cpu[i]), "psci-conduit",
63
- QEMU_PSCI_CONDUIT_SMC, &error_abort);
64
-
65
name = object_get_canonical_path_component(OBJECT(&s->apu_cpu[i]));
66
if (strcmp(name, boot_cpu)) {
67
- /* Secondary CPUs start in PSCI powered-down state */
68
+ /*
69
+ * Secondary CPUs start in powered-down state.
70
+ */
71
object_property_set_bool(OBJECT(&s->apu_cpu[i]),
72
"start-powered-off", true, &error_abort);
73
} else {
74
--
75
2.25.1
76
77
diff view generated by jsdifflib
New patch
1
Instead of setting the CPU psci-conduit and start-powered-off
2
properties in the xlnx-versal-virt board code, set the arm_boot_info
3
psci_conduit field so that the boot.c code can do it.
1
4
5
This will fix a corner case where we were incorrectly enabling PSCI
6
emulation when booting guest code into EL3 because it was an ELF file
7
passed to -kernel. (EL3 guest code started via -bios, -pflash, or
8
the generic loader was already being run with PSCI emulation
9
disabled.)
10
11
Note that EL3 guest code has no way to turn on the secondary CPUs
12
because there's no emulated power controller, but this was already
13
true for EL3 guest code run via -bios, -pflash, or the generic
14
loader.
15
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
17
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
19
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
20
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
21
Tested-by: Cédric Le Goater <clg@kaod.org>
22
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
23
Message-id: 20220127154639.2090164-8-peter.maydell@linaro.org
24
---
25
include/hw/arm/xlnx-versal.h | 1 -
26
hw/arm/xlnx-versal-virt.c | 6 ++++--
27
hw/arm/xlnx-versal.c | 5 +----
28
3 files changed, 5 insertions(+), 7 deletions(-)
29
30
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
31
index XXXXXXX..XXXXXXX 100644
32
--- a/include/hw/arm/xlnx-versal.h
33
+++ b/include/hw/arm/xlnx-versal.h
34
@@ -XXX,XX +XXX,XX @@ struct Versal {
35
36
struct {
37
MemoryRegion *mr_ddr;
38
- uint32_t psci_conduit;
39
} cfg;
40
};
41
42
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/hw/arm/xlnx-versal-virt.c
45
+++ b/hw/arm/xlnx-versal-virt.c
46
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
47
* When loading an OS, we turn on QEMU's PSCI implementation with SMC
48
* as the PSCI conduit. When there's no -kernel, we assume the user
49
* provides EL3 firmware to handle PSCI.
50
+ *
51
+ * Even if the user provides a kernel filename, arm_load_kernel()
52
+ * may suppress PSCI if it's going to boot that guest code at EL3.
53
*/
54
if (machine->kernel_filename) {
55
psci_conduit = QEMU_PSCI_CONDUIT_SMC;
56
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
57
TYPE_XLNX_VERSAL);
58
object_property_set_link(OBJECT(&s->soc), "ddr", OBJECT(machine->ram),
59
&error_abort);
60
- object_property_set_int(OBJECT(&s->soc), "psci-conduit", psci_conduit,
61
- &error_abort);
62
sysbus_realize(SYS_BUS_DEVICE(&s->soc), &error_fatal);
63
64
fdt_create(s);
65
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
66
s->binfo.loader_start = 0x0;
67
s->binfo.get_dtb = versal_virt_get_dtb;
68
s->binfo.modify_dtb = versal_virt_modify_dtb;
69
+ s->binfo.psci_conduit = psci_conduit;
70
if (machine->kernel_filename) {
71
arm_load_kernel(&s->soc.fpd.apu.cpu[0], machine, &s->binfo);
72
} else {
73
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/hw/arm/xlnx-versal.c
76
+++ b/hw/arm/xlnx-versal.c
77
@@ -XXX,XX +XXX,XX @@ static void versal_create_apu_cpus(Versal *s)
78
object_initialize_child(OBJECT(s), "apu-cpu[*]", &s->fpd.apu.cpu[i],
79
XLNX_VERSAL_ACPU_TYPE);
80
obj = OBJECT(&s->fpd.apu.cpu[i]);
81
- object_property_set_int(obj, "psci-conduit", s->cfg.psci_conduit,
82
- &error_abort);
83
if (i) {
84
- /* Secondary CPUs start in PSCI powered-down state */
85
+ /* Secondary CPUs start in powered-down state */
86
object_property_set_bool(obj, "start-powered-off", true,
87
&error_abort);
88
}
89
@@ -XXX,XX +XXX,XX @@ static void versal_init(Object *obj)
90
static Property versal_properties[] = {
91
DEFINE_PROP_LINK("ddr", Versal, cfg.mr_ddr, TYPE_MEMORY_REGION,
92
MemoryRegion *),
93
- DEFINE_PROP_UINT32("psci-conduit", Versal, cfg.psci_conduit, 0),
94
DEFINE_PROP_END_OF_LIST()
95
};
96
97
--
98
2.25.1
99
100
diff view generated by jsdifflib
New patch
1
Instead of setting the CPU psci-conduit and start-powered-off
2
properties in the virt board code, set the arm_boot_info psci_conduit
3
field so that the boot.c code can do it.
1
4
5
This will fix a corner case where we were incorrectly enabling PSCI
6
emulation when booting guest code into EL3 because it was an ELF file
7
passed to -kernel or to the generic loader. (EL3 guest code started
8
via -bios or -pflash was already being run with PSCI emulation
9
disabled.)
10
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
14
Tested-by: Cédric Le Goater <clg@kaod.org>
15
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
16
Message-id: 20220127154639.2090164-9-peter.maydell@linaro.org
17
---
18
hw/arm/virt.c | 12 +-----------
19
1 file changed, 1 insertion(+), 11 deletions(-)
20
21
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/hw/arm/virt.c
24
+++ b/hw/arm/virt.c
25
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
26
object_property_set_bool(cpuobj, "has_el2", false, NULL);
27
}
28
29
- if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) {
30
- object_property_set_int(cpuobj, "psci-conduit", vms->psci_conduit,
31
- NULL);
32
-
33
- /* Secondary CPUs start in PSCI powered-down state */
34
- if (n > 0) {
35
- object_property_set_bool(cpuobj, "start-powered-off", true,
36
- NULL);
37
- }
38
- }
39
-
40
if (vmc->kvm_no_adjvtime &&
41
object_property_find(cpuobj, "kvm-no-adjvtime")) {
42
object_property_set_bool(cpuobj, "kvm-no-adjvtime", true, NULL);
43
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
44
vms->bootinfo.get_dtb = machvirt_dtb;
45
vms->bootinfo.skip_dtb_autoload = true;
46
vms->bootinfo.firmware_loaded = firmware_loaded;
47
+ vms->bootinfo.psci_conduit = vms->psci_conduit;
48
arm_load_kernel(ARM_CPU(first_cpu), machine, &vms->bootinfo);
49
50
vms->machine_done.notify = virt_machine_done;
51
--
52
2.25.1
53
54
diff view generated by jsdifflib
New patch
1
Change the highbank/midway boards to use the new boot.c functionality
2
to allow us to enable psci-conduit only if the guest is being booted
3
in EL1 or EL2, so that if the user runs guest EL3 firmware code our
4
PSCI emulation doesn't get in its way.
1
5
6
To do this we stop setting the psci-conduit and start-powered-off
7
properties on the CPU objects in the board code, and instead set the
8
psci_conduit field in the arm_boot_info struct to tell the common
9
boot loader code that we'd like PSCI if the guest is starting at an
10
EL at which it makes sense (in which case it will set these
11
properties).
12
13
This means that when running guest code at EL3, all the cores
14
will start execution at once on poweron. This matches the
15
real hardware behaviour. (A brief description of the hardware
16
boot process is in the u-boot documentation for these boards:
17
https://u-boot.readthedocs.io/en/latest/board/highbank/highbank.html#boot-process
18
-- in theory one might run the 'a9boot'/'a15boot' secure monitor
19
code in QEMU, though we probably don't emulate enough for that.)
20
21
This affects the highbank and midway boards.
22
23
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
24
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
25
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
26
Tested-by: Cédric Le Goater <clg@kaod.org>
27
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
28
Message-id: 20220127154639.2090164-10-peter.maydell@linaro.org
29
---
30
hw/arm/highbank.c | 7 +------
31
1 file changed, 1 insertion(+), 6 deletions(-)
32
33
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/hw/arm/highbank.c
36
+++ b/hw/arm/highbank.c
37
@@ -XXX,XX +XXX,XX @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
38
object_property_set_int(cpuobj, "psci-conduit", QEMU_PSCI_CONDUIT_SMC,
39
&error_abort);
40
41
- if (n) {
42
- /* Secondary CPUs start in PSCI powered-down state */
43
- object_property_set_bool(cpuobj, "start-powered-off", true,
44
- &error_abort);
45
- }
46
-
47
if (object_property_find(cpuobj, "reset-cbar")) {
48
object_property_set_int(cpuobj, "reset-cbar", MPCORE_PERIPHBASE,
49
&error_abort);
50
@@ -XXX,XX +XXX,XX @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
51
highbank_binfo.board_setup_addr = BOARD_SETUP_ADDR;
52
highbank_binfo.write_board_setup = hb_write_board_setup;
53
highbank_binfo.secure_board_setup = true;
54
+ highbank_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC;
55
56
arm_load_kernel(ARM_CPU(first_cpu), machine, &highbank_binfo);
57
}
58
--
59
2.25.1
60
61
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
The SMCCC 1.3 spec section 5.2 says
2
2
3
kvm_arm_create_scratch_host_vcpu() takes a struct kvm_vcpu_init
3
The Unknown SMC Function Identifier is a sign-extended value of (-1)
4
parameter. Rather than just using it as an output parameter to
4
that is returned in the R0, W0 or X0 registers. An implementation must
5
pass back the preferred target, use it also as an input parameter,
5
return this error code when it receives:
6
allowing a caller to pass a selected target if they wish and to
7
also pass cpu features. If the caller doesn't want to select a
8
target they can pass -1 for the target which indicates they want
9
to use the preferred target and have it passed back like before.
10
6
11
Signed-off-by: Andrew Jones <drjones@redhat.com>
7
* An SMC or HVC call with an unknown Function Identifier
8
* An SMC or HVC call for a removed Function Identifier
9
* An SMC64/HVC64 call from AArch32 state
10
11
To comply with these statements, let's always return -1 when we encounter
12
an unknown HVC or SMC call.
13
14
[PMM:
15
This is a reinstatement of commit 9fcd15b9193e819b, previously
16
reverted in commit 4825eaae4fdd56fba0f; we can do this now that we
17
have arranged for all the affected board models to not enable the
18
PSCI emulation if they are running guest code at EL3. This avoids
19
the regressions that caused us to revert the change for 7.0.]
20
21
Signed-off-by: Alexander Graf <agraf@csgraf.de>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
22
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Eric Auger <eric.auger@redhat.com>
23
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
14
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
24
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
15
Reviewed-by: Beata Michalska <beata.michalska@linaro.org>
25
Tested-by: Cédric Le Goater <clg@kaod.org>
16
Message-id: 20191031142734.8590-8-drjones@redhat.com
26
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
27
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
28
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
29
---
19
target/arm/kvm.c | 20 +++++++++++++++-----
30
target/arm/psci.c | 35 ++++++-----------------------------
20
target/arm/kvm32.c | 6 +++++-
31
1 file changed, 6 insertions(+), 29 deletions(-)
21
target/arm/kvm64.c | 6 +++++-
22
3 files changed, 25 insertions(+), 7 deletions(-)
23
32
24
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
33
diff --git a/target/arm/psci.c b/target/arm/psci.c
25
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/kvm.c
35
--- a/target/arm/psci.c
27
+++ b/target/arm/kvm.c
36
+++ b/target/arm/psci.c
28
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
37
@@ -XXX,XX +XXX,XX @@
29
int *fdarray,
38
30
struct kvm_vcpu_init *init)
39
bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
31
{
40
{
32
- int ret, kvmfd = -1, vmfd = -1, cpufd = -1;
41
- /* Return true if the r0/x0 value indicates a PSCI call and
33
+ int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
42
- * the exception type matches the configured PSCI conduit. This is
34
43
- * called before the SMC/HVC instruction is executed, to decide whether
35
kvmfd = qemu_open("/dev/kvm", O_RDWR);
44
- * we should treat it as a PSCI call or with the architecturally
36
if (kvmfd < 0) {
45
+ /*
37
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
46
+ * Return true if the exception type matches the configured PSCI conduit.
38
goto finish;
47
+ * This is called before the SMC/HVC instruction is executed, to decide
48
+ * whether we should treat it as a PSCI call or with the architecturally
49
* defined behaviour for an SMC or HVC (which might be UNDEF or trap
50
* to EL2 or to EL3).
51
*/
52
- CPUARMState *env = &cpu->env;
53
- uint64_t param = is_a64(env) ? env->xregs[0] : env->regs[0];
54
55
switch (excp_type) {
56
case EXCP_HVC:
57
@@ -XXX,XX +XXX,XX @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
58
return false;
39
}
59
}
40
60
41
- ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, init);
61
- switch (param) {
42
+ if (init->target == -1) {
62
- case QEMU_PSCI_0_2_FN_PSCI_VERSION:
43
+ struct kvm_vcpu_init preferred;
63
- case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
44
+
64
- case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
45
+ ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
65
- case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
46
+ if (!ret) {
66
- case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
47
+ init->target = preferred.target;
67
- case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
48
+ }
68
- case QEMU_PSCI_0_1_FN_CPU_ON:
49
+ }
69
- case QEMU_PSCI_0_2_FN_CPU_ON:
50
if (ret >= 0) {
70
- case QEMU_PSCI_0_2_FN64_CPU_ON:
51
ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
71
- case QEMU_PSCI_0_1_FN_CPU_OFF:
52
if (ret < 0) {
72
- case QEMU_PSCI_0_2_FN_CPU_OFF:
53
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
73
- case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
54
* creating one kind of guest CPU which is its preferred
74
- case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
55
* CPU type.
75
- case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
56
*/
76
- case QEMU_PSCI_0_1_FN_MIGRATE:
57
+ struct kvm_vcpu_init try;
77
- case QEMU_PSCI_0_2_FN_MIGRATE:
58
+
78
- return true;
59
while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
79
- default:
60
- init->target = *cpus_to_try++;
80
- return false;
61
- memset(init->features, 0, sizeof(init->features));
81
- }
62
- ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
82
+ return true;
63
+ try.target = *cpus_to_try++;
83
}
64
+ memcpy(try.features, init->features, sizeof(init->features));
84
65
+ ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
85
void arm_handle_psci_call(ARMCPU *cpu)
66
if (ret >= 0) {
86
@@ -XXX,XX +XXX,XX @@ void arm_handle_psci_call(ARMCPU *cpu)
67
break;
87
break;
68
}
88
case QEMU_PSCI_0_1_FN_MIGRATE:
69
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
89
case QEMU_PSCI_0_2_FN_MIGRATE:
70
if (ret < 0) {
90
+ default:
71
goto err;
91
ret = QEMU_PSCI_RET_NOT_SUPPORTED;
72
}
92
break;
73
+ init->target = try.target;
93
- default:
74
} else {
94
- g_assert_not_reached();
75
/* Treat a NULL cpus_to_try argument the same as an empty
95
}
76
* list, which means we will fail the call since this must
96
77
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
97
err:
78
index XXXXXXX..XXXXXXX 100644
79
--- a/target/arm/kvm32.c
80
+++ b/target/arm/kvm32.c
81
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
82
QEMU_KVM_ARM_TARGET_CORTEX_A15,
83
QEMU_KVM_ARM_TARGET_NONE
84
};
85
- struct kvm_vcpu_init init;
86
+ /*
87
+ * target = -1 informs kvm_arm_create_scratch_host_vcpu()
88
+ * to use the preferred target
89
+ */
90
+ struct kvm_vcpu_init init = { .target = -1, };
91
92
if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
93
return false;
94
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/target/arm/kvm64.c
97
+++ b/target/arm/kvm64.c
98
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
99
KVM_ARM_TARGET_CORTEX_A57,
100
QEMU_KVM_ARM_TARGET_NONE
101
};
102
- struct kvm_vcpu_init init;
103
+ /*
104
+ * target = -1 informs kvm_arm_create_scratch_host_vcpu()
105
+ * to use the preferred target
106
+ */
107
+ struct kvm_vcpu_init init = { .target = -1, };
108
109
if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
110
return false;
111
--
98
--
112
2.20.1
99
2.25.1
113
100
114
101
diff view generated by jsdifflib
New patch
1
Guest code on highbank may make non-PSCI SMC calls in order to
2
enable/disable the L2x0 cache controller (see the Linux kernel's
3
arch/arm/mach-highbank/highbank.c highbank_l2c310_write_sec()
4
function). The ABI for this is documented in kernel commit
5
8e56130dcb as being borrowed from the OMAP44xx ROM. The OMAP44xx TRM
6
documents this function ID as having no return value and potentially
7
trashing all guest registers except SP and PC. For QEMU's purposes
8
(where our L2x0 model is a stub and enabling or disabling it doesn't
9
affect the guest behaviour) a simple "do nothing" SMC is fine.
1
10
11
We currently implement this NOP behaviour using a little bit of
12
Secure code we run before jumping to the guest kernel, which is
13
written by arm_write_secure_board_setup_dummy_smc(). The code sets
14
up a set of Secure vectors where the SMC entry point returns without
15
doing anything.
16
17
Now that the PSCI SMC emulation handles all SMC calls (setting r0 to
18
an error code if the input r0 function identifier is not recognized),
19
we can use that default behaviour as sufficient for the highbank
20
cache controller call. (Because the guest code assumes r0 has no
21
interesting value on exit it doesn't matter that we set it to the
22
error code). We can therefore delete the highbank board code that
23
sets secure_board_setup to true and writes the secure-code bootstub.
24
25
(Note that because the OMAP44xx ABI puts function-identifiers in
26
r12 and PSCI uses r0, we only avoid a clash because Linux's code
27
happens to put the function-identifier in both registers. But this
28
is true also when the kernel is running on real firmware that
29
implements both ABIs as far as I can see.)
30
31
This change fixes in passing booting on the 'midway' board model,
32
which has been completely broken since we added support for Hyp
33
mode to the Cortex-A15 CPU. When we did that boot.c was made to
34
start running the guest code in Hyp mode; this includes the
35
board_setup hook, which instantly UNDEFs because the NSACR is
36
not accessible from Hyp. (Put another way, we never made the
37
secure_board_setup hook support cope with Hyp mode.)
38
39
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
40
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
41
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
42
Tested-by: Cédric Le Goater <clg@kaod.org>
43
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
44
Message-id: 20220127154639.2090164-12-peter.maydell@linaro.org
45
---
46
hw/arm/highbank.c | 8 --------
47
1 file changed, 8 deletions(-)
48
49
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/hw/arm/highbank.c
52
+++ b/hw/arm/highbank.c
53
@@ -XXX,XX +XXX,XX @@
54
55
/* Board init. */
56
57
-static void hb_write_board_setup(ARMCPU *cpu,
58
- const struct arm_boot_info *info)
59
-{
60
- arm_write_secure_board_setup_dummy_smc(cpu, info, MVBAR_ADDR);
61
-}
62
-
63
static void hb_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info)
64
{
65
int n;
66
@@ -XXX,XX +XXX,XX @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
67
highbank_binfo.write_secondary_boot = hb_write_secondary;
68
highbank_binfo.secondary_cpu_reset_hook = hb_reset_secondary;
69
highbank_binfo.board_setup_addr = BOARD_SETUP_ADDR;
70
- highbank_binfo.write_board_setup = hb_write_board_setup;
71
- highbank_binfo.secure_board_setup = true;
72
highbank_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC;
73
74
arm_load_kernel(ARM_CPU(first_cpu), machine, &highbank_binfo);
75
--
76
2.25.1
77
78
diff view generated by jsdifflib
New patch
1
Now that we have dealt with the one special case (highbank) that needed
2
to set both psci_conduit and secure_board_setup, we don't need to
3
allow that combination any more. It doesn't make sense in general,
4
so use an assertion to ensure we don't add new boards that do it
5
by accident without thinking through the consequences.
1
6
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
10
Tested-by: Cédric Le Goater <clg@kaod.org>
11
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
12
Message-id: 20220127154639.2090164-13-peter.maydell@linaro.org
13
---
14
hw/arm/boot.c | 10 ++++++++++
15
1 file changed, 10 insertions(+)
16
17
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/arm/boot.c
20
+++ b/hw/arm/boot.c
21
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
22
* supported exception level or in a lower one.
23
*/
24
25
+ /*
26
+ * If PSCI is enabled, then SMC calls all go to the PSCI handler and
27
+ * are never emulated to trap into guest code. It therefore does not
28
+ * make sense for the board to have a setup code fragment that runs
29
+ * in Secure, because this will probably need to itself issue an SMC of some
30
+ * kind as part of its operation.
31
+ */
32
+ assert(info->psci_conduit == QEMU_PSCI_CONDUIT_DISABLED ||
33
+ !info->secure_board_setup);
34
+
35
/* Boot into highest supported EL ... */
36
if (arm_feature(env, ARM_FEATURE_EL3)) {
37
boot_el = 3;
38
--
39
2.25.1
40
41
diff view generated by jsdifflib
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
If we're using PSCI emulation to start secondary CPUs, there is no
2
point in writing the "secondary boot" stub code, because it will
3
never be used -- secondary CPUs start powered-off, and when powered
4
on are set to begin execution at the address specified by the guest's
5
power-on PSCI call, not at the stub.
2
6
3
Rebuild hflags when modifying CPUState at boot.
7
Move the call to the hook that writes the secondary boot stub code so
8
that we can do it only if we're starting a Linux kernel and not using
9
PSCI.
4
10
5
Fixes: e979972a6a
11
(None of the users of the hook care about the ordering of its call
6
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
12
relative to anything else: they only use it to write a rom blob to
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
13
guest memory.)
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
14
9
Reviewed-by: Luc Michel <luc.michel@greensocs.com>
10
Message-id: 20191031040830.18800-2-edgar.iglesias@xilinx.com
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
17
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
18
Tested-by: Cédric Le Goater <clg@kaod.org>
19
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
20
Message-id: 20220127154639.2090164-14-peter.maydell@linaro.org
12
---
21
---
13
hw/arm/boot.c | 1 +
22
include/hw/arm/boot.h | 3 +++
14
1 file changed, 1 insertion(+)
23
hw/arm/boot.c | 35 ++++++++++++++++++++++++-----------
24
2 files changed, 27 insertions(+), 11 deletions(-)
15
25
26
diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h
27
index XXXXXXX..XXXXXXX 100644
28
--- a/include/hw/arm/boot.h
29
+++ b/include/hw/arm/boot.h
30
@@ -XXX,XX +XXX,XX @@ struct arm_boot_info {
31
* boot loader/boot ROM code, and secondary_cpu_reset_hook() should
32
* perform any necessary CPU reset handling and set the PC for the
33
* secondary CPUs to point at this boot blob.
34
+ *
35
+ * These hooks won't be called if secondary CPUs are booting via
36
+ * emulated PSCI (see psci_conduit below).
37
*/
38
void (*write_secondary_boot)(ARMCPU *cpu,
39
const struct arm_boot_info *info);
16
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
40
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
17
index XXXXXXX..XXXXXXX 100644
41
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/arm/boot.c
42
--- a/hw/arm/boot.c
19
+++ b/hw/arm/boot.c
43
+++ b/hw/arm/boot.c
20
@@ -XXX,XX +XXX,XX @@ static void do_cpu_reset(void *opaque)
44
@@ -XXX,XX +XXX,XX @@ static void do_cpu_reset(void *opaque)
45
set_kernel_args(info, as);
46
}
47
}
48
- } else {
49
+ } else if (info->secondary_cpu_reset_hook) {
21
info->secondary_cpu_reset_hook(cpu, info);
50
info->secondary_cpu_reset_hook(cpu, info);
22
}
51
}
23
}
52
}
24
+ arm_rebuild_hflags(env);
53
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
54
elf_machine = EM_ARM;
25
}
55
}
26
}
56
27
57
- if (!info->secondary_cpu_reset_hook) {
58
- info->secondary_cpu_reset_hook = default_reset_secondary;
59
- }
60
- if (!info->write_secondary_boot) {
61
- info->write_secondary_boot = default_write_secondary;
62
- }
63
-
64
if (info->nb_cpus == 0)
65
info->nb_cpus = 1;
66
67
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
68
write_bootloader("bootloader", info->loader_start,
69
primary_loader, fixupcontext, as);
70
71
- if (info->nb_cpus > 1) {
72
- info->write_secondary_boot(cpu, info);
73
- }
74
if (info->write_board_setup) {
75
info->write_board_setup(cpu, info);
76
}
77
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
78
}
79
}
80
81
+ if (info->psci_conduit == QEMU_PSCI_CONDUIT_DISABLED &&
82
+ info->is_linux && info->nb_cpus > 1) {
83
+ /*
84
+ * We're booting Linux but not using PSCI, so for SMP we need
85
+ * to write a custom secondary CPU boot loader stub, and arrange
86
+ * for the secondary CPU reset to make the accompanying initialization.
87
+ */
88
+ if (!info->secondary_cpu_reset_hook) {
89
+ info->secondary_cpu_reset_hook = default_reset_secondary;
90
+ }
91
+ if (!info->write_secondary_boot) {
92
+ info->write_secondary_boot = default_write_secondary;
93
+ }
94
+ info->write_secondary_boot(cpu, info);
95
+ } else {
96
+ /*
97
+ * No secondary boot stub; don't use the reset hook that would
98
+ * have set the CPU up to call it
99
+ */
100
+ info->write_secondary_boot = NULL;
101
+ info->secondary_cpu_reset_hook = NULL;
102
+ }
103
+
104
/*
105
* arm_load_dtb() may add a PSCI node so it must be called after we have
106
* decided whether to enable PSCI and set the psci-conduit CPU properties.
28
--
107
--
29
2.20.1
108
2.25.1
30
109
31
110
diff view generated by jsdifflib
New patch
1
The highbank and midway board code includes boot-stub code for
2
handling secondary CPU boot which keeps the secondaries in a pen
3
until the primary writes to a known location with the address they
4
should jump to.
1
5
6
This code is never used, because the boards enable QEMU's PSCI
7
emulation, so secondary CPUs are kept powered off until the PSCI call
8
which turns them on, and then start execution from the address given
9
by the guest in that PSCI call. Delete the unreachable code.
10
11
(The code was wrong for midway in any case -- on the Cortex-A15 the
12
GIC CPU interface registers are at a different offset from PERIPHBASE
13
compared to the Cortex-A9, and the code baked-in the offsets for
14
highbank's A9.)
15
16
Note that this commit implicitly depends on the preceding "Don't
17
write secondary boot stub if using PSCI" commit -- the default
18
secondary-boot stub code overlaps with one of the highbank-specific
19
bootcode rom blobs, so we must suppress the secondary-boot
20
stub code entirely, not merely replace the highbank-specific
21
version with the default.
22
23
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
24
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
25
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
26
Tested-by: Cédric Le Goater <clg@kaod.org>
27
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
28
Message-id: 20220127154639.2090164-15-peter.maydell@linaro.org
29
---
30
hw/arm/highbank.c | 56 -----------------------------------------------
31
1 file changed, 56 deletions(-)
32
33
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/hw/arm/highbank.c
36
+++ b/hw/arm/highbank.c
37
@@ -XXX,XX +XXX,XX @@
38
39
/* Board init. */
40
41
-static void hb_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info)
42
-{
43
- int n;
44
- uint32_t smpboot[] = {
45
- 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5 - read current core id */
46
- 0xe210000f, /* ands r0, r0, #0x0f */
47
- 0xe3a03040, /* mov r3, #0x40 - jump address is 0x40 + 0x10 * core id */
48
- 0xe0830200, /* add r0, r3, r0, lsl #4 */
49
- 0xe59f2024, /* ldr r2, privbase */
50
- 0xe3a01001, /* mov r1, #1 */
51
- 0xe5821100, /* str r1, [r2, #256] - set GICC_CTLR.Enable */
52
- 0xe3a010ff, /* mov r1, #0xff */
53
- 0xe5821104, /* str r1, [r2, #260] - set GICC_PMR.Priority to 0xff */
54
- 0xf57ff04f, /* dsb */
55
- 0xe320f003, /* wfi */
56
- 0xe5901000, /* ldr r1, [r0] */
57
- 0xe1110001, /* tst r1, r1 */
58
- 0x0afffffb, /* beq <wfi> */
59
- 0xe12fff11, /* bx r1 */
60
- MPCORE_PERIPHBASE /* privbase: MPCore peripheral base address. */
61
- };
62
- for (n = 0; n < ARRAY_SIZE(smpboot); n++) {
63
- smpboot[n] = tswap32(smpboot[n]);
64
- }
65
- rom_add_blob_fixed_as("smpboot", smpboot, sizeof(smpboot), SMP_BOOT_ADDR,
66
- arm_boot_address_space(cpu, info));
67
-}
68
-
69
-static void hb_reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info)
70
-{
71
- CPUARMState *env = &cpu->env;
72
-
73
- switch (info->nb_cpus) {
74
- case 4:
75
- address_space_stl_notdirty(&address_space_memory,
76
- SMP_BOOT_REG + 0x30, 0,
77
- MEMTXATTRS_UNSPECIFIED, NULL);
78
- /* fallthrough */
79
- case 3:
80
- address_space_stl_notdirty(&address_space_memory,
81
- SMP_BOOT_REG + 0x20, 0,
82
- MEMTXATTRS_UNSPECIFIED, NULL);
83
- /* fallthrough */
84
- case 2:
85
- address_space_stl_notdirty(&address_space_memory,
86
- SMP_BOOT_REG + 0x10, 0,
87
- MEMTXATTRS_UNSPECIFIED, NULL);
88
- env->regs[15] = SMP_BOOT_ADDR;
89
- break;
90
- default:
91
- break;
92
- }
93
-}
94
-
95
#define NUM_REGS 0x200
96
static void hb_regs_write(void *opaque, hwaddr offset,
97
uint64_t value, unsigned size)
98
@@ -XXX,XX +XXX,XX @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
99
highbank_binfo.board_id = -1;
100
highbank_binfo.nb_cpus = smp_cpus;
101
highbank_binfo.loader_start = 0;
102
- highbank_binfo.write_secondary_boot = hb_write_secondary;
103
- highbank_binfo.secondary_cpu_reset_hook = hb_reset_secondary;
104
highbank_binfo.board_setup_addr = BOARD_SETUP_ADDR;
105
highbank_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC;
106
107
--
108
2.25.1
109
110
diff view generated by jsdifflib
New patch
1
1
We use the arm_boot_info::nb_cpus field in only one place, and that
2
place can easily get the number of CPUs locally rather than relying
3
on the board code to have set the field correctly. (At least one
4
board, xlnx-versal-virt, does not set the field despite having more
5
than one CPU.)
6
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
10
Tested-by: Cédric Le Goater <clg@kaod.org>
11
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
12
Message-id: 20220127154639.2090164-16-peter.maydell@linaro.org
13
---
14
include/hw/arm/boot.h | 1 -
15
hw/arm/aspeed.c | 1 -
16
hw/arm/boot.c | 7 +++----
17
hw/arm/exynos4_boards.c | 1 -
18
hw/arm/highbank.c | 1 -
19
hw/arm/imx25_pdk.c | 3 +--
20
hw/arm/kzm.c | 1 -
21
hw/arm/mcimx6ul-evk.c | 1 -
22
hw/arm/mcimx7d-sabre.c | 1 -
23
hw/arm/npcm7xx.c | 3 ---
24
hw/arm/orangepi.c | 4 +---
25
hw/arm/raspi.c | 1 -
26
hw/arm/realview.c | 1 -
27
hw/arm/sabrelite.c | 1 -
28
hw/arm/sbsa-ref.c | 1 -
29
hw/arm/vexpress.c | 1 -
30
hw/arm/virt.c | 1 -
31
hw/arm/xilinx_zynq.c | 1 -
32
18 files changed, 5 insertions(+), 26 deletions(-)
33
34
diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h
35
index XXXXXXX..XXXXXXX 100644
36
--- a/include/hw/arm/boot.h
37
+++ b/include/hw/arm/boot.h
38
@@ -XXX,XX +XXX,XX @@ struct arm_boot_info {
39
hwaddr smp_loader_start;
40
hwaddr smp_bootreg_addr;
41
hwaddr gic_cpu_if_addr;
42
- int nb_cpus;
43
int board_id;
44
/* ARM machines that support the ARM Security Extensions use this field to
45
* control whether Linux is booted as secure(true) or non-secure(false).
46
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/hw/arm/aspeed.c
49
+++ b/hw/arm/aspeed.c
50
@@ -XXX,XX +XXX,XX @@ static void aspeed_machine_init(MachineState *machine)
51
52
aspeed_board_binfo.ram_size = machine->ram_size;
53
aspeed_board_binfo.loader_start = sc->memmap[ASPEED_DEV_SDRAM];
54
- aspeed_board_binfo.nb_cpus = sc->num_cpus;
55
56
if (amc->i2c_init) {
57
amc->i2c_init(bmc);
58
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/hw/arm/boot.c
61
+++ b/hw/arm/boot.c
62
@@ -XXX,XX +XXX,XX @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu,
63
elf_machine = EM_ARM;
64
}
65
66
- if (info->nb_cpus == 0)
67
- info->nb_cpus = 1;
68
-
69
/* Assume that raw images are linux kernels, and ELF images are not. */
70
kernel_size = arm_load_elf(info, &elf_entry, &image_low_addr,
71
&image_high_addr, elf_machine, as);
72
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
73
AddressSpace *as = arm_boot_address_space(cpu, info);
74
int boot_el;
75
CPUARMState *env = &cpu->env;
76
+ int nb_cpus = 0;
77
78
/*
79
* CPU objects (unlike devices) are not automatically reset on system
80
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
81
*/
82
for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
83
qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
84
+ nb_cpus++;
85
}
86
87
/*
88
@@ -XXX,XX +XXX,XX @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info)
89
}
90
91
if (info->psci_conduit == QEMU_PSCI_CONDUIT_DISABLED &&
92
- info->is_linux && info->nb_cpus > 1) {
93
+ info->is_linux && nb_cpus > 1) {
94
/*
95
* We're booting Linux but not using PSCI, so for SMP we need
96
* to write a custom secondary CPU boot loader stub, and arrange
97
diff --git a/hw/arm/exynos4_boards.c b/hw/arm/exynos4_boards.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/hw/arm/exynos4_boards.c
100
+++ b/hw/arm/exynos4_boards.c
101
@@ -XXX,XX +XXX,XX @@ static unsigned long exynos4_board_ram_size[EXYNOS4_NUM_OF_BOARDS] = {
102
static struct arm_boot_info exynos4_board_binfo = {
103
.loader_start = EXYNOS4210_BASE_BOOT_ADDR,
104
.smp_loader_start = EXYNOS4210_SMP_BOOT_ADDR,
105
- .nb_cpus = EXYNOS4210_NCPUS,
106
.write_secondary_boot = exynos4210_write_secondary,
107
};
108
109
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
110
index XXXXXXX..XXXXXXX 100644
111
--- a/hw/arm/highbank.c
112
+++ b/hw/arm/highbank.c
113
@@ -XXX,XX +XXX,XX @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
114
* clear that the value is meaningless.
115
*/
116
highbank_binfo.board_id = -1;
117
- highbank_binfo.nb_cpus = smp_cpus;
118
highbank_binfo.loader_start = 0;
119
highbank_binfo.board_setup_addr = BOARD_SETUP_ADDR;
120
highbank_binfo.psci_conduit = QEMU_PSCI_CONDUIT_SMC;
121
diff --git a/hw/arm/imx25_pdk.c b/hw/arm/imx25_pdk.c
122
index XXXXXXX..XXXXXXX 100644
123
--- a/hw/arm/imx25_pdk.c
124
+++ b/hw/arm/imx25_pdk.c
125
@@ -XXX,XX +XXX,XX @@ static void imx25_pdk_init(MachineState *machine)
126
127
imx25_pdk_binfo.ram_size = machine->ram_size;
128
imx25_pdk_binfo.loader_start = FSL_IMX25_SDRAM0_ADDR;
129
- imx25_pdk_binfo.board_id = 1771,
130
- imx25_pdk_binfo.nb_cpus = 1;
131
+ imx25_pdk_binfo.board_id = 1771;
132
133
for (i = 0; i < FSL_IMX25_NUM_ESDHCS; i++) {
134
BusState *bus;
135
diff --git a/hw/arm/kzm.c b/hw/arm/kzm.c
136
index XXXXXXX..XXXXXXX 100644
137
--- a/hw/arm/kzm.c
138
+++ b/hw/arm/kzm.c
139
@@ -XXX,XX +XXX,XX @@ static void kzm_init(MachineState *machine)
140
}
141
142
kzm_binfo.ram_size = machine->ram_size;
143
- kzm_binfo.nb_cpus = 1;
144
145
if (!qtest_enabled()) {
146
arm_load_kernel(&s->soc.cpu, machine, &kzm_binfo);
147
diff --git a/hw/arm/mcimx6ul-evk.c b/hw/arm/mcimx6ul-evk.c
148
index XXXXXXX..XXXXXXX 100644
149
--- a/hw/arm/mcimx6ul-evk.c
150
+++ b/hw/arm/mcimx6ul-evk.c
151
@@ -XXX,XX +XXX,XX @@ static void mcimx6ul_evk_init(MachineState *machine)
152
.loader_start = FSL_IMX6UL_MMDC_ADDR,
153
.board_id = -1,
154
.ram_size = machine->ram_size,
155
- .nb_cpus = machine->smp.cpus,
156
.psci_conduit = QEMU_PSCI_CONDUIT_SMC,
157
};
158
159
diff --git a/hw/arm/mcimx7d-sabre.c b/hw/arm/mcimx7d-sabre.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/hw/arm/mcimx7d-sabre.c
162
+++ b/hw/arm/mcimx7d-sabre.c
163
@@ -XXX,XX +XXX,XX @@ static void mcimx7d_sabre_init(MachineState *machine)
164
.loader_start = FSL_IMX7_MMDC_ADDR,
165
.board_id = -1,
166
.ram_size = machine->ram_size,
167
- .nb_cpus = machine->smp.cpus,
168
.psci_conduit = QEMU_PSCI_CONDUIT_SMC,
169
};
170
171
diff --git a/hw/arm/npcm7xx.c b/hw/arm/npcm7xx.c
172
index XXXXXXX..XXXXXXX 100644
173
--- a/hw/arm/npcm7xx.c
174
+++ b/hw/arm/npcm7xx.c
175
@@ -XXX,XX +XXX,XX @@ static struct arm_boot_info npcm7xx_binfo = {
176
177
void npcm7xx_load_kernel(MachineState *machine, NPCM7xxState *soc)
178
{
179
- NPCM7xxClass *sc = NPCM7XX_GET_CLASS(soc);
180
-
181
npcm7xx_binfo.ram_size = machine->ram_size;
182
- npcm7xx_binfo.nb_cpus = sc->num_cpus;
183
184
arm_load_kernel(&soc->cpu[0], machine, &npcm7xx_binfo);
185
}
186
diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c
187
index XXXXXXX..XXXXXXX 100644
188
--- a/hw/arm/orangepi.c
189
+++ b/hw/arm/orangepi.c
190
@@ -XXX,XX +XXX,XX @@
191
#include "hw/qdev-properties.h"
192
#include "hw/arm/allwinner-h3.h"
193
194
-static struct arm_boot_info orangepi_binfo = {
195
- .nb_cpus = AW_H3_NUM_CPUS,
196
-};
197
+static struct arm_boot_info orangepi_binfo;
198
199
static void orangepi_init(MachineState *machine)
200
{
201
diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c
202
index XXXXXXX..XXXXXXX 100644
203
--- a/hw/arm/raspi.c
204
+++ b/hw/arm/raspi.c
205
@@ -XXX,XX +XXX,XX @@ static void setup_boot(MachineState *machine, RaspiProcessorId processor_id,
206
207
s->binfo.board_id = MACH_TYPE_BCM2708;
208
s->binfo.ram_size = ram_size;
209
- s->binfo.nb_cpus = machine->smp.cpus;
210
211
if (processor_id <= PROCESSOR_ID_BCM2836) {
212
/*
213
diff --git a/hw/arm/realview.c b/hw/arm/realview.c
214
index XXXXXXX..XXXXXXX 100644
215
--- a/hw/arm/realview.c
216
+++ b/hw/arm/realview.c
217
@@ -XXX,XX +XXX,XX @@ static void realview_init(MachineState *machine,
218
memory_region_add_subregion(sysmem, SMP_BOOT_ADDR, ram_hack);
219
220
realview_binfo.ram_size = ram_size;
221
- realview_binfo.nb_cpus = smp_cpus;
222
realview_binfo.board_id = realview_board_id[board_type];
223
realview_binfo.loader_start = (board_type == BOARD_PB_A8 ? 0x70000000 : 0);
224
arm_load_kernel(ARM_CPU(first_cpu), machine, &realview_binfo);
225
diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c
226
index XXXXXXX..XXXXXXX 100644
227
--- a/hw/arm/sabrelite.c
228
+++ b/hw/arm/sabrelite.c
229
@@ -XXX,XX +XXX,XX @@ static void sabrelite_init(MachineState *machine)
230
}
231
232
sabrelite_binfo.ram_size = machine->ram_size;
233
- sabrelite_binfo.nb_cpus = machine->smp.cpus;
234
sabrelite_binfo.secure_boot = true;
235
sabrelite_binfo.write_secondary_boot = sabrelite_write_secondary;
236
sabrelite_binfo.secondary_cpu_reset_hook = sabrelite_reset_secondary;
237
diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/hw/arm/sbsa-ref.c
240
+++ b/hw/arm/sbsa-ref.c
241
@@ -XXX,XX +XXX,XX @@ static void sbsa_ref_init(MachineState *machine)
242
create_secure_ec(secure_sysmem);
243
244
sms->bootinfo.ram_size = machine->ram_size;
245
- sms->bootinfo.nb_cpus = smp_cpus;
246
sms->bootinfo.board_id = -1;
247
sms->bootinfo.loader_start = sbsa_ref_memmap[SBSA_MEM].base;
248
sms->bootinfo.get_dtb = sbsa_ref_dtb;
249
diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c
250
index XXXXXXX..XXXXXXX 100644
251
--- a/hw/arm/vexpress.c
252
+++ b/hw/arm/vexpress.c
253
@@ -XXX,XX +XXX,XX @@ static void vexpress_common_init(MachineState *machine)
254
}
255
256
daughterboard->bootinfo.ram_size = machine->ram_size;
257
- daughterboard->bootinfo.nb_cpus = machine->smp.cpus;
258
daughterboard->bootinfo.board_id = VEXPRESS_BOARD_ID;
259
daughterboard->bootinfo.loader_start = daughterboard->loader_start;
260
daughterboard->bootinfo.smp_loader_start = map[VE_SRAM];
261
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
262
index XXXXXXX..XXXXXXX 100644
263
--- a/hw/arm/virt.c
264
+++ b/hw/arm/virt.c
265
@@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine)
266
}
267
268
vms->bootinfo.ram_size = machine->ram_size;
269
- vms->bootinfo.nb_cpus = smp_cpus;
270
vms->bootinfo.board_id = -1;
271
vms->bootinfo.loader_start = vms->memmap[VIRT_MEM].base;
272
vms->bootinfo.get_dtb = machvirt_dtb;
273
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
274
index XXXXXXX..XXXXXXX 100644
275
--- a/hw/arm/xilinx_zynq.c
276
+++ b/hw/arm/xilinx_zynq.c
277
@@ -XXX,XX +XXX,XX @@ static void zynq_init(MachineState *machine)
278
sysbus_mmio_map(busdev, 0, 0xF8007000);
279
280
zynq_binfo.ram_size = machine->ram_size;
281
- zynq_binfo.nb_cpus = 1;
282
zynq_binfo.board_id = 0xd32;
283
zynq_binfo.loader_start = 0;
284
zynq_binfo.board_setup_addr = BOARD_SETUP_ADDR;
285
--
286
2.25.1
287
288
diff view generated by jsdifflib
New patch
1
If we're using PSCI emulation, we add a /psci node to the device tree
2
we pass to the guest. At the moment, if the dtb already has a /psci
3
node in it, we retain it, rather than replacing it. (This behaviour
4
was added in commit c39770cd637765 in 2018.)
1
5
6
This is a problem if the existing node doesn't match our PSCI
7
emulation. In particular, it might specify the wrong method (HVC vs
8
SMC), or wrong function IDs for cpu_suspend/cpu_off/etc, in which
9
case the guest will not get the behaviour it wants when it makes PSCI
10
calls.
11
12
An example of this is trying to boot the highbank or midway board
13
models using the device tree supplied in the kernel sources: this
14
device tree includes a /psci node that specifies function IDs that
15
don't match the (PSCI 0.2 compliant) IDs that QEMU uses. The dtb
16
cpu_suspend function ID happens to match the PSCI 0.2 cpu_off ID, so
17
the guest hangs after booting when the kernel tries to idle the CPU
18
and instead it gets turned off.
19
20
Instead of retaining an existing /psci node, delete it entirely
21
and replace it with a node whose properties match QEMU's PSCI
22
emulation behaviour. This matches the way we handle /memory nodes,
23
where we also delete any existing nodes and write in ones that
24
match the way QEMU is going to behave.
25
26
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
27
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
28
Reviewed-by: Niek Linnenbank <nieklinnenbank@gmail.com>
29
Tested-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
30
Tested-by: Cédric Le Goater <clg@kaod.org>
31
Tested-by: Niek Linnenbank <nieklinnenbank@gmail.com>
32
Message-id: 20220127154639.2090164-17-peter.maydell@linaro.org
33
---
34
hw/arm/boot.c | 7 ++++---
35
1 file changed, 4 insertions(+), 3 deletions(-)
36
37
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/hw/arm/boot.c
40
+++ b/hw/arm/boot.c
41
@@ -XXX,XX +XXX,XX @@ static void fdt_add_psci_node(void *fdt)
42
}
43
44
/*
45
- * If /psci node is present in provided DTB, assume that no fixup
46
- * is necessary and all PSCI configuration should be taken as-is
47
+ * A pre-existing /psci node might specify function ID values
48
+ * that don't match QEMU's PSCI implementation. Delete the whole
49
+ * node and put our own in instead.
50
*/
51
rc = fdt_path_offset(fdt, "/psci");
52
if (rc >= 0) {
53
- return;
54
+ qemu_fdt_nop_node(fdt, "/psci");
55
}
56
57
qemu_fdt_add_subnode(fdt, "/psci");
58
--
59
2.25.1
60
61
diff view generated by jsdifflib
New patch
1
From: "Edgar E. Iglesias" <edgar.iglesias@xilinx.com>
1
2
3
Always call arm_load_kernel() regardless of kernel_filename being
4
set. This is needed because arm_load_kernel() sets up reset for
5
the CPUs.
6
7
Fixes: 6f16da53ff (hw/arm: versal: Add a virtual Xilinx Versal board)
8
Reported-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
10
Message-id: 20220130110313.4045351-2-edgar.iglesias@gmail.com
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
hw/arm/xlnx-versal-virt.c | 11 ++---------
15
1 file changed, 2 insertions(+), 9 deletions(-)
16
17
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/arm/xlnx-versal-virt.c
20
+++ b/hw/arm/xlnx-versal-virt.c
21
@@ -XXX,XX +XXX,XX @@ static void versal_virt_init(MachineState *machine)
22
s->binfo.get_dtb = versal_virt_get_dtb;
23
s->binfo.modify_dtb = versal_virt_modify_dtb;
24
s->binfo.psci_conduit = psci_conduit;
25
- if (machine->kernel_filename) {
26
- arm_load_kernel(&s->soc.fpd.apu.cpu[0], machine, &s->binfo);
27
- } else {
28
- AddressSpace *as = arm_boot_address_space(&s->soc.fpd.apu.cpu[0],
29
- &s->binfo);
30
+ if (!machine->kernel_filename) {
31
/* Some boot-loaders (e.g u-boot) don't like blobs at address 0 (NULL).
32
* Offset things by 4K. */
33
s->binfo.loader_start = 0x1000;
34
s->binfo.dtb_limit = 0x1000000;
35
- if (arm_load_dtb(s->binfo.loader_start,
36
- &s->binfo, s->binfo.dtb_limit, as, machine) < 0) {
37
- exit(EXIT_FAILURE);
38
- }
39
}
40
+ arm_load_kernel(&s->soc.fpd.apu.cpu[0], machine, &s->binfo);
41
42
for (i = 0; i < XLNX_VERSAL_NUM_OSPI_FLASH; i++) {
43
BusState *spi_bus;
44
--
45
2.25.1
46
47
diff view generated by jsdifflib
New patch
1
From: Alex Bennée <alex.bennee@linaro.org>
1
2
3
The recently introduced debug tests in kvm-unit-tests exposed an error
4
in our handling of singlestep cause by stale hflags. This is caught by
5
--enable-debug-tcg when running the tests.
6
7
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
8
Reported-by: Andrew Jones <drjones@redhat.com>
9
Tested-by: Andrew Jones <drjones@redhat.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20220202122353.457084-1-alex.bennee@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
target/arm/helper-a64.c | 2 ++
15
1 file changed, 2 insertions(+)
16
17
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/helper-a64.c
20
+++ b/target/arm/helper-a64.c
21
@@ -XXX,XX +XXX,XX @@ void HELPER(msr_i_daifset)(CPUARMState *env, uint32_t imm)
22
{
23
daif_check(env, 0x1e, imm, GETPC());
24
env->daif |= (imm << 6) & PSTATE_DAIF;
25
+ arm_rebuild_hflags(env);
26
}
27
28
void HELPER(msr_i_daifclear)(CPUARMState *env, uint32_t imm)
29
{
30
daif_check(env, 0x1f, imm, GETPC());
31
env->daif &= ~((imm << 6) & PSTATE_DAIF);
32
+ arm_rebuild_hflags(env);
33
}
34
35
/* Convert a softfloat float_relation_ (as returned by
36
--
37
2.25.1
38
39
diff view generated by jsdifflib
New patch
1
From: Richard Petri <git@rpls.de>
1
2
3
Starting the SysTick timer and changing the clock source a the same time
4
will result in an error, if the previous clock period was zero. For exmaple,
5
on the mps2-tz platforms, no refclk is present. Right after reset, the
6
configured ptimer period is zero, and trying to enabling it will turn it off
7
right away. E.g., code running on the platform setting
8
9
SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | SysTick_CTRL_ENABLE_Msk;
10
11
should change the clock source and enable the timer on real hardware, but
12
resulted in an error in qemu.
13
14
Signed-off-by: Richard Petri <git@rpls.de>
15
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
16
Message-id: 20220201192650.289584-1-git@rpls.de
17
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18
---
19
hw/timer/armv7m_systick.c | 8 ++++----
20
1 file changed, 4 insertions(+), 4 deletions(-)
21
22
diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/hw/timer/armv7m_systick.c
25
+++ b/hw/timer/armv7m_systick.c
26
@@ -XXX,XX +XXX,XX @@ static MemTxResult systick_write(void *opaque, hwaddr addr,
27
s->control &= 0xfffffff8;
28
s->control |= value & 7;
29
30
+ if ((oldval ^ value) & SYSTICK_CLKSOURCE) {
31
+ systick_set_period_from_clock(s);
32
+ }
33
+
34
if ((oldval ^ value) & SYSTICK_ENABLE) {
35
if (value & SYSTICK_ENABLE) {
36
ptimer_run(s->ptimer, 0);
37
@@ -XXX,XX +XXX,XX @@ static MemTxResult systick_write(void *opaque, hwaddr addr,
38
ptimer_stop(s->ptimer);
39
}
40
}
41
-
42
- if ((oldval ^ value) & SYSTICK_CLKSOURCE) {
43
- systick_set_period_from_clock(s);
44
- }
45
ptimer_transaction_commit(s->ptimer);
46
break;
47
}
48
--
49
2.25.1
50
51
diff view generated by jsdifflib
New patch
1
From: Eric Auger <eric.auger@redhat.com>
1
2
3
We currently miss a bunch of register resets in the device reset
4
function. This sometimes prevents the guest from rebooting after
5
a system_reset (with virtio-blk-pci). For instance, we may get
6
the following errors:
7
8
invalid STE
9
smmuv3-iommu-memory-region-0-0 translation failed for iova=0x13a9d2000(SMMU_EVT_C_BAD_STE)
10
Invalid read at addr 0x13A9D2000, size 2, region '(null)', reason: rejected
11
invalid STE
12
smmuv3-iommu-memory-region-0-0 translation failed for iova=0x13a9d2000(SMMU_EVT_C_BAD_STE)
13
Invalid write at addr 0x13A9D2000, size 2, region '(null)', reason: rejected
14
invalid STE
15
16
Signed-off-by: Eric Auger <eric.auger@redhat.com>
17
Message-id: 20220202111602.627429-1-eric.auger@redhat.com
18
Fixes: 10a83cb988 ("hw/arm/smmuv3: Skeleton")
19
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
20
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
---
22
hw/arm/smmuv3.c | 6 ++++++
23
1 file changed, 6 insertions(+)
24
25
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/hw/arm/smmuv3.c
28
+++ b/hw/arm/smmuv3.c
29
@@ -XXX,XX +XXX,XX @@ static void smmuv3_init_regs(SMMUv3State *s)
30
s->features = 0;
31
s->sid_split = 0;
32
s->aidr = 0x1;
33
+ s->cr[0] = 0;
34
+ s->cr0ack = 0;
35
+ s->irq_ctrl = 0;
36
+ s->gerror = 0;
37
+ s->gerrorn = 0;
38
+ s->statusr = 0;
39
}
40
41
static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
42
--
43
2.25.1
44
45
diff view generated by jsdifflib
New patch
1
1
Currently the ITS accesses each 8-byte doubleword in a 4-doubleword
2
command packet with a separate address_space_ldq_le() call. This is
3
awkward because the individual command processing functions have
4
ended up with code to handle "load more doublewords out of the
5
packet", which is both unwieldy and also a potential source of bugs
6
because it's not obvious when looking at a line that pulls a field
7
out of the 'value' variable which of the 4 doublewords that variable
8
currently holds.
9
10
Switch to using address_space_map() to map the whole command packet
11
at once and fish the four doublewords out of it. Then each process_*
12
function can start with a few lines of code that extract the fields
13
it cares about.
14
15
This requires us to split out the guts of process_its_cmd() into a
16
new do_process_its_cmd(), because we were previously overloading the
17
value and offset arguments as a backdoor way to directly pass the
18
devid and eventid from a write to GITS_TRANSLATER. The new
19
do_process_its_cmd() takes those arguments directly, and
20
process_its_cmd() is just a wrapper that does the "read fields from
21
command packet" part.
22
23
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
24
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
25
Message-id: 20220201193207.2771604-2-peter.maydell@linaro.org
26
---
27
hw/intc/gicv3_internal.h | 4 +-
28
hw/intc/arm_gicv3_its.c | 208 +++++++++++----------------------------
29
2 files changed, 62 insertions(+), 150 deletions(-)
30
31
diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/hw/intc/gicv3_internal.h
34
+++ b/hw/intc/gicv3_internal.h
35
@@ -XXX,XX +XXX,XX @@ FIELD(GITS_TYPER, CIL, 36, 1)
36
#define LPI_CTE_ENABLED TABLE_ENTRY_VALID_MASK
37
#define LPI_PRIORITY_MASK 0xfc
38
39
-#define GITS_CMDQ_ENTRY_SIZE 32
40
-#define NUM_BYTES_IN_DW 8
41
+#define GITS_CMDQ_ENTRY_WORDS 4
42
+#define GITS_CMDQ_ENTRY_SIZE (GITS_CMDQ_ENTRY_WORDS * sizeof(uint64_t))
43
44
#define CMD_MASK 0xff
45
46
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/hw/intc/arm_gicv3_its.c
49
+++ b/hw/intc/arm_gicv3_its.c
50
@@ -XXX,XX +XXX,XX @@ static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
51
* 3. handling of ITS CLEAR command
52
* 4. handling of ITS DISCARD command
53
*/
54
-static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
55
- uint32_t offset, ItsCmdType cmd)
56
+static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
57
+ uint32_t eventid, ItsCmdType cmd)
58
{
59
- AddressSpace *as = &s->gicv3->dma_as;
60
- uint32_t devid, eventid;
61
MemTxResult res = MEMTX_OK;
62
bool dte_valid;
63
uint64_t dte = 0;
64
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
65
bool cte_valid = false;
66
uint64_t rdbase;
67
68
- if (cmd == NONE) {
69
- devid = offset;
70
- } else {
71
- devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
72
-
73
- offset += NUM_BYTES_IN_DW;
74
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
75
- MEMTXATTRS_UNSPECIFIED, &res);
76
- }
77
-
78
- if (res != MEMTX_OK) {
79
- return CMD_STALL;
80
- }
81
-
82
- eventid = (value & EVENTID_MASK);
83
-
84
if (devid >= s->dt.num_entries) {
85
qemu_log_mask(LOG_GUEST_ERROR,
86
"%s: invalid command attributes: devid %d>=%d",
87
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
88
}
89
return CMD_CONTINUE;
90
}
91
-
92
-static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
93
- uint32_t offset, bool ignore_pInt)
94
+static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
95
+ ItsCmdType cmd)
96
+{
97
+ uint32_t devid, eventid;
98
+
99
+ devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
100
+ eventid = cmdpkt[1] & EVENTID_MASK;
101
+ return do_process_its_cmd(s, devid, eventid, cmd);
102
+}
103
+
104
+static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
105
+ bool ignore_pInt)
106
{
107
- AddressSpace *as = &s->gicv3->dma_as;
108
uint32_t devid, eventid;
109
uint32_t pIntid = 0;
110
uint64_t num_eventids;
111
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
112
uint64_t dte = 0;
113
IteEntry ite = {};
114
115
- devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
116
- offset += NUM_BYTES_IN_DW;
117
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
118
- MEMTXATTRS_UNSPECIFIED, &res);
119
-
120
- if (res != MEMTX_OK) {
121
- return CMD_STALL;
122
- }
123
-
124
- eventid = (value & EVENTID_MASK);
125
+ devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
126
+ eventid = cmdpkt[1] & EVENTID_MASK;
127
128
if (ignore_pInt) {
129
pIntid = eventid;
130
} else {
131
- pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
132
+ pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
133
}
134
135
- offset += NUM_BYTES_IN_DW;
136
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
137
- MEMTXATTRS_UNSPECIFIED, &res);
138
-
139
- if (res != MEMTX_OK) {
140
- return CMD_STALL;
141
- }
142
-
143
- icid = value & ICID_MASK;
144
+ icid = cmdpkt[2] & ICID_MASK;
145
146
if (devid >= s->dt.num_entries) {
147
qemu_log_mask(LOG_GUEST_ERROR,
148
@@ -XXX,XX +XXX,XX @@ static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
149
return res == MEMTX_OK;
150
}
151
152
-static ItsCmdResult process_mapc(GICv3ITSState *s, uint32_t offset)
153
+static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
154
{
155
- AddressSpace *as = &s->gicv3->dma_as;
156
uint16_t icid;
157
uint64_t rdbase;
158
bool valid;
159
- MemTxResult res = MEMTX_OK;
160
- uint64_t value;
161
162
- offset += NUM_BYTES_IN_DW;
163
- offset += NUM_BYTES_IN_DW;
164
+ icid = cmdpkt[2] & ICID_MASK;
165
166
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
167
- MEMTXATTRS_UNSPECIFIED, &res);
168
-
169
- if (res != MEMTX_OK) {
170
- return CMD_STALL;
171
- }
172
-
173
- icid = value & ICID_MASK;
174
-
175
- rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
176
+ rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
177
rdbase &= RDBASE_PROCNUM_MASK;
178
179
- valid = (value & CMD_FIELD_VALID_MASK);
180
+ valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
181
182
if ((icid >= s->ct.num_entries) || (rdbase >= s->gicv3->num_cpu)) {
183
qemu_log_mask(LOG_GUEST_ERROR,
184
@@ -XXX,XX +XXX,XX @@ static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
185
return res == MEMTX_OK;
186
}
187
188
-static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
189
- uint32_t offset)
190
+static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
191
{
192
- AddressSpace *as = &s->gicv3->dma_as;
193
uint32_t devid;
194
uint8_t size;
195
uint64_t itt_addr;
196
bool valid;
197
- MemTxResult res = MEMTX_OK;
198
199
- devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
200
-
201
- offset += NUM_BYTES_IN_DW;
202
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
203
- MEMTXATTRS_UNSPECIFIED, &res);
204
-
205
- if (res != MEMTX_OK) {
206
- return CMD_STALL;
207
- }
208
-
209
- size = (value & SIZE_MASK);
210
-
211
- offset += NUM_BYTES_IN_DW;
212
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
213
- MEMTXATTRS_UNSPECIFIED, &res);
214
-
215
- if (res != MEMTX_OK) {
216
- return CMD_STALL;
217
- }
218
-
219
- itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
220
-
221
- valid = (value & CMD_FIELD_VALID_MASK);
222
+ devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
223
+ size = cmdpkt[1] & SIZE_MASK;
224
+ itt_addr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
225
+ valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
226
227
if ((devid >= s->dt.num_entries) ||
228
(size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
229
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
230
return update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE : CMD_STALL;
231
}
232
233
-static ItsCmdResult process_movall(GICv3ITSState *s, uint64_t value,
234
- uint32_t offset)
235
+static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
236
{
237
- AddressSpace *as = &s->gicv3->dma_as;
238
- MemTxResult res = MEMTX_OK;
239
uint64_t rd1, rd2;
240
241
- /* No fields in dwords 0 or 1 */
242
- offset += NUM_BYTES_IN_DW;
243
- offset += NUM_BYTES_IN_DW;
244
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
245
- MEMTXATTRS_UNSPECIFIED, &res);
246
- if (res != MEMTX_OK) {
247
- return CMD_STALL;
248
- }
249
+ rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
250
+ rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
251
252
- rd1 = FIELD_EX64(value, MOVALL_2, RDBASE1);
253
if (rd1 >= s->gicv3->num_cpu) {
254
qemu_log_mask(LOG_GUEST_ERROR,
255
"%s: RDBASE1 %" PRId64
256
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movall(GICv3ITSState *s, uint64_t value,
257
__func__, rd1, s->gicv3->num_cpu);
258
return CMD_CONTINUE;
259
}
260
-
261
- offset += NUM_BYTES_IN_DW;
262
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
263
- MEMTXATTRS_UNSPECIFIED, &res);
264
- if (res != MEMTX_OK) {
265
- return CMD_STALL;
266
- }
267
-
268
- rd2 = FIELD_EX64(value, MOVALL_3, RDBASE2);
269
if (rd2 >= s->gicv3->num_cpu) {
270
qemu_log_mask(LOG_GUEST_ERROR,
271
"%s: RDBASE2 %" PRId64
272
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movall(GICv3ITSState *s, uint64_t value,
273
return CMD_CONTINUE;
274
}
275
276
-static ItsCmdResult process_movi(GICv3ITSState *s, uint64_t value,
277
- uint32_t offset)
278
+static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
279
{
280
- AddressSpace *as = &s->gicv3->dma_as;
281
MemTxResult res = MEMTX_OK;
282
uint32_t devid, eventid, intid;
283
uint16_t old_icid, new_icid;
284
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, uint64_t value,
285
uint64_t num_eventids;
286
IteEntry ite = {};
287
288
- devid = FIELD_EX64(value, MOVI_0, DEVICEID);
289
-
290
- offset += NUM_BYTES_IN_DW;
291
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
292
- MEMTXATTRS_UNSPECIFIED, &res);
293
- if (res != MEMTX_OK) {
294
- return CMD_STALL;
295
- }
296
- eventid = FIELD_EX64(value, MOVI_1, EVENTID);
297
-
298
- offset += NUM_BYTES_IN_DW;
299
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
300
- MEMTXATTRS_UNSPECIFIED, &res);
301
- if (res != MEMTX_OK) {
302
- return CMD_STALL;
303
- }
304
- new_icid = FIELD_EX64(value, MOVI_2, ICID);
305
+ devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
306
+ eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
307
+ new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);
308
309
if (devid >= s->dt.num_entries) {
310
qemu_log_mask(LOG_GUEST_ERROR,
311
@@ -XXX,XX +XXX,XX @@ static void process_cmdq(GICv3ITSState *s)
312
uint32_t wr_offset = 0;
313
uint32_t rd_offset = 0;
314
uint32_t cq_offset = 0;
315
- uint64_t data;
316
AddressSpace *as = &s->gicv3->dma_as;
317
- MemTxResult res = MEMTX_OK;
318
uint8_t cmd;
319
int i;
320
321
@@ -XXX,XX +XXX,XX @@ static void process_cmdq(GICv3ITSState *s)
322
323
while (wr_offset != rd_offset) {
324
ItsCmdResult result = CMD_CONTINUE;
325
+ void *hostmem;
326
+ hwaddr buflen;
327
+ uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
328
329
cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
330
- data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
331
- MEMTXATTRS_UNSPECIFIED, &res);
332
- if (res != MEMTX_OK) {
333
+
334
+ buflen = GITS_CMDQ_ENTRY_SIZE;
335
+ hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
336
+ &buflen, false, MEMTXATTRS_UNSPECIFIED);
337
+ if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
338
+ if (hostmem) {
339
+ address_space_unmap(as, hostmem, buflen, false, 0);
340
+ }
341
s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
342
qemu_log_mask(LOG_GUEST_ERROR,
343
"%s: could not read command at 0x%" PRIx64 "\n",
344
__func__, s->cq.base_addr + cq_offset);
345
break;
346
}
347
+ for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
348
+ cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
349
+ }
350
+ address_space_unmap(as, hostmem, buflen, false, 0);
351
352
- cmd = (data & CMD_MASK);
353
+ cmd = cmdpkt[0] & CMD_MASK;
354
355
trace_gicv3_its_process_command(rd_offset, cmd);
356
357
switch (cmd) {
358
case GITS_CMD_INT:
359
- result = process_its_cmd(s, data, cq_offset, INTERRUPT);
360
+ result = process_its_cmd(s, cmdpkt, INTERRUPT);
361
break;
362
case GITS_CMD_CLEAR:
363
- result = process_its_cmd(s, data, cq_offset, CLEAR);
364
+ result = process_its_cmd(s, cmdpkt, CLEAR);
365
break;
366
case GITS_CMD_SYNC:
367
/*
368
@@ -XXX,XX +XXX,XX @@ static void process_cmdq(GICv3ITSState *s)
369
*/
370
break;
371
case GITS_CMD_MAPD:
372
- result = process_mapd(s, data, cq_offset);
373
+ result = process_mapd(s, cmdpkt);
374
break;
375
case GITS_CMD_MAPC:
376
- result = process_mapc(s, cq_offset);
377
+ result = process_mapc(s, cmdpkt);
378
break;
379
case GITS_CMD_MAPTI:
380
- result = process_mapti(s, data, cq_offset, false);
381
+ result = process_mapti(s, cmdpkt, false);
382
break;
383
case GITS_CMD_MAPI:
384
- result = process_mapti(s, data, cq_offset, true);
385
+ result = process_mapti(s, cmdpkt, true);
386
break;
387
case GITS_CMD_DISCARD:
388
- result = process_its_cmd(s, data, cq_offset, DISCARD);
389
+ result = process_its_cmd(s, cmdpkt, DISCARD);
390
break;
391
case GITS_CMD_INV:
392
case GITS_CMD_INVALL:
393
@@ -XXX,XX +XXX,XX @@ static void process_cmdq(GICv3ITSState *s)
394
}
395
break;
396
case GITS_CMD_MOVI:
397
- result = process_movi(s, data, cq_offset);
398
+ result = process_movi(s, cmdpkt);
399
break;
400
case GITS_CMD_MOVALL:
401
- result = process_movall(s, data, cq_offset);
402
+ result = process_movall(s, cmdpkt);
403
break;
404
default:
405
break;
406
@@ -XXX,XX +XXX,XX @@ static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
407
{
408
GICv3ITSState *s = (GICv3ITSState *)opaque;
409
bool result = true;
410
- uint32_t devid = 0;
411
412
trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
413
414
switch (offset) {
415
case GITS_TRANSLATER:
416
if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
417
- devid = attrs.requester_id;
418
- result = process_its_cmd(s, data, devid, NONE);
419
+ result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
420
}
421
break;
422
default:
423
--
424
2.25.1
425
426
diff view generated by jsdifflib
New patch
1
1
In the ITS, a DTE is an entry in the device table, which contains
2
multiple fields. Currently the function get_dte() which reads one
3
entry from the device table returns it as a raw 64-bit integer,
4
which we then pass around in that form, only extracting fields
5
from it as we need them.
6
7
Create a real C struct with the same fields as the DTE, and
8
populate it in get_dte(), so that that function and update_dte()
9
are the only ones that need to care about the in-guest-memory
10
format of the DTE.
11
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20220201193207.2771604-3-peter.maydell@linaro.org
15
---
16
hw/intc/arm_gicv3_its.c | 111 ++++++++++++++++++++--------------------
17
1 file changed, 56 insertions(+), 55 deletions(-)
18
19
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/intc/arm_gicv3_its.c
22
+++ b/hw/intc/arm_gicv3_its.c
23
@@ -XXX,XX +XXX,XX @@ typedef struct {
24
uint64_t itel;
25
} IteEntry;
26
27
+typedef struct DTEntry {
28
+ bool valid;
29
+ unsigned size;
30
+ uint64_t ittaddr;
31
+} DTEntry;
32
+
33
/*
34
* The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
35
* if a command parameter is not correct. These include both "stall
36
@@ -XXX,XX +XXX,XX @@ static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
37
return FIELD_EX64(*cte, CTE, VALID);
38
}
39
40
-static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
41
+static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
42
IteEntry ite)
43
{
44
AddressSpace *as = &s->gicv3->dma_as;
45
- uint64_t itt_addr;
46
MemTxResult res = MEMTX_OK;
47
48
- itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
49
- itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
50
-
51
- address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
52
+ address_space_stq_le(as, dte->ittaddr + (eventid * (sizeof(uint64_t) +
53
sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
54
&res);
55
56
if (res == MEMTX_OK) {
57
- address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
58
+ address_space_stl_le(as, dte->ittaddr + (eventid * (sizeof(uint64_t) +
59
sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
60
MEMTXATTRS_UNSPECIFIED, &res);
61
}
62
@@ -XXX,XX +XXX,XX @@ static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
63
}
64
}
65
66
-static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
67
+static bool get_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
68
uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
69
{
70
AddressSpace *as = &s->gicv3->dma_as;
71
- uint64_t itt_addr;
72
bool status = false;
73
IteEntry ite = {};
74
75
- itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
76
- itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
77
-
78
- ite.itel = address_space_ldq_le(as, itt_addr +
79
+ ite.itel = address_space_ldq_le(as, dte->ittaddr +
80
(eventid * (sizeof(uint64_t) +
81
sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
82
res);
83
84
if (*res == MEMTX_OK) {
85
- ite.iteh = address_space_ldl_le(as, itt_addr +
86
+ ite.iteh = address_space_ldl_le(as, dte->ittaddr +
87
(eventid * (sizeof(uint64_t) +
88
sizeof(uint32_t))) + sizeof(uint32_t),
89
MEMTXATTRS_UNSPECIFIED, res);
90
@@ -XXX,XX +XXX,XX @@ static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
91
return status;
92
}
93
94
-static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
95
+/*
96
+ * Read the Device Table entry at index @devid. On success (including
97
+ * successfully determining that there is no valid DTE for this index),
98
+ * we return MEMTX_OK and populate the DTEntry struct accordingly.
99
+ * If there is an error reading memory then we return the error code.
100
+ */
101
+static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
102
{
103
+ MemTxResult res = MEMTX_OK;
104
AddressSpace *as = &s->gicv3->dma_as;
105
- uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, res);
106
+ uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
107
+ uint64_t dteval;
108
109
if (entry_addr == -1) {
110
- return 0; /* a DTE entry with the Valid bit clear */
111
+ /* No L2 table entry, i.e. no valid DTE, or a memory error */
112
+ dte->valid = false;
113
+ return res;
114
}
115
- return address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
116
+ dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
117
+ if (res != MEMTX_OK) {
118
+ return res;
119
+ }
120
+ dte->valid = FIELD_EX64(dteval, DTE, VALID);
121
+ dte->size = FIELD_EX64(dteval, DTE, SIZE);
122
+ /* DTE word field stores bits [51:8] of the ITT address */
123
+ dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
124
+ return MEMTX_OK;
125
}
126
127
/*
128
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
129
uint32_t eventid, ItsCmdType cmd)
130
{
131
MemTxResult res = MEMTX_OK;
132
- bool dte_valid;
133
- uint64_t dte = 0;
134
uint64_t num_eventids;
135
uint16_t icid = 0;
136
uint32_t pIntid = 0;
137
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
138
uint64_t cte = 0;
139
bool cte_valid = false;
140
uint64_t rdbase;
141
+ DTEntry dte;
142
143
if (devid >= s->dt.num_entries) {
144
qemu_log_mask(LOG_GUEST_ERROR,
145
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
146
return CMD_CONTINUE;
147
}
148
149
- dte = get_dte(s, devid, &res);
150
-
151
- if (res != MEMTX_OK) {
152
+ if (get_dte(s, devid, &dte) != MEMTX_OK) {
153
return CMD_STALL;
154
}
155
- dte_valid = FIELD_EX64(dte, DTE, VALID);
156
-
157
- if (!dte_valid) {
158
+ if (!dte.valid) {
159
qemu_log_mask(LOG_GUEST_ERROR,
160
"%s: invalid command attributes: "
161
- "invalid dte: %"PRIx64" for %d\n",
162
- __func__, dte, devid);
163
+ "invalid dte for %d\n", __func__, devid);
164
return CMD_CONTINUE;
165
}
166
167
- num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
168
-
169
+ num_eventids = 1ULL << (dte.size + 1);
170
if (eventid >= num_eventids) {
171
qemu_log_mask(LOG_GUEST_ERROR,
172
"%s: invalid command attributes: eventid %d >= %"
173
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
174
return CMD_CONTINUE;
175
}
176
177
- ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
178
+ ite_valid = get_ite(s, eventid, &dte, &icid, &pIntid, &res);
179
if (res != MEMTX_OK) {
180
return CMD_STALL;
181
}
182
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
183
if (cmd == DISCARD) {
184
IteEntry ite = {};
185
/* remove mapping from interrupt translation table */
186
- return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
187
+ return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
188
}
189
return CMD_CONTINUE;
190
}
191
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
192
uint32_t pIntid = 0;
193
uint64_t num_eventids;
194
uint32_t num_intids;
195
- bool dte_valid;
196
- MemTxResult res = MEMTX_OK;
197
uint16_t icid = 0;
198
- uint64_t dte = 0;
199
IteEntry ite = {};
200
+ DTEntry dte;
201
202
devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
203
eventid = cmdpkt[1] & EVENTID_MASK;
204
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
205
return CMD_CONTINUE;
206
}
207
208
- dte = get_dte(s, devid, &res);
209
-
210
- if (res != MEMTX_OK) {
211
+ if (get_dte(s, devid, &dte) != MEMTX_OK) {
212
return CMD_STALL;
213
}
214
- dte_valid = FIELD_EX64(dte, DTE, VALID);
215
- num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
216
+ num_eventids = 1ULL << (dte.size + 1);
217
num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
218
219
if ((icid >= s->ct.num_entries)
220
- || !dte_valid || (eventid >= num_eventids) ||
221
+ || !dte.valid || (eventid >= num_eventids) ||
222
(((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
223
(pIntid != INTID_SPURIOUS))) {
224
qemu_log_mask(LOG_GUEST_ERROR,
225
"%s: invalid command attributes "
226
"icid %d or eventid %d or pIntid %d or"
227
"unmapped dte %d\n", __func__, icid, eventid,
228
- pIntid, dte_valid);
229
+ pIntid, dte.valid);
230
/*
231
* in this implementation, in case of error
232
* we ignore this command and move onto the next
233
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
234
}
235
236
/* add ite entry to interrupt translation table */
237
- ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
238
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, true);
239
ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
240
ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
241
ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
242
ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
243
244
- return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
245
+ return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
246
}
247
248
static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
249
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
250
uint16_t old_icid, new_icid;
251
uint64_t old_cte, new_cte;
252
uint64_t old_rdbase, new_rdbase;
253
- uint64_t dte;
254
- bool dte_valid, ite_valid, cte_valid;
255
+ bool ite_valid, cte_valid;
256
uint64_t num_eventids;
257
IteEntry ite = {};
258
+ DTEntry dte;
259
260
devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
261
eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
262
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
263
__func__, devid, s->dt.num_entries);
264
return CMD_CONTINUE;
265
}
266
- dte = get_dte(s, devid, &res);
267
- if (res != MEMTX_OK) {
268
+ if (get_dte(s, devid, &dte) != MEMTX_OK) {
269
return CMD_STALL;
270
}
271
272
- dte_valid = FIELD_EX64(dte, DTE, VALID);
273
- if (!dte_valid) {
274
+ if (!dte.valid) {
275
qemu_log_mask(LOG_GUEST_ERROR,
276
"%s: invalid command attributes: "
277
- "invalid dte: %"PRIx64" for %d\n",
278
- __func__, dte, devid);
279
+ "invalid dte for %d\n", __func__, devid);
280
return CMD_CONTINUE;
281
}
282
283
- num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
284
+ num_eventids = 1ULL << (dte.size + 1);
285
if (eventid >= num_eventids) {
286
qemu_log_mask(LOG_GUEST_ERROR,
287
"%s: invalid command attributes: eventid %d >= %"
288
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
289
return CMD_CONTINUE;
290
}
291
292
- ite_valid = get_ite(s, eventid, dte, &old_icid, &intid, &res);
293
+ ite_valid = get_ite(s, eventid, &dte, &old_icid, &intid, &res);
294
if (res != MEMTX_OK) {
295
return CMD_STALL;
296
}
297
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
298
ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, intid);
299
ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
300
ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, new_icid);
301
- return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
302
+ return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
303
}
304
305
/*
306
--
307
2.25.1
308
309
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
Make update_dte() take a DTEntry struct rather than all the fields of
2
the new DTE as separate arguments.
2
3
3
Enable SVE in the KVM guest when the 'max' cpu type is configured
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
and KVM supports it. KVM SVE requires use of the new finalize
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
vcpu ioctl, so we add that now too. For starters SVE can only be
6
Message-id: 20220201193207.2771604-4-peter.maydell@linaro.org
6
turned on or off, getting all vector lengths the host CPU supports
7
---
7
when on. We'll add the other SVE CPU properties in later patches.
8
hw/intc/arm_gicv3_its.c | 35 ++++++++++++++++++-----------------
9
1 file changed, 18 insertions(+), 17 deletions(-)
8
10
9
Signed-off-by: Andrew Jones <drjones@redhat.com>
11
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Reviewed-by: Eric Auger <eric.auger@redhat.com>
12
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
13
Reviewed-by: Beata Michalska <beata.michalska@linaro.org>
14
Message-id: 20191031142734.8590-7-drjones@redhat.com
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
17
target/arm/kvm_arm.h | 27 +++++++++++++++++++++++++++
18
target/arm/cpu64.c | 17 ++++++++++++++---
19
target/arm/kvm.c | 5 +++++
20
target/arm/kvm64.c | 20 +++++++++++++++++++-
21
tests/arm-cpu-features.c | 4 ++++
22
5 files changed, 69 insertions(+), 4 deletions(-)
23
24
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
25
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
26
--- a/target/arm/kvm_arm.h
13
--- a/hw/intc/arm_gicv3_its.c
27
+++ b/target/arm/kvm_arm.h
14
+++ b/hw/intc/arm_gicv3_its.c
28
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
29
*/
16
return update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
30
int kvm_arm_vcpu_init(CPUState *cs);
17
}
31
18
32
+/**
19
-static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
33
+ * kvm_arm_vcpu_finalize
20
- uint8_t size, uint64_t itt_addr)
34
+ * @cs: CPUState
21
+/*
35
+ * @feature: int
22
+ * Update the Device Table entry for @devid to @dte. Returns true
36
+ *
23
+ * on success, false if there was a memory access error.
37
+ * Finalizes the configuration of the specified VCPU feature by
38
+ * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring
39
+ * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of
40
+ * KVM's API documentation.
41
+ *
42
+ * Returns: 0 if success else < 0 error code
43
+ */
24
+ */
44
+int kvm_arm_vcpu_finalize(CPUState *cs, int feature);
25
+static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
45
+
26
{
46
/**
27
AddressSpace *as = &s->gicv3->dma_as;
47
* kvm_arm_register_device:
28
uint64_t entry_addr;
48
* @mr: memory region for this device
29
- uint64_t dte = 0;
49
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_aarch32_supported(CPUState *cs);
30
+ uint64_t dteval = 0;
50
*/
31
MemTxResult res = MEMTX_OK;
51
bool kvm_arm_pmu_supported(CPUState *cs);
32
52
33
if (s->dt.valid) {
53
+/**
34
- if (valid) {
54
+ * bool kvm_arm_sve_supported:
35
+ if (dte->valid) {
55
+ * @cs: CPUState
36
/* add mapping entry to device table */
56
+ *
37
- dte = FIELD_DP64(dte, DTE, VALID, 1);
57
+ * Returns true if the KVM VCPU can enable SVE and false otherwise.
38
- dte = FIELD_DP64(dte, DTE, SIZE, size);
58
+ */
39
- dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
59
+bool kvm_arm_sve_supported(CPUState *cs);
40
+ dteval = FIELD_DP64(dteval, DTE, VALID, 1);
60
+
41
+ dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
61
/**
42
+ dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
62
* kvm_arm_get_max_vm_ipa_size - Returns the number of bits in the
43
}
63
* IPA address space supported by KVM
44
} else {
64
@@ -XXX,XX +XXX,XX @@ static inline bool kvm_arm_pmu_supported(CPUState *cs)
45
return true;
65
return false;
46
@@ -XXX,XX +XXX,XX @@ static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
47
/* No L2 table for this index: discard write and continue */
48
return true;
49
}
50
- address_space_stq_le(as, entry_addr, dte, MEMTXATTRS_UNSPECIFIED, &res);
51
+ address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
52
return res == MEMTX_OK;
66
}
53
}
67
54
68
+static inline bool kvm_arm_sve_supported(CPUState *cs)
55
static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
69
+{
70
+ return false;
71
+}
72
+
73
static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms)
74
{
56
{
75
return -ENOENT;
57
uint32_t devid;
76
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
58
- uint8_t size;
77
index XXXXXXX..XXXXXXX 100644
59
- uint64_t itt_addr;
78
--- a/target/arm/cpu64.c
60
- bool valid;
79
+++ b/target/arm/cpu64.c
61
+ DTEntry dte;
80
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
62
81
return;
63
devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
64
- size = cmdpkt[1] & SIZE_MASK;
65
- itt_addr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
66
- valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
67
+ dte.size = cmdpkt[1] & SIZE_MASK;
68
+ dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
69
+ dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
70
71
if ((devid >= s->dt.num_entries) ||
72
- (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
73
+ (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
74
qemu_log_mask(LOG_GUEST_ERROR,
75
"ITS MAPD: invalid device table attributes "
76
- "devid %d or size %d\n", devid, size);
77
+ "devid %d or size %d\n", devid, dte.size);
78
/*
79
* in this implementation, in case of error
80
* we ignore this command and move onto the next
81
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
82
return CMD_CONTINUE;
82
}
83
}
83
84
84
+ if (value && kvm_enabled() && !kvm_arm_sve_supported(CPU(cpu))) {
85
- return update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE : CMD_STALL;
85
+ error_setg(errp, "'sve' feature not supported by KVM on this host");
86
+ return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
86
+ return;
87
+ }
88
+
89
t = cpu->isar.id_aa64pfr0;
90
t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
91
cpu->isar.id_aa64pfr0 = t;
92
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
93
{
94
ARMCPU *cpu = ARM_CPU(obj);
95
uint32_t vq;
96
+ uint64_t t;
97
98
if (kvm_enabled()) {
99
kvm_arm_set_cpu_features_from_host(cpu);
100
+ if (kvm_arm_sve_supported(CPU(cpu))) {
101
+ t = cpu->isar.id_aa64pfr0;
102
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
103
+ cpu->isar.id_aa64pfr0 = t;
104
+ }
105
} else {
106
- uint64_t t;
107
uint32_t u;
108
aarch64_a57_initfn(obj);
109
110
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
111
112
object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
113
cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
114
- object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
115
- cpu_arm_set_sve, NULL, NULL, &error_fatal);
116
117
for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
118
char name[8];
119
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
120
cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
121
}
122
}
123
+
124
+ object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
125
+ cpu_arm_set_sve, NULL, NULL, &error_fatal);
126
}
87
}
127
88
128
struct ARMCPUInfo {
89
static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
129
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
130
index XXXXXXX..XXXXXXX 100644
131
--- a/target/arm/kvm.c
132
+++ b/target/arm/kvm.c
133
@@ -XXX,XX +XXX,XX @@ int kvm_arm_vcpu_init(CPUState *cs)
134
return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
135
}
136
137
+int kvm_arm_vcpu_finalize(CPUState *cs, int feature)
138
+{
139
+ return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature);
140
+}
141
+
142
void kvm_arm_init_serror_injection(CPUState *cs)
143
{
144
cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
145
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/target/arm/kvm64.c
148
+++ b/target/arm/kvm64.c
149
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_aarch32_supported(CPUState *cpu)
150
return kvm_check_extension(s, KVM_CAP_ARM_EL1_32BIT);
151
}
152
153
+bool kvm_arm_sve_supported(CPUState *cpu)
154
+{
155
+ KVMState *s = KVM_STATE(current_machine->accelerator);
156
+
157
+ return kvm_check_extension(s, KVM_CAP_ARM_SVE);
158
+}
159
+
160
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
161
162
int kvm_arch_init_vcpu(CPUState *cs)
163
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
164
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
165
}
166
if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
167
- cpu->has_pmu = false;
168
+ cpu->has_pmu = false;
169
}
170
if (cpu->has_pmu) {
171
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
172
} else {
173
unset_feature(&env->features, ARM_FEATURE_PMU);
174
}
175
+ if (cpu_isar_feature(aa64_sve, cpu)) {
176
+ assert(kvm_arm_sve_supported(cs));
177
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
178
+ }
179
180
/* Do KVM_ARM_VCPU_INIT ioctl */
181
ret = kvm_arm_vcpu_init(cs);
182
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
183
return ret;
184
}
185
186
+ if (cpu_isar_feature(aa64_sve, cpu)) {
187
+ ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
188
+ if (ret) {
189
+ return ret;
190
+ }
191
+ }
192
+
193
/*
194
* When KVM is in use, PSCI is emulated in-kernel and not by qemu.
195
* Currently KVM has its own idea about MPIDR assignment, so we
196
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
197
index XXXXXXX..XXXXXXX 100644
198
--- a/tests/arm-cpu-features.c
199
+++ b/tests/arm-cpu-features.c
200
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
201
assert_has_feature(qts, "host", "aarch64");
202
assert_has_feature(qts, "host", "pmu");
203
204
+ assert_has_feature(qts, "max", "sve");
205
+
206
assert_error(qts, "cortex-a15",
207
"We cannot guarantee the CPU type 'cortex-a15' works "
208
"with KVM on this host", NULL);
209
} else {
210
assert_has_not_feature(qts, "host", "aarch64");
211
assert_has_not_feature(qts, "host", "pmu");
212
+
213
+ assert_has_not_feature(qts, "max", "sve");
214
}
215
216
qtest_quit(qts);
217
--
90
--
218
2.20.1
91
2.25.1
219
92
220
93
diff view generated by jsdifflib
New patch
1
1
In the ITS, a CTE is an entry in the collection table, which contains
2
multiple fields. Currently the function get_cte() which reads one
3
entry from the device table returns a success/failure boolean and
4
passes back the raw 64-bit integer CTE value via a pointer argument.
5
We then extract fields from the CTE as we need them.
6
7
Create a real C struct with the same fields as the CTE, and
8
populate it in get_cte(), so that that function and update_cte()
9
are the only ones which need to care about the in-guest-memory
10
format of the CTE.
11
12
This brings get_cte()'s API into line with get_dte().
13
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 20220201193207.2771604-5-peter.maydell@linaro.org
17
---
18
hw/intc/arm_gicv3_its.c | 96 ++++++++++++++++++++++-------------------
19
1 file changed, 52 insertions(+), 44 deletions(-)
20
21
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/hw/intc/arm_gicv3_its.c
24
+++ b/hw/intc/arm_gicv3_its.c
25
@@ -XXX,XX +XXX,XX @@ typedef struct DTEntry {
26
uint64_t ittaddr;
27
} DTEntry;
28
29
+typedef struct CTEntry {
30
+ bool valid;
31
+ uint32_t rdbase;
32
+} CTEntry;
33
+
34
/*
35
* The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
36
* if a command parameter is not correct. These include both "stall
37
@@ -XXX,XX +XXX,XX @@ static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
38
return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
39
}
40
41
-static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
42
- MemTxResult *res)
43
+/*
44
+ * Read the Collection Table entry at index @icid. On success (including
45
+ * successfully determining that there is no valid CTE for this index),
46
+ * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
47
+ * If there is an error reading memory then we return the error code.
48
+ */
49
+static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
50
{
51
AddressSpace *as = &s->gicv3->dma_as;
52
- uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, res);
53
+ MemTxResult res = MEMTX_OK;
54
+ uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
55
+ uint64_t cteval;
56
57
if (entry_addr == -1) {
58
- return false; /* not valid */
59
+ /* No L2 table entry, i.e. no valid CTE, or a memory error */
60
+ cte->valid = false;
61
+ return res;
62
}
63
64
- *cte = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
65
- return FIELD_EX64(*cte, CTE, VALID);
66
+ cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
67
+ if (res != MEMTX_OK) {
68
+ return res;
69
+ }
70
+ cte->valid = FIELD_EX64(cteval, CTE, VALID);
71
+ cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
72
+ return MEMTX_OK;
73
}
74
75
static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
76
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
77
uint16_t icid = 0;
78
uint32_t pIntid = 0;
79
bool ite_valid = false;
80
- uint64_t cte = 0;
81
- bool cte_valid = false;
82
- uint64_t rdbase;
83
DTEntry dte;
84
+ CTEntry cte;
85
86
if (devid >= s->dt.num_entries) {
87
qemu_log_mask(LOG_GUEST_ERROR,
88
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
89
return CMD_CONTINUE;
90
}
91
92
- cte_valid = get_cte(s, icid, &cte, &res);
93
- if (res != MEMTX_OK) {
94
+ if (get_cte(s, icid, &cte) != MEMTX_OK) {
95
return CMD_STALL;
96
}
97
- if (!cte_valid) {
98
+ if (!cte.valid) {
99
qemu_log_mask(LOG_GUEST_ERROR,
100
- "%s: invalid command attributes: "
101
- "invalid cte: %"PRIx64"\n",
102
- __func__, cte);
103
+ "%s: invalid command attributes: invalid CTE\n",
104
+ __func__);
105
return CMD_CONTINUE;
106
}
107
108
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
109
* Current implementation only supports rdbase == procnum
110
* Hence rdbase physical address is ignored
111
*/
112
- rdbase = FIELD_EX64(cte, CTE, RDBASE);
113
-
114
- if (rdbase >= s->gicv3->num_cpu) {
115
+ if (cte.rdbase >= s->gicv3->num_cpu) {
116
return CMD_CONTINUE;
117
}
118
119
if ((cmd == CLEAR) || (cmd == DISCARD)) {
120
- gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
121
+ gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], pIntid, 0);
122
} else {
123
- gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
124
+ gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], pIntid, 1);
125
}
126
127
if (cmd == DISCARD) {
128
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
129
MemTxResult res = MEMTX_OK;
130
uint32_t devid, eventid, intid;
131
uint16_t old_icid, new_icid;
132
- uint64_t old_cte, new_cte;
133
- uint64_t old_rdbase, new_rdbase;
134
- bool ite_valid, cte_valid;
135
+ bool ite_valid;
136
uint64_t num_eventids;
137
IteEntry ite = {};
138
DTEntry dte;
139
+ CTEntry old_cte, new_cte;
140
141
devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
142
eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
143
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
144
return CMD_CONTINUE;
145
}
146
147
- cte_valid = get_cte(s, old_icid, &old_cte, &res);
148
- if (res != MEMTX_OK) {
149
+ if (get_cte(s, old_icid, &old_cte) != MEMTX_OK) {
150
return CMD_STALL;
151
}
152
- if (!cte_valid) {
153
+ if (!old_cte.valid) {
154
qemu_log_mask(LOG_GUEST_ERROR,
155
"%s: invalid command attributes: "
156
- "invalid cte: %"PRIx64"\n",
157
- __func__, old_cte);
158
+ "invalid CTE for old ICID 0x%x\n",
159
+ __func__, old_icid);
160
return CMD_CONTINUE;
161
}
162
163
- cte_valid = get_cte(s, new_icid, &new_cte, &res);
164
- if (res != MEMTX_OK) {
165
+ if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
166
return CMD_STALL;
167
}
168
- if (!cte_valid) {
169
+ if (!new_cte.valid) {
170
qemu_log_mask(LOG_GUEST_ERROR,
171
"%s: invalid command attributes: "
172
- "invalid cte: %"PRIx64"\n",
173
- __func__, new_cte);
174
+ "invalid CTE for new ICID 0x%x\n",
175
+ __func__, new_icid);
176
return CMD_CONTINUE;
177
}
178
179
- old_rdbase = FIELD_EX64(old_cte, CTE, RDBASE);
180
- if (old_rdbase >= s->gicv3->num_cpu) {
181
+ if (old_cte.rdbase >= s->gicv3->num_cpu) {
182
qemu_log_mask(LOG_GUEST_ERROR,
183
- "%s: CTE has invalid rdbase 0x%"PRIx64"\n",
184
- __func__, old_rdbase);
185
+ "%s: CTE has invalid rdbase 0x%x\n",
186
+ __func__, old_cte.rdbase);
187
return CMD_CONTINUE;
188
}
189
190
- new_rdbase = FIELD_EX64(new_cte, CTE, RDBASE);
191
- if (new_rdbase >= s->gicv3->num_cpu) {
192
+ if (new_cte.rdbase >= s->gicv3->num_cpu) {
193
qemu_log_mask(LOG_GUEST_ERROR,
194
- "%s: CTE has invalid rdbase 0x%"PRIx64"\n",
195
- __func__, new_rdbase);
196
+ "%s: CTE has invalid rdbase 0x%x\n",
197
+ __func__, new_cte.rdbase);
198
return CMD_CONTINUE;
199
}
200
201
- if (old_rdbase != new_rdbase) {
202
+ if (old_cte.rdbase != new_cte.rdbase) {
203
/* Move the LPI from the old redistributor to the new one */
204
- gicv3_redist_mov_lpi(&s->gicv3->cpu[old_rdbase],
205
- &s->gicv3->cpu[new_rdbase],
206
+ gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
207
+ &s->gicv3->cpu[new_cte.rdbase],
208
intid);
209
}
210
211
--
212
2.25.1
213
214
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
Make update_cte() take a CTEntry struct rather than all the fields
2
of the new CTE as separate arguments.
2
3
3
Introduce cpu properties to give fine control over SVE vector lengths.
4
This brings it into line with the update_dte() API.
4
We introduce a property for each valid length up to the current
5
maximum supported, which is 2048-bits. The properties are named, e.g.
6
sve128, sve256, sve384, sve512, ..., where the number is the number of
7
bits. See the updates to docs/arm-cpu-features.rst for a description
8
of the semantics and for example uses.
9
5
10
Note, as sve-max-vq is still present and we'd like to be able to
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
support qmp_query_cpu_model_expansion with guests launched with e.g.
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
-cpu max,sve-max-vq=8 on their command lines, then we do allow
8
Message-id: 20220201193207.2771604-6-peter.maydell@linaro.org
13
sve-max-vq and sve<N> properties to be provided at the same time, but
9
---
14
this is not recommended, and is why sve-max-vq is not mentioned in the
10
hw/intc/arm_gicv3_its.c | 32 +++++++++++++++++---------------
15
document. If sve-max-vq is provided then it enables all lengths smaller
11
1 file changed, 17 insertions(+), 15 deletions(-)
16
than and including the max and disables all lengths larger. It also has
17
the side-effect that no larger lengths may be enabled and that the max
18
itself cannot be disabled. Smaller non-power-of-two lengths may,
19
however, be disabled, e.g. -cpu max,sve-max-vq=4,sve384=off provides a
20
guest the vector lengths 128, 256, and 512 bits.
21
12
22
This patch has been co-authored with Richard Henderson, who reworked
13
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
23
the target/arm/cpu64.c changes in order to push all the validation and
24
auto-enabling/disabling steps into the finalizer, resulting in a nice
25
LOC reduction.
26
27
Signed-off-by: Andrew Jones <drjones@redhat.com>
28
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
29
Reviewed-by: Eric Auger <eric.auger@redhat.com>
30
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
31
Reviewed-by: Beata Michalska <beata.michalska@linaro.org>
32
Message-id: 20191031142734.8590-5-drjones@redhat.com
33
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
34
---
35
include/qemu/bitops.h | 1 +
36
target/arm/cpu.h | 19 ++++
37
target/arm/cpu.c | 19 ++++
38
target/arm/cpu64.c | 192 ++++++++++++++++++++++++++++++++++++-
39
target/arm/helper.c | 10 +-
40
target/arm/monitor.c | 12 +++
41
tests/arm-cpu-features.c | 194 ++++++++++++++++++++++++++++++++++++++
42
docs/arm-cpu-features.rst | 168 +++++++++++++++++++++++++++++++--
43
8 files changed, 606 insertions(+), 9 deletions(-)
44
45
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
46
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
47
--- a/include/qemu/bitops.h
15
--- a/hw/intc/arm_gicv3_its.c
48
+++ b/include/qemu/bitops.h
16
+++ b/hw/intc/arm_gicv3_its.c
49
@@ -XXX,XX +XXX,XX @@
17
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
50
#define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE)
18
return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
51
52
#define BIT(nr) (1UL << (nr))
53
+#define BIT_ULL(nr) (1ULL << (nr))
54
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
55
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
56
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
57
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/cpu.h
60
+++ b/target/arm/cpu.h
61
@@ -XXX,XX +XXX,XX @@ typedef struct {
62
63
#ifdef TARGET_AARCH64
64
# define ARM_MAX_VQ 16
65
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
66
+uint32_t arm_cpu_vq_map_next_smaller(ARMCPU *cpu, uint32_t vq);
67
#else
68
# define ARM_MAX_VQ 1
69
+static inline void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { }
70
+static inline uint32_t arm_cpu_vq_map_next_smaller(ARMCPU *cpu, uint32_t vq)
71
+{ return 0; }
72
#endif
73
74
typedef struct ARMVectorReg {
75
@@ -XXX,XX +XXX,XX @@ struct ARMCPU {
76
77
/* Used to set the maximum vector length the cpu will support. */
78
uint32_t sve_max_vq;
79
+
80
+ /*
81
+ * In sve_vq_map each set bit is a supported vector length of
82
+ * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector
83
+ * length in quadwords.
84
+ *
85
+ * While processing properties during initialization, corresponding
86
+ * sve_vq_init bits are set for bits in sve_vq_map that have been
87
+ * set by properties.
88
+ */
89
+ DECLARE_BITMAP(sve_vq_map, ARM_MAX_VQ);
90
+ DECLARE_BITMAP(sve_vq_init, ARM_MAX_VQ);
91
};
92
93
void arm_cpu_post_init(Object *obj);
94
@@ -XXX,XX +XXX,XX @@ static inline int arm_feature(CPUARMState *env, int feature)
95
return (env->features & (1ULL << feature)) != 0;
96
}
19
}
97
20
98
+void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);
21
-static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
99
+
22
- uint64_t rdbase)
100
#if !defined(CONFIG_USER_ONLY)
23
+/*
101
/* Return true if exception levels below EL3 are in secure state,
24
+ * Update the Collection Table entry for @icid to @cte. Returns true
102
* or would be following an exception return to that level.
25
+ * on success, false if there was a memory access error.
103
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
26
+ */
104
index XXXXXXX..XXXXXXX 100644
27
+static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
105
--- a/target/arm/cpu.c
28
{
106
+++ b/target/arm/cpu.c
29
AddressSpace *as = &s->gicv3->dma_as;
107
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_finalizefn(Object *obj)
30
uint64_t entry_addr;
108
#endif
31
- uint64_t cte = 0;
32
+ uint64_t cteval = 0;
33
MemTxResult res = MEMTX_OK;
34
35
if (!s->ct.valid) {
36
return true;
37
}
38
39
- if (valid) {
40
+ if (cte->valid) {
41
/* add mapping entry to collection table */
42
- cte = FIELD_DP64(cte, CTE, VALID, 1);
43
- cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
44
+ cteval = FIELD_DP64(cteval, CTE, VALID, 1);
45
+ cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
46
}
47
48
entry_addr = table_entry_addr(s, &s->ct, icid, &res);
49
@@ -XXX,XX +XXX,XX @@ static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
50
return true;
51
}
52
53
- address_space_stq_le(as, entry_addr, cte, MEMTXATTRS_UNSPECIFIED, &res);
54
+ address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
55
return res == MEMTX_OK;
109
}
56
}
110
57
111
+void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
58
static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
112
+{
113
+ Error *local_err = NULL;
114
+
115
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
116
+ arm_cpu_sve_finalize(cpu, &local_err);
117
+ if (local_err != NULL) {
118
+ error_propagate(errp, local_err);
119
+ return;
120
+ }
121
+ }
122
+}
123
+
124
static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
125
{
59
{
126
CPUState *cs = CPU(dev);
60
uint16_t icid;
127
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
61
- uint64_t rdbase;
128
return;
62
- bool valid;
63
+ CTEntry cte;
64
65
icid = cmdpkt[2] & ICID_MASK;
66
67
- rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
68
- rdbase &= RDBASE_PROCNUM_MASK;
69
+ cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
70
+ cte.rdbase &= RDBASE_PROCNUM_MASK;
71
72
- valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
73
+ cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
74
75
- if ((icid >= s->ct.num_entries) || (rdbase >= s->gicv3->num_cpu)) {
76
+ if ((icid >= s->ct.num_entries) || (cte.rdbase >= s->gicv3->num_cpu)) {
77
qemu_log_mask(LOG_GUEST_ERROR,
78
"ITS MAPC: invalid collection table attributes "
79
- "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
80
+ "icid %d rdbase %u\n", icid, cte.rdbase);
81
/*
82
* in this implementation, in case of error
83
* we ignore this command and move onto the next
84
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
85
return CMD_CONTINUE;
129
}
86
}
130
87
131
+ arm_cpu_finalize_features(cpu, &local_err);
88
- return update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
132
+ if (local_err != NULL) {
89
+ return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
133
+ error_propagate(errp, local_err);
134
+ return;
135
+ }
136
+
137
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
138
cpu->has_vfp != cpu->has_neon) {
139
/*
140
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
141
index XXXXXXX..XXXXXXX 100644
142
--- a/target/arm/cpu64.c
143
+++ b/target/arm/cpu64.c
144
@@ -XXX,XX +XXX,XX @@ static void aarch64_a72_initfn(Object *obj)
145
define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
146
}
90
}
147
91
148
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
149
+{
150
+ /*
151
+ * If any vector lengths are explicitly enabled with sve<N> properties,
152
+ * then all other lengths are implicitly disabled. If sve-max-vq is
153
+ * specified then it is the same as explicitly enabling all lengths
154
+ * up to and including the specified maximum, which means all larger
155
+ * lengths will be implicitly disabled. If no sve<N> properties
156
+ * are enabled and sve-max-vq is not specified, then all lengths not
157
+ * explicitly disabled will be enabled. Additionally, all power-of-two
158
+ * vector lengths less than the maximum enabled length will be
159
+ * automatically enabled and all vector lengths larger than the largest
160
+ * disabled power-of-two vector length will be automatically disabled.
161
+ * Errors are generated if the user provided input that interferes with
162
+ * any of the above. Finally, if SVE is not disabled, then at least one
163
+ * vector length must be enabled.
164
+ */
165
+ DECLARE_BITMAP(tmp, ARM_MAX_VQ);
166
+ uint32_t vq, max_vq = 0;
167
+
168
+ /*
169
+ * Process explicit sve<N> properties.
170
+ * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
171
+ * Check first for any sve<N> enabled.
172
+ */
173
+ if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) {
174
+ max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1;
175
+
176
+ if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
177
+ error_setg(errp, "cannot enable sve%d", max_vq * 128);
178
+ error_append_hint(errp, "sve%d is larger than the maximum vector "
179
+ "length, sve-max-vq=%d (%d bits)\n",
180
+ max_vq * 128, cpu->sve_max_vq,
181
+ cpu->sve_max_vq * 128);
182
+ return;
183
+ }
184
+
185
+ /* Propagate enabled bits down through required powers-of-two. */
186
+ for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
187
+ if (!test_bit(vq - 1, cpu->sve_vq_init)) {
188
+ set_bit(vq - 1, cpu->sve_vq_map);
189
+ }
190
+ }
191
+ } else if (cpu->sve_max_vq == 0) {
192
+ /*
193
+ * No explicit bits enabled, and no implicit bits from sve-max-vq.
194
+ */
195
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
196
+ /* SVE is disabled and so are all vector lengths. Good. */
197
+ return;
198
+ }
199
+
200
+ /* Disabling a power-of-two disables all larger lengths. */
201
+ if (test_bit(0, cpu->sve_vq_init)) {
202
+ error_setg(errp, "cannot disable sve128");
203
+ error_append_hint(errp, "Disabling sve128 results in all vector "
204
+ "lengths being disabled.\n");
205
+ error_append_hint(errp, "With SVE enabled, at least one vector "
206
+ "length must be enabled.\n");
207
+ return;
208
+ }
209
+ for (vq = 2; vq <= ARM_MAX_VQ; vq <<= 1) {
210
+ if (test_bit(vq - 1, cpu->sve_vq_init)) {
211
+ break;
212
+ }
213
+ }
214
+ max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
215
+
216
+ bitmap_complement(cpu->sve_vq_map, cpu->sve_vq_init, max_vq);
217
+ max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
218
+ }
219
+
220
+ /*
221
+ * Process the sve-max-vq property.
222
+ * Note that we know from the above that no bit above
223
+ * sve-max-vq is currently set.
224
+ */
225
+ if (cpu->sve_max_vq != 0) {
226
+ max_vq = cpu->sve_max_vq;
227
+
228
+ if (!test_bit(max_vq - 1, cpu->sve_vq_map) &&
229
+ test_bit(max_vq - 1, cpu->sve_vq_init)) {
230
+ error_setg(errp, "cannot disable sve%d", max_vq * 128);
231
+ error_append_hint(errp, "The maximum vector length must be "
232
+ "enabled, sve-max-vq=%d (%d bits)\n",
233
+ max_vq, max_vq * 128);
234
+ return;
235
+ }
236
+
237
+ /* Set all bits not explicitly set within sve-max-vq. */
238
+ bitmap_complement(tmp, cpu->sve_vq_init, max_vq);
239
+ bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
240
+ }
241
+
242
+ /*
243
+ * We should know what max-vq is now. Also, as we're done
244
+ * manipulating sve-vq-map, we ensure any bits above max-vq
245
+ * are clear, just in case anybody looks.
246
+ */
247
+ assert(max_vq != 0);
248
+ bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);
249
+
250
+ /* Ensure all required powers-of-two are enabled. */
251
+ for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
252
+ if (!test_bit(vq - 1, cpu->sve_vq_map)) {
253
+ error_setg(errp, "cannot disable sve%d", vq * 128);
254
+ error_append_hint(errp, "sve%d is required as it "
255
+ "is a power-of-two length smaller than "
256
+ "the maximum, sve%d\n",
257
+ vq * 128, max_vq * 128);
258
+ return;
259
+ }
260
+ }
261
+
262
+ /*
263
+ * Now that we validated all our vector lengths, the only question
264
+ * left to answer is if we even want SVE at all.
265
+ */
266
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
267
+ error_setg(errp, "cannot enable sve%d", max_vq * 128);
268
+ error_append_hint(errp, "SVE must be enabled to enable vector "
269
+ "lengths.\n");
270
+ error_append_hint(errp, "Add sve=on to the CPU property list.\n");
271
+ return;
272
+ }
273
+
274
+ /* From now on sve_max_vq is the actual maximum supported length. */
275
+ cpu->sve_max_vq = max_vq;
276
+}
277
+
278
+uint32_t arm_cpu_vq_map_next_smaller(ARMCPU *cpu, uint32_t vq)
279
+{
280
+ uint32_t bitnum;
281
+
282
+ /*
283
+ * We allow vq == ARM_MAX_VQ + 1 to be input because the caller may want
284
+ * to find the maximum vq enabled, which may be ARM_MAX_VQ, but this
285
+ * function always returns the next smaller than the input.
286
+ */
287
+ assert(vq && vq <= ARM_MAX_VQ + 1);
288
+
289
+ bitnum = find_last_bit(cpu->sve_vq_map, vq - 1);
290
+ return bitnum == vq - 1 ? 0 : bitnum + 1;
291
+}
292
+
293
static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
294
void *opaque, Error **errp)
295
{
296
@@ -XXX,XX +XXX,XX @@ static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
297
error_propagate(errp, err);
298
}
299
300
+static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
301
+ void *opaque, Error **errp)
302
+{
303
+ ARMCPU *cpu = ARM_CPU(obj);
304
+ uint32_t vq = atoi(&name[3]) / 128;
305
+ bool value;
306
+
307
+ /* All vector lengths are disabled when SVE is off. */
308
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
309
+ value = false;
310
+ } else {
311
+ value = test_bit(vq - 1, cpu->sve_vq_map);
312
+ }
313
+ visit_type_bool(v, name, &value, errp);
314
+}
315
+
316
+static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
317
+ void *opaque, Error **errp)
318
+{
319
+ ARMCPU *cpu = ARM_CPU(obj);
320
+ uint32_t vq = atoi(&name[3]) / 128;
321
+ Error *err = NULL;
322
+ bool value;
323
+
324
+ visit_type_bool(v, name, &value, &err);
325
+ if (err) {
326
+ error_propagate(errp, err);
327
+ return;
328
+ }
329
+
330
+ if (value) {
331
+ set_bit(vq - 1, cpu->sve_vq_map);
332
+ } else {
333
+ clear_bit(vq - 1, cpu->sve_vq_map);
334
+ }
335
+ set_bit(vq - 1, cpu->sve_vq_init);
336
+}
337
+
338
static void cpu_arm_get_sve(Object *obj, Visitor *v, const char *name,
339
void *opaque, Error **errp)
340
{
341
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
342
static void aarch64_max_initfn(Object *obj)
343
{
344
ARMCPU *cpu = ARM_CPU(obj);
345
+ uint32_t vq;
346
347
if (kvm_enabled()) {
348
kvm_arm_set_cpu_features_from_host(cpu);
349
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
350
cpu->dcz_blocksize = 7; /* 512 bytes */
351
#endif
352
353
- cpu->sve_max_vq = ARM_MAX_VQ;
354
object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
355
cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
356
object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
357
cpu_arm_set_sve, NULL, NULL, &error_fatal);
358
+
359
+ for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
360
+ char name[8];
361
+ sprintf(name, "sve%d", vq * 128);
362
+ object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
363
+ cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
364
+ }
365
}
366
}
367
368
diff --git a/target/arm/helper.c b/target/arm/helper.c
369
index XXXXXXX..XXXXXXX 100644
370
--- a/target/arm/helper.c
371
+++ b/target/arm/helper.c
372
@@ -XXX,XX +XXX,XX @@ int sve_exception_el(CPUARMState *env, int el)
373
return 0;
374
}
375
376
+static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
377
+{
378
+ uint32_t start_vq = (start_len & 0xf) + 1;
379
+
380
+ return arm_cpu_vq_map_next_smaller(cpu, start_vq + 1) - 1;
381
+}
382
+
383
/*
92
/*
384
* Given that SVE is enabled, return the vector length for EL.
385
*/
386
@@ -XXX,XX +XXX,XX @@ uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
387
if (arm_feature(env, ARM_FEATURE_EL3)) {
388
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
389
}
390
- return zcr_len;
391
+
392
+ return sve_zcr_get_valid_len(cpu, zcr_len);
393
}
394
395
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
396
diff --git a/target/arm/monitor.c b/target/arm/monitor.c
397
index XXXXXXX..XXXXXXX 100644
398
--- a/target/arm/monitor.c
399
+++ b/target/arm/monitor.c
400
@@ -XXX,XX +XXX,XX @@ GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
401
return head;
402
}
403
404
+QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
405
+
406
/*
407
* These are cpu model features we want to advertise. The order here
408
* matters as this is the order in which qmp_query_cpu_model_expansion
409
@@ -XXX,XX +XXX,XX @@ GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
410
*/
411
static const char *cpu_model_advertised_features[] = {
412
"aarch64", "pmu", "sve",
413
+ "sve128", "sve256", "sve384", "sve512",
414
+ "sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280",
415
+ "sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
416
NULL
417
};
418
419
@@ -XXX,XX +XXX,XX @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
420
if (!err) {
421
visit_check_struct(visitor, &err);
422
}
423
+ if (!err) {
424
+ arm_cpu_finalize_features(ARM_CPU(obj), &err);
425
+ }
426
visit_end_struct(visitor, NULL);
427
visit_free(visitor);
428
if (err) {
429
@@ -XXX,XX +XXX,XX @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
430
error_propagate(errp, err);
431
return NULL;
432
}
433
+ } else {
434
+ Error *err = NULL;
435
+ arm_cpu_finalize_features(ARM_CPU(obj), &err);
436
+ assert(err == NULL);
437
}
438
439
expansion_info = g_new0(CpuModelExpansionInfo, 1);
440
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
441
index XXXXXXX..XXXXXXX 100644
442
--- a/tests/arm-cpu-features.c
443
+++ b/tests/arm-cpu-features.c
444
@@ -XXX,XX +XXX,XX @@
445
* See the COPYING file in the top-level directory.
446
*/
447
#include "qemu/osdep.h"
448
+#include "qemu/bitops.h"
449
#include "libqtest.h"
450
#include "qapi/qmp/qdict.h"
451
#include "qapi/qmp/qjson.h"
452
453
+/*
454
+ * We expect the SVE max-vq to be 16. Also it must be <= 64
455
+ * for our test code, otherwise 'vls' can't just be a uint64_t.
456
+ */
457
+#define SVE_MAX_VQ 16
458
+
459
#define MACHINE "-machine virt,gic-version=max,accel=tcg "
460
#define MACHINE_KVM "-machine virt,gic-version=max,accel=kvm:tcg "
461
#define QUERY_HEAD "{ 'execute': 'query-cpu-model-expansion', " \
462
@@ -XXX,XX +XXX,XX @@ static void assert_bad_props(QTestState *qts, const char *cpu_type)
463
qobject_unref(resp);
464
}
465
466
+static uint64_t resp_get_sve_vls(QDict *resp)
467
+{
468
+ QDict *props;
469
+ const QDictEntry *e;
470
+ uint64_t vls = 0;
471
+ int n = 0;
472
+
473
+ g_assert(resp);
474
+ g_assert(resp_has_props(resp));
475
+
476
+ props = resp_get_props(resp);
477
+
478
+ for (e = qdict_first(props); e; e = qdict_next(props, e)) {
479
+ if (strlen(e->key) > 3 && !strncmp(e->key, "sve", 3) &&
480
+ g_ascii_isdigit(e->key[3])) {
481
+ char *endptr;
482
+ int bits;
483
+
484
+ bits = g_ascii_strtoll(&e->key[3], &endptr, 10);
485
+ if (!bits || *endptr != '\0') {
486
+ continue;
487
+ }
488
+
489
+ if (qdict_get_bool(props, e->key)) {
490
+ vls |= BIT_ULL((bits / 128) - 1);
491
+ }
492
+ ++n;
493
+ }
494
+ }
495
+
496
+ g_assert(n == SVE_MAX_VQ);
497
+
498
+ return vls;
499
+}
500
+
501
+#define assert_sve_vls(qts, cpu_type, expected_vls, fmt, ...) \
502
+({ \
503
+ QDict *_resp = do_query(qts, cpu_type, fmt, ##__VA_ARGS__); \
504
+ g_assert(_resp); \
505
+ g_assert(resp_has_props(_resp)); \
506
+ g_assert(resp_get_sve_vls(_resp) == expected_vls); \
507
+ qobject_unref(_resp); \
508
+})
509
+
510
+static void sve_tests_default(QTestState *qts, const char *cpu_type)
511
+{
512
+ /*
513
+ * With no sve-max-vq or sve<N> properties on the command line
514
+ * the default is to have all vector lengths enabled. This also
515
+ * tests that 'sve' is 'on' by default.
516
+ */
517
+ assert_sve_vls(qts, cpu_type, BIT_ULL(SVE_MAX_VQ) - 1, NULL);
518
+
519
+ /* With SVE off, all vector lengths should also be off. */
520
+ assert_sve_vls(qts, cpu_type, 0, "{ 'sve': false }");
521
+
522
+ /* With SVE on, we must have at least one vector length enabled. */
523
+ assert_error(qts, cpu_type, "cannot disable sve128", "{ 'sve128': false }");
524
+
525
+ /* Basic enable/disable tests. */
526
+ assert_sve_vls(qts, cpu_type, 0x7, "{ 'sve384': true }");
527
+ assert_sve_vls(qts, cpu_type, ((BIT_ULL(SVE_MAX_VQ) - 1) & ~BIT_ULL(2)),
528
+ "{ 'sve384': false }");
529
+
530
+ /*
531
+ * ---------------------------------------------------------------------
532
+ * power-of-two(vq) all-power- can can
533
+ * of-two(< vq) enable disable
534
+ * ---------------------------------------------------------------------
535
+ * vq < max_vq no MUST* yes yes
536
+ * vq < max_vq yes MUST* yes no
537
+ * ---------------------------------------------------------------------
538
+ * vq == max_vq n/a MUST* yes** yes**
539
+ * ---------------------------------------------------------------------
540
+ * vq > max_vq n/a no no yes
541
+ * vq > max_vq n/a yes yes yes
542
+ * ---------------------------------------------------------------------
543
+ *
544
+ * [*] "MUST" means this requirement must already be satisfied,
545
+ * otherwise 'max_vq' couldn't itself be enabled.
546
+ *
547
+ * [**] Not testable with the QMP interface, only with the command line.
548
+ */
549
+
550
+ /* max_vq := 8 */
551
+ assert_sve_vls(qts, cpu_type, 0x8b, "{ 'sve1024': true }");
552
+
553
+ /* max_vq := 8, vq < max_vq, !power-of-two(vq) */
554
+ assert_sve_vls(qts, cpu_type, 0x8f,
555
+ "{ 'sve1024': true, 'sve384': true }");
556
+ assert_sve_vls(qts, cpu_type, 0x8b,
557
+ "{ 'sve1024': true, 'sve384': false }");
558
+
559
+ /* max_vq := 8, vq < max_vq, power-of-two(vq) */
560
+ assert_sve_vls(qts, cpu_type, 0x8b,
561
+ "{ 'sve1024': true, 'sve256': true }");
562
+ assert_error(qts, cpu_type, "cannot disable sve256",
563
+ "{ 'sve1024': true, 'sve256': false }");
564
+
565
+ /* max_vq := 3, vq > max_vq, !all-power-of-two(< vq) */
566
+ assert_error(qts, cpu_type, "cannot disable sve512",
567
+ "{ 'sve384': true, 'sve512': false, 'sve640': true }");
568
+
569
+ /*
570
+ * We can disable power-of-two vector lengths when all larger lengths
571
+ * are also disabled. We only need to disable the power-of-two length,
572
+ * as all non-enabled larger lengths will then be auto-disabled.
573
+ */
574
+ assert_sve_vls(qts, cpu_type, 0x7, "{ 'sve512': false }");
575
+
576
+ /* max_vq := 3, vq > max_vq, all-power-of-two(< vq) */
577
+ assert_sve_vls(qts, cpu_type, 0x1f,
578
+ "{ 'sve384': true, 'sve512': true, 'sve640': true }");
579
+ assert_sve_vls(qts, cpu_type, 0xf,
580
+ "{ 'sve384': true, 'sve512': true, 'sve640': false }");
581
+}
582
+
583
+static void sve_tests_sve_max_vq_8(const void *data)
584
+{
585
+ QTestState *qts;
586
+
587
+ qts = qtest_init(MACHINE "-cpu max,sve-max-vq=8");
588
+
589
+ assert_sve_vls(qts, "max", BIT_ULL(8) - 1, NULL);
590
+
591
+ /*
592
+ * Disabling the max-vq set by sve-max-vq is not allowed, but
593
+ * of course enabling it is OK.
594
+ */
595
+ assert_error(qts, "max", "cannot disable sve1024", "{ 'sve1024': false }");
596
+ assert_sve_vls(qts, "max", 0xff, "{ 'sve1024': true }");
597
+
598
+ /*
599
+ * Enabling anything larger than max-vq set by sve-max-vq is not
600
+ * allowed, but of course disabling everything larger is OK.
601
+ */
602
+ assert_error(qts, "max", "cannot enable sve1152", "{ 'sve1152': true }");
603
+ assert_sve_vls(qts, "max", 0xff, "{ 'sve1152': false }");
604
+
605
+ /*
606
+ * We can enable/disable non power-of-two lengths smaller than the
607
+ * max-vq set by sve-max-vq, but, while we can enable power-of-two
608
+ * lengths, we can't disable them.
609
+ */
610
+ assert_sve_vls(qts, "max", 0xff, "{ 'sve384': true }");
611
+ assert_sve_vls(qts, "max", 0xfb, "{ 'sve384': false }");
612
+ assert_sve_vls(qts, "max", 0xff, "{ 'sve256': true }");
613
+ assert_error(qts, "max", "cannot disable sve256", "{ 'sve256': false }");
614
+
615
+ qtest_quit(qts);
616
+}
617
+
618
+static void sve_tests_sve_off(const void *data)
619
+{
620
+ QTestState *qts;
621
+
622
+ qts = qtest_init(MACHINE "-cpu max,sve=off");
623
+
624
+ /* SVE is off, so the map should be empty. */
625
+ assert_sve_vls(qts, "max", 0, NULL);
626
+
627
+ /* The map stays empty even if we turn lengths off. */
628
+ assert_sve_vls(qts, "max", 0, "{ 'sve128': false }");
629
+
630
+ /* It's an error to enable lengths when SVE is off. */
631
+ assert_error(qts, "max", "cannot enable sve128", "{ 'sve128': true }");
632
+
633
+ /* With SVE re-enabled we should get all vector lengths enabled. */
634
+ assert_sve_vls(qts, "max", BIT_ULL(SVE_MAX_VQ) - 1, "{ 'sve': true }");
635
+
636
+ /* Or enable SVE with just specific vector lengths. */
637
+ assert_sve_vls(qts, "max", 0x3,
638
+ "{ 'sve': true, 'sve128': true, 'sve256': true }");
639
+
640
+ qtest_quit(qts);
641
+}
642
+
643
static void test_query_cpu_model_expansion(const void *data)
644
{
645
QTestState *qts;
646
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion(const void *data)
647
if (g_str_equal(qtest_get_arch(), "aarch64")) {
648
assert_has_feature(qts, "max", "aarch64");
649
assert_has_feature(qts, "max", "sve");
650
+ assert_has_feature(qts, "max", "sve128");
651
assert_has_feature(qts, "cortex-a57", "pmu");
652
assert_has_feature(qts, "cortex-a57", "aarch64");
653
654
+ sve_tests_default(qts, "max");
655
+
656
/* Test that features that depend on KVM generate errors without. */
657
assert_error(qts, "max",
658
"'aarch64' feature cannot be disabled "
659
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
660
qtest_add_data_func("/arm/kvm/query-cpu-model-expansion",
661
NULL, test_query_cpu_model_expansion_kvm);
662
663
+ if (g_str_equal(qtest_get_arch(), "aarch64")) {
664
+ qtest_add_data_func("/arm/max/query-cpu-model-expansion/sve-max-vq-8",
665
+ NULL, sve_tests_sve_max_vq_8);
666
+ qtest_add_data_func("/arm/max/query-cpu-model-expansion/sve-off",
667
+ NULL, sve_tests_sve_off);
668
+ }
669
+
670
return g_test_run();
671
}
672
diff --git a/docs/arm-cpu-features.rst b/docs/arm-cpu-features.rst
673
index XXXXXXX..XXXXXXX 100644
674
--- a/docs/arm-cpu-features.rst
675
+++ b/docs/arm-cpu-features.rst
676
@@ -XXX,XX +XXX,XX @@ block in the script for usage) is used to issue the QMP commands.
677
(QEMU) query-cpu-model-expansion type=full model={"name":"max"}
678
{ "return": {
679
"model": { "name": "max", "props": {
680
- "pmu": true, "aarch64": true
681
+ "sve1664": true, "pmu": true, "sve1792": true, "sve1920": true,
682
+ "sve128": true, "aarch64": true, "sve1024": true, "sve": true,
683
+ "sve640": true, "sve768": true, "sve1408": true, "sve256": true,
684
+ "sve1152": true, "sve512": true, "sve384": true, "sve1536": true,
685
+ "sve896": true, "sve1280": true, "sve2048": true
686
}}}}
687
688
-We see that the `max` CPU type has the `pmu` and `aarch64` CPU features.
689
-We also see that the CPU features are enabled, as they are all `true`.
690
+We see that the `max` CPU type has the `pmu`, `aarch64`, `sve`, and many
691
+`sve<N>` CPU features. We also see that all the CPU features are
692
+enabled, as they are all `true`. (The `sve<N>` CPU features are all
693
+optional SVE vector lengths (see "SVE CPU Properties"). While with TCG
694
+all SVE vector lengths can be supported, when KVM is in use it's more
695
+likely that only a few lengths will be supported, if SVE is supported at
696
+all.)
697
698
(2) Let's try to disable the PMU::
699
700
(QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"pmu":false}}
701
{ "return": {
702
"model": { "name": "max", "props": {
703
- "pmu": false, "aarch64": true
704
+ "sve1664": true, "pmu": false, "sve1792": true, "sve1920": true,
705
+ "sve128": true, "aarch64": true, "sve1024": true, "sve": true,
706
+ "sve640": true, "sve768": true, "sve1408": true, "sve256": true,
707
+ "sve1152": true, "sve512": true, "sve384": true, "sve1536": true,
708
+ "sve896": true, "sve1280": true, "sve2048": true
709
}}}}
710
711
We see it worked, as `pmu` is now `false`.
712
@@ -XXX,XX +XXX,XX @@ We see it worked, as `pmu` is now `false`.
713
It looks like this feature is limited to a configuration we do not
714
currently have.
715
716
-(4) Let's try probing CPU features for the Cortex-A15 CPU type::
717
+(4) Let's disable `sve` and see what happens to all the optional SVE
718
+ vector lengths::
719
+
720
+ (QEMU) query-cpu-model-expansion type=full model={"name":"max","props":{"sve":false}}
721
+ { "return": {
722
+ "model": { "name": "max", "props": {
723
+ "sve1664": false, "pmu": true, "sve1792": false, "sve1920": false,
724
+ "sve128": false, "aarch64": true, "sve1024": false, "sve": false,
725
+ "sve640": false, "sve768": false, "sve1408": false, "sve256": false,
726
+ "sve1152": false, "sve512": false, "sve384": false, "sve1536": false,
727
+ "sve896": false, "sve1280": false, "sve2048": false
728
+ }}}}
729
+
730
+As expected they are now all `false`.
731
+
732
+(5) Let's try probing CPU features for the Cortex-A15 CPU type::
733
734
(QEMU) query-cpu-model-expansion type=full model={"name":"cortex-a15"}
735
{"return": {"model": {"name": "cortex-a15", "props": {"pmu": true}}}}
736
@@ -XXX,XX +XXX,XX @@ After determining which CPU features are available and supported for a
737
given CPU type, then they may be selectively enabled or disabled on the
738
QEMU command line with that CPU type::
739
740
- $ qemu-system-aarch64 -M virt -cpu max,pmu=off
741
+ $ qemu-system-aarch64 -M virt -cpu max,pmu=off,sve=on,sve128=on,sve256=on
742
743
-The example above disables the PMU for the `max` CPU type.
744
+The example above disables the PMU and enables the first two SVE vector
745
+lengths for the `max` CPU type. Note, the `sve=on` isn't actually
746
+necessary, because, as we observed above with our probe of the `max` CPU
747
+type, `sve` is already on by default. Also, based on our probe of
748
+defaults, it would seem we need to disable many SVE vector lengths, rather
749
+than only enabling the two we want. This isn't the case, because, as
750
+disabling many SVE vector lengths would be quite verbose, the `sve<N>` CPU
751
+properties have special semantics (see "SVE CPU Property Parsing
752
+Semantics").
753
+
754
+SVE CPU Properties
755
+==================
756
+
757
+There are two types of SVE CPU properties: `sve` and `sve<N>`. The first
758
+is used to enable or disable the entire SVE feature, just as the `pmu`
759
+CPU property completely enables or disables the PMU. The second type
760
+is used to enable or disable specific vector lengths, where `N` is the
761
+number of bits of the length. The `sve<N>` CPU properties have special
762
+dependencies and constraints, see "SVE CPU Property Dependencies and
763
+Constraints" below. Additionally, as we want all supported vector lengths
764
+to be enabled by default, then, in order to avoid overly verbose command
765
+lines (command lines full of `sve<N>=off`, for all `N` not wanted), we
766
+provide the parsing semantics listed in "SVE CPU Property Parsing
767
+Semantics".
768
+
769
+SVE CPU Property Dependencies and Constraints
770
+---------------------------------------------
771
+
772
+ 1) At least one vector length must be enabled when `sve` is enabled.
773
+
774
+ 2) If a vector length `N` is enabled, then all power-of-two vector
775
+ lengths smaller than `N` must also be enabled. E.g. if `sve512`
776
+ is enabled, then the 128-bit and 256-bit vector lengths must also
777
+ be enabled.
778
+
779
+SVE CPU Property Parsing Semantics
780
+----------------------------------
781
+
782
+ 1) If SVE is disabled (`sve=off`), then which SVE vector lengths
783
+ are enabled or disabled is irrelevant to the guest, as the entire
784
+ SVE feature is disabled and that disables all vector lengths for
785
+ the guest. However QEMU will still track any `sve<N>` CPU
786
+ properties provided by the user. If later an `sve=on` is provided,
787
+ then the guest will get only the enabled lengths. If no `sve=on`
788
+ is provided and there are explicitly enabled vector lengths, then
789
+ an error is generated.
790
+
791
+ 2) If SVE is enabled (`sve=on`), but no `sve<N>` CPU properties are
792
+ provided, then all supported vector lengths are enabled, including
793
+ the non-power-of-two lengths.
794
+
795
+ 3) If SVE is enabled, then an error is generated when attempting to
796
+ disable the last enabled vector length (see constraint (1) of "SVE
797
+ CPU Property Dependencies and Constraints").
798
+
799
+ 4) If one or more vector lengths have been explicitly enabled and at
800
+ at least one of the dependency lengths of the maximum enabled length
801
+ has been explicitly disabled, then an error is generated (see
802
+ constraint (2) of "SVE CPU Property Dependencies and Constraints").
803
+
804
+ 5) If one or more `sve<N>` CPU properties are set `off`, but no `sve<N>`,
805
+ CPU properties are set `on`, then the specified vector lengths are
806
+ disabled but the default for any unspecified lengths remains enabled.
807
+ Disabling a power-of-two vector length also disables all vector
808
+ lengths larger than the power-of-two length (see constraint (2) of
809
+ "SVE CPU Property Dependencies and Constraints").
810
+
811
+ 6) If one or more `sve<N>` CPU properties are set to `on`, then they
812
+ are enabled and all unspecified lengths default to disabled, except
813
+ for the required lengths per constraint (2) of "SVE CPU Property
814
+ Dependencies and Constraints", which will even be auto-enabled if
815
+ they were not explicitly enabled.
816
+
817
+ 7) If SVE was disabled (`sve=off`), allowing all vector lengths to be
818
+ explicitly disabled (i.e. avoiding the error specified in (3) of
819
+ "SVE CPU Property Parsing Semantics"), then if later an `sve=on` is
820
+ provided an error will be generated. To avoid this error, one must
821
+ enable at least one vector length prior to enabling SVE.
822
+
823
+SVE CPU Property Examples
824
+-------------------------
825
+
826
+ 1) Disable SVE::
827
+
828
+ $ qemu-system-aarch64 -M virt -cpu max,sve=off
829
+
830
+ 2) Implicitly enable all vector lengths for the `max` CPU type::
831
+
832
+ $ qemu-system-aarch64 -M virt -cpu max
833
+
834
+ 3) Only enable the 128-bit vector length::
835
+
836
+ $ qemu-system-aarch64 -M virt -cpu max,sve128=on
837
+
838
+ 4) Disable the 512-bit vector length and all larger vector lengths,
839
+ since 512 is a power-of-two. This results in all the smaller,
840
+ uninitialized lengths (128, 256, and 384) defaulting to enabled::
841
+
842
+ $ qemu-system-aarch64 -M virt -cpu max,sve512=off
843
+
844
+ 5) Enable the 128-bit, 256-bit, and 512-bit vector lengths::
845
+
846
+ $ qemu-system-aarch64 -M virt -cpu max,sve128=on,sve256=on,sve512=on
847
+
848
+ 6) The same as (5), but since the 128-bit and 256-bit vector
849
+ lengths are required for the 512-bit vector length to be enabled,
850
+ then allow them to be auto-enabled::
851
+
852
+ $ qemu-system-aarch64 -M virt -cpu max,sve512=on
853
+
854
+ 7) Do the same as (6), but by first disabling SVE and then re-enabling it::
855
+
856
+ $ qemu-system-aarch64 -M virt -cpu max,sve=off,sve512=on,sve=on
857
+
858
+ 8) Force errors regarding the last vector length::
859
+
860
+ $ qemu-system-aarch64 -M virt -cpu max,sve128=off
861
+ $ qemu-system-aarch64 -M virt -cpu max,sve=off,sve128=off,sve=on
862
+
863
+SVE CPU Property Recommendations
864
+--------------------------------
865
+
866
+The examples in "SVE CPU Property Examples" exhibit many ways to select
867
+vector lengths which developers may find useful in order to avoid overly
868
+verbose command lines. However, the recommended way to select vector
869
+lengths is to explicitly enable each desired length. Therefore only
870
+example's (1), (3), and (5) exhibit recommended uses of the properties.
871
872
--
93
--
873
2.20.1
94
2.25.1
874
95
875
96
diff view generated by jsdifflib
New patch
1
In get_ite() and update_ite() we work with a 12-byte in-guest-memory
2
table entry, which we intend to handle as an 8-byte value followed by
3
a 4-byte value. Unfortunately the calculation of the address of the
4
4-byte value is wrong, because we write it as:
1
5
6
table_base_address + (index * entrysize) + 4
7
(obfuscated by the way the expression has been written)
8
9
when it should be + 8. This bug meant that we overwrote the top
10
bytes of the 8-byte value with the 4-byte value. There are no
11
guest-visible effects because the top half of the 8-byte value
12
contains only the doorbell interrupt field, which is used only in
13
GICv4, and the two bugs in the "write ITE" and "read ITE" codepaths
14
cancel each other out.
15
16
We can't simply change the calculation, because this would break
17
migration of a (TCG) guest from the old version of QEMU which had
18
in-guest-memory interrupt tables written using the buggy version of
19
update_ite(). We must also at the same time change the layout of the
20
fields within the ITE_L and ITE_H values so that the in-memory
21
locations of the fields we care about (VALID, INTTYPE, INTID and
22
ICID) stay the same.
23
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
25
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
26
Message-id: 20220201193207.2771604-7-peter.maydell@linaro.org
27
---
28
hw/intc/gicv3_internal.h | 19 ++++++++++---------
29
hw/intc/arm_gicv3_its.c | 28 +++++++++++-----------------
30
2 files changed, 21 insertions(+), 26 deletions(-)
31
32
diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/hw/intc/gicv3_internal.h
35
+++ b/hw/intc/gicv3_internal.h
36
@@ -XXX,XX +XXX,XX @@ FIELD(MOVI_2, ICID, 0, 16)
37
* 12 bytes Interrupt translation Table Entry size
38
* as per Table 5.3 in GICv3 spec
39
* ITE Lower 8 Bytes
40
- * Bits: | 49 ... 26 | 25 ... 2 | 1 | 0 |
41
- * Values: | Doorbell | IntNum | IntType | Valid |
42
+ * Bits: | 63 ... 48 | 47 ... 32 | 31 ... 26 | 25 ... 2 | 1 | 0 |
43
+ * Values: | vPEID | ICID | unused | IntNum | IntType | Valid |
44
* ITE Higher 4 Bytes
45
- * Bits: | 31 ... 16 | 15 ...0 |
46
- * Values: | vPEID | ICID |
47
- * (When Doorbell is unused, as it always is in GICv3, it is 1023)
48
+ * Bits: | 31 ... 25 | 24 ... 0 |
49
+ * Values: | unused | Doorbell |
50
+ * (When Doorbell is unused, as it always is for INTYPE_PHYSICAL,
51
+ * the value of that field in memory cannot be relied upon -- older
52
+ * versions of QEMU did not correctly write to that memory.)
53
*/
54
#define ITS_ITT_ENTRY_SIZE 0xC
55
56
FIELD(ITE_L, VALID, 0, 1)
57
FIELD(ITE_L, INTTYPE, 1, 1)
58
FIELD(ITE_L, INTID, 2, 24)
59
-FIELD(ITE_L, DOORBELL, 26, 24)
60
-
61
-FIELD(ITE_H, ICID, 0, 16)
62
-FIELD(ITE_H, VPEID, 16, 16)
63
+FIELD(ITE_L, ICID, 32, 16)
64
+FIELD(ITE_L, VPEID, 48, 16)
65
+FIELD(ITE_H, DOORBELL, 0, 24)
66
67
/* Possible values for ITE_L INTTYPE */
68
#define ITE_INTTYPE_VIRTUAL 0
69
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/hw/intc/arm_gicv3_its.c
72
+++ b/hw/intc/arm_gicv3_its.c
73
@@ -XXX,XX +XXX,XX @@ static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
74
{
75
AddressSpace *as = &s->gicv3->dma_as;
76
MemTxResult res = MEMTX_OK;
77
+ hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
78
79
- address_space_stq_le(as, dte->ittaddr + (eventid * (sizeof(uint64_t) +
80
- sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
81
- &res);
82
+ address_space_stq_le(as, iteaddr, ite.itel, MEMTXATTRS_UNSPECIFIED, &res);
83
84
if (res == MEMTX_OK) {
85
- address_space_stl_le(as, dte->ittaddr + (eventid * (sizeof(uint64_t) +
86
- sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
87
+ address_space_stl_le(as, iteaddr + 8, ite.iteh,
88
MEMTXATTRS_UNSPECIFIED, &res);
89
}
90
if (res != MEMTX_OK) {
91
@@ -XXX,XX +XXX,XX @@ static bool get_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
92
AddressSpace *as = &s->gicv3->dma_as;
93
bool status = false;
94
IteEntry ite = {};
95
+ hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
96
97
- ite.itel = address_space_ldq_le(as, dte->ittaddr +
98
- (eventid * (sizeof(uint64_t) +
99
- sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
100
- res);
101
+ ite.itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, res);
102
103
if (*res == MEMTX_OK) {
104
- ite.iteh = address_space_ldl_le(as, dte->ittaddr +
105
- (eventid * (sizeof(uint64_t) +
106
- sizeof(uint32_t))) + sizeof(uint32_t),
107
+ ite.iteh = address_space_ldl_le(as, iteaddr + 8,
108
MEMTXATTRS_UNSPECIFIED, res);
109
110
if (*res == MEMTX_OK) {
111
@@ -XXX,XX +XXX,XX @@ static bool get_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
112
int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
113
if (inttype == ITE_INTTYPE_PHYSICAL) {
114
*pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
115
- *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
116
+ *icid = FIELD_EX64(ite.itel, ITE_L, ICID);
117
status = true;
118
}
119
}
120
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
121
ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, true);
122
ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
123
ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
124
- ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
125
- ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
126
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, ICID, icid);
127
+ ite.iteh = FIELD_DP32(ite.iteh, ITE_H, DOORBELL, INTID_SPURIOUS);
128
129
return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
130
}
131
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
132
ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, 1);
133
ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
134
ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, intid);
135
- ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
136
- ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, new_icid);
137
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, ICID, new_icid);
138
+ ite.iteh = FIELD_DP32(ite.iteh, ITE_H, DOORBELL, INTID_SPURIOUS);
139
return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
140
}
141
142
--
143
2.25.1
144
145
diff view generated by jsdifflib
New patch
1
The get_ite() code has some awkward nested if statements; clean
2
them up by returning early if the memory accesses fail.
1
3
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20220201193207.2771604-8-peter.maydell@linaro.org
7
---
8
hw/intc/arm_gicv3_its.c | 26 ++++++++++++++------------
9
1 file changed, 14 insertions(+), 12 deletions(-)
10
11
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/hw/intc/arm_gicv3_its.c
14
+++ b/hw/intc/arm_gicv3_its.c
15
@@ -XXX,XX +XXX,XX @@ static bool get_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
16
hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
17
18
ite.itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, res);
19
+ if (*res != MEMTX_OK) {
20
+ return false;
21
+ }
22
23
- if (*res == MEMTX_OK) {
24
- ite.iteh = address_space_ldl_le(as, iteaddr + 8,
25
- MEMTXATTRS_UNSPECIFIED, res);
26
+ ite.iteh = address_space_ldl_le(as, iteaddr + 8,
27
+ MEMTXATTRS_UNSPECIFIED, res);
28
+ if (*res != MEMTX_OK) {
29
+ return false;
30
+ }
31
32
- if (*res == MEMTX_OK) {
33
- if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
34
- int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
35
- if (inttype == ITE_INTTYPE_PHYSICAL) {
36
- *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
37
- *icid = FIELD_EX64(ite.itel, ITE_L, ICID);
38
- status = true;
39
- }
40
- }
41
+ if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
42
+ int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
43
+ if (inttype == ITE_INTTYPE_PHYSICAL) {
44
+ *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
45
+ *icid = FIELD_EX64(ite.itel, ITE_L, ICID);
46
+ status = true;
47
}
48
}
49
return status;
50
--
51
2.25.1
52
53
diff view generated by jsdifflib
New patch
1
1
In get_ite() we currently return the caller some of the fields of an
2
Interrupt Table Entry via a set of pointer arguments, and validate
3
some of them internally (interrupt type and valid bit) to return a
4
simple true/false 'valid' indication. Define a new ITEntry struct
5
which has all the fields that the in-memory ITE has, and bring the
6
get_ite() function in to line with get_dte() and get_cte().
7
8
This paves the way for handling virtual interrupts, which will want
9
a different subset of the fields in the ITE. Handling them under
10
the old "lots of pointer arguments" scheme would have meant a
11
confusingly large set of arguments for this function.
12
13
The new struct ITEntry is obviously confusably similar to the
14
existing IteEntry struct, whose fields are the raw 12 bytes
15
of the in-memory ITE. In the next commit we will make update_ite()
16
use ITEntry instead of IteEntry, which will allow us to delete
17
the IteEntry struct and remove the confusion.
18
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
21
Message-id: 20220201193207.2771604-9-peter.maydell@linaro.org
22
---
23
hw/intc/arm_gicv3_its.c | 102 ++++++++++++++++++++++------------------
24
1 file changed, 55 insertions(+), 47 deletions(-)
25
26
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/hw/intc/arm_gicv3_its.c
29
+++ b/hw/intc/arm_gicv3_its.c
30
@@ -XXX,XX +XXX,XX @@ typedef struct CTEntry {
31
uint32_t rdbase;
32
} CTEntry;
33
34
+typedef struct ITEntry {
35
+ bool valid;
36
+ int inttype;
37
+ uint32_t intid;
38
+ uint32_t doorbell;
39
+ uint32_t icid;
40
+ uint32_t vpeid;
41
+} ITEntry;
42
+
43
+
44
/*
45
* The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
46
* if a command parameter is not correct. These include both "stall
47
@@ -XXX,XX +XXX,XX @@ static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
48
}
49
}
50
51
-static bool get_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
52
- uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
53
+/*
54
+ * Read the Interrupt Table entry at index @eventid from the table specified
55
+ * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
56
+ * struct @ite accordingly. If there is an error reading memory then we return
57
+ * the error code.
58
+ */
59
+static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
60
+ const DTEntry *dte, ITEntry *ite)
61
{
62
AddressSpace *as = &s->gicv3->dma_as;
63
- bool status = false;
64
- IteEntry ite = {};
65
+ MemTxResult res = MEMTX_OK;
66
+ uint64_t itel;
67
+ uint32_t iteh;
68
hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
69
70
- ite.itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, res);
71
- if (*res != MEMTX_OK) {
72
- return false;
73
+ itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
74
+ if (res != MEMTX_OK) {
75
+ return res;
76
}
77
78
- ite.iteh = address_space_ldl_le(as, iteaddr + 8,
79
- MEMTXATTRS_UNSPECIFIED, res);
80
- if (*res != MEMTX_OK) {
81
- return false;
82
+ iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
83
+ if (res != MEMTX_OK) {
84
+ return res;
85
}
86
87
- if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
88
- int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
89
- if (inttype == ITE_INTTYPE_PHYSICAL) {
90
- *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
91
- *icid = FIELD_EX64(ite.itel, ITE_L, ICID);
92
- status = true;
93
- }
94
- }
95
- return status;
96
+ ite->valid = FIELD_EX64(itel, ITE_L, VALID);
97
+ ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
98
+ ite->intid = FIELD_EX64(itel, ITE_L, INTID);
99
+ ite->icid = FIELD_EX64(itel, ITE_L, ICID);
100
+ ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
101
+ ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
102
+ return MEMTX_OK;
103
}
104
105
/*
106
@@ -XXX,XX +XXX,XX @@ static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
107
static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
108
uint32_t eventid, ItsCmdType cmd)
109
{
110
- MemTxResult res = MEMTX_OK;
111
uint64_t num_eventids;
112
- uint16_t icid = 0;
113
- uint32_t pIntid = 0;
114
- bool ite_valid = false;
115
DTEntry dte;
116
CTEntry cte;
117
+ ITEntry ite;
118
119
if (devid >= s->dt.num_entries) {
120
qemu_log_mask(LOG_GUEST_ERROR,
121
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
122
return CMD_CONTINUE;
123
}
124
125
- ite_valid = get_ite(s, eventid, &dte, &icid, &pIntid, &res);
126
- if (res != MEMTX_OK) {
127
+ if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
128
return CMD_STALL;
129
}
130
131
- if (!ite_valid) {
132
+ if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
133
qemu_log_mask(LOG_GUEST_ERROR,
134
"%s: invalid command attributes: invalid ITE\n",
135
__func__);
136
return CMD_CONTINUE;
137
}
138
139
- if (icid >= s->ct.num_entries) {
140
+ if (ite.icid >= s->ct.num_entries) {
141
qemu_log_mask(LOG_GUEST_ERROR,
142
"%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
143
- __func__, icid);
144
+ __func__, ite.icid);
145
return CMD_CONTINUE;
146
}
147
148
- if (get_cte(s, icid, &cte) != MEMTX_OK) {
149
+ if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
150
return CMD_STALL;
151
}
152
if (!cte.valid) {
153
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
154
}
155
156
if ((cmd == CLEAR) || (cmd == DISCARD)) {
157
- gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], pIntid, 0);
158
+ gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
159
} else {
160
- gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], pIntid, 1);
161
+ gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
162
}
163
164
if (cmd == DISCARD) {
165
- IteEntry ite = {};
166
+ IteEntry itee = {};
167
/* remove mapping from interrupt translation table */
168
- return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
169
+ return update_ite(s, eventid, &dte, itee) ? CMD_CONTINUE : CMD_STALL;
170
}
171
return CMD_CONTINUE;
172
}
173
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
174
175
static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
176
{
177
- MemTxResult res = MEMTX_OK;
178
- uint32_t devid, eventid, intid;
179
- uint16_t old_icid, new_icid;
180
- bool ite_valid;
181
+ uint32_t devid, eventid;
182
+ uint16_t new_icid;
183
uint64_t num_eventids;
184
IteEntry ite = {};
185
DTEntry dte;
186
CTEntry old_cte, new_cte;
187
+ ITEntry old_ite;
188
189
devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
190
eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
191
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
192
return CMD_CONTINUE;
193
}
194
195
- ite_valid = get_ite(s, eventid, &dte, &old_icid, &intid, &res);
196
- if (res != MEMTX_OK) {
197
+ if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
198
return CMD_STALL;
199
}
200
201
- if (!ite_valid) {
202
+ if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
203
qemu_log_mask(LOG_GUEST_ERROR,
204
"%s: invalid command attributes: invalid ITE\n",
205
__func__);
206
return CMD_CONTINUE;
207
}
208
209
- if (old_icid >= s->ct.num_entries) {
210
+ if (old_ite.icid >= s->ct.num_entries) {
211
qemu_log_mask(LOG_GUEST_ERROR,
212
"%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
213
- __func__, old_icid);
214
+ __func__, old_ite.icid);
215
return CMD_CONTINUE;
216
}
217
218
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
219
return CMD_CONTINUE;
220
}
221
222
- if (get_cte(s, old_icid, &old_cte) != MEMTX_OK) {
223
+ if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
224
return CMD_STALL;
225
}
226
if (!old_cte.valid) {
227
qemu_log_mask(LOG_GUEST_ERROR,
228
"%s: invalid command attributes: "
229
"invalid CTE for old ICID 0x%x\n",
230
- __func__, old_icid);
231
+ __func__, old_ite.icid);
232
return CMD_CONTINUE;
233
}
234
235
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
236
/* Move the LPI from the old redistributor to the new one */
237
gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
238
&s->gicv3->cpu[new_cte.rdbase],
239
- intid);
240
+ old_ite.intid);
241
}
242
243
/* Update the ICID field in the interrupt translation table entry */
244
ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, 1);
245
ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
246
- ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, intid);
247
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, old_ite.intid);
248
ite.itel = FIELD_DP64(ite.itel, ITE_L, ICID, new_icid);
249
ite.iteh = FIELD_DP32(ite.iteh, ITE_H, DOORBELL, INTID_SPURIOUS);
250
return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
251
--
252
2.25.1
253
254
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
Make the update_ite() struct use the new ITEntry struct, so that
2
callers don't need to assemble the in-memory ITE data themselves, and
3
only get_ite() and update_ite() need to care about that in-memory
4
layout. We can then drop the no-longer-used IteEntry struct
5
definition.
2
6
3
Allow cpu 'host' to enable SVE when it's available, unless the
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
user chooses to disable it with the added 'sve=off' cpu property.
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Also give the user the ability to select vector lengths with the
9
Message-id: 20220201193207.2771604-10-peter.maydell@linaro.org
6
sve<N> properties. We don't adopt 'max' cpu's other sve property,
10
---
7
sve-max-vq, because that property is difficult to use with KVM.
11
hw/intc/arm_gicv3_its.c | 62 +++++++++++++++++++++--------------------
8
That property assumes all vector lengths in the range from 1 up
12
1 file changed, 32 insertions(+), 30 deletions(-)
9
to and including the specified maximum length are supported, but
10
there may be optional lengths not supported by the host in that
11
range. With KVM one must be more specific when enabling vector
12
lengths.
13
13
14
Signed-off-by: Andrew Jones <drjones@redhat.com>
14
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
15
Reviewed-by: Eric Auger <eric.auger@redhat.com>
16
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
17
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
18
Message-id: 20191031142734.8590-10-drjones@redhat.com
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
20
---
21
target/arm/cpu.h | 2 ++
22
target/arm/cpu.c | 3 +++
23
target/arm/cpu64.c | 33 +++++++++++++++++----------------
24
target/arm/kvm64.c | 14 +++++++++++++-
25
tests/arm-cpu-features.c | 17 ++++++++---------
26
docs/arm-cpu-features.rst | 19 ++++++++++++-------
27
6 files changed, 55 insertions(+), 33 deletions(-)
28
29
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
30
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/cpu.h
16
--- a/hw/intc/arm_gicv3_its.c
32
+++ b/target/arm/cpu.h
17
+++ b/hw/intc/arm_gicv3_its.c
33
@@ -XXX,XX +XXX,XX @@ int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
18
@@ -XXX,XX +XXX,XX @@ typedef enum ItsCmdType {
34
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
19
INTERRUPT = 3,
35
void aarch64_sve_change_el(CPUARMState *env, int old_el,
20
} ItsCmdType;
36
int new_el, bool el0_a64);
21
37
+void aarch64_add_sve_properties(Object *obj);
22
-typedef struct {
38
#else
23
- uint32_t iteh;
39
static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
24
- uint64_t itel;
40
static inline void aarch64_sve_change_el(CPUARMState *env, int o,
25
-} IteEntry;
41
int n, bool a)
26
-
42
{ }
27
typedef struct DTEntry {
43
+static inline void aarch64_add_sve_properties(Object *obj) { }
28
bool valid;
44
#endif
29
unsigned size;
45
30
@@ -XXX,XX +XXX,XX @@ static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
46
#if !defined(CONFIG_TCG)
31
return MEMTX_OK;
47
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/cpu.c
50
+++ b/target/arm/cpu.c
51
@@ -XXX,XX +XXX,XX @@ static void arm_host_initfn(Object *obj)
52
ARMCPU *cpu = ARM_CPU(obj);
53
54
kvm_arm_set_cpu_features_from_host(cpu);
55
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
56
+ aarch64_add_sve_properties(obj);
57
+ }
58
arm_cpu_post_init(obj);
59
}
32
}
60
33
61
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
34
+/*
62
index XXXXXXX..XXXXXXX 100644
35
+ * Update the Interrupt Table entry at index @evinted in the table specified
63
--- a/target/arm/cpu64.c
36
+ * by the dte @dte. Returns true on success, false if there was a memory
64
+++ b/target/arm/cpu64.c
37
+ * access error.
65
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
38
+ */
66
cpu->isar.id_aa64pfr0 = t;
39
static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
40
- IteEntry ite)
41
+ const ITEntry *ite)
42
{
43
AddressSpace *as = &s->gicv3->dma_as;
44
MemTxResult res = MEMTX_OK;
45
hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
46
+ uint64_t itel = 0;
47
+ uint32_t iteh = 0;
48
49
- address_space_stq_le(as, iteaddr, ite.itel, MEMTXATTRS_UNSPECIFIED, &res);
50
-
51
- if (res == MEMTX_OK) {
52
- address_space_stl_le(as, iteaddr + 8, ite.iteh,
53
- MEMTXATTRS_UNSPECIFIED, &res);
54
+ if (ite->valid) {
55
+ itel = FIELD_DP64(itel, ITE_L, VALID, 1);
56
+ itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
57
+ itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
58
+ itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
59
+ itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
60
+ iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
61
}
62
+
63
+ address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
64
if (res != MEMTX_OK) {
65
return false;
66
- } else {
67
- return true;
68
}
69
+ address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
70
+ return res == MEMTX_OK;
67
}
71
}
68
72
69
+void aarch64_add_sve_properties(Object *obj)
73
/*
70
+{
74
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
71
+ uint32_t vq;
72
+
73
+ object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
74
+ cpu_arm_set_sve, NULL, NULL, &error_fatal);
75
+
76
+ for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
77
+ char name[8];
78
+ sprintf(name, "sve%d", vq * 128);
79
+ object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
80
+ cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
81
+ }
82
+}
83
+
84
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
85
* otherwise, a CPU with as many features enabled as our emulation supports.
86
* The version of '-cpu max' for qemu-system-arm is defined in cpu.c;
87
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, Visitor *v, const char *name,
88
static void aarch64_max_initfn(Object *obj)
89
{
90
ARMCPU *cpu = ARM_CPU(obj);
91
- uint32_t vq;
92
- uint64_t t;
93
94
if (kvm_enabled()) {
95
kvm_arm_set_cpu_features_from_host(cpu);
96
- if (kvm_arm_sve_supported(CPU(cpu))) {
97
- t = cpu->isar.id_aa64pfr0;
98
- t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
99
- cpu->isar.id_aa64pfr0 = t;
100
- }
101
} else {
102
+ uint64_t t;
103
uint32_t u;
104
aarch64_a57_initfn(obj);
105
106
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
107
#endif
108
}
75
}
109
76
110
- object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
77
if (cmd == DISCARD) {
111
- cpu_arm_set_sve, NULL, NULL, &error_fatal);
78
- IteEntry itee = {};
112
+ aarch64_add_sve_properties(obj);
79
+ ITEntry ite = {};
113
object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
80
/* remove mapping from interrupt translation table */
114
cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
81
- return update_ite(s, eventid, &dte, itee) ? CMD_CONTINUE : CMD_STALL;
82
+ ite.valid = false;
83
+ return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
84
}
85
return CMD_CONTINUE;
86
}
87
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
88
uint64_t num_eventids;
89
uint32_t num_intids;
90
uint16_t icid = 0;
91
- IteEntry ite = {};
92
DTEntry dte;
93
+ ITEntry ite;
94
95
devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
96
eventid = cmdpkt[1] & EVENTID_MASK;
97
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
98
}
99
100
/* add ite entry to interrupt translation table */
101
- ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, true);
102
- ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
103
- ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
104
- ite.itel = FIELD_DP64(ite.itel, ITE_L, ICID, icid);
105
- ite.iteh = FIELD_DP32(ite.iteh, ITE_H, DOORBELL, INTID_SPURIOUS);
115
-
106
-
116
- for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
107
- return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
117
- char name[8];
108
+ ite.valid = true;
118
- sprintf(name, "sve%d", vq * 128);
109
+ ite.inttype = ITE_INTTYPE_PHYSICAL;
119
- object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
110
+ ite.intid = pIntid;
120
- cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
111
+ ite.icid = icid;
121
- }
112
+ ite.doorbell = INTID_SPURIOUS;
113
+ ite.vpeid = 0;
114
+ return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
122
}
115
}
123
116
124
struct ARMCPUInfo {
117
/*
125
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
118
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
126
index XXXXXXX..XXXXXXX 100644
119
uint32_t devid, eventid;
127
--- a/target/arm/kvm64.c
120
uint16_t new_icid;
128
+++ b/target/arm/kvm64.c
121
uint64_t num_eventids;
129
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
122
- IteEntry ite = {};
130
* and then query that CPU for the relevant ID registers.
123
DTEntry dte;
131
*/
124
CTEntry old_cte, new_cte;
132
int fdarray[3];
125
ITEntry old_ite;
133
+ bool sve_supported;
126
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
134
uint64_t features = 0;
135
+ uint64_t t;
136
int err;
137
138
/* Old kernels may not know about the PREFERRED_TARGET ioctl: however
139
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
140
ARM64_SYS_REG(3, 0, 0, 3, 2));
141
}
127
}
142
128
143
+ sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
129
/* Update the ICID field in the interrupt translation table entry */
144
+
130
- ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, 1);
145
kvm_arm_destroy_scratch_host_vcpu(fdarray);
131
- ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
146
132
- ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, old_ite.intid);
147
if (err < 0) {
133
- ite.itel = FIELD_DP64(ite.itel, ITE_L, ICID, new_icid);
148
return false;
134
- ite.iteh = FIELD_DP32(ite.iteh, ITE_H, DOORBELL, INTID_SPURIOUS);
149
}
135
- return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
150
136
+ old_ite.icid = new_icid;
151
- /* We can assume any KVM supporting CPU is at least a v8
137
+ return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
152
+ /* Add feature bits that can't appear until after VCPU init. */
138
}
153
+ if (sve_supported) {
139
154
+ t = ahcf->isar.id_aa64pfr0;
140
/*
155
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
156
+ ahcf->isar.id_aa64pfr0 = t;
157
+ }
158
+
159
+ /*
160
+ * We can assume any KVM supporting CPU is at least a v8
161
* with VFPv4+Neon; this in turn implies most of the other
162
* feature bits.
163
*/
164
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
165
index XXXXXXX..XXXXXXX 100644
166
--- a/tests/arm-cpu-features.c
167
+++ b/tests/arm-cpu-features.c
168
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
169
"We cannot guarantee the CPU type 'cortex-a15' works "
170
"with KVM on this host", NULL);
171
172
- assert_has_feature(qts, "max", "sve");
173
- resp = do_query_no_props(qts, "max");
174
+ assert_has_feature(qts, "host", "sve");
175
+ resp = do_query_no_props(qts, "host");
176
kvm_supports_sve = resp_get_feature(resp, "sve");
177
vls = resp_get_sve_vls(resp);
178
qobject_unref(resp);
179
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
180
sprintf(max_name, "sve%d", max_vq * 128);
181
182
/* Enabling a supported length is of course fine. */
183
- assert_sve_vls(qts, "max", vls, "{ %s: true }", max_name);
184
+ assert_sve_vls(qts, "host", vls, "{ %s: true }", max_name);
185
186
/* Get the next supported length smaller than max-vq. */
187
vq = 64 - __builtin_clzll(vls & ~BIT_ULL(max_vq - 1));
188
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
189
* We have at least one length smaller than max-vq,
190
* so we can disable max-vq.
191
*/
192
- assert_sve_vls(qts, "max", (vls & ~BIT_ULL(max_vq - 1)),
193
+ assert_sve_vls(qts, "host", (vls & ~BIT_ULL(max_vq - 1)),
194
"{ %s: false }", max_name);
195
196
/*
197
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
198
*/
199
sprintf(name, "sve%d", vq * 128);
200
error = g_strdup_printf("cannot disable %s", name);
201
- assert_error(qts, "max", error,
202
+ assert_error(qts, "host", error,
203
"{ %s: true, %s: false }",
204
max_name, name);
205
g_free(error);
206
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
207
vq = __builtin_ffsll(vls);
208
sprintf(name, "sve%d", vq * 128);
209
error = g_strdup_printf("cannot disable %s", name);
210
- assert_error(qts, "max", error, "{ %s: false }", name);
211
+ assert_error(qts, "host", error, "{ %s: false }", name);
212
g_free(error);
213
214
/* Get an unsupported length. */
215
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
216
if (vq <= SVE_MAX_VQ) {
217
sprintf(name, "sve%d", vq * 128);
218
error = g_strdup_printf("cannot enable %s", name);
219
- assert_error(qts, "max", error, "{ %s: true }", name);
220
+ assert_error(qts, "host", error, "{ %s: true }", name);
221
g_free(error);
222
}
223
} else {
224
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
225
} else {
226
assert_has_not_feature(qts, "host", "aarch64");
227
assert_has_not_feature(qts, "host", "pmu");
228
-
229
- assert_has_not_feature(qts, "max", "sve");
230
+ assert_has_not_feature(qts, "host", "sve");
231
}
232
233
qtest_quit(qts);
234
diff --git a/docs/arm-cpu-features.rst b/docs/arm-cpu-features.rst
235
index XXXXXXX..XXXXXXX 100644
236
--- a/docs/arm-cpu-features.rst
237
+++ b/docs/arm-cpu-features.rst
238
@@ -XXX,XX +XXX,XX @@ SVE CPU Property Examples
239
240
$ qemu-system-aarch64 -M virt -cpu max
241
242
- 3) Only enable the 128-bit vector length::
243
+ 3) When KVM is enabled, implicitly enable all host CPU supported vector
244
+ lengths with the `host` CPU type::
245
+
246
+ $ qemu-system-aarch64 -M virt,accel=kvm -cpu host
247
+
248
+ 4) Only enable the 128-bit vector length::
249
250
$ qemu-system-aarch64 -M virt -cpu max,sve128=on
251
252
- 4) Disable the 512-bit vector length and all larger vector lengths,
253
+ 5) Disable the 512-bit vector length and all larger vector lengths,
254
since 512 is a power-of-two. This results in all the smaller,
255
uninitialized lengths (128, 256, and 384) defaulting to enabled::
256
257
$ qemu-system-aarch64 -M virt -cpu max,sve512=off
258
259
- 5) Enable the 128-bit, 256-bit, and 512-bit vector lengths::
260
+ 6) Enable the 128-bit, 256-bit, and 512-bit vector lengths::
261
262
$ qemu-system-aarch64 -M virt -cpu max,sve128=on,sve256=on,sve512=on
263
264
- 6) The same as (5), but since the 128-bit and 256-bit vector
265
+ 7) The same as (6), but since the 128-bit and 256-bit vector
266
lengths are required for the 512-bit vector length to be enabled,
267
then allow them to be auto-enabled::
268
269
$ qemu-system-aarch64 -M virt -cpu max,sve512=on
270
271
- 7) Do the same as (6), but by first disabling SVE and then re-enabling it::
272
+ 8) Do the same as (7), but by first disabling SVE and then re-enabling it::
273
274
$ qemu-system-aarch64 -M virt -cpu max,sve=off,sve512=on,sve=on
275
276
- 8) Force errors regarding the last vector length::
277
+ 9) Force errors regarding the last vector length::
278
279
$ qemu-system-aarch64 -M virt -cpu max,sve128=off
280
$ qemu-system-aarch64 -M virt -cpu max,sve=off,sve128=off,sve=on
281
@@ -XXX,XX +XXX,XX @@ The examples in "SVE CPU Property Examples" exhibit many ways to select
282
vector lengths which developers may find useful in order to avoid overly
283
verbose command lines. However, the recommended way to select vector
284
lengths is to explicitly enable each desired length. Therefore only
285
-example's (1), (3), and (5) exhibit recommended uses of the properties.
286
+example's (1), (4), and (6) exhibit recommended uses of the properties.
287
288
--
141
--
289
2.20.1
142
2.25.1
290
143
291
144
diff view generated by jsdifflib
New patch
1
Currently we track in the TableDesc and CmdQDesc structs the state of
2
the GITS_BASER<n> and GITS_CBASER Valid bits. However we aren't very
3
consistent abut checking the valid field: we test it in update_cte()
4
and update_dte(), but not anywhere else we look things up in tables.
1
5
6
The GIC specification says that it is UNPREDICTABLE if a guest fails
7
to set any of these Valid bits before enabling the ITS via
8
GITS_CTLR.Enabled. So we can choose to handle Valid == 0 as
9
equivalent to a zero-length table. This is in fact how we're already
10
catching this case in most of the table-access paths: when Valid is 0
11
we leave the num_entries fields in TableDesc or CmdQDesc set to zero,
12
and then the out-of-bounds check "index >= num_entries" that we have
13
to do anyway before doing any of these table lookups will always be
14
true, catching the no-valid-table case without any extra code.
15
16
So we can remove the checks on the valid field from update_cte()
17
and update_dte(): since these happen after the bounds check there
18
was never any case when the test could fail. That means the valid
19
fields would be entirely unused, so just remove them.
20
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
22
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
23
Message-id: 20220201193207.2771604-11-peter.maydell@linaro.org
24
---
25
include/hw/intc/arm_gicv3_its_common.h | 2 --
26
hw/intc/arm_gicv3_its.c | 31 ++++++++++++--------------
27
2 files changed, 14 insertions(+), 19 deletions(-)
28
29
diff --git a/include/hw/intc/arm_gicv3_its_common.h b/include/hw/intc/arm_gicv3_its_common.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/include/hw/intc/arm_gicv3_its_common.h
32
+++ b/include/hw/intc/arm_gicv3_its_common.h
33
@@ -XXX,XX +XXX,XX @@
34
#define GITS_TRANSLATER 0x0040
35
36
typedef struct {
37
- bool valid;
38
bool indirect;
39
uint16_t entry_sz;
40
uint32_t page_sz;
41
@@ -XXX,XX +XXX,XX @@ typedef struct {
42
} TableDesc;
43
44
typedef struct {
45
- bool valid;
46
uint32_t num_entries;
47
uint64_t base_addr;
48
} CmdQDesc;
49
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/hw/intc/arm_gicv3_its.c
52
+++ b/hw/intc/arm_gicv3_its.c
53
@@ -XXX,XX +XXX,XX @@ static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
54
uint64_t cteval = 0;
55
MemTxResult res = MEMTX_OK;
56
57
- if (!s->ct.valid) {
58
- return true;
59
- }
60
-
61
if (cte->valid) {
62
/* add mapping entry to collection table */
63
cteval = FIELD_DP64(cteval, CTE, VALID, 1);
64
@@ -XXX,XX +XXX,XX @@ static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
65
uint64_t dteval = 0;
66
MemTxResult res = MEMTX_OK;
67
68
- if (s->dt.valid) {
69
- if (dte->valid) {
70
- /* add mapping entry to device table */
71
- dteval = FIELD_DP64(dteval, DTE, VALID, 1);
72
- dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
73
- dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
74
- }
75
- } else {
76
- return true;
77
+ if (dte->valid) {
78
+ /* add mapping entry to device table */
79
+ dteval = FIELD_DP64(dteval, DTE, VALID, 1);
80
+ dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
81
+ dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
82
}
83
84
entry_addr = table_entry_addr(s, &s->dt, devid, &res);
85
@@ -XXX,XX +XXX,XX @@ static void extract_table_params(GICv3ITSState *s)
86
}
87
88
memset(td, 0, sizeof(*td));
89
- td->valid = FIELD_EX64(value, GITS_BASER, VALID);
90
/*
91
* If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
92
* interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
93
@@ -XXX,XX +XXX,XX @@ static void extract_table_params(GICv3ITSState *s)
94
* for the register corresponding to the Collection table but we
95
* still have to process interrupts using non-memory-backed
96
* Collection table entries.)
97
+ * The specification makes it UNPREDICTABLE to enable the ITS without
98
+ * marking each BASER<n> as valid. We choose to handle these as if
99
+ * the table was zero-sized, so commands using the table will fail
100
+ * and interrupts requested via GITS_TRANSLATER writes will be ignored.
101
+ * This happens automatically by leaving the num_entries field at
102
+ * zero, which will be caught by the bounds checks we have before
103
+ * every table lookup anyway.
104
*/
105
- if (!td->valid) {
106
+ if (!FIELD_EX64(value, GITS_BASER, VALID)) {
107
continue;
108
}
109
td->page_sz = page_sz;
110
@@ -XXX,XX +XXX,XX @@ static void extract_cmdq_params(GICv3ITSState *s)
111
num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
112
113
memset(&s->cq, 0 , sizeof(s->cq));
114
- s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
115
116
- if (s->cq.valid) {
117
+ if (FIELD_EX64(value, GITS_CBASER, VALID)) {
118
s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
119
GITS_CMDQ_ENTRY_SIZE;
120
s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
121
--
122
2.25.1
123
124
diff view generated by jsdifflib
New patch
1
In the MAPC command, if V=0 this is a request to delete a collection
2
table entry and the rdbase field of the command packet will not be
3
used. In particular, the specification says that the "UNPREDICTABLE
4
if rdbase is not valid" only applies for V=1.
1
5
6
We were doing a check-and-log-guest-error on rdbase regardless of
7
whether the V bit was set, and also (harmlessly but confusingly)
8
storing the contents of the rdbase field into the updated collection
9
table entry. Update the code so that if V=0 we don't check or use
10
the rdbase field value.
11
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20220201193207.2771604-12-peter.maydell@linaro.org
15
---
16
hw/intc/arm_gicv3_its.c | 24 ++++++++++++------------
17
1 file changed, 12 insertions(+), 12 deletions(-)
18
19
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/intc/arm_gicv3_its.c
22
+++ b/hw/intc/arm_gicv3_its.c
23
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
24
CTEntry cte;
25
26
icid = cmdpkt[2] & ICID_MASK;
27
-
28
- cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
29
- cte.rdbase &= RDBASE_PROCNUM_MASK;
30
-
31
cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
32
+ if (cte.valid) {
33
+ cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
34
+ cte.rdbase &= RDBASE_PROCNUM_MASK;
35
+ } else {
36
+ cte.rdbase = 0;
37
+ }
38
39
- if ((icid >= s->ct.num_entries) || (cte.rdbase >= s->gicv3->num_cpu)) {
40
+ if (icid >= s->ct.num_entries) {
41
+ qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%d", icid);
42
+ return CMD_CONTINUE;
43
+ }
44
+ if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
45
qemu_log_mask(LOG_GUEST_ERROR,
46
- "ITS MAPC: invalid collection table attributes "
47
- "icid %d rdbase %u\n", icid, cte.rdbase);
48
- /*
49
- * in this implementation, in case of error
50
- * we ignore this command and move onto the next
51
- * command in the queue
52
- */
53
+ "ITS MAPC: invalid RDBASE %u ", cte.rdbase);
54
return CMD_CONTINUE;
55
}
56
57
--
58
2.25.1
59
60
diff view generated by jsdifflib
New patch
1
When handling MAPI/MAPTI, we allow the supplied interrupt ID to be
2
either 1023 or something in the valid LPI range. This is a mistake:
3
only a real valid LPI is allowed. (The general behaviour of the ITS
4
is that most interrupt ID fields require a value in the LPI range;
5
the exception is that fields specifying a doorbell value, which are
6
all in GICv4 commands, allow also 1023 to mean "no doorbell".)
7
Remove the condition that incorrectly allows 1023 here.
1
8
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20220201193207.2771604-13-peter.maydell@linaro.org
12
---
13
hw/intc/arm_gicv3_its.c | 3 +--
14
1 file changed, 1 insertion(+), 2 deletions(-)
15
16
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/intc/arm_gicv3_its.c
19
+++ b/hw/intc/arm_gicv3_its.c
20
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
21
22
if ((icid >= s->ct.num_entries)
23
|| !dte.valid || (eventid >= num_eventids) ||
24
- (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
25
- (pIntid != INTID_SPURIOUS))) {
26
+ (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)))) {
27
qemu_log_mask(LOG_GUEST_ERROR,
28
"%s: invalid command attributes "
29
"icid %d or eventid %d or pIntid %d or"
30
--
31
2.25.1
32
33
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
In most of the ITS command processing, we check different error
2
possibilities one at a time and log them appropriately. In
3
process_mapti() and process_mapd() we have code which checks
4
multiple error cases at once, which means the logging is less
5
specific than it could be. Split those cases up.
2
6
3
Extend the SVE vq map initialization and validation with KVM's
7
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
4
supported vector lengths when KVM is enabled. In order to determine
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
and select supported lengths we add two new KVM functions for getting
9
Message-id: 20220201193207.2771604-14-peter.maydell@linaro.org
6
and setting the KVM_REG_ARM64_SVE_VLS pseudo-register.
10
---
11
hw/intc/arm_gicv3_its.c | 52 ++++++++++++++++++++++++-----------------
12
1 file changed, 31 insertions(+), 21 deletions(-)
7
13
8
This patch has been co-authored with Richard Henderson, who reworked
14
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
9
the target/arm/cpu64.c changes in order to push all the validation and
10
auto-enabling/disabling steps into the finalizer, resulting in a nice
11
LOC reduction.
12
13
Signed-off-by: Andrew Jones <drjones@redhat.com>
14
Reviewed-by: Eric Auger <eric.auger@redhat.com>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
16
Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
17
Message-id: 20191031142734.8590-9-drjones@redhat.com
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
---
20
target/arm/kvm_arm.h | 12 +++
21
target/arm/cpu64.c | 176 ++++++++++++++++++++++++++++----------
22
target/arm/kvm64.c | 100 +++++++++++++++++++++-
23
tests/arm-cpu-features.c | 104 +++++++++++++++++++++-
24
docs/arm-cpu-features.rst | 45 +++++++---
25
5 files changed, 379 insertions(+), 58 deletions(-)
26
27
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
28
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
29
--- a/target/arm/kvm_arm.h
16
--- a/hw/intc/arm_gicv3_its.c
30
+++ b/target/arm/kvm_arm.h
17
+++ b/hw/intc/arm_gicv3_its.c
31
@@ -XXX,XX +XXX,XX @@ typedef struct ARMHostCPUFeatures {
18
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
32
*/
19
num_eventids = 1ULL << (dte.size + 1);
33
bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
20
num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
34
21
35
+/**
22
- if ((icid >= s->ct.num_entries)
36
+ * kvm_arm_sve_get_vls:
23
- || !dte.valid || (eventid >= num_eventids) ||
37
+ * @cs: CPUState
24
- (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)))) {
38
+ * @map: bitmap to fill in
25
+ if (icid >= s->ct.num_entries) {
39
+ *
26
qemu_log_mask(LOG_GUEST_ERROR,
40
+ * Get all the SVE vector lengths supported by the KVM host, setting
27
- "%s: invalid command attributes "
41
+ * the bits corresponding to their length in quadwords minus one
28
- "icid %d or eventid %d or pIntid %d or"
42
+ * (vq - 1) in @map up to ARM_MAX_VQ.
29
- "unmapped dte %d\n", __func__, icid, eventid,
43
+ */
30
- pIntid, dte.valid);
44
+void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map);
31
- /*
45
+
32
- * in this implementation, in case of error
46
/**
33
- * we ignore this command and move onto the next
47
* kvm_arm_set_cpu_features_from_host:
34
- * command in the queue
48
* @cpu: ARMCPU to set the features for
35
- */
49
@@ -XXX,XX +XXX,XX @@ static inline int kvm_arm_vgic_probe(void)
36
+ "%s: invalid ICID 0x%x >= 0x%x\n",
50
static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq) {}
37
+ __func__, icid, s->ct.num_entries);
51
static inline void kvm_arm_pmu_init(CPUState *cs) {}
38
+ return CMD_CONTINUE;
52
53
+static inline void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map) {}
54
#endif
55
56
static inline const char *gic_class_name(void)
57
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/arm/cpu64.c
60
+++ b/target/arm/cpu64.c
61
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
62
* any of the above. Finally, if SVE is not disabled, then at least one
63
* vector length must be enabled.
64
*/
65
+ DECLARE_BITMAP(kvm_supported, ARM_MAX_VQ);
66
DECLARE_BITMAP(tmp, ARM_MAX_VQ);
67
uint32_t vq, max_vq = 0;
68
69
+ /* Collect the set of vector lengths supported by KVM. */
70
+ bitmap_zero(kvm_supported, ARM_MAX_VQ);
71
+ if (kvm_enabled() && kvm_arm_sve_supported(CPU(cpu))) {
72
+ kvm_arm_sve_get_vls(CPU(cpu), kvm_supported);
73
+ } else if (kvm_enabled()) {
74
+ assert(!cpu_isar_feature(aa64_sve, cpu));
75
+ }
39
+ }
76
+
40
+
77
/*
41
+ if (!dte.valid) {
78
* Process explicit sve<N> properties.
42
+ qemu_log_mask(LOG_GUEST_ERROR,
79
* From the properties, sve_vq_map<N> implies sve_vq_init<N>.
43
+ "%s: no valid DTE for devid 0x%x\n", __func__, devid);
80
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
44
+ return CMD_CONTINUE;
81
return;
82
}
83
84
- /* Propagate enabled bits down through required powers-of-two. */
85
- for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
86
- if (!test_bit(vq - 1, cpu->sve_vq_init)) {
87
- set_bit(vq - 1, cpu->sve_vq_map);
88
+ if (kvm_enabled()) {
89
+ /*
90
+ * For KVM we have to automatically enable all supported unitialized
91
+ * lengths, even when the smaller lengths are not all powers-of-two.
92
+ */
93
+ bitmap_andnot(tmp, kvm_supported, cpu->sve_vq_init, max_vq);
94
+ bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
95
+ } else {
96
+ /* Propagate enabled bits down through required powers-of-two. */
97
+ for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
98
+ if (!test_bit(vq - 1, cpu->sve_vq_init)) {
99
+ set_bit(vq - 1, cpu->sve_vq_map);
100
+ }
101
}
102
}
103
} else if (cpu->sve_max_vq == 0) {
104
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
105
return;
106
}
107
108
- /* Disabling a power-of-two disables all larger lengths. */
109
- if (test_bit(0, cpu->sve_vq_init)) {
110
- error_setg(errp, "cannot disable sve128");
111
- error_append_hint(errp, "Disabling sve128 results in all vector "
112
- "lengths being disabled.\n");
113
- error_append_hint(errp, "With SVE enabled, at least one vector "
114
- "length must be enabled.\n");
115
- return;
116
- }
117
- for (vq = 2; vq <= ARM_MAX_VQ; vq <<= 1) {
118
- if (test_bit(vq - 1, cpu->sve_vq_init)) {
119
- break;
120
+ if (kvm_enabled()) {
121
+ /* Disabling a supported length disables all larger lengths. */
122
+ for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
123
+ if (test_bit(vq - 1, cpu->sve_vq_init) &&
124
+ test_bit(vq - 1, kvm_supported)) {
125
+ break;
126
+ }
127
}
128
+ max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
129
+ bitmap_andnot(cpu->sve_vq_map, kvm_supported,
130
+ cpu->sve_vq_init, max_vq);
131
+ if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) {
132
+ error_setg(errp, "cannot disable sve%d", vq * 128);
133
+ error_append_hint(errp, "Disabling sve%d results in all "
134
+ "vector lengths being disabled.\n",
135
+ vq * 128);
136
+ error_append_hint(errp, "With SVE enabled, at least one "
137
+ "vector length must be enabled.\n");
138
+ return;
139
+ }
140
+ } else {
141
+ /* Disabling a power-of-two disables all larger lengths. */
142
+ if (test_bit(0, cpu->sve_vq_init)) {
143
+ error_setg(errp, "cannot disable sve128");
144
+ error_append_hint(errp, "Disabling sve128 results in all "
145
+ "vector lengths being disabled.\n");
146
+ error_append_hint(errp, "With SVE enabled, at least one "
147
+ "vector length must be enabled.\n");
148
+ return;
149
+ }
150
+ for (vq = 2; vq <= ARM_MAX_VQ; vq <<= 1) {
151
+ if (test_bit(vq - 1, cpu->sve_vq_init)) {
152
+ break;
153
+ }
154
+ }
155
+ max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
156
+ bitmap_complement(cpu->sve_vq_map, cpu->sve_vq_init, max_vq);
157
}
158
- max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
159
160
- bitmap_complement(cpu->sve_vq_map, cpu->sve_vq_init, max_vq);
161
max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
162
}
163
164
@@ -XXX,XX +XXX,XX @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
165
assert(max_vq != 0);
166
bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);
167
168
- /* Ensure all required powers-of-two are enabled. */
169
- for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
170
- if (!test_bit(vq - 1, cpu->sve_vq_map)) {
171
- error_setg(errp, "cannot disable sve%d", vq * 128);
172
- error_append_hint(errp, "sve%d is required as it "
173
- "is a power-of-two length smaller than "
174
- "the maximum, sve%d\n",
175
- vq * 128, max_vq * 128);
176
+ if (kvm_enabled()) {
177
+ /* Ensure the set of lengths matches what KVM supports. */
178
+ bitmap_xor(tmp, cpu->sve_vq_map, kvm_supported, max_vq);
179
+ if (!bitmap_empty(tmp, max_vq)) {
180
+ vq = find_last_bit(tmp, max_vq) + 1;
181
+ if (test_bit(vq - 1, cpu->sve_vq_map)) {
182
+ if (cpu->sve_max_vq) {
183
+ error_setg(errp, "cannot set sve-max-vq=%d",
184
+ cpu->sve_max_vq);
185
+ error_append_hint(errp, "This KVM host does not support "
186
+ "the vector length %d-bits.\n",
187
+ vq * 128);
188
+ error_append_hint(errp, "It may not be possible to use "
189
+ "sve-max-vq with this KVM host. Try "
190
+ "using only sve<N> properties.\n");
191
+ } else {
192
+ error_setg(errp, "cannot enable sve%d", vq * 128);
193
+ error_append_hint(errp, "This KVM host does not support "
194
+ "the vector length %d-bits.\n",
195
+ vq * 128);
196
+ }
197
+ } else {
198
+ error_setg(errp, "cannot disable sve%d", vq * 128);
199
+ error_append_hint(errp, "The KVM host requires all "
200
+ "supported vector lengths smaller "
201
+ "than %d bits to also be enabled.\n",
202
+ max_vq * 128);
203
+ }
204
return;
205
}
206
+ } else {
207
+ /* Ensure all required powers-of-two are enabled. */
208
+ for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
209
+ if (!test_bit(vq - 1, cpu->sve_vq_map)) {
210
+ error_setg(errp, "cannot disable sve%d", vq * 128);
211
+ error_append_hint(errp, "sve%d is required as it "
212
+ "is a power-of-two length smaller than "
213
+ "the maximum, sve%d\n",
214
+ vq * 128, max_vq * 128);
215
+ return;
216
+ }
217
+ }
218
}
219
220
/*
221
@@ -XXX,XX +XXX,XX @@ static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
222
{
223
ARMCPU *cpu = ARM_CPU(obj);
224
Error *err = NULL;
225
+ uint32_t max_vq;
226
227
- visit_type_uint32(v, name, &cpu->sve_max_vq, &err);
228
-
229
- if (!err && (cpu->sve_max_vq == 0 || cpu->sve_max_vq > ARM_MAX_VQ)) {
230
- error_setg(&err, "unsupported SVE vector length");
231
- error_append_hint(&err, "Valid sve-max-vq in range [1-%d]\n",
232
- ARM_MAX_VQ);
233
+ visit_type_uint32(v, name, &max_vq, &err);
234
+ if (err) {
235
+ error_propagate(errp, err);
236
+ return;
237
}
238
- error_propagate(errp, err);
239
+
240
+ if (kvm_enabled() && !kvm_arm_sve_supported(CPU(cpu))) {
241
+ error_setg(errp, "cannot set sve-max-vq");
242
+ error_append_hint(errp, "SVE not supported by KVM on this host\n");
243
+ return;
244
+ }
45
+ }
245
+
46
+
246
+ if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
47
+ if (eventid >= num_eventids) {
247
+ error_setg(errp, "unsupported SVE vector length");
48
+ qemu_log_mask(LOG_GUEST_ERROR,
248
+ error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
49
+ "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
249
+ ARM_MAX_VQ);
50
+ __func__, eventid, num_eventids);
250
+ return;
51
+ return CMD_CONTINUE;
251
+ }
52
+ }
252
+
53
+
253
+ cpu->sve_max_vq = max_vq;
54
+ if (pIntid < GICV3_LPI_INTID_START || pIntid >= num_intids) {
254
}
55
+ qemu_log_mask(LOG_GUEST_ERROR,
255
56
+ "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
256
static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
57
return CMD_CONTINUE;
257
@@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
258
return;
259
}
58
}
260
59
261
+ if (value && kvm_enabled() && !kvm_arm_sve_supported(CPU(cpu))) {
60
@@ -XXX,XX +XXX,XX @@ static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
262
+ error_setg(errp, "cannot enable %s", name);
61
dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
263
+ error_append_hint(errp, "SVE not supported by KVM on this host\n");
62
dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
264
+ return;
63
64
- if ((devid >= s->dt.num_entries) ||
65
- (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
66
+ if (devid >= s->dt.num_entries) {
67
qemu_log_mask(LOG_GUEST_ERROR,
68
- "ITS MAPD: invalid device table attributes "
69
- "devid %d or size %d\n", devid, dte.size);
70
- /*
71
- * in this implementation, in case of error
72
- * we ignore this command and move onto the next
73
- * command in the queue
74
- */
75
+ "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
76
+ devid, s->dt.num_entries);
77
+ return CMD_CONTINUE;
265
+ }
78
+ }
266
+
79
+
267
if (value) {
80
+ if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
268
set_bit(vq - 1, cpu->sve_vq_map);
81
+ qemu_log_mask(LOG_GUEST_ERROR,
269
} else {
82
+ "ITS MAPD: invalid size %d\n", dte.size);
270
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
83
return CMD_CONTINUE;
271
cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
272
cpu->dcz_blocksize = 7; /* 512 bytes */
273
#endif
274
-
275
- object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
276
- cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
277
-
278
- for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
279
- char name[8];
280
- sprintf(name, "sve%d", vq * 128);
281
- object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
282
- cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
283
- }
284
}
84
}
285
85
286
object_property_add(obj, "sve", "bool", cpu_arm_get_sve,
287
cpu_arm_set_sve, NULL, NULL, &error_fatal);
288
+ object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
289
+ cpu_max_set_sve_max_vq, NULL, NULL, &error_fatal);
290
+
291
+ for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
292
+ char name[8];
293
+ sprintf(name, "sve%d", vq * 128);
294
+ object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
295
+ cpu_arm_set_sve_vq, NULL, NULL, &error_fatal);
296
+ }
297
}
298
299
struct ARMCPUInfo {
300
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
301
index XXXXXXX..XXXXXXX 100644
302
--- a/target/arm/kvm64.c
303
+++ b/target/arm/kvm64.c
304
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_sve_supported(CPUState *cpu)
305
return kvm_check_extension(s, KVM_CAP_ARM_SVE);
306
}
307
308
+QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
309
+
310
+void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
311
+{
312
+ /* Only call this function if kvm_arm_sve_supported() returns true. */
313
+ static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
314
+ static bool probed;
315
+ uint32_t vq = 0;
316
+ int i, j;
317
+
318
+ bitmap_clear(map, 0, ARM_MAX_VQ);
319
+
320
+ /*
321
+ * KVM ensures all host CPUs support the same set of vector lengths.
322
+ * So we only need to create the scratch VCPUs once and then cache
323
+ * the results.
324
+ */
325
+ if (!probed) {
326
+ struct kvm_vcpu_init init = {
327
+ .target = -1,
328
+ .features[0] = (1 << KVM_ARM_VCPU_SVE),
329
+ };
330
+ struct kvm_one_reg reg = {
331
+ .id = KVM_REG_ARM64_SVE_VLS,
332
+ .addr = (uint64_t)&vls[0],
333
+ };
334
+ int fdarray[3], ret;
335
+
336
+ probed = true;
337
+
338
+ if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
339
+ error_report("failed to create scratch VCPU with SVE enabled");
340
+ abort();
341
+ }
342
+ ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
343
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
344
+ if (ret) {
345
+ error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
346
+ strerror(errno));
347
+ abort();
348
+ }
349
+
350
+ for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
351
+ if (vls[i]) {
352
+ vq = 64 - clz64(vls[i]) + i * 64;
353
+ break;
354
+ }
355
+ }
356
+ if (vq > ARM_MAX_VQ) {
357
+ warn_report("KVM supports vector lengths larger than "
358
+ "QEMU can enable");
359
+ }
360
+ }
361
+
362
+ for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
363
+ if (!vls[i]) {
364
+ continue;
365
+ }
366
+ for (j = 1; j <= 64; ++j) {
367
+ vq = j + i * 64;
368
+ if (vq > ARM_MAX_VQ) {
369
+ return;
370
+ }
371
+ if (vls[i] & (1UL << (j - 1))) {
372
+ set_bit(vq - 1, map);
373
+ }
374
+ }
375
+ }
376
+}
377
+
378
+static int kvm_arm_sve_set_vls(CPUState *cs)
379
+{
380
+ uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
381
+ struct kvm_one_reg reg = {
382
+ .id = KVM_REG_ARM64_SVE_VLS,
383
+ .addr = (uint64_t)&vls[0],
384
+ };
385
+ ARMCPU *cpu = ARM_CPU(cs);
386
+ uint32_t vq;
387
+ int i, j;
388
+
389
+ assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
390
+
391
+ for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
392
+ if (test_bit(vq - 1, cpu->sve_vq_map)) {
393
+ i = (vq - 1) / 64;
394
+ j = (vq - 1) % 64;
395
+ vls[i] |= 1UL << j;
396
+ }
397
+ }
398
+
399
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
400
+}
401
+
402
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
403
404
int kvm_arch_init_vcpu(CPUState *cs)
405
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
406
407
if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
408
!object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
409
- fprintf(stderr, "KVM is not supported for this guest CPU type\n");
410
+ error_report("KVM is not supported for this guest CPU type");
411
return -EINVAL;
412
}
413
414
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
415
}
416
417
if (cpu_isar_feature(aa64_sve, cpu)) {
418
+ ret = kvm_arm_sve_set_vls(cs);
419
+ if (ret) {
420
+ return ret;
421
+ }
422
ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
423
if (ret) {
424
return ret;
425
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
426
index XXXXXXX..XXXXXXX 100644
427
--- a/tests/arm-cpu-features.c
428
+++ b/tests/arm-cpu-features.c
429
@@ -XXX,XX +XXX,XX @@ static QDict *resp_get_props(QDict *resp)
430
return qdict;
431
}
432
433
+static bool resp_get_feature(QDict *resp, const char *feature)
434
+{
435
+ QDict *props;
436
+
437
+ g_assert(resp);
438
+ g_assert(resp_has_props(resp));
439
+ props = resp_get_props(resp);
440
+ g_assert(qdict_get(props, feature));
441
+ return qdict_get_bool(props, feature);
442
+}
443
+
444
#define assert_has_feature(qts, cpu_type, feature) \
445
({ \
446
QDict *_resp = do_query_no_props(qts, cpu_type); \
447
@@ -XXX,XX +XXX,XX @@ static void sve_tests_sve_off(const void *data)
448
qtest_quit(qts);
449
}
450
451
+static void sve_tests_sve_off_kvm(const void *data)
452
+{
453
+ QTestState *qts;
454
+
455
+ qts = qtest_init(MACHINE_KVM "-cpu max,sve=off");
456
+
457
+ /*
458
+ * We don't know if this host supports SVE so we don't
459
+ * attempt to test enabling anything. We only test that
460
+ * everything is disabled (as it should be with sve=off)
461
+ * and that using sve<N>=off to explicitly disable vector
462
+ * lengths is OK too.
463
+ */
464
+ assert_sve_vls(qts, "max", 0, NULL);
465
+ assert_sve_vls(qts, "max", 0, "{ 'sve128': false }");
466
+
467
+ qtest_quit(qts);
468
+}
469
+
470
static void test_query_cpu_model_expansion(const void *data)
471
{
472
QTestState *qts;
473
@@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data)
474
}
475
476
if (g_str_equal(qtest_get_arch(), "aarch64")) {
477
+ bool kvm_supports_sve;
478
+ char max_name[8], name[8];
479
+ uint32_t max_vq, vq;
480
+ uint64_t vls;
481
+ QDict *resp;
482
+ char *error;
483
+
484
assert_has_feature(qts, "host", "aarch64");
485
assert_has_feature(qts, "host", "pmu");
486
487
- assert_has_feature(qts, "max", "sve");
488
-
489
assert_error(qts, "cortex-a15",
490
"We cannot guarantee the CPU type 'cortex-a15' works "
491
"with KVM on this host", NULL);
492
+
493
+ assert_has_feature(qts, "max", "sve");
494
+ resp = do_query_no_props(qts, "max");
495
+ kvm_supports_sve = resp_get_feature(resp, "sve");
496
+ vls = resp_get_sve_vls(resp);
497
+ qobject_unref(resp);
498
+
499
+ if (kvm_supports_sve) {
500
+ g_assert(vls != 0);
501
+ max_vq = 64 - __builtin_clzll(vls);
502
+ sprintf(max_name, "sve%d", max_vq * 128);
503
+
504
+ /* Enabling a supported length is of course fine. */
505
+ assert_sve_vls(qts, "max", vls, "{ %s: true }", max_name);
506
+
507
+ /* Get the next supported length smaller than max-vq. */
508
+ vq = 64 - __builtin_clzll(vls & ~BIT_ULL(max_vq - 1));
509
+ if (vq) {
510
+ /*
511
+ * We have at least one length smaller than max-vq,
512
+ * so we can disable max-vq.
513
+ */
514
+ assert_sve_vls(qts, "max", (vls & ~BIT_ULL(max_vq - 1)),
515
+ "{ %s: false }", max_name);
516
+
517
+ /*
518
+ * Smaller, supported vector lengths cannot be disabled
519
+ * unless all larger, supported vector lengths are also
520
+ * disabled.
521
+ */
522
+ sprintf(name, "sve%d", vq * 128);
523
+ error = g_strdup_printf("cannot disable %s", name);
524
+ assert_error(qts, "max", error,
525
+ "{ %s: true, %s: false }",
526
+ max_name, name);
527
+ g_free(error);
528
+ }
529
+
530
+ /*
531
+ * The smallest, supported vector length is required, because
532
+ * we need at least one vector length enabled.
533
+ */
534
+ vq = __builtin_ffsll(vls);
535
+ sprintf(name, "sve%d", vq * 128);
536
+ error = g_strdup_printf("cannot disable %s", name);
537
+ assert_error(qts, "max", error, "{ %s: false }", name);
538
+ g_free(error);
539
+
540
+ /* Get an unsupported length. */
541
+ for (vq = 1; vq <= max_vq; ++vq) {
542
+ if (!(vls & BIT_ULL(vq - 1))) {
543
+ break;
544
+ }
545
+ }
546
+ if (vq <= SVE_MAX_VQ) {
547
+ sprintf(name, "sve%d", vq * 128);
548
+ error = g_strdup_printf("cannot enable %s", name);
549
+ assert_error(qts, "max", error, "{ %s: true }", name);
550
+ g_free(error);
551
+ }
552
+ } else {
553
+ g_assert(vls == 0);
554
+ }
555
} else {
556
assert_has_not_feature(qts, "host", "aarch64");
557
assert_has_not_feature(qts, "host", "pmu");
558
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
559
NULL, sve_tests_sve_max_vq_8);
560
qtest_add_data_func("/arm/max/query-cpu-model-expansion/sve-off",
561
NULL, sve_tests_sve_off);
562
+ qtest_add_data_func("/arm/kvm/query-cpu-model-expansion/sve-off",
563
+ NULL, sve_tests_sve_off_kvm);
564
}
565
566
return g_test_run();
567
diff --git a/docs/arm-cpu-features.rst b/docs/arm-cpu-features.rst
568
index XXXXXXX..XXXXXXX 100644
569
--- a/docs/arm-cpu-features.rst
570
+++ b/docs/arm-cpu-features.rst
571
@@ -XXX,XX +XXX,XX @@ SVE CPU Property Dependencies and Constraints
572
573
1) At least one vector length must be enabled when `sve` is enabled.
574
575
- 2) If a vector length `N` is enabled, then all power-of-two vector
576
- lengths smaller than `N` must also be enabled. E.g. if `sve512`
577
- is enabled, then the 128-bit and 256-bit vector lengths must also
578
- be enabled.
579
+ 2) If a vector length `N` is enabled, then, when KVM is enabled, all
580
+ smaller, host supported vector lengths must also be enabled. If
581
+ KVM is not enabled, then only all the smaller, power-of-two vector
582
+ lengths must be enabled. E.g. with KVM if the host supports all
583
+ vector lengths up to 512-bits (128, 256, 384, 512), then if `sve512`
584
+ is enabled, the 128-bit vector length, 256-bit vector length, and
585
+ 384-bit vector length must also be enabled. Without KVM, the 384-bit
586
+ vector length would not be required.
587
+
588
+ 3) If KVM is enabled then only vector lengths that the host CPU type
589
+ support may be enabled. If SVE is not supported by the host, then
590
+ no `sve*` properties may be enabled.
591
592
SVE CPU Property Parsing Semantics
593
----------------------------------
594
@@ -XXX,XX +XXX,XX @@ SVE CPU Property Parsing Semantics
595
an error is generated.
596
597
2) If SVE is enabled (`sve=on`), but no `sve<N>` CPU properties are
598
- provided, then all supported vector lengths are enabled, including
599
- the non-power-of-two lengths.
600
+ provided, then all supported vector lengths are enabled, which when
601
+ KVM is not in use means including the non-power-of-two lengths, and,
602
+ when KVM is in use, it means all vector lengths supported by the host
603
+ processor.
604
605
3) If SVE is enabled, then an error is generated when attempting to
606
disable the last enabled vector length (see constraint (1) of "SVE
607
@@ -XXX,XX +XXX,XX @@ SVE CPU Property Parsing Semantics
608
has been explicitly disabled, then an error is generated (see
609
constraint (2) of "SVE CPU Property Dependencies and Constraints").
610
611
- 5) If one or more `sve<N>` CPU properties are set `off`, but no `sve<N>`,
612
+ 5) When KVM is enabled, if the host does not support SVE, then an error
613
+ is generated when attempting to enable any `sve*` properties (see
614
+ constraint (3) of "SVE CPU Property Dependencies and Constraints").
615
+
616
+ 6) When KVM is enabled, if the host does support SVE, then an error is
617
+ generated when attempting to enable any vector lengths not supported
618
+ by the host (see constraint (3) of "SVE CPU Property Dependencies and
619
+ Constraints").
620
+
621
+ 7) If one or more `sve<N>` CPU properties are set `off`, but no `sve<N>`,
622
CPU properties are set `on`, then the specified vector lengths are
623
disabled but the default for any unspecified lengths remains enabled.
624
- Disabling a power-of-two vector length also disables all vector
625
- lengths larger than the power-of-two length (see constraint (2) of
626
- "SVE CPU Property Dependencies and Constraints").
627
+ When KVM is not enabled, disabling a power-of-two vector length also
628
+ disables all vector lengths larger than the power-of-two length.
629
+ When KVM is enabled, then disabling any supported vector length also
630
+ disables all larger vector lengths (see constraint (2) of "SVE CPU
631
+ Property Dependencies and Constraints").
632
633
- 6) If one or more `sve<N>` CPU properties are set to `on`, then they
634
+ 8) If one or more `sve<N>` CPU properties are set to `on`, then they
635
are enabled and all unspecified lengths default to disabled, except
636
for the required lengths per constraint (2) of "SVE CPU Property
637
Dependencies and Constraints", which will even be auto-enabled if
638
they were not explicitly enabled.
639
640
- 7) If SVE was disabled (`sve=off`), allowing all vector lengths to be
641
+ 9) If SVE was disabled (`sve=off`), allowing all vector lengths to be
642
explicitly disabled (i.e. avoiding the error specified in (3) of
643
"SVE CPU Property Parsing Semantics"), then if later an `sve=on` is
644
provided an error will be generated. To avoid this error, one must
645
--
86
--
646
2.20.1
87
2.25.1
647
88
648
89
diff view generated by jsdifflib
1
From: Andrew Jones <drjones@redhat.com>
1
From: Kevin Townsend <kevin.townsend@linaro.org>
2
2
3
Now that Arm CPUs have advertised features lets add tests to ensure
3
This commit adds emulation of the magnetometer on the LSM303DLHC.
4
we maintain their expected availability with and without KVM.
4
It allows the magnetometer's X, Y and Z outputs to be set via the
5
mag-x, mag-y and mag-z properties, as well as the 12-bit
6
temperature output via the temperature property. Sensor can be
7
enabled with 'CONFIG_LSM303DLHC_MAG=y'.
5
8
6
Signed-off-by: Andrew Jones <drjones@redhat.com>
9
Signed-off-by: Kevin Townsend <kevin.townsend@linaro.org>
7
Reviewed-by: Eric Auger <eric.auger@redhat.com>
10
Message-id: 20220130095032.35392-1-kevin.townsend@linaro.org
8
Message-id: 20191031142734.8590-3-drjones@redhat.com
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
13
---
11
tests/Makefile.include | 5 +-
14
hw/sensor/lsm303dlhc_mag.c | 556 ++++++++++++++++++++++++++++++
12
tests/arm-cpu-features.c | 253 +++++++++++++++++++++++++++++++++++++++
15
tests/qtest/lsm303dlhc-mag-test.c | 148 ++++++++
13
2 files changed, 257 insertions(+), 1 deletion(-)
16
hw/sensor/Kconfig | 4 +
14
create mode 100644 tests/arm-cpu-features.c
17
hw/sensor/meson.build | 1 +
18
tests/qtest/meson.build | 1 +
19
5 files changed, 710 insertions(+)
20
create mode 100644 hw/sensor/lsm303dlhc_mag.c
21
create mode 100644 tests/qtest/lsm303dlhc-mag-test.c
15
22
16
diff --git a/tests/Makefile.include b/tests/Makefile.include
23
diff --git a/hw/sensor/lsm303dlhc_mag.c b/hw/sensor/lsm303dlhc_mag.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/tests/Makefile.include
19
+++ b/tests/Makefile.include
20
@@ -XXX,XX +XXX,XX @@ check-qtest-sparc64-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF)
21
check-qtest-sparc64-y += tests/prom-env-test$(EXESUF)
22
check-qtest-sparc64-y += tests/boot-serial-test$(EXESUF)
23
24
+check-qtest-arm-y += tests/arm-cpu-features$(EXESUF)
25
check-qtest-arm-y += tests/microbit-test$(EXESUF)
26
check-qtest-arm-y += tests/m25p80-test$(EXESUF)
27
check-qtest-arm-y += tests/test-arm-mptimer$(EXESUF)
28
@@ -XXX,XX +XXX,XX @@ check-qtest-arm-y += tests/boot-serial-test$(EXESUF)
29
check-qtest-arm-y += tests/hexloader-test$(EXESUF)
30
check-qtest-arm-$(CONFIG_PFLASH_CFI02) += tests/pflash-cfi02-test$(EXESUF)
31
32
-check-qtest-aarch64-y = tests/numa-test$(EXESUF)
33
+check-qtest-aarch64-y += tests/arm-cpu-features$(EXESUF)
34
+check-qtest-aarch64-y += tests/numa-test$(EXESUF)
35
check-qtest-aarch64-y += tests/boot-serial-test$(EXESUF)
36
check-qtest-aarch64-y += tests/migration-test$(EXESUF)
37
# TODO: once aarch64 TCG is fixed on ARM 32 bit host, make test unconditional
38
@@ -XXX,XX +XXX,XX @@ tests/test-qapi-util$(EXESUF): tests/test-qapi-util.o $(test-util-obj-y)
39
tests/numa-test$(EXESUF): tests/numa-test.o
40
tests/vmgenid-test$(EXESUF): tests/vmgenid-test.o tests/boot-sector.o tests/acpi-utils.o
41
tests/cdrom-test$(EXESUF): tests/cdrom-test.o tests/boot-sector.o $(libqos-obj-y)
42
+tests/arm-cpu-features$(EXESUF): tests/arm-cpu-features.o
43
44
tests/migration/stress$(EXESUF): tests/migration/stress.o
45
    $(call quiet-command, $(LINKPROG) -static -O3 $(PTHREAD_LIB) -o $@ $< ,"LINK","$(TARGET_DIR)$@")
46
diff --git a/tests/arm-cpu-features.c b/tests/arm-cpu-features.c
47
new file mode 100644
24
new file mode 100644
48
index XXXXXXX..XXXXXXX
25
index XXXXXXX..XXXXXXX
49
--- /dev/null
26
--- /dev/null
50
+++ b/tests/arm-cpu-features.c
27
+++ b/hw/sensor/lsm303dlhc_mag.c
51
@@ -XXX,XX +XXX,XX @@
28
@@ -XXX,XX +XXX,XX @@
52
+/*
29
+/*
53
+ * Arm CPU feature test cases
30
+ * LSM303DLHC I2C magnetometer.
54
+ *
31
+ *
55
+ * Copyright (c) 2019 Red Hat Inc.
32
+ * Copyright (C) 2021 Linaro Ltd.
56
+ * Authors:
33
+ * Written by Kevin Townsend <kevin.townsend@linaro.org>
57
+ * Andrew Jones <drjones@redhat.com>
58
+ *
34
+ *
59
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
35
+ * Based on: https://www.st.com/resource/en/datasheet/lsm303dlhc.pdf
60
+ * See the COPYING file in the top-level directory.
36
+ *
61
+ */
37
+ * SPDX-License-Identifier: GPL-2.0-or-later
38
+ */
39
+
40
+/*
41
+ * The I2C address associated with this device is set on the command-line when
42
+ * initialising the machine, but the following address is standard: 0x1E.
43
+ *
44
+ * Get and set functions for 'mag-x', 'mag-y' and 'mag-z' assume that
45
+ * 1 = 0.001 uT. (NOTE the 1 gauss = 100 uT, so setting a value of 100,000
46
+ * would be equal to 1 gauss or 100 uT.)
47
+ *
48
+ * Get and set functions for 'temperature' assume that 1 = 0.001 C, so 23.6 C
49
+ * would be equal to 23600.
50
+ */
51
+
62
+#include "qemu/osdep.h"
52
+#include "qemu/osdep.h"
63
+#include "libqtest.h"
53
+#include "hw/i2c/i2c.h"
54
+#include "migration/vmstate.h"
55
+#include "qapi/error.h"
56
+#include "qapi/visitor.h"
57
+#include "qemu/module.h"
58
+#include "qemu/log.h"
59
+#include "qemu/bswap.h"
60
+
61
+enum LSM303DLHCMagReg {
62
+ LSM303DLHC_MAG_REG_CRA = 0x00,
63
+ LSM303DLHC_MAG_REG_CRB = 0x01,
64
+ LSM303DLHC_MAG_REG_MR = 0x02,
65
+ LSM303DLHC_MAG_REG_OUT_X_H = 0x03,
66
+ LSM303DLHC_MAG_REG_OUT_X_L = 0x04,
67
+ LSM303DLHC_MAG_REG_OUT_Z_H = 0x05,
68
+ LSM303DLHC_MAG_REG_OUT_Z_L = 0x06,
69
+ LSM303DLHC_MAG_REG_OUT_Y_H = 0x07,
70
+ LSM303DLHC_MAG_REG_OUT_Y_L = 0x08,
71
+ LSM303DLHC_MAG_REG_SR = 0x09,
72
+ LSM303DLHC_MAG_REG_IRA = 0x0A,
73
+ LSM303DLHC_MAG_REG_IRB = 0x0B,
74
+ LSM303DLHC_MAG_REG_IRC = 0x0C,
75
+ LSM303DLHC_MAG_REG_TEMP_OUT_H = 0x31,
76
+ LSM303DLHC_MAG_REG_TEMP_OUT_L = 0x32
77
+};
78
+
79
+typedef struct LSM303DLHCMagState {
80
+ I2CSlave parent_obj;
81
+ uint8_t cra;
82
+ uint8_t crb;
83
+ uint8_t mr;
84
+ int16_t x;
85
+ int16_t z;
86
+ int16_t y;
87
+ int16_t x_lock;
88
+ int16_t z_lock;
89
+ int16_t y_lock;
90
+ uint8_t sr;
91
+ uint8_t ira;
92
+ uint8_t irb;
93
+ uint8_t irc;
94
+ int16_t temperature;
95
+ int16_t temperature_lock;
96
+ uint8_t len;
97
+ uint8_t buf;
98
+ uint8_t pointer;
99
+} LSM303DLHCMagState;
100
+
101
+#define TYPE_LSM303DLHC_MAG "lsm303dlhc_mag"
102
+OBJECT_DECLARE_SIMPLE_TYPE(LSM303DLHCMagState, LSM303DLHC_MAG)
103
+
104
+/*
105
+ * Conversion factor from Gauss to sensor values for each GN gain setting,
106
+ * in units "lsb per Gauss" (see data sheet table 3). There is no documented
107
+ * behaviour if the GN setting in CRB is incorrectly set to 0b000;
108
+ * we arbitrarily make it the same as 0b001.
109
+ */
110
+uint32_t xy_gain[] = { 1100, 1100, 855, 670, 450, 400, 330, 230 };
111
+uint32_t z_gain[] = { 980, 980, 760, 600, 400, 355, 295, 205 };
112
+
113
+static void lsm303dlhc_mag_get_x(Object *obj, Visitor *v, const char *name,
114
+ void *opaque, Error **errp)
115
+{
116
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(obj);
117
+ int gm = extract32(s->crb, 5, 3);
118
+
119
+ /* Convert to uT where 1000 = 1 uT. Conversion factor depends on gain. */
120
+ int64_t value = muldiv64(s->x, 100000, xy_gain[gm]);
121
+ visit_type_int(v, name, &value, errp);
122
+}
123
+
124
+static void lsm303dlhc_mag_get_y(Object *obj, Visitor *v, const char *name,
125
+ void *opaque, Error **errp)
126
+{
127
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(obj);
128
+ int gm = extract32(s->crb, 5, 3);
129
+
130
+ /* Convert to uT where 1000 = 1 uT. Conversion factor depends on gain. */
131
+ int64_t value = muldiv64(s->y, 100000, xy_gain[gm]);
132
+ visit_type_int(v, name, &value, errp);
133
+}
134
+
135
+static void lsm303dlhc_mag_get_z(Object *obj, Visitor *v, const char *name,
136
+ void *opaque, Error **errp)
137
+{
138
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(obj);
139
+ int gm = extract32(s->crb, 5, 3);
140
+
141
+ /* Convert to uT where 1000 = 1 uT. Conversion factor depends on gain. */
142
+ int64_t value = muldiv64(s->z, 100000, z_gain[gm]);
143
+ visit_type_int(v, name, &value, errp);
144
+}
145
+
146
+static void lsm303dlhc_mag_set_x(Object *obj, Visitor *v, const char *name,
147
+ void *opaque, Error **errp)
148
+{
149
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(obj);
150
+ int64_t value;
151
+ int64_t reg;
152
+ int gm = extract32(s->crb, 5, 3);
153
+
154
+ if (!visit_type_int(v, name, &value, errp)) {
155
+ return;
156
+ }
157
+
158
+ reg = muldiv64(value, xy_gain[gm], 100000);
159
+
160
+ /* Make sure we are within a 12-bit limit. */
161
+ if (reg > 2047 || reg < -2048) {
162
+ error_setg(errp, "value %" PRId64 " out of register's range", value);
163
+ return;
164
+ }
165
+
166
+ s->x = (int16_t)reg;
167
+}
168
+
169
+static void lsm303dlhc_mag_set_y(Object *obj, Visitor *v, const char *name,
170
+ void *opaque, Error **errp)
171
+{
172
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(obj);
173
+ int64_t value;
174
+ int64_t reg;
175
+ int gm = extract32(s->crb, 5, 3);
176
+
177
+ if (!visit_type_int(v, name, &value, errp)) {
178
+ return;
179
+ }
180
+
181
+ reg = muldiv64(value, xy_gain[gm], 100000);
182
+
183
+ /* Make sure we are within a 12-bit limit. */
184
+ if (reg > 2047 || reg < -2048) {
185
+ error_setg(errp, "value %" PRId64 " out of register's range", value);
186
+ return;
187
+ }
188
+
189
+ s->y = (int16_t)reg;
190
+}
191
+
192
+static void lsm303dlhc_mag_set_z(Object *obj, Visitor *v, const char *name,
193
+ void *opaque, Error **errp)
194
+{
195
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(obj);
196
+ int64_t value;
197
+ int64_t reg;
198
+ int gm = extract32(s->crb, 5, 3);
199
+
200
+ if (!visit_type_int(v, name, &value, errp)) {
201
+ return;
202
+ }
203
+
204
+ reg = muldiv64(value, z_gain[gm], 100000);
205
+
206
+ /* Make sure we are within a 12-bit limit. */
207
+ if (reg > 2047 || reg < -2048) {
208
+ error_setg(errp, "value %" PRId64 " out of register's range", value);
209
+ return;
210
+ }
211
+
212
+ s->z = (int16_t)reg;
213
+}
214
+
215
+/*
216
+ * Get handler for the temperature property.
217
+ */
218
+static void lsm303dlhc_mag_get_temperature(Object *obj, Visitor *v,
219
+ const char *name, void *opaque,
220
+ Error **errp)
221
+{
222
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(obj);
223
+ int64_t value;
224
+
225
+ /* Convert to 1 lsb = 0.125 C to 1 = 0.001 C for 'temperature' property. */
226
+ value = s->temperature * 125;
227
+
228
+ visit_type_int(v, name, &value, errp);
229
+}
230
+
231
+/*
232
+ * Set handler for the temperature property.
233
+ */
234
+static void lsm303dlhc_mag_set_temperature(Object *obj, Visitor *v,
235
+ const char *name, void *opaque,
236
+ Error **errp)
237
+{
238
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(obj);
239
+ int64_t value;
240
+
241
+ if (!visit_type_int(v, name, &value, errp)) {
242
+ return;
243
+ }
244
+
245
+ /* Input temperature is in 0.001 C units. Convert to 1 lsb = 0.125 C. */
246
+ value /= 125;
247
+
248
+ if (value > 2047 || value < -2048) {
249
+ error_setg(errp, "value %" PRId64 " lsb is out of range", value);
250
+ return;
251
+ }
252
+
253
+ s->temperature = (int16_t)value;
254
+}
255
+
256
+/*
257
+ * Callback handler whenever a 'I2C_START_RECV' (read) event is received.
258
+ */
259
+static void lsm303dlhc_mag_read(LSM303DLHCMagState *s)
260
+{
261
+ /*
262
+ * Set the LOCK bit whenever a new read attempt is made. This will be
263
+ * cleared in I2C_FINISH. Note that DRDY is always set to 1 in this driver.
264
+ */
265
+ s->sr = 0x3;
266
+
267
+ /*
268
+ * Copy the current X/Y/Z and temp. values into the locked registers so
269
+ * that 'mag-x', 'mag-y', 'mag-z' and 'temperature' can continue to be
270
+ * updated via QOM, etc., without corrupting the current read event.
271
+ */
272
+ s->x_lock = s->x;
273
+ s->z_lock = s->z;
274
+ s->y_lock = s->y;
275
+ s->temperature_lock = s->temperature;
276
+}
277
+
278
+/*
279
+ * Callback handler whenever a 'I2C_FINISH' event is received.
280
+ */
281
+static void lsm303dlhc_mag_finish(LSM303DLHCMagState *s)
282
+{
283
+ /*
284
+ * Clear the LOCK bit when the read attempt terminates.
285
+ * This bit is initially set in the I2C_START_RECV handler.
286
+ */
287
+ s->sr = 0x1;
288
+}
289
+
290
+/*
291
+ * Callback handler when a device attempts to write to a register.
292
+ */
293
+static void lsm303dlhc_mag_write(LSM303DLHCMagState *s)
294
+{
295
+ switch (s->pointer) {
296
+ case LSM303DLHC_MAG_REG_CRA:
297
+ s->cra = s->buf;
298
+ break;
299
+ case LSM303DLHC_MAG_REG_CRB:
300
+ /* Make sure gain is at least 1, falling back to 1 on an error. */
301
+ if (s->buf >> 5 == 0) {
302
+ s->buf = 1 << 5;
303
+ }
304
+ s->crb = s->buf;
305
+ break;
306
+ case LSM303DLHC_MAG_REG_MR:
307
+ s->mr = s->buf;
308
+ break;
309
+ case LSM303DLHC_MAG_REG_SR:
310
+ s->sr = s->buf;
311
+ break;
312
+ case LSM303DLHC_MAG_REG_IRA:
313
+ s->ira = s->buf;
314
+ break;
315
+ case LSM303DLHC_MAG_REG_IRB:
316
+ s->irb = s->buf;
317
+ break;
318
+ case LSM303DLHC_MAG_REG_IRC:
319
+ s->irc = s->buf;
320
+ break;
321
+ default:
322
+ qemu_log_mask(LOG_GUEST_ERROR, "reg is read-only: 0x%02X", s->buf);
323
+ break;
324
+ }
325
+}
326
+
327
+/*
328
+ * Low-level master-to-slave transaction handler.
329
+ */
330
+static int lsm303dlhc_mag_send(I2CSlave *i2c, uint8_t data)
331
+{
332
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(i2c);
333
+
334
+ if (s->len == 0) {
335
+ /* First byte is the reg pointer */
336
+ s->pointer = data;
337
+ s->len++;
338
+ } else if (s->len == 1) {
339
+ /* Second byte is the new register value. */
340
+ s->buf = data;
341
+ lsm303dlhc_mag_write(s);
342
+ } else {
343
+ g_assert_not_reached();
344
+ }
345
+
346
+ return 0;
347
+}
348
+
349
+/*
350
+ * Low-level slave-to-master transaction handler (read attempts).
351
+ */
352
+static uint8_t lsm303dlhc_mag_recv(I2CSlave *i2c)
353
+{
354
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(i2c);
355
+ uint8_t resp;
356
+
357
+ switch (s->pointer) {
358
+ case LSM303DLHC_MAG_REG_CRA:
359
+ resp = s->cra;
360
+ break;
361
+ case LSM303DLHC_MAG_REG_CRB:
362
+ resp = s->crb;
363
+ break;
364
+ case LSM303DLHC_MAG_REG_MR:
365
+ resp = s->mr;
366
+ break;
367
+ case LSM303DLHC_MAG_REG_OUT_X_H:
368
+ resp = (uint8_t)(s->x_lock >> 8);
369
+ break;
370
+ case LSM303DLHC_MAG_REG_OUT_X_L:
371
+ resp = (uint8_t)(s->x_lock);
372
+ break;
373
+ case LSM303DLHC_MAG_REG_OUT_Z_H:
374
+ resp = (uint8_t)(s->z_lock >> 8);
375
+ break;
376
+ case LSM303DLHC_MAG_REG_OUT_Z_L:
377
+ resp = (uint8_t)(s->z_lock);
378
+ break;
379
+ case LSM303DLHC_MAG_REG_OUT_Y_H:
380
+ resp = (uint8_t)(s->y_lock >> 8);
381
+ break;
382
+ case LSM303DLHC_MAG_REG_OUT_Y_L:
383
+ resp = (uint8_t)(s->y_lock);
384
+ break;
385
+ case LSM303DLHC_MAG_REG_SR:
386
+ resp = s->sr;
387
+ break;
388
+ case LSM303DLHC_MAG_REG_IRA:
389
+ resp = s->ira;
390
+ break;
391
+ case LSM303DLHC_MAG_REG_IRB:
392
+ resp = s->irb;
393
+ break;
394
+ case LSM303DLHC_MAG_REG_IRC:
395
+ resp = s->irc;
396
+ break;
397
+ case LSM303DLHC_MAG_REG_TEMP_OUT_H:
398
+ /* Check if the temperature sensor is enabled or not (CRA & 0x80). */
399
+ if (s->cra & 0x80) {
400
+ resp = (uint8_t)(s->temperature_lock >> 8);
401
+ } else {
402
+ resp = 0;
403
+ }
404
+ break;
405
+ case LSM303DLHC_MAG_REG_TEMP_OUT_L:
406
+ if (s->cra & 0x80) {
407
+ resp = (uint8_t)(s->temperature_lock & 0xff);
408
+ } else {
409
+ resp = 0;
410
+ }
411
+ break;
412
+ default:
413
+ resp = 0;
414
+ break;
415
+ }
416
+
417
+ /*
418
+ * The address pointer on the LSM303DLHC auto-increments whenever a byte
419
+ * is read, without the master device having to request the next address.
420
+ *
421
+ * The auto-increment process has the following logic:
422
+ *
423
+ * - if (s->pointer == 8) then s->pointer = 3
424
+ * - else: if (s->pointer == 12) then s->pointer = 0
425
+ * - else: s->pointer += 1
426
+ *
427
+ * Reading an invalid address return 0.
428
+ */
429
+ if (s->pointer == LSM303DLHC_MAG_REG_OUT_Y_L) {
430
+ s->pointer = LSM303DLHC_MAG_REG_OUT_X_H;
431
+ } else if (s->pointer == LSM303DLHC_MAG_REG_IRC) {
432
+ s->pointer = LSM303DLHC_MAG_REG_CRA;
433
+ } else {
434
+ s->pointer++;
435
+ }
436
+
437
+ return resp;
438
+}
439
+
440
+/*
441
+ * Bus state change handler.
442
+ */
443
+static int lsm303dlhc_mag_event(I2CSlave *i2c, enum i2c_event event)
444
+{
445
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(i2c);
446
+
447
+ switch (event) {
448
+ case I2C_START_SEND:
449
+ break;
450
+ case I2C_START_RECV:
451
+ lsm303dlhc_mag_read(s);
452
+ break;
453
+ case I2C_FINISH:
454
+ lsm303dlhc_mag_finish(s);
455
+ break;
456
+ case I2C_NACK:
457
+ break;
458
+ }
459
+
460
+ s->len = 0;
461
+ return 0;
462
+}
463
+
464
+/*
465
+ * Device data description using VMSTATE macros.
466
+ */
467
+static const VMStateDescription vmstate_lsm303dlhc_mag = {
468
+ .name = "LSM303DLHC_MAG",
469
+ .version_id = 0,
470
+ .minimum_version_id = 0,
471
+ .fields = (VMStateField[]) {
472
+
473
+ VMSTATE_I2C_SLAVE(parent_obj, LSM303DLHCMagState),
474
+ VMSTATE_UINT8(len, LSM303DLHCMagState),
475
+ VMSTATE_UINT8(buf, LSM303DLHCMagState),
476
+ VMSTATE_UINT8(pointer, LSM303DLHCMagState),
477
+ VMSTATE_UINT8(cra, LSM303DLHCMagState),
478
+ VMSTATE_UINT8(crb, LSM303DLHCMagState),
479
+ VMSTATE_UINT8(mr, LSM303DLHCMagState),
480
+ VMSTATE_INT16(x, LSM303DLHCMagState),
481
+ VMSTATE_INT16(z, LSM303DLHCMagState),
482
+ VMSTATE_INT16(y, LSM303DLHCMagState),
483
+ VMSTATE_INT16(x_lock, LSM303DLHCMagState),
484
+ VMSTATE_INT16(z_lock, LSM303DLHCMagState),
485
+ VMSTATE_INT16(y_lock, LSM303DLHCMagState),
486
+ VMSTATE_UINT8(sr, LSM303DLHCMagState),
487
+ VMSTATE_UINT8(ira, LSM303DLHCMagState),
488
+ VMSTATE_UINT8(irb, LSM303DLHCMagState),
489
+ VMSTATE_UINT8(irc, LSM303DLHCMagState),
490
+ VMSTATE_INT16(temperature, LSM303DLHCMagState),
491
+ VMSTATE_INT16(temperature_lock, LSM303DLHCMagState),
492
+ VMSTATE_END_OF_LIST()
493
+ }
494
+};
495
+
496
+/*
497
+ * Put the device into post-reset default state.
498
+ */
499
+static void lsm303dlhc_mag_default_cfg(LSM303DLHCMagState *s)
500
+{
501
+ /* Set the device into is default reset state. */
502
+ s->len = 0;
503
+ s->pointer = 0; /* Current register. */
504
+ s->buf = 0; /* Shared buffer. */
505
+ s->cra = 0x10; /* Temp Enabled = 0, Data Rate = 15.0 Hz. */
506
+ s->crb = 0x20; /* Gain = +/- 1.3 Gauss. */
507
+ s->mr = 0x3; /* Operating Mode = Sleep. */
508
+ s->x = 0;
509
+ s->z = 0;
510
+ s->y = 0;
511
+ s->x_lock = 0;
512
+ s->z_lock = 0;
513
+ s->y_lock = 0;
514
+ s->sr = 0x1; /* DRDY = 1. */
515
+ s->ira = 0x48;
516
+ s->irb = 0x34;
517
+ s->irc = 0x33;
518
+ s->temperature = 0; /* Default to 0 degrees C (0/8 lsb = 0 C). */
519
+ s->temperature_lock = 0;
520
+}
521
+
522
+/*
523
+ * Callback handler when DeviceState 'reset' is set to true.
524
+ */
525
+static void lsm303dlhc_mag_reset(DeviceState *dev)
526
+{
527
+ I2CSlave *i2c = I2C_SLAVE(dev);
528
+ LSM303DLHCMagState *s = LSM303DLHC_MAG(i2c);
529
+
530
+ /* Set the device into its default reset state. */
531
+ lsm303dlhc_mag_default_cfg(s);
532
+}
533
+
534
+/*
535
+ * Initialisation of any public properties.
536
+ */
537
+static void lsm303dlhc_mag_initfn(Object *obj)
538
+{
539
+ object_property_add(obj, "mag-x", "int",
540
+ lsm303dlhc_mag_get_x,
541
+ lsm303dlhc_mag_set_x, NULL, NULL);
542
+
543
+ object_property_add(obj, "mag-y", "int",
544
+ lsm303dlhc_mag_get_y,
545
+ lsm303dlhc_mag_set_y, NULL, NULL);
546
+
547
+ object_property_add(obj, "mag-z", "int",
548
+ lsm303dlhc_mag_get_z,
549
+ lsm303dlhc_mag_set_z, NULL, NULL);
550
+
551
+ object_property_add(obj, "temperature", "int",
552
+ lsm303dlhc_mag_get_temperature,
553
+ lsm303dlhc_mag_set_temperature, NULL, NULL);
554
+}
555
+
556
+/*
557
+ * Set the virtual method pointers (bus state change, tx/rx, etc.).
558
+ */
559
+static void lsm303dlhc_mag_class_init(ObjectClass *klass, void *data)
560
+{
561
+ DeviceClass *dc = DEVICE_CLASS(klass);
562
+ I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
563
+
564
+ dc->reset = lsm303dlhc_mag_reset;
565
+ dc->vmsd = &vmstate_lsm303dlhc_mag;
566
+ k->event = lsm303dlhc_mag_event;
567
+ k->recv = lsm303dlhc_mag_recv;
568
+ k->send = lsm303dlhc_mag_send;
569
+}
570
+
571
+static const TypeInfo lsm303dlhc_mag_info = {
572
+ .name = TYPE_LSM303DLHC_MAG,
573
+ .parent = TYPE_I2C_SLAVE,
574
+ .instance_size = sizeof(LSM303DLHCMagState),
575
+ .instance_init = lsm303dlhc_mag_initfn,
576
+ .class_init = lsm303dlhc_mag_class_init,
577
+};
578
+
579
+static void lsm303dlhc_mag_register_types(void)
580
+{
581
+ type_register_static(&lsm303dlhc_mag_info);
582
+}
583
+
584
+type_init(lsm303dlhc_mag_register_types)
585
diff --git a/tests/qtest/lsm303dlhc-mag-test.c b/tests/qtest/lsm303dlhc-mag-test.c
586
new file mode 100644
587
index XXXXXXX..XXXXXXX
588
--- /dev/null
589
+++ b/tests/qtest/lsm303dlhc-mag-test.c
590
@@ -XXX,XX +XXX,XX @@
591
+/*
592
+ * QTest testcase for the LSM303DLHC I2C magnetometer
593
+ *
594
+ * Copyright (C) 2021 Linaro Ltd.
595
+ * Written by Kevin Townsend <kevin.townsend@linaro.org>
596
+ *
597
+ * Based on: https://www.st.com/resource/en/datasheet/lsm303dlhc.pdf
598
+ *
599
+ * SPDX-License-Identifier: GPL-2.0-or-later
600
+ */
601
+
602
+#include "qemu/osdep.h"
603
+#include "libqtest-single.h"
604
+#include "libqos/qgraph.h"
605
+#include "libqos/i2c.h"
64
+#include "qapi/qmp/qdict.h"
606
+#include "qapi/qmp/qdict.h"
65
+#include "qapi/qmp/qjson.h"
607
+
66
+
608
+#define LSM303DLHC_MAG_TEST_ID "lsm303dlhc_mag-test"
67
+#define MACHINE "-machine virt,gic-version=max,accel=tcg "
609
+#define LSM303DLHC_MAG_REG_CRA 0x00
68
+#define MACHINE_KVM "-machine virt,gic-version=max,accel=kvm:tcg "
610
+#define LSM303DLHC_MAG_REG_CRB 0x01
69
+#define QUERY_HEAD "{ 'execute': 'query-cpu-model-expansion', " \
611
+#define LSM303DLHC_MAG_REG_OUT_X_H 0x03
70
+ " 'arguments': { 'type': 'full', "
612
+#define LSM303DLHC_MAG_REG_OUT_Z_H 0x05
71
+#define QUERY_TAIL "}}"
613
+#define LSM303DLHC_MAG_REG_OUT_Y_H 0x07
72
+
614
+#define LSM303DLHC_MAG_REG_IRC 0x0C
73
+static bool kvm_enabled(QTestState *qts)
615
+#define LSM303DLHC_MAG_REG_TEMP_OUT_H 0x31
74
+{
616
+
75
+ QDict *resp, *qdict;
617
+static int qmp_lsm303dlhc_mag_get_property(const char *id, const char *prop)
76
+ bool enabled;
618
+{
77
+
619
+ QDict *response;
78
+ resp = qtest_qmp(qts, "{ 'execute': 'query-kvm' }");
620
+ int ret;
79
+ g_assert(qdict_haskey(resp, "return"));
621
+
80
+ qdict = qdict_get_qdict(resp, "return");
622
+ response = qmp("{ 'execute': 'qom-get', 'arguments': { 'path': %s, "
81
+ g_assert(qdict_haskey(qdict, "enabled"));
623
+ "'property': %s } }", id, prop);
82
+ enabled = qdict_get_bool(qdict, "enabled");
624
+ g_assert(qdict_haskey(response, "return"));
83
+ qobject_unref(resp);
625
+ ret = qdict_get_int(response, "return");
84
+
626
+ qobject_unref(response);
85
+ return enabled;
627
+ return ret;
86
+}
628
+}
87
+
629
+
88
+static QDict *do_query_no_props(QTestState *qts, const char *cpu_type)
630
+static void qmp_lsm303dlhc_mag_set_property(const char *id, const char *prop,
89
+{
631
+ int value)
90
+ return qtest_qmp(qts, QUERY_HEAD "'model': { 'name': %s }"
632
+{
91
+ QUERY_TAIL, cpu_type);
633
+ QDict *response;
92
+}
634
+
93
+
635
+ response = qmp("{ 'execute': 'qom-set', 'arguments': { 'path': %s, "
94
+static QDict *do_query(QTestState *qts, const char *cpu_type,
636
+ "'property': %s, 'value': %d } }", id, prop, value);
95
+ const char *fmt, ...)
637
+ g_assert(qdict_haskey(response, "return"));
96
+{
638
+ qobject_unref(response);
97
+ QDict *resp;
639
+}
98
+
640
+
99
+ if (fmt) {
641
+static void send_and_receive(void *obj, void *data, QGuestAllocator *alloc)
100
+ QDict *args;
642
+{
101
+ va_list ap;
643
+ int64_t value;
102
+
644
+ QI2CDevice *i2cdev = (QI2CDevice *)obj;
103
+ va_start(ap, fmt);
645
+
104
+ args = qdict_from_vjsonf_nofail(fmt, ap);
646
+ /* Check default value for CRB */
105
+ va_end(ap);
647
+ g_assert_cmphex(i2c_get8(i2cdev, LSM303DLHC_MAG_REG_CRB), ==, 0x20);
106
+
648
+
107
+ resp = qtest_qmp(qts, QUERY_HEAD "'model': { 'name': %s, "
649
+ /* Set x to 1.0 gauss and verify the value */
108
+ "'props': %p }"
650
+ qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, "mag-x", 100000);
109
+ QUERY_TAIL, cpu_type, args);
651
+ value = qmp_lsm303dlhc_mag_get_property(
110
+ } else {
652
+ LSM303DLHC_MAG_TEST_ID, "mag-x");
111
+ resp = do_query_no_props(qts, cpu_type);
653
+ g_assert_cmpint(value, ==, 100000);
112
+ }
654
+
113
+
655
+ /* Set y to 1.5 gauss and verify the value */
114
+ return resp;
656
+ qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, "mag-y", 150000);
115
+}
657
+ value = qmp_lsm303dlhc_mag_get_property(
116
+
658
+ LSM303DLHC_MAG_TEST_ID, "mag-y");
117
+static const char *resp_get_error(QDict *resp)
659
+ g_assert_cmpint(value, ==, 150000);
118
+{
660
+
119
+ QDict *qdict;
661
+ /* Set z to 0.5 gauss and verify the value */
120
+
662
+ qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, "mag-z", 50000);
121
+ g_assert(resp);
663
+ value = qmp_lsm303dlhc_mag_get_property(
122
+
664
+ LSM303DLHC_MAG_TEST_ID, "mag-z");
123
+ qdict = qdict_get_qdict(resp, "error");
665
+ g_assert_cmpint(value, ==, 50000);
124
+ if (qdict) {
666
+
125
+ return qdict_get_str(qdict, "desc");
667
+ /* Set temperature to 23.6 C and verify the value */
126
+ }
668
+ qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID,
127
+
669
+ "temperature", 23600);
128
+ return NULL;
670
+ value = qmp_lsm303dlhc_mag_get_property(
129
+}
671
+ LSM303DLHC_MAG_TEST_ID, "temperature");
130
+
672
+ /* Should return 23.5 C due to 0.125°C steps. */
131
+#define assert_error(qts, cpu_type, expected_error, fmt, ...) \
673
+ g_assert_cmpint(value, ==, 23500);
132
+({ \
674
+
133
+ QDict *_resp; \
675
+ /* Read raw x axis registers (1 gauss = 1100 at +/-1.3 g gain) */
134
+ const char *_error; \
676
+ value = i2c_get16(i2cdev, LSM303DLHC_MAG_REG_OUT_X_H);
135
+ \
677
+ g_assert_cmphex(value, ==, 1100);
136
+ _resp = do_query(qts, cpu_type, fmt, ##__VA_ARGS__); \
678
+
137
+ g_assert(_resp); \
679
+ /* Read raw y axis registers (1.5 gauss = 1650 at +/- 1.3 g gain = ) */
138
+ _error = resp_get_error(_resp); \
680
+ value = i2c_get16(i2cdev, LSM303DLHC_MAG_REG_OUT_Y_H);
139
+ g_assert(_error); \
681
+ g_assert_cmphex(value, ==, 1650);
140
+ g_assert(g_str_equal(_error, expected_error)); \
682
+
141
+ qobject_unref(_resp); \
683
+ /* Read raw z axis registers (0.5 gauss = 490 at +/- 1.3 g gain = ) */
142
+})
684
+ value = i2c_get16(i2cdev, LSM303DLHC_MAG_REG_OUT_Z_H);
143
+
685
+ g_assert_cmphex(value, ==, 490);
144
+static bool resp_has_props(QDict *resp)
686
+
145
+{
687
+ /* Read raw temperature registers with temp disabled (CRA = 0x10) */
146
+ QDict *qdict;
688
+ value = i2c_get16(i2cdev, LSM303DLHC_MAG_REG_TEMP_OUT_H);
147
+
689
+ g_assert_cmphex(value, ==, 0);
148
+ g_assert(resp);
690
+
149
+
691
+ /* Enable temperature reads (CRA = 0x90) */
150
+ if (!qdict_haskey(resp, "return")) {
692
+ i2c_set8(i2cdev, LSM303DLHC_MAG_REG_CRA, 0x90);
151
+ return false;
693
+
152
+ }
694
+ /* Read raw temp registers (23.5 C = 188 at 1 lsb = 0.125 C) */
153
+ qdict = qdict_get_qdict(resp, "return");
695
+ value = i2c_get16(i2cdev, LSM303DLHC_MAG_REG_TEMP_OUT_H);
154
+
696
+ g_assert_cmphex(value, ==, 188);
155
+ if (!qdict_haskey(qdict, "model")) {
697
+}
156
+ return false;
698
+
157
+ }
699
+static void reg_wraparound(void *obj, void *data, QGuestAllocator *alloc)
158
+ qdict = qdict_get_qdict(qdict, "model");
700
+{
159
+
701
+ uint8_t value[4];
160
+ return qdict_haskey(qdict, "props");
702
+ QI2CDevice *i2cdev = (QI2CDevice *)obj;
161
+}
703
+
162
+
704
+ /* Set x to 1.0 gauss, and y to 1.5 gauss for known test values */
163
+static QDict *resp_get_props(QDict *resp)
705
+ qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, "mag-x", 100000);
164
+{
706
+ qmp_lsm303dlhc_mag_set_property(LSM303DLHC_MAG_TEST_ID, "mag-y", 150000);
165
+ QDict *qdict;
707
+
166
+
708
+ /* Check that requesting 4 bytes starting at Y_H wraps around to X_L */
167
+ g_assert(resp);
709
+ i2c_read_block(i2cdev, LSM303DLHC_MAG_REG_OUT_Y_H, value, 4);
168
+ g_assert(resp_has_props(resp));
710
+ /* 1.5 gauss = 1650 lsb = 0x672 */
169
+
711
+ g_assert_cmphex(value[0], ==, 0x06);
170
+ qdict = qdict_get_qdict(resp, "return");
712
+ g_assert_cmphex(value[1], ==, 0x72);
171
+ qdict = qdict_get_qdict(qdict, "model");
713
+ /* 1.0 gauss = 1100 lsb = 0x44C */
172
+ qdict = qdict_get_qdict(qdict, "props");
714
+ g_assert_cmphex(value[2], ==, 0x04);
173
+
715
+ g_assert_cmphex(value[3], ==, 0x4C);
174
+ return qdict;
716
+
175
+}
717
+ /* Check that requesting LSM303DLHC_MAG_REG_IRC wraps around to CRA */
176
+
718
+ i2c_read_block(i2cdev, LSM303DLHC_MAG_REG_IRC, value, 2);
177
+#define assert_has_feature(qts, cpu_type, feature) \
719
+ /* Default value for IRC = 0x33 */
178
+({ \
720
+ g_assert_cmphex(value[0], ==, 0x33);
179
+ QDict *_resp = do_query_no_props(qts, cpu_type); \
721
+ /* Default value for CRA = 0x10 */
180
+ g_assert(_resp); \
722
+ g_assert_cmphex(value[1], ==, 0x10);
181
+ g_assert(resp_has_props(_resp)); \
723
+}
182
+ g_assert(qdict_get(resp_get_props(_resp), feature)); \
724
+
183
+ qobject_unref(_resp); \
725
+static void lsm303dlhc_mag_register_nodes(void)
184
+})
726
+{
185
+
727
+ QOSGraphEdgeOptions opts = {
186
+#define assert_has_not_feature(qts, cpu_type, feature) \
728
+ .extra_device_opts = "id=" LSM303DLHC_MAG_TEST_ID ",address=0x1e"
187
+({ \
729
+ };
188
+ QDict *_resp = do_query_no_props(qts, cpu_type); \
730
+ add_qi2c_address(&opts, &(QI2CAddress) { 0x1E });
189
+ g_assert(_resp); \
731
+
190
+ g_assert(!resp_has_props(_resp) || \
732
+ qos_node_create_driver("lsm303dlhc_mag", i2c_device_create);
191
+ !qdict_get(resp_get_props(_resp), feature)); \
733
+ qos_node_consumes("lsm303dlhc_mag", "i2c-bus", &opts);
192
+ qobject_unref(_resp); \
734
+
193
+})
735
+ qos_add_test("tx-rx", "lsm303dlhc_mag", send_and_receive, NULL);
194
+
736
+ qos_add_test("regwrap", "lsm303dlhc_mag", reg_wraparound, NULL);
195
+static void assert_type_full(QTestState *qts)
737
+}
196
+{
738
+libqos_init(lsm303dlhc_mag_register_nodes);
197
+ const char *error;
739
diff --git a/hw/sensor/Kconfig b/hw/sensor/Kconfig
198
+ QDict *resp;
740
index XXXXXXX..XXXXXXX 100644
199
+
741
--- a/hw/sensor/Kconfig
200
+ resp = qtest_qmp(qts, "{ 'execute': 'query-cpu-model-expansion', "
742
+++ b/hw/sensor/Kconfig
201
+ "'arguments': { 'type': 'static', "
743
@@ -XXX,XX +XXX,XX @@ config ADM1272
202
+ "'model': { 'name': 'foo' }}}");
744
config MAX34451
203
+ g_assert(resp);
745
bool
204
+ error = resp_get_error(resp);
746
depends on I2C
205
+ g_assert(error);
747
+
206
+ g_assert(g_str_equal(error,
748
+config LSM303DLHC_MAG
207
+ "The requested expansion type is not supported"));
749
+ bool
208
+ qobject_unref(resp);
750
+ depends on I2C
209
+}
751
diff --git a/hw/sensor/meson.build b/hw/sensor/meson.build
210
+
752
index XXXXXXX..XXXXXXX 100644
211
+static void assert_bad_props(QTestState *qts, const char *cpu_type)
753
--- a/hw/sensor/meson.build
212
+{
754
+++ b/hw/sensor/meson.build
213
+ const char *error;
755
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_DPS310', if_true: files('dps310.c'))
214
+ QDict *resp;
756
softmmu_ss.add(when: 'CONFIG_EMC141X', if_true: files('emc141x.c'))
215
+
757
softmmu_ss.add(when: 'CONFIG_ADM1272', if_true: files('adm1272.c'))
216
+ resp = qtest_qmp(qts, "{ 'execute': 'query-cpu-model-expansion', "
758
softmmu_ss.add(when: 'CONFIG_MAX34451', if_true: files('max34451.c'))
217
+ "'arguments': { 'type': 'full', "
759
+softmmu_ss.add(when: 'CONFIG_LSM303DLHC_MAG', if_true: files('lsm303dlhc_mag.c'))
218
+ "'model': { 'name': %s, "
760
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
219
+ "'props': false }}}",
761
index XXXXXXX..XXXXXXX 100644
220
+ cpu_type);
762
--- a/tests/qtest/meson.build
221
+ g_assert(resp);
763
+++ b/tests/qtest/meson.build
222
+ error = resp_get_error(resp);
764
@@ -XXX,XX +XXX,XX @@ qos_test_ss.add(
223
+ g_assert(error);
765
'eepro100-test.c',
224
+ g_assert(g_str_equal(error,
766
'es1370-test.c',
225
+ "Invalid parameter type for 'props', expected: dict"));
767
'ipoctal232-test.c',
226
+ qobject_unref(resp);
768
+ 'lsm303dlhc-mag-test.c',
227
+}
769
'max34451-test.c',
228
+
770
'megasas-test.c',
229
+static void test_query_cpu_model_expansion(const void *data)
771
'ne2000-test.c',
230
+{
231
+ QTestState *qts;
232
+
233
+ qts = qtest_init(MACHINE "-cpu max");
234
+
235
+ /* Test common query-cpu-model-expansion input validation */
236
+ assert_type_full(qts);
237
+ assert_bad_props(qts, "max");
238
+ assert_error(qts, "foo", "The CPU type 'foo' is not a recognized "
239
+ "ARM CPU type", NULL);
240
+ assert_error(qts, "max", "Parameter 'not-a-prop' is unexpected",
241
+ "{ 'not-a-prop': false }");
242
+ assert_error(qts, "host", "The CPU type 'host' requires KVM", NULL);
243
+
244
+ /* Test expected feature presence/absence for some cpu types */
245
+ assert_has_feature(qts, "max", "pmu");
246
+ assert_has_feature(qts, "cortex-a15", "pmu");
247
+ assert_has_not_feature(qts, "cortex-a15", "aarch64");
248
+
249
+ if (g_str_equal(qtest_get_arch(), "aarch64")) {
250
+ assert_has_feature(qts, "max", "aarch64");
251
+ assert_has_feature(qts, "cortex-a57", "pmu");
252
+ assert_has_feature(qts, "cortex-a57", "aarch64");
253
+
254
+ /* Test that features that depend on KVM generate errors without. */
255
+ assert_error(qts, "max",
256
+ "'aarch64' feature cannot be disabled "
257
+ "unless KVM is enabled and 32-bit EL1 "
258
+ "is supported",
259
+ "{ 'aarch64': false }");
260
+ }
261
+
262
+ qtest_quit(qts);
263
+}
264
+
265
+static void test_query_cpu_model_expansion_kvm(const void *data)
266
+{
267
+ QTestState *qts;
268
+
269
+ qts = qtest_init(MACHINE_KVM "-cpu max");
270
+
271
+ /*
272
+ * These tests target the 'host' CPU type, so KVM must be enabled.
273
+ */
274
+ if (!kvm_enabled(qts)) {
275
+ qtest_quit(qts);
276
+ return;
277
+ }
278
+
279
+ if (g_str_equal(qtest_get_arch(), "aarch64")) {
280
+ assert_has_feature(qts, "host", "aarch64");
281
+ assert_has_feature(qts, "host", "pmu");
282
+
283
+ assert_error(qts, "cortex-a15",
284
+ "We cannot guarantee the CPU type 'cortex-a15' works "
285
+ "with KVM on this host", NULL);
286
+ } else {
287
+ assert_has_not_feature(qts, "host", "aarch64");
288
+ assert_has_not_feature(qts, "host", "pmu");
289
+ }
290
+
291
+ qtest_quit(qts);
292
+}
293
+
294
+int main(int argc, char **argv)
295
+{
296
+ g_test_init(&argc, &argv, NULL);
297
+
298
+ qtest_add_data_func("/arm/query-cpu-model-expansion",
299
+ NULL, test_query_cpu_model_expansion);
300
+ qtest_add_data_func("/arm/kvm/query-cpu-model-expansion",
301
+ NULL, test_query_cpu_model_expansion_kvm);
302
+
303
+ return g_test_run();
304
+}
305
--
772
--
306
2.20.1
773
2.25.1
307
774
308
775
diff view generated by jsdifflib