Because we weren't setting this flag, our probe of ID_AA64ZFR0
was always returning zero. This also obviates the manual adjustment
of ID_AA64PFR0, whose SVE field KVM had sanitized to zero.
Reported-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/kvm64.c | 27 +++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)
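
For anyone wanting to reproduce the probe outside of QEMU, it boils down
to something like the raw-ioctl sketch below. This is illustrative only
(made-up helper name, caller-provided vm_fd/vcpu_fd, no error reporting,
VCPU finalization omitted); the real path goes through
kvm_arm_create_scratch_host_vcpu().

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int probe_id_aa64zfr0(int vm_fd, int vcpu_fd, uint64_t *zfr0)
{
    struct kvm_vcpu_init init;
    struct kvm_one_reg reg;

    memset(&init, 0, sizeof(init));
    if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0) {
        return -1;
    }
    if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0) {
        /* Without this, the register below reads as zero (RAZ). */
        init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }
    if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0) {
        return -1;
    }

    reg.id = ARM64_SYS_REG(3, 0, 0, 4, 4);    /* ID_AA64ZFR0_EL1 */
    reg.addr = (uint64_t)(uintptr_t)zfr0;
    return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
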
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 36ff20c9e3..8b2ae9948a 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -512,7 +512,6 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
bool sve_supported;
bool pmu_supported = false;
uint64_t features = 0;
- uint64_t t;
int err;
/* Old kernels may not know about the PREFERRED_TARGET ioctl: however
@@ -533,10 +532,17 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
struct kvm_vcpu_init init = { .target = -1, };
/*
- * Ask for Pointer Authentication if supported. We can't play the
- * SVE trick of synthesising the ID reg as KVM won't tell us
- * whether we have the architected or IMPDEF version of PAuth, so
- * we have to use the actual ID regs.
+ * Ask for SVE if supported, so that we can query ID_AA64ZFR0,
+ * which is otherwise RAZ.
+ */
+ sve_supported = kvm_arm_sve_supported();
+ if (sve_supported) {
+ init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
+ }
+
+ /*
+ * Ask for Pointer Authentication if supported, so that we get
+ * the unsanitized field values for AA64ISAR1_EL1.
*/
if (kvm_arm_pauth_supported()) {
init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
@@ -680,20 +686,13 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
}
}
- sve_supported = kvm_arm_sve_supported();
-
- /* Add feature bits that can't appear until after VCPU init. */
if (sve_supported) {
- t = ahcf->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
- ahcf->isar.id_aa64pfr0 = t;
-
/*
* There is a range of kernels between kernel commit 73433762fcae
* and f81cb2c3ad41 which have a bug where the kernel doesn't expose
* SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has enabled
- * SVE support, so we only read it here, rather than together with all
- * the other ID registers earlier.
+ * SVE support, which resulted in an error rather than RAZ.
+ * So only read the register if we set KVM_ARM_VCPU_SVE above.
*/
err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
ARM64_SYS_REG(3, 0, 0, 4, 4));
--
2.34.1
On 7/25/22 11:14, Richard Henderson wrote:
> Because we weren't setting this flag, our probe of ID_AA64ZFR0
> was always returning zero. This also obviates the manual adjustment
> of ID_AA64PFR0, whose SVE field KVM had sanitized to zero.
Oh, I meant to say here that the effects of the bug are not visible, because the only
thing that ISAR.ID_AA64ZFR0 is used for within qemu at present is tcg translation. The
other tests for SVE within KVM are via ISAR.ID_AA64PFR0.SVE.
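
For concreteness, the consumers look roughly like this -- paraphrased from
the ISAR feature testers in target/arm/cpu.h rather than copied, so take the
exact names and fields as approximate:

/* KVM-side SVE checks key off the PFR0 field... */
static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
}

/* ...whereas ZFR0 only feeds tcg translation, e.g. the SVE2 test. */
static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0;
}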
r~