From: Steven Price
To: kvm@vger.kernel.org, kvmarm@lists.linux.dev
Cc: Jean-Philippe Brucker, Catalin Marinas, Marc Zyngier, Will Deacon,
	James Morse, Oliver Upton, Suzuki K Poulose, Zenghui Yu,
	linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
	Joey Gouly, Alexandru Elisei, Christoffer Dall, Fuad Tabba,
	linux-coco@lists.linux.dev, Ganapatrao Kulkarni, Gavin Shan,
	Shanker Donthineni, Alper Gun, "Aneesh Kumar K . V", Steven Price
Subject: [PATCH v6 40/43] arm64: RME: Provide accurate register list
Date: Thu, 12 Dec 2024 15:56:05 +0000
Message-ID: <20241212155610.76522-41-steven.price@arm.com>
In-Reply-To: <20241212155610.76522-1-steven.price@arm.com>
References: <20241212155610.76522-1-steven.price@arm.com>

From: Jean-Philippe Brucker

Userspace can set a few registers with KVM_SET_ONE_REG (9 GP registers
at runtime, and 3 system registers during initialization). Update the
register list returned by KVM_GET_REG_LIST.
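As context for why the counting helpers below must stay in sync with
the copy functions, here is a minimal sketch (not part of this patch;
untested, error handling elided, and vcpu_fd/save_all_regs are made-up
names) of how a VMM typically consumes KVM_GET_REG_LIST and then
accesses each listed register with the one-reg API:

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static void save_all_regs(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;
	__u64 i;

	/* The first call fails with E2BIG but sets .n to the count. */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
	list->n = probe.n;
	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);

	for (i = 0; i < list->n; i++) {
		__u64 val = 0;
		struct kvm_one_reg reg = {
			.id   = list->reg[i],
			.addr = (__u64)&val,
		};

		/* Read each register the list claims is accessible. */
		ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}
	free(list);
}

KVM_SET_ONE_REG takes the same struct kvm_one_reg in the other
direction, so a generic save/restore loop like this only works if the
list contains exactly the registers userspace may access - which is
what this patch makes true for realm VMs.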
Signed-off-by: Jean-Philippe Brucker
Signed-off-by: Steven Price
---
 arch/arm64/kvm/guest.c      | 19 ++++++++----
 arch/arm64/kvm/hypercalls.c |  4 +--
 arch/arm64/kvm/sys_regs.c   | 58 ++++++++++++++++++++++++++++---------
 3 files changed, 60 insertions(+), 21 deletions(-)

diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 24be77888061..e8c38eb5c245 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -621,8 +621,6 @@ static const u64 timer_reg_list[] = {
 	KVM_REG_ARM_PTIMER_CVAL,
 };
 
-#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)
-
 static bool is_timer_reg(u64 index)
 {
 	switch (index) {
@@ -637,9 +635,14 @@ static bool is_timer_reg(u64 index)
 	return false;
 }
 
+static unsigned long num_timer_regs(struct kvm_vcpu *vcpu)
+{
+	return kvm_is_realm(vcpu->kvm) ? 0 : ARRAY_SIZE(timer_reg_list);
+}
+
 static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
-	for (int i = 0; i < NUM_TIMER_REGS; i++) {
+	for (int i = 0; i < num_timer_regs(vcpu); i++) {
 		if (put_user(timer_reg_list[i], uindices))
 			return -EFAULT;
 		uindices++;
@@ -677,6 +680,9 @@ static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
 	if (!vcpu_has_sve(vcpu) || !kvm_arm_vcpu_sve_finalized(vcpu))
 		return 0;
 
+	if (kvm_is_realm(vcpu->kvm))
+		return 1; /* KVM_REG_ARM64_SVE_VLS */
+
 	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
 		+ 1; /* KVM_REG_ARM64_SVE_VLS */
 }
@@ -704,6 +710,9 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
 		return -EFAULT;
 	++num_regs;
 
+	if (kvm_is_realm(vcpu->kvm))
+		return num_regs;
+
 	for (i = 0; i < slices; i++) {
 		for (n = 0; n < SVE_NUM_ZREGS; n++) {
 			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
@@ -742,7 +751,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 	res += num_sve_regs(vcpu);
 	res += kvm_arm_num_sys_reg_descs(vcpu);
 	res += kvm_arm_get_fw_num_regs(vcpu);
-	res += NUM_TIMER_REGS;
+	res += num_timer_regs(vcpu);
 
 	return res;
 }
@@ -776,7 +785,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 	ret = copy_timer_indices(vcpu, uindices);
 	if (ret < 0)
 		return ret;
-	uindices += NUM_TIMER_REGS;
+	uindices += num_timer_regs(vcpu);
 
 	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
 }
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 27ce4cb44904..378576771125 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -407,14 +407,14 @@ void kvm_arm_teardown_hypercalls(struct kvm *kvm)
 
 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
 {
-	return ARRAY_SIZE(kvm_arm_fw_reg_ids);
+	return kvm_is_realm(vcpu->kvm) ? 0 : ARRAY_SIZE(kvm_arm_fw_reg_ids);
 }
 
 int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
+	for (i = 0; i < kvm_arm_get_fw_num_regs(vcpu); i++) {
 		if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
 			return -EFAULT;
 	}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 750f69abd3a5..2fcc93d7310e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -4706,18 +4706,18 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
 				sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 }
 
-static unsigned int num_demux_regs(void)
+static unsigned int num_demux_regs(struct kvm_vcpu *vcpu)
 {
-	return CSSELR_MAX;
+	return kvm_is_realm(vcpu->kvm) ? 0 : CSSELR_MAX;
 }
 
-static int write_demux_regids(u64 __user *uindices)
+static int write_demux_regids(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
 	unsigned int i;
 
 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
-	for (i = 0; i < CSSELR_MAX; i++) {
+	for (i = 0; i < num_demux_regs(vcpu); i++) {
 		if (put_user(val | i, uindices))
 			return -EFAULT;
 		uindices++;
@@ -4725,6 +4725,23 @@ static int write_demux_regids(u64 __user *uindices)
 	return 0;
 }
 
+static unsigned int num_invariant_regs(struct kvm_vcpu *vcpu)
+{
+	return kvm_is_realm(vcpu->kvm) ? 0 : ARRAY_SIZE(invariant_sys_regs);
+}
+
+static int write_invariant_regids(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_invariant_regs(vcpu); i++) {
+		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
+			return -EFAULT;
+		uindices++;
+	}
+	return 0;
+}
+
 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
 {
 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
@@ -4748,11 +4765,27 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
 	return true;
 }
 
+static bool kvm_realm_sys_reg_hidden_user(const struct kvm_vcpu *vcpu, u64 reg)
+{
+	if (!kvm_is_realm(vcpu->kvm))
+		return false;
+
+	switch (reg) {
+	case SYS_ID_AA64DFR0_EL1:
+	case SYS_PMCR_EL0:
+		return false;
+	}
+	return true;
+}
+
 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
 			    const struct sys_reg_desc *rd,
 			    u64 __user **uind,
 			    unsigned int *total)
 {
+	if (kvm_realm_sys_reg_hidden_user(vcpu, reg_to_encoding(rd)))
+		return 0;
+
 	/*
 	 * Ignore registers we trap but don't save,
 	 * and for which no custom user accessor is provided.
@@ -4790,29 +4823,26 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
 
 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
 {
-	return ARRAY_SIZE(invariant_sys_regs)
-		+ num_demux_regs()
+	return num_invariant_regs(vcpu)
+		+ num_demux_regs(vcpu)
 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
 }
 
 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
-	unsigned int i;
 	int err;
 
-	/* Then give them all the invariant registers' indices. */
-	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
-		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
-			return -EFAULT;
-		uindices++;
-	}
+	err = write_invariant_regids(vcpu, uindices);
+	if (err)
+		return err;
+	uindices += num_invariant_regs(vcpu);
 
 	err = walk_sys_regs(vcpu, uindices);
 	if (err < 0)
 		return err;
 	uindices += err;
 
-	return write_demux_regids(uindices);
+	return write_demux_regids(vcpu, uindices);
 }
 
 #define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)	\
-- 
2.43.0