From: Steven Price
To: kvm@vger.kernel.org, kvmarm@lists.linux.dev
Cc: Jean-Philippe Brucker, Catalin Marinas, Marc Zyngier, Will Deacon,
	James Morse, Oliver Upton, Suzuki K Poulose, Zenghui Yu,
	linux-arm-kernel@lists.infradead.org, linux-kernel@vger.kernel.org,
	Joey Gouly, Alexandru Elisei, Christoffer Dall, Fuad Tabba,
	linux-coco@lists.linux.dev, Ganapatrao Kulkarni, Gavin Shan,
	Shanker Donthineni, Alper Gun, Aneesh Kumar K.V, Emi Kisanuki,
	Vishal Annapurve, Steven Price
Subject: [PATCH v10 40/43] arm64: RME: Provide accurate register list
Date: Wed, 20 Aug 2025 15:56:00 +0100
Message-ID: <20250820145606.180644-41-steven.price@arm.com>
In-Reply-To: <20250820145606.180644-1-steven.price@arm.com>
References: <20250820145606.180644-1-steven.price@arm.com>

From: Jean-Philippe Brucker

Userspace can set a few registers with KVM_SET_ONE_REG (9 GP registers
at runtime, and 3 system registers during initialization). Update the
register list returned by KVM_GET_REG_LIST.

Signed-off-by: Jean-Philippe Brucker
Signed-off-by: Steven Price
Reviewed-by: Gavin Shan
Reviewed-by: Suzuki K Poulose
---
Changes since v8:
 * Minor type changes following review.
Changes since v7:
 * Reworked on top of upstream changes.
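For context, userspace consumes this list through the usual
KVM_GET_REG_LIST two-call protocol: a first call with n == 0 fails with
E2BIG and reports the required count, and a second call fills the index
array. A minimal sketch, not part of the patch; vcpu_fd is assumed to be
an already-created vCPU file descriptor and get_reg_list() is only an
illustrative helper name:

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
        struct kvm_reg_list probe = { .n = 0 };
        struct kvm_reg_list *list;

        /* First call: n == 0 is too small, the kernel writes back the count. */
        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
                return NULL;

        list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
        if (!list)
                return NULL;
        list->n = probe.n;

        /* Second call: the kernel fills reg[] with one index per register. */
        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list)) {
                free(list);
                return NULL;
        }
        return list;
}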
---
 arch/arm64/kvm/guest.c      | 19 ++++++++++++++-----
 arch/arm64/kvm/hypercalls.c |  4 ++--
 arch/arm64/kvm/sys_regs.c   | 29 +++++++++++++++++++++++------
 3 files changed, 39 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index c95a00b711c0..c5bdbcede086 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -619,8 +619,6 @@ static const u64 timer_reg_list[] = {
         KVM_REG_ARM_PTIMER_CVAL,
 };
 
-#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)
-
 static bool is_timer_reg(u64 index)
 {
         switch (index) {
@@ -635,9 +633,14 @@ static bool is_timer_reg(u64 index)
         return false;
 }
 
+static inline unsigned long num_timer_regs(struct kvm_vcpu *vcpu)
+{
+        return kvm_is_realm(vcpu->kvm) ? 0 : ARRAY_SIZE(timer_reg_list);
+}
+
 static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
-        for (int i = 0; i < NUM_TIMER_REGS; i++) {
+        for (unsigned long i = 0; i < num_timer_regs(vcpu); i++) {
                 if (put_user(timer_reg_list[i], uindices))
                         return -EFAULT;
                 uindices++;
@@ -678,6 +681,9 @@ static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
         if (!kvm_arm_vcpu_sve_finalized(vcpu))
                 return 1; /* KVM_REG_ARM64_SVE_VLS */
 
+        if (kvm_is_realm(vcpu->kvm))
+                return 1; /* KVM_REG_ARM64_SVE_VLS */
+
         return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
                 + 1; /* KVM_REG_ARM64_SVE_VLS */
 }
@@ -705,6 +711,9 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
         if (!kvm_arm_vcpu_sve_finalized(vcpu))
                 return num_regs;
 
+        if (kvm_is_realm(vcpu->kvm))
+                return num_regs;
+
         for (i = 0; i < slices; i++) {
                 for (n = 0; n < SVE_NUM_ZREGS; n++) {
                         reg = KVM_REG_ARM64_SVE_ZREG(n, i);
@@ -743,7 +752,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
         res += num_sve_regs(vcpu);
         res += kvm_arm_num_sys_reg_descs(vcpu);
         res += kvm_arm_get_fw_num_regs(vcpu);
-        res += NUM_TIMER_REGS;
+        res += num_timer_regs(vcpu);
 
         return res;
 }
@@ -777,7 +786,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
         ret = copy_timer_indices(vcpu, uindices);
         if (ret < 0)
                 return ret;
-        uindices += NUM_TIMER_REGS;
+        uindices += num_timer_regs(vcpu);
 
         return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
 }
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 58c5fe7d7572..70ac7971416c 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -414,14 +414,14 @@ void kvm_arm_teardown_hypercalls(struct kvm *kvm)
 
 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
 {
-        return ARRAY_SIZE(kvm_arm_fw_reg_ids);
+        return kvm_is_realm(vcpu->kvm) ? 0 : ARRAY_SIZE(kvm_arm_fw_reg_ids);
 }
 
 int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
         int i;
 
-        for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
+        for (i = 0; i < kvm_arm_get_fw_num_regs(vcpu); i++) {
                 if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
                         return -EFAULT;
         }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2b8de301f057..e32cf63b04a3 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -5120,18 +5120,18 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
                          sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 }
 
-static unsigned int num_demux_regs(void)
+static inline unsigned int num_demux_regs(struct kvm_vcpu *vcpu)
 {
-        return CSSELR_MAX;
+        return kvm_is_realm(vcpu->kvm) ? 0 : CSSELR_MAX;
 }
 
-static int write_demux_regids(u64 __user *uindices)
+static int write_demux_regids(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
         u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
         unsigned int i;
 
         val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
-        for (i = 0; i < CSSELR_MAX; i++) {
+        for (i = 0; i < num_demux_regs(vcpu); i++) {
                 if (put_user(val | i, uindices))
                         return -EFAULT;
                 uindices++;
@@ -5162,11 +5162,28 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
         return true;
 }
 
+static inline bool kvm_realm_sys_reg_hidden_user(const struct kvm_vcpu *vcpu,
+                                                 u64 reg)
+{
+        if (!kvm_is_realm(vcpu->kvm))
+                return false;
+
+        switch (reg) {
+        case SYS_ID_AA64DFR0_EL1:
+        case SYS_PMCR_EL0:
+                return false;
+        }
+        return true;
+}
+
 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
                             const struct sys_reg_desc *rd,
                             u64 __user **uind,
                             unsigned int *total)
 {
+        if (kvm_realm_sys_reg_hidden_user(vcpu, reg_to_encoding(rd)))
+                return 0;
+
         /*
          * Ignore registers we trap but don't save,
          * and for which no custom user accessor is provided.
@@ -5204,7 +5221,7 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
 
 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
 {
-        return num_demux_regs()
+        return num_demux_regs(vcpu)
                 + walk_sys_regs(vcpu, (u64 __user *)NULL);
 }
 
@@ -5217,7 +5234,7 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
                 return err;
         uindices += err;
 
-        return write_demux_regids(uindices);
+        return write_demux_regids(vcpu, uindices);
 }
 
 #define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)                               \
-- 
2.43.0
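
As a usage note, each index returned by KVM_GET_REG_LIST feeds straight
into KVM_GET_ONE_REG/KVM_SET_ONE_REG. A minimal sketch of writing one
core GP register, not part of the patch; vcpu_fd is assumed to be an
existing vCPU file descriptor, x0 is only an example target, and
set_x0() is an illustrative helper name:

#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/kvm.h> /* also pulls in asm/kvm.h: struct kvm_regs, KVM_REG_ARM_CORE_REG() */

static int set_x0(int vcpu_fd, __u64 value)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
                        KVM_REG_ARM_CORE_REG(regs.regs[0]),
                .addr = (__u64)&value,
        };

        /* Returns 0 on success, -1 with errno set if the register cannot be set. */
        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}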