[PATCH v5 12/24] KVM: arm64: Use physical PMSELR for PMXEVTYPER if partitioned

Posted by Colton Lewis 1 week, 1 day ago
Because PMXEVTYPER is trapped but PMSELR is not, the virtual PMSELR
register may be stale by the time PMXEVTYPER is emulated, and using it
could lead to an invalid write. Use the physical register when the PMU
is partitioned.

Signed-off-by: Colton Lewis <coltonlewis@google.com>
---
 arch/arm64/include/asm/arm_pmuv3.h | 7 ++++++-
 arch/arm64/kvm/sys_regs.c          | 9 +++++++--
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
index 27c4d6d47da31..60600f04b5902 100644
--- a/arch/arm64/include/asm/arm_pmuv3.h
+++ b/arch/arm64/include/asm/arm_pmuv3.h
@@ -70,11 +70,16 @@ static inline u64 read_pmcr(void)
 	return read_sysreg(pmcr_el0);
 }
 
-static inline void write_pmselr(u32 val)
+static inline void write_pmselr(u64 val)
 {
 	write_sysreg(val, pmselr_el0);
 }
 
+static inline u64 read_pmselr(void)
+{
+	return read_sysreg(pmselr_el0);
+}
+
 static inline void write_pmccntr(u64 val)
 {
 	write_sysreg(val, pmccntr_el0);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 0c9596325519b..2e6d907fa8af2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1199,14 +1199,19 @@ static bool writethrough_pmevtyper(struct kvm_vcpu *vcpu, struct sys_reg_params
 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			       const struct sys_reg_desc *r)
 {
-	u64 idx, reg;
+	u64 idx, reg, pmselr;
 
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
 
 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
 		/* PMXEVTYPER_EL0 */
-		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
+		if (kvm_vcpu_pmu_is_partitioned(vcpu))
+			pmselr = read_pmselr();
+		else
+			pmselr = __vcpu_sys_reg(vcpu, PMSELR_EL0);
+
+		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, pmselr);
 		reg = PMEVTYPER0_EL0 + idx;
 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
-- 
2.52.0.239.gd5f0c6e74e-goog
Re: [PATCH v5 12/24] KVM: arm64: Use physical PMSELR for PMXEVTYPER if partitioned
Posted by Oliver Upton 1 week, 1 day ago
On Tue, Dec 09, 2025 at 08:51:09PM +0000, Colton Lewis wrote:
> Because PMXEVTYPER is trapped but PMSELR is not, the virtual PMSELR
> register may be stale by the time PMXEVTYPER is emulated, and using it
> could lead to an invalid write. Use the physical register when the PMU
> is partitioned.
> 
> Signed-off-by: Colton Lewis <coltonlewis@google.com>
> ---
>  arch/arm64/include/asm/arm_pmuv3.h | 7 ++++++-
>  arch/arm64/kvm/sys_regs.c          | 9 +++++++--
>  2 files changed, 13 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
> index 27c4d6d47da31..60600f04b5902 100644
> --- a/arch/arm64/include/asm/arm_pmuv3.h
> +++ b/arch/arm64/include/asm/arm_pmuv3.h
> @@ -70,11 +70,16 @@ static inline u64 read_pmcr(void)
>  	return read_sysreg(pmcr_el0);
>  }
>  
> -static inline void write_pmselr(u32 val)
> +static inline void write_pmselr(u64 val)
>  {
>  	write_sysreg(val, pmselr_el0);
>  }
>  
> +static inline u64 read_pmselr(void)
> +{
> +	return read_sysreg(pmselr_el0);
> +}
> +
>  static inline void write_pmccntr(u64 val)
>  {
>  	write_sysreg(val, pmccntr_el0);
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 0c9596325519b..2e6d907fa8af2 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -1199,14 +1199,19 @@ static bool writethrough_pmevtyper(struct kvm_vcpu *vcpu, struct sys_reg_params
>  static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  			       const struct sys_reg_desc *r)
>  {
> -	u64 idx, reg;
> +	u64 idx, reg, pmselr;
>  
>  	if (pmu_access_el0_disabled(vcpu))
>  		return false;
>  
>  	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
>  		/* PMXEVTYPER_EL0 */
> -		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
> +		if (kvm_vcpu_pmu_is_partitioned(vcpu))
> +			pmselr = read_pmselr();
> +		else
> +			pmselr = __vcpu_sys_reg(vcpu, PMSELR_EL0);

This isn't preemption safe. Nor should the "if (partitioned) do X else do Y" get
open-coded throughout the shop.

I would rather this be handled with a preparatory patch that provides
generic PMU register accessors to the rest of KVM (e.g.
vcpu_read_pmu_reg() / vcpu_write_pmu_reg()). Internally those helpers
can locate the vCPU's PMU registers (emulated, partitioned in-memory,
partitioned in-CPU).
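
Roughly, I'm imagining something along these lines. This is only a
sketch: apart from kvm_vcpu_pmu_is_partitioned(), __vcpu_sys_reg() and
the preempt_disable()/preempt_enable() pair, the helpers named below
(vcpu_read_pmu_reg(), vcpu_pmu_regs_loaded(), read_pmu_hw_reg()) are
placeholders, not something this series defines.

static u64 vcpu_read_pmu_reg(struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
{
	u64 val;

	/* Emulated PMU: the in-memory shadow register is authoritative. */
	if (!kvm_vcpu_pmu_is_partitioned(vcpu))
		return __vcpu_sys_reg(vcpu, reg);

	/*
	 * Partitioned PMU: the value may live in hardware while this
	 * vCPU's PMU context is loaded on the current CPU, or in the
	 * in-memory copy after a save. Keep preemption disabled so the
	 * answer cannot change between the check and the read.
	 */
	preempt_disable();
	if (vcpu_pmu_regs_loaded(vcpu))		/* placeholder check */
		val = read_pmu_hw_reg(reg);	/* placeholder hw read */
	else
		val = __vcpu_sys_reg(vcpu, reg);
	preempt_enable();

	return val;
}

A matching vcpu_write_pmu_reg() would make the same decision on the
write side, and callers like access_pmu_evtyper() would no longer need
to know where the register happens to live.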

Thanks,
Oliver
Re: [PATCH v5 12/24] KVM: arm64: Use physical PMSELR for PMXEVTYPER if partitioned
Posted by Colton Lewis 5 days, 11 hours ago
Oliver Upton <oupton@kernel.org> writes:

> On Tue, Dec 09, 2025 at 08:51:09PM +0000, Colton Lewis wrote:
>> Because PMXEVTYPER is trapped but PMSELR is not, the virtual PMSELR
>> register may be stale by the time PMXEVTYPER is emulated, and using it
>> could lead to an invalid write. Use the physical register when the PMU
>> is partitioned.

>> Signed-off-by: Colton Lewis <coltonlewis@google.com>
>> ---
>>   arch/arm64/include/asm/arm_pmuv3.h | 7 ++++++-
>>   arch/arm64/kvm/sys_regs.c          | 9 +++++++--
>>   2 files changed, 13 insertions(+), 3 deletions(-)

>> diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
>> index 27c4d6d47da31..60600f04b5902 100644
>> --- a/arch/arm64/include/asm/arm_pmuv3.h
>> +++ b/arch/arm64/include/asm/arm_pmuv3.h
>> @@ -70,11 +70,16 @@ static inline u64 read_pmcr(void)
>>   	return read_sysreg(pmcr_el0);
>>   }

>> -static inline void write_pmselr(u32 val)
>> +static inline void write_pmselr(u64 val)
>>   {
>>   	write_sysreg(val, pmselr_el0);
>>   }

>> +static inline u64 read_pmselr(void)
>> +{
>> +	return read_sysreg(pmselr_el0);
>> +}
>> +
>>   static inline void write_pmccntr(u64 val)
>>   {
>>   	write_sysreg(val, pmccntr_el0);
>> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
>> index 0c9596325519b..2e6d907fa8af2 100644
>> --- a/arch/arm64/kvm/sys_regs.c
>> +++ b/arch/arm64/kvm/sys_regs.c
>> @@ -1199,14 +1199,19 @@ static bool writethrough_pmevtyper(struct kvm_vcpu *vcpu, struct sys_reg_params
>>   static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>>   			       const struct sys_reg_desc *r)
>>   {
>> -	u64 idx, reg;
>> +	u64 idx, reg, pmselr;

>>   	if (pmu_access_el0_disabled(vcpu))
>>   		return false;

>>   	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
>>   		/* PMXEVTYPER_EL0 */
>> -		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
>> +		if (kvm_vcpu_pmu_is_partitioned(vcpu))
>> +			pmselr = read_pmselr();
>> +		else
>> +			pmselr = __vcpu_sys_reg(vcpu, PMSELR_EL0);

> This isn't preemption safe. Nor should the "if (partitioned) do X else do Y" get
> open-coded throughout the shop.

Okay. I had not realized the register access code there wasn't
preemption safe. I'll fix that.


> I would rather this be handled with a preparatory patch that provides
> generic PMU register accessors to the rest of KVM (e.g.
> vcpu_read_pmu_reg() / vcpu_write_pmu_reg()). Internally those helpers
> can locate the vCPU's PMU registers (emulated, partitioned in-memory,
> partitioned in-CPU).

That seems like a good idea.
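
For reference, with an accessor along those lines the PMXEVTYPER hunk
above should collapse back to a single lookup, something like this
(sketch only, assuming the hypothetical vcpu_read_pmu_reg() you
suggested):

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
				    vcpu_read_pmu_reg(vcpu, PMSELR_EL0));
		reg = PMEVTYPER0_EL0 + idx;
	}

which also keeps the preemption handling inside the accessor rather
than at each call site.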

> Thanks,
> Oliver