For unimplemented counters, the bits in the PM{C,I}NTEN{SET,CLR} and
PMOVS{SET,CLR} registers are expected to be RAZ. To honor this,
explicitly implement the {get,set}_user functions for these
registers to mask out unimplemented counters for userspace reads
and writes.
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
---
arch/arm64/kvm/sys_regs.c | 91 ++++++++++++++++++++++++++++++++++++---
1 file changed, 85 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index faf97878dfbbb..2e5d497596ef8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -987,6 +987,45 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static void set_pmreg_for_valid_counters(struct kvm_vcpu *vcpu,
+ u64 reg, u64 val, bool set)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ /* Make the register immutable once the VM has started running */
+ if (kvm_vm_has_ran_once(kvm)) {
+ mutex_unlock(&kvm->arch.config_lock);
+ return;
+ }
+
+ val &= kvm_pmu_valid_counter_mask(vcpu);
+ mutex_unlock(&kvm->arch.config_lock);
+
+ if (set)
+ __vcpu_sys_reg(vcpu, reg) |= val;
+ else
+ __vcpu_sys_reg(vcpu, reg) &= ~val;
+}
+
+static int get_pmcnten(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 *val)
+{
+ u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+ *val = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+ return 0;
+}
+
+static int set_pmcnten(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ /* r->Op2 & 0x1: true for PMCNTENSET_EL0, else PMCNTENCLR_EL0 */
+ set_pmreg_for_valid_counters(vcpu, PMCNTENSET_EL0, val, r->Op2 & 0x1);
+ return 0;
+}
+
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
@@ -1015,6 +1054,23 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static int get_pminten(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 *val)
+{
+ u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+ *val = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
+ return 0;
+}
+
+static int set_pminten(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ /* r->Op2 & 0x1: true for PMINTENSET_EL1, else PMINTENCLR_EL1 */
+ set_pmreg_for_valid_counters(vcpu, PMINTENSET_EL1, val, r->Op2 & 0x1);
+ return 0;
+}
+
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
@@ -1039,6 +1095,23 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static int set_pmovs(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ /* r->CRm & 0x2: true for PMOVSSET_EL0, else PMOVSCLR_EL0 */
+ set_pmreg_for_valid_counters(vcpu, PMOVSSET_EL0, val, r->CRm & 0x2);
+ return 0;
+}
+
+static int get_pmovs(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 *val)
+{
+ u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+ *val = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
+ return 0;
+}
+
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
@@ -2184,9 +2257,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* PMBIDR_EL1 is not trapped */
{ PMU_SYS_REG(PMINTENSET_EL1),
- .access = access_pminten, .reg = PMINTENSET_EL1 },
+ .access = access_pminten, .reg = PMINTENSET_EL1,
+ .get_user = get_pminten, .set_user = set_pminten },
{ PMU_SYS_REG(PMINTENCLR_EL1),
- .access = access_pminten, .reg = PMINTENSET_EL1 },
+ .access = access_pminten, .reg = PMINTENSET_EL1,
+ .get_user = get_pminten, .set_user = set_pminten },
{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
@@ -2237,11 +2312,14 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
.reset = reset_pmcr, .reg = PMCR_EL0, .get_user = get_pmcr },
{ PMU_SYS_REG(PMCNTENSET_EL0),
- .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0,
+ .get_user = get_pmcnten, .set_user = set_pmcnten },
{ PMU_SYS_REG(PMCNTENCLR_EL0),
- .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0,
+ .get_user = get_pmcnten, .set_user = set_pmcnten },
{ PMU_SYS_REG(PMOVSCLR_EL0),
- .access = access_pmovs, .reg = PMOVSSET_EL0 },
+ .access = access_pmovs, .reg = PMOVSSET_EL0,
+ .get_user = get_pmovs, .set_user = set_pmovs },
/*
* PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
* previously (and pointlessly) advertised in the past...
@@ -2269,7 +2347,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
.reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
{ PMU_SYS_REG(PMOVSSET_EL0),
- .access = access_pmovs, .reg = PMOVSSET_EL0 },
+ .access = access_pmovs, .reg = PMOVSSET_EL0,
+ .get_user = get_pmovs, .set_user = set_pmovs },
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
--
2.42.0.655.g421f12c284-goog
On Fri, Oct 20, 2023 at 09:40:45PM +0000, Raghavendra Rao Ananta wrote:
> For unimplemented counters, the bits in the PM{C,I}NTEN{SET,CLR} and
> PMOVS{SET,CLR} registers are expected to be RAZ. To honor this,
> explicitly implement the {get,set}_user functions for these
> registers to mask out unimplemented counters for userspace reads
> and writes.
>
> Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> ---
> arch/arm64/kvm/sys_regs.c | 91 ++++++++++++++++++++++++++++++++++++---
> 1 file changed, 85 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index faf97878dfbbb..2e5d497596ef8 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -987,6 +987,45 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> return true;
> }
>
> +static void set_pmreg_for_valid_counters(struct kvm_vcpu *vcpu,
> + u64 reg, u64 val, bool set)
> +{
> + struct kvm *kvm = vcpu->kvm;
> +
> + mutex_lock(&kvm->arch.config_lock);
> +
> + /* Make the register immutable once the VM has started running */
This is a considerable change from the existing behavior and lacks
justification. These registers, or rather the state that these aliases
update, is mutable from the guest. I see no reason for excluding
userspace from this behavior.
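(Illustratively -- a sketch, not in-tree code -- a guest kernel can flip
these bits at any time:

	/* guest context: enable/disable event counters by mask */
	write_sysreg(mask, pmcntenset_el0);	/* set bits -> enable  */
	write_sysreg(mask, pmcntenclr_el0);	/* set bits -> disable */

so freezing the userspace view once the VM has run would be asymmetric
with what the guest itself is allowed to do.)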
> + if (kvm_vm_has_ran_once(kvm)) {
> + mutex_unlock(&kvm->arch.config_lock);
> + return;
> + }
> +
> + val &= kvm_pmu_valid_counter_mask(vcpu);
> + mutex_unlock(&kvm->arch.config_lock);
I'm not entirely sold on taking the config_lock here.
- If userspace is doing these ioctls in parallel, then it cannot guarantee
  ordering in the first place, even w/ locking under the hood. Any
  garbage values will be discarded by KVM_REQ_RELOAD_PMU.
- If the VM has already started, PMCR.N is immutable, so there is no
  race.
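For reference, the mask in question mirrors kvm_pmu_valid_counter_mask().
Roughly (a sketch of the idea, not a verbatim copy of the helper):

	/*
	 * Bits [N-1:0] cover the implemented event counters, where N is
	 * PMCR_EL0.N; bit 31 (ARMV8_PMU_CYCLE_IDX) is the cycle counter,
	 * which is always implemented.
	 */
	static u64 valid_counter_mask(struct kvm_vcpu *vcpu)
	{
		u64 n = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

		n &= ARMV8_PMU_PMCR_N_MASK;
		if (!n)
			return BIT(ARMV8_PMU_CYCLE_IDX);

		return GENMASK(n - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
	}

Once the VM has run, PMCR_EL0.N is fixed, so the mask cannot change out
from under a concurrent accessor anyway.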
--
Thanks,
Oliver
On Fri, 20 Oct 2023 22:40:45 +0100,
Raghavendra Rao Ananta <rananta@google.com> wrote:
>
> For unimplemented counters, the bits in the PM{C,I}NTEN{SET,CLR} and
> PMOVS{SET,CLR} registers are expected to be RAZ. To honor this,
> explicitly implement the {get,set}_user functions for these
> registers to mask out unimplemented counters for userspace reads
> and writes.
>
> Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> ---
> arch/arm64/kvm/sys_regs.c | 91 ++++++++++++++++++++++++++++++++++++---
> 1 file changed, 85 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index faf97878dfbbb..2e5d497596ef8 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -987,6 +987,45 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> return true;
> }
>
> +static void set_pmreg_for_valid_counters(struct kvm_vcpu *vcpu,
> + u64 reg, u64 val, bool set)
> +{
> + struct kvm *kvm = vcpu->kvm;
> +
> + mutex_lock(&kvm->arch.config_lock);
> +
> + /* Make the register immutable once the VM has started running */
> + if (kvm_vm_has_ran_once(kvm)) {
> + mutex_unlock(&kvm->arch.config_lock);
> + return;
> + }
> +
> + val &= kvm_pmu_valid_counter_mask(vcpu);
> + mutex_unlock(&kvm->arch.config_lock);
> +
> + if (set)
> + __vcpu_sys_reg(vcpu, reg) |= val;
> + else
> + __vcpu_sys_reg(vcpu, reg) &= ~val;
> +}
> +
> +static int get_pmcnten(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
> + u64 *val)
> +{
> + u64 mask = kvm_pmu_valid_counter_mask(vcpu);
> +
> + *val = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
> + return 0;
> +}
> +
> +static int set_pmcnten(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
> + u64 val)
> +{
> + /* r->Op2 & 0x1: true for PMCNTENSET_EL0, else PMCNTENCLR_EL0 */
> + set_pmreg_for_valid_counters(vcpu, PMCNTENSET_EL0, val, r->Op2 & 0x1);
> + return 0;
> +}
Huh, this is really ugly. Why the explosion of pointless helpers, when
the whole design of the sysreg infrastructure is to have *common* helpers
for registers that behave the same way?
I'd expect something like the hack below instead.
M.
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a2c5f210b3d6..8f560a2496f2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -987,42 +987,46 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
-static void set_pmreg_for_valid_counters(struct kvm_vcpu *vcpu,
- u64 reg, u64 val, bool set)
+static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
struct kvm *kvm = vcpu->kvm;
+ bool set;
mutex_lock(&kvm->arch.config_lock);
/* Make the register immutable once the VM has started running */
if (kvm_vm_has_ran_once(kvm)) {
mutex_unlock(&kvm->arch.config_lock);
- return;
+ return 0;
}
val &= kvm_pmu_valid_counter_mask(vcpu);
mutex_unlock(&kvm->arch.config_lock);
+	switch (r->reg) {
+ case PMOVSSET_EL0:
+ /* CRm[1] being set indicates a SET register, and CLR otherwise */
+ set = r->CRm & 2;
+ break;
+ default:
+ /* Op2[0] being set indicates a SET register, and CLR otherwise */
+ set = r->Op2 & 1;
+ break;
+ }
+
if (set)
- __vcpu_sys_reg(vcpu, reg) |= val;
+ __vcpu_sys_reg(vcpu, r->reg) |= val;
else
- __vcpu_sys_reg(vcpu, reg) &= ~val;
-}
-
-static int get_pmcnten(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
- u64 *val)
-{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ __vcpu_sys_reg(vcpu, r->reg) &= ~val;
- *val = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
return 0;
}
-static int set_pmcnten(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
- u64 val)
+static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
- /* r->Op2 & 0x1: true for PMCNTENSET_EL0, else PMCNTENCLR_EL0 */
- set_pmreg_for_valid_counters(vcpu, PMCNTENSET_EL0, val, r->Op2 & 0x1);
+ u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+ *val = __vcpu_sys_reg(vcpu, r->reg) & mask;
return 0;
}
@@ -1054,23 +1058,6 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
-static int get_pminten(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
- u64 *val)
-{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
-
- *val = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
- return 0;
-}
-
-static int set_pminten(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
- u64 val)
-{
- /* r->Op2 & 0x1: true for PMINTENSET_EL1, else PMINTENCLR_EL1 */
- set_pmreg_for_valid_counters(vcpu, PMINTENSET_EL1, val, r->Op2 & 0x1);
- return 0;
-}
-
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
@@ -1095,23 +1082,6 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
-static int set_pmovs(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
- u64 val)
-{
- /* r->CRm & 0x2: true for PMOVSSET_EL0, else PMOVSCLR_EL0 */
- set_pmreg_for_valid_counters(vcpu, PMOVSSET_EL0, val, r->CRm & 0x2);
- return 0;
-}
-
-static int get_pmovs(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
- u64 *val)
-{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
-
- *val = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
- return 0;
-}
-
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
@@ -2311,10 +2281,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ PMU_SYS_REG(PMINTENSET_EL1),
.access = access_pminten, .reg = PMINTENSET_EL1,
- .get_user = get_pminten, .set_user = set_pminten },
+ .get_user = get_pmreg, .set_user = set_pmreg },
{ PMU_SYS_REG(PMINTENCLR_EL1),
.access = access_pminten, .reg = PMINTENSET_EL1,
- .get_user = get_pminten, .set_user = set_pminten },
+ .get_user = get_pmreg, .set_user = set_pmreg },
{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
@@ -2366,13 +2336,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
.reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
{ PMU_SYS_REG(PMCNTENSET_EL0),
.access = access_pmcnten, .reg = PMCNTENSET_EL0,
- .get_user = get_pmcnten, .set_user = set_pmcnten },
+ .get_user = get_pmreg, .set_user = set_pmreg },
{ PMU_SYS_REG(PMCNTENCLR_EL0),
.access = access_pmcnten, .reg = PMCNTENSET_EL0,
- .get_user = get_pmcnten, .set_user = set_pmcnten },
+ .get_user = get_pmreg, .set_user = set_pmreg },
{ PMU_SYS_REG(PMOVSCLR_EL0),
.access = access_pmovs, .reg = PMOVSSET_EL0,
- .get_user = get_pmovs, .set_user = set_pmovs },
+ .get_user = get_pmreg, .set_user = set_pmreg },
/*
* PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
* previously (and pointlessly) advertised in the past...
@@ -2401,7 +2371,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
.reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
{ PMU_SYS_REG(PMOVSSET_EL0),
.access = access_pmovs, .reg = PMOVSSET_EL0,
- .get_user = get_pmovs, .set_user = set_pmovs },
+ .get_user = get_pmreg, .set_user = set_pmreg },
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
--
Without deviation from the norm, progress is not possible.
On Mon, Oct 23, 2023 at 5:31 AM Marc Zyngier <maz@kernel.org> wrote:
>
> [...]
>
> Huh, this is really ugly. Why the explosion of pointless helpers, when
> the whole design of the sysreg infrastructure is to have *common* helpers
> for registers that behave the same way?
>
> I'd expect something like the hack below instead.
>
> [...]
>
Thanks for the suggestion. I'll consider this in the next iteration.
- Raghavendra