To gain the best performance benefit from partitioning the PMU, use
fine-grained traps (FEAT_FGT and FEAT_FGT2) so that the most common
PMU register accesses from the guest no longer trap, removing that
overhead.
There should be no information leaks between guests as all these
registers are context swapped by a later patch in this series.
Untrapped:
* PMCR_EL0
* PMUSERENR_EL0
* PMSELR_EL0
* PMCCNTR_EL0
* PMINTEN_EL1
* PMCNTEN_EL0
* PMEVCNTRn_EL0
Trapped:
* PMOVS_EL0
* PMEVTYPERn_EL0
* PMCCFILTR_EL0
* PMICNTR_EL0
* PMICFILTR_EL0
PMOVS remains trapped so KVM can track overflow IRQs that will need to
be injected into the guest.
PMICNTR remains trapped because KVM is not handling that yet.
PMEVTYPERn remains trapped so KVM can limit which events guests can
count, such as disallowing counting at EL2. PMCCFILTR and PMICFILTR
remain trapped for the same reason.
Signed-off-by: Colton Lewis <coltonlewis@google.com>
---
arch/arm64/include/asm/kvm_pmu.h | 23 ++++++++++
arch/arm64/kvm/hyp/include/hyp/switch.h | 58 +++++++++++++++++++++++++
arch/arm64/kvm/pmu-part.c | 32 ++++++++++++++
3 files changed, 113 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_pmu.h b/arch/arm64/include/asm/kvm_pmu.h
index 6328e90952ba..73b7161e3f4e 100644
--- a/arch/arm64/include/asm/kvm_pmu.h
+++ b/arch/arm64/include/asm/kvm_pmu.h
@@ -94,6 +94,21 @@ u64 kvm_pmu_guest_counter_mask(struct arm_pmu *pmu);
void kvm_pmu_host_counters_enable(void);
void kvm_pmu_host_counters_disable(void);
+#if !defined(__KVM_NVHE_HYPERVISOR__)
+bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu);
+#else
+static inline bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static inline bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+#endif
+
/*
* Updates the vcpu's view of the pmu events for this cpu.
* Must be called before every vcpu run after disabling interrupts, to ensure
@@ -133,6 +148,14 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
{
return 0;
}
+static inline bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+static inline bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx, u64 val) {}
static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 825b81749972..47d2db8446df 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -191,6 +191,61 @@ static inline bool cpu_has_amu(void)
ID_AA64PFR0_EL1_AMU_SHIFT);
}
+/**
+ * __activate_pmu_fgt() - Activate fine grain traps for partitioned PMU
+ * @vcpu: Pointer to struct kvm_vcpu
+ *
+ * Clear the most commonly accessed registers for a partitioned
+ * PMU. Trap the rest.
+ */
+static inline void __activate_pmu_fgt(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ u64 set;
+ u64 clr;
+
+ set = HDFGRTR_EL2_PMOVS
+ | HDFGRTR_EL2_PMCCFILTR_EL0
+ | HDFGRTR_EL2_PMEVTYPERn_EL0;
+ clr = HDFGRTR_EL2_PMUSERENR_EL0
+ | HDFGRTR_EL2_PMSELR_EL0
+ | HDFGRTR_EL2_PMINTEN
+ | HDFGRTR_EL2_PMCNTEN
+ | HDFGRTR_EL2_PMCCNTR_EL0
+ | HDFGRTR_EL2_PMEVCNTRn_EL0;
+
+ update_fgt_traps_cs(hctxt, vcpu, kvm, HDFGRTR_EL2, clr, set);
+
+ set = HDFGWTR_EL2_PMOVS
+ | HDFGWTR_EL2_PMCCFILTR_EL0
+ | HDFGWTR_EL2_PMEVTYPERn_EL0;
+ clr = HDFGWTR_EL2_PMUSERENR_EL0
+ | HDFGWTR_EL2_PMCR_EL0
+ | HDFGWTR_EL2_PMSELR_EL0
+ | HDFGWTR_EL2_PMINTEN
+ | HDFGWTR_EL2_PMCNTEN
+ | HDFGWTR_EL2_PMCCNTR_EL0
+ | HDFGWTR_EL2_PMEVCNTRn_EL0;
+
+ update_fgt_traps_cs(hctxt, vcpu, kvm, HDFGWTR_EL2, clr, set);
+
+ if (!cpus_have_final_cap(ARM64_HAS_FGT2))
+ return;
+
+ set = HDFGRTR2_EL2_nPMICFILTR_EL0
+ | HDFGRTR2_EL2_nPMICNTR_EL0;
+ clr = 0;
+
+ update_fgt_traps_cs(hctxt, vcpu, kvm, HDFGRTR2_EL2, clr, set);
+
+ set = HDFGWTR2_EL2_nPMICFILTR_EL0
+ | HDFGWTR2_EL2_nPMICNTR_EL0;
+ clr = 0;
+
+ update_fgt_traps_cs(hctxt, vcpu, kvm, HDFGWTR2_EL2, clr, set);
+}
+
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
@@ -210,6 +265,9 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
if (cpu_has_amu())
update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
+ if (kvm_vcpu_pmu_use_fgt(vcpu))
+ __activate_pmu_fgt(vcpu);
+
if (!cpus_have_final_cap(ARM64_HAS_FGT2))
return;
diff --git a/arch/arm64/kvm/pmu-part.c b/arch/arm64/kvm/pmu-part.c
index 4f06a48175e2..92775e19cbf6 100644
--- a/arch/arm64/kvm/pmu-part.c
+++ b/arch/arm64/kvm/pmu-part.c
@@ -41,6 +41,38 @@ bool kvm_pmu_is_partitioned(struct arm_pmu *pmu)
pmu->hpmn_max <= *host_data_ptr(nr_event_counters);
}
+/**
+ * kvm_vcpu_pmu_is_partitioned() - Determine if given VCPU has a partitioned PMU
+ * @vcpu: Pointer to kvm_vcpu struct
+ *
+ * Determine if given VCPU has a partitioned PMU by extracting that
+ * field and passing it to :c:func:`kvm_pmu_is_partitioned`
+ *
+ * Return: True if the VCPU PMU is partitioned, false otherwise
+ */
+bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu)
+{
+ return kvm_pmu_is_partitioned(vcpu->kvm->arch.arm_pmu);
+}
+
+/**
+ * kvm_vcpu_pmu_use_fgt() - Determine if we can use FGT
+ * @vcpu: Pointer to struct kvm_vcpu
+ *
+ * Determine if we can use FGT for direct access to registers. We can
+ * if capabilities permit the number of guest counters requested.
+ *
+ * Return: True if we can use FGT, false otherwise
+ */
+bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu)
+{
+ u8 hpmn = vcpu->kvm->arch.nr_pmu_counters;
+
+ return kvm_vcpu_pmu_is_partitioned(vcpu) &&
+ cpus_have_final_cap(ARM64_HAS_FGT) &&
+ (hpmn != 0 || cpus_have_final_cap(ARM64_HAS_HPMN0));
+}
+
/**
* kvm_pmu_host_counter_mask() - Compute bitmask of host-reserved counters
* @pmu: Pointer to arm_pmu struct
--
2.50.0.727.gbf7dc18ff4-goog
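For a sense of what the untrapped list buys: once HDFGRTR_EL2/HDFGWTR_EL2
stop trapping the counter registers, the guest can sample them with an
ordinary system register read instead of taking an exit that KVM has to
emulate. A minimal guest-side sketch, not part of the patch (it assumes
the cycle counter is enabled and the access is permitted at the current
exception level, e.g. guest EL1, or EL0 with PMUSERENR_EL0.EN set):

#include <stdint.h>

/*
 * Illustration only: read the PMU cycle counter directly. With
 * HDFGRTR_EL2.PMCCNTR_EL0 clear (untrapped), this MRS completes without
 * leaving the guest; with the trap set, every such read would fault to
 * EL2 and be emulated by KVM.
 */
static inline uint64_t read_pmccntr(void)
{
	uint64_t val;

	asm volatile("mrs %0, pmccntr_el0" : "=r" (val));
	return val;
}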
On Thu, 26 Jun 2025 21:04:46 +0100,
Colton Lewis <coltonlewis@google.com> wrote:
>
> PMOVS remains trapped so KVM can track overflow IRQs that will need to
> be injected into the guest.
>
> PMICNTR remains trapped because KVM is not handling that yet.
>
> PMEVTYPERn remains trapped so KVM can limit which events guests can
> count, such as disallowing counting at EL2. PMCCFILTR and PMCIFILTR
> are the same.

I'd rather you explain why it is safe not to trap the rest.

[...]

> +static inline void __activate_pmu_fgt(struct kvm_vcpu *vcpu)
> +{
[...]
> +	update_fgt_traps_cs(hctxt, vcpu, kvm, HDFGRTR_EL2, clr, set);
[...]
> +	update_fgt_traps_cs(hctxt, vcpu, kvm, HDFGWTR_EL2, clr, set);
[...]
> +	update_fgt_traps_cs(hctxt, vcpu, kvm, HDFGRTR2_EL2, clr, set);
[...]
> +	update_fgt_traps_cs(hctxt, vcpu, kvm, HDFGWTR2_EL2, clr, set);

This feels wrong. There should be one place to populate the FGTs that
apply to a guest as set from the host, not two or more.

There is such a construct in the SME series, and maybe you could have
a look at it, specially if the trap configuration is this static.

	M.

--
Without deviation from the norm, progress is not possible.
Marc Zyngier <maz@kernel.org> writes:

> On Thu, 26 Jun 2025 21:04:46 +0100,
> Colton Lewis <coltonlewis@google.com> wrote:

[...]

> I'd rather you explain why it is safe not to trap the rest.

Okay, I will reverse my explanation.

[...]

> This feels wrong. There should be one place to populate the FGTs that
> apply to a guest as set from the host, not two or more.
>
> There is such a construct in the SME series, and maybe you could have
> a look at it, specially if the trap configuration is this static.

I'm assuming you are referring to Mark Brown's series [1], specifically
patches 5 and 18 and I see what you mean.

You are probably thinking configuration should happen from
sys_regs.c:kvm_calculate_traps or thereabout and should be setting bits
in the existing kvm->arch.fgt array.

Correct me if I'm mistaken.

[1] https://lore.kernel.org/kvm/20250625-kvm-arm64-sme-v6-0-114cff4ffe04@kernel.org/
On Fri, 27 Jun 2025 21:45:57 +0100,
Colton Lewis <coltonlewis@google.com> wrote:
>
> Marc Zyngier <maz@kernel.org> writes:
>
> > This feels wrong. There should be one place to populate the FGTs that
> > apply to a guest as set from the host, not two or more.
> >
> > There is such a construct in the SME series, and maybe you could have
> > a look at it, specially if the trap configuration is this static.
>
> I'm assuming you are referring to Mark Brown's series [1], specifically
> patches 5 and 18 and I see what you mean.
>
> You are probably thinking configuration should happen from
> sys_regs.c:kvm_calculate_traps or thereabout and should be setting bits
> in the existing kvm->arch.fgt array.
>
> Correct me if I'm mistaken.

I'm saying there should be exactly one place where we write to the
individual trap registers, and that the source of these settings should
be equally unique when they are immutable in the lifetime of the guest.

That's the existing pattern for most trap configuration, including
HCR_EL2, ICH_HCR_EL2, HCRX_EL2, and the FGU configuration that trickles
into the actual trap registers, and I want to stick with it if at all
possible.

The way it is done in the SME series may be reasonable, but I haven't
reviewed this series at all. I'm merely pointing out that similar
constructs exist for other features.

	M.

--
Jazz isn't dead. It just smells funny.
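Reading Marc's point concretely: the PMU trap configuration is immutable
for the lifetime of the guest, so it would be computed exactly once,
next to the rest of the trap setup, and the world switch would only ever
copy that precomputed state into the HDFG*TR_EL2 registers. A rough
sketch of that shape, not taken from any posted series -- the per-VM
storage (kvm->arch.fgt[], the group indices) and the function it would
live in are placeholders for whatever the tree provides; only the
HDFG*TR_EL2 bit names come from the patch above:

/*
 * Hypothetical one-time computation of the partitioned-PMU FGT bits,
 * e.g. called from kvm_calculate_traps(). Field and index names are
 * assumptions; the point is that no PMU-specific code writes the trap
 * registers at world switch.
 */
static void kvm_calculate_pmu_fgt(struct kvm *kvm)
{
	u64 *rtr = &kvm->arch.fgt[HDFGRTR_GROUP];	/* assumed storage */
	u64 *wtr = &kvm->arch.fgt[HDFGWTR_GROUP];

	/* Keep trapping what KVM still has to emulate or filter. */
	*rtr |= HDFGRTR_EL2_PMOVS | HDFGRTR_EL2_PMCCFILTR_EL0 |
		HDFGRTR_EL2_PMEVTYPERn_EL0;
	*wtr |= HDFGWTR_EL2_PMOVS | HDFGWTR_EL2_PMCCFILTR_EL0 |
		HDFGWTR_EL2_PMEVTYPERn_EL0;

	/* Stop trapping the hot-path registers the guest now owns. */
	*rtr &= ~(HDFGRTR_EL2_PMUSERENR_EL0 | HDFGRTR_EL2_PMSELR_EL0 |
		  HDFGRTR_EL2_PMINTEN | HDFGRTR_EL2_PMCNTEN |
		  HDFGRTR_EL2_PMCCNTR_EL0 | HDFGRTR_EL2_PMEVCNTRn_EL0);
	*wtr &= ~(HDFGWTR_EL2_PMUSERENR_EL0 | HDFGWTR_EL2_PMCR_EL0 |
		  HDFGWTR_EL2_PMSELR_EL0 | HDFGWTR_EL2_PMINTEN |
		  HDFGWTR_EL2_PMCNTEN | HDFGWTR_EL2_PMCCNTR_EL0 |
		  HDFGWTR_EL2_PMEVCNTRn_EL0);
}

The switch path would then load kvm->arch.fgt[] wholesale, so the
second, PMU-specific writer of the trap registers disappears.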