[PATCH v6 08/20] KVM: selftests: Extend {kvm,this}_pmu_has() to support fixed counters

Posted by Sean Christopherson 2 years, 1 month ago
Extend the kvm_x86_pmu_feature framework to allow querying for fixed
counters via {kvm,this}_pmu_has().  Like architectural events, checking
for a fixed counter annoyingly requires checking multiple CPUID fields, as
a fixed counter exists if:

  FxCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i);
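
For illustration, that check maps to C along these lines (a standalone sketch
working on raw CPUID.0xA output, not the selftest helpers added below; the
function name is made up):

  #include <stdbool.h>
  #include <stdint.h>

  /* ecx/edx are the raw CPUID.0xA outputs, i is the fixed counter index. */
  static bool fixed_counter_supported(uint32_t ecx, uint32_t edx, unsigned int i)
  {
          /* EDX[4:0] enumerates the total number of fixed counters. */
          uint32_t nr_fixed = edx & 0x1f;

          return (ecx & (1u << i)) || nr_fixed > i;
  }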

Note, KVM currently doesn't actually support exposing fixed counters via
the bitmask, but that will hopefully change sooner than later, and Intel's
SDM explicitly "recommends" checking both the number of counters and the
mask.

Rename the intermediate "anti_feature" field to simply 'f' since the fixed
counter bitmask (thankfully) doesn't have reversed polarity like the
architectural events bitmask.

Note, ideally the helpers would use BUILD_BUG_ON() to assert on the
incoming register, but the expected usage in PMU tests can't guarantee the
inputs are compile-time constants.

Opportunistically define macros for all of the architectural events and
fixed counters that KVM currently supports.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 .../selftests/kvm/include/x86_64/processor.h  | 63 +++++++++++++------
 1 file changed, 45 insertions(+), 18 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 2d9771151dd9..b103c462701b 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -281,24 +281,39 @@ struct kvm_x86_cpu_property {
  * that indicates the feature is _not_ supported, and a property that states
  * the length of the bit mask of unsupported features.  A feature is supported
  * if the size of the bit mask is larger than the "unavailable" bit, and said
- * bit is not set.
+ * bit is not set.  Fixed counters also have bizarre enumeration, but it is
+ * inverted relative to the arch events for general purpose counters.  Fixed
+ * counters are supported if a feature flag is set **OR** the total number of
+ * fixed counters is greater than the index of the counter.
  *
- * Wrap the "unavailable" feature to simplify checking whether or not a given
- * architectural event is supported.
+ * Wrap the events for general purpose and fixed counters to simplify checking
+ * whether or not a given architectural event is supported.
  */
 struct kvm_x86_pmu_feature {
-	struct kvm_x86_cpu_feature anti_feature;
+	struct kvm_x86_cpu_feature f;
 };
-#define	KVM_X86_PMU_FEATURE(__bit)						\
-({										\
-	struct kvm_x86_pmu_feature feature = {					\
-		.anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit),	\
-	};									\
-										\
-	feature;								\
+#define	KVM_X86_PMU_FEATURE(__reg, __bit)				\
+({									\
+	struct kvm_x86_pmu_feature feature = {				\
+		.f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit),		\
+	};								\
+									\
+	kvm_static_assert(KVM_CPUID_##__reg == KVM_CPUID_EBX ||		\
+			  KVM_CPUID_##__reg == KVM_CPUID_ECX);		\
+	feature;							\
 })
 
-#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED	KVM_X86_PMU_FEATURE(5)
+#define X86_PMU_FEATURE_CPU_CYCLES		KVM_X86_PMU_FEATURE(EBX, 0)
+#define X86_PMU_FEATURE_INSNS_RETIRED		KVM_X86_PMU_FEATURE(EBX, 1)
+#define X86_PMU_FEATURE_REFERENCE_CYCLES	KVM_X86_PMU_FEATURE(EBX, 2)
+#define X86_PMU_FEATURE_LLC_REFERENCES		KVM_X86_PMU_FEATURE(EBX, 3)
+#define X86_PMU_FEATURE_LLC_MISSES		KVM_X86_PMU_FEATURE(EBX, 4)
+#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED	KVM_X86_PMU_FEATURE(EBX, 5)
+#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED	KVM_X86_PMU_FEATURE(EBX, 6)
+
+#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED	KVM_X86_PMU_FEATURE(ECX, 0)
+#define X86_PMU_FEATURE_CPU_CYCLES_FIXED	KVM_X86_PMU_FEATURE(ECX, 1)
+#define X86_PMU_FEATURE_REFERENCE_CYCLES_FIXED	KVM_X86_PMU_FEATURE(ECX, 2)
 
 static inline unsigned int x86_family(unsigned int eax)
 {
@@ -697,10 +712,16 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
 
 static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
 {
-	uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+	uint32_t nr_bits;
 
-	return nr_bits > feature.anti_feature.bit &&
-	       !this_cpu_has(feature.anti_feature);
+	if (feature.f.reg == KVM_CPUID_EBX) {
+		nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+		return nr_bits > feature.f.bit && !this_cpu_has(feature.f);
+	}
+
+	GUEST_ASSERT(feature.f.reg == KVM_CPUID_ECX);
+	nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+	return nr_bits > feature.f.bit || this_cpu_has(feature.f);
 }
 
 static __always_inline uint64_t this_cpu_supported_xcr0(void)
@@ -916,10 +937,16 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
 
 static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
 {
-	uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+	uint32_t nr_bits;
 
-	return nr_bits > feature.anti_feature.bit &&
-	       !kvm_cpu_has(feature.anti_feature);
+	if (feature.f.reg == KVM_CPUID_EBX) {
+		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+		return nr_bits > feature.f.bit && !kvm_cpu_has(feature.f);
+	}
+
+	TEST_ASSERT_EQ(feature.f.reg, KVM_CPUID_ECX);
+	nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+	return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
 }
 
 static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
-- 
2.42.0.869.gea05f2083d-goog
Re: [PATCH v6 08/20] KVM: selftests: Extend {kvm,this}_pmu_has() to support fixed counters
Posted by Jim Mattson 2 years, 1 month ago
On Fri, Nov 3, 2023 at 5:02 PM Sean Christopherson <seanjc@google.com> wrote:
>
> Extend the kvm_x86_pmu_feature framework to allow querying for fixed
> counters via {kvm,this}_pmu_has().  Like architectural events, checking
> for a fixed counter annoyingly requires checking multiple CPUID fields, as
> a fixed counter exists if:
>
>   FxCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i);
>
> Note, KVM currently doesn't actually support exposing fixed counters via
> the bitmask, but that will hopefully change sooner than later, and Intel's
> SDM explicitly "recommends" checking both the number of counters and the
> mask.
>
> Rename the intermediate "anti_feature" field to simply 'f' since the fixed
> counter bitmask (thankfully) doesn't have reversed polarity like the
> architectural events bitmask.
>
> Note, ideally the helpers would use BUILD_BUG_ON() to assert on the
> incoming register, but the expected usage in PMU tests can't guarantee the
> inputs are compile-time constants.
>
> Opportunistically define macros for all of the architectural events and
> fixed counters that KVM currently supports.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  .../selftests/kvm/include/x86_64/processor.h  | 63 +++++++++++++------
>  1 file changed, 45 insertions(+), 18 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
> index 2d9771151dd9..b103c462701b 100644
> --- a/tools/testing/selftests/kvm/include/x86_64/processor.h
> +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
> @@ -281,24 +281,39 @@ struct kvm_x86_cpu_property {
>   * that indicates the feature is _not_ supported, and a property that states
>   * the length of the bit mask of unsupported features.  A feature is supported
>   * if the size of the bit mask is larger than the "unavailable" bit, and said
> - * bit is not set.
> + * bit is not set.  Fixed counters also have bizarre enumeration, but it is
> + * inverted relative to the arch events for general purpose counters.  Fixed
> + * counters are supported if a feature flag is set **OR** the total number of
> + * fixed counters is greater than the index of the counter.
>   *
> - * Wrap the "unavailable" feature to simplify checking whether or not a given
> - * architectural event is supported.
> + * Wrap the events for general purpose and fixed counters to simplify checking
> + * whether or not a given architectural event is supported.
>   */
>  struct kvm_x86_pmu_feature {
> -       struct kvm_x86_cpu_feature anti_feature;
> +       struct kvm_x86_cpu_feature f;
>  };
> -#define        KVM_X86_PMU_FEATURE(__bit)                                              \
> -({                                                                             \
> -       struct kvm_x86_pmu_feature feature = {                                  \
> -               .anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit),        \
> -       };                                                                      \
> -                                                                               \
> -       feature;                                                                \
> +#define        KVM_X86_PMU_FEATURE(__reg, __bit)                               \
> +({                                                                     \
> +       struct kvm_x86_pmu_feature feature = {                          \
> +               .f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit),         \
> +       };                                                              \
> +                                                                       \
> +       kvm_static_assert(KVM_CPUID_##__reg == KVM_CPUID_EBX ||         \
> +                         KVM_CPUID_##__reg == KVM_CPUID_ECX);          \
> +       feature;                                                        \
>  })
>
> -#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED   KVM_X86_PMU_FEATURE(5)
> +#define X86_PMU_FEATURE_CPU_CYCLES             KVM_X86_PMU_FEATURE(EBX, 0)
> +#define X86_PMU_FEATURE_INSNS_RETIRED          KVM_X86_PMU_FEATURE(EBX, 1)
> +#define X86_PMU_FEATURE_REFERENCE_CYCLES       KVM_X86_PMU_FEATURE(EBX, 2)
> +#define X86_PMU_FEATURE_LLC_REFERENCES         KVM_X86_PMU_FEATURE(EBX, 3)
> +#define X86_PMU_FEATURE_LLC_MISSES             KVM_X86_PMU_FEATURE(EBX, 4)
> +#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED   KVM_X86_PMU_FEATURE(EBX, 5)
> +#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED  KVM_X86_PMU_FEATURE(EBX, 6)

Why not add top down slots now?

> +
> +#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED    KVM_X86_PMU_FEATURE(ECX, 0)
> +#define X86_PMU_FEATURE_CPU_CYCLES_FIXED       KVM_X86_PMU_FEATURE(ECX, 1)
> +#define X86_PMU_FEATURE_REFERENCE_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 2)

Perhaps toss 'TSC' between CYCLES and FIXED?

And add top down slots now?

>
>  static inline unsigned int x86_family(unsigned int eax)
>  {
> @@ -697,10 +712,16 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
>
>  static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
>  {
> -       uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
> +       uint32_t nr_bits;
>
> -       return nr_bits > feature.anti_feature.bit &&
> -              !this_cpu_has(feature.anti_feature);
> +       if (feature.f.reg == KVM_CPUID_EBX) {
> +               nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
> +               return nr_bits > feature.f.bit && !this_cpu_has(feature.f);

Ouch! Reverse polarity bits make 'this_cpu_has' non-intuitive.

> +       }
> +
> +       GUEST_ASSERT(feature.f.reg == KVM_CPUID_ECX);
> +       nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
> +       return nr_bits > feature.f.bit || this_cpu_has(feature.f);
>  }
>
>  static __always_inline uint64_t this_cpu_supported_xcr0(void)
> @@ -916,10 +937,16 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
>
>  static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
>  {
> -       uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
> +       uint32_t nr_bits;
>
> -       return nr_bits > feature.anti_feature.bit &&
> -              !kvm_cpu_has(feature.anti_feature);
> +       if (feature.f.reg == KVM_CPUID_EBX) {
> +               nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
> +               return nr_bits > feature.f.bit && !kvm_cpu_has(feature.f);
> +       }
> +
> +       TEST_ASSERT_EQ(feature.f.reg, KVM_CPUID_ECX);
> +       nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
> +       return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
>  }
>
>  static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
> --
> 2.42.0.869.gea05f2083d-goog
>
Re: [PATCH v6 08/20] KVM: selftests: Extend {kvm,this}_pmu_has() to support fixed counters
Posted by Sean Christopherson 2 years, 1 month ago
On Sat, Nov 04, 2023, Jim Mattson wrote:
> On Fri, Nov 3, 2023 at 5:02 PM Sean Christopherson <seanjc@google.com> wrote:
> > +#define        KVM_X86_PMU_FEATURE(__reg, __bit)                               \
> > +({                                                                     \
> > +       struct kvm_x86_pmu_feature feature = {                          \
> > +               .f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit),         \
> > +       };                                                              \
> > +                                                                       \
> > +       kvm_static_assert(KVM_CPUID_##__reg == KVM_CPUID_EBX ||         \
> > +                         KVM_CPUID_##__reg == KVM_CPUID_ECX);          \
> > +       feature;                                                        \
> >  })
> >
> > -#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED   KVM_X86_PMU_FEATURE(5)
> > +#define X86_PMU_FEATURE_CPU_CYCLES             KVM_X86_PMU_FEATURE(EBX, 0)
> > +#define X86_PMU_FEATURE_INSNS_RETIRED          KVM_X86_PMU_FEATURE(EBX, 1)
> > +#define X86_PMU_FEATURE_REFERENCE_CYCLES       KVM_X86_PMU_FEATURE(EBX, 2)
> > +#define X86_PMU_FEATURE_LLC_REFERENCES         KVM_X86_PMU_FEATURE(EBX, 3)
> > +#define X86_PMU_FEATURE_LLC_MISSES             KVM_X86_PMU_FEATURE(EBX, 4)
> > +#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED   KVM_X86_PMU_FEATURE(EBX, 5)
> > +#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED  KVM_X86_PMU_FEATURE(EBX, 6)
> 
> Why not add top down slots now?

Laziness?  

> > +#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED    KVM_X86_PMU_FEATURE(ECX, 0)
> > +#define X86_PMU_FEATURE_CPU_CYCLES_FIXED       KVM_X86_PMU_FEATURE(ECX, 1)
> > +#define X86_PMU_FEATURE_REFERENCE_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 2)
> 
> Perhaps toss 'TSC' between CYCLES and FIXED?

I think X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED is more aligned with how the
SDM (and English in general) talks about reference cycles.

> And add top down slots now?

Ya.