[PATCH v7 19/19] KVM: selftests: Test PMC virtualization with forced emulation

Posted by Sean Christopherson
Extend the PMC counters test to use forced emulation to verify that KVM
emulates counter events for instructions retired and branches retired.
Force emulation for only a subset of the measured code to test that KVM
does the right thing when mixing perf events with emulated events.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
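Note: forced emulation hinges on KVM's force_emulation_prefix module param.
The selftests' KVM_FEP macro emits a 'ud2' followed by the signature bytes
'k', 'v', 'm'; when the param is enabled, KVM intercepts the resulting #UD,
skips the prefix, and emulates the instruction that follows instead of
letting hardware execute it. A minimal sketch, assuming the KVM_FEP
definition from the selftests headers (the fep_nop() wrapper is purely
illustrative, not part of this patch):

  #define KVM_FEP "ud2; .byte 'k', 'v', 'm';"

  static inline void fep_nop(void)
  {
  	/* The NOP is decoded and retired by KVM's emulator, not hardware. */
  	__asm__ __volatile__(KVM_FEP "nop");
  }
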
 .../selftests/kvm/x86_64/pmu_counters_test.c  | 44 +++++++++++++------
 1 file changed, 30 insertions(+), 14 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index d775cc7e8fab..09332b3c0a69 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -21,6 +21,7 @@
 
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
+static bool is_forced_emulation_enabled;
 
 static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 						  void *guest_code,
@@ -34,6 +35,7 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 	vcpu_init_descriptor_tables(*vcpu);
 
 	sync_global_to_guest(vm, kvm_pmu_version);
+	sync_global_to_guest(vm, is_forced_emulation_enabled);
 
 	/*
 	 * Set PERF_CAPABILITIES before PMU version as KVM disallows enabling
@@ -138,37 +140,50 @@ static void guest_assert_event_count(uint8_t idx,
 * If CLFLUSH{,OPT} is supported, flush the cacheline containing (at least) the
  * start of the loop to force LLC references and misses, i.e. to allow testing
  * that those events actually count.
+ *
+ * If forced emulation is enabled (and specified), force emulation on a subset
+ * of the measured code to verify that KVM correctly emulates instructions and
+ * branches retired events in conjunction with hardware also counting said
+ * events.
  */
-#define GUEST_MEASURE_EVENT(_msr, _value, clflush)				\
+#define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP)				\
 do {										\
 	__asm__ __volatile__("wrmsr\n\t"					\
 			     clflush "\n\t"					\
 			     "mfence\n\t"					\
 			     "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t"	\
-			     "loop .\n\t"					\
-			     "mov %%edi, %%ecx\n\t"				\
-			     "xor %%eax, %%eax\n\t"				\
-			     "xor %%edx, %%edx\n\t"				\
+			     FEP "loop .\n\t"					\
+			     FEP "mov %%edi, %%ecx\n\t"				\
+			     FEP "xor %%eax, %%eax\n\t"				\
+			     FEP "xor %%edx, %%edx\n\t"				\
 			     "wrmsr\n\t"					\
 			     :: "a"((uint32_t)_value), "d"(_value >> 32),	\
 				"c"(_msr), "D"(_msr)				\
 	);									\
 } while (0)
 
+#define GUEST_TEST_EVENT(_idx, _event, _pmc, _pmc_msr, _ctrl_msr, _value, FEP)	\
+do {										\
+	wrmsr(_pmc_msr, 0);						\
+										\
+	if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))				\
+		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP);	\
+	else if (this_cpu_has(X86_FEATURE_CLFLUSH))				\
+		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP);	\
+	else									\
+		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP);		\
+										\
+	guest_assert_event_count(_idx, _event, _pmc, _pmc_msr);			\
+} while (0)
+
 static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature event,
 				    uint32_t pmc, uint32_t pmc_msr,
 				    uint32_t ctrl_msr, uint64_t ctrl_msr_value)
 {
-	wrmsr(pmc_msr, 0);
+	GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, "");
 
-	if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))
-		GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "clflushopt 1f");
-	else if (this_cpu_has(X86_FEATURE_CLFLUSH))
-		GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "clflush 1f");
-	else
-		GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "nop");
-
-	guest_assert_event_count(idx, event, pmc, pmc_msr);
+	if (is_forced_emulation_enabled)
+		GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
 }
 
 #define X86_PMU_FEATURE_NULL						\
@@ -545,6 +560,7 @@ int main(int argc, char *argv[])
 
 	kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
 	kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM);
+	is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
 
 	test_intel_counters();
 
-- 
2.42.0.869.gea05f2083d-goog
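
The is_forced_emulation_enabled plumbing above keys off the same module
param. A hedged, stand-alone sketch of such a probe (the real helper is
the selftests library's kvm_is_forced_emulation_enabled(); this version
assumes the standard sysfs path for kvm module parameters):

  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical stand-alone probe; the selftests library has its own. */
  static bool probe_forced_emulation(void)
  {
  	FILE *f = fopen("/sys/module/kvm/parameters/force_emulation_prefix", "r");
  	int val = 0;

  	if (!f)
  		return false;	/* Param absent: forced emulation unavailable. */
  	if (fscanf(f, "%d", &val) != 1)
  		val = 0;
  	fclose(f);
  	return val != 0;
  }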
Re: [PATCH v7 19/19] KVM: selftests: Test PMC virtualization with forced emulation
Posted by Mi, Dapeng
On 11/8/2023 8:31 AM, Sean Christopherson wrote:
> [...]

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>