AMD does not have what is commonly referred to as an architectural PMU.
Therefore, rename the following variables so that they apply to both
Intel and AMD:
- has_architectural_pmu_version
- num_architectural_pmu_gp_counters
- num_architectural_pmu_fixed_counters
For Intel processors, the meaning of pmu_version remains unchanged.
For AMD processors:
pmu_version == 1 corresponds to versions before AMD PerfMonV2.
pmu_version == 2 corresponds to AMD PerfMonV2.
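
For context, on AMD the PerfMonV2 capability is advertised via CPUID
leaf 0x80000022 (EAX bit 0). As a minimal userspace sketch of the
mapping above, assuming GCC/Clang's <cpuid.h> helpers; the
amd_pmu_version() helper is illustrative only and not part of this
patch:

    #include <stdint.h>
    #include <cpuid.h>

    static uint32_t amd_pmu_version(void)
    {
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* __get_cpuid() returns 0 if the extended leaf is unsupported. */
        if (__get_cpuid(0x80000022, &eax, &ebx, &ecx, &edx) &&
            (eax & 0x1)) {  /* EAX[0]: PerfMonV2 supported */
            return 2;       /* AMD PerfMonV2 */
        }
        return 1;           /* versions before AMD PerfMonV2 */
    }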
Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Reviewed-by: Zhao Liu <zhao1.liu@intel.com>
---
Changed since v2:
- Change has_pmu_version to pmu_version.
- Add Reviewed-by since the change is minor.
- As a reminder, there are some contextual changes due to PATCH 05,
  i.e., c->edx vs. edx.
target/i386/kvm/kvm.c | 49 ++++++++++++++++++++++++-------------------
1 file changed, 28 insertions(+), 21 deletions(-)
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 4baaa069b8..824148688d 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -166,9 +166,16 @@ static bool has_msr_perf_capabs;
static bool has_msr_pkrs;
static bool has_msr_hwcr;
-static uint32_t has_architectural_pmu_version;
-static uint32_t num_architectural_pmu_gp_counters;
-static uint32_t num_architectural_pmu_fixed_counters;
+/*
+ * For Intel processors, the meaning is the architectural PMU version
+ * number.
+ *
+ * For AMD processors: 1 corresponds to versions before AMD PerfMonV2,
+ * and 2 corresponds to AMD PerfMonV2.
+ */
+static uint32_t pmu_version;
+static uint32_t num_pmu_gp_counters;
+static uint32_t num_pmu_fixed_counters;
static int has_xsave2;
static int has_xcrs;
@@ -2081,24 +2088,24 @@ static void kvm_init_pmu_info(struct kvm_cpuid2 *cpuid)
return;
}
- has_architectural_pmu_version = c->eax & 0xff;
- if (has_architectural_pmu_version > 0) {
- num_architectural_pmu_gp_counters = (c->eax & 0xff00) >> 8;
+ pmu_version = c->eax & 0xff;
+ if (pmu_version > 0) {
+ num_pmu_gp_counters = (c->eax & 0xff00) >> 8;
/*
* Shouldn't be more than 32, since that's the number of bits
* available in EBX to tell us _which_ counters are available.
* Play it safe.
*/
- if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
- num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
+ if (num_pmu_gp_counters > MAX_GP_COUNTERS) {
+ num_pmu_gp_counters = MAX_GP_COUNTERS;
}
- if (has_architectural_pmu_version > 1) {
- num_architectural_pmu_fixed_counters = c->edx & 0x1f;
+ if (pmu_version > 1) {
+ num_pmu_fixed_counters = c->edx & 0x1f;
- if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
- num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
+ if (num_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
+ num_pmu_fixed_counters = MAX_FIXED_COUNTERS;
}
}
}
@@ -4051,25 +4058,25 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
}
- if (has_architectural_pmu_version > 0) {
- if (has_architectural_pmu_version > 1) {
+ if (pmu_version > 0) {
+ if (pmu_version > 1) {
/* Stop the counter. */
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
}
/* Set the counter values. */
- for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
+ for (i = 0; i < num_pmu_fixed_counters; i++) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
env->msr_fixed_counters[i]);
}
- for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
+ for (i = 0; i < num_pmu_gp_counters; i++) {
kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
env->msr_gp_counters[i]);
kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
env->msr_gp_evtsel[i]);
}
- if (has_architectural_pmu_version > 1) {
+ if (pmu_version > 1) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
env->msr_global_status);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
@@ -4529,17 +4536,17 @@ static int kvm_get_msrs(X86CPU *cpu)
if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) {
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
}
- if (has_architectural_pmu_version > 0) {
- if (has_architectural_pmu_version > 1) {
+ if (pmu_version > 0) {
+ if (pmu_version > 1) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
}
- for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
+ for (i = 0; i < num_pmu_fixed_counters; i++) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
}
- for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
+ for (i = 0; i < num_pmu_gp_counters; i++) {
kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
}
--
2.43.5
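
For readers less familiar with CPUID leaf 0xA, the fields that
kvm_init_pmu_info() extracts above are: EAX[7:0] (PMU version),
EAX[15:8] (number of general-purpose counters), and EDX[4:0] (number of
fixed counters, valid when the version is greater than 1). A
self-contained sketch of the same extraction, assuming GCC/Clang's
<cpuid.h> and an Intel host (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
            return 1;                             /* leaf 0xA unavailable */
        }

        uint32_t version = eax & 0xff;            /* EAX[7:0]  */
        uint32_t num_gp = (eax & 0xff00) >> 8;    /* EAX[15:8] */
        uint32_t num_fixed = version > 1 ? (edx & 0x1f) : 0; /* EDX[4:0] */

        printf("version=%u gp=%u fixed=%u\n", version, num_gp, num_fixed);
        return 0;
    }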
On 24-06-2025 13:13, Dongli Zhang wrote:
> AMD does not have what is commonly referred to as an architectural PMU.
> [...]
Reviewed-by: Sandipan Das <sandipan.das@amd.com>