Since PerfMonV2, the AMD PMU supports additional global control/status
registers. Add get/put support for these extra registers.

As in the KVM implementation:
- MSR_CORE_PERF_GLOBAL_STATUS and MSR_AMD64_PERF_CNTR_GLOBAL_STATUS both
use env->msr_global_status.
- MSR_CORE_PERF_GLOBAL_CTRL and MSR_AMD64_PERF_CNTR_GLOBAL_CTL both use
env->msr_global_ctrl.
- MSR_CORE_PERF_GLOBAL_OVF_CTRL and MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR
both use env->msr_global_ovf_ctrl.
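Concretely, both MSR indexes land in the same env field on the read-back
path; a condensed sketch of the kvm_get_msrs() hunk below:

    switch (index) {
    case MSR_CORE_PERF_GLOBAL_CTRL:          /* Intel, 0x38f */
    case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:     /* AMD, 0xc0000301 */
        env->msr_global_ctrl = msrs[i].data;
        break;
    }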
No changes are needed for vmstate_msr_architectural_pmu or
pmu_enable_needed().
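For reference, vmstate_msr_architectural_pmu in target/i386/machine.c
already migrates the shared fields (field list abbreviated):

    VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
    VMSTATE_UINT64(env.msr_global_status, X86CPU),
    VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),

and pmu_enable_needed() already tests these same fields, so AMD PerfMonV2
state is migrated without touching the vmstate.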
Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
---
Changed since v1:
- Use "has_pmu_version > 1", not "has_pmu_version == 2".
Changed since v2:
- Use cpuid_find_entry() instead of cpu_x86_cpuid().
- Change has_pmu_version to pmu_version.
- Cap num_pmu_gp_counters with MAX_GP_COUNTERS.
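For reviewers, two pieces of background this patch relies on, summarized
from the AMD documentation:

    CPUID 0x80000022:
      EAX[0]    PerfMonV2 supported
      EBX[3:0]  number of core performance counters

    PERFCORE/PerfMonV2 MSR layout (selector/counter pairs interleave,
    hence step = 2 in the put/get loops):
      selector i: MSR_F15H_PERF_CTL0 + 2 * i   (0xc0010200, 0xc0010202, ...)
      counter  i: MSR_F15H_PERF_CTR0 + 2 * i   (0xc0010201, 0xc0010203, ...)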
target/i386/cpu.h | 4 ++++
target/i386/kvm/kvm.c | 48 +++++++++++++++++++++++++++++++++++--------
2 files changed, 43 insertions(+), 9 deletions(-)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 84e497f5d3..ab952ac5ad 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -490,6 +490,10 @@ typedef enum X86Seg {
#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300
+#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302
+
#define MSR_K7_EVNTSEL0 0xc0010000
#define MSR_K7_PERFCTR0 0xc0010004
#define MSR_F15H_PERF_CTL0 0xc0010200
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 3a35fd741d..f4532e6f2a 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -2149,6 +2149,16 @@ static void kvm_init_pmu_info_amd(struct kvm_cpuid2 *cpuid, X86CPU *cpu)
}
num_pmu_gp_counters = AMD64_NUM_COUNTERS_CORE;
+
+ c = cpuid_find_entry(cpuid, 0x80000022, 0);
+ if (c && (c->eax & CPUID_8000_0022_EAX_PERFMON_V2)) {
+ pmu_version = 2;
+ num_pmu_gp_counters = c->ebx & 0xf;
+
+ if (num_pmu_gp_counters > MAX_GP_COUNTERS) {
+ num_pmu_gp_counters = MAX_GP_COUNTERS;
+ }
+ }
}
static bool is_host_compat_vendor(CPUX86State *env)
@@ -4200,13 +4210,14 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
uint32_t step = 1;
/*
- * When PERFCORE is enabled, AMD PMU uses a separate set of
- * addresses for the selector and counter registers.
- * Additionally, the address of the next selector or counter
- * register is determined by incrementing the address of the
- * current register by two.
+ * When PERFCORE or PerfMonV2 is enabled, AMD PMU uses a
+ * separate set of addresses for the selector and counter
+ * registers. Additionally, the address of the next selector or
+ * counter register is determined by incrementing the address
+ * of the current register by two.
*/
- if (num_pmu_gp_counters == AMD64_NUM_COUNTERS_CORE) {
+ if (num_pmu_gp_counters == AMD64_NUM_COUNTERS_CORE ||
+ pmu_version > 1) {
sel_base = MSR_F15H_PERF_CTL0;
ctr_base = MSR_F15H_PERF_CTR0;
step = 2;
@@ -4218,6 +4229,15 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, sel_base + i * step,
env->msr_gp_evtsel[i]);
}
+
+ if (pmu_version > 1) {
+ kvm_msr_entry_add(cpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
+ env->msr_global_status);
+ kvm_msr_entry_add(cpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+ env->msr_global_ovf_ctrl);
+ kvm_msr_entry_add(cpu, MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
+ env->msr_global_ctrl);
+ }
}
/*
@@ -4695,13 +4715,14 @@ static int kvm_get_msrs(X86CPU *cpu)
uint32_t step = 1;
/*
- * When PERFCORE is enabled, AMD PMU uses a separate set of
- * addresses for the selector and counter registers.
+ * When PERFCORE or PerfMonV2 is enabled, AMD PMU uses a separate
+ * set of addresses for the selector and counter registers.
* Additionally, the address of the next selector or counter
* register is determined by incrementing the address of the
* current register by two.
*/
- if (num_pmu_gp_counters == AMD64_NUM_COUNTERS_CORE) {
+ if (num_pmu_gp_counters == AMD64_NUM_COUNTERS_CORE ||
+ pmu_version > 1) {
sel_base = MSR_F15H_PERF_CTL0;
ctr_base = MSR_F15H_PERF_CTR0;
step = 2;
@@ -4711,6 +4732,12 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, ctr_base + i * step, 0);
kvm_msr_entry_add(cpu, sel_base + i * step, 0);
}
+
+ if (pmu_version > 1) {
+ kvm_msr_entry_add(cpu, MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
+ kvm_msr_entry_add(cpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, 0);
+ }
}
if (env->mcg_cap) {
@@ -5007,12 +5034,15 @@ static int kvm_get_msrs(X86CPU *cpu)
env->msr_fixed_ctr_ctrl = msrs[i].data;
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
env->msr_global_ctrl = msrs[i].data;
break;
case MSR_CORE_PERF_GLOBAL_STATUS:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
env->msr_global_status = msrs[i].data;
break;
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
env->msr_global_ovf_ctrl = msrs[i].data;
break;
case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
--
2.39.3