For AMD perf, use the parsed CPUID(0x80000022) leaf accessor instead of
direct CPUID queries and the driver-local cpuid_0x80000022_ebx union type.
For the uncore CPU hotplug callbacks, ensure that the CPUID table of the
CPU being scanned (via cpu_data(cpu)) is queried, rather than that of the
boot CPU.
Signed-off-by: Ahmed S. Darwish <darwi@linutronix.de>
---
arch/x86/events/amd/core.c | 8 +++-----
arch/x86/events/amd/uncore.c | 18 +++++++-----------
2 files changed, 10 insertions(+), 16 deletions(-)
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index d66a357f219d..b070d0be36c4 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1410,7 +1410,7 @@ static const struct attribute_group *amd_attr_update[] = {
static int __init amd_core_pmu_init(void)
{
- union cpuid_0x80000022_ebx ebx;
+ const struct leaf_0x80000022_0 *leaf = cpuid_leaf(&boot_cpu_data, 0x80000022);
u64 even_ctr_mask = 0ULL;
int i;
@@ -1430,14 +1430,12 @@ static int __init amd_core_pmu_init(void)
x86_pmu.cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0);
/* Check for Performance Monitoring v2 support */
- if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
- ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);
-
+ if (leaf && boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
/* Update PMU version for later usage */
x86_pmu.version = 2;
/* Find the number of available Core PMCs */
- x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0);
+ x86_pmu.cntr_mask64 = GENMASK_ULL(leaf->n_pmc_core - 1, 0);
amd_pmu_global_cntr_mask = x86_pmu.cntr_mask64;
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 05cff39968ec..6a5d8f8cfbc0 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -692,7 +692,7 @@ static int amd_uncore_df_add(struct perf_event *event, int flags)
static
void amd_uncore_df_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
{
- union cpuid_0x80000022_ebx ebx;
+ const struct leaf_0x80000022_0 *leaf = cpuid_leaf(&cpu_data(cpu), 0x80000022);
union amd_uncore_info info;
if (!boot_cpu_has(X86_FEATURE_PERFCTR_NB))
@@ -703,10 +703,8 @@ void amd_uncore_df_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
info.split.gid = 0;
info.split.cid = topology_logical_package_id(cpu);
- if (pmu_version >= 2) {
- ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);
- info.split.num_pmcs = ebx.split.num_df_pmc;
- }
+ if (leaf && pmu_version >= 2)
+ info.split.num_pmcs = leaf->n_pmc_northbridge;
*per_cpu_ptr(uncore->info, cpu) = info;
}
@@ -990,16 +988,14 @@ static void amd_uncore_umc_read(struct perf_event *event)
static
void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
{
- union cpuid_0x80000022_ebx ebx;
+ const struct leaf_0x80000022_0 *leaf = cpuid_leaf(&cpu_data(cpu), 0x80000022);
union amd_uncore_info info;
- unsigned int eax, ecx, edx;
- if (pmu_version < 2)
+ if (!leaf || pmu_version < 2)
return;
- cpuid(EXT_PERFMON_DEBUG_FEATURES, &eax, &ebx.full, &ecx, &edx);
- info.split.aux_data = ecx; /* stash active mask */
- info.split.num_pmcs = ebx.split.num_umc_pmc;
+ info.split.aux_data = leaf->active_umc_bitmask;
+ info.split.num_pmcs = leaf->n_pmc_umc;
info.split.gid = topology_logical_package_id(cpu);
info.split.cid = topology_logical_package_id(cpu);
*per_cpu_ptr(uncore->info, cpu) = info;
--
2.53.0