From: Dapeng Mi
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo, Namhyung Kim,
	Ian Rogers, Adrian Hunter, Alexander Shishkin, Kan Liang, Andi Kleen,
	Eranian Stephane
Cc: linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org,
	Dapeng Mi, Dapeng Mi
Subject: [Patch v6 03/10] perf/x86/intel: Initialize architectural PEBS
Date: Thu, 21 Aug 2025 11:57:58 +0800
Message-Id: <20250821035805.159494-4-dapeng1.mi@linux.intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20250821035805.159494-1-dapeng1.mi@linux.intel.com>
References: <20250821035805.159494-1-dapeng1.mi@linux.intel.com>

arch-PEBS leverages CPUID.23H.4/5 sub-leaves to enumerate the arch-PEBS
supported capabilities and counter bitmaps. This patch parses these two
sub-leaves and initializes the arch-PEBS capabilities and the
corresponding structures.

Since the IA32_PEBS_ENABLE and MSR_PEBS_DATA_CFG MSRs no longer exist
for arch-PEBS, there is no need to manipulate them. Thus add a simple
pair of __intel_pmu_pebs_enable/disable() callbacks for arch-PEBS.

Signed-off-by: Dapeng Mi
---
 arch/x86/events/core.c            | 21 +++++++++---
 arch/x86/events/intel/core.c      | 55 +++++++++++++++++++++++--------
 arch/x86/events/intel/ds.c        | 52 ++++++++++++++++++++++++-----
 arch/x86/events/perf_event.h      | 25 ++++++++++++--
 arch/x86/include/asm/perf_event.h |  7 +++-
 5 files changed, 129 insertions(+), 31 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 745caa6c15a3..8c25ad2bf179 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -554,14 +554,22 @@ static inline int precise_br_compat(struct perf_event *event)
 	return m == b;
 }
 
-int x86_pmu_max_precise(void)
+int x86_pmu_max_precise(struct pmu *pmu)
 {
 	int precise = 0;
 
-	/* Support for constant skid */
 	if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
-		precise++;
+		/* arch PEBS */
+		if (x86_pmu.arch_pebs) {
+			precise = 2;
+			if (hybrid(pmu, arch_pebs_cap).pdists)
+				precise++;
+
+			return precise;
+		}
 
+		/* legacy PEBS - support for constant skid */
+		precise++;
 		/* Support for IP fixup */
 		if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
 			precise++;
@@ -569,13 +577,14 @@ int x86_pmu_max_precise(void)
 		if (x86_pmu.pebs_prec_dist)
 			precise++;
 	}
+
 	return precise;
 }
 
 int x86_pmu_hw_config(struct perf_event *event)
 {
 	if (event->attr.precise_ip) {
-		int precise = x86_pmu_max_precise();
+		int precise = x86_pmu_max_precise(event->pmu);
 
 		if (event->attr.precise_ip > precise)
 			return -EOPNOTSUPP;
@@ -2629,7 +2638,9 @@ static ssize_t max_precise_show(struct device *cdev,
 				  struct device_attribute *attr,
 				  char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise());
+	struct pmu *pmu = dev_get_drvdata(cdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise(pmu));
 }
 
 static DEVICE_ATTR_RO(max_precise);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index c88bcd5d2bc4..bfb123ff7c9a 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5273,34 +5273,58 @@ static inline bool intel_pmu_broken_perf_cap(void)
 
 static void update_pmu_cap(struct pmu *pmu)
 {
-	unsigned int cntr, fixed_cntr, ecx, edx;
-	union cpuid35_eax eax;
-	union cpuid35_ebx ebx;
+	unsigned int eax, ebx, ecx, edx;
+	union cpuid35_eax eax_0;
+	union cpuid35_ebx ebx_0;
+	u64 cntrs_mask = 0;
+	u64 pebs_mask = 0;
+	u64 pdists_mask = 0;
 
-	cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
+	cpuid(ARCH_PERFMON_EXT_LEAF, &eax_0.full, &ebx_0.full, &ecx, &edx);
 
-	if (ebx.split.umask2)
+	if (ebx_0.split.umask2)
 		hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2;
-	if (ebx.split.eq)
+	if (ebx_0.split.eq)
 		hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ;
 
-	if (eax.split.cntr_subleaf) {
+	if (eax_0.split.cntr_subleaf) {
 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
-			    &cntr, &fixed_cntr, &ecx, &edx);
-		hybrid(pmu, cntr_mask64) = cntr;
-		hybrid(pmu, fixed_cntr_mask64) = fixed_cntr;
+			    &eax, &ebx, &ecx, &edx);
+		hybrid(pmu, cntr_mask64) = eax;
+		hybrid(pmu, fixed_cntr_mask64) = ebx;
+		cntrs_mask = (u64)ebx << INTEL_PMC_IDX_FIXED | eax;
 	}
 
-	if (eax.split.acr_subleaf) {
+	if (eax_0.split.acr_subleaf) {
 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
-			    &cntr, &fixed_cntr, &ecx, &edx);
+			    &eax, &ebx, &ecx, &edx);
 		/* The mask of the counters which can be reloaded */
-		hybrid(pmu, acr_cntr_mask64) = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED);
+		hybrid(pmu, acr_cntr_mask64) = eax | ((u64)ebx << INTEL_PMC_IDX_FIXED);
 
 		/* The mask of the counters which can cause a reload of reloadable counters */
 		hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED);
 	}
 
+	/* Bits[5:4] should be set simultaneously if arch-PEBS is supported */
+	if (eax_0.split.pebs_caps_subleaf && eax_0.split.pebs_cnts_subleaf) {
+		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_CAP_LEAF,
+			    &eax, &ebx, &ecx, &edx);
+		hybrid(pmu, arch_pebs_cap).caps = (u64)ebx << 32;
+
+		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF,
+			    &eax, &ebx, &ecx, &edx);
+		pebs_mask = ((u64)ecx << INTEL_PMC_IDX_FIXED) | eax;
+		pdists_mask = ((u64)edx << INTEL_PMC_IDX_FIXED) | ebx;
+		hybrid(pmu, arch_pebs_cap).counters = pebs_mask;
+		hybrid(pmu, arch_pebs_cap).pdists = pdists_mask;
+
+		if (WARN_ON((pebs_mask | pdists_mask) & ~cntrs_mask))
+			x86_pmu.arch_pebs = 0;
+	} else {
+		WARN_ON(x86_pmu.arch_pebs == 1);
+		x86_pmu.arch_pebs = 0;
+	}
+
 	if (!intel_pmu_broken_perf_cap()) {
 		/* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
 		rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities);
@@ -6252,7 +6276,7 @@ tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 static umode_t
 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 {
-	return x86_pmu.ds_pebs ? attr->mode : 0;
+	return intel_pmu_has_pebs() ? attr->mode : 0;
 }
 
 static umode_t
@@ -7728,6 +7752,9 @@ __init int intel_pmu_init(void)
 	if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
 		update_pmu_cap(NULL);
 
+	if (x86_pmu.arch_pebs)
+		pr_cont("Architectural PEBS, ");
+
 	intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
 				      &x86_pmu.fixed_cntr_mask64,
 				      &x86_pmu.intel_ctrl);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index dcf29c099ad2..276e98e5a4dd 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1531,6 +1531,15 @@ static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
 		intel_pmu_drain_pebs_buffer();
 }
 
+static void __intel_pmu_pebs_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+
+	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
+	cpuc->pebs_enabled |= 1ULL << hwc->idx;
+}
+
 void intel_pmu_pebs_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1539,9 +1548,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 	struct debug_store *ds = cpuc->ds;
 	unsigned int idx = hwc->idx;
 
-	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
-
-	cpuc->pebs_enabled |= 1ULL << hwc->idx;
+	__intel_pmu_pebs_enable(event);
 
 	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
 		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
@@ -1603,14 +1610,22 @@ void intel_pmu_pebs_del(struct perf_event *event)
 	pebs_update_state(needed_cb, cpuc, event, false);
 }
 
-void intel_pmu_pebs_disable(struct perf_event *event)
+static void __intel_pmu_pebs_disable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
 	intel_pmu_drain_large_pebs(cpuc);
-
 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
+	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
+}
+
+void intel_pmu_pebs_disable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+
+	__intel_pmu_pebs_disable(event);
 
 	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
 	    (x86_pmu.version < 5))
@@ -1622,8 +1637,6 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 
 	if (cpuc->enabled)
 		wrmsrq(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
-
-	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
 }
 
 void intel_pmu_pebs_enable_all(void)
@@ -2679,11 +2692,26 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
 	}
 }
 
+static void __init intel_arch_pebs_init(void)
+{
+	/*
+	 * Current hybrid platforms either support arch-PEBS on all core
+	 * types or on none of them, so directly set the x86_pmu.arch_pebs
+	 * flag if the boot CPU supports arch-PEBS.
+	 */
+	x86_pmu.arch_pebs = 1;
+	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
+	x86_pmu.pebs_capable = ~0ULL;
+
+	x86_pmu.pebs_enable = __intel_pmu_pebs_enable;
+	x86_pmu.pebs_disable = __intel_pmu_pebs_disable;
+}
+
 /*
  * PEBS probe and setup
  */
 
-void __init intel_pebs_init(void)
+static void __init intel_ds_pebs_init(void)
 {
 	/*
 	 * No support for 32bit formats
@@ -2798,6 +2826,14 @@ void __init intel_pebs_init(void)
 	}
 }
 
+void __init intel_pebs_init(void)
+{
+	if (x86_pmu.intel_cap.pebs_format == 0xf)
+		intel_arch_pebs_init();
+	else
+		intel_ds_pebs_init();
+}
+
 void perf_restore_debug_store(void)
 {
 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 2b969386dcdd..a5145e8f1ddb 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -708,6 +708,12 @@ enum hybrid_pmu_type {
 	hybrid_big_small_tiny = hybrid_big | hybrid_small_tiny,
 };
 
+struct arch_pebs_cap {
+	u64 caps;
+	u64 counters;
+	u64 pdists;
+};
+
 struct x86_hybrid_pmu {
 	struct pmu			pmu;
 	const char			*name;
@@ -752,6 +758,8 @@ struct x86_hybrid_pmu {
 					mid_ack		:1,
 					enabled_ack	:1;
 
+	struct arch_pebs_cap		arch_pebs_cap;
+
 	u64				pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX];
 };
 
@@ -906,7 +914,7 @@ struct x86_pmu {
 	union perf_capabilities intel_cap;
 
 	/*
-	 * Intel DebugStore bits
+	 * Intel DebugStore and PEBS bits
 	 */
 	unsigned int	bts			:1,
 			bts_active		:1,
@@ -917,7 +925,8 @@ struct x86_pmu {
 			pebs_no_tlb		:1,
 			pebs_no_isolation	:1,
 			pebs_block		:1,
-			pebs_ept		:1;
+			pebs_ept		:1,
+			arch_pebs		:1;
 	int		pebs_record_size;
 	int		pebs_buffer_size;
 	u64		pebs_events_mask;
@@ -929,6 +938,11 @@ struct x86_pmu {
 	u64		rtm_abort_event;
 	u64		pebs_capable;
 
+	/*
+	 * Intel Architectural PEBS
+	 */
+	struct arch_pebs_cap	arch_pebs_cap;
+
 	/*
 	 * Intel LBR
 	 */
@@ -1217,7 +1231,7 @@ int x86_reserve_hardware(void);
 
 void x86_release_hardware(void);
 
-int x86_pmu_max_precise(void);
+int x86_pmu_max_precise(struct pmu *pmu);
 
 void hw_perf_lbr_event_destroy(struct perf_event *event);
 
@@ -1792,6 +1806,11 @@ static inline int intel_pmu_max_num_pebs(struct pmu *pmu)
 	return fls((u32)hybrid(pmu, pebs_events_mask));
 }
 
+static inline bool intel_pmu_has_pebs(void)
+{
+	return x86_pmu.ds_pebs || x86_pmu.arch_pebs;
+}
+
 #else /* CONFIG_CPU_SUP_INTEL */
 
 static inline void reserve_ds_buffers(void)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 49a4d442f3fc..0dfa06722bab 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -200,6 +200,8 @@ union cpuid10_edx {
 #define ARCH_PERFMON_EXT_LEAF			0x00000023
 #define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1
 #define ARCH_PERFMON_ACR_LEAF			0x2
+#define ARCH_PERFMON_PEBS_CAP_LEAF		0x4
+#define ARCH_PERFMON_PEBS_COUNTER_LEAF		0x5
 
 union cpuid35_eax {
 	struct {
@@ -210,7 +212,10 @@ union cpuid35_eax {
 		unsigned int	acr_subleaf:1;
 		/* Events Sub-Leaf */
 		unsigned int	events_subleaf:1;
-		unsigned int	reserved:28;
+		/* arch-PEBS Sub-Leaves */
+		unsigned int	pebs_caps_subleaf:1;
+		unsigned int	pebs_cnts_subleaf:1;
+		unsigned int	reserved:26;
 	} split;
 	unsigned int full;
 };
-- 
2.34.1
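
Not part of the patch: a minimal user-space sketch of the enumeration flow
described above, for illustration only. It probes CPUID.23H sub-leaves 4/5
the same way update_pmu_cap() does, using the __get_cpuid_count() helper
from the compiler's <cpuid.h>. The program and its output format are
hypothetical; the bit positions follow the pebs_caps_subleaf /
pebs_cnts_subleaf fields and mask composition added by this patch.

#include <stdio.h>
#include <cpuid.h>

#define ARCH_PERFMON_EXT_LEAF		0x23
#define ARCH_PERFMON_PEBS_CAP_LEAF	0x4
#define ARCH_PERFMON_PEBS_COUNTER_LEAF	0x5

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Sub-leaf 0: EAX bits 5:4 flag the two arch-PEBS sub-leaves */
	if (!__get_cpuid_count(ARCH_PERFMON_EXT_LEAF, 0, &eax, &ebx, &ecx, &edx) ||
	    (eax & (3u << 4)) != (3u << 4)) {
		puts("arch-PEBS not enumerated");
		return 0;
	}

	/* Sub-leaf 4: capability bits, kept in the upper 32 bits like arch_pebs_cap.caps */
	__get_cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_CAP_LEAF,
			  &eax, &ebx, &ecx, &edx);
	printf("caps:     0x%016llx\n", (unsigned long long)ebx << 32);

	/* Sub-leaf 5: PEBS/PDIST counter bitmaps, fixed counters start at bit 32 */
	__get_cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF,
			  &eax, &ebx, &ecx, &edx);
	printf("counters: 0x%016llx\n", ((unsigned long long)ecx << 32) | eax);
	printf("pdists:   0x%016llx\n", ((unsigned long long)edx << 32) | ebx);

	return 0;
}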