arch-PEBS provides CPUIDs to enumerate which counters support PEBS
sampling and precise distribution PEBS sampling. Thus PEBS constraints
should be configured dynamically based on these counter and precise
distribution bitmaps instead of being defined statically.

Update the event's dyn_constraint based on the PEBS event's precise level.

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
arch/x86/events/intel/core.c | 11 +++++++++++
arch/x86/events/intel/ds.c | 1 +
2 files changed, 12 insertions(+)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 6e04d73dfae5..40ccfd80d554 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4252,6 +4252,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	}
 
 	if (event->attr.precise_ip) {
+		struct arch_pebs_cap pebs_cap = hybrid(event->pmu, arch_pebs_cap);
+
 		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
 			return -EINVAL;
 
@@ -4265,6 +4267,15 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		}
 		if (x86_pmu.pebs_aliases)
 			x86_pmu.pebs_aliases(event);
+
+		if (x86_pmu.arch_pebs) {
+			u64 cntr_mask = hybrid(event->pmu, intel_ctrl) &
+					~GLOBAL_CTRL_EN_PERF_METRICS;
+			u64 pebs_mask = event->attr.precise_ip >= 3 ?
+					pebs_cap.pdists : pebs_cap.counters;
+			if (cntr_mask != pebs_mask)
+				event->hw.dyn_constraint &= pebs_mask;
+		}
 	}
 
 	if (needs_branch_stack(event)) {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 5c26a5235f94..1179980f795b 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -3005,6 +3005,7 @@ static void __init intel_arch_pebs_init(void)
 	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
 	x86_pmu.drain_pebs = intel_pmu_drain_arch_pebs;
 	x86_pmu.pebs_capable = ~0ULL;
+	x86_pmu.flags |= PMU_FL_PEBS_ALL;
 
 	x86_pmu.pebs_enable = __intel_pmu_pebs_enable;
 	x86_pmu.pebs_disable = __intel_pmu_pebs_disable;
--
2.34.1
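
A note for readers skimming the diff: the second core.c hunk boils down to one mask selection. An event asking for precise_ip >= 3 must be constrained to the PDIST-capable counters, while any lower precise level only needs a PEBS-capable counter. Below is a minimal standalone sketch of that selection in plain C; the struct, the bitmap values and the helper name are made up for illustration and only stand in for the kernel's hybrid()/arch_pebs_cap plumbing.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's arch-PEBS capability bitmaps (illustrative only). */
struct arch_pebs_cap {
	uint64_t counters;	/* counters that support PEBS sampling */
	uint64_t pdists;	/* counters that support precise distribution */
};

/* Pick the constraint mask the same way the hunk above does. */
static uint64_t pebs_constraint_mask(const struct arch_pebs_cap *cap, int precise_ip)
{
	return precise_ip >= 3 ? cap->pdists : cap->counters;
}

int main(void)
{
	/* Hypothetical CPU: GP counters 0-7 can do PEBS, only counter 0 can do PDIST. */
	struct arch_pebs_cap cap = { .counters = 0xff, .pdists = 0x01 };

	printf("precise_ip=1 -> constraint mask 0x%llx\n",
	       (unsigned long long)pebs_constraint_mask(&cap, 1));
	printf("precise_ip=3 -> constraint mask 0x%llx\n",
	       (unsigned long long)pebs_constraint_mask(&cap, 3));
	return 0;
}
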
On Wed, Oct 29, 2025 at 06:21:34PM +0800, Dapeng Mi wrote:
> arch-PEBS provides CPUIDs to enumerate which counters support PEBS
> sampling and precise distribution PEBS sampling. Thus PEBS constraints
> should be dynamically configured base on these counter and precise
> distribution bitmap instead of defining them statically.
>
> Update event dyn_constraint base on PEBS event precise level.
What happened to this:
https://lore.kernel.org/all/e0b25b3e-aec0-4c43-9ab2-907186b56c71@linux.intel.com/
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> ---
> arch/x86/events/intel/core.c | 11 +++++++++++
> arch/x86/events/intel/ds.c | 1 +
> 2 files changed, 12 insertions(+)
>
> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
> index 6e04d73dfae5..40ccfd80d554 100644
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -4252,6 +4252,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
> }
>
> if (event->attr.precise_ip) {
> + struct arch_pebs_cap pebs_cap = hybrid(event->pmu, arch_pebs_cap);
> +
> if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
> return -EINVAL;
>
> @@ -4265,6 +4267,15 @@ static int intel_pmu_hw_config(struct perf_event *event)
> }
> if (x86_pmu.pebs_aliases)
> x86_pmu.pebs_aliases(event);
> +
> + if (x86_pmu.arch_pebs) {
> + u64 cntr_mask = hybrid(event->pmu, intel_ctrl) &
> + ~GLOBAL_CTRL_EN_PERF_METRICS;
> + u64 pebs_mask = event->attr.precise_ip >= 3 ?
> + pebs_cap.pdists : pebs_cap.counters;
> + if (cntr_mask != pebs_mask)
> + event->hw.dyn_constraint &= pebs_mask;
> + }
> }
>
> if (needs_branch_stack(event)) {
> diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
> index 5c26a5235f94..1179980f795b 100644
> --- a/arch/x86/events/intel/ds.c
> +++ b/arch/x86/events/intel/ds.c
> @@ -3005,6 +3005,7 @@ static void __init intel_arch_pebs_init(void)
> x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
> x86_pmu.drain_pebs = intel_pmu_drain_arch_pebs;
> x86_pmu.pebs_capable = ~0ULL;
> + x86_pmu.flags |= PMU_FL_PEBS_ALL;
>
> x86_pmu.pebs_enable = __intel_pmu_pebs_enable;
> x86_pmu.pebs_disable = __intel_pmu_pebs_disable;
> --
> 2.34.1
>
On 11/6/2025 10:52 PM, Peter Zijlstra wrote:
> On Wed, Oct 29, 2025 at 06:21:34PM +0800, Dapeng Mi wrote:
>> arch-PEBS provides CPUIDs to enumerate which counters support PEBS
>> sampling and precise distribution PEBS sampling. Thus PEBS constraints
>> should be dynamically configured base on these counter and precise
>> distribution bitmap instead of defining them statically.
>>
>> Update event dyn_constraint base on PEBS event precise level.
> What happened to this:
>
> https://lore.kernel.org/all/e0b25b3e-aec0-4c43-9ab2-907186b56c71@linux.intel.com/
About the issue, Kan posted a patch earlier to mitigate the risk, but it
seems the patch has not been merged yet.

https://lore.kernel.org/all/20250512175542.2000708-1-kan.liang@linux.intel.com/
>
>
>> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
>> ---
>> arch/x86/events/intel/core.c | 11 +++++++++++
>> arch/x86/events/intel/ds.c | 1 +
>> 2 files changed, 12 insertions(+)
>>
>> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
>> index 6e04d73dfae5..40ccfd80d554 100644
>> --- a/arch/x86/events/intel/core.c
>> +++ b/arch/x86/events/intel/core.c
>> @@ -4252,6 +4252,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
>> }
>>
>> if (event->attr.precise_ip) {
>> + struct arch_pebs_cap pebs_cap = hybrid(event->pmu, arch_pebs_cap);
>> +
>> if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
>> return -EINVAL;
>>
>> @@ -4265,6 +4267,15 @@ static int intel_pmu_hw_config(struct perf_event *event)
>> }
>> if (x86_pmu.pebs_aliases)
>> x86_pmu.pebs_aliases(event);
>> +
>> + if (x86_pmu.arch_pebs) {
>> + u64 cntr_mask = hybrid(event->pmu, intel_ctrl) &
>> + ~GLOBAL_CTRL_EN_PERF_METRICS;
>> + u64 pebs_mask = event->attr.precise_ip >= 3 ?
>> + pebs_cap.pdists : pebs_cap.counters;
>> + if (cntr_mask != pebs_mask)
>> + event->hw.dyn_constraint &= pebs_mask;
>> + }
>> }
>>
>> if (needs_branch_stack(event)) {
>> diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
>> index 5c26a5235f94..1179980f795b 100644
>> --- a/arch/x86/events/intel/ds.c
>> +++ b/arch/x86/events/intel/ds.c
>> @@ -3005,6 +3005,7 @@ static void __init intel_arch_pebs_init(void)
>> x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
>> x86_pmu.drain_pebs = intel_pmu_drain_arch_pebs;
>> x86_pmu.pebs_capable = ~0ULL;
>> + x86_pmu.flags |= PMU_FL_PEBS_ALL;
>>
>> x86_pmu.pebs_enable = __intel_pmu_pebs_enable;
>> x86_pmu.pebs_disable = __intel_pmu_pebs_disable;
>> --
>> 2.34.1
>>
On Fri, Nov 07, 2025 at 02:11:09PM +0800, Mi, Dapeng wrote:
>
> On 11/6/2025 10:52 PM, Peter Zijlstra wrote:
> > On Wed, Oct 29, 2025 at 06:21:34PM +0800, Dapeng Mi wrote:
> >> arch-PEBS provides CPUIDs to enumerate which counters support PEBS
> >> sampling and precise distribution PEBS sampling. Thus PEBS constraints
> >> should be dynamically configured base on these counter and precise
> >> distribution bitmap instead of defining them statically.
> >>
> >> Update event dyn_constraint base on PEBS event precise level.
> > What happened to this:
> >
> > https://lore.kernel.org/all/e0b25b3e-aec0-4c43-9ab2-907186b56c71@linux.intel.com/
>
> About the issue, Kan ever posted a patch to mitigate the risk, but it seems
> the patch is not merged yet.
>
> https://lore.kernel.org/all/20250512175542.2000708-1-kan.liang@linux.intel.com/
IIUC the below is what is required to handle this new dynamic case, right?

--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5423,6 +5423,8 @@ enum dyn_constr_type {
 	DYN_CONSTR_BR_CNTR,
 	DYN_CONSTR_ACR_CNTR,
 	DYN_CONSTR_ACR_CAUSE,
+	DYN_CONSTR_PEBS,
+	DYN_CONSTR_PDIST,
 
 	DYN_CONSTR_MAX,
 };
@@ -5432,6 +5434,8 @@ static const char * const dyn_constr_typ
 	[DYN_CONSTR_BR_CNTR]	= "a branch counter logging event",
 	[DYN_CONSTR_ACR_CNTR]	= "an auto-counter reload event",
 	[DYN_CONSTR_ACR_CAUSE]	= "an auto-counter reload cause event",
+	[DYN_CONSTR_PEBS]	= "a PEBS event",
+	[DYN_CONSTR_PDIST]	= "a PEBS PDIST event",
 };
 
 static void __intel_pmu_check_dyn_constr(struct event_constraint *constr,
@@ -5536,6 +5540,14 @@ static void intel_pmu_check_dyn_constr(s
 				continue;
 			mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
 			break;
+		case DYN_CONSTR_PEBS:
+			if (x86_pmu.arch_pebs)
+				mask = hybrid(pmu, arch_pebs_cap).counters;
+			break;
+		case DYN_CONSTR_PDIST:
+			if (x86_pmu.arch_pebs)
+				mask = hybrid(pmu, arch_pebs_cap).pdists;
+			break;
 		default:
 			pr_warn("Unsupported dynamic constraint type %d\n", i);
 		}
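
To make the hazard this validation is looking for concrete: once a constraint is dynamically shrunk to the PEBS or PDIST bitmap, two events that end up with the same tiny mask may no longer both find a counter, which is exactly the "may not be fully scheduled" situation the check warns about. The toy check below is not the kernel's algorithm (real scheduling also has to weigh non-identical constraints against each other); the pdists value is invented, and :ppp is the perf tool modifier for precise_ip 3.

#include <stdint.h>
#include <stdio.h>

/*
 * Toy check: can nr_events events, all restricted to the same counter mask,
 * be placed on distinct counters?  Only covers the identical-constraint case.
 */
static int schedulable(uint64_t mask, int nr_events)
{
	return __builtin_popcountll(mask) >= nr_events;
}

int main(void)
{
	uint64_t pdists = 0x1;	/* invented: only counter 0 supports PDIST */

	printf("one :ppp event  -> %s\n",
	       schedulable(pdists, 1) ? "ok" : "cannot be fully scheduled");
	printf("two :ppp events -> %s\n",
	       schedulable(pdists, 2) ? "ok" : "cannot be fully scheduled");
	return 0;
}
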
On 11/7/2025 9:05 PM, Peter Zijlstra wrote:
> On Fri, Nov 07, 2025 at 02:11:09PM +0800, Mi, Dapeng wrote:
>> On 11/6/2025 10:52 PM, Peter Zijlstra wrote:
>>> On Wed, Oct 29, 2025 at 06:21:34PM +0800, Dapeng Mi wrote:
>>>> arch-PEBS provides CPUIDs to enumerate which counters support PEBS
>>>> sampling and precise distribution PEBS sampling. Thus PEBS constraints
>>>> should be dynamically configured base on these counter and precise
>>>> distribution bitmap instead of defining them statically.
>>>>
>>>> Update event dyn_constraint base on PEBS event precise level.
>>> What happened to this:
>>>
>>> https://lore.kernel.org/all/e0b25b3e-aec0-4c43-9ab2-907186b56c71@linux.intel.com/
>> About the issue, Kan ever posted a patch to mitigate the risk, but it seems
>> the patch is not merged yet.
>>
>> https://lore.kernel.org/all/20250512175542.2000708-1-kan.liang@linux.intel.com/
> IIUC the below is what is required handle this new dynamic case, right?
>
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -5423,6 +5423,8 @@ enum dyn_constr_type {
> DYN_CONSTR_BR_CNTR,
> DYN_CONSTR_ACR_CNTR,
> DYN_CONSTR_ACR_CAUSE,
> + DYN_CONSTR_PEBS,
> + DYN_CONSTR_PDIST,
>
> DYN_CONSTR_MAX,
> };
> @@ -5432,6 +5434,8 @@ static const char * const dyn_constr_typ
> [DYN_CONSTR_BR_CNTR] = "a branch counter logging event",
> [DYN_CONSTR_ACR_CNTR] = "an auto-counter reload event",
> [DYN_CONSTR_ACR_CAUSE] = "an auto-counter reload cause event",
> + [DYN_CONSTR_PEBS] = "a PEBS event",
> + [DYN_CONSTR_PDIST] = "a PEBS PDIST event",
> };
>
> static void __intel_pmu_check_dyn_constr(struct event_constraint *constr,
> @@ -5536,6 +5540,14 @@ static void intel_pmu_check_dyn_constr(s
> continue;
> mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
> break;
> + case DYN_CONSTR_PEBS:
> + if (x86_pmu.arch_pebs)
> + mask = hybrid(pmu, arch_pebs_cap).counters;
> + break;
> + case DYN_CONSTR_PDIST:
> + if (x86_pmu.arch_pebs)
> + mask = hybrid(pmu, arch_pebs_cap).pdists;
> + break;
> default:
> pr_warn("Unsupported dynamic constraint type %d\n", i);
> }
Yes, exactly. Thanks.
On Mon, Nov 10, 2025 at 08:23:55AM +0800, Mi, Dapeng wrote:
> > @@ -5536,6 +5540,14 @@ static void intel_pmu_check_dyn_constr(s
> > continue;
> > mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
> > break;
> > + case DYN_CONSTR_PEBS:
> > + if (x86_pmu.arch_pebs)
> > + mask = hybrid(pmu, arch_pebs_cap).counters;
> > + break;
> > + case DYN_CONSTR_PDIST:
> > + if (x86_pmu.arch_pebs)
> > + mask = hybrid(pmu, arch_pebs_cap).pdists;
> > + break;
> > default:
> > pr_warn("Unsupported dynamic constraint type %d\n", i);
> > }
>
> Yes, exactly. Thanks.
Excellent. Could you please double check and try the bits I have in
queue/perf/core ? I don't think I've got v6 hardware at hand.
On 11/10/2025 5:03 PM, Peter Zijlstra wrote:
> On Mon, Nov 10, 2025 at 08:23:55AM +0800, Mi, Dapeng wrote:
>
>>> @@ -5536,6 +5540,14 @@ static void intel_pmu_check_dyn_constr(s
>>> continue;
>>> mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
>>> break;
>>> + case DYN_CONSTR_PEBS:
>>> + if (x86_pmu.arch_pebs)
>>> + mask = hybrid(pmu, arch_pebs_cap).counters;
>>> + break;
>>> + case DYN_CONSTR_PDIST:
>>> + if (x86_pmu.arch_pebs)
>>> + mask = hybrid(pmu, arch_pebs_cap).pdists;
>>> + break;
>>> default:
>>> pr_warn("Unsupported dynamic constraint type %d\n", i);
>>> }
>> Yes, exactly. Thanks.
> Excellent. Could you please double check and try the bits I have in
> queue/perf/core ? I don't think I've got v6 hardware at hand.
Sure. I will post test results tomorrow.
On 11/10/2025 5:15 PM, Mi, Dapeng wrote:
> On 11/10/2025 5:03 PM, Peter Zijlstra wrote:
>> On Mon, Nov 10, 2025 at 08:23:55AM +0800, Mi, Dapeng wrote:
>>
>>>> @@ -5536,6 +5540,14 @@ static void intel_pmu_check_dyn_constr(s
>>>> continue;
>>>> mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
>>>> break;
>>>> + case DYN_CONSTR_PEBS:
>>>> + if (x86_pmu.arch_pebs)
>>>> + mask = hybrid(pmu, arch_pebs_cap).counters;
>>>> + break;
>>>> + case DYN_CONSTR_PDIST:
>>>> + if (x86_pmu.arch_pebs)
>>>> + mask = hybrid(pmu, arch_pebs_cap).pdists;
>>>> + break;
>>>> default:
>>>> pr_warn("Unsupported dynamic constraint type %d\n", i);
>>>> }
>>> Yes, exactly. Thanks.
>> Excellent. Could you please double check and try the bits I have in
>> queue/perf/core ? I don't think I've got v6 hardware at hand.
> Sure. I would post test results tomorrow.
Hi Peter,

I tested the queue/perf/core code with a slight code refinement on SPR/CWF/PTL.
In summary, all things look good. The constraints validation passes on all
these 3 platforms; no overlapped constraints are reported. Besides, perf
counting/sampling (both legacy PEBS and arch-PEBS) works well, and no issue
is found.

I did a slight change to the intel_pmu_check_dyn_constr() helper. It
should be good enough to only validate the GP counters for the PEBS counter
and PDIST constraint check. Besides, the code style is refined
opportunistically. Thanks.

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index aad89c9d9514..81e6c8bcabde 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5506,7 +5506,7 @@ static void __intel_pmu_check_dyn_constr(struct event_constraint *constr,
 		}
 
 		if (check_fail) {
-			pr_info("The two events 0x%llx and 0x%llx may not be "
+			pr_warn("The two events 0x%llx and 0x%llx may not be "
 				"fully scheduled under some circumstances as "
 				"%s.\n",
 				c1->code, c2->code, dyn_constr_type_name[type]);
@@ -5519,6 +5519,7 @@ static void intel_pmu_check_dyn_constr(struct pmu *pmu,
 				       struct event_constraint *constr,
 				       u64 cntr_mask)
 {
+	u64 gp_mask = GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
 	enum dyn_constr_type i;
 	u64 mask;
 
@@ -5533,20 +5534,25 @@ static void intel_pmu_check_dyn_constr(struct pmu *pmu,
 			mask = x86_pmu.lbr_counters;
 			break;
 		case DYN_CONSTR_ACR_CNTR:
-			mask = hybrid(pmu, acr_cntr_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
+			mask = hybrid(pmu, acr_cntr_mask64) & gp_mask;
 			break;
 		case DYN_CONSTR_ACR_CAUSE:
-			if (hybrid(pmu, acr_cntr_mask64) == hybrid(pmu, acr_cause_mask64))
+			if (hybrid(pmu, acr_cntr_mask64) ==
+			    hybrid(pmu, acr_cause_mask64))
 				continue;
-			mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0);
+			mask = hybrid(pmu, acr_cause_mask64) & gp_mask;
 			break;
 		case DYN_CONSTR_PEBS:
-			if (x86_pmu.arch_pebs)
-				mask = hybrid(pmu, arch_pebs_cap).counters;
+			if (x86_pmu.arch_pebs) {
+				mask = hybrid(pmu, arch_pebs_cap).counters &
+				       gp_mask;
+			}
 			break;
 		case DYN_CONSTR_PDIST:
-			if (x86_pmu.arch_pebs)
-				mask = hybrid(pmu, arch_pebs_cap).pdists;
+			if (x86_pmu.arch_pebs) {
+				mask = hybrid(pmu, arch_pebs_cap).pdists &
+				       gp_mask;
+			}
 			break;
 		default:
 			pr_warn("Unsupported dynamic constraint type %d\n", i);
On Tue, Nov 11, 2025 at 01:41:05PM +0800, Mi, Dapeng wrote:
> I tested the queue/perf/core code with a slight code refine on SPR/CWF/PTL.
> In summary, all things look good. The constraints validation passes on all
> these 3 platforms, no overlapped constraints are reported. Besides, perf
> counting/sampling (both legacy PEBS and arch-PEBS) works well, no issue is
> found.

Excellent, I pushed out to tip/perf/core.

> I did a slight change for the intel_pmu_check_dyn_constr() helper. It
> should be good enough to only validate the GP counters for the PEBS counter
> and PDIST constraint check. Beside the code style is refined
> opportunistically. Thanks.

If you could send that as a proper patch -- the thing was horribly
whitespace mangled.
On 11/11/2025 7:37 PM, Peter Zijlstra wrote:
> On Tue, Nov 11, 2025 at 01:41:05PM +0800, Mi, Dapeng wrote:
>
>> I tested the queue/perf/core code with a slight code refine on SPR/CWF/PTL.
>> In summary, all things look good. The constraints validation passes on all
>> these 3 platforms, no overlapped constraints are reported. Besides, perf
>> counting/sampling (both legacy PEBS and arch-PEBS) works well, no issue is
>> found.
> Excellent, I pushed out to tip/perf/core.
>
>> I did a slight change for the intel_pmu_check_dyn_constr() helper. It
>> should be good enough to only validate the GP counters for the PEBS counter
>> and PDIST constraint check. Beside the code style is refined
>> opportunistically. Thanks.
> If you could send that as a proper patch -- the thing was horribly
> whitespace mangled.

Sure. Will send the patch soon.
On Fri, Nov 07, 2025 at 02:11:09PM +0800, Mi, Dapeng wrote:
>
> On 11/6/2025 10:52 PM, Peter Zijlstra wrote:
> > On Wed, Oct 29, 2025 at 06:21:34PM +0800, Dapeng Mi wrote:
> >> arch-PEBS provides CPUIDs to enumerate which counters support PEBS
> >> sampling and precise distribution PEBS sampling. Thus PEBS constraints
> >> should be dynamically configured base on these counter and precise
> >> distribution bitmap instead of defining them statically.
> >>
> >> Update event dyn_constraint base on PEBS event precise level.
> > What happened to this:
> >
> > https://lore.kernel.org/all/e0b25b3e-aec0-4c43-9ab2-907186b56c71@linux.intel.com/
>
> About the issue, Kan ever posted a patch to mitigate the risk, but it seems
> the patch is not merged yet.
>
> https://lore.kernel.org/all/20250512175542.2000708-1-kan.liang@linux.intel.com/
>

Clearly it became a victim of some scatter brained maintainer or
something.

Let me stick that near this set and go read the last few patches.
On 11/7/2025 4:28 PM, Peter Zijlstra wrote:
> On Fri, Nov 07, 2025 at 02:11:09PM +0800, Mi, Dapeng wrote:
>> On 11/6/2025 10:52 PM, Peter Zijlstra wrote:
>>> On Wed, Oct 29, 2025 at 06:21:34PM +0800, Dapeng Mi wrote:
>>>> arch-PEBS provides CPUIDs to enumerate which counters support PEBS
>>>> sampling and precise distribution PEBS sampling. Thus PEBS constraints
>>>> should be dynamically configured base on these counter and precise
>>>> distribution bitmap instead of defining them statically.
>>>>
>>>> Update event dyn_constraint base on PEBS event precise level.
>>> What happened to this:
>>>
>>> https://lore.kernel.org/all/e0b25b3e-aec0-4c43-9ab2-907186b56c71@linux.intel.com/
>> About the issue, Kan ever posted a patch to mitigate the risk, but it seems
>> the patch is not merged yet.
>>
>> https://lore.kernel.org/all/20250512175542.2000708-1-kan.liang@linux.intel.com/
>>
> Clearly it became a victim of some scatter brained maintainer or
> something.
>
> Let me stick that near this set and go read the last few patches.

Thanks.
The following commit has been merged into the perf/core branch of tip:
Commit-ID: e89c5d1f290e8915e0aad10014f2241086ea95e4
Gitweb: https://git.kernel.org/tip/e89c5d1f290e8915e0aad10014f2241086ea95e4
Author: Dapeng Mi <dapeng1.mi@linux.intel.com>
AuthorDate: Wed, 29 Oct 2025 18:21:34 +08:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Fri, 07 Nov 2025 15:08:22 +01:00
perf/x86/intel: Update dyn_constraint based on PEBS event precise level

arch-PEBS provides CPUIDs to enumerate which counters support PEBS
sampling and precise distribution PEBS sampling. Thus PEBS constraints
should be configured dynamically based on these counter and precise
distribution bitmaps instead of being defined statically.

Update the event's dyn_constraint based on the PEBS event's precise level.

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251029102136.61364-11-dapeng1.mi@linux.intel.com
---
arch/x86/events/intel/core.c | 11 +++++++++++
arch/x86/events/intel/ds.c | 1 +
2 files changed, 12 insertions(+)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 6e04d73..40ccfd8 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4252,6 +4252,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	}
 
 	if (event->attr.precise_ip) {
+		struct arch_pebs_cap pebs_cap = hybrid(event->pmu, arch_pebs_cap);
+
 		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
 			return -EINVAL;
 
@@ -4265,6 +4267,15 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		}
 		if (x86_pmu.pebs_aliases)
 			x86_pmu.pebs_aliases(event);
+
+		if (x86_pmu.arch_pebs) {
+			u64 cntr_mask = hybrid(event->pmu, intel_ctrl) &
+					~GLOBAL_CTRL_EN_PERF_METRICS;
+			u64 pebs_mask = event->attr.precise_ip >= 3 ?
+					pebs_cap.pdists : pebs_cap.counters;
+			if (cntr_mask != pebs_mask)
+				event->hw.dyn_constraint &= pebs_mask;
+		}
 	}
 
 	if (needs_branch_stack(event)) {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 5c26a52..1179980 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -3005,6 +3005,7 @@ static void __init intel_arch_pebs_init(void)
 	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
 	x86_pmu.drain_pebs = intel_pmu_drain_arch_pebs;
 	x86_pmu.pebs_capable = ~0ULL;
+	x86_pmu.flags |= PMU_FL_PEBS_ALL;
 
 	x86_pmu.pebs_enable = __intel_pmu_pebs_enable;
 	x86_pmu.pebs_disable = __intel_pmu_pebs_disable;