[Patch v2 6/6] perf/x86/intel: Add ICL_FIXED_0_ADAPTIVE bit into INTEL_FIXED_BITS_MASK

Posted by Dapeng Mi 1 month, 3 weeks ago
The ICL_FIXED_0_ADAPTIVE bit is missing from INTEL_FIXED_BITS_MASK.
Add it, and opportunistically refine the fixed counter enabling code.
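
With the new definition, the per-counter field cleared for fixed
counter <idx> covers both the classic 4 control bits and the adaptive
bit, i.e. (spelled out from the macros touched below):

	intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK) ==
		(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER |
		 INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI |
		 ICL_FIXED_0_ADAPTIVE) << (idx * INTEL_FIXED_BITS_STRIDE)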

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Yi Lai <yi1.lai@intel.com>
---
 arch/x86/events/intel/core.c      | 10 +++-------
 arch/x86/include/asm/perf_event.h |  6 +++++-
 arch/x86/kvm/pmu.h                |  2 +-
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index cdd10370ed95..1a91b527d3c5 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2849,8 +2849,8 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	u64 mask, bits = 0;
 	int idx = hwc->idx;
+	u64 bits = 0;
 
 	if (is_topdown_idx(idx)) {
 		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -2889,14 +2889,10 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
 
 	idx -= INTEL_PMC_IDX_FIXED;
 	bits = intel_fixed_bits_by_idx(idx, bits);
-	mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
-
-	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
+	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip)
 		bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
-		mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
-	}
 
-	cpuc->fixed_ctrl_val &= ~mask;
+	cpuc->fixed_ctrl_val &= ~intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
 	cpuc->fixed_ctrl_val |= bits;
 }
 
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index f8247ac276c4..49a4d442f3fc 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -35,7 +35,6 @@
 #define ARCH_PERFMON_EVENTSEL_EQ			(1ULL << 36)
 #define ARCH_PERFMON_EVENTSEL_UMASK2			(0xFFULL << 40)
 
-#define INTEL_FIXED_BITS_MASK				0xFULL
 #define INTEL_FIXED_BITS_STRIDE			4
 #define INTEL_FIXED_0_KERNEL				(1ULL << 0)
 #define INTEL_FIXED_0_USER				(1ULL << 1)
@@ -48,6 +47,11 @@
 #define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
 #define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)
 
+#define INTEL_FIXED_BITS_MASK					\
+	(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER |		\
+	 INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI |	\
+	 ICL_FIXED_0_ADAPTIVE)
+
 #define intel_fixed_bits_by_idx(_idx, _bits)			\
 	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
 
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index ad89d0bd6005..103604c4b33b 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -13,7 +13,7 @@
 #define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
 					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
 
-/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
+/* retrieve a fixed counter's bits out of IA32_FIXED_CTR_CTRL */
 #define fixed_ctrl_field(ctrl_reg, idx) \
 	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)
 
-- 
2.34.1
Re: [Patch v2 6/6] perf/x86/intel: Add ICL_FIXED_0_ADAPTIVE bit into INTEL_FIXED_BITS_MASK
Posted by Liang, Kan 1 month, 3 weeks ago

On 2025-08-11 2:00 a.m., Dapeng Mi wrote:
> The ICL_FIXED_0_ADAPTIVE bit is missing from INTEL_FIXED_BITS_MASK.
> Add it, and opportunistically refine the fixed counter enabling code.
> 
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> Tested-by: Yi Lai <yi1.lai@intel.com>
> ---
>  arch/x86/events/intel/core.c      | 10 +++-------
>  arch/x86/include/asm/perf_event.h |  6 +++++-
>  arch/x86/kvm/pmu.h                |  2 +-
>  3 files changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
> index cdd10370ed95..1a91b527d3c5 100644
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -2849,8 +2849,8 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
>  {
>  	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>  	struct hw_perf_event *hwc = &event->hw;
> -	u64 mask, bits = 0;
>  	int idx = hwc->idx;
> +	u64 bits = 0;
>  
>  	if (is_topdown_idx(idx)) {
>  		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> @@ -2889,14 +2889,10 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
>  
>  	idx -= INTEL_PMC_IDX_FIXED;
>  	bits = intel_fixed_bits_by_idx(idx, bits);
> -	mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
> -
> -	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
> +	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip)
>  		bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
> -		mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
> -	}
>  

This changes the behavior. The mask will now always include the
ADAPTIVE bit, even on a platform which doesn't support adaptive PEBS.
The description doesn't mention why it's OK.
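
Concretely, for idx 0 and an event without precise_ip, the old code
cleared only mask == 0xf, while with this patch the clear mask is
always 0x10000000f (bit 32 being fixed counter 0's ADAPTIVE bit).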

Thanks,
Kan

> -	cpuc->fixed_ctrl_val &= ~mask;
> +	cpuc->fixed_ctrl_val &= ~intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
>  	cpuc->fixed_ctrl_val |= bits;
>  }
>  
> diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
> index f8247ac276c4..49a4d442f3fc 100644
> --- a/arch/x86/include/asm/perf_event.h
> +++ b/arch/x86/include/asm/perf_event.h
> @@ -35,7 +35,6 @@
>  #define ARCH_PERFMON_EVENTSEL_EQ			(1ULL << 36)
>  #define ARCH_PERFMON_EVENTSEL_UMASK2			(0xFFULL << 40)
>  
> -#define INTEL_FIXED_BITS_MASK				0xFULL
>  #define INTEL_FIXED_BITS_STRIDE			4
>  #define INTEL_FIXED_0_KERNEL				(1ULL << 0)
>  #define INTEL_FIXED_0_USER				(1ULL << 1)
> @@ -48,6 +47,11 @@
>  #define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
>  #define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)
>  
> +#define INTEL_FIXED_BITS_MASK					\
> +	(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER |		\
> +	 INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI |	\
> +	 ICL_FIXED_0_ADAPTIVE)
> +
>  #define intel_fixed_bits_by_idx(_idx, _bits)			\
>  	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
>  
> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> index ad89d0bd6005..103604c4b33b 100644
> --- a/arch/x86/kvm/pmu.h
> +++ b/arch/x86/kvm/pmu.h
> @@ -13,7 +13,7 @@
>  #define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
>  					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
>  
> -/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
> +/* retrieve a fixed counter's bits out of IA32_FIXED_CTR_CTRL */
>  #define fixed_ctrl_field(ctrl_reg, idx) \
>  	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)
>
Re: [Patch v2 6/6] perf/x86/intel: Add ICL_FIXED_0_ADAPTIVE bit into INTEL_FIXED_BITS_MASK
Posted by Mi, Dapeng 1 month, 3 weeks ago
On 8/12/2025 8:00 AM, Liang, Kan wrote:
>
> On 2025-08-11 2:00 a.m., Dapeng Mi wrote:
>> The ICL_FIXED_0_ADAPTIVE bit is missing from INTEL_FIXED_BITS_MASK.
>> Add it, and opportunistically refine the fixed counter enabling code.
>>
>> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
>> Tested-by: Yi Lai <yi1.lai@intel.com>
>> ---
>>  arch/x86/events/intel/core.c      | 10 +++-------
>>  arch/x86/include/asm/perf_event.h |  6 +++++-
>>  arch/x86/kvm/pmu.h                |  2 +-
>>  3 files changed, 9 insertions(+), 9 deletions(-)
>>
>> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
>> index cdd10370ed95..1a91b527d3c5 100644
>> --- a/arch/x86/events/intel/core.c
>> +++ b/arch/x86/events/intel/core.c
>> @@ -2849,8 +2849,8 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
>>  {
>>  	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>>  	struct hw_perf_event *hwc = &event->hw;
>> -	u64 mask, bits = 0;
>>  	int idx = hwc->idx;
>> +	u64 bits = 0;
>>  
>>  	if (is_topdown_idx(idx)) {
>>  		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>> @@ -2889,14 +2889,10 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
>>  
>>  	idx -= INTEL_PMC_IDX_FIXED;
>>  	bits = intel_fixed_bits_by_idx(idx, bits);
>> -	mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
>> -
>> -	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
>> +	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip)
>>  		bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
>> -		mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
>> -	}
>>  
> This changes the behavior. The mask will now always include the
> ADAPTIVE bit, even on a platform which doesn't support adaptive PEBS.
> The description doesn't mention why it's OK.

The mask is used to clear a counter's stale control bits before the new
ones are programmed, so the ICL_FIXED_0_ADAPTIVE bit needs to be cleared
regardless of whether the platform actually supports adaptive PEBS;
clearing a bit that was never set is a no-op. I will enhance the commit
message to describe this.
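
A minimal stand-alone sketch of the clear-then-set update this relies
on (illustrative values only, not the actual kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* idx-0 slice of the new INTEL_FIXED_BITS_MASK:
	 * bits 0-3 plus ADAPTIVE (bit 32) */
	#define FIELD_MASK	0x10000000fULL

	int main(void)
	{
		uint64_t ctrl = 0x3;	/* stale KERNEL|USER from a previous event */
		uint64_t bits = 0x1;	/* new event: KERNEL only, no ADAPTIVE */

		ctrl &= ~FIELD_MASK;	/* clears ADAPTIVE too; a no-op when it was never set */
		ctrl |= bits;

		printf("ctrl = 0x%llx\n", (unsigned long long)ctrl);	/* ctrl = 0x1 */
		return 0;
	}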


>
> Thanks,
> Kan
>> -	cpuc->fixed_ctrl_val &= ~mask;
>> +	cpuc->fixed_ctrl_val &= ~intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
>>  	cpuc->fixed_ctrl_val |= bits;
>>  }
>>  
>> diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
>> index f8247ac276c4..49a4d442f3fc 100644
>> --- a/arch/x86/include/asm/perf_event.h
>> +++ b/arch/x86/include/asm/perf_event.h
>> @@ -35,7 +35,6 @@
>>  #define ARCH_PERFMON_EVENTSEL_EQ			(1ULL << 36)
>>  #define ARCH_PERFMON_EVENTSEL_UMASK2			(0xFFULL << 40)
>>  
>> -#define INTEL_FIXED_BITS_MASK				0xFULL
>>  #define INTEL_FIXED_BITS_STRIDE			4
>>  #define INTEL_FIXED_0_KERNEL				(1ULL << 0)
>>  #define INTEL_FIXED_0_USER				(1ULL << 1)
>> @@ -48,6 +47,11 @@
>>  #define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
>>  #define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)
>>  
>> +#define INTEL_FIXED_BITS_MASK					\
>> +	(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER |		\
>> +	 INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI |	\
>> +	 ICL_FIXED_0_ADAPTIVE)
>> +
>>  #define intel_fixed_bits_by_idx(_idx, _bits)			\
>>  	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
>>  
>> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
>> index ad89d0bd6005..103604c4b33b 100644
>> --- a/arch/x86/kvm/pmu.h
>> +++ b/arch/x86/kvm/pmu.h
>> @@ -13,7 +13,7 @@
>>  #define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
>>  					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
>>  
>> -/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
>> +/* retrieve a fixed counter's bits out of IA32_FIXED_CTR_CTRL */
>>  #define fixed_ctrl_field(ctrl_reg, idx) \
>>  	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)
>>  
>