[PATCH V2 4/4] x86/perf: Assert all platform event flags are within PERF_EVENT_FLAG_ARCH

Posted by Anshuman Khandual 3 years, 7 months ago
Ensure all platform-specific event flags are within PERF_EVENT_FLAG_ARCH,
so that a flag straying outside the arch-reserved bits of
hw_perf_event.flags fails the build instead of silently colliding with
the generic flags.
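
For context: PERF_EVENT_FLAG_ARCH is the mask in include/linux/perf_event.h
that marks which hw_perf_event.flags bits are reserved for architecture
code. A flag defined outside that mask would silently collide with the
generic flag space; each assert below turns such a collision into a build
failure instead. A minimal sketch of the failure mode, using hypothetical
values rather than the real definitions:

	#include <assert.h>

	/*
	 * Hypothetical values for illustration only; see
	 * include/linux/perf_event.h and arch/x86/events/perf_event.h
	 * for the real definitions.
	 */
	#define FLAG_ARCH_MASK	0x000fffff	/* bits reserved for arch code */
	#define EVENT_FLAG_OK	0x00001		/* inside the mask */
	#define EVENT_FLAG_BAD	0x100000	/* outside the mask */

	/* Passes: the flag is fully contained in the arch mask. */
	static_assert((FLAG_ARCH_MASK & EVENT_FLAG_OK) == EVENT_FLAG_OK,
		      "flag must fit within the arch mask");

	/*
	 * Uncommenting this breaks the build, which is the point:
	 * static_assert((FLAG_ARCH_MASK & EVENT_FLAG_BAD) == EVENT_FLAG_BAD,
	 *		 "flag must fit within the arch mask");
	 */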

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: x86@kernel.org
Cc: linux-perf-users@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/x86/events/perf_event.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ba3d24a6a4ec..12136a33e9b7 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -86,6 +86,26 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
 #define PERF_X86_EVENT_AMD_BRS		0x10000 /* AMD Branch Sampling */
 #define PERF_X86_EVENT_PEBS_LAT_HYBRID	0x20000 /* ld and st lat for hybrid */
 
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_LDLAT) == PERF_X86_EVENT_PEBS_LDLAT);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_ST) == PERF_X86_EVENT_PEBS_ST);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_ST_HSW) == PERF_X86_EVENT_PEBS_ST_HSW);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_LD_HSW) == PERF_X86_EVENT_PEBS_LD_HSW);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_NA_HSW) == PERF_X86_EVENT_PEBS_NA_HSW);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_EXCL) == PERF_X86_EVENT_EXCL);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_DYNAMIC) == PERF_X86_EVENT_DYNAMIC);
+
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_EXCL_ACCT) == PERF_X86_EVENT_EXCL_ACCT);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_AUTO_RELOAD) == PERF_X86_EVENT_AUTO_RELOAD);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_LARGE_PEBS) == PERF_X86_EVENT_LARGE_PEBS);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_VIA_PT) == PERF_X86_EVENT_PEBS_VIA_PT);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PAIR) == PERF_X86_EVENT_PAIR);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_LBR_SELECT) == PERF_X86_EVENT_LBR_SELECT);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_TOPDOWN) == PERF_X86_EVENT_TOPDOWN);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_STLAT) == PERF_X86_EVENT_PEBS_STLAT);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_AMD_BRS) == PERF_X86_EVENT_AMD_BRS);
+static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_LAT_HYBRID)
+				   == PERF_X86_EVENT_PEBS_LAT_HYBRID);
+
 static inline bool is_topdown_count(struct perf_event *event)
 {
 	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
-- 
2.25.1
Re: [PATCH V2 4/4] x86/perf: Assert all platform event flags are within PERF_EVENT_FLAG_ARCH
Posted by Peter Zijlstra 3 years, 7 months ago
On Mon, Sep 05, 2022 at 11:12:39AM +0530, Anshuman Khandual wrote:

>  arch/x86/events/perf_event.h | 20 ++++++++++++++++++++
>  1 file changed, 20 insertions(+)
> 
> diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
> index ba3d24a6a4ec..12136a33e9b7 100644
> --- a/arch/x86/events/perf_event.h
> +++ b/arch/x86/events/perf_event.h
> @@ -86,6 +86,26 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
>  #define PERF_X86_EVENT_AMD_BRS		0x10000 /* AMD Branch Sampling */
>  #define PERF_X86_EVENT_PEBS_LAT_HYBRID	0x20000 /* ld and st lat for hybrid */
>  
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_LDLAT) == PERF_X86_EVENT_PEBS_LDLAT);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_ST) == PERF_X86_EVENT_PEBS_ST);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_ST_HSW) == PERF_X86_EVENT_PEBS_ST_HSW);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_LD_HSW) == PERF_X86_EVENT_PEBS_LD_HSW);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_NA_HSW) == PERF_X86_EVENT_PEBS_NA_HSW);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_EXCL) == PERF_X86_EVENT_EXCL);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_DYNAMIC) == PERF_X86_EVENT_DYNAMIC);
> +
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_EXCL_ACCT) == PERF_X86_EVENT_EXCL_ACCT);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_AUTO_RELOAD) == PERF_X86_EVENT_AUTO_RELOAD);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_LARGE_PEBS) == PERF_X86_EVENT_LARGE_PEBS);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_VIA_PT) == PERF_X86_EVENT_PEBS_VIA_PT);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PAIR) == PERF_X86_EVENT_PAIR);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_LBR_SELECT) == PERF_X86_EVENT_LBR_SELECT);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_TOPDOWN) == PERF_X86_EVENT_TOPDOWN);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_STLAT) == PERF_X86_EVENT_PEBS_STLAT);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_AMD_BRS) == PERF_X86_EVENT_AMD_BRS);
> +static_assert((PERF_EVENT_FLAG_ARCH & PERF_X86_EVENT_PEBS_LAT_HYBRID)
> +				   == PERF_X86_EVENT_PEBS_LAT_HYBRID);


That's not half tedious...

How about something like so?

---
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -64,27 +64,25 @@ static inline bool constraint_match(stru
 	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
 }
 
+#define PERF_ARCH(name, val)	\
+	PERF_X86_EVENT_##name = val,
+
 /*
  * struct hw_perf_event.flags flags
  */
-#define PERF_X86_EVENT_PEBS_LDLAT	0x00001 /* ld+ldlat data address sampling */
-#define PERF_X86_EVENT_PEBS_ST		0x00002 /* st data address sampling */
-#define PERF_X86_EVENT_PEBS_ST_HSW	0x00004 /* haswell style datala, store */
-#define PERF_X86_EVENT_PEBS_LD_HSW	0x00008 /* haswell style datala, load */
-#define PERF_X86_EVENT_PEBS_NA_HSW	0x00010 /* haswell style datala, unknown */
-#define PERF_X86_EVENT_EXCL		0x00020 /* HT exclusivity on counter */
-#define PERF_X86_EVENT_DYNAMIC		0x00040 /* dynamic alloc'd constraint */
-
-#define PERF_X86_EVENT_EXCL_ACCT	0x00100 /* accounted EXCL event */
-#define PERF_X86_EVENT_AUTO_RELOAD	0x00200 /* use PEBS auto-reload */
-#define PERF_X86_EVENT_LARGE_PEBS	0x00400 /* use large PEBS */
-#define PERF_X86_EVENT_PEBS_VIA_PT	0x00800 /* use PT buffer for PEBS */
-#define PERF_X86_EVENT_PAIR		0x01000 /* Large Increment per Cycle */
-#define PERF_X86_EVENT_LBR_SELECT	0x02000 /* Save/Restore MSR_LBR_SELECT */
-#define PERF_X86_EVENT_TOPDOWN		0x04000 /* Count Topdown slots/metrics events */
-#define PERF_X86_EVENT_PEBS_STLAT	0x08000 /* st+stlat data address sampling */
-#define PERF_X86_EVENT_AMD_BRS		0x10000 /* AMD Branch Sampling */
-#define PERF_X86_EVENT_PEBS_LAT_HYBRID	0x20000 /* ld and st lat for hybrid */
+enum {
+#include "perf_event_flags.h"
+};
+
+#undef PERF_ARCH
+
+#define PERF_ARCH(name, val)						\
+	static_assert((PERF_X86_EVENT_##name & PERF_EVENT_FLAG_ARCH) ==	\
+		      PERF_X86_EVENT_##name);
+
+#include "perf_event_flags.h"
+
+#undef PERF_ARCH
 
 static inline bool is_topdown_count(struct perf_event *event)
 {
--- /dev/null
+++ b/arch/x86/events/perf_event_flags.h
@@ -0,0 +1,22 @@
+
+/*
+ * struct hw_perf_event.flags flags
+ */
+PERF_ARCH(PEBS_LDLAT,		0x00001) /* ld+ldlat data address sampling */
+PERF_ARCH(PEBS_ST,		0x00002) /* st data address sampling */
+PERF_ARCH(PEBS_ST_HSW,		0x00004) /* haswell style datala, store */
+PERF_ARCH(PEBS_LD_HSW,		0x00008) /* haswell style datala, load */
+PERF_ARCH(PEBS_NA_HSW,		0x00010) /* haswell style datala, unknown */
+PERF_ARCH(EXCL,			0x00020) /* HT exclusivity on counter */
+PERF_ARCH(DYNAMIC,		0x00040) /* dynamic alloc'd constraint */
+			/*	0x00080	*/
+PERF_ARCH(EXCL_ACCT,		0x00100) /* accounted EXCL event */
+PERF_ARCH(AUTO_RELOAD,		0x00200) /* use PEBS auto-reload */
+PERF_ARCH(LARGE_PEBS,		0x00400) /* use large PEBS */
+PERF_ARCH(PEBS_VIA_PT,		0x00800) /* use PT buffer for PEBS */
+PERF_ARCH(PAIR,			0x01000) /* Large Increment per Cycle */
+PERF_ARCH(LBR_SELECT,		0x02000) /* Save/Restore MSR_LBR_SELECT */
+PERF_ARCH(TOPDOWN,		0x04000) /* Count Topdown slots/metrics events */
+PERF_ARCH(PEBS_STLAT,		0x08000) /* st+stlat data address sampling */
+PERF_ARCH(AMD_BRS,		0x10000) /* AMD Branch Sampling */
+PERF_ARCH(PEBS_LAT_HYBRID,	0x20000) /* ld and st lat for hybrid */
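
The above is a classic X-macro: perf_event_flags.h holds the single
authoritative list, and each #include expands it under a different
PERF_ARCH definition, first into enumerators, then into one
static_assert() per flag. A self-contained sketch of the same idea, with
hypothetical names and the list kept in a macro instead of a separate
header:

	#include <assert.h>

	/* Hypothetical flag list and arch mask, for illustration only. */
	#define MY_FLAGS(X)			\
		X(PEBS_LDLAT,	0x00001)	\
		X(PEBS_ST,	0x00002)	\
		X(DYNAMIC,	0x00040)

	#define ARCH_MASK	0x000fffff

	/* First expansion: define the enumerators. */
	#define DEFINE_FLAG(name, val)	MY_EVENT_##name = val,
	enum { MY_FLAGS(DEFINE_FLAG) };
	#undef DEFINE_FLAG

	/*
	 * Second expansion: one compile-time check per flag. Adding a
	 * flag to MY_FLAGS() grows the enum and the checks together.
	 */
	#define CHECK_FLAG(name, val)					\
		static_assert((MY_EVENT_##name & ARCH_MASK) ==		\
			      MY_EVENT_##name, #name " outside arch mask");
	MY_FLAGS(CHECK_FLAG)
	#undef CHECK_FLAG

The diff above keeps the list in its own header and redefines PERF_ARCH
between the two #includes, which avoids one giant list macro and leaves
the flag table diffable on its own.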
Re: [PATCH V2 4/4] x86/perf: Assert all platform event flags are within PERF_EVENT_FLAG_ARCH
Posted by Anshuman Khandual 3 years, 7 months ago

On 9/7/22 00:52, Peter Zijlstra wrote:
> On Mon, Sep 05, 2022 at 11:12:39AM +0530, Anshuman Khandual wrote:
> 
>> [ ... ]
> 
> 
> That's not half tedious...
> 
> How about something like so?

Makes sense, will fold this back. Could I also include your "Signed-off-by:"
for this patch?

- Anshuman

Re: [PATCH V2 4/4] x86/perf: Assert all platform event flags are within PERF_EVENT_FLAG_ARCH
Posted by Peter Zijlstra 3 years, 7 months ago
On Wed, Sep 07, 2022 at 10:57:35AM +0530, Anshuman Khandual wrote:

> > How about something like so?
> 
> Makes sense, will fold this back. Could I also include your "Signed-off-by:"
> for this patch?
> 

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
