From: Edwin Török <edvin.torok@citrix.com>
This is needed so that the maximum number of supported architectural
performance events can be exposed in CPUID, without cpuid.c and
vpmu_intel.c going out of sync.
The macros defined here take a parameter that controls how the event
values are used: either to generate case statements, or to count how
many events there are.
They are a variation on the X macro pattern:
https://en.wikipedia.org/wiki/X_Macro
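For illustration, the two uses expand roughly like this (an expansion
sketch, not the preprocessor's literal output):

  VPMU_IPC_EVENTS(DEFCASE)  /* generates case statements */
    => case (0x003c): case (0x00c0): case (0x013c):

  DEFCOUNT(VPMU_IPC_EVENTS) /* counts the events */
    => (0 + +1 +1 +1)       /* == 3; likewise VPMU_ARCH_EVENTS_MAX == 8 */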
No functional change.
Could be backported to 4.13.
Signed-off-by: Edwin Török <edvin.torok@citrix.com>
---
xen/arch/x86/cpu/vpmu_intel.c | 16 ++--------------
xen/arch/x86/cpuid.c | 2 +-
xen/arch/x86/include/asm/vpmu.h | 27 +++++++++++++++++++++++++++
3 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 44a1ed5b10..ef8d69a0d6 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -622,15 +622,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
blocked = 1;
switch ( umaskevent )
{
- /*
- * See the Pre-Defined Architectural Performance Events table
- * from the Intel 64 and IA-32 Architectures Software
- * Developer's Manual, Volume 3B, System Programming Guide,
- * Part 2.
- */
- case 0x003c: /* UnHalted Core Cycles */
- case 0x013c: /* UnHalted Reference Cycles */
- case 0x00c0: /* Instructions Retired */
+ VPMU_IPC_EVENTS(DEFCASE)
blocked = 0;
break;
}
@@ -641,11 +633,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
/* Additional counters beyond IPC only; blocked already set. */
switch ( umaskevent )
{
- case 0x4f2e: /* Last Level Cache References */
- case 0x412e: /* Last Level Cache Misses */
- case 0x00c4: /* Branch Instructions Retired */
- case 0x00c5: /* All Branch Mispredict Retired */
- case 0x01a4: /* Topdown Slots */
+ VPMU_ARCH_EVENTS(DEFCASE)
blocked = 0;
break;
}
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 51ee89afc4..12e768ae87 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -328,7 +328,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
if ( vpmu_features & (XENPMU_FEATURE_IPC_ONLY |
XENPMU_FEATURE_ARCH_ONLY) ) {
- unsigned limit = ( vpmu_features & XENPMU_FEATURE_ARCH_ONLY ) ? 8 : 3;
+        unsigned limit = ( vpmu_features & XENPMU_FEATURE_ARCH_ONLY ) ? VPMU_ARCH_EVENTS_MAX : VPMU_IPC_EVENTS_MAX;
if (limit < u.arch_nr) {
gdprintk(XENLOG_WARNING, "Limiting architectural PMU events to %d (actual %d)", limit, u.arch_nr);
u.arch_nr = limit;
diff --git a/xen/arch/x86/include/asm/vpmu.h b/xen/arch/x86/include/asm/vpmu.h
index 1ef6089ccb..49c3e8c19a 100644
--- a/xen/arch/x86/include/asm/vpmu.h
+++ b/xen/arch/x86/include/asm/vpmu.h
@@ -146,5 +146,32 @@ static inline int vpmu_allocate_context(struct vcpu *v)
}
#endif
+/*
+ * See "20.2.1.2 Pre-Defined Architectural Performance Events"
+ * from the Intel 64 and IA-32 Architectures Software
+ * Developer's Manual, Volume 3B, System Programming Guide,
+ * Part 2.
+ */
+#define VPMU_IPC_EVENTS(DEF) \
+    DEF(0x003c) /* UnHalted Core Cycles */ \
+    DEF(0x00c0) /* Instructions Retired */ \
+    DEF(0x013c) /* UnHalted Reference Cycles */
+
+
+#define VPMU_ARCH_EVENTS(DEF) \
+    VPMU_IPC_EVENTS(DEF) \
+    DEF(0x4f2e) /* Last Level Cache References */ \
+    DEF(0x412e) /* Last Level Cache Misses */ \
+    DEF(0x00c4) /* Branch Instructions Retired */ \
+    DEF(0x00c5) /* All Branch Mispredict Retired */ \
+    DEF(0x01a4) /* Topdown Slots */
+
+#define DEFCASE(x) case (x):
+#define DEFSUM(x) +1
+#define DEFCOUNT(X) (0 + X(DEFSUM))
+
+#define VPMU_IPC_EVENTS_MAX DEFCOUNT(VPMU_IPC_EVENTS)
+#define VPMU_ARCH_EVENTS_MAX DEFCOUNT(VPMU_ARCH_EVENTS)
+
#endif /* __ASM_X86_HVM_VPMU_H_*/
--
2.41.0