Move x86's perf guest callbacks into common KVM, as they are semantically
identical to arm64's callbacks (the only other such KVM callbacks).
arm64 will convert to the common versions in a future patch.

Implement the necessary arm64 arch hooks now to avoid having to provide
stubs or a temporary #define (from x86) to avoid arm64 compilation errors
when CONFIG_GUEST_PERF_EVENTS=y.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/arm64/include/asm/kvm_host.h | 8 +++++
arch/arm64/kvm/arm.c | 5 +++
arch/x86/include/asm/kvm_host.h | 3 ++
arch/x86/kvm/x86.c | 53 +++++++------------------------
include/linux/kvm_host.h | 10 ++++++
virt/kvm/kvm_main.c | 44 +++++++++++++++++++++++++
6 files changed, 81 insertions(+), 42 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ed940aec89e0..828b6eaa2c56 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -673,6 +673,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
void kvm_perf_init(void);
void kvm_perf_teardown(void);

+#ifdef CONFIG_GUEST_PERF_EVENTS
+static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+ /* Any callback while a vCPU is loaded is considered to be in guest. */
+ return !!vcpu;
+}
+#endif
+
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e9a2b8f27792..2b542fdc237e 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -500,6 +500,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
return vcpu_mode_priv(vcpu);
}

+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+ return *vcpu_pc(vcpu);
+}
+
/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2d86a2dfc775..6efe4e03a6d2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1543,6 +1543,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
return -ENOTSUPP;
}

+#define kvm_arch_pmi_in_guest(vcpu) \
+ ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
+
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 412646b973bb..1bea616402e6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8264,43 +8264,12 @@ static void kvm_timer_init(void)
kvmclock_cpu_online, kvmclock_cpu_down_prep);
}

-static inline bool kvm_pmi_in_guest(struct kvm_vcpu *vcpu)
-{
- return vcpu && vcpu->arch.handling_intr_from_guest;
-}
-
-static unsigned int kvm_guest_state(void)
-{
- struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
- unsigned int state;
-
- if (!kvm_pmi_in_guest(vcpu))
- return 0;
-
- state = PERF_GUEST_ACTIVE;
- if (static_call(kvm_x86_get_cpl)(vcpu))
- state |= PERF_GUEST_USER;
-
- return state;
-}
-
-static unsigned long kvm_guest_get_ip(void)
-{
- struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-
- /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
- if (WARN_ON_ONCE(!kvm_pmi_in_guest(vcpu)))
- return 0;
-
- return kvm_rip_read(vcpu);
-}
-
static unsigned int kvm_handle_intel_pt_intr(void)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

/* '0' on failure so that the !PT case can use a RET0 static call. */
- if (!kvm_pmi_in_guest(vcpu))
+ if (!kvm_arch_pmi_in_guest(vcpu))
return 0;

kvm_make_request(KVM_REQ_PMI, vcpu);
@@ -8309,12 +8278,6 @@ static unsigned int kvm_handle_intel_pt_intr(void)
return 1;
}

-static struct perf_guest_info_callbacks kvm_guest_cbs = {
- .state = kvm_guest_state,
- .get_ip = kvm_guest_get_ip,
- .handle_intel_pt_intr = NULL,
-};
-
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
@@ -11068,9 +11031,11 @@ int kvm_arch_hardware_setup(void *opaque)
memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
kvm_ops_static_call_update();

+ /* Temporary ugliness. */
if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest())
- kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr;
- perf_register_guest_info_callbacks(&kvm_guest_cbs);
+ kvm_register_perf_callbacks(kvm_handle_intel_pt_intr);
+ else
+ kvm_register_perf_callbacks(NULL);

if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
supported_xss = 0;

@@ -11099,8 +11064,7 @@ int kvm_arch_hardware_setup(void *opaque)

void kvm_arch_hardware_unsetup(void)
{
- perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
- kvm_guest_cbs.handle_intel_pt_intr = NULL;
+ kvm_unregister_perf_callbacks();

static_call(kvm_x86_hardware_unsetup)();
}
@@ -11727,6 +11691,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
return vcpu->arch.preempted_in_kernel;
}

+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+ return kvm_rip_read(vcpu);
+}
+
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e4d712e9f760..b9255a6439f2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1163,6 +1163,16 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
}
#endif

+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+void kvm_unregister_perf_callbacks(void);
+#else
+static inline void kvm_register_perf_callbacks(void *ign) {}
+static inline void kvm_unregister_perf_callbacks(void) {}
+#endif /* CONFIG_GUEST_PERF_EVENTS */
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3e67c93ca403..179fb110a00f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5460,6 +5460,50 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
return &kvm_running_vcpu;
}

+#ifdef CONFIG_GUEST_PERF_EVENTS
+static unsigned int kvm_guest_state(void)
+{
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+ unsigned int state;
+
+ if (!kvm_arch_pmi_in_guest(vcpu))
+ return 0;
+
+ state = PERF_GUEST_ACTIVE;
+ if (!kvm_arch_vcpu_in_kernel(vcpu))
+ state |= PERF_GUEST_USER;
+
+ return state;
+}
+
+static unsigned long kvm_guest_get_ip(void)
+{
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+
+ /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
+ if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
+ return 0;
+
+ return kvm_arch_vcpu_get_ip(vcpu);
+}
+
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+ .state = kvm_guest_state,
+ .get_ip = kvm_guest_get_ip,
+ .handle_intel_pt_intr = NULL,
+};
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
+{
+ kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
+ perf_register_guest_info_callbacks(&kvm_guest_cbs);
+}
+void kvm_unregister_perf_callbacks(void)
+{
+ perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+}
+#endif
+
struct kvm_cpu_compat_check {
void *opaque;
int *ret;
--
2.33.0.464.g1972c5931b-goog
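
[Not part of the original posting: for illustration, a minimal sketch of what
the arm64 conversion deferred to "a future patch" could look like, assuming
the existing kvm_perf_init()/kvm_perf_teardown() hooks simply wrap the new
common registration API. The placement is an assumption, not the actual
follow-up patch.]

	void kvm_perf_init(void)
	{
		/* arm64 has no Intel PT equivalent, so no PT interrupt handler. */
		kvm_register_perf_callbacks(NULL);
	}

	void kvm_perf_teardown(void)
	{
		kvm_unregister_perf_callbacks();
	}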
On 22/09/21 02:05, Sean Christopherson wrote:
> Move x86's perf guest callbacks into common KVM, as they are semantically
> identical to arm64's callbacks (the only other such KVM callbacks).
> arm64 will convert to the common versions in a future patch.
>
> Implement the necessary arm64 arch hooks now to avoid having to provide
> stubs or a temporary #define (from x86) to avoid arm64 compilation errors
> when CONFIG_GUEST_PERF_EVENTS=y.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
On Wed, 22 Sep 2021 01:05:29 +0100,
Sean Christopherson <seanjc@google.com> wrote:
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index ed940aec89e0..828b6eaa2c56 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -673,6 +673,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
> void kvm_perf_init(void);
> void kvm_perf_teardown(void);
>
> +#ifdef CONFIG_GUEST_PERF_EVENTS
> +static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)

Pardon my x86 ignorance, what is PMI? PMU Interrupt?

> +{
> + /* Any callback while a vCPU is loaded is considered to be in guest. */
> + return !!vcpu;
> +}
> +#endif

Do you really need this #ifdef?

> +
> long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
> gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
> void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index e9a2b8f27792..2b542fdc237e 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -500,6 +500,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
> 	return vcpu_mode_priv(vcpu);
> }
>
> +unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
> +{
> +	return *vcpu_pc(vcpu);
> +}
> +
> /* Just ensure a guest exit from a particular CPU */
> static void exit_vm_noop(void *info)
> {

The above nits notwithstanding,

Acked-by: Marc Zyngier <maz@kernel.org>

	M.

--
Without deviation from the norm, progress is not possible.
On Mon, Oct 11, 2021, Marc Zyngier wrote:
> On Wed, 22 Sep 2021 01:05:29 +0100, Sean Christopherson <seanjc@google.com> wrote:
> > +#ifdef CONFIG_GUEST_PERF_EVENTS
> > +static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
>
> Pardon my x86 ignorance, what is PMI? PMU Interrupt?

Ya, Performance Monitoring Interrupt.  I didn't realize the term wasn't common
perf terminology.  Maybe kvm_arch_perf_events_in_guest() to be less x86-centric?

> > +{
> > + /* Any callback while a vCPU is loaded is considered to be in guest. */
> > + return !!vcpu;
> > +}
> > +#endif
>
> Do you really need this #ifdef?

Nope, should compile fine without it, though simply dropping the #ifdef would
make the semantics of the function wrong, even if nothing consumes it.  Tweak
it to use IS_ENABLED()?

	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
On Mon, 11 Oct 2021 15:46:25 +0100,
Sean Christopherson <seanjc@google.com> wrote:
>
> On Mon, Oct 11, 2021, Marc Zyngier wrote:
> > Pardon my x86 ignorance, what is PMI? PMU Interrupt?
>
> Ya, Performance Monitoring Interrupt.  I didn't realize the term wasn't
> common perf terminology.  Maybe kvm_arch_perf_events_in_guest() to be
> less x86-centric?

Up to you. I would be happy with just a comment.

> > Do you really need this #ifdef?
>
> Nope, should compile fine without it, though simply dropping the #ifdef
> would make the semantics of the function wrong, even if nothing consumes
> it.  Tweak it to use IS_ENABLED()?
>
> 	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;

LGTM.

	M.

--
Without deviation from the norm, progress is not possible.
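
[For reference, the helper with the IS_ENABLED() tweak agreed on above would
look roughly like the sketch below; whether to also rename the helper or just
add a comment explaining "PMI" is left open in the thread.]

	/* PMI == Performance Monitoring Interrupt. */
	static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
	{
		/* Any callback while a vCPU is loaded is considered to be in guest. */
		return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
	}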