On 2/10/2023 8:31 AM, Sean Christopherson wrote:
> Add a helper to query whether a vCPU has run so that KVM doesn't have to
> open code checking last_vmentry_cpu against a magic value.
>
> No functional change intended.
>
> Suggested-by: Xiaoyao Li <xiaoyao.li@intel.com>
> Cc: Like Xu <like.xu.linux@gmail.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
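
As a side note for anyone reading the change out of tree, a minimal
standalone sketch of the pattern below; struct vcpu, vcpu_has_run() and
main() are stand-ins I made up for illustration, not the real struct
kvm_vcpu_arch fields or kvm_vcpu_has_run():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the relevant part of struct kvm_vcpu_arch. */
struct vcpu {
	int last_vmentry_cpu;	/* -1 until the vCPU first enters the guest */
};

/* Named helper replacing the open-coded "!= -1" magic-value check. */
static inline bool vcpu_has_run(struct vcpu *vcpu)
{
	return vcpu->last_vmentry_cpu != -1;
}

int main(void)
{
	struct vcpu v = { .last_vmentry_cpu = -1 };

	printf("has run: %d\n", vcpu_has_run(&v));	/* 0: never entered the guest */
	v.last_vmentry_cpu = 3;				/* pretend the vCPU ran on CPU 3 */
	printf("has run: %d\n", vcpu_has_run(&v));	/* 1: has run at least once */
	return 0;
}

The win is purely readability: call sites state what they are asking
("has this vCPU run?") rather than how the answer happens to be encoded
(last_vmentry_cpu != -1).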
> ---
> arch/x86/kvm/cpuid.c | 2 +-
> arch/x86/kvm/mmu/mmu.c | 2 +-
> arch/x86/kvm/x86.h | 5 +++++
> 3 files changed, 7 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 8f8edeaf8177..448d627ce891 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -420,7 +420,7 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
> * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
> * whether the supplied CPUID data is equal to what's already set.
> */
> - if (vcpu->arch.last_vmentry_cpu != -1) {
> + if (kvm_vcpu_has_run(vcpu)) {
> r = kvm_cpuid_check_equal(vcpu, e2, nent);
> if (r)
> return r;
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index c91ee2927dd7..b0693195273b 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5393,7 +5393,7 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
> * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
> * kvm_arch_vcpu_ioctl().
> */
> - KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
> + KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm);
> }
>
> void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index a8167b47b8c8..754190af1791 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -83,6 +83,11 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
> void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
> int kvm_check_nested_events(struct kvm_vcpu *vcpu);
>
> +static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
> +{
> + return vcpu->arch.last_vmentry_cpu != -1;
> +}
> +
> static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
> {
> return vcpu->arch.exception.pending ||