From nobody Mon Sep 16 19:32:40 2024
From: Bibo Mao <maobibo@loongson.cn>
To: Huacai Chen, Tianrui Zhao, Peter Zijlstra, Waiman Long
Cc: WANG Xuerui, loongarch@lists.linux.dev, linux-kernel@vger.kernel.org, kvm@vger.kernel.org, virtualization@lists.linux.dev
Subject: [PATCH 1/2] LoongArch: KVM: Add paravirt qspinlock in kvm side
Date: Tue, 23 Jul 2024 15:38:24 +0800
Message-Id: <20240723073825.1811600-2-maobibo@loongson.cn>
X-Mailer: git-send-email 2.39.3
In-Reply-To: <20240723073825.1811600-1-maobibo@loongson.cn>
References: <20240723073825.1811600-1-maobibo@loongson.cn>

Add paravirt spinlock support on the KVM host side. The guest's
pv_wait() executes the idle instruction, which traps to the hypervisor
so that the vCPU thread releases the pCPU and sleeps on a wait queue.
For pv_kick_cpu(), a hypercall is used: the host wakes up the target
vCPU thread and lets the calling vCPU yield to it, giving up its own
time slice.
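
For reference, a minimal sketch of how a guest is expected to reach this
handler (the real caller is added in patch 2/2; the sketch assumes the
usual LoongArch KVM convention of passing the hypercall function id in
a0 and the first argument in a1, which is what kvm_pv_kick_cpu() reads
back from vcpu->arch.gprs[LOONGARCH_GPR_A1]):

	/*
	 * Sketch only, not part of this patch: request a kick for "cpu".
	 * kvm_hypercall1() already exists in asm/kvm_para.h, and
	 * cpu_logical_map() yields the cpuid that the host side resolves
	 * with kvm_get_vcpu_by_cpuid().
	 */
	static void kick_vcpu_sketch(int cpu)
	{
		kvm_hypercall1(KVM_HCALL_FUNC_KICK, cpu_logical_map(cpu));
	}
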
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/include/asm/kvm_host.h  |  4 ++++
 arch/loongarch/include/asm/kvm_para.h  |  1 +
 arch/loongarch/include/asm/loongarch.h |  1 +
 arch/loongarch/kvm/exit.c              | 24 +++++++++++++++++++++++-
 arch/loongarch/kvm/vcpu.c              | 13 ++++++++++++-
 5 files changed, 41 insertions(+), 2 deletions(-)

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 44b54965f5b4..9c60c1018410 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -32,6 +32,7 @@
 #define KVM_HALT_POLL_NS_DEFAULT	500000
 #define KVM_REQ_TLB_FLUSH_GPA		KVM_ARCH_REQ(0)
 #define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(1)
+#define KVM_REQ_EVENT			KVM_ARCH_REQ(2)
 
 #define KVM_GUESTDBG_SW_BP_MASK		\
 	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
@@ -214,6 +215,9 @@ struct kvm_vcpu_arch {
 		u64 last_steal;
 		struct gfn_to_hva_cache cache;
 	} st;
+	struct {
+		bool pv_unhalted;
+	} pv;
 };
 
 static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index d134b63b921f..67aef57e7490 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -15,6 +15,7 @@
 #define KVM_HCALL_SERVICE	HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
 #define KVM_HCALL_FUNC_IPI	1
 #define KVM_HCALL_FUNC_NOTIFY	2
+#define KVM_HCALL_FUNC_KICK	3
 
 #define KVM_HCALL_SWDBG	HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
 
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 7a4633ef284b..27961668bfd9 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -170,6 +170,7 @@
 #define CPUCFG_KVM_FEATURE	(CPUCFG_KVM_BASE + 4)
 #define KVM_FEATURE_IPI		BIT(1)
 #define KVM_FEATURE_STEAL_TIME	BIT(2)
+#define KVM_FEATURE_PARAVIRT_SPINLOCK	BIT(3)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index ea73f9dc2cc6..bed182573b91 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -50,7 +50,7 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
 		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
 		break;
 	case CPUCFG_KVM_FEATURE:
-		ret = KVM_FEATURE_IPI;
+		ret = KVM_FEATURE_IPI | KVM_FEATURE_PARAVIRT_SPINLOCK;
 		if (kvm_pvtime_supported())
 			ret |= KVM_FEATURE_STEAL_TIME;
 		vcpu->arch.gprs[rd] = ret;
@@ -776,6 +776,25 @@ static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static long kvm_pv_kick_cpu(struct kvm_vcpu *vcpu)
+{
+	int cpu = vcpu->arch.gprs[LOONGARCH_GPR_A1];
+	struct kvm_vcpu *dst;
+
+	dst = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu);
+	if (!dst)
+		return KVM_HCALL_INVALID_PARAMETER;
+
+	dst->arch.pv.pv_unhalted = true;
+	kvm_make_request(KVM_REQ_EVENT, dst);
+	kvm_vcpu_kick(dst);
+	/* Ignore requests to yield to self */
+	if (dst != vcpu)
+		kvm_vcpu_yield_to(dst);
+
+	return 0;
+}
+
 /*
  * Hypercall emulation always return to guest, Caller should check retval.
 */
@@ -792,6 +811,9 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
 	case KVM_HCALL_FUNC_NOTIFY:
 		ret = kvm_save_notify(vcpu);
 		break;
+	case KVM_HCALL_FUNC_KICK:
+		ret = kvm_pv_kick_cpu(vcpu);
+		break;
 	default:
 		ret = KVM_HCALL_INVALID_CODE;
 		break;
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 16756ffb55e8..19446b9a32e6 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -95,6 +95,9 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
 	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
 		kvm_update_stolen_time(vcpu);
 
+	if (kvm_check_request(KVM_REQ_EVENT, vcpu))
+		vcpu->arch.pv.pv_unhalted = false;
+
 	return RESUME_GUEST;
 }
 
@@ -222,9 +225,17 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.pv.pv_unhalted)
+		return true;
+
+	return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return !!(vcpu->arch.irq_pending) &&
+	return (!!vcpu->arch.irq_pending || kvm_vcpu_has_events(vcpu)) &&
 		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
 }
 
-- 
2.39.3

From nobody Mon Sep 16 19:32:40 2024
From: Bibo Mao <maobibo@loongson.cn>
To: Huacai Chen, Tianrui Zhao, Peter Zijlstra, Waiman Long
Cc: WANG Xuerui, loongarch@lists.linux.dev, linux-kernel@vger.kernel.org, kvm@vger.kernel.org, virtualization@lists.linux.dev
Subject: [PATCH 2/2] LoongArch: KVM: Add paravirt qspinlock in guest side
Date: Tue, 23 Jul 2024 15:38:25 +0800
Message-Id: <20240723073825.1811600-3-maobibo@loongson.cn>
X-Mailer: git-send-email 2.39.3
In-Reply-To: <20240723073825.1811600-1-maobibo@loongson.cn>
References: <20240723073825.1811600-1-maobibo@loongson.cn>

Add the PARAVIRT_SPINLOCKS option on LoongArch, together with a
pv_lock_ops template. With PARAVIRT_SPINLOCKS enabled, the native ops
still work when running on bare metal. Two VM-specific functions,
kvm_wait() and kvm_kick_cpu(), are installed if the guest detects that
the hypervisor supports pv spinlocks. With kvm_wait() the vCPU thread
exits to the hypervisor and gives up scheduling on the pCPU, and with
kvm_kick_cpu() a hypercall is used to ask the hypervisor to wake up a
previously waiting vCPU.
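
To show how these two hooks are driven, here is a loose sketch of the
generic pv qspinlock pattern (a simplified illustration, not code from
this patch; the example_* names are made up):

	/* A waiter spins for a bounded time, then blocks its vCPU: */
	static void example_wait_for_byte(u8 *ptr)
	{
		int loop;

		while (!READ_ONCE(*ptr)) {
			/* spin up to SPIN_THRESHOLD iterations first */
			for (loop = SPIN_THRESHOLD; loop && !READ_ONCE(*ptr); loop--)
				cpu_relax();

			if (!READ_ONCE(*ptr))
				pv_wait(ptr, 0);  /* "idle 0" trap, see kvm_wait() */
		}
	}

	/* The lock holder publishes the change, then wakes the waiter: */
	static void example_release(u8 *ptr, int waiter_cpu)
	{
		smp_store_release(ptr, 1);
		pv_kick(waiter_cpu);      /* KVM_HCALL_FUNC_KICK hypercall */
	}
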
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/Kconfig                 | 14 +++
 arch/loongarch/include/asm/Kbuild      |  1 -
 arch/loongarch/include/asm/paravirt.h  | 47 ++++++++++
 arch/loongarch/include/asm/qspinlock.h | 39 ++++++++
 .../include/asm/qspinlock_paravirt.h   |  6 ++
 arch/loongarch/kernel/paravirt.c       | 88 +++++++++++++++++++
 arch/loongarch/kernel/smp.c            |  4 +-
 7 files changed, 197 insertions(+), 2 deletions(-)
 create mode 100644 arch/loongarch/include/asm/qspinlock.h
 create mode 100644 arch/loongarch/include/asm/qspinlock_paravirt.h

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index b81d0eba5c7e..7ad63db2fafd 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -660,6 +660,20 @@ config PARAVIRT_TIME_ACCOUNTING
 
 	  If in doubt, say N here.
 
+config PARAVIRT_SPINLOCKS
+	bool "Paravirtual queued spinlocks"
+	select PARAVIRT
+	depends on SMP
+	help
+	  Paravirtualized spinlocks allow a pvops backend to replace the
+	  spinlock implementation with something virtualization-friendly
+	  (for example, block the virtual CPU rather than spinning).
+
+	  It has a minimal impact on native kernels and gives a nice performance
+	  benefit on paravirtualized kernels.
+
+	  If you are unsure how to answer this question, answer Y.
+
 endmenu
 
 config ARCH_SELECT_MEMORY_MODEL
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 2bb3676429c0..4635b755b2b4 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -6,7 +6,6 @@ generic-y += mcs_spinlock.h
 generic-y += parport.h
 generic-y += early_ioremap.h
 generic-y += qrwlock.h
-generic-y += qspinlock.h
 generic-y += user.h
 generic-y += ioctl.h
 generic-y += statfs.h
diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h
index dddec49671ae..2617d635171b 100644
--- a/arch/loongarch/include/asm/paravirt.h
+++ b/arch/loongarch/include/asm/paravirt.h
@@ -20,6 +20,47 @@ static inline u64 paravirt_steal_clock(int cpu)
 int __init pv_ipi_init(void);
 int __init pv_time_init(void);
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+struct qspinlock;
+struct pv_lock_ops {
+	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+	void (*queued_spin_unlock)(struct qspinlock *lock);
+	void (*wait)(u8 *ptr, u8 val);
+	void (*kick)(int cpu);
+	bool (*vcpu_is_preempted)(int cpu);
+};
+
+extern struct pv_lock_ops pv_lock_ops;
+
+void __init kvm_spinlock_init(void);
+bool pv_is_native_spin_unlock(void);
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+							 u32 val)
+{
+	pv_lock_ops.queued_spin_lock_slowpath(lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_lock_ops.queued_spin_unlock(lock);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+	pv_lock_ops.wait(ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+	pv_lock_ops.kick(cpu);
+}
+
+static __always_inline bool pv_vcpu_is_preempted(long cpu)
+{
+	return pv_lock_ops.vcpu_is_preempted(cpu);
+}
+#endif /* PARAVIRT_SPINLOCKS */
 #else
 
 static inline int pv_ipi_init(void)
@@ -32,4 +73,10 @@ static inline int pv_time_init(void)
 	return 0;
 }
 #endif // CONFIG_PARAVIRT
+
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void kvm_spinlock_init(void)
+{
+}
+#endif
 #endif
diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
new file mode 100644
index 000000000000..8e1b14c9e906
--- /dev/null
+++ b/arch/loongarch/include/asm/qspinlock.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_LOONGARCH_QSPINLOCK_H
+#define _ASM_LOONGARCH_QSPINLOCK_H
+
+#include <asm/paravirt.h>
+
+#define _Q_PENDING_LOOPS	(1 << 9)
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD	(1 << 15)
+
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+extern bool nopvspin;
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	pv_queued_spin_lock_slowpath(lock, val);
+}
+
+#define queued_spin_unlock queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	pv_queued_spin_unlock(lock);
+}
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(long cpu)
+{
+	return pv_vcpu_is_preempted(cpu);
+}
+#endif
+
+#include <asm-generic/qspinlock.h>
+
+#endif // _ASM_LOONGARCH_QSPINLOCK_H
diff --git a/arch/loongarch/include/asm/qspinlock_paravirt.h b/arch/loongarch/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..d6d7f487daea
--- /dev/null
+++ b/arch/loongarch/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_QSPINLOCK_PARAVIRT_H
+#define __ASM_QSPINLOCK_PARAVIRT_H
+
+void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);
+#endif
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index aee44610007d..758039eabdde 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -298,3 +298,91 @@ int __init pv_time_init(void)
 
 	return 0;
 }
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+static bool native_vcpu_is_preempted(int cpu)
+{
+	return false;
+}
+
+/**
+ * native_queued_spin_unlock - release a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ */
+static void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	/*
+	 * unlock() needs release semantics:
+	 */
+	smp_store_release(&lock->locked, 0);
+}
+
+static void paravirt_nop_kick(int cpu)
+{
+}
+
+static void paravirt_nop_wait(u8 *ptr, u8 val)
+{
+}
+
+static void kvm_wait(u8 *ptr, u8 val)
+{
+	if (READ_ONCE(*ptr) != val)
+		return;
+
+	__asm__ __volatile__("idle 0\n\t" : : : "memory");
+}
+
+/* Kick a cpu. Used to wake up a halted vcpu */
+static void kvm_kick_cpu(int cpu)
+{
+	kvm_hypercall1(KVM_HCALL_FUNC_KICK, cpu_logical_map(cpu));
+}
+
+bool pv_is_native_spin_unlock(void)
+{
+	return pv_lock_ops.queued_spin_unlock == native_queued_spin_unlock;
+}
+
+/*
+ * Setup pv_lock_ops for guest kernel.
+ */
+void __init kvm_spinlock_init(void)
+{
+	int feature;
+
+	/*
+	 * pv_hash()/pv_unhash() need it whether pv spinlock is
+	 * enabled or not
+	 */
+	__pv_init_lock_hash();
+
+	if (!kvm_para_available())
+		return;
+
+	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
+	if (num_possible_cpus() == 1)
+		return;
+
+	feature = kvm_arch_para_features();
+	if (!(feature & KVM_FEATURE_PARAVIRT_SPINLOCK))
+		return;
+
+	if (nopvspin)
+		return;
+
+	pr_info("Using paravirt qspinlock\n");
+	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_lock_ops.queued_spin_unlock = __pv_queued_spin_unlock;
+	pv_lock_ops.wait = kvm_wait;
+	pv_lock_ops.kick = kvm_kick_cpu;
+}
+
+struct pv_lock_ops pv_lock_ops = {
+	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+	.queued_spin_unlock = native_queued_spin_unlock,
+	.wait = paravirt_nop_wait,
+	.kick = paravirt_nop_kick,
+	.vcpu_is_preempted = native_vcpu_is_preempted,
+};
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 1436d2465939..6bc0b182a2ce 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -463,7 +463,7 @@ core_initcall(ipi_pm_init);
 #endif
 
 /* Preload SMP state for boot cpu */
-void smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
 {
 	unsigned int cpu, node, rr_node;
 
@@ -496,6 +496,8 @@ void smp_prepare_boot_cpu(void)
 			rr_node = next_node_in(rr_node, node_online_map);
 		}
 	}
+
+	kvm_spinlock_init();
 }
 
 /* called from main before smp_init() */
-- 
2.39.3
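
A closing note on feature detection: patch 1/2 advertises the new
capability through the CPUCFG_KVM_FEATURE word, so a guest can probe it
before kvm_spinlock_init() switches the ops. A sketch, under the
assumption that kvm_arch_para_features() boils down to the existing
read_cpucfg() helper:

	/* Illustration only: probe the host's pv-spinlock capability. */
	static bool example_has_pv_spinlock(void)
	{
		if (!kvm_para_available())	/* no KVM signature */
			return false;

		return read_cpucfg(CPUCFG_KVM_FEATURE) & KVM_FEATURE_PARAVIRT_SPINLOCK;
	}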