[PATCH] riscv: qspinlock: Add virt_spin_lock() support for KVM guests
Posted by guoren@kernel.org 1 year ago
From: Guo Ren <guoren@linux.alibaba.com>

Add a static key controlling whether virt_spin_lock() should be
called or not. When running on bare metal, set the new key to
false.

VM guests should fall back to a Test-and-Set spinlock, because
fair locks have horrible lock 'holder' preemption issues. The
test_and_set_spinlock_key shortcuts queued_spin_lock_slowpath(),
allowing virt_spin_lock() to hijack it.
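
For context: the generic queued spinlock code provides a no-op
virt_spin_lock() hook that architectures may override, which the
#define in asm/spinlock.h below does. A simplified sketch of the
hook point in kernel/locking/qspinlock.c (not part of this patch):

	void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
	{
		/* A successful virt_spin_lock() bypasses the fair queued path. */
		if (virt_spin_lock(lock))
			return;

		/* ... MCS node queueing (the fair path) follows ... */
	}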

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
 arch/riscv/include/asm/sbi.h      | 16 ++++++++++++++++
 arch/riscv/include/asm/spinlock.h | 25 +++++++++++++++++++++++++
 arch/riscv/kernel/sbi.c           |  2 +-
 arch/riscv/kernel/setup.c         | 17 +++++++++++++++++
 4 files changed, 59 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 6c82318065cf..076ae2eb15a1 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -55,6 +55,21 @@ enum sbi_ext_base_fid {
 	SBI_EXT_BASE_GET_MIMPID,
 };
 
+enum sbi_ext_base_impl_id {
+	SBI_EXT_BASE_IMPL_ID_BBL = 0,
+	SBI_EXT_BASE_IMPL_ID_OPENSBI,
+	SBI_EXT_BASE_IMPL_ID_XVISOR,
+	SBI_EXT_BASE_IMPL_ID_KVM,
+	SBI_EXT_BASE_IMPL_ID_RUSTSBI,
+	SBI_EXT_BASE_IMPL_ID_DIOSIX,
+	SBI_EXT_BASE_IMPL_ID_COFFER,
+	SBI_EXT_BASE_IMPL_ID_XEN,
+	SBI_EXT_BASE_IMPL_ID_POLARFIRE,
+	SBI_EXT_BASE_IMPL_ID_COREBOOT,
+	SBI_EXT_BASE_IMPL_ID_OREBOOT,
+	SBI_EXT_BASE_IMPL_ID_BHYVE,
+};
+
 enum sbi_ext_time_fid {
 	SBI_EXT_TIME_SET_TIMER = 0,
 };
@@ -444,6 +459,7 @@ static inline int sbi_console_getchar(void) { return -ENOENT; }
 long sbi_get_mvendorid(void);
 long sbi_get_marchid(void);
 long sbi_get_mimpid(void);
+long sbi_get_firmware_id(void);
 void sbi_set_timer(uint64_t stime_value);
 void sbi_shutdown(void);
 void sbi_send_ipi(unsigned int cpu);
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
index e5121b89acea..bcbb38194237 100644
--- a/arch/riscv/include/asm/spinlock.h
+++ b/arch/riscv/include/asm/spinlock.h
@@ -42,6 +42,31 @@ SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
 
 #endif
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/jump_label.h>
+
+/*
+ * KVM guests fall back to a Test-and-Set spinlock, because fair locks
+ * have horrible lock 'holder' preemption issues. When enabled, the
+ * test_and_set_spinlock_key shortcuts queued_spin_lock_slowpath(),
+ * letting virt_spin_lock() hijack it.
+ */
+DECLARE_STATIC_KEY_FALSE(test_and_set_spinlock_key);
+
+#define virt_spin_lock test_and_set_spinlock
+static inline bool test_and_set_spinlock(struct qspinlock *lock)
+{
+	if (!static_branch_likely(&test_and_set_spinlock_key))
+		return false;
+
+	do {
+		smp_cond_load_relaxed((s32 *)&lock->val, VAL == 0);
+	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
+
+	return true;
+}
+#endif
+
 #include <asm/qrwlock.h>
 
 #endif /* __ASM_RISCV_SPINLOCK_H */
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 1989b8cade1b..2cbacc345e5d 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -488,7 +488,7 @@ static inline long sbi_get_spec_version(void)
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
 }
 
-static inline long sbi_get_firmware_id(void)
+long sbi_get_firmware_id(void)
 {
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID);
 }
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 45010e71df86..7e98c1c8ff0d 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -249,6 +249,16 @@ DEFINE_STATIC_KEY_TRUE(qspinlock_key);
 EXPORT_SYMBOL(qspinlock_key);
 #endif
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+DEFINE_STATIC_KEY_FALSE(test_and_set_spinlock_key);
+
+static void __init virt_spin_lock_init(void)
+{
+	if (sbi_get_firmware_id() == SBI_EXT_BASE_IMPL_ID_KVM)
+		static_branch_enable(&test_and_set_spinlock_key);
+}
+#endif
+
 static void __init riscv_spinlock_init(void)
 {
 	char *using_ext = NULL;
@@ -274,10 +284,17 @@ static void __init riscv_spinlock_init(void)
 	}
 #endif
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+	virt_spin_lock_init();
+
+	if (sbi_get_firmware_id() == SBI_EXT_BASE_IMPL_ID_KVM)
+		using_ext = "using test and set";
+
 	if (!using_ext)
 		pr_err("Queued spinlock without Zabha or Ziccrse");
 	else
 		pr_info("Queued spinlock %s: enabled\n", using_ext);
+#endif
 }
 
 extern void __init init_rt_signal_env(void);
-- 
2.40.1
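
For anyone who wants to poke at the fallback logic outside the kernel,
here is a minimal user-space sketch of the test-and-set loop added in
asm/spinlock.h above, written with C11 atomics in place of the kernel
primitives (illustrative only; names follow the patch):

#include <stdatomic.h>
#include <stdbool.h>

#define _Q_LOCKED_VAL	1

struct qspinlock { atomic_int val; };

static bool test_and_set_spinlock(struct qspinlock *lock)
{
	int expected;

	do {
		/* Wait until the lock looks free; the kernel version
		 * uses smp_cond_load_relaxed() for this. */
		while (atomic_load_explicit(&lock->val, memory_order_relaxed))
			;
		expected = 0;
		/* Race to claim the lock (atomic_cmpxchg() in the kernel);
		 * on failure, expected is refreshed and we wait again. */
	} while (!atomic_compare_exchange_strong(&lock->val, &expected,
						 _Q_LOCKED_VAL));

	return true;
}

static void test_and_set_unlock(struct qspinlock *lock)
{
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}
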
Re: [PATCH] riscv: qspinlock: Add virt_spin_lock() support for KVM guests
Posted by Radim Krčmář 1 year ago
2024-12-15T11:13:22-0500, guoren@kernel.org:
> Add a static key controlling whether virt_spin_lock() should be
> called or not. When running on bare metal, set the new key to
> false.

Wouldn't re-using the combo spinlock qspinlock_key be better?

> VM guests should fall back to a Test-and-Set spinlock, because
> fair locks have horrible lock 'holder' preemption issues. The
> test_and_set_spinlock_key shortcuts queued_spin_lock_slowpath(),
> allowing virt_spin_lock() to hijack it.

I think we want the proper paravirtualized slowpath, have the
discussions stalled on the SBI side?

Btw. how bad are the performance numbers without this patch?

Thanks.
Re: [PATCH] riscv: qspinlock: Add virt_spin_lock() support for KVM guests
Posted by Guo Ren 12 months ago
On Mon, Dec 16, 2024 at 5:47 PM Radim Krčmář <rkrcmar@ventanamicro.com> wrote:
>
> 2024-12-15T11:13:22-0500, guoren@kernel.org:
> > Add a static key controlling whether virt_spin_lock() should be
> > called or not. When running on bare metal, set the new key to
> > false.
>
> Wouldn't re-using the combo spinlock qspinlock_key be better?
>
> > VM guests should fall back to a Test-and-Set spinlock, because
> > fair locks have horrible lock 'holder' preemption issues. The
> > test_and_set_spinlock_key shortcuts queued_spin_lock_slowpath(),
> > allowing virt_spin_lock() to hijack it.
>
> I think we want the proper paravirtualized slowpath, have the
> discussions stalled on the SBI side?

Here is the up-to-date paravirtualized slowpath version:
https://lore.kernel.org/linux-riscv/20241222033917.1754495-1-guoren@kernel.org/

We could continue the discussion on the SBI spec side.

>
> Btw. how bad are the performance numbers without this patch?
>
> Thanks.



-- 
Best Regards
 Guo Ren