Instead of having the pv spinlock function definitions in paravirt.h,
move them into a new dedicated header, paravirt-spinlock.h.
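
With this, the pv lock interfaces are no longer pulled in via paravirt.h;
qspinlock.h includes the new header when CONFIG_PARAVIRT is enabled, and
hypervisor code patches the dedicated pv_ops_lock structure instead of
pv_ops.lock. A minimal sketch of the result, taken from the diff below:

    /* arch/x86/include/asm/qspinlock.h */
    #ifdef CONFIG_PARAVIRT
    #include <asm/paravirt-spinlock.h>
    #endif

    /* hypervisor init code, e.g. Hyper-V, now assigns into pv_ops_lock */
    pv_ops_lock.wait = hv_qlock_wait;
    pv_ops_lock.kick = hv_qlock_kick;
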
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- use new header instead of qspinlock.h
- use dedicated pv_ops_lock array
- move more paravirt related lock code
V3:
- hide native_pv_lock_init() with CONFIG_SMP (kernel test robot)
V4:
- don't reference pv_ops_lock without CONFIG_PARAVIRT_SPINLOCKS
(kernel test robot)
---
arch/x86/hyperv/hv_spinlock.c | 10 +-
arch/x86/include/asm/paravirt-spinlock.h | 146 +++++++++++++++++++++++
arch/x86/include/asm/paravirt.h | 61 ----------
arch/x86/include/asm/paravirt_types.h | 17 ---
arch/x86/include/asm/qspinlock.h | 89 ++------------
arch/x86/kernel/Makefile | 2 +-
arch/x86/kernel/kvm.c | 12 +-
arch/x86/kernel/paravirt-spinlocks.c | 26 +++-
arch/x86/kernel/paravirt.c | 21 ----
arch/x86/xen/spinlock.c | 10 +-
tools/objtool/check.c | 1 +
11 files changed, 196 insertions(+), 199 deletions(-)
create mode 100644 arch/x86/include/asm/paravirt-spinlock.h
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
index 2a3c2afb0154..210b494e4de0 100644
--- a/arch/x86/hyperv/hv_spinlock.c
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -78,11 +78,11 @@ void __init hv_init_spinlocks(void)
pr_info("PV spinlocks enabled\n");
__pv_init_lock_hash();
- pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
- pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
- pv_ops.lock.wait = hv_qlock_wait;
- pv_ops.lock.kick = hv_qlock_kick;
- pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
+ pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+ pv_ops_lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+ pv_ops_lock.wait = hv_qlock_wait;
+ pv_ops_lock.kick = hv_qlock_kick;
+ pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
}
static __init int hv_parse_nopvspin(char *arg)
diff --git a/arch/x86/include/asm/paravirt-spinlock.h b/arch/x86/include/asm/paravirt-spinlock.h
new file mode 100644
index 000000000000..ed3ed343903d
--- /dev/null
+++ b/arch/x86/include/asm/paravirt-spinlock.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_X86_PARAVIRT_SPINLOCK_H
+#define _ASM_X86_PARAVIRT_SPINLOCK_H
+
+#include <asm/paravirt_types.h>
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#endif
+
+struct qspinlock;
+
+struct pv_lock_ops {
+ void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+ struct paravirt_callee_save queued_spin_unlock;
+
+ void (*wait)(u8 *ptr, u8 val);
+ void (*kick)(int cpu);
+
+ struct paravirt_callee_save vcpu_is_preempted;
+} __no_randomize_layout;
+
+extern struct pv_lock_ops pv_ops_lock;
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init paravirt_set_cap(void);
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+extern bool nopvspin;
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+ u32 val)
+{
+ PVOP_VCALL2(pv_ops_lock, queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+ PVOP_ALT_VCALLEE1(pv_ops_lock, queued_spin_unlock, lock,
+ "movb $0, (%%" _ASM_ARG1 ");",
+ ALT_NOT(X86_FEATURE_PVUNLOCK));
+}
+
+static __always_inline bool pv_vcpu_is_preempted(long cpu)
+{
+ return PVOP_ALT_CALLEE1(bool, pv_ops_lock, vcpu_is_preempted, cpu,
+ "xor %%" _ASM_AX ", %%" _ASM_AX ";",
+ ALT_NOT(X86_FEATURE_VCPUPREEMPT));
+}
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * A smp_store_release() on the least-significant byte.
+ */
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+ smp_store_release(&lock->locked, 0);
+}
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+ pv_queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ kcsan_release();
+ pv_queued_spin_unlock(lock);
+}
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(long cpu)
+{
+ return pv_vcpu_is_preempted(cpu);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+ PVOP_VCALL2(pv_ops_lock, wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+ PVOP_VCALL1(pv_ops_lock, kick, cpu);
+}
+
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+void __init native_pv_lock_init(void);
+__visible void __native_queued_spin_unlock(struct qspinlock *lock);
+bool pv_is_native_spin_unlock(void);
+__visible bool __native_vcpu_is_preempted(long cpu);
+bool pv_is_native_vcpu_is_preempted(void);
+
+/*
+ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should keep this key
+ * disabled. Native does not touch the key.
+ *
+ * When in a guest then native_pv_lock_init() enables the key first and
+ * KVM/XEN might conditionally disable it later in the boot process again.
+ */
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ * true - lock has been negotiated, all done;
+ * false - queued_spin_lock_slowpath() will do its thing.
+ */
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
+{
+ int val;
+
+ if (!static_branch_likely(&virt_spin_lock_key))
+ return false;
+
+ /*
+ * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+ * back to a Test-and-Set spinlock, because fair locks have
+ * horrible lock 'holder' preemption issues.
+ */
+
+ __retry:
+ val = atomic_read(&lock->val);
+
+ if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+ cpu_relax();
+ goto __retry;
+ }
+
+ return true;
+}
+
+#endif /* _ASM_X86_PARAVIRT_SPINLOCK_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ec274d13bae0..b21072af731d 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -19,15 +19,6 @@
#include <linux/cpumask.h>
#include <asm/frame.h>
-__visible void __native_queued_spin_unlock(struct qspinlock *lock);
-bool pv_is_native_spin_unlock(void);
-__visible bool __native_vcpu_is_preempted(long cpu);
-bool pv_is_native_vcpu_is_preempted(void);
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-void __init paravirt_set_cap(void);
-#endif
-
/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
@@ -522,46 +513,7 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
{
pv_ops.mmu.set_fixmap(idx, phys, flags);
}
-#endif
-
-#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-
-static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
- u32 val)
-{
- PVOP_VCALL2(pv_ops, lock.queued_spin_lock_slowpath, lock, val);
-}
-
-static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
-{
- PVOP_ALT_VCALLEE1(pv_ops, lock.queued_spin_unlock, lock,
- "movb $0, (%%" _ASM_ARG1 ");",
- ALT_NOT(X86_FEATURE_PVUNLOCK));
-}
-
-static __always_inline void pv_wait(u8 *ptr, u8 val)
-{
- PVOP_VCALL2(pv_ops, lock.wait, ptr, val);
-}
-
-static __always_inline void pv_kick(int cpu)
-{
- PVOP_VCALL1(pv_ops, lock.kick, cpu);
-}
-
-static __always_inline bool pv_vcpu_is_preempted(long cpu)
-{
- return PVOP_ALT_CALLEE1(bool, pv_ops, lock.vcpu_is_preempted, cpu,
- "xor %%" _ASM_AX ", %%" _ASM_AX ";",
- ALT_NOT(X86_FEATURE_VCPUPREEMPT));
-}
-void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
-bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
-
-#endif /* SMP && PARAVIRT_SPINLOCKS */
-
-#ifdef CONFIG_PARAVIRT_XXL
static __always_inline unsigned long arch_local_save_flags(void)
{
return PVOP_ALT_CALLEE0(unsigned long, pv_ops, irq.save_fl, "pushf; pop %%rax;",
@@ -588,8 +540,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
}
#endif
-void native_pv_lock_init(void) __init;
-
#else /* __ASSEMBLER__ */
#ifdef CONFIG_X86_64
@@ -613,12 +563,6 @@ void native_pv_lock_init(void) __init;
#endif /* __ASSEMBLER__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
-
-#ifndef __ASSEMBLER__
-static inline void native_pv_lock_init(void)
-{
-}
-#endif
#endif /* !CONFIG_PARAVIRT */
#ifndef __ASSEMBLER__
@@ -634,10 +578,5 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
}
#endif
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-static inline void paravirt_set_cap(void)
-{
-}
-#endif
#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PARAVIRT_H */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 01a485f1a7f1..e2b487d35d14 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -184,22 +184,6 @@ struct pv_mmu_ops {
#endif
} __no_randomize_layout;
-#ifdef CONFIG_SMP
-#include <asm/spinlock_types.h>
-#endif
-
-struct qspinlock;
-
-struct pv_lock_ops {
- void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
- struct paravirt_callee_save queued_spin_unlock;
-
- void (*wait)(u8 *ptr, u8 val);
- void (*kick)(int cpu);
-
- struct paravirt_callee_save vcpu_is_preempted;
-} __no_randomize_layout;
-
/* This contains all the paravirt structures: we get a convenient
* number for each function using the offset which we use to indicate
* what to patch. */
@@ -207,7 +191,6 @@ struct paravirt_patch_template {
struct pv_cpu_ops cpu;
struct pv_irq_ops irq;
struct pv_mmu_ops mmu;
- struct pv_lock_ops lock;
} __no_randomize_layout;
extern struct paravirt_patch_template pv_ops;
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 68da67df304d..a2668bdf4c84 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -7,6 +7,9 @@
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt-spinlock.h>
+#endif
#define _Q_PENDING_LOOPS (1 << 9)
@@ -27,89 +30,13 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lo
return val;
}
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __pv_init_lock_hash(void);
-extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
-extern bool nopvspin;
-
-#define queued_spin_unlock queued_spin_unlock
-/**
- * queued_spin_unlock - release a queued spinlock
- * @lock : Pointer to queued spinlock structure
- *
- * A smp_store_release() on the least-significant byte.
- */
-static inline void native_queued_spin_unlock(struct qspinlock *lock)
-{
- smp_store_release(&lock->locked, 0);
-}
-
-static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
-{
- pv_queued_spin_lock_slowpath(lock, val);
-}
-
-static inline void queued_spin_unlock(struct qspinlock *lock)
-{
- kcsan_release();
- pv_queued_spin_unlock(lock);
-}
-
-#define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(long cpu)
-{
- return pv_vcpu_is_preempted(cpu);
-}
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void paravirt_set_cap(void) { }
#endif
-#ifdef CONFIG_PARAVIRT
-/*
- * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
- *
- * Native (and PV wanting native due to vCPU pinning) should keep this key
- * disabled. Native does not touch the key.
- *
- * When in a guest then native_pv_lock_init() enables the key first and
- * KVM/XEN might conditionally disable it later in the boot process again.
- */
-DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
-
-/*
- * Shortcut for the queued_spin_lock_slowpath() function that allows
- * virt to hijack it.
- *
- * Returns:
- * true - lock has been negotiated, all done;
- * false - queued_spin_lock_slowpath() will do its thing.
- */
-#define virt_spin_lock virt_spin_lock
-static inline bool virt_spin_lock(struct qspinlock *lock)
-{
- int val;
-
- if (!static_branch_likely(&virt_spin_lock_key))
- return false;
-
- /*
- * On hypervisors without PARAVIRT_SPINLOCKS support we fall
- * back to a Test-and-Set spinlock, because fair locks have
- * horrible lock 'holder' preemption issues.
- */
-
- __retry:
- val = atomic_read(&lock->val);
-
- if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
- cpu_relax();
- goto __retry;
- }
-
- return true;
-}
-
-#endif /* CONFIG_PARAVIRT */
+#ifndef CONFIG_PARAVIRT
+static inline void native_pv_lock_init(void) { }
+#endif
#include <asm-generic/qspinlock.h>
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index bc184dd38d99..e9aeeeafad17 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -126,7 +126,7 @@ obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o
obj-$(CONFIG_KVM_GUEST) += kvm.o kvmclock.o
obj-$(CONFIG_PARAVIRT) += paravirt.o
-obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
+obj-$(CONFIG_PARAVIRT) += paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d54fd2bc0402..e767f8ed405a 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -824,8 +824,10 @@ static void __init kvm_guest_init(void)
has_steal_clock = 1;
static_call_update(pv_steal_clock, kvm_steal_clock);
- pv_ops.lock.vcpu_is_preempted =
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+ pv_ops_lock.vcpu_is_preempted =
PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
+#endif
}
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -1121,11 +1123,11 @@ void __init kvm_spinlock_init(void)
pr_info("PV spinlocks enabled\n");
__pv_init_lock_hash();
- pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
- pv_ops.lock.queued_spin_unlock =
+ pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+ pv_ops_lock.queued_spin_unlock =
PV_CALLEE_SAVE(__pv_queued_spin_unlock);
- pv_ops.lock.wait = kvm_wait;
- pv_ops.lock.kick = kvm_kick_cpu;
+ pv_ops_lock.wait = kvm_wait;
+ pv_ops_lock.kick = kvm_kick_cpu;
/*
* When PV spinlock is enabled which is preferred over
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 9e1ea99ad9df..95452444868f 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -3,12 +3,22 @@
* Split spinlock implementation out into its own file, so it can be
* compiled in a FTRACE-compatible way.
*/
+#include <linux/static_call.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>
-#include <asm/paravirt.h>
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
+#ifdef CONFIG_SMP
+void __init native_pv_lock_init(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ static_branch_enable(&virt_spin_lock_key);
+}
+#endif
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
native_queued_spin_unlock(lock);
@@ -17,7 +27,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
bool pv_is_native_spin_unlock(void)
{
- return pv_ops.lock.queued_spin_unlock.func ==
+ return pv_ops_lock.queued_spin_unlock.func ==
__raw_callee_save___native_queued_spin_unlock;
}
@@ -29,7 +39,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
bool pv_is_native_vcpu_is_preempted(void)
{
- return pv_ops.lock.vcpu_is_preempted.func ==
+ return pv_ops_lock.vcpu_is_preempted.func ==
__raw_callee_save___native_vcpu_is_preempted;
}
@@ -41,3 +51,13 @@ void __init paravirt_set_cap(void)
if (!pv_is_native_vcpu_is_preempted())
setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
}
+
+struct pv_lock_ops pv_ops_lock = {
+ .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+ .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
+ .wait = paravirt_nop,
+ .kick = paravirt_nop,
+ .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
+};
+EXPORT_SYMBOL(pv_ops_lock);
+#endif
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 5dfbd3f55792..a6ed52cae003 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -57,14 +57,6 @@ DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
#endif
-DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
-
-void __init native_pv_lock_init(void)
-{
- if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
- static_branch_enable(&virt_spin_lock_key);
-}
-
static noinstr void pv_native_safe_halt(void)
{
native_safe_halt();
@@ -221,19 +213,6 @@ struct paravirt_patch_template pv_ops = {
.mmu.set_fixmap = native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */
-
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
- /* Lock ops. */
-#ifdef CONFIG_SMP
- .lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
- .lock.queued_spin_unlock =
- PV_CALLEE_SAVE(__native_queued_spin_unlock),
- .lock.wait = paravirt_nop,
- .lock.kick = paravirt_nop,
- .lock.vcpu_is_preempted =
- PV_CALLEE_SAVE(__native_vcpu_is_preempted),
-#endif /* SMP */
-#endif
};
#ifdef CONFIG_PARAVIRT_XXL
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index fe56646d6919..83ac24ead289 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -134,10 +134,10 @@ void __init xen_init_spinlocks(void)
printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
__pv_init_lock_hash();
- pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
- pv_ops.lock.queued_spin_unlock =
+ pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+ pv_ops_lock.queued_spin_unlock =
PV_CALLEE_SAVE(__pv_queued_spin_unlock);
- pv_ops.lock.wait = xen_qlock_wait;
- pv_ops.lock.kick = xen_qlock_kick;
- pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
+ pv_ops_lock.wait = xen_qlock_wait;
+ pv_ops_lock.kick = xen_qlock_kick;
+ pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index d63d0891924a..36e04988babe 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -550,6 +550,7 @@ static struct {
int idx_off;
} pv_ops_tables[] = {
{ .name = "pv_ops", },
+ { .name = "pv_ops_lock", },
{ .name = NULL, .idx_off = -1 }
};
--
2.51.0
Hi Juergen,
kernel test robot noticed the following build errors:
[auto build test ERROR on tip/x86/core]
[also build test ERROR on tip/sched/core kvm/queue kvm/next linus/master v6.18-rc7]
[cannot apply to kvm/linux-next next-20251127]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Juergen-Gross/x86-paravirt-Remove-not-needed-includes-of-paravirt-h/20251127-152054
base: tip/x86/core
patch link: https://lore.kernel.org/r/20251127070844.21919-22-jgross%40suse.com
patch subject: [PATCH v4 21/21] x86/pvlocks: Move paravirt spinlock functions into own header
config: x86_64-allnoconfig (https://download.01.org/0day-ci/archive/20251127/202511271747.smpLdjsz-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251127/202511271747.smpLdjsz-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202511271747.smpLdjsz-lkp@intel.com/
All errors (new ones prefixed by >>):
>> arch/x86/kernel/alternative.c:2373:2: error: call to undeclared function 'paravirt_set_cap'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
2373 | paravirt_set_cap();
| ^
arch/x86/kernel/alternative.c:2373:2: note: did you mean 'paravirt_ret0'?
arch/x86/include/asm/paravirt-base.h:23:15: note: 'paravirt_ret0' declared here
23 | unsigned long paravirt_ret0(void);
| ^
1 error generated.
vim +/paravirt_set_cap +2373 arch/x86/kernel/alternative.c
270a69c4485d7d arch/x86/kernel/alternative.c Peter Zijlstra 2023-02-08 2344
9a0b5817ad97bb arch/i386/kernel/alternative.c Gerd Hoffmann 2006-03-23 2345 void __init alternative_instructions(void)
9a0b5817ad97bb arch/i386/kernel/alternative.c Gerd Hoffmann 2006-03-23 2346 {
ebebe30794d38c arch/x86/kernel/alternative.c Pawan Gupta 2025-05-03 2347 u64 ibt;
ebebe30794d38c arch/x86/kernel/alternative.c Pawan Gupta 2025-05-03 2348
7457c0da024b18 arch/x86/kernel/alternative.c Peter Zijlstra 2019-05-03 2349 int3_selftest();
7457c0da024b18 arch/x86/kernel/alternative.c Peter Zijlstra 2019-05-03 2350
7457c0da024b18 arch/x86/kernel/alternative.c Peter Zijlstra 2019-05-03 2351 /*
7457c0da024b18 arch/x86/kernel/alternative.c Peter Zijlstra 2019-05-03 2352 * The patching is not fully atomic, so try to avoid local
7457c0da024b18 arch/x86/kernel/alternative.c Peter Zijlstra 2019-05-03 2353 * interruptions that might execute the to be patched code.
7457c0da024b18 arch/x86/kernel/alternative.c Peter Zijlstra 2019-05-03 2354 * Other CPUs are not running.
7457c0da024b18 arch/x86/kernel/alternative.c Peter Zijlstra 2019-05-03 2355 */
8f4e956b313dcc arch/i386/kernel/alternative.c Andi Kleen 2007-07-22 2356 stop_nmi();
123aa76ec0cab5 arch/x86/kernel/alternative.c Andi Kleen 2009-02-12 2357
123aa76ec0cab5 arch/x86/kernel/alternative.c Andi Kleen 2009-02-12 2358 /*
123aa76ec0cab5 arch/x86/kernel/alternative.c Andi Kleen 2009-02-12 2359 * Don't stop machine check exceptions while patching.
123aa76ec0cab5 arch/x86/kernel/alternative.c Andi Kleen 2009-02-12 2360 * MCEs only happen when something got corrupted and in this
123aa76ec0cab5 arch/x86/kernel/alternative.c Andi Kleen 2009-02-12 2361 * case we must do something about the corruption.
32b1cbe380417f arch/x86/kernel/alternative.c Marco Ammon 2019-09-02 2362 * Ignoring it is worse than an unlikely patching race.
123aa76ec0cab5 arch/x86/kernel/alternative.c Andi Kleen 2009-02-12 2363 * Also machine checks tend to be broadcast and if one CPU
123aa76ec0cab5 arch/x86/kernel/alternative.c Andi Kleen 2009-02-12 2364 * goes into machine check the others follow quickly, so we don't
123aa76ec0cab5 arch/x86/kernel/alternative.c Andi Kleen 2009-02-12 2365 * expect a machine check to cause undue problems during to code
123aa76ec0cab5 arch/x86/kernel/alternative.c Andi Kleen 2009-02-12 2366 * patching.
123aa76ec0cab5 arch/x86/kernel/alternative.c Andi Kleen 2009-02-12 2367 */
8f4e956b313dcc arch/i386/kernel/alternative.c Andi Kleen 2007-07-22 2368
4e6292114c7412 arch/x86/kernel/alternative.c Juergen Gross 2021-03-11 2369 /*
f7af6977621a41 arch/x86/kernel/alternative.c Juergen Gross 2023-12-10 2370 * Make sure to set (artificial) features depending on used paravirt
f7af6977621a41 arch/x86/kernel/alternative.c Juergen Gross 2023-12-10 2371 * functions which can later influence alternative patching.
4e6292114c7412 arch/x86/kernel/alternative.c Juergen Gross 2021-03-11 2372 */
4e6292114c7412 arch/x86/kernel/alternative.c Juergen Gross 2021-03-11 @2373 paravirt_set_cap();
4e6292114c7412 arch/x86/kernel/alternative.c Juergen Gross 2021-03-11 2374
ebebe30794d38c arch/x86/kernel/alternative.c Pawan Gupta 2025-05-03 2375 /* Keep CET-IBT disabled until caller/callee are patched */
ebebe30794d38c arch/x86/kernel/alternative.c Pawan Gupta 2025-05-03 2376 ibt = ibt_save(/*disable*/ true);
ebebe30794d38c arch/x86/kernel/alternative.c Pawan Gupta 2025-05-03 2377
931ab63664f02b arch/x86/kernel/alternative.c Peter Zijlstra 2022-10-27 2378 __apply_fineibt(__retpoline_sites, __retpoline_sites_end,
1d7e707af44613 arch/x86/kernel/alternative.c Mike Rapoport (Microsoft 2025-01-26 2379) __cfi_sites, __cfi_sites_end, true);
026211c40b0554 arch/x86/kernel/alternative.c Kees Cook 2025-09-03 2380 cfi_debug = false;
931ab63664f02b arch/x86/kernel/alternative.c Peter Zijlstra 2022-10-27 2381
7508500900814d arch/x86/kernel/alternative.c Peter Zijlstra 2021-10-26 2382 /*
7508500900814d arch/x86/kernel/alternative.c Peter Zijlstra 2021-10-26 2383 * Rewrite the retpolines, must be done before alternatives since
7508500900814d arch/x86/kernel/alternative.c Peter Zijlstra 2021-10-26 2384 * those can rewrite the retpoline thunks.
7508500900814d arch/x86/kernel/alternative.c Peter Zijlstra 2021-10-26 2385 */
1d7e707af44613 arch/x86/kernel/alternative.c Mike Rapoport (Microsoft 2025-01-26 2386) apply_retpolines(__retpoline_sites, __retpoline_sites_end);
1d7e707af44613 arch/x86/kernel/alternative.c Mike Rapoport (Microsoft 2025-01-26 2387) apply_returns(__return_sites, __return_sites_end);
7508500900814d arch/x86/kernel/alternative.c Peter Zijlstra 2021-10-26 2388
a82b26451de126 arch/x86/kernel/alternative.c Peter Zijlstra (Intel 2025-06-03 2389) its_fini_core();
a82b26451de126 arch/x86/kernel/alternative.c Peter Zijlstra (Intel 2025-06-03 2390)
e81dc127ef6988 arch/x86/kernel/alternative.c Thomas Gleixner 2022-09-15 2391 /*
ab9fea59487d8b arch/x86/kernel/alternative.c Peter Zijlstra 2025-02-07 2392 * Adjust all CALL instructions to point to func()-10, including
ab9fea59487d8b arch/x86/kernel/alternative.c Peter Zijlstra 2025-02-07 2393 * those in .altinstr_replacement.
e81dc127ef6988 arch/x86/kernel/alternative.c Thomas Gleixner 2022-09-15 2394 */
e81dc127ef6988 arch/x86/kernel/alternative.c Thomas Gleixner 2022-09-15 2395 callthunks_patch_builtin_calls();
e81dc127ef6988 arch/x86/kernel/alternative.c Thomas Gleixner 2022-09-15 2396
ab9fea59487d8b arch/x86/kernel/alternative.c Peter Zijlstra 2025-02-07 2397 apply_alternatives(__alt_instructions, __alt_instructions_end);
ab9fea59487d8b arch/x86/kernel/alternative.c Peter Zijlstra 2025-02-07 2398
be0fffa5ca894a arch/x86/kernel/alternative.c Peter Zijlstra 2023-06-22 2399 /*
be0fffa5ca894a arch/x86/kernel/alternative.c Peter Zijlstra 2023-06-22 2400 * Seal all functions that do not have their address taken.
be0fffa5ca894a arch/x86/kernel/alternative.c Peter Zijlstra 2023-06-22 2401 */
1d7e707af44613 arch/x86/kernel/alternative.c Mike Rapoport (Microsoft 2025-01-26 2402) apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
ed53a0d971926e arch/x86/kernel/alternative.c Peter Zijlstra 2022-03-08 2403
ebebe30794d38c arch/x86/kernel/alternative.c Pawan Gupta 2025-05-03 2404 ibt_restore(ibt);
ebebe30794d38c arch/x86/kernel/alternative.c Pawan Gupta 2025-05-03 2405
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Hi Juergen,
kernel test robot noticed the following build errors:
[auto build test ERROR on tip/x86/core]
[also build test ERROR on tip/sched/core kvm/queue kvm/next linus/master v6.18-rc7]
[cannot apply to kvm/linux-next next-20251127]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Juergen-Gross/x86-paravirt-Remove-not-needed-includes-of-paravirt-h/20251127-152054
base: tip/x86/core
patch link: https://lore.kernel.org/r/20251127070844.21919-22-jgross%40suse.com
patch subject: [PATCH v4 21/21] x86/pvlocks: Move paravirt spinlock functions into own header
config: i386-allnoconfig (https://download.01.org/0day-ci/archive/20251127/202511271704.MdDOB4pB-lkp@intel.com/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251127/202511271704.MdDOB4pB-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202511271704.MdDOB4pB-lkp@intel.com/
All errors (new ones prefixed by >>):
arch/x86/kernel/alternative.c: In function 'alternative_instructions':
>> arch/x86/kernel/alternative.c:2373:9: error: implicit declaration of function 'paravirt_set_cap'; did you mean 'paravirt_ret0'? [-Wimplicit-function-declaration]
2373 | paravirt_set_cap();
| ^~~~~~~~~~~~~~~~
| paravirt_ret0
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki