[PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush

Pawan Gupta posted 10 patches 2 weeks, 3 days ago
There is a newer version of this series
[PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Pawan Gupta 2 weeks, 3 days ago
Adding more mitigation options at exit-to-userspace for VMSCAPE would
usually require a series of checks to decide which mitigation to use. In
this case, the mitigation is done by calling a function, which is decided
at boot. So, adding more feature flags and multiple checks can be avoided
by using static_call() to the mitigating function.

Replace the flag-based mitigation selector with a static_call(). This also
frees the existing X86_FEATURE_IBPB_EXIT_TO_USER.

Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
---
 arch/x86/Kconfig                     |  1 +
 arch/x86/include/asm/cpufeatures.h   |  2 +-
 arch/x86/include/asm/entry-common.h  |  7 +++----
 arch/x86/include/asm/nospec-branch.h |  3 +++
 arch/x86/kernel/cpu/bugs.c           | 13 ++++++++++++-
 arch/x86/kvm/x86.c                   |  2 +-
 6 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e2df1b147184..5b8def9ddb98 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2720,6 +2720,7 @@ config MITIGATION_TSA
 config MITIGATION_VMSCAPE
 	bool "Mitigate VMSCAPE"
 	depends on KVM
+	depends on HAVE_STATIC_CALL
 	default y
 	help
 	  Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index dbe104df339b..b4d529dd6d30 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -503,7 +503,7 @@
 #define X86_FEATURE_TSA_SQ_NO		(21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
 #define X86_FEATURE_TSA_L1_NO		(21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
 #define X86_FEATURE_CLEAR_CPU_BUF_VM	(21*32+13) /* Clear CPU buffers using VERW before VMRUN */
-#define X86_FEATURE_IBPB_EXIT_TO_USER	(21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
+/* Free */
 #define X86_FEATURE_ABMC		(21*32+15) /* Assignable Bandwidth Monitoring Counters */
 #define X86_FEATURE_MSR_IMM		(21*32+16) /* MSR immediate form instructions */
 #define X86_FEATURE_SGX_EUPDATESVN	(21*32+17) /* Support for ENCLS[EUPDATESVN] instruction */
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 78b143673ca7..783e7cb50cae 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -4,6 +4,7 @@
 
 #include <linux/randomize_kstack.h>
 #include <linux/user-return-notifier.h>
+#include <linux/static_call_types.h>
 
 #include <asm/nospec-branch.h>
 #include <asm/io_bitmap.h>
@@ -94,10 +95,8 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 	 */
 	choose_random_kstack_offset(rdtsc());
 
-	/* Avoid unnecessary reads of 'x86_predictor_flush_exit_to_user' */
-	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
-	    this_cpu_read(x86_predictor_flush_exit_to_user)) {
-		write_ibpb();
+	if (unlikely(this_cpu_read(x86_predictor_flush_exit_to_user))) {
+		static_call_cond(vmscape_predictor_flush)();
 		this_cpu_write(x86_predictor_flush_exit_to_user, false);
 	}
 }
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 0a55b1c64741..e45e49f1e0c9 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -542,6 +542,9 @@ static inline void indirect_branch_prediction_barrier(void)
 			    :: "rax", "rcx", "rdx", "memory");
 }
 
+#include <linux/static_call_types.h>
+DECLARE_STATIC_CALL(vmscape_predictor_flush, write_ibpb);
+
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
 DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 68e2df3e3bf5..b75eda114503 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -144,6 +144,17 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
  */
 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
+/*
+ * Controls CPU Fill buffer clear before VMenter. This is a subset of
+ * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
+ * mitigation is required.
+ */
+DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
+EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
+
+DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);
+EXPORT_STATIC_CALL_GPL(vmscape_predictor_flush);
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"mitigations: " fmt
 
@@ -3129,7 +3140,7 @@ static void __init vmscape_update_mitigation(void)
 static void __init vmscape_apply_mitigation(void)
 {
 	if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
-		setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
+		static_call_update(vmscape_predictor_flush, write_ibpb);
 }
 
 #undef pr_fmt
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 45d7cfedc507..5582056b2fa1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11463,7 +11463,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 * set for the CPU that actually ran the guest, and not the CPU that it
 	 * may migrate to.
 	 */
-	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
+	if (static_call_query(vmscape_predictor_flush))
 		this_cpu_write(x86_predictor_flush_exit_to_user, true);
 
 	/*

-- 
2.34.1
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Peter Zijlstra 2 weeks, 3 days ago
On Thu, Mar 19, 2026 at 08:41:54AM -0700, Pawan Gupta wrote:
> diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
> index 68e2df3e3bf5..b75eda114503 100644
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -144,6 +144,17 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
>   */
>  DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
>  
> +/*
> + * Controls CPU Fill buffer clear before VMenter. This is a subset of
> + * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
> + * mitigation is required.
> + */
> +DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
> +EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
> +
> +DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);
> +EXPORT_STATIC_CALL_GPL(vmscape_predictor_flush);

Does that want to be:

EXPORT_STATIC_CALL_TRAMP_GPL(vmscape_predictor_flush);

The distinction being that if you only export the trampoline, modules
can do the static_call() thing, but cannot do static_call_update().
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Pawan Gupta 2 weeks, 3 days ago
On Thu, Mar 19, 2026 at 09:58:02PM +0100, Peter Zijlstra wrote:
> On Thu, Mar 19, 2026 at 08:41:54AM -0700, Pawan Gupta wrote:
> > diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
> > index 68e2df3e3bf5..b75eda114503 100644
> > --- a/arch/x86/kernel/cpu/bugs.c
> > +++ b/arch/x86/kernel/cpu/bugs.c
> > @@ -144,6 +144,17 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
> >   */
> >  DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
> >  
> > +/*
> > + * Controls CPU Fill buffer clear before VMenter. This is a subset of
> > + * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
> > + * mitigation is required.
> > + */
> > +DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
> > +EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
> > +
> > +DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);
> > +EXPORT_STATIC_CALL_GPL(vmscape_predictor_flush);
> 
> Does that want to be:
> 
> EXPORT_STATIC_CALL_TRAMP_GPL(vmscape_predictor_flush);
> 
> The distinction being that if you only export the trampoline, modules
> can do the static_call() thing, but cannot do static_call_update().

Right, modules shouldn't be updating this static_call().

One caveat of not exporting the static key is that KVM uses the key to
determine whether the mitigation is deployed or not:

  vcpu_enter_guest()
  {
      ...

     /*
      * Mark this CPU as needing a branch predictor flush before running
      * userspace. Must be done before enabling preemption to ensure it gets
      * set for the CPU that actually ran the guest, and not the CPU that it
      * may migrate to.
      */
     if (static_call_query(vmscape_predictor_flush))
                   this_cpu_write(x86_predictor_flush_exit_to_user, true);

With _TRAMP, KVM complains:

 ERROR: modpost: "__SCK__vmscape_predictor_flush" [arch/x86/kvm/kvm.ko] undefined!

Probably one option is to somehow make sure that the key can be set to
__ro_after_init? I don't see a use case for modifying the static_call() after
boot.
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Peter Zijlstra 2 weeks, 3 days ago
On Thu, Mar 19, 2026 at 02:34:21PM -0700, Pawan Gupta wrote:
> On Thu, Mar 19, 2026 at 09:58:02PM +0100, Peter Zijlstra wrote:
> > On Thu, Mar 19, 2026 at 08:41:54AM -0700, Pawan Gupta wrote:
> > > diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
> > > index 68e2df3e3bf5..b75eda114503 100644
> > > --- a/arch/x86/kernel/cpu/bugs.c
> > > +++ b/arch/x86/kernel/cpu/bugs.c
> > > @@ -144,6 +144,17 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
> > >   */
> > >  DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
> > >  
> > > +/*
> > > + * Controls CPU Fill buffer clear before VMenter. This is a subset of
> > > + * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
> > > + * mitigation is required.
> > > + */
> > > +DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
> > > +EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
> > > +
> > > +DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);
> > > +EXPORT_STATIC_CALL_GPL(vmscape_predictor_flush);
> > 
> > Does that want to be:
> > 
> > EXPORT_STATIC_CALL_TRAMP_GPL(vmscape_predictor_flush);
> > 
> > The distinction being that if you only export the trampoline, modules
> > can do the static_call() thing, but cannot do static_call_update().
> 
> Right, modules shouldn't be updating this static_call().
> 
> One caveat of not exporting the static key is that KVM uses the key to
> determine whether the mitigation is deployed or not:
> 
>   vcpu_enter_guest()
>   {
>       ...
> 
>      /*
>       * Mark this CPU as needing a branch predictor flush before running
>       * userspace. Must be done before enabling preemption to ensure it gets
>       * set for the CPU that actually ran the guest, and not the CPU that it
>       * may migrate to.
>       */
>      if (static_call_query(vmscape_predictor_flush))
>                    this_cpu_write(x86_predictor_flush_exit_to_user, true);
> 
> With _TRAMP, KVM complains:
> 
>  ERROR: modpost: "__SCK__vmscape_predictor_flush" [arch/x86/kvm/kvm.ko] undefined!

Ah, tricky. Yeah, this would need to be solved differently. Perhaps wrap
this in a helper and export that?

Or use the below little thing and change it to
EXPORT_STATIC_CALL_FOR_MODULES(foo, "kvm"); or whatnot.

> Probably one option is to somehow make sure that the key can be set to
> __ro_after_init? I don't see a use case for modifying the static_call() after
> boot.

So we have __ro_after_init for static_branch, but we'd not done
it for static_call yet. It shouldn't be terribly difficult, just hasn't
been done. Not sure this is the moment to do so.


---
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index 78a77a4ae0ea..b610afd1ed55 100644
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -216,6 +216,9 @@ extern long __static_call_return0(void);
 #define EXPORT_STATIC_CALL_GPL(name)					\
 	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
 	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+#define EXPORT_STATIC_CALL_FOR_MODULES(name, mods)			\
+	EXPORT_SYMBOL_FOR_MODULES(STATIC_CALL_KEY(name), mods);		\
+	EXPORT_SYMBOL_FOR_MODULES(STATIC_CALL_TRAMP(name), mods)
 
 /* Leave the key unexported, so modules can't change static call targets: */
 #define EXPORT_STATIC_CALL_TRAMP(name)					\
@@ -276,6 +279,9 @@ extern long __static_call_return0(void);
 #define EXPORT_STATIC_CALL_GPL(name)					\
 	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
 	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+#define EXPORT_STATIC_CALL_FOR_MODULES(name, mods)			\
+	EXPORT_SYMBOL_FOR_MODULES(STATIC_CALL_KEY(name), mods);		\
+	EXPORT_SYMBOL_FOR_MODULES(STATIC_CALL_TRAMP(name), mods)
 
 /* Leave the key unexported, so modules can't change static call targets: */
 #define EXPORT_STATIC_CALL_TRAMP(name)					\
@@ -346,6 +352,8 @@ static inline int static_call_text_reserved(void *start, void *end)
 
 #define EXPORT_STATIC_CALL(name)	EXPORT_SYMBOL(STATIC_CALL_KEY(name))
 #define EXPORT_STATIC_CALL_GPL(name)	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))
+#define EXPORT_STATIC_CALL_FOR_MODULES(name, mods)			\
+	EXPORT_SYMBOL_FOR_MODULES(STATIC_CALL_KEY(name), mods)
 
 #endif /* CONFIG_HAVE_STATIC_CALL */
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Pawan Gupta 2 weeks, 3 days ago
On Thu, Mar 19, 2026 at 10:44:09PM +0100, Peter Zijlstra wrote:
...
> > With _TRAMP, KVM complains:
> > 
> >  ERROR: modpost: "__SCK__vmscape_predictor_flush" [arch/x86/kvm/kvm.ko] undefined!
> 
> Ah, tricky. Yeah, this would need to be solved differently. Perhaps wrap
> this in a helper and export that?

bool vmscape_mitigation_enabled(void)
{
       return unlikely(static_call_query(vmscape_predictor_flush));
}
EXPORT_SYMBOL_FOR_KVM(vmscape_mitigation_enabled);

This is definitely simpler option, but adds an exported function and an
unnecessary call to it.

> Or use the below little thing and change it to
> EXPORT_STATIC_CALL_FOR_MODULES(foo, "kvm"); or whatnot.
> 
> > Probably one option is to somehow make sure that the key can be set to
> > __ro_after_init? I don't see a use case for modifying the static_call() after
> > boot.
> 
> So we have __ro_after_init for static_branch, but we'd not done
> it for static_call yet. It shouldn't be terribly difficult, just hasn't
> been done. Not sure this is the moment to do so.
> 
> 
> ---
> diff --git a/include/linux/static_call.h b/include/linux/static_call.h
> index 78a77a4ae0ea..b610afd1ed55 100644
> --- a/include/linux/static_call.h
> +++ b/include/linux/static_call.h
> @@ -216,6 +216,9 @@ extern long __static_call_return0(void);
>  #define EXPORT_STATIC_CALL_GPL(name)					\
>  	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
>  	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
> +#define EXPORT_STATIC_CALL_FOR_MODULES(name, mods)			\
> +	EXPORT_SYMBOL_FOR_MODULES(STATIC_CALL_KEY(name), mods);		\
> +	EXPORT_SYMBOL_FOR_MODULES(STATIC_CALL_TRAMP(name), mods)
>  
>  /* Leave the key unexported, so modules can't change static call targets: */
>  #define EXPORT_STATIC_CALL_TRAMP(name)					\
> @@ -276,6 +279,9 @@ extern long __static_call_return0(void);
>  #define EXPORT_STATIC_CALL_GPL(name)					\
>  	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
>  	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
> +#define EXPORT_STATIC_CALL_FOR_MODULES(name, mods)			\
> +	EXPORT_SYMBOL_FOR_MODULES(STATIC_CALL_KEY(name), mods);		\
> +	EXPORT_SYMBOL_FOR_MODULES(STATIC_CALL_TRAMP(name), mods)
>  
>  /* Leave the key unexported, so modules can't change static call targets: */
>  #define EXPORT_STATIC_CALL_TRAMP(name)					\
> @@ -346,6 +352,8 @@ static inline int static_call_text_reserved(void *start, void *end)
>  
>  #define EXPORT_STATIC_CALL(name)	EXPORT_SYMBOL(STATIC_CALL_KEY(name))
>  #define EXPORT_STATIC_CALL_GPL(name)	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))
> +#define EXPORT_STATIC_CALL_FOR_MODULES(name, mods)			\
> +	EXPORT_SYMBOL_FOR_MODULES(STATIC_CALL_KEY(name), mods)
>  
>  #endif /* CONFIG_HAVE_STATIC_CALL */

This plus extending it to support EXPORT_STATIC_CALL_FOR_KVM() is probably
a better solution. Please let me know which one you prefer.

diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index a568d8e6f4e8..89c4964c8560 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -13,6 +13,9 @@
 	EXPORT_SYMBOL_FOR_MODULES(symbol, __stringify(KVM_SUB_MODULES))
 #define EXPORT_SYMBOL_FOR_KVM(symbol) \
 	EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm," __stringify(KVM_SUB_MODULES))
+#define EXPORT_STATIC_CALL_FOR_KVM(symbol) \
+	EXPORT_STATIC_CALL_FOR_MODULES(symbol ,"kvm," __stringify(KVM_SUB_MODULES))
+
 #else
 #define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol)
 /*
@@ -23,6 +26,7 @@
 #ifndef EXPORT_SYMBOL_FOR_KVM
 #if IS_MODULE(CONFIG_KVM)
 #define EXPORT_SYMBOL_FOR_KVM(symbol) EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm")
+#define EXPORT_STATIC_CALL_FOR_KVM(symbol) EXPORT_STATIC_CALL_FOR_MODULES(symbol ,"kvm")
 #else
 #define EXPORT_SYMBOL_FOR_KVM(symbol)
 #endif /* IS_MODULE(CONFIG_KVM) */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 5f102af4921f..0abdb54f7510 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -145,7 +145,7 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
 DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);
-EXPORT_STATIC_CALL_GPL(vmscape_predictor_flush);
+EXPORT_STATIC_CALL_FOR_KVM(vmscape_predictor_flush);
 
 #undef pr_fmt
 #define pr_fmt(fmt)	"mitigations: " fmt
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Peter Zijlstra 2 weeks, 3 days ago
On Thu, Mar 19, 2026 at 11:22:06PM -0700, Pawan Gupta wrote:

> This plus extending it to support EXPORT_STATIC_CALL_FOR_KVM() is probably
> a better solution. Please let me know which one you prefer.

The EXPORT twiddling will do I suppose. I'll try and not forget looking
at doing the RO static_call thing some time.
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Borislav Petkov 2 weeks, 2 days ago
On Fri, Mar 20, 2026 at 10:03:40AM +0100, Peter Zijlstra wrote:
> On Thu, Mar 19, 2026 at 11:22:06PM -0700, Pawan Gupta wrote:
> 
> > This plus extending it to support EXPORT_STATIC_CALL_FOR_KVM() is probably
> > a better solution. Please let me know which one you prefer.
> 
> The EXPORT twiddling will do I suppose. I'll try and not forget looking
> at doing the RO static_call thing some time.

Dunno, but exporting a static_call sounds really really wrong to me. No matter
where. As in: we're exporting the underlying inner workings of it and that
should be a big fat no-no.

So definitely +1 on exporting the helper instead.

I'd say...

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Pawan Gupta 2 weeks, 2 days ago
On Fri, Mar 20, 2026 at 12:31:34PM +0100, Borislav Petkov wrote:
> On Fri, Mar 20, 2026 at 10:03:40AM +0100, Peter Zijlstra wrote:
> > On Thu, Mar 19, 2026 at 11:22:06PM -0700, Pawan Gupta wrote:
> > 
> > > This plus extending it to support EXPORT_STATIC_CALL_FOR_KVM() is probably
> > > a better solution. Please let me know which one you prefer.
> > 
> > The EXPORT twiddling will do I suppose. I'll try and not forget looking
> > at doing the RO static_call thing some time.
> 
> Dunno, but exporting a static_call sounds really really wrong to me. No matter
> where. As in: we're exporting the underlying inner workings of it and that
> should be a big fat no-no.

I am curious, what problems do you anticipate? There are nearly 50
instances of static key being exported. For example:

$ git grep -A1 -n DEFINE_STATIC_KEY | grep -B 1 EXPORT_SYMBOL
  arch/arm64/kernel/mte.c:34:DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
  arch/arm64/kernel/mte.c-35-EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
  --
  arch/arm64/kernel/rsi.c:22:DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
  arch/arm64/kernel/rsi.c-23-EXPORT_SYMBOL(rsi_present);
  --
  arch/powerpc/kernel/firmware.c:25:DEFINE_STATIC_KEY_FALSE(kvm_guest);
  arch/powerpc/kernel/firmware.c-26-EXPORT_SYMBOL_GPL(kvm_guest);
  ...

Since EXPORT_STATIC_CALL_FOR_KVM() exports only to a module that needs it,
it limits the scope of the problem.

> So definitely +1 on exporting the helper instead.

The helper approach can be easily replaced with the static_call export
later. I can go with the helper for now.
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Borislav Petkov 1 week, 5 days ago
On Fri, Mar 20, 2026 at 11:23:08AM -0700, Pawan Gupta wrote:
> I am curious, what problems do you anticipate? There are nearly 50

What's easier when you need to change the underlying implementation: unexport
the static key and touch a bunch of places in the process or simply change the
accessor's body and all the callers don't notice a thing?

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Pawan Gupta 1 week, 5 days ago
On Tue, Mar 24, 2026 at 09:00:26PM +0100, Borislav Petkov wrote:
> On Fri, Mar 20, 2026 at 11:23:08AM -0700, Pawan Gupta wrote:
> > I am curious, what problems do you anticipate? There are nearly 50
> 
> What's easier when you need to change the underlying implementation: unexport
> the static key and touch a bunch of places in the process or simply change the
> accessor's body and all the callers don't notice a thing?

I see. I switched to accessor in v8 I sent today.
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Pawan Gupta 2 weeks, 3 days ago
On Thu, Mar 19, 2026 at 10:44:09PM +0100, Peter Zijlstra wrote:
> > With _TRAMP, KVM complains:
> > 
> >  ERROR: modpost: "__SCK__vmscape_predictor_flush" [arch/x86/kvm/kvm.ko] undefined!
> 
> Ah, tricky. Yeah, this would need to be solved differently. Perhaps wrap
> this in a helper and export that?

Wrapper seems to be a simpler option.

> Or use the below little thing and change it to
> EXPORT_STATIC_CALL_FOR_MODULES(foo, "kvm"); or whatnot.

This would be nice if there are other static calls that would benefit from
exporting to specific modules only. At this point, I don't know.
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by bot+bpf-ci@kernel.org 2 weeks, 3 days ago
> diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
> index 68e2df3e3bf58..b75eda114503a 100644
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -144,6 +144,17 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
>   */
>  DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
>
> +/*
> + * Controls CPU Fill buffer clear before VMenter. This is a subset of
> + * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
> + * mitigation is required.
> + */
> +DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
> +EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);

cpu_buf_vm_clear is defined and exported here but appears to have no
users. There is no declaration in any header, and nothing in this
patch series references it.

The comment describes CPU fill buffer clearing before VMenter, which
seems unrelated to the predictor flush static_call conversion that
this patch is doing. Was this included by mistake, or is it intended
for a follow-up series?

> +
> +DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);
> +EXPORT_STATIC_CALL_GPL(vmscape_predictor_flush);

[ ... ]


---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md

CI run summary: https://github.com/kernel-patches/bpf/actions/runs/23303796353
Re: [PATCH v7 07/10] x86/vmscape: Use static_call() for predictor flush
Posted by Pawan Gupta 2 weeks, 3 days ago
On Thu, Mar 19, 2026 at 04:56:52PM +0000, bot+bpf-ci@kernel.org wrote:
> > diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
> > index 68e2df3e3bf58..b75eda114503a 100644
> > --- a/arch/x86/kernel/cpu/bugs.c
> > +++ b/arch/x86/kernel/cpu/bugs.c
> > @@ -144,6 +144,17 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
> >   */
> >  DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
> >
> > +/*
> > + * Controls CPU Fill buffer clear before VMenter. This is a subset of
> > + * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
> > + * mitigation is required.
> > + */
> > +DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
> > +EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
> 
> cpu_buf_vm_clear is defined and exported here but appears to have no
> users. There is no declaration in any header, and nothing in this
> patch series references it.
> 
> The comment describes CPU fill buffer clearing before VMenter, which
> seems unrelated to the predictor flush static_call conversion that
> this patch is doing. Was this included by mistake, or is it intended
> for a follow-up series?

Agh, that is introduced by a wrong conflict resolution during rebase,
removed now.

> > +
> > +DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);
> > +EXPORT_STATIC_CALL_GPL(vmscape_predictor_flush);