A global ASID is allocated for the lifetime of a process.
Free the global ASID at process exit time.
Signed-off-by: Rik van Riel <riel@surriel.com>
---
arch/x86/include/asm/mmu_context.h | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index a2c70e495b1b..b47ac6d270e6 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -163,6 +163,14 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1;
}
#endif
+
+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
+ mm->context.global_asid = 0;
+ mm->context.asid_transition = false;
+ }
+#endif
+
mm_reset_untag_mask(mm);
init_new_context_ldt(mm);
return 0;
@@ -172,6 +180,10 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void destroy_context(struct mm_struct *mm)
{
destroy_context_ldt(mm);
+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ mm_free_global_asid(mm);
+#endif
}
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
--
2.47.1
On Tue, Feb 25, 2025 at 10:00:44PM -0500, Rik van Riel wrote:
> A global ASID is allocated for the lifetime of a process.
>
> Free the global ASID at process exit time.
>
> Signed-off-by: Rik van Riel <riel@surriel.com>
> ---
> arch/x86/include/asm/mmu_context.h | 12 ++++++++++++
> 1 file changed, 12 insertions(+)
So I don't like the ifdeffery and tried removing it, see below.
So I added helpers.
Then I entered the include hell.
And then I caught a bug with the DISABLED_FEATURE stuff.
Anyway, see below. It builds the allno-, defconfig and mine I use on this
machine so more testing is definitely needed. But the end result looks a lot
better now.
I'll integrate the other changes into the respective patches and I probably
should push my branch somewhere as we have accumulated a lot of changes by now
so tracking them over mail is getting not that easy anymore.
Will do so on Monday.
diff --git a/arch/x86/Kconfig.cpufeatures b/arch/x86/Kconfig.cpufeatures
index f9af51205f07..3207e5546438 100644
--- a/arch/x86/Kconfig.cpufeatures
+++ b/arch/x86/Kconfig.cpufeatures
@@ -196,6 +196,6 @@ config X86_DISABLED_FEATURE_SEV_SNP
def_bool y
depends on !KVM_AMD_SEV
-config X86_DISABLED_FEATURE_BROADCAST_TLB_FLUSH
+config X86_DISABLED_FEATURE_INVLPGB
def_bool y
depends on !X86_BROADCAST_TLB_FLUSH
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index a2c70e495b1b..2398058b6e83 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -2,7 +2,6 @@
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H
-#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>
@@ -13,6 +12,7 @@
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/gsseg.h>
+#include <asm/desc.h>
extern atomic64_t last_mm_ctx_id;
@@ -139,6 +139,9 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+#define mm_init_global_asid mm_init_global_asid
+extern void mm_init_global_asid(struct mm_struct *mm);
+
extern void mm_free_global_asid(struct mm_struct *mm);
/*
@@ -163,6 +166,8 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1;
}
#endif
+
+ mm_init_global_asid(mm);
mm_reset_untag_mask(mm);
init_new_context_ldt(mm);
return 0;
@@ -172,6 +177,7 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void destroy_context(struct mm_struct *mm)
{
destroy_context_ldt(mm);
+ mm_free_global_asid(mm);
}
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 37b735dcf025..01d8a152f04a 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -261,6 +261,14 @@ static inline u16 mm_global_asid(struct mm_struct *mm)
return asid;
}
+static inline void mm_init_global_asid(struct mm_struct *mm)
+{
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
+ mm->context.global_asid = 0;
+ mm->context.asid_transition = false;
+ }
+}
+
static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
{
/*
@@ -272,7 +280,7 @@ static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
smp_store_release(&mm->context.global_asid, asid);
}
-static inline bool in_asid_transition(struct mm_struct *mm)
+static inline bool mm_in_asid_transition(struct mm_struct *mm)
{
if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
return false;
@@ -280,19 +288,10 @@ static inline bool in_asid_transition(struct mm_struct *mm)
return mm && READ_ONCE(mm->context.asid_transition);
}
#else
-static inline u16 mm_global_asid(struct mm_struct *mm)
-{
- return 0;
-}
-
-static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
-{
-}
-
-static inline bool in_asid_transition(struct mm_struct *mm)
-{
- return false;
-}
+static inline u16 mm_global_asid(struct mm_struct *mm) { return 0; }
+static inline void mm_init_global_asid(struct mm_struct *mm) { }
+static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) { }
+static inline bool mm_in_asid_transition(struct mm_struct *mm) { return false; }
#endif
#ifdef CONFIG_PARAVIRT
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index cb43ab08ea4a..c2167b331bbe 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -396,6 +396,9 @@ static void use_global_asid(struct mm_struct *mm)
void mm_free_global_asid(struct mm_struct *mm)
{
+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ return;
+
if (!mm_global_asid(mm))
return;
@@ -1161,7 +1164,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
* up on the new contents of what used to be page tables, while
* doing a speculative memory access.
*/
- if (info->freed_tables || in_asid_transition(info->mm))
+ if (info->freed_tables || mm_in_asid_transition(info->mm))
on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
else
on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
--
2.43.0
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Sun, 2025-03-02 at 13:38 +0100, Borislav Petkov wrote:
> On Tue, Feb 25, 2025 at 10:00:44PM -0500, Rik van Riel wrote:
> > A global ASID is allocated for the lifetime of a process.
> >
> > Free the global ASID at process exit time.
> >
> > Signed-off-by: Rik van Riel <riel@surriel.com>
> > ---
> > arch/x86/include/asm/mmu_context.h | 12 ++++++++++++
> > 1 file changed, 12 insertions(+)
>
> So I don't like the ifdeffery and tried removing it, see below.
>
> So I added helpers.
>
> Then I entered the include hell.
>
> And then I caught a bug with the DISABLED_FEATURE stuff.
I've been there. Repeatedly :)
Thank you for these changes, it does look better than before now.
--
All Rights Reversed.
On Sun, Mar 02, 2025 at 08:53:10AM -0500, Rik van Riel wrote:
> I've been there. Repeatedly :)
Yap, it is. And despite all the compile-time disabling fun, clang still can't
do proper DCE and complains about the inline asm in __invlpgb() using a u64 on
32-bit builds.
So I did the fix below, ontop and with that randconfig builds all pass fine.
What I have so far is here:
https://web.git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git/log/?h=tip-x86-cpu-tlbi
Lemme go through the rest of your patches now.
Thx.
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 5375145eb959..04f2c6f4cee3 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -29,6 +29,7 @@ static inline void invlpg(unsigned long addr)
}
+#ifdef CONFIG_BROADCAST_TLB_FLUSH
/*
* INVLPGB does broadcast TLB invalidation across all the CPUs in the system.
*
@@ -74,6 +75,14 @@ static inline void __tlbsync(void)
/* TLBSYNC: supported in binutils >= 0.36. */
asm volatile(".byte 0x0f, 0x01, 0xff" ::: "memory");
}
+#else
+/* Some compilers simply can't do DCE */
+static inline void __invlpgb(unsigned long asid, unsigned long pcid,
+ unsigned long addr, u16 nr_pages,
+ bool pmd_stride, u8 flags) { }
+
+static inline void __tlbsync(void) { }
+#endif
/*
* INVLPGB can be targeted by virtual address, PCID, ASID, or any combination
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
The following commit has been merged into the x86/core branch of tip:
Commit-ID: c9826613a9cbbf889b1fec6b7b943fe88ec2c212
Gitweb: https://git.kernel.org/tip/c9826613a9cbbf889b1fec6b7b943fe88ec2c212
Author: Rik van Riel <riel@surriel.com>
AuthorDate: Tue, 25 Feb 2025 22:00:44 -05:00
Committer: Ingo Molnar <mingo@kernel.org>
CommitterDate: Wed, 19 Mar 2025 11:12:29 +01:00
x86/mm: Add global ASID process exit helpers
A global ASID is allocated for the lifetime of a process. Free the global ASID
at process exit time.
[ bp: Massage, create helpers, hide details inside them. ]
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20250226030129.530345-10-riel@surriel.com
---
arch/x86/include/asm/mmu_context.h | 8 +++++++-
arch/x86/include/asm/tlbflush.h | 9 +++++++++
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index a2c70e4..2398058 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -2,7 +2,6 @@
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H
-#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>
@@ -13,6 +12,7 @@
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/gsseg.h>
+#include <asm/desc.h>
extern atomic64_t last_mm_ctx_id;
@@ -139,6 +139,9 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+#define mm_init_global_asid mm_init_global_asid
+extern void mm_init_global_asid(struct mm_struct *mm);
+
extern void mm_free_global_asid(struct mm_struct *mm);
/*
@@ -163,6 +166,8 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1;
}
#endif
+
+ mm_init_global_asid(mm);
mm_reset_untag_mask(mm);
init_new_context_ldt(mm);
return 0;
@@ -172,6 +177,7 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void destroy_context(struct mm_struct *mm)
{
destroy_context_ldt(mm);
+ mm_free_global_asid(mm);
}
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 1f61a39..e6c3be0 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -261,6 +261,14 @@ static inline u16 mm_global_asid(struct mm_struct *mm)
return asid;
}
+static inline void mm_init_global_asid(struct mm_struct *mm)
+{
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
+ mm->context.global_asid = 0;
+ mm->context.asid_transition = false;
+ }
+}
+
static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
{
/*
@@ -281,6 +289,7 @@ static inline bool mm_in_asid_transition(struct mm_struct *mm)
}
#else
static inline u16 mm_global_asid(struct mm_struct *mm) { return 0; }
+static inline void mm_init_global_asid(struct mm_struct *mm) { }
static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) { }
static inline bool mm_in_asid_transition(struct mm_struct *mm) { return false; }
#endif /* CONFIG_BROADCAST_TLB_FLUSH */
The following commit has been merged into the x86/mm branch of tip:
Commit-ID: 26a3633bfddfcdd57f5f2e3d157806d68aff5ac4
Gitweb: https://git.kernel.org/tip/26a3633bfddfcdd57f5f2e3d157806d68aff5ac4
Author: Rik van Riel <riel@surriel.com>
AuthorDate: Tue, 25 Feb 2025 22:00:44 -05:00
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Wed, 05 Mar 2025 17:19:53 +01:00
x86/mm: Add global ASID process exit helpers
A global ASID is allocated for the lifetime of a process. Free the global ASID
at process exit time.
[ bp: Massage, create helpers, hide details inside them. ]
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20250226030129.530345-10-riel@surriel.com
---
arch/x86/include/asm/mmu_context.h | 8 +++++++-
arch/x86/include/asm/tlbflush.h | 9 +++++++++
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index a2c70e4..2398058 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -2,7 +2,6 @@
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H
-#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>
@@ -13,6 +12,7 @@
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/gsseg.h>
+#include <asm/desc.h>
extern atomic64_t last_mm_ctx_id;
@@ -139,6 +139,9 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+#define mm_init_global_asid mm_init_global_asid
+extern void mm_init_global_asid(struct mm_struct *mm);
+
extern void mm_free_global_asid(struct mm_struct *mm);
/*
@@ -163,6 +166,8 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1;
}
#endif
+
+ mm_init_global_asid(mm);
mm_reset_untag_mask(mm);
init_new_context_ldt(mm);
return 0;
@@ -172,6 +177,7 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void destroy_context(struct mm_struct *mm)
{
destroy_context_ldt(mm);
+ mm_free_global_asid(mm);
}
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 1f61a39..e6c3be0 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -261,6 +261,14 @@ static inline u16 mm_global_asid(struct mm_struct *mm)
return asid;
}
+static inline void mm_init_global_asid(struct mm_struct *mm)
+{
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
+ mm->context.global_asid = 0;
+ mm->context.asid_transition = false;
+ }
+}
+
static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
{
/*
@@ -281,6 +289,7 @@ static inline bool mm_in_asid_transition(struct mm_struct *mm)
}
#else
static inline u16 mm_global_asid(struct mm_struct *mm) { return 0; }
+static inline void mm_init_global_asid(struct mm_struct *mm) { }
static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) { }
static inline bool mm_in_asid_transition(struct mm_struct *mm) { return false; }
#endif /* CONFIG_BROADCAST_TLB_FLUSH */
© 2016 - 2025 Red Hat, Inc.