From: Sohil Mehta <sohil.mehta@intel.com>
Linear Address Space Separation (LASS) is a security feature that
aims to prevent malicious virtual address space accesses across the
user/kernel mode boundary.
Such mode-based access protection already exists today with paging and
features such as SMEP and SMAP. However, to enforce these protections,
the processor must traverse the paging structures in memory. Malicious
software can use the timing of this traversal to infer details about the
paging structures, which in turn can reveal the layout of kernel memory.
The LASS mechanism provides the same mode-based protections as paging
but without traversing the paging structures. Because the protections
enforced by LASS are applied before paging, software will not be able to
derive paging-based timing information from the various caching
structures such as the TLBs, mid-level caches, page walker, data caches,
etc.
LASS enforcement relies on the typical kernel implementation to divide
the 64-bit virtual address space into two halves:
Addr[63]=0 -> User address space
Addr[63]=1 -> Kernel address space
Any data access or code execution across address spaces typically
results in a #GP fault.
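For illustration only (the helper below is hypothetical and not part of
this patch), the split amounts to a check on bit 63 of the linear
address, which LASS performs in hardware before any paging structures
are consulted:

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical sketch; LASS does this classification in hardware. */
	static inline bool addr_is_kernel_half(uint64_t addr)
	{
		/* Bit 63 selects the kernel half of the linear address space. */
		return (addr >> 63) & 1;
	}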
The LASS enforcement for kernel data accesses depends on CR4.SMAP being
set, and, like SMAP, it can be suspended by toggling the RFLAGS.AC bit.
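Concretely, a kernel code path that must legitimately touch a
lower-half (user-space) linear address can bracket the access with the
lass_stac()/lass_clac() helpers added below. A usage sketch, where
do_lower_half_access() is a placeholder rather than a real function:

	lass_stac();	/* set RFLAGS.AC: suspend LASS (and SMAP) enforcement */
	ret = do_lower_half_access(addr);	/* the legitimate access */
	lass_clac();	/* clear RFLAGS.AC: re-enable enforcement */

On CPUs without LASS, alternative() patches the helpers out to NOPs, so
the AC flag is never flipped there.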
Define the CPU feature bit to enumerate this feature and record its
dependency on SMAP in the CPUID dependency table.
Co-developed-by: Yian Chen <yian.chen@intel.com>
Signed-off-by: Yian Chen <yian.chen@intel.com>
Signed-off-by: Sohil Mehta <sohil.mehta@intel.com>
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
---
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/include/asm/disabled-features.h | 4 +++-
arch/x86/include/asm/smap.h | 18 ++++++++++++++++++
arch/x86/include/uapi/asm/processor-flags.h | 2 ++
arch/x86/kernel/cpu/cpuid-deps.c | 1 +
tools/arch/x86/include/asm/cpufeatures.h | 1 +
6 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index ea33439a5d00..acb3ccea2bd7 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -319,6 +319,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* "avx_vnni" AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* "avx512_bf16" AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_LASS (12*32+ 6) /* "lass" Linear Address Space Separation */
#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* CMPccXADD instructions */
#define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* Intel Architectural PerfMon Extension */
#define X86_FEATURE_FZRM (12*32+10) /* Fast zero-length REP MOVSB */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index c492bdc97b05..76c7d362af94 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -22,12 +22,14 @@
# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
# define DISABLE_PCID 0
+# define DISABLE_LASS 0
#else
# define DISABLE_VME 0
# define DISABLE_K6_MTRR 0
# define DISABLE_CYRIX_ARR 0
# define DISABLE_CENTAUR_MCR 0
# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
+# define DISABLE_LASS (1<<(X86_FEATURE_LASS & 31))
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -146,7 +148,7 @@
#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
DISABLE_CALL_DEPTH_TRACKING|DISABLE_USER_SHSTK)
#define DISABLED_MASK12 (DISABLE_FRED|DISABLE_LAM)
-#define DISABLED_MASK13 0
+#define DISABLED_MASK13 (DISABLE_LASS)
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index bab490379c65..8cb6f004800b 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -27,6 +27,12 @@
#else /* __ASSEMBLY__ */
+/*
+ * The CLAC/STAC instructions toggle enforcement of X86_FEATURE_SMAP.
+ * Add dedicated lass_*() variants for cases that are necessitated by
+ * LASS (X86_FEATURE_LASS) enforcement, which helps readability and
+ * avoids AC flag flipping on CPUs that don't support LASS.
+ */
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
@@ -39,6 +45,18 @@ static __always_inline void stac(void)
alternative("", __ASM_STAC, X86_FEATURE_SMAP);
}
+static __always_inline void lass_clac(void)
+{
+ /* Note: a barrier is implicit in alternative() */
+ alternative("", __ASM_CLAC, X86_FEATURE_LASS);
+}
+
+static __always_inline void lass_stac(void)
+{
+ /* Note: a barrier is implicit in alternative() */
+ alternative("", __ASM_STAC, X86_FEATURE_LASS);
+}
+
static __always_inline unsigned long smap_save(void)
{
unsigned long flags;
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index f1a4adc78272..81d0c8bf1137 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -136,6 +136,8 @@
#define X86_CR4_PKE _BITUL(X86_CR4_PKE_BIT)
#define X86_CR4_CET_BIT 23 /* enable Control-flow Enforcement Technology */
#define X86_CR4_CET _BITUL(X86_CR4_CET_BIT)
+#define X86_CR4_LASS_BIT 27 /* enable Linear Address Space Separation support */
+#define X86_CR4_LASS _BITUL(X86_CR4_LASS_BIT)
#define X86_CR4_LAM_SUP_BIT 28 /* LAM for supervisor pointers */
#define X86_CR4_LAM_SUP _BITUL(X86_CR4_LAM_SUP_BIT)
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index 8bd84114c2d9..3f73c4b03348 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -83,6 +83,7 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AMX_TILE, X86_FEATURE_XFD },
{ X86_FEATURE_SHSTK, X86_FEATURE_XSAVES },
{ X86_FEATURE_FRED, X86_FEATURE_LKGS },
+ { X86_FEATURE_LASS, X86_FEATURE_SMAP },
{}
};
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 23698d0f4bb4..538930159f9f 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -319,6 +319,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* "avx_vnni" AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* "avx512_bf16" AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_LASS (12*32+ 6) /* "lass" Linear Address Space Separation */
#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* CMPccXADD instructions */
#define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* Intel Architectural PerfMon Extension */
#define X86_FEATURE_FZRM (12*32+10) /* Fast zero-length REP MOVSB */
--
2.45.2
On Mon, Oct 28, 2024 at 06:07:49PM +0200, Alexander Shishkin wrote:
> From: Sohil Mehta <sohil.mehta@intel.com>

[...]

> Any data access or code execution across address spaces typically
> results in a #GP fault.

SDM mentions #SS for LASS violations on stack instructions. Do we care
to provide a sensible error message on #SS as we do for #GP?

[...]

> +/*
> + * The CLAC/STAC instructions toggle enforcement of X86_FEATURE_SMAP.
> + * Add dedicated lass_*() variants for cases that are necessitated by
> + * LASS (X86_FEATURE_LASS) enforcement, which helps readability and
> + * avoids AC flag flipping on CPUs that don't support LASS.
> + */

Maybe add a new line here? The comment is for the group of helpers, not
for clac() specifically.

> static __always_inline void clac(void)
> {
> 	/* Note: a barrier is implicit in alternative() */

[...]

--
Kiryl Shutsemau / Kirill A. Shutemov
>> +/*
>> + * The CLAC/STAC instructions toggle enforcement of X86_FEATURE_SMAP.
>> + * Add dedicated lass_*() variants for cases that are necessitated by

It would be useful to know when such a situation is necessitated. For
example, text_poke_mem* doesn't get flagged by SMAP but only by LASS. I
guess the answer is related to paging, but it would be useful to
describe it in a commit message or a comment.

I am imagining a scenario where someone needs to use one of these
stac()/clac() pairs but isn't sure which one to use. Both of them would
seem to work, but one is better suited than the other.

>> + * LASS (X86_FEATURE_LASS) enforcement, which helps readability and
>> + * avoids AC flag flipping on CPUs that don't support LASS.
>> + */
>
> Maybe add a new line here? The comment is for the group of helpers, not
> for clac() specifically.
>

Also, it might be better to move the common text "/* Note: a barrier is
implicit in alternative() */" to the above comment as well. Repeating it
4 times makes it unnecessarily distracting to read the code.

>> static __always_inline void clac(void)
>> {
>> 	/* Note: a barrier is implicit in alternative() */

[...]
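If both suggestions were applied, the helpers might read roughly as
follows (an illustrative sketch of the reviewers' suggestions, not an
actual patch):

	/*
	 * The CLAC/STAC instructions toggle enforcement of X86_FEATURE_SMAP.
	 * The lass_*() variants below cover cases that are only flagged by
	 * LASS (X86_FEATURE_LASS) enforcement; keying them on the LASS
	 * feature avoids AC flag flipping on CPUs that don't support LASS.
	 *
	 * Note: a barrier is implicit in alternative() in all four helpers.
	 */

	static __always_inline void clac(void)
	{
		alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
	}

	static __always_inline void stac(void)
	{
		alternative("", __ASM_STAC, X86_FEATURE_SMAP);
	}

	static __always_inline void lass_clac(void)
	{
		alternative("", __ASM_CLAC, X86_FEATURE_LASS);
	}

	static __always_inline void lass_stac(void)
	{
		alternative("", __ASM_STAC, X86_FEATURE_LASS);
	}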