The original patch inverted the PTE unconditionally to avoid
L1TF-vulnerable PTEs, but Linux doesn't make this adjustment in 2-level
paging.
Adjust the logic to use the flip_protnone_guard() helper, which is a nop on
2-level paging but inverts the address bits in all other paging modes.
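For reference, the helper behaves roughly as sketched below (simplified from
arch/x86/include/asm/pgtable-invert.h and the 2-level stub in
arch/x86/include/asm/pgtable-2level.h; the headers remain authoritative):

	/* PAE and 64-bit paging (pgtable-invert.h), approximately: */
	static inline bool __pte_needs_invert(u64 val)
	{
		/* Non-zero but not-present entries carry an inverted PFN. */
		return val && !(val & _PAGE_PRESENT);
	}

	static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
	{
		/* Invert the PFN bits whenever present-ness changes. */
		if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
			val = (val & ~mask) | (~val & mask);
		return val;
	}

	/* 2-level paging (pgtable-2level.h): returns val unchanged. */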
This doesn't matter for the Xen aspect of the original change. Linux no
longer supports running 32bit PV under Xen, and Xen doesn't support running
any 32bit PV guests without using PAE paging.
Fixes: b505f1944535 ("x86/kfence: avoid writing L1TF-vulnerable PTEs")
Reported-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Closes: https://lore.kernel.org/lkml/CAKFNMokwjw68ubYQM9WkzOuH51wLznHpEOMSqtMoV1Rn9JV_gw@mail.gmail.com/
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Ryusuke Konishi <konishi.ryusuke@gmail.com>
CC: Alexander Potapenko <glider@google.com>
CC: Marco Elver <elver@google.com>
CC: Dmitry Vyukov <dvyukov@google.com>
CC: Thomas Gleixner <tglx@linutronix.de>
CC: Ingo Molnar <mingo@redhat.com>
CC: Borislav Petkov <bp@alien8.de>
CC: Dave Hansen <dave.hansen@linux.intel.com>
CC: x86@kernel.org
CC: "H. Peter Anvin" <hpa@zytor.com>
CC: Andrew Morton <akpm@linux-foundation.org>
CC: Jann Horn <jannh@google.com>
CC: kasan-dev@googlegroups.com
CC: linux-kernel@vger.kernel.org
---
arch/x86/include/asm/kfence.h | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
index acf9ffa1a171..40cf6a5d781d 100644
--- a/arch/x86/include/asm/kfence.h
+++ b/arch/x86/include/asm/kfence.h
@@ -42,7 +42,7 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
 	unsigned int level;
 	pte_t *pte = lookup_address(addr, &level);
-	pteval_t val;
+	pteval_t val, new;
 
 	if (WARN_ON(!pte || level != PG_LEVEL_4K))
 		return false;
@@ -57,11 +57,12 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 		return true;
 
 	/*
-	 * Otherwise, invert the entire PTE. This avoids writing out an
-	 * L1TF-vulnerable PTE (not present, without the high address bits
+	 * Otherwise, flip the Present bit, taking care to avoid writing an
+	 * L1TF-vulenrable PTE (not present, without the high address bits
 	 * set).
 	 */
-	set_pte(pte, __pte(~val));
+	new = val ^ _PAGE_PRESENT;
+	set_pte(pte, __pte(flip_protnone_guard(val, new, PTE_PFN_MASK)));
 
 	/*
 	 * If the page was protected (non-present) and we're making it
base-commit: fcb70a56f4d81450114034b2c61f48ce7444a0e2
--
2.39.5
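
To see concretely why the two transforms differ, the standalone sketch below
applies both the old ~val inversion and the new Present-bit flip to an example
64-bit PTE value. The constants and the local helper are illustrative
stand-ins modelled on _PAGE_PRESENT, PTE_PFN_MASK and the kernel's
flip_protnone_guard(), not the kernel definitions themselves:

	/* Standalone illustration; build with any C compiler. */
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_PRESENT	0x1ULL			/* stand-in for _PAGE_PRESENT */
	#define PFN_MASK	0x000ffffffffff000ULL	/* stand-in for PTE_PFN_MASK */

	static int needs_invert(uint64_t val)
	{
		/* Non-zero, not-present entries carry an inverted PFN. */
		return val && !(val & PAGE_PRESENT);
	}

	/*
	 * Modelled on flip_protnone_guard(): invert the PFN bits only when
	 * the entry changes between present and not-present.
	 */
	static uint64_t flip_guard(uint64_t oldval, uint64_t val, uint64_t mask)
	{
		if (needs_invert(oldval) != needs_invert(val))
			val = (val & ~mask) | (~val & mask);
		return val;
	}

	int main(void)
	{
		uint64_t val = 0x00000001234f5063ULL;	/* example present PTE */
		uint64_t new = val ^ PAGE_PRESENT;	/* flip only Present */

		printf("old, ~val:              %#018llx\n",
		       (unsigned long long)~val);
		printf("new, flip + PFN invert: %#018llx\n",
		       (unsigned long long)flip_guard(val, new, PFN_MASK));
		return 0;
	}

The old transform scrambles every flag and reserved bit; the new one clears
only the Present bit and, on PAE/64-bit paging, inverts just the PFN bits
(on 2-level paging the real helper leaves the value untouched).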
On 26/01/2026 9:06 pm, Andrew Cooper wrote:
> The original patch inverted the PTE unconditionally to avoid
> L1TF-vulnerable PTEs, but Linux doesn't make this adjustment in 2-level
> paging.
>
> Adjust the logic to use the flip_protnone_guard() helper, which is a nop on
> 2-level paging but inverts the address bits in all other paging modes.
>
> This doesn't matter for the Xen aspect of the original change. Linux no
> longer supports running 32bit PV under Xen, and Xen doesn't support running
> any 32bit PV guests without using PAE paging.
>
> Fixes: b505f1944535 ("x86/kfence: avoid writing L1TF-vulnerable PTEs")
> Reported-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
> Closes: https://lore.kernel.org/lkml/CAKFNMokwjw68ubYQM9WkzOuH51wLznHpEOMSqtMoV1Rn9JV_gw@mail.gmail.com/
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> CC: Ryusuke Konishi <konishi.ryusuke@gmail.com>
> CC: Alexander Potapenko <glider@google.com>
> CC: Marco Elver <elver@google.com>
> CC: Dmitry Vyukov <dvyukov@google.com>
> CC: Thomas Gleixner <tglx@linutronix.de>
> CC: Ingo Molnar <mingo@redhat.com>
> CC: Borislav Petkov <bp@alien8.de>
> CC: Dave Hansen <dave.hansen@linux.intel.com>
> CC: x86@kernel.org
> CC: "H. Peter Anvin" <hpa@zytor.com>
> CC: Andrew Morton <akpm@linux-foundation.org>
> CC: Jann Horn <jannh@google.com>
> CC: kasan-dev@googlegroups.com
> CC: linux-kernel@vger.kernel.org
> ---
> arch/x86/include/asm/kfence.h | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
> index acf9ffa1a171..40cf6a5d781d 100644
> --- a/arch/x86/include/asm/kfence.h
> +++ b/arch/x86/include/asm/kfence.h
> @@ -42,7 +42,7 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
> {
> unsigned int level;
> pte_t *pte = lookup_address(addr, &level);
> - pteval_t val;
> + pteval_t val, new;
>
> if (WARN_ON(!pte || level != PG_LEVEL_4K))
> return false;
> @@ -57,11 +57,12 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
> return true;
>
> /*
> - * Otherwise, invert the entire PTE. This avoids writing out an
> - * L1TF-vulnerable PTE (not present, without the high address bits
> + * Otherwise, flip the Present bit, taking care to avoid writing an
> + * L1TF-vulenrable PTE (not present, without the high address bits
> * set).
> */
> - set_pte(pte, __pte(~val));
> + new = val ^ _PAGE_PRESENT;
> + set_pte(pte, __pte(flip_protnone_guard(val, new, PTE_PFN_MASK)));
>
> /*
> * If the page was protected (non-present) and we're making it
>
> base-commit: fcb70a56f4d81450114034b2c61f48ce7444a0e2
And I apparently can't spell. I'll do a v2 immediately, seeing as this
is somewhat urgent.
~Andrew