From: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
With the KASAN software tag-based mode, arbitrary kernel pointers can be
tagged, and any place where pointer arithmetic is used to convert a
virtual address into a physical one can raise errors if the virtual
address is tagged. To reduce the number of lines added, it's beneficial
to first move the similar pointer arithmetic into a helper.
Reset the tag in __phys_addr_kernel_start(), which is called by all the
relevant virtual-to-physical address helpers.
Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
---
Changelog v11:
- Split off this patch from the tag reset patch. Move x -
__START_KERNEL_map to a helper so fewer lines are added.
arch/x86/include/asm/page_64.h | 11 +++++++++--
arch/x86/mm/physaddr.c | 4 ++--
2 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 1895c207f629..9260a0d693d6 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -7,6 +7,7 @@
#ifndef __ASSEMBLER__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
+#include <asm/kasan.h>
#include <linux/kmsan-checks.h>
#include <linux/mmdebug.h>
@@ -20,9 +21,15 @@ extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;
extern unsigned long direct_map_physmem_end;
+static __always_inline unsigned long __phys_addr_kernel_start(unsigned long x)
+{
+ x = __tag_reset(x);
+ return x - __START_KERNEL_map;
+}
+
static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
- unsigned long y = x - __START_KERNEL_map;
+ unsigned long y = __phys_addr_kernel_start(x);
/* use the carry flag to determine if x was < __START_KERNEL_map */
x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
@@ -38,7 +45,7 @@ extern unsigned long __phys_addr(unsigned long);
static inline unsigned long __phys_addr_symbol(unsigned long x)
{
- unsigned long y = x - __START_KERNEL_map;
+ unsigned long y = __phys_addr_kernel_start(x);
/* only check upper bounds since lower bounds will trigger carry */
VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index 8d31c6b9e184..682e23541228 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -14,7 +14,7 @@
#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
- unsigned long y = x - __START_KERNEL_map;
+ unsigned long y = __phys_addr_kernel_start(x);
/* use the carry flag to determine if x was < __START_KERNEL_map */
if (unlikely(x > y)) {
@@ -35,7 +35,7 @@ EXPORT_SYMBOL(__phys_addr);
bool __virt_addr_valid(unsigned long x)
{
- unsigned long y = x - __START_KERNEL_map;
+ unsigned long y = __phys_addr_kernel_start(x);
/* use the carry flag to determine if x was < __START_KERNEL_map */
if (unlikely(x > y)) {
--
2.53.0