[PATCH v12 06/15] kasan: arm64: x86: Make page_to_virt() KASAN aware

Maciej Wieczor-Retman posted 15 patches 2 days, 9 hours ago
[PATCH v12 06/15] kasan: arm64: x86: Make page_to_virt() KASAN aware
Posted by Maciej Wieczor-Retman 2 days, 9 hours ago
From: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>

A special page_to_virt() implementation is needed if an architecture
wants to enable KASAN software tag-based mode.

Make page_to_virt() KASAN aware in arch-independent code so
architectures implementing the software tag-based mode don't have to
define their own implementations anymore. When KASAN is disabled or for
architectures that don't implement the software tag-based mode
page_to_virt() will be optimized to its previous form.

Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
---
Changelog v11:
- Redo the patch to work on the page_to_virt macro. Split off changes
  about virt to phys conversion to an earlier patch.
- Remove Alexander's acked-by due to bigger changes.

Changelog v7:
- Add Alexander's Acked-by tag.

Changelog v5:
- Move __tag_reset() calls into __phys_addr_nodebug() and
  __virt_addr_valid() instead of calling it on the arguments of higher
  level functions.

Changelog v4:
- Simplify page_to_virt() by removing pointless casts.
- Remove change in __is_canonical_address() because it's taken care of
  in a later patch due to a LAM compatible definition of canonical.

 arch/arm64/include/asm/memory.h |  5 -----
 include/linux/kasan.h           | 10 ++++++++++
 include/linux/mm.h              |  5 ++++-
 3 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 875c0bd0d85a..39dd0071d3ec 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -411,11 +411,6 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
  */
 
 #if defined(CONFIG_DEBUG_VIRTUAL)
-#define page_to_virt(x)	({						\
-	__typeof__(x) __page = x;					\
-	void *__addr = __va(page_to_phys(__page));			\
-	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
-})
 #define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
 #else
 #define page_to_virt(x)	({						\
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index e18908f3ad6e..271c59e9f422 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -527,6 +527,11 @@ static inline void *kasan_reset_tag(const void *addr)
 	return (void *)arch_kasan_reset_tag(addr);
 }
 
+static inline void *kasan_set_tag(const void *addr, u8 tag)
+{
+	return (void *)arch_kasan_set_tag(addr, tag);
+}
+
 /**
  * kasan_report - print a report about a bad memory access detected by KASAN
  * @addr: address of the bad access
@@ -544,6 +549,11 @@ static inline void *kasan_reset_tag(const void *addr)
 	return (void *)addr;
 }
 
+static inline void *kasan_set_tag(const void *addr, u8 tag)
+{
+	return (void *)addr;
+}
+
 #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/
 
 #ifdef CONFIG_KASAN_HW_TAGS
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 09044934dda8..f234650a4edf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -117,7 +117,11 @@ extern int mmap_rnd_compat_bits __read_mostly;
 #endif
 
 #ifndef page_to_virt
-#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
+/* Evaluate the argument once: it may have side effects, and __typeof__
+ * preserves any const qualifier (matches the arm64 macro this replaces). */
+#define page_to_virt(x) ({							\
+	__typeof__(x) __page = (x);						\
+	void *__addr = __va(PFN_PHYS(page_to_pfn(__page)));			\
+	kasan_set_tag(__addr, page_kasan_tag(__page));				\
+})
 #endif
 
 #ifndef lm_alias
-- 
2.53.0