Performance when clearing with string instructions (x86-64-stosq and
similar) can vary significantly based on the chunk-size used.
$ perf bench mem memset -k 4KB -s 4GB -f x86-64-stosq
# Running 'mem/memset' benchmark:
# function 'x86-64-stosq' (movsq-based memset() in arch/x86/lib/memset_64.S)
# Copying 4GB bytes ...
13.748208 GB/sec
$ perf bench mem memset -k 2MB -s 4GB -f x86-64-stosq
# Running 'mem/memset' benchmark:
# function 'x86-64-stosq' (movsq-based memset() in arch/x86/lib/memset_64.S)
# Copying 4GB bytes ...
15.067900 GB/sec
$ perf bench mem memset -k 1GB -s 4GB -f x86-64-stosq
# Running 'mem/memset' benchmark:
# function 'x86-64-stosq' (movsq-based memset() in arch/x86/lib/memset_64.S)
# Copying 4GB bytes ...
38.104311 GB/sec
(All measurements on AMD Milan.)
Going from a chunk-size of 4KB to 1GB, performance improves from
13.7 GB/sec to 38.1 GB/sec. The improvement at a chunk-size of 2MB is
more modest, but it is still worth adding a clear_page() variant that
can handle contiguous page extents.
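To give a sense of the intended use, a caller that owns a contiguous
2MB kernel mapping could clear it with a single call instead of looping
over clear_page(). The sketch below is purely illustrative and not part
of this patch; clear_2mb_extent() and kaddr are made-up names:

  /*
   * Illustrative only: clear a 2MB-aligned, contiguous kernel mapping
   * in one call so the rep-string implementation sees the whole extent
   * rather than one 4KB page at a time.
   */
  #include <linux/sizes.h>        /* SZ_2M */
  #include <asm/page.h>           /* PAGE_SIZE, clear_pages() */

  static inline void clear_2mb_extent(void *kaddr)
  {
          clear_pages(kaddr, SZ_2M / PAGE_SIZE);
  }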
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
arch/x86/include/asm/page_32.h | 15 +++++++++++----
arch/x86/include/asm/page_64.h | 24 ++++++++++++++++--------
2 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index a8ff43bb9652..561f416b61e5 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -18,16 +18,23 @@ extern unsigned long __phys_addr(unsigned long);
#include <linux/string.h>
/*
- * clear_page() - clear kernel page.
- * @page: address of kernel page
+ * clear_pages() - clear kernel page range.
+ * @addr: start address of page range
+ * @npages: number of pages
*
+ * Assumes that (@addr, +@npages) references a kernel region.
* Does absolutely no exception handling.
*/
-static inline void clear_page(void *page)
+static inline void clear_pages(void *addr, u64 npages)
{
- memset(page, 0, PAGE_SIZE);
+ for (u64 i = 0; i < npages; i++)
+ memset(addr + i * PAGE_SIZE, 0, PAGE_SIZE);
}
+static inline void clear_page(void *addr)
+{
+ clear_pages(addr, 1);
+}
static inline void copy_page(void *to, void *from)
{
memcpy(to, from, PAGE_SIZE);
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 28b9adbc5f00..5625d616bd00 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -43,8 +43,11 @@ extern unsigned long __phys_addr_symbol(unsigned long);
void memzero_page_aligned_unrolled(void *addr, u64 len);
/*
- * clear_page() - clear kernel page.
- * @page: address of kernel page
+ * clear_pages() - clear kernel page range.
+ * @addr: start address of page range
+ * @npages: number of pages
+ *
+ * Assumes that (@addr, +@npages) references a kernel region.
*
* Switch between three implementations of page clearing based on CPU
* capabilities:
@@ -65,22 +68,27 @@ void memzero_page_aligned_unrolled(void *addr, u64 len);
*
* Does absolutely no exception handling.
*/
-static inline void clear_page(void *page)
+static inline void clear_pages(void *addr, u64 npages)
{
- u64 len = PAGE_SIZE;
+ u64 len = npages * PAGE_SIZE;
/*
- * Clean up KMSAN metadata for the page being cleared. The assembly call
- * below clobbers @page, so we perform unpoisoning before it.
+ * Clean up KMSAN metadata for the pages being cleared. The assembly call
+ * below clobbers @addr, so we perform unpoisoning before it.
*/
- kmsan_unpoison_memory(page, len);
+ kmsan_unpoison_memory(addr, len);
asm volatile(ALTERNATIVE_2("call memzero_page_aligned_unrolled",
"shrq $3, %%rcx; rep stosq", X86_FEATURE_REP_GOOD,
"rep stosb", X86_FEATURE_ERMS)
- : "+c" (len), "+D" (page), ASM_CALL_CONSTRAINT
+ : "+c" (len), "+D" (addr), ASM_CALL_CONSTRAINT
: "a" (0)
: "cc", "memory");
}
+static inline void clear_page(void *addr)
+{
+ clear_pages(addr, 1);
+}
+
void copy_page(void *to, void *from);
KCFI_REFERENCE(copy_page);
--
2.43.5