The present clear_page_sse2() is useful in cases where a page isn't going to
be touched again soon, or where we want to limit churn on the caches.
Amend it by alternatively using CLZERO, which has been found to be quite
a bit faster, at least on Zen2 hardware. Note that to use CLZERO, we need
to know the cache line size, and hence a feature dependency on CLFLUSH
gets introduced.
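
In C-like terms, the CLZERO-based clearing amounts to the following
minimal sketch (the function name is made up for illustration; a 64-byte
cache line and an assembler knowing the CLZERO mnemonic are assumed,
whereas the actual implementation below is in assembly, emits the insn
as raw bytes, and has its immediates patched at boot):

    static void clear_page_clzero_c(void *pg)
    {
        unsigned int i;

        /* CLZERO zeroes the whole cache line containing the address in rAX. */
        for ( i = 0; i < PAGE_SIZE / 64; i++ )
            asm volatile ( "clzero" :: "a" (pg + i * 64) : "memory" );

        /* CLZERO's stores are weakly ordered; fence before returning. */
        asm volatile ( "sfence" ::: "memory" );
    }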
For cases where latency matters most, or where sufficiently large parts
of a page are expected to be accessed again soon after the clearing,
introduce a "hot" alternative. Again use alternatives patching to select
between a "legacy" (REP STOSQ) and an ERMS (REP STOSB) variant.
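
In C-like terms the "hot" variants amount to the following (again just
an illustrative sketch with a made-up name; the real code selects
between the two at function granularity via alternatives patching, not
with a runtime conditional):

    static void clear_page_hot_c(void *pg)
    {
        unsigned long cnt;

        if ( boot_cpu_has(X86_FEATURE_ERMS) )
        {
            /* With ERMS, byte-granular REP STOSB is the preferred form. */
            cnt = PAGE_SIZE;
            asm volatile ( "rep stosb"
                           : "+D" (pg), "+c" (cnt) : "a" (0) : "memory" );
        }
        else
        {
            cnt = PAGE_SIZE / 8;
            asm volatile ( "rep stosq"
                           : "+D" (pg), "+c" (cnt) : "a" (0UL) : "memory" );
        }
    }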
Don't switch any callers just yet - this will be the subject of
subsequent changes.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: Re-base.
v2: New.
---
Note: Ankur indicates that for regions of ~L3 size or larger,
MOVNT/CLZERO is better even latency-wise.
--- a/xen/arch/x86/clear_page.S
+++ b/xen/arch/x86/clear_page.S
@@ -1,9 +1,9 @@
.file __FILE__
-#include <xen/linkage.h>
-#include <asm/page.h>
+#include <xen/page-size.h>
+#include <asm/asm_defns.h>
-FUNC(clear_page_sse2)
+ .macro clear_page_sse2
mov $PAGE_SIZE/32, %ecx
xor %eax,%eax
@@ -17,4 +17,43 @@ FUNC(clear_page_sse2)
sfence
ret
-END(clear_page_sse2)
+ .endm
+
+ .macro clear_page_clzero
+ mov %rdi, %rax
+ mov $PAGE_SIZE/64, %ecx
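+ /*
+  * The imm32 of the MOV above gets patched with PAGE_SIZE / <line size>
+  * by early_cpu_init(), hence the label living after the insn.
+  */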
+ .globl clear_page_clzero_post_count
+clear_page_clzero_post_count:
+
+0: clzero
+ sub $-64, %rax
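+ /*
+  * The imm8 of the SUB above gets patched with the negated cache line
+  * size by early_cpu_init().
+  */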
+ .globl clear_page_clzero_post_neg_size
+clear_page_clzero_post_neg_size:
+ sub $1, %ecx
+ jnz 0b
+
+ sfence
+ ret
+ .endm
+
+FUNC(clear_page_cold)
+ ALTERNATIVE clear_page_sse2, clear_page_clzero, X86_FEATURE_CLZERO
+END(clear_page_cold)
+
+ .macro clear_page_stosb
+ mov $PAGE_SIZE, %ecx
+ xor %eax,%eax
+ rep stosb
+ ret
+ .endm
+
+ .macro clear_page_stosq
+ mov $PAGE_SIZE/8, %ecx
+ xor %eax, %eax
+ rep stosq
+ ret
+ .endm
+
+FUNC(clear_page_hot)
+ ALTERNATIVE clear_page_stosq, clear_page_stosb, X86_FEATURE_ERMS
+END(clear_page_hot)
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -58,6 +58,9 @@ DEFINE_PER_CPU(bool, full_gdt_loaded);
DEFINE_PER_CPU(uint32_t, pkrs);
+extern uint32_t clear_page_clzero_post_count[];
+extern int8_t clear_page_clzero_post_neg_size[];
+
void __init setup_clear_cpu_cap(unsigned int cap)
{
const uint32_t *dfs;
@@ -355,8 +358,38 @@ void __init early_cpu_init(bool verbose)
edx &= ~cleared_caps[FEATURESET_1d];
ecx &= ~cleared_caps[FEATURESET_1c];
- if (edx & cpufeat_mask(X86_FEATURE_CLFLUSH))
- c->x86_cache_alignment = ((ebx >> 8) & 0xff) * 8;
+ if (edx & cpufeat_mask(X86_FEATURE_CLFLUSH)) {
+ unsigned int size = ((ebx >> 8) & 0xff) * 8;
+
+ c->x86_cache_alignment = size;
+
+ /*
+ * Patch in parameters of clear_page_cold()'s CLZERO
+ * alternative. Note that for now we cap this at 128 bytes.
+ * Larger cache line sizes would still be dealt with
+ * correctly, but would result in redundant work being done.
+ */
+ if (size > 128)
+ size = 128;
+ if (size && !(size & (size - 1))) {
+ /*
+ * Need to play some games to keep the compiler from
+ * recognizing the negative array index as being out
+ * of bounds. The labels in assembler code really are
+ * _after_ the locations to be patched, so the
+ * negative index is intentional.
+ */
+ uint32_t *pcount = clear_page_clzero_post_count;
+ int8_t *neg_size = clear_page_clzero_post_neg_size;
+
+ OPTIMIZER_HIDE_VAR(pcount);
+ OPTIMIZER_HIDE_VAR(neg_size);
+ pcount[-1] = PAGE_SIZE / size;
+ neg_size[-1] = -size;
+ }
+ else
+ setup_clear_cpu_cap(X86_FEATURE_CLZERO);
+ }
/* Leaf 0x1 capabilities filled in early for Xen. */
c->x86_capability[FEATURESET_1d] = edx;
c->x86_capability[FEATURESET_1c] = ecx;
--- a/xen/arch/x86/include/asm/asm-defns.h
+++ b/xen/arch/x86/include/asm/asm-defns.h
@@ -20,6 +20,10 @@
.byte 0x0f, 0x01, 0xdd
.endm
+.macro clzero
+ .byte 0x0f, 0x01, 0xfc
+.endm
+
/*
* Call a noreturn function. This could be JMP, but CALL results in a more
* helpful backtrace. BUG is to catch functions which do decide to return...
--- a/xen/arch/x86/include/asm/page.h
+++ b/xen/arch/x86/include/asm/page.h
@@ -219,10 +219,11 @@ typedef struct { u64 pfn; } pagetable_t;
#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
#define pagetable_null() pagetable_from_pfn(0)
-void clear_page_sse2(void *pg);
+void clear_page_hot(void *pg);
+void clear_page_cold(void *pg);
void copy_page_sse2(void *to, const void *from);
-#define clear_page(_p) clear_page_sse2(_p)
+#define clear_page(_p) clear_page_cold(_p)
#define copy_page(_t, _f) copy_page_sse2(_t, _f)
/* Convert between Xen-heap virtual addresses and machine addresses. */
--- a/xen/tools/gen-cpuid.py
+++ b/xen/tools/gen-cpuid.py
@@ -212,6 +212,10 @@ def crunch_numbers(state):
# the first place.
APIC: [X2APIC, TSC_DEADLINE, EXTAPIC],
+ # The CLZERO insn requires a means to determine the cache line size,
+ # which is tied to the CLFLUSH insn.
+ CLFLUSH: [CLZERO],
+
# AMD built MMXExtentions and 3DNow as extentions to MMX.
MMX: [MMXEXT, _3DNOW],