The function flush_xen_text_tlb_local() has been misused and will result
in invalidating the instruction cache more often than necessary.

For instance, there is no need to invalidate the instruction cache if
we are setting SCTLR_EL2.WXN.

There is effectively only one caller (i.e. free_init_memory()) that would
need to invalidate the instruction cache.

So rather than keeping around the function flush_xen_text_tlb_local(),
replace it with a call to flush_xen_data_tlb_local() and explicitly flush
the instruction cache when necessary.
Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Andrii Anisov <andrii_anisov@epam.com>
---
Changes in v3:
- Fix typos
Changes in v2:
- Add Andrii's reviewed-by
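
A minimal sketch (not part of the commit or of the diff below) of the two
call patterns that replace flush_xen_text_tlb_local(), reusing the helpers
touched by the hunks. The wrapper names example_enforce_wxn() and
example_release_init() are made up for illustration; the real call sites are
xen_pt_enforce_wnx() and free_init_memory() in xen/arch/arm/mm.c, and the
snippet assumes the sysreg accessors and TLB/cache helpers already in scope
there.

  /*
   * Case 1: a system register update such as setting SCTLR_EL2.WXN.
   * No instructions are modified or unmapped, so synchronizing the
   * write and flushing the TLBs is enough; the I-cache is left alone.
   */
  static void example_enforce_wxn(void)
  {
      WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
      isb();                      /* Make the SCTLR_EL2 update visible. */
      flush_xen_data_tlb_local(); /* TLBs may have cached the old WXN. */
  }

  /*
   * Case 2: text is released, as in free_init_memory(). On top of the
   * TLB flush done while remapping the range, the local I-cache must be
   * invalidated so stale init instructions cannot be fetched anymore.
   */
  static void example_release_init(void)
  {
      /* ... remap the __init range read-write, flushing the TLBs ... */
      invalidate_icache_local();  /* Drop cached entries for init text. */
  }

Only the second pattern pays for the I-cache invalidation, which is the
point of dropping flush_xen_text_tlb_local().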
---
xen/arch/arm/mm.c | 17 ++++++++++++++---
xen/include/asm-arm/arm32/page.h | 23 +++++++++--------------
xen/include/asm-arm/arm64/page.h | 21 +++++----------------
3 files changed, 28 insertions(+), 33 deletions(-)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 93ad118183..dfbe39c70a 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -610,8 +610,12 @@ void __init remove_early_mappings(void)
static void xen_pt_enforce_wnx(void)
{
WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
- /* Flush everything after setting WXN bit. */
- flush_xen_text_tlb_local();
+ /*
+ * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
+ * before flushing the TLBs.
+ */
+ isb();
+ flush_xen_data_tlb_local();
}
extern void switch_ttbr(uint64_t ttbr);
@@ -1123,7 +1127,7 @@ static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg)
}
write_pte(xen_xenmap + i, pte);
}
- flush_xen_text_tlb_local();
+ flush_xen_data_tlb_local();
}
/* Release all __init and __initdata ranges to be reused */
@@ -1136,6 +1140,13 @@ void free_init_memory(void)
uint32_t *p;
set_pte_flags_on_range(__init_begin, len, mg_rw);
+
+ /*
+ * From now on, init will not be used for execution anymore,
+ * so nuke the instruction cache to remove entries related to init.
+ */
+ invalidate_icache_local();
+
#ifdef CONFIG_ARM_32
/* udf instruction i.e (see A8.8.247 in ARM DDI 0406C.c) */
insn = 0xe7f000f0;
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index ea4b312c70..40a77daa9d 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -46,24 +46,19 @@ static inline void invalidate_icache(void)
}
/*
- * Flush all hypervisor mappings from the TLB and branch predictor of
- * the local processor.
- *
- * This is needed after changing Xen code mappings.
- *
- * The caller needs to issue the necessary DSB and D-cache flushes
- * before calling flush_xen_text_tlb.
+ * Invalidate all instruction caches on the local processor to PoU.
+ * We also need to flush the branch predictor for ARMv7 as it may be
+ * architecturally visible to the software (see B2.2.4 in ARM DDI 0406C.b).
*/
-static inline void flush_xen_text_tlb_local(void)
+static inline void invalidate_icache_local(void)
{
asm volatile (
- "isb;" /* Ensure synchronization with previous changes to text */
- CMD_CP32(TLBIALLH) /* Flush hypervisor TLB */
- CMD_CP32(ICIALLU) /* Flush I-cache */
- CMD_CP32(BPIALL) /* Flush branch predictor */
- "dsb;" /* Ensure completion of TLB+BP flush */
- "isb;"
+ CMD_CP32(ICIALLU) /* Flush I-cache. */
+ CMD_CP32(BPIALL) /* Flush branch predictor. */
: : : "memory");
+
+ dsb(nsh); /* Ensure completion of the I-cache flush */
+ isb(); /* Synchronize fetched instruction stream. */
}
/*
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index 23d778154d..6c36d0210f 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -37,23 +37,12 @@ static inline void invalidate_icache(void)
isb();
}
-/*
- * Flush all hypervisor mappings from the TLB of the local processor.
- *
- * This is needed after changing Xen code mappings.
- *
- * The caller needs to issue the necessary DSB and D-cache flushes
- * before calling flush_xen_text_tlb.
- */
-static inline void flush_xen_text_tlb_local(void)
+/* Invalidate all instruction caches on the local processor to PoU */
+static inline void invalidate_icache_local(void)
{
- asm volatile (
- "isb;" /* Ensure synchronization with previous changes to text */
- "tlbi alle2;" /* Flush hypervisor TLB */
- "ic iallu;" /* Flush I-cache */
- "dsb sy;" /* Ensure completion of TLB flush */
- "isb;"
- : : : "memory");
+ asm volatile ("ic iallu");
+ dsb(nsh); /* Ensure completion of the I-cache flush */
+ isb();
}
/*
--
2.11.0
On Tue, 14 May 2019, Julien Grall wrote:
> The function flush_xen_text_tlb_local() has been misused and will result
> to invalidate the instruction cache more than necessary.
>
> For instance, there is no need to invalidate the instruction cache if
> we are setting SCTLR_EL2.WXN.
>
> There is effectively only one caller (i.e free_init_memory() who would
> need to invalidate the instruction cache.
>
> So rather than keeping around the function flush_xen_text_tlb_local()
> replace it with call to flush_xen_tlb_local() and explicitely flush
^ explicitly
> the cache when necessary.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> Reviewed-by: Andrii Anisov <andrii_anisov@epam.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>