From: Richard Henderson <richard.henderson@linaro.org>
Subject: accel/tcg: Add tlb_flush_range_by_mmuidx_all_cpus_synced()

Forward tlb_flush_page_bits_by_mmuidx_all_cpus_synced to
tlb_flush_range_by_mmuidx_all_cpus_synced, passing TARGET_PAGE_SIZE.
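
For illustration only (a sketch, not part of the patch): with the new
entry point, a caller wanting a synced cross-CPU flush of a multi-page
range passes the length directly. In the hypothetical example below,
"cs" is an assumed CPUState pointer, and the 4-page length and the
mmuidx bitmap are made-up values:

    /* Flush a 4-page range on all CPUs, synced, for MMU index 0,
     * with all virtual address bits significant. */
    tlb_flush_range_by_mmuidx_all_cpus_synced(cs, addr & TARGET_PAGE_MASK,
                                              4 * TARGET_PAGE_SIZE,
                                              1 << 0 /* idxmap */,
                                              TARGET_LONG_BITS /* bits */);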
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210509151618.2331764-7-f4bug@amsat.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/exec/exec-all.h | 12 ++++++++++++
 accel/tcg/cputlb.c      | 27 ++++++++++++++++++++-------
 2 files changed, 32 insertions(+), 7 deletions(-)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 9a3dbb7ec08..8021adf38f4 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -281,6 +281,11 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                         target_ulong len, uint16_t idxmap,
                                         unsigned bits);
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                               target_ulong addr,
+                                               target_ulong len,
+                                               uint16_t idxmap,
+                                               unsigned bits);
 
 /**
  * tlb_set_page_with_attrs:
@@ -397,6 +402,13 @@ static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                       unsigned bits)
 {
 }
+static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                                             target_ulong addr,
+                                                             target_ulong len,
+                                                             uint16_t idxmap,
+                                                             unsigned bits)
+{
+}
 #endif
 /**
  * probe_access:
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a63cf187a4f..4b3ac7093cb 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -887,16 +887,20 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        idxmap, bits);
 }
 
-void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
-                                                   target_ulong addr,
-                                                   uint16_t idxmap,
-                                                   unsigned bits)
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+                                               target_ulong addr,
+                                               target_ulong len,
+                                               uint16_t idxmap,
+                                               unsigned bits)
 {
     TLBFlushRangeData d, *p;
     CPUState *dst_cpu;
 
-    /* If all bits are significant, this devolves to tlb_flush_page. */
-    if (bits >= TARGET_LONG_BITS) {
+    /*
+     * If all bits are significant, and len is small,
+     * this devolves to tlb_flush_page.
+     */
+    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
         return;
     }
@@ -908,7 +912,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
-    d.len = TARGET_PAGE_SIZE;
+    d.len = len;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -926,6 +930,15 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                           RUN_ON_CPU_HOST_PTR(p));
 }
 
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+                                                   target_ulong addr,
+                                                   uint16_t idxmap,
+                                                   unsigned bits)
+{
+    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
+                                              idxmap, bits);
+}
+
 /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */
 void tlb_protect_code(ram_addr_t ram_addr)
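
Aside (a sketch, not part of the patch): the forwarding above keeps the
single-page fast path intact, since the forwarded len is exactly
TARGET_PAGE_SIZE and therefore still satisfies the devolve check.
Assuming a CPUState pointer "cs" and a 64-bit target:

    /* bits >= TARGET_LONG_BITS and len == TARGET_PAGE_SIZE, so this
     * still reduces to tlb_flush_page_by_mmuidx_all_cpus_synced(). */
    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, addr, 1 << 0, 64);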
--
2.20.1