Function helper_invtlb_page_asid_or_g() invalidates only one entry of the
emulated LoongArch TLB, so it is not necessary to flush the whole QEMU TLB;
flushing the address range covered by that emulated TLB entry is enough.
Replace the direct tlb_flush() call with invalidate_tlb(), which (via
invalidate_tlb_entry()) flushes only the QEMU TLB entries for the specified
address range.
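Below is a minimal sketch of the range-flush idea behind this change. It is
not the actual invalidate_tlb() implementation: the PS/VPPN field names, the
mmu_idx_bitmap parameter, and the helper name are assumptions for
illustration, and the snippet presumes the headers already included by
tlb_helper.c.

/*
 * Hedged sketch only, not the real QEMU helper: mark the emulated TLB
 * entry invalid, then drop just the address range it mapped from the
 * QEMU softmmu TLB instead of calling tlb_flush().  PS/VPPN field names
 * and mmu_idx_bitmap are illustrative assumptions.
 */
static void invalidate_tlb_range_sketch(CPULoongArchState *env, int index,
                                        uint16_t mmu_idx_bitmap)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint8_t tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);     /* assumed field */
    target_ulong addr = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) /* assumed field */
                        << (tlb_ps + 1);
    target_ulong len = (target_ulong)2 << tlb_ps;  /* entry maps an even/odd page pair */

    /* Mark the emulated LoongArch TLB entry as not Existing. */
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);

    /* Flush only this virtual address range from the QEMU TLB. */
    tlb_flush_range_by_mmuidx(env_cpu(env), addr, len, mmu_idx_bitmap);
}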
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
target/loongarch/tcg/tlb_helper.c | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index ee40684a18..017c0d2f5b 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -560,7 +560,6 @@ void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
target_ulong info, target_ulong addr)
{
int index, asid = info & 0x3ff;
- LoongArchTLB *tlb;
tlb_match func;
bool ret;
@@ -570,9 +569,7 @@ void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
return;
}
- tlb = &env->tlb[index];
- tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
- tlb_flush(env_cpu(env));
+ invalidate_tlb(env, index);
}
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
--
2.39.3