[PATCH 11/23] accel/tcg: Remove IntervalTree entries in tlb_flush_range_locked

Flush a masked range of pages from the IntervalTree cache.
When the mask is not used, there is a redundant comparison,
but that is better than duplicating code at this point.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 772656c7f8..709ad75616 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -311,6 +311,13 @@ static CPUTLBEntryTree *tlbtree_lookup_range(CPUTLBDesc *desc, vaddr s, vaddr l)
     return i ? container_of(i, CPUTLBEntryTree, itree) : NULL;
 }
 
+static CPUTLBEntryTree *tlbtree_lookup_range_next(CPUTLBEntryTree *prev,
+                                                  vaddr s, vaddr l)
+{
+    IntervalTreeNode *i = interval_tree_iter_next(&prev->itree, s, l);
+    return i ? container_of(i, CPUTLBEntryTree, itree) : NULL;
+}
+
 static CPUTLBEntryTree *tlbtree_lookup_addr(CPUTLBDesc *desc, vaddr addr)
 {
     return tlbtree_lookup_range(desc, addr, addr);
@@ -744,6 +751,8 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx,
     CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
     CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
     vaddr mask = MAKE_64BIT_MASK(0, bits);
+    CPUTLBEntryTree *node;
+    vaddr addr_mask, last_mask, last_imask;
 
     /*
      * Check if we need to flush due to large pages.
@@ -764,6 +773,22 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx,
         vaddr page = addr + i;
         tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
     }
+
+    addr_mask = addr & mask;
+    last_mask = addr_mask + len - 1;
+    last_imask = last_mask | ~mask;
+    node = tlbtree_lookup_range(d, addr_mask, last_imask);
+    while (node) {
+        CPUTLBEntryTree *next =
+            tlbtree_lookup_range_next(node, addr_mask, last_imask);
+        vaddr page_mask = node->itree.start & mask;
+
+        if (page_mask >= addr_mask && page_mask < last_mask) {
+            interval_tree_remove(&node->itree, &d->iroot);
+            g_free(node);
+        }
+        node = next;
+    }
 }
 
 typedef struct {
-- 
2.43.0
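
The masking above is subtle: only the low 'bits' of each address
participate in the comparison, so the tree is searched over the widened
interval [addr_mask, last_mask | ~mask] to also visit entries whose
ignored high bits differ, and the masked comparison inside the loop
then filters the real matches. Below is a minimal standalone sketch of
that check, not QEMU code: page_in_flush_range() and the example
addresses are invented for illustration, while MAKE_64BIT_MASK mirrors
QEMU's macro.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vaddr;

/* Same definition as QEMU's MAKE_64BIT_MASK, here used with shift == 0. */
#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

/*
 * Hypothetical helper, not in the patch: would a cached page starting
 * at 'start' be flushed by a masked flush of [addr, addr + len)?
 */
static bool page_in_flush_range(vaddr start, vaddr addr, vaddr len, int bits)
{
    vaddr mask = MAKE_64BIT_MASK(0, bits);
    vaddr addr_mask = addr & mask;          /* low bits of range start */
    vaddr last_mask = addr_mask + len - 1;  /* low bits of range last byte */
    vaddr page_mask = start & mask;         /* low bits of page start */

    /* The same comparison as the loop body in the patch. */
    return page_mask >= addr_mask && page_mask < last_mask;
}

int main(void)
{
    /* bits == 64: the mask is all ones and the match is exact. */
    printf("%d\n", page_in_flush_range(0x5000, 0x4000, 0x3000, 64)); /* 1 */
    printf("%d\n", page_in_flush_range(0x8000, 0x4000, 0x3000, 64)); /* 0 */

    /*
     * bits == 16: only the low 16 bits are compared, so a page at
     * 0xabcd5000 is still flushed by a flush of [0x4000, 0x7000).
     */
    printf("%d\n", page_in_flush_range(0xabcd5000, 0x4000, 0x3000, 16)); /* 1 */
    return 0;
}

When bits == 64, ~mask == 0 and last_imask == last_mask, so the in-loop
comparison merely repeats what the tree lookup already guaranteed; that
is the redundant comparison the commit message accepts. Note also that
the loop fetches the successor with tlbtree_lookup_range_next() before
removing and freeing the current node, since a node that has been
removed from the tree can no longer serve as the iteration cursor.
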
Re: [PATCH 11/23] accel/tcg: Remove IntervalTree entries in tlb_flush_range_locked
Posted by Pierrick Bouvier
On 10/9/24 08:08, Richard Henderson wrote:
> Flush a masked range of pages from the IntervalTree cache.
> When the mask is not used, there is a redundant comparison,
> but that is better than duplicating code at this point.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> [...]

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>