[PATCH 24/82] include/hw/core/cpu: Introduce cpu_tlb_fast

Posted by Richard Henderson 4 months, 2 weeks ago
Encapsulate access to cpu->neg.tlb.f[] in a new inline helper, cpu_tlb_fast().
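
For illustration, a typical conversion looks like this (a before/after sketch; the real call sites are in the hunks below):

    /* Before: open-coded access to the fast-TLB array. */
    CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];

    /* After: the same lookup through the new helper. */
    CPUTLBDescFast *fast = cpu_tlb_fast(cpu, mmu_idx);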

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h |  7 +++++++
 accel/tcg/cputlb.c    | 16 ++++++++--------
 2 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 1153cadb70..bd835b07d5 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -593,6 +593,13 @@ static inline CPUArchState *cpu_env(CPUState *cpu)
     return (CPUArchState *)(cpu + 1);
 }
 
+#ifdef CONFIG_TCG
+static inline CPUTLBDescFast *cpu_tlb_fast(CPUState *cpu, int mmu_idx)
+{
+    return &cpu->neg.tlb.f[mmu_idx];
+}
+#endif
+
 typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
 extern CPUTailQ cpus_queue;
 
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index d324f33339..2a6aa01c57 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -129,7 +129,7 @@ static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
 static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
                                   vaddr addr)
 {
-    uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
+    uintptr_t size_mask = cpu_tlb_fast(cpu, mmu_idx)->mask >> CPU_TLB_ENTRY_BITS;
 
     return (addr >> TARGET_PAGE_BITS) & size_mask;
 }
@@ -138,7 +138,7 @@ static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
 static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
                                      vaddr addr)
 {
-    return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
+    return &cpu_tlb_fast(cpu, mmu_idx)->table[tlb_index(cpu, mmu_idx, addr)];
 }
 
 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
@@ -292,7 +292,7 @@ static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
                                         int64_t now)
 {
     CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
-    CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+    CPUTLBDescFast *fast = cpu_tlb_fast(cpu, mmu_idx);
 
     tlb_mmu_resize_locked(desc, fast, now);
     tlb_mmu_flush_locked(desc, fast);
@@ -331,7 +331,7 @@ void tlb_init(CPUState *cpu)
     cpu->neg.tlb.c.dirty = 0;
 
     for (i = 0; i < NB_MMU_MODES; i++) {
-        tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
+        tlb_mmu_init(&cpu->neg.tlb.d[i], cpu_tlb_fast(cpu, i), now);
     }
 }
 
@@ -342,7 +342,7 @@ void tlb_destroy(CPUState *cpu)
     qemu_spin_destroy(&cpu->neg.tlb.c.lock);
     for (i = 0; i < NB_MMU_MODES; i++) {
         CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
-        CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
+        CPUTLBDescFast *fast = cpu_tlb_fast(cpu, i);
 
         g_free(fast->table);
         g_free(desc->fulltlb);
@@ -667,7 +667,7 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx,
                                    unsigned bits)
 {
     CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
-    CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
+    CPUTLBDescFast *f = cpu_tlb_fast(cpu, midx);
     vaddr mask = MAKE_64BIT_MASK(0, bits);
 
     /*
@@ -923,7 +923,7 @@ void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length)
     qemu_spin_lock(&cpu->neg.tlb.c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
-        CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+        CPUTLBDescFast *fast = cpu_tlb_fast(cpu, mmu_idx);
         unsigned int n = tlb_n_entries(fast);
         unsigned int i;
 
@@ -1316,7 +1316,7 @@ static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
 
         if (cmp == page) {
             /* Found entry in victim tlb, swap tlb and iotlb.  */
-            CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
+            CPUTLBEntry tmptlb, *tlb = &cpu_tlb_fast(cpu, mmu_idx)->table[index];
 
             qemu_spin_lock(&cpu->neg.tlb.c.lock);
             copy_tlb_helper_locked(&tmptlb, tlb);
-- 
2.43.0
Re: [PATCH 24/82] include/hw/core/cpu: Introduce cpu_tlb_fast
Posted by Pierrick Bouvier 4 months, 2 weeks ago
On 7/27/25 1:01 AM, Richard Henderson wrote:
> Encapsulate access to cpu->neg.tlb.f[] in a new inline helper, cpu_tlb_fast().
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> [...]

It's sad, my eyes were just getting used to reading those accesses :)
More seriously, that's clearer, thanks.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>