The CPU_TLB_DYN_{MIN,MAX}_BITS definitions are not required
outside of cputlb.c and translate-all.c.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tb-internal.h   | 27 ---------------------------
 accel/tcg/tlb-bounds.h    | 32 ++++++++++++++++++++++++++++++++
 accel/tcg/cputlb.c        |  1 +
 accel/tcg/translate-all.c |  1 +
 4 files changed, 34 insertions(+), 27 deletions(-)
create mode 100644 accel/tcg/tlb-bounds.h
diff --git a/accel/tcg/tb-internal.h b/accel/tcg/tb-internal.h
index f9a06bcbab..08538e2896 100644
--- a/accel/tcg/tb-internal.h
+++ b/accel/tcg/tb-internal.h
@@ -22,33 +22,6 @@
*/
#define GETPC_ADJ 2
-#ifdef CONFIG_SOFTMMU
-
-#define CPU_TLB_DYN_MIN_BITS 6
-#define CPU_TLB_DYN_DEFAULT_BITS 8
-
-# if HOST_LONG_BITS == 32
-/* Make sure we do not require a double-word shift for the TLB load */
-# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
-# else /* HOST_LONG_BITS == 64 */
-/*
- * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
- * 2**34 == 16G of address space. This is roughly what one would expect a
- * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
- * Skylake's Level-2 STLB has 16 1G entries.
- * Also, make sure we do not size the TLB past the guest's address space.
- */
-# ifdef TARGET_PAGE_BITS_VARY
-# define CPU_TLB_DYN_MAX_BITS \
- MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
-# else
-# define CPU_TLB_DYN_MAX_BITS \
- MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
-# endif
-# endif
-
-#endif /* CONFIG_SOFTMMU */
-
void tb_lock_page0(tb_page_addr_t);
#ifdef CONFIG_USER_ONLY
diff --git a/accel/tcg/tlb-bounds.h b/accel/tcg/tlb-bounds.h
new file mode 100644
index 0000000000..efd34d4793
--- /dev/null
+++ b/accel/tcg/tlb-bounds.h
@@ -0,0 +1,32 @@
+/*
+ * softmmu size bounds
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_TLB_BOUNDS_H
+#define ACCEL_TCG_TLB_BOUNDS_H
+
+#define CPU_TLB_DYN_MIN_BITS 6
+#define CPU_TLB_DYN_DEFAULT_BITS 8
+
+# if HOST_LONG_BITS == 32
+/* Make sure we do not require a double-word shift for the TLB load */
+# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
+# else /* HOST_LONG_BITS == 64 */
+/*
+ * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
+ * 2**34 == 16G of address space. This is roughly what one would expect a
+ * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
+ * Skylake's Level-2 STLB has 16 1G entries.
+ * Also, make sure we do not size the TLB past the guest's address space.
+ */
+# ifdef TARGET_PAGE_BITS_VARY
+# define CPU_TLB_DYN_MAX_BITS \
+ MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
+# else
+# define CPU_TLB_DYN_MAX_BITS \
+ MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
+# endif
+# endif
+
+#endif /* ACCEL_TCG_TLB_BOUNDS_H */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 28c47d4872..a717f357d5 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -41,6 +41,7 @@
#include "trace.h"
#include "tb-hash.h"
#include "tb-internal.h"
+#include "tlb-bounds.h"
#include "internal-common.h"
#include "internal-target.h"
#ifdef CONFIG_PLUGIN
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index bb161ae61a..87fb6c51d3 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -47,6 +47,7 @@
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "tb-internal.h"
+#include "tlb-bounds.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
--
2.43.0
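
[Editor's note, not part of the patch: a minimal, self-contained sketch of the arithmetic described in the tlb-bounds.h comment above. The EXAMPLE_* names are stand-ins for illustration, not the real QEMU macros; it assumes 4 KiB target pages, as the comment does.]

/*
 * Reproduces the bound rationale: with TARGET_PAGE_BITS == 12,
 * a 2^22-entry TLB covers 2^(22+12) = 2^34 bytes = 16 GiB.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define EXAMPLE_TARGET_PAGE_BITS 12   /* assumed 4 KiB guest page */
#define EXAMPLE_TLB_DYN_MAX_BITS 22   /* upper bound chosen in tlb-bounds.h */

int main(void)
{
    uint64_t entries  = UINT64_C(1) << EXAMPLE_TLB_DYN_MAX_BITS;
    uint64_t coverage = entries << EXAMPLE_TARGET_PAGE_BITS;   /* bytes */

    printf("max TLB entries:  %" PRIu64 "\n", entries);        /* 4194304 */
    printf("covered space:    %" PRIu64 " GiB\n", coverage >> 30);  /* 16 */
    return 0;
}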
On 3/28/25 13:04, Richard Henderson wrote:
> The CPU_TLB_DYN_{MIN,MAX}_BITS definitions are not required
> outside of cputlb.c and translate-all.c.
[...]

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
On 28/3/25 21:04, Richard Henderson wrote:
> The CPU_TLB_DYN_{MIN,MAX}_BITS definitions are not required
> outside of cputlb.c and translate-all.c.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  accel/tcg/tb-internal.h   | 27 ---------------------------
>  accel/tcg/tlb-bounds.h    | 32 ++++++++++++++++++++++++++++++++
>  accel/tcg/cputlb.c        |  1 +
>  accel/tcg/translate-all.c |  1 +
>  4 files changed, 34 insertions(+), 27 deletions(-)
>  create mode 100644 accel/tcg/tlb-bounds.h

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>