Don't base the values on TARGET_PAGE_BITS_MIN, but do verify
that TLB_FLAGS_MASK does not overlap the minimum page size.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/tlb-flags.h | 68 +++++++++++++++++++---------------------
accel/tcg/cputlb.c | 2 ++
2 files changed, 34 insertions(+), 36 deletions(-)
diff --git a/include/exec/tlb-flags.h b/include/exec/tlb-flags.h
index 54a6bae768..357e79095c 100644
--- a/include/exec/tlb-flags.h
+++ b/include/exec/tlb-flags.h
@@ -19,54 +19,29 @@
#ifndef TLB_FLAGS_H
#define TLB_FLAGS_H
-#include "exec/cpu-defs.h"
+/*
+ * Flags returned for lookup of a TLB virtual address.
+ */
#ifdef CONFIG_USER_ONLY
/*
- * Allow some level of source compatibility with softmmu. We do not
- * support any of the more exotic features, so only invalid pages may
- * be signaled by probe_access_flags().
+ * Allow some level of source compatibility with softmmu.
+ * Invalid is set when the page does not have the requested permissions.
+ * MMIO is set when we want the target helper to use the functional
+ * interface for load/store so that plugins see the access.
*/
-#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
-#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2))
-#define TLB_WATCHPOINT 0
+#define TLB_INVALID_MASK (1 << 0)
+#define TLB_MMIO (1 << 1)
+#define TLB_WATCHPOINT 0
#else
-/*
- * Flags stored in the low bits of the TLB virtual address.
- * These are defined so that fast path ram access is all zeros.
- * The flags all must be between TARGET_PAGE_BITS and
- * maximum address alignment bit.
- *
- * Use TARGET_PAGE_BITS_MIN so that these bits are constant
- * when TARGET_PAGE_BITS_VARY is in effect.
- *
- * The count, if not the placement of these bits is known
- * to tcg/tcg-op-ldst.c, check_max_alignment().
- */
-/* Zero if TLB entry is valid. */
-#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
-/*
- * Set if TLB entry references a clean RAM page. The iotlb entry will
- * contain the page physical address.
- */
-#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
-/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
-#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 3))
-
-/*
- * Use this mask to check interception with an alignment mask
- * in a TCG backend.
- */
-#define TLB_FLAGS_MASK \
- (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_FORCE_SLOW)
-
/*
* Flags stored in CPUTLBEntryFull.slow_flags[x].
* TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
*/
+
/* Set if TLB entry requires byte swap. */
#define TLB_BSWAP (1 << 0)
/* Set if TLB entry contains a watchpoint. */
@@ -82,6 +57,27 @@
(TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED | \
TLB_DISCARD_WRITE | TLB_MMIO)
+/*
+ * Flags stored in CPUTLBEntry.addr_idx[x].
+ * These must be above the largest alignment (64 bytes),
+ * and below the smallest page size (1024 bytes).
+ * This leaves bits [9:6] available for use.
+ */
+
+/* Zero if TLB entry is valid. */
+#define TLB_INVALID_MASK (1 << 6)
+/* Set if TLB entry references a clean RAM page. */
+#define TLB_NOTDIRTY (1 << 7)
+/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
+#define TLB_FORCE_SLOW (1 << 8)
+
+/*
+ * Use this mask to check interception with an alignment mask
+ * in a TCG backend.
+ */
+#define TLB_FLAGS_MASK \
+ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_FORCE_SLOW)
+
/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a717f357d5..39314e86f3 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -49,6 +49,8 @@
#endif
#include "tcg/tcg-ldst.h"
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & ((1u << TARGET_PAGE_BITS_MIN) - 1));
+
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */
--
2.43.0
On 3/28/25 13:04, Richard Henderson wrote:
> Don't base the values on TARGET_PAGE_BITS_MIN, but do verify
> that TLB_FLAGS_MASK does not overlap the minimum page size.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> [...]

Hurrah!

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>

Maybe we could define MEMOP_MAX_ALIGNMENT, reuse that in
check_max_alignment(), and add a compile-time check here as well.

We could also mention in a comment that all architectures now have the
same placement for these flags, which will simplify MMU management when
we mix several architectures.
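A minimal sketch of that suggestion, assuming a hypothetical
MEMOP_MAX_ALIGNMENT constant; the placement of the checks and the body
of check_max_alignment() below are guesses, not code from the tree:

    /* include/exec/memop.h (hypothetical): log2 of the largest
     * alignment encodable in MemOp, i.e. 64 bytes. */
    #define MEMOP_MAX_ALIGNMENT  6

    /* tcg/tcg-op-ldst.c: the alignment check could reuse the name. */
    static void check_max_alignment(unsigned a_bits)
    {
        /* The requested alignment must stay below the TLB flag bits. */
        tcg_debug_assert(a_bits <= MEMOP_MAX_ALIGNMENT);
    }

    /* include/exec/tlb-flags.h: flags must sit above the alignment. */
    QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & ((1u << MEMOP_MAX_ALIGNMENT) - 1));

With the constant defined in one place, the "largest alignment (64
bytes)" comment in tlb-flags.h and the assert in tcg-op-ldst.c could no
longer drift apart.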
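For readers new to this code, the reason one mask can serve both
interception and alignment: the flags live in the low bits of the
address stored in the TLB entry, so a single comparison of the masked
guest address against the entry fails whenever a flag is set or the
access is misaligned, and execution falls through to the slow path.
A simplified illustration, not the actual QEMU fast path (the names
and the 1024-byte page are stand-ins):

    #include <stdbool.h>
    #include <stdint.h>

    /* Assume the smallest supported page size, 1024 bytes. */
    #define PAGE_MASK_SKETCH  (~(uint64_t)0x3ff)

    /*
     * entry_addr: page address stored in the TLB entry, with any flag
     * bits or-ed into its low bits.  a_mask: the required alignment
     * minus one, at most 63, so always below the flag bits.
     */
    static bool tlb_hit_fast(uint64_t entry_addr, uint64_t vaddr,
                             uint64_t a_mask)
    {
        /* One compare covers page match, flag bits, and alignment. */
        return entry_addr == (vaddr & (PAGE_MASK_SKETCH | a_mask));
    }

    /*
     * Examples:
     *   tlb_hit_fast(0x4000, 0x4010, 0)             -> true  (hit)
     *   tlb_hit_fast(0x4000 | (1u << 7), 0x4010, 0) -> false (flag set)
     *   tlb_hit_fast(0x4000, 0x4012, 7)             -> false (misaligned)
     */

This is why the flags must stay strictly between the largest alignment
bit and the smallest page bit, which is exactly what the two build-time
checks enforce.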