Caching TARGET_PAGE_MASK eliminates a set of runtime shifts. It turns out that we
require TARGET_PAGE_MASK more often than TARGET_PAGE_SIZE, so
redefine TARGET_PAGE_SIZE based on TARGET_PAGE_MASK instead of
the other way around.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
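Notes, not part of the commit: below is a minimal stand-alone sketch of the
idea, using hypothetical names (PageInfo, finalize_page_bits, old_mask,
new_mask, new_size) rather than the real QEMU declarations. It shows how
storing the mask once at finalize time lets each later mask use be a plain
load, with the size derived by negation, instead of redoing the -1 << bits
shift every time:

/*
 * Stand-alone sketch only; names here are hypothetical stand-ins for
 * the QEMU target_page machinery touched by this patch.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    bool decided;
    int bits;
    int64_t mask;                      /* plays the role of "target_long mask" */
} PageInfo;

static PageInfo page;

static void finalize_page_bits(int bits)
{
    page.bits = bits;
    page.mask = (int64_t)-1 << bits;   /* the shift happens once, here */
    page.decided = true;
}

/* Old scheme: every use of the mask redoes the shift at runtime. */
static int64_t old_mask(void)
{
    return (int64_t)-1 << page.bits;
}

/* New scheme: the mask is a plain load; the size is derived from it. */
static int64_t new_mask(void)
{
    assert(page.decided);
    return page.mask;
}

static int new_size(void)
{
    return (int)-new_mask();
}

int main(void)
{
    finalize_page_bits(12);            /* e.g. 4 KiB pages */
    assert(old_mask() == new_mask());
    printf("mask 0x%" PRIx64 ", size %d\n", (uint64_t)new_mask(), new_size());
    return 0;
}

The old_mask()/new_mask() pair is only there to make the before/after
comparison concrete; the assert in main() checks that both forms compute
the same value.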
include/exec/cpu-all.h | 8 ++++++--
exec-vary.c | 1 +
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 5246770271..2db73c7a27 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -213,19 +213,23 @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
typedef struct {
    bool decided;
    int bits;
+    target_long mask;
} TargetPageBits;
extern const TargetPageBits target_page;
# ifdef CONFIG_DEBUG_TCG
# define TARGET_PAGE_BITS (assert(target_page.decided), target_page.bits)
+# define TARGET_PAGE_MASK (assert(target_page.decided), target_page.mask)
# else
# define TARGET_PAGE_BITS target_page.bits
+# define TARGET_PAGE_MASK target_page.mask
# endif
+# define TARGET_PAGE_SIZE ((int)-TARGET_PAGE_MASK)
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#endif
-#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_ALIGN(addr) \
    (((addr) + ~TARGET_PAGE_MASK) & TARGET_PAGE_MASK)
diff --git a/exec-vary.c b/exec-vary.c
index 67cdf57a9c..26daf281f2 100644
--- a/exec-vary.c
+++ b/exec-vary.c
@@ -83,5 +83,6 @@ void finalize_target_page_bits(void)
        init_target_page.bits = TARGET_PAGE_BITS_MIN;
    }
    init_target_page.decided = true;
+    init_target_page.mask = (target_long)-1 << init_target_page.bits;
#endif
}
--
2.17.1