Caching the page mask in the TargetPageBits structure eliminates a set
of runtime shifts when TARGET_PAGE_BITS varies at runtime. It turns out
that we require TARGET_PAGE_MASK more often than TARGET_PAGE_SIZE, so
redefine TARGET_PAGE_SIZE in terms of TARGET_PAGE_MASK instead of the
other way around.
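As background (this sketch is not part of the patch), the new
definitions rely on a simple identity: for a page of N bits the mask is
(target_long)-1 << N, and the size can be recovered from the mask as
-(int)mask, so caching only the mask loses nothing. A use such as
addr & TARGET_PAGE_MASK then needs just a load of target_page.mask
rather than a shift recomputed at every site. A minimal standalone C
check of the identity, assuming a 64-bit target_long purely for
illustration:

    /* Standalone illustration, not part of the patch: verify that the
     * mask-derived page size matches the shift-based definition.
     * The 64-bit target_long stand-in is an assumption for the demo. */
    #include <assert.h>
    #include <inttypes.h>
    #include <stdio.h>

    typedef int64_t target_long;   /* stand-in for QEMU's target_long */

    int main(void)
    {
        for (int bits = 10; bits <= 16; bits++) {
            target_long mask = (target_long)-1 << bits; /* cached once   */
            int size_from_mask = -(int)mask;            /* new definition */
            int size_from_bits = 1 << bits;             /* old definition */
            assert(size_from_mask == size_from_bits);
            printf("bits=%d size=%d mask=0x%016" PRIx64 "\n",
                   bits, size_from_mask, (uint64_t)mask);
        }
        return 0;
    }
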
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h | 8 ++++++--
 exec-vary.c            | 1 +
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index ba6d3306bf..08b3a5ab06 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -213,6 +213,7 @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
 typedef struct {
     bool decided;
     int bits;
+    target_long mask;
 } TargetPageBits;
 # if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
 extern const TargetPageBits target_page;
@@ -221,15 +222,18 @@ extern TargetPageBits target_page;
 # endif
 # ifdef CONFIG_DEBUG_TCG
 # define TARGET_PAGE_BITS (assert(target_page.decided), target_page.bits)
+# define TARGET_PAGE_MASK (assert(target_page.decided), target_page.mask)
 # else
 # define TARGET_PAGE_BITS target_page.bits
+# define TARGET_PAGE_MASK target_page.mask
 # endif
+# define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
 #else
 #define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
 #endif

-#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
 #define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)

 /* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
diff --git a/exec-vary.c b/exec-vary.c
index e0befd502a..0594f61fef 100644
--- a/exec-vary.c
+++ b/exec-vary.c
@@ -97,5 +97,6 @@ void finalize_target_page_bits(void)
         init_target_page.bits = TARGET_PAGE_BITS_MIN;
     }
     init_target_page.decided = true;
+    init_target_page.mask = (target_long)-1 << init_target_page.bits;
 #endif
 }
--
2.17.1