Implement ffs64() and fls64() as plain static inlines, dropping the ifdefary
and intermediate generic_f?s64() forms.
Add tests for all interesting bit positions at 32bit boundaries.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Julien Grall <julien@xen.org>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
CC: Bertrand Marquis <bertrand.marquis@arm.com>
CC: Michal Orzel <michal.orzel@amd.com>
CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>
CC: Shawn Anastasio <sanastasio@raptorengineering.com>
CC: consulting@bugseng.com <consulting@bugseng.com>
CC: Simone Ballarin <simone.ballarin@bugseng.com>
CC: Federico Serafini <federico.serafini@bugseng.com>
CC: Nicola Vetrini <nicola.vetrini@bugseng.com>
v2:
* Use ULL rather than a uint64_t cast.
* Extend to fls64() too.
---
xen/common/bitops.c | 32 ++++++++++++++++++++++++++++++
xen/include/xen/bitops.h | 42 +++++++++++++++++++---------------------
2 files changed, 52 insertions(+), 22 deletions(-)
diff --git a/xen/common/bitops.c b/xen/common/bitops.c
index b4845d9e84d1..5482e5a1218d 100644
--- a/xen/common/bitops.c
+++ b/xen/common/bitops.c
@@ -24,6 +24,22 @@ static void __init test_ffs(void)
CHECK(ffsl, 1UL << 32, 33);
CHECK(ffsl, 1UL << 63, 64);
#endif
+
+ /*
+ * unsigned int ffs64(uint64_t)
+ *
+ * 32-bit builds of Xen have to split this into two adjacent operations,
+ * so test all interesting bit positions across the divide.
+ */
+ CHECK(ffs64, 0, 0);
+ CHECK(ffs64, 1, 1);
+ CHECK(ffs64, 3, 1);
+ CHECK(ffs64, 7, 1);
+ CHECK(ffs64, 6, 2);
+
+ CHECK(ffs64, 0x8000000080000000ULL, 32);
+ CHECK(ffs64, 0x8000000100000000ULL, 33);
+ CHECK(ffs64, 0x8000000000000000ULL, 64);
}
static void __init test_fls(void)
@@ -48,6 +64,22 @@ static void __init test_fls(void)
CHECK(flsl, 1 | (1UL << 32), 33);
CHECK(flsl, 1 | (1UL << 63), 64);
#endif
+
+ /*
+ * unsigned int fls64(uint64_t)
+ *
+ * 32-bit builds of Xen have to split this into two adjacent operations,
+ * so test all interesting bit positions across the divide.
+ */
+ CHECK(fls64, 0, 0);
+ CHECK(fls64, 1, 1);
+ CHECK(fls64, 3, 2);
+ CHECK(fls64, 7, 3);
+ CHECK(fls64, 6, 3);
+
+ CHECK(fls64, 0x0000000080000001ULL, 32);
+ CHECK(fls64, 0x0000000100000001ULL, 33);
+ CHECK(fls64, 0x8000000000000001ULL, 64);
}
static void __init __constructor test_bitops(void)
diff --git a/xen/include/xen/bitops.h b/xen/include/xen/bitops.h
index e7df6377372d..c5518d2c8552 100644
--- a/xen/include/xen/bitops.h
+++ b/xen/include/xen/bitops.h
@@ -60,6 +60,14 @@ static always_inline __pure unsigned int ffsl(unsigned long x)
#endif
}
+static always_inline __pure unsigned int ffs64(uint64_t x)
+{
+ if ( BITS_PER_LONG == 64 )
+ return ffsl(x);
+ else
+ return !x || (uint32_t)x ? ffs(x) : ffs(x >> 32) + 32;
+}
+
static always_inline __pure unsigned int fls(unsigned int x)
{
if ( __builtin_constant_p(x) )
@@ -84,6 +92,18 @@ static always_inline __pure unsigned int flsl(unsigned long x)
#endif
}
+static always_inline __pure unsigned int fls64(uint64_t x)
+{
+ if ( BITS_PER_LONG == 64 )
+ return flsl(x);
+ else
+ {
+ uint32_t h = x >> 32;
+
+ return h ? fls(h) + 32 : fls(x);
+ }
+}
+
/* --------------------- Please tidy below here --------------------- */
#ifndef find_next_bit
@@ -134,28 +154,6 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr,
unsigned long size);
#endif
-#if BITS_PER_LONG == 64
-# define fls64 flsl
-# define ffs64 ffsl
-#else
-# ifndef ffs64
-static inline int generic_ffs64(__u64 x)
-{
- return !x || (__u32)x ? ffs(x) : ffs(x >> 32) + 32;
-}
-# define ffs64 generic_ffs64
-# endif
-# ifndef fls64
-static inline int generic_fls64(__u64 x)
-{
- __u32 h = x >> 32;
-
- return h ? fls(h) + 32 : fls(x);
-}
-# define fls64 generic_fls64
-# endif
-#endif
-
static inline int get_bitmask_order(unsigned int count)
{
int order;
--
2.30.2