In {big,little}_endian.h the changes are entirely mechanical, except
that, on lines touched anyway, casts no longer discard const from
pointers-to-const.
In swab.h the casting of constants is done away with as well - I simply
don't see what the respective comment is concerned about in our
environment (sizeof(int) >= 4, sizeof(long) >= {4,8} depending on
architecture, sizeof(long long) >= 8). The comment is certainly relevant
in more general cases. Excess parentheses are dropped as well,
___swab32()'s local variable is renamed, and __arch__swab32()'s local
variable is dropped as being redundant with ___swab32()'s.
The masking operation is also dropped from __fswab64(), as conversion to
uint32_t truncates to the low 32 bits anyway.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
I'm unconvinced of the need for the separate ___constant_swab32(). I'm
also unconvinced of the need for some of said constants (that even had
casts on them).
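
For illustration only (not part of the patch), a minimal sketch of why the
constant casts and the __fswab64() mask are redundant once sizeof(int) >= 4
is given; it assumes a hosted build (stdint.h/assert.h) purely for the
demonstration, but the reasoning is the same in Xen's freestanding
environment:

/* Illustrative sketch only, not part of the patch. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t x = 0x12345678;

    /*
     * With sizeof(int) >= 4, a hex constant such as 0xff000000U already
     * has type unsigned int and full 32-bit width, so the former
     * (__u32)0x...UL decorations add nothing:
     */
    uint32_t swapped = ((x & 0x000000ffU) << 24) |
                       ((x & 0x0000ff00U) <<  8) |
                       ((x & 0x00ff0000U) >>  8) |
                       ((x & 0xff000000U) >> 24);
    assert(swapped == 0x78563412);

    /*
     * Conversion to uint32_t truncates modulo 2^32, so the former
     * "x & ((1ULL << 32) - 1)" mask in __fswab64() was redundant:
     */
    uint64_t v = 0xaabbccdd12345678ULL;
    uint32_t l = v;          /* low 32 bits, no explicit mask needed */
    uint32_t h = v >> 32;    /* high 32 bits */
    assert(l == 0x12345678 && h == 0xaabbccdd);

    return 0;
}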
--- a/xen/include/xen/byteorder/big_endian.h
+++ b/xen/include/xen/byteorder/big_endian.h
@@ -14,25 +14,25 @@
#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
-#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
+#define __constant_le32_to_cpu(x) ___constant_swab32((__force uint32_t)(__le32)(x))
#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
#define __constant_le16_to_cpu(x) ___constant_swab16((__force uint16_t)(__le16)(x))
#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
-#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
-#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __constant_cpu_to_be32(x) ((__force __be32)(uint32_t)(x))
+#define __constant_be32_to_cpu(x) ((__force uint32_t)(__be32)(x))
#define __constant_cpu_to_be16(x) ((__force __be16)(uint16_t)(x))
#define __constant_be16_to_cpu(x) ((__force uint16_t)(__be16)(x))
#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
-#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
+#define __le32_to_cpu(x) __swab32((__force uint32_t)(__le32)(x))
#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
#define __le16_to_cpu(x) __swab16((__force uint16_t)(__le16)(x))
#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
-#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
-#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
+#define __cpu_to_be32(x) ((__force __be32)(uint32_t)(x))
+#define __be32_to_cpu(x) ((__force uint32_t)(__be32)(x))
#define __cpu_to_be16(x) ((__force __be16)(uint16_t)(x))
#define __be16_to_cpu(x) ((__force uint16_t)(__be16)(x))
@@ -44,13 +44,13 @@ static inline __u64 __le64_to_cpup(const
{
return __swab64p((__u64 *)p);
}
-static inline __le32 __cpu_to_le32p(const __u32 *p)
+static inline __le32 __cpu_to_le32p(const uint32_t *p)
{
return (__force __le32)__swab32p(p);
}
-static inline __u32 __le32_to_cpup(const __le32 *p)
+static inline uint32_t __le32_to_cpup(const __le32 *p)
{
- return __swab32p((__u32 *)p);
+ return __swab32p((const uint32_t *)p);
}
static inline __le16 __cpu_to_le16p(const uint16_t *p)
{
@@ -68,13 +68,13 @@ static inline __u64 __be64_to_cpup(const
{
return (__force __u64)*p;
}
-static inline __be32 __cpu_to_be32p(const __u32 *p)
+static inline __be32 __cpu_to_be32p(const uint32_t *p)
{
return (__force __be32)*p;
}
-static inline __u32 __be32_to_cpup(const __be32 *p)
+static inline uint32_t __be32_to_cpup(const __be32 *p)
{
- return (__force __u32)*p;
+ return (__force uint32_t)*p;
}
static inline __be16 __cpu_to_be16p(const uint16_t *p)
{
--- a/xen/include/xen/byteorder/little_endian.h
+++ b/xen/include/xen/byteorder/little_endian.h
@@ -13,26 +13,26 @@
#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
-#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
-#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __constant_cpu_to_le32(x) ((__force __le32)(uint32_t)(x))
+#define __constant_le32_to_cpu(x) ((__force uint32_t)(__le32)(x))
#define __constant_cpu_to_le16(x) ((__force __le16)(uint16_t)(x))
#define __constant_le16_to_cpu(x) ((__force uint16_t)(__le16)(x))
#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
-#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
+#define __constant_be32_to_cpu(x) ___constant_swab32((__force uint32_t)(__be32)(x))
#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
#define __constant_be16_to_cpu(x) ___constant_swab16((__force uint16_t)(__be16)(x))
#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
-#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
-#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+#define __cpu_to_le32(x) ((__force __le32)(uint32_t)(x))
+#define __le32_to_cpu(x) ((__force uint32_t)(__le32)(x))
#define __cpu_to_le16(x) ((__force __le16)(uint16_t)(x))
#define __le16_to_cpu(x) ((__force uint16_t)(__le16)(x))
#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
-#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+#define __be32_to_cpu(x) __swab32((__force uint32_t)(__be32)(x))
#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
#define __be16_to_cpu(x) __swab16((__force uint16_t)(__be16)(x))
@@ -44,13 +44,13 @@ static inline __u64 __le64_to_cpup(const
{
return (__force __u64)*p;
}
-static inline __le32 __cpu_to_le32p(const __u32 *p)
+static inline __le32 __cpu_to_le32p(const uint32_t *p)
{
return (__force __le32)*p;
}
-static inline __u32 __le32_to_cpup(const __le32 *p)
+static inline uint32_t __le32_to_cpup(const __le32 *p)
{
- return (__force __u32)*p;
+ return (__force uint32_t)*p;
}
static inline __le16 __cpu_to_le16p(const uint16_t *p)
{
@@ -68,13 +68,13 @@ static inline __u64 __be64_to_cpup(const
{
return __swab64p((__u64 *)p);
}
-static inline __be32 __cpu_to_be32p(const __u32 *p)
+static inline __be32 __cpu_to_be32p(const uint32_t *p)
{
return (__force __be32)__swab32p(p);
}
-static inline __u32 __be32_to_cpup(const __be32 *p)
+static inline uint32_t __be32_to_cpup(const __be32 *p)
{
- return __swab32p((__u32 *)p);
+ return __swab32p((const uint32_t *)p);
}
static inline __be16 __cpu_to_be16p(const uint16_t *p)
{
--- a/xen/include/xen/byteorder/swab.h
+++ b/xen/include/xen/byteorder/swab.h
@@ -12,7 +12,7 @@
/*
* Casts are necessary for constants, because we never know for sure how
- * U/UL/ULL map to __u32, __u64. At least not in a portable way.
+ * UL/ULL map to __u64. At least not in a portable way.
*/
#define ___swab16(x) \
({ \
@@ -24,12 +24,12 @@
#define ___swab32(x) \
({ \
- __u32 __x = (x); \
- ((__u32)( \
- (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
- (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
- (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
- (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
+ uint32_t x_ = (x); \
+ (uint32_t)( \
+ (((uint32_t)(x_) & 0x000000ffU) << 24) | \
+ (((uint32_t)(x_) & 0x0000ff00U) << 8) | \
+ (((uint32_t)(x_) & 0x00ff0000U) >> 8) | \
+ (((uint32_t)(x_) & 0xff000000U) >> 24)); \
})
#define ___swab64(x) \
@@ -51,11 +51,11 @@
(((uint16_t)(x) & 0x00ffU) << 8) | \
(((uint16_t)(x) & 0xff00U) >> 8)))
#define ___constant_swab32(x) \
- ((__u32)( \
- (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
- (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
- (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
- (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
+ ((uint32_t)( \
+ (((uint32_t)(x) & 0x000000ffU) << 24) | \
+ (((uint32_t)(x) & 0x0000ff00U) << 8) | \
+ (((uint32_t)(x) & 0x00ff0000U) >> 8) | \
+ (((uint32_t)(x) & 0xff000000U) >> 24)))
#define ___constant_swab64(x) \
((__u64)( \
(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
@@ -74,7 +74,7 @@
# define __arch__swab16(x) ___swab16(x)
#endif
#ifndef __arch__swab32
-# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
+# define __arch__swab32(x) ___swab32(x)
#endif
#ifndef __arch__swab64
# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
@@ -110,7 +110,7 @@
___swab16((x)) : \
__fswab16((x)))
# define __swab32(x) \
-(__builtin_constant_p((__u32)(x)) ? \
+(__builtin_constant_p((uint32_t)(x)) ? \
___swab32((x)) : \
__fswab32((x)))
# define __swab64(x) \
@@ -137,15 +137,15 @@ static inline void __swab16s(uint16_t *a
__arch__swab16s(addr);
}
-static inline attr_const __u32 __fswab32(__u32 x)
+static inline attr_const uint32_t __fswab32(uint32_t x)
{
return __arch__swab32(x);
}
-static inline __u32 __swab32p(const __u32 *x)
+static inline uint32_t __swab32p(const uint32_t *x)
{
return __arch__swab32p(x);
}
-static inline void __swab32s(__u32 *addr)
+static inline void __swab32s(uint32_t *addr)
{
__arch__swab32s(addr);
}
@@ -154,8 +154,7 @@ static inline void __swab32s(__u32 *addr
static inline attr_const __u64 __fswab64(__u64 x)
{
# ifdef __SWAB_64_THRU_32__
- __u32 h = x >> 32;
- __u32 l = x & ((1ULL<<32)-1);
+ uint32_t h = x >> 32, l = x;
return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
# else
return __arch__swab64(x);