[PATCH 6/6] riscv: cmpxchg: Convert to use_alternative_likely

Vivian Wang posted 6 patches 1 month, 2 weeks ago
Use use_alternative_likely() to check for RISCV_ISA_EXT_ZAWRS, replacing
the open-coded asm goto wrapping ALTERNATIVE().

The "likely" variant is used to match the behavior of the original
implementation, ALTERNATIVE("j %l[no_zawrs]", "nop", ...), whose
patched-in nop falls through to the Zawrs path when the extension is
present.

Signed-off-by: Vivian Wang <wangruikang@iscas.ac.cn>
---
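Note for reviewers: use_alternative_likely() is introduced earlier in
this series. A minimal sketch of its presumable shape, assuming it
mirrors the existing riscv_has_extension_likely() helper (the signature
and operand plumbing below are assumptions, not the series' verbatim
code):

/*
 * Sketch only: the default "j" takes the slow label and is patched to
 * "nop" when the (vendor_id, patch_id) alternative applies, so the
 * enabled path is the straight-line fall-through.
 */
static __always_inline bool use_alternative_likely(const unsigned long vendor_id,
						   const unsigned long patch_id)
{
	asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[patch], 1)
		 : : [vendor] "i" (vendor_id), [patch] "i" (patch_id)
		 : : l_no);
	return true;

l_no:
	return false;
}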
 arch/riscv/include/asm/cmpxchg.h | 125 +++++++++++++++++++--------------------
 1 file changed, 61 insertions(+), 64 deletions(-)

diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 0b749e7102162477432f7cf9a34768fbdf2e8cc7..1ef6e9de5f6d2721d325fa07f2e636ebc951dc7e 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -370,74 +370,71 @@ static __always_inline void __cmpwait(volatile void *ptr,
 	u32 *__ptr32b;
 	ulong __s, __val, __mask;
 
-	asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop",
-			     0, RISCV_ISA_EXT_ZAWRS, 1)
-		 : : : : no_zawrs);
-
-	switch (size) {
-	case 1:
-		__ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
-		__s = ((ulong)(ptr) & 0x3) * BITS_PER_BYTE;
-		__val = val << __s;
-		__mask = 0xff << __s;
-
-		asm volatile(
-		"	lr.w	%0, %1\n"
-		"	and	%0, %0, %3\n"
-		"	xor	%0, %0, %2\n"
-		"	bnez	%0, 1f\n"
-			ZAWRS_WRS_NTO "\n"
-		"1:"
-		: "=&r" (tmp), "+A" (*(__ptr32b))
-		: "r" (__val), "r" (__mask)
-		: "memory");
-		break;
-	case 2:
-		__ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
-		__s = ((ulong)(ptr) & 0x2) * BITS_PER_BYTE;
-		__val = val << __s;
-		__mask = 0xffff << __s;
-
-		asm volatile(
-		"	lr.w	%0, %1\n"
-		"	and	%0, %0, %3\n"
-		"	xor	%0, %0, %2\n"
-		"	bnez	%0, 1f\n"
-			ZAWRS_WRS_NTO "\n"
-		"1:"
-		: "=&r" (tmp), "+A" (*(__ptr32b))
-		: "r" (__val), "r" (__mask)
-		: "memory");
-		break;
-	case 4:
-		asm volatile(
-		"	lr.w	%0, %1\n"
-		"	xor	%0, %0, %2\n"
-		"	bnez	%0, 1f\n"
-			ZAWRS_WRS_NTO "\n"
-		"1:"
-		: "=&r" (tmp), "+A" (*(u32 *)ptr)
-		: "r" (val));
-		break;
+	if (use_alternative_likely(0, RISCV_ISA_EXT_ZAWRS)) {
+		switch (size) {
+		case 1:
+			__ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
+			__s = ((ulong)(ptr) & 0x3) * BITS_PER_BYTE;
+			__val = val << __s;
+			__mask = 0xff << __s;
+
+			asm volatile(
+			"	lr.w	%0, %1\n"
+			"	and	%0, %0, %3\n"
+			"	xor	%0, %0, %2\n"
+			"	bnez	%0, 1f\n"
+				ZAWRS_WRS_NTO "\n"
+			"1:"
+			: "=&r" (tmp), "+A" (*(__ptr32b))
+			: "r" (__val), "r" (__mask)
+			: "memory");
+			break;
+		case 2:
+			__ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
+			__s = ((ulong)(ptr) & 0x2) * BITS_PER_BYTE;
+			__val = val << __s;
+			__mask = 0xffff << __s;
+
+			asm volatile(
+			"	lr.w	%0, %1\n"
+			"	and	%0, %0, %3\n"
+			"	xor	%0, %0, %2\n"
+			"	bnez	%0, 1f\n"
+				ZAWRS_WRS_NTO "\n"
+			"1:"
+			: "=&r" (tmp), "+A" (*(__ptr32b))
+			: "r" (__val), "r" (__mask)
+			: "memory");
+			break;
+		case 4:
+			asm volatile(
+			"	lr.w	%0, %1\n"
+			"	xor	%0, %0, %2\n"
+			"	bnez	%0, 1f\n"
+				ZAWRS_WRS_NTO "\n"
+			"1:"
+			: "=&r" (tmp), "+A" (*(u32 *)ptr)
+			: "r" (val));
+			break;
 #if __riscv_xlen == 64
-	case 8:
-		asm volatile(
-		"	lr.d	%0, %1\n"
-		"	xor	%0, %0, %2\n"
-		"	bnez	%0, 1f\n"
-			ZAWRS_WRS_NTO "\n"
-		"1:"
-		: "=&r" (tmp), "+A" (*(u64 *)ptr)
-		: "r" (val));
-		break;
+		case 8:
+			asm volatile(
+			"	lr.d	%0, %1\n"
+			"	xor	%0, %0, %2\n"
+			"	bnez	%0, 1f\n"
+				ZAWRS_WRS_NTO "\n"
+			"1:"
+			: "=&r" (tmp), "+A" (*(u64 *)ptr)
+			: "r" (val));
+			break;
 #endif
-	default:
-		BUILD_BUG();
-	}
+		default:
+			BUILD_BUG();
+		}
 
-	return;
+		return;
+	}
 
-no_zawrs:
 	asm volatile(RISCV_PAUSE : : : "memory");
 }
 

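For context: __cmpwait() backs the arch's __cmpwait_relaxed(), which
asm-generic's smp_cond_load_relaxed() uses to wait until a value
changes. The lr.w/lr.d registers a reservation on the word and, if the
value still matches, ZAWRS_WRS_NTO ("wrs.nto") stalls the hart until
the reservation set is written (it may also wake early, which is why
callers re-check); the bnez skips the stall when the value has already
changed. A hypothetical caller, waiting without a hot busy loop:

	/* Load *ptr until it differs from old; stalls via wrs.nto
	 * on Zawrs hardware, plain RISCV_PAUSE spinning otherwise.
	 */
	val = smp_cond_load_relaxed(ptr, VAL != old);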
-- 
2.50.1