From 0b005e118190e19b4d8c6c21791441a59eb8220e Mon Sep 17 00:00:00 2001
From: Julien Grall <julien.grall@arm.com>
Date: Wed, 22 May 2019 13:37:53 -0700
Subject: [PATCH v2 4.10 08/20] xen/arm64: cmpxchg: Simplify the cmpxchg
 implementation

The only difference between the cmpxchg cases is the size of the
access used. Rather than duplicating the code, provide a macro to
generate each case.

This makes the code easier to read and modify.
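
For illustration, the byte-sized instantiation __CMPXCHG_CASE(w, b, 1)
expands to roughly the following (modulo whitespace from token
pasting), with comments added here for clarity:

    static inline unsigned long __cmpxchg_case_1(volatile void *ptr,
                                                 unsigned long old,
                                                 unsigned long new)
    {
        unsigned long res, oldval;

        do {
            /* LL/SC loop: retry while the store-exclusive fails. */
            asm volatile("// __cmpxchg_case_1\n"
            "   ldxrb   %w1, %2\n"      /* load-exclusive current value */
            "   mov     %w0, #0\n"      /* res = 0: assume success */
            "   cmp     %w1, %w3\n"     /* compare against 'old' */
            "   b.ne    1f\n"           /* mismatch: exit with res == 0 */
            "   stxrb   %w0, %w4, %2\n" /* attempt to store 'new' */
            "1:\n"
            : "=&r" (res), "=&r" (oldval),
              "+Q" (*(unsigned long *)ptr)
            : "Ir" (old), "r" (new)
            : "cc");
        } while (res);

        return oldval;
    }

The 'w' and 'sz' parameters select the register width (%wN vs. %N)
and the size suffix on ldxr/stxr, so the 8-byte case passes both as
empty.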

This is part of XSA-295.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Stefano Stabellini <stefanos@xilinx.com>
---
 xen/include/asm-arm/arm64/cmpxchg.h | 98 +++++++++++------------------
 1 file changed, 36 insertions(+), 62 deletions(-)

diff --git a/xen/include/asm-arm/arm64/cmpxchg.h b/xen/include/asm-arm/arm64/cmpxchg.h
index ae42b2f5ff..393fbca6a5 100644
--- a/xen/include/asm-arm/arm64/cmpxchg.h
+++ b/xen/include/asm-arm/arm64/cmpxchg.h
@@ -61,80 +61,54 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	__ret; \
 })
 
-extern void __bad_cmpxchg(volatile void *ptr, int size);
+extern unsigned long __bad_cmpxchg(volatile void *ptr, int size);
+
+#define __CMPXCHG_CASE(w, sz, name)					\
+static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
+						  unsigned long old,	\
+						  unsigned long new)	\
+{									\
+	unsigned long res, oldval;					\
+									\
+	do {								\
+		asm volatile("// __cmpxchg_case_" #name "\n"		\
+		"	ldxr" #sz "	%" #w "1, %2\n"			\
+		"	mov	%w0, #0\n"				\
+		"	cmp	%" #w "1, %" #w "3\n"			\
+		"	b.ne	1f\n"					\
+		"	stxr" #sz "	%w0, %" #w "4, %2\n"		\
+		"1:\n"							\
+		: "=&r" (res), "=&r" (oldval),				\
+		  "+Q" (*(unsigned long *)ptr)				\
+		: "Ir" (old), "r" (new)					\
+		: "cc");						\
+	} while (res);							\
+									\
+	return oldval;							\
+}
+
+__CMPXCHG_CASE(w, b, 1)
+__CMPXCHG_CASE(w, h, 2)
+__CMPXCHG_CASE(w,  , 4)
+__CMPXCHG_CASE( ,  , 8)
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
 {
-	unsigned long oldval = 0, res;
-
 	switch (size) {
 	case 1:
-		do {
-			asm volatile("// __cmpxchg1\n"
-			"	ldxrb	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxrb	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_1(ptr, old, new);
 	case 2:
-		do {
-			asm volatile("// __cmpxchg2\n"
-			"	ldxrh	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxrh	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_2(ptr, old, new);
 	case 4:
-		do {
-			asm volatile("// __cmpxchg4\n"
-			"	ldxr	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxr	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_4(ptr, old, new);
 	case 8:
-		do {
-			asm volatile("// __cmpxchg8\n"
-			"	ldxr	%1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%1, %3\n"
-			"	b.ne	1f\n"
-			"	stxr	%w0, %4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_8(ptr, old, new);
 	default:
-		__bad_cmpxchg(ptr, size);
-		oldval = 0;
+		return __bad_cmpxchg(ptr, size);
 	}
 
-	return oldval;
+	ASSERT_UNREACHABLE();
 }
 
 static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-- 
2.17.1

