From: Ingo Molnar <mingo@kernel.org>
Date: Mon, 11 Feb 2019 13:27:05 +0000 (+0100)
Subject: Merge branch 'locking/atomics' into locking/core, to pick up WIP commits
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=41b8687191cfd0326db03b0e82fb09d8c98ca641;p=linux.git

Merge branch 'locking/atomics' into locking/core, to pick up WIP commits

Signed-off-by: Ingo Molnar <mingo@kernel.org>
---

41b8687191cfd0326db03b0e82fb09d8c98ca641
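A note on the atomic_ll_sc.h hunk below: besides growing an explicit size
suffix, __CMPXCHG_CASE now narrows "old" up front for sub-word sizes. The
following user-space sketch (an illustrative assumption, not kernel code;
model_cmpxchg_8() is a hypothetical stand-in for the byte-sized LL/SC
sequence) shows why: ldxrb zero-extends the loaded byte into a full
register, so stale upper bits in a 64-bit "old" would defeat the compare
even when the low byte matches.

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

static u8 model_cmpxchg_8(volatile void *ptr, unsigned long old, u8 new)
{
	volatile u8 *p = ptr;
	u8 oldval;

	old = (u8)old;		/* the fix: drop bits 8..63 of "old" */

	oldval = *p;		/* models ldxrb: byte load, zero-extended */
	if (oldval == old)	/* whole-register compare, as in the asm */
		*p = new;	/* models stxrb on the success path */
	return oldval;
}

int main(void)
{
	u8 v = 0x42;
	/* "old" arrives as an unsigned long with junk in the upper bits. */
	unsigned long old = 0xdeadbeef00000042UL;

	model_cmpxchg_8(&v, old, 0x7f);
	printf("v = 0x%x\n", v);	/* 0x7f: with the cast, the swap hits */
	return 0;
}

Without the "old = (u8)old" narrowing, the comparison would promote oldval
(0x42) to unsigned long and test it against 0xdeadbeef00000042, so the
exchange would spuriously fail.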
diff --cc arch/arm64/include/asm/atomic_ll_sc.h
index af7b990054536,3b5e28d645827..e321293e0c895
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@@ -246,24 -246,15 +246,24 @@@ __LL_SC_PREFIX(arch_atomic64_dec_if_pos
  
  	return result;
  }
- __LL_SC_EXPORT(atomic64_dec_if_positive);
+ __LL_SC_EXPORT(arch_atomic64_dec_if_positive);
  
 -#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)			\
 -__LL_SC_INLINE unsigned long						\
 -__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
 -				     unsigned long old,			\
 -				     unsigned long new))		\
 +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)		\
 +__LL_SC_INLINE u##sz							\
 +__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr,		\
 +					 unsigned long old,		\
 +					 u##sz new))			\
  {									\
 -	unsigned long tmp, oldval;					\
 +	unsigned long tmp;						\
 +	u##sz oldval;							\
 +									\
 +	/*								\
 +	 * Sub-word sizes require explicit casting so that the compare  \
 +	 * part of the cmpxchg doesn't end up interpreting non-zero	\
 +	 * upper bits of the register containing "old".			\
 +	 */								\
 +	if (sz < 32)							\
 +		old = (u##sz)old;					\
  									\
  	asm volatile(							\
  	"	prfm	pstl1strm, %[v]\n"				\
diff --cc arch/arm64/include/asm/cmpxchg.h
index 3f9376f1c409f,e825e61bbfe2f..e6ea0f42e097b
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@@ -177,29 -177,29 +177,29 @@@ __CMPXCHG_GEN(_mb
  	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
  })
  
- #define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
- ({\
- 	int __ret;\
- 	__cmpxchg_double_check(ptr1, ptr2); \
- 	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
- 				     (unsigned long)(n1), (unsigned long)(n2), \
- 				     ptr1); \
- 	__ret; \
+ #define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
+ ({										\
+ 	int __ret;								\
+ 	__cmpxchg_double_check(ptr1, ptr2);					\
+ 	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
+ 				     (unsigned long)(n1), (unsigned long)(n2),	\
+ 				     ptr1);					\
+ 	__ret;									\
  })
  
- #define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
- ({\
- 	int __ret;\
- 	__cmpxchg_double_check(ptr1, ptr2); \
- 	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
- 				  (unsigned long)(n1), (unsigned long)(n2), \
- 				  ptr1); \
- 	__ret; \
+ #define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
+ ({										\
+ 	int __ret;								\
+ 	__cmpxchg_double_check(ptr1, ptr2);					\
+ 	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
+ 				  (unsigned long)(n1), (unsigned long)(n2),	\
+ 				  ptr1);					\
+ 	__ret;									\
  })
  
 -#define __CMPWAIT_CASE(w, sz, name)					\
 -static inline void __cmpwait_case_##name(volatile void *ptr,		\
 -					 unsigned long val)		\
 +#define __CMPWAIT_CASE(w, sfx, sz)					\
 +static inline void __cmpwait_case_##sz(volatile void *ptr,		\
 +				       unsigned long val)		\
  {									\
  	unsigned long tmp;						\
  									\
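The cmpxchg.h hunk gives cmpxchg_double{,_local} the same arch_ prefix as
the rest of this series, leaving the unprefixed names free for generic
wrappers layered on top; note the macros still return 1 on success because
the underlying asm returns 0 when the store succeeds (hence the "!"). The
__CMPWAIT_CASE change follows the suffix scheme used above, keying the
generated helpers by operand size (##sz) rather than by name. As a reminder
of the double-word semantics, here is a user-space model (an illustrative
assumption, not the kernel implementation; model_cmpxchg_double() is
hypothetical, and a GCC/Clang 16-byte atomic builtin stands in for the
kernel's CASP/LL-SC asm; link with -latomic if needed):

#include <stdbool.h>
#include <stdio.h>

struct pair {
	unsigned long first;
	unsigned long second;
} __attribute__((aligned(16)));	/* adjacent, double-word-aligned words */

static bool model_cmpxchg_double(struct pair *p,
				 unsigned long o1, unsigned long o2,
				 unsigned long n1, unsigned long n2)
{
	struct pair expected = { o1, o2 };
	struct pair desired  = { n1, n2 };

	/*
	 * Succeeds only if *both* words still hold (o1, o2); then both
	 * are replaced by (n1, n2) in a single atomic step.
	 */
	return __atomic_compare_exchange(p, &expected, &desired, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct pair p = { 1, 2 };

	printf("ok=%d\n", model_cmpxchg_double(&p, 1, 2, 3, 4)); /* ok=1 */
	printf("ok=%d\n", model_cmpxchg_double(&p, 1, 2, 5, 6)); /* ok=0 */
	printf("pair=%lu,%lu\n", p.first, p.second);		 /* 3,4 */
	return 0;
}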