From d475f3f47a0427dfee483cecf9a7e9109e991423 Mon Sep 17 00:00:00 2001
From: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Date: Fri, 21 Oct 2005 22:06:15 +0400
Subject: [PATCH] alpha: additional smp barriers

As stated in Documentation/atomic_ops.txt, atomic functions that
return values must have memory barriers both before and after the
operation.
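
For example, a hypothetical refcount-style release path built on
atomic_sub_return() depends on both barriers: the barrier before the
ll/sc sequence makes this CPU's prior stores to the object visible
before the count can drop, and the barrier after it keeps the freeing
CPU's loads from being speculated above the decrement.
refcount_release(), struct object and free_object() below are
illustrative names only, not existing kernel interfaces:

	/* Sketch: assumes atomic_sub_return() acts as a full barrier. */
	static void refcount_release(atomic_t *refs, struct object *obj)
	{
		if (atomic_sub_return(1, refs) == 0)
			free_object(obj);	/* safe only with both barriers */
	}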

Thanks to DaveM for pointing that out.

Signed-off-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 include/asm-alpha/atomic.h | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
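
Note that the open-coded "mb" is replaced by smp_mb(), which on Alpha
expands to roughly the following, so SMP builds still get the full
hardware barrier while UP kernels are left with only a compiler
barrier (definitions approximate, for reference):

	#define mb()		__asm__ __volatile__("mb" : : : "memory")
	#ifdef CONFIG_SMP
	#define smp_mb()	mb()
	#else
	#define smp_mb()	barrier()
	#endif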

diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 1b383e3cb68c9..0b40bad002898 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -100,18 +100,19 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 static __inline__ long atomic_add_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	addl %0,%3,%2\n"
 	"	addl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
@@ -120,54 +121,57 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	addq %0,%3,%2\n"
 	"	addq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	subl %0,%3,%2\n"
 	"	subl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	subq %0,%3,%2\n"
 	"	subq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
-- 
2.30.2