author     Stuart Menefy <stuart.menefy@st.com>    2007-11-30 16:12:36 +0900
committer  Paul Mundt <lethal@linux-sh.org>        2008-01-28 13:18:58 +0900
commit     1efe4ce3ca126da77e450d5a83f7201949d76f62 (patch)
tree       fbae9902aa4103a9e86d06f841d580f24682e7b3 /include/asm-sh
parent     53ff09422e5e7a6d6198b767c8f494e43ec8e3ae (diff)
download   linux-1efe4ce3ca126da77e450d5a83f7201949d76f62.tar.gz
sh: GUSA atomic rollback support.
This implements kernel-level atomic rollback built on top of gUSA,
as an alternative non-IRQ based atomicity method. This is generally
a faster method for platforms that are lacking the LL/SC pairs that
SH-4A and later use, and is only supportable on legacy cores.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
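
Every routine added by the new *-grb.h headers below follows the same gUSA bracket: r0 is loaded with the address of the end label, r1 saves the real stack pointer, and r15 (the stack pointer itself) is set to the negative byte size of the critical sequence (LOGIN). If an exception or interrupt arrives while r15 holds such a value, the exception entry code (outside this diffstat, which is limited to include/asm-sh) rolls the sequence back so it re-executes from its start, making the load/modify/store behave atomically without masking interrupts; restoring r15 from r1 at the end label (LOGOUT) leaves the region. As a minimal sketch of that template only, not part of this patch, a hypothetical atomic_xor() would look like:

/* Illustrative only: a hypothetical atomic_xor() built on the same gUSA template. */
static inline void atomic_xor(int i, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = end point */
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = -(region size in bytes) */
		"   mov.l  @%1,   %0      \n\t" /* load old value */
		"   xor     %2,   %0      \n\t" /* xor */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT */
		: "=&r" (tmp),
		  "+r"  (v)
		: "r"   (i)
		: "memory", "r0", "r1");
}
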
Diffstat (limited to 'include/asm-sh')
-rw-r--r--  include/asm-sh/atomic-grb.h    169
-rw-r--r--  include/asm-sh/atomic.h         10
-rw-r--r--  include/asm-sh/bitops-grb.h    169
-rw-r--r--  include/asm-sh/bitops-irq.h     91
-rw-r--r--  include/asm-sh/bitops.h         91
-rw-r--r--  include/asm-sh/cmpxchg-grb.h    70
-rw-r--r--  include/asm-sh/cmpxchg-irq.h    40
-rw-r--r--  include/asm-sh/system.h         40
8 files changed, 557 insertions, 123 deletions
diff --git a/include/asm-sh/atomic-grb.h b/include/asm-sh/atomic-grb.h
new file mode 100644
index 000000000000..4c5b7dbfcedb
--- /dev/null
+++ b/include/asm-sh/atomic-grb.h
@@ -0,0 +1,169 @@
+#ifndef __ASM_SH_ATOMIC_GRB_H
+#define __ASM_SH_ATOMIC_GRB_H
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	int tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load  old value */
+		"   add     %2,   %0      \n\t" /* add */
+		"   mov.l   %0,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (i)
+		: "memory" , "r0", "r1");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	int tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov     r15,  r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load  old value */
+		"   sub     %2,   %0      \n\t" /* sub */
+		"   mov.l   %0,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (i)
+		: "memory" , "r0", "r1");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load  old value */
+		"   add     %2,   %0      \n\t" /* add */
+		"   mov.l   %0,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (i)
+		: "memory" , "r0", "r1");
+
+	return tmp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load  old value */
+		"   sub     %2,   %0      \n\t" /* sub */
+		"   mov.l   %0,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (i)
+		: "memory", "r0", "r1");
+
+	return tmp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	int tmp;
+	unsigned int _mask = ~mask;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load  old value */
+		"   and     %2,   %0      \n\t" /* and */
+		"   mov.l   %0,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (_mask)
+		: "memory" , "r0", "r1");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	int tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load  old value */
+		"   or      %2,   %0      \n\t" /* or */
+		"   mov.l   %0,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r"  (v)
+		: "r"   (mask)
+		: "memory" , "r0", "r1");
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+
+	__asm__ __volatile__ (
+		"   .align 2		\n\t"
+		"   mova     1f,  r0	\n\t"
+		"   nop			\n\t"
+		"   mov     r15,  r1	\n\t"
+		"   mov    #-8,  r15	\n\t"
+		"   mov.l   @%1,  %0	\n\t"
+		"   cmp/eq   %2,  %0	\n\t"
+		"   bf	     1f		\n\t"
+		"   mov.l    %3, @%1	\n\t"
+		"1: mov      r1,  r15	\n\t"
+		: "=&r" (ret)
+		: "r" (v), "r" (old), "r" (new)
+		: "memory" , "r0", "r1" , "t");
+
+	return ret;
+}
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		"   .align 2		\n\t"
+		"   mova    1f,   r0	\n\t"
+		"   nop			\n\t"
+		"   mov    r15,   r1	\n\t"
+		"   mov    #-12,  r15	\n\t"
+		"   mov.l  @%2,   %1	\n\t"
+		"   mov	    %1,   %0    \n\t"
+		"   cmp/eq  %4,   %0	\n\t"
+		"   bt/s    1f		\n\t"
+		"    add    %3,   %1	\n\t"
+		"   mov.l   %1,  @%2	\n\t"
+		"1: mov     r1,   r15	\n\t"
+		: "=&r" (ret), "=&r" (tmp)
+		: "r" (v), "r" (a), "r" (u)
+		: "memory" , "r0", "r1" , "t");
+
+	return ret != u;
+}
+#endif /* __ASM_SH_ATOMIC_GRB_H */
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index e12570b9339d..c043ef003028 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,7 +17,9 @@ typedef struct { volatile int counter; } atomic_t;
 #include <linux/compiler.h>
 #include <asm/system.h>
 
-#ifdef CONFIG_CPU_SH4A
+#if defined(CONFIG_GUSA_RB)
+#include <asm/atomic-grb.h>
+#elif defined(CONFIG_CPU_SH4A)
 #include <asm/atomic-llsc.h>
 #else
 #include <asm/atomic-irq.h>
@@ -44,6 +46,7 @@ typedef struct { volatile int counter; } atomic_t;
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
+#ifndef CONFIG_GUSA_RB
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -58,8 +61,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
@@ -73,6 +74,9 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 	return ret != u;
 }
+#endif
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing on SH */
diff --git a/include/asm-sh/bitops-grb.h b/include/asm-sh/bitops-grb.h
new file mode 100644
index 000000000000..a5907b94395b
--- /dev/null
+++ b/include/asm-sh/bitops-grb.h
@@ -0,0 +1,169 @@
+#ifndef __ASM_SH_BITOPS_GRB_H
+#define __ASM_SH_BITOPS_GRB_H
+
+static inline void set_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+        __asm__ __volatile__ (
+                "   .align 2              \n\t"
+                "   mova    1f,   r0      \n\t" /* r0 = end point */
+                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
+                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+                "   mov.l  @%1,   %0      \n\t" /* load  old value */
+                "   or      %2,   %0      \n\t" /* or */
+                "   mov.l   %0,   @%1     \n\t" /* store new value */
+                "1: mov     r1,   r15     \n\t" /* LOGOUT */
+                : "=&r" (tmp),
+                  "+r"  (a)
+                : "r"   (mask)
+                : "memory" , "r0", "r1");
+}
+
+static inline void clear_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+        unsigned long tmp;
+
+	a += nr >> 5;
+        mask = ~(1 << (nr & 0x1f));
+        __asm__ __volatile__ (
+                "   .align 2              \n\t"
+                "   mova    1f,   r0      \n\t" /* r0 = end point */
+                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
+                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+                "   mov.l  @%1,   %0      \n\t" /* load  old value */
+                "   and     %2,   %0      \n\t" /* and */
+                "   mov.l   %0,   @%1     \n\t" /* store new value */
+                "1: mov     r1,   r15     \n\t" /* LOGOUT */
+                : "=&r" (tmp),
+                  "+r"  (a)
+                : "r"   (mask)
+                : "memory" , "r0", "r1");
+}
+
+static inline void change_bit(int nr, volatile void * addr)
+{
+        int     mask;
+        volatile unsigned int *a = addr;
+        unsigned long tmp;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        __asm__ __volatile__ (
+                "   .align 2              \n\t"
+                "   mova    1f,   r0      \n\t" /* r0 = end point */
+                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
+                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+                "   mov.l  @%1,   %0      \n\t" /* load  old value */
+                "   xor     %2,   %0      \n\t" /* xor */
+                "   mov.l   %0,   @%1     \n\t" /* store new value */
+                "1: mov     r1,   r15     \n\t" /* LOGOUT */
+                : "=&r" (tmp),
+                  "+r"  (a)
+                : "r"   (mask)
+                : "memory" , "r0", "r1");
+}
+
+static inline int test_and_set_bit(int nr, volatile void * addr)
+{
+        int     mask, retval;
+	volatile unsigned int *a = addr;
+        unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+        __asm__ __volatile__ (
+                "   .align 2              \n\t"
+                "   mova    1f,   r0      \n\t" /* r0 = end point */
+                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
+                "   mov   #-14,   r15     \n\t" /* LOGIN: r15 = size */
+                "   mov.l  @%2,   %0      \n\t" /* load old value */
+                "   mov     %0,   %1      \n\t"
+                "   tst     %1,   %3      \n\t" /* T = ((*a & mask) == 0) */
+                "   mov    #-1,   %1      \n\t" /* retval = -1 */
+                "   negc    %1,   %1      \n\t" /* retval = (mask & *a) != 0 */
+                "   or      %3,   %0      \n\t"
+                "   mov.l   %0,  @%2      \n\t" /* store new value */
+                "1: mov     r1,  r15      \n\t" /* LOGOUT */
+                : "=&r" (tmp),
+                  "=&r" (retval),
+                  "+r"  (a)
+                : "r"   (mask)
+                : "memory" , "r0", "r1" ,"t");
+
+        return retval;
+}
+
+static inline int test_and_clear_bit(int nr, volatile void * addr)
+{
+        int     mask, retval,not_mask;
+        volatile unsigned int *a = addr;
+        unsigned long tmp;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+
+	not_mask = ~mask;
+
+        __asm__ __volatile__ (
+                "   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov   #-14,   r15     \n\t" /* LOGIN */
+		"   mov.l  @%2,   %0      \n\t" /* load old value */
+                "   mov     %0,   %1      \n\t" /* %1 = *a */
+                "   tst     %1,   %3      \n\t" /* T = ((*a & mask) == 0) */
+		"   mov    #-1,   %1      \n\t" /* retval = -1 */
+                "   negc    %1,   %1      \n\t" /* retval = (mask & *a) != 0 */
+                "   and     %4,   %0      \n\t"
+                "   mov.l   %0,  @%2      \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "=&r" (retval),
+		  "+r"  (a)
+		: "r"   (mask),
+		  "r"   (not_mask)
+		: "memory" , "r0", "r1", "t");
+
+        return retval;
+}
+
+static inline int test_and_change_bit(int nr, volatile void * addr)
+{
+        int     mask, retval;
+        volatile unsigned int *a = addr;
+        unsigned long tmp;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+
+        __asm__ __volatile__ (
+                "   .align 2              \n\t"
+                "   mova    1f,   r0      \n\t" /* r0 = end point */
+                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
+                "   mov   #-14,   r15     \n\t" /* LOGIN */
+                "   mov.l  @%2,   %0      \n\t" /* load old value */
+                "   mov     %0,   %1      \n\t" /* %1 = *a */
+                "   tst     %1,   %3      \n\t" /* T = ((*a & mask) == 0) */
+                "   mov    #-1,   %1      \n\t" /* retval = -1 */
+                "   negc    %1,   %1      \n\t" /* retval = (mask & *a) != 0 */
+                "   xor     %3,   %0      \n\t"
+                "   mov.l   %0,  @%2      \n\t" /* store new value */
+                "1: mov     r1,   r15     \n\t" /* LOGOUT */
+                : "=&r" (tmp),
+                  "=&r" (retval),
+                  "+r"  (a)
+                : "r"   (mask)
+                : "memory" , "r0", "r1", "t");
+
+        return retval;
+}
+#endif /* __ASM_SH_BITOPS_GRB_H */
diff --git a/include/asm-sh/bitops-irq.h b/include/asm-sh/bitops-irq.h
new file mode 100644
index 000000000000..653a12750584
--- /dev/null
+++ b/include/asm-sh/bitops-irq.h
@@ -0,0 +1,91 @@
+#ifndef __ASM_SH_BITOPS_IRQ_H
+#define __ASM_SH_BITOPS_IRQ_H
+
+static inline void set_bit(int nr, volatile void *addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	*a |= mask;
+	local_irq_restore(flags);
+}
+
+static inline void clear_bit(int nr, volatile void *addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	*a &= ~mask;
+	local_irq_restore(flags);
+}
+
+static inline void change_bit(int nr, volatile void *addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	*a ^= mask;
+	local_irq_restore(flags);
+}
+
+static inline int test_and_set_bit(int nr, volatile void *addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	retval = (mask & *a) != 0;
+	*a |= mask;
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+static inline int test_and_clear_bit(int nr, volatile void *addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	retval = (mask & *a) != 0;
+	*a &= ~mask;
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+static inline int test_and_change_bit(int nr, volatile void *addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save(flags);
+	retval = (mask & *a) != 0;
+	*a ^= mask;
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+#endif /* __ASM_SH_BITOPS_IRQ_H */
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index a7bd81a7f064..b6ba5a60dec2 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -11,97 +11,18 @@
 /* For __swab32 */
 #include <asm/byteorder.h>
 
-static inline void set_bit(int nr, volatile void * addr)
-{
-	int	mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a |= mask;
-	local_irq_restore(flags);
-}
+#ifdef CONFIG_GUSA_RB
+#include <asm/bitops-grb.h>
+#else
+#include <asm/bitops-irq.h>
+#endif
+
 
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
-static inline void clear_bit(int nr, volatile void * addr)
-{
-	int	mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a &= ~mask;
-	local_irq_restore(flags);
-}
-
-static inline void change_bit(int nr, volatile void * addr)
-{
-	int	mask;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a ^= mask;
-	local_irq_restore(flags);
-}
-
-static inline int test_and_set_bit(int nr, volatile void * addr)
-{
-	int	mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-static inline int test_and_clear_bit(int nr, volatile void * addr)
-{
-	int	mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-static inline int test_and_change_bit(int nr, volatile void * addr)
-{
-	int	mask, retval;
-	volatile unsigned int *a = addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
 
 #include <asm-generic/bitops/non-atomic.h>
 
diff --git a/include/asm-sh/cmpxchg-grb.h b/include/asm-sh/cmpxchg-grb.h
new file mode 100644
index 000000000000..e2681abe764f
--- /dev/null
+++ b/include/asm-sh/cmpxchg-grb.h
@@ -0,0 +1,70 @@
+#ifndef __ASM_SH_CMPXCHG_GRB_H
+#define __ASM_SH_CMPXCHG_GRB_H
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   nop                   \n\t"
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-4,   r15     \n\t" /* LOGIN */
+		"   mov.l  @%1,   %0      \n\t" /* load  old value */
+		"   mov.l   %2,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (retval),
+		  "+r"  (m)
+		: "r"   (val)
+		: "memory", "r0", "r1");
+
+	return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__ (
+		"   .align  2             \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN */
+		"   mov.b  @%1,   %0      \n\t" /* load  old value */
+		"   extu.b  %0,   %0      \n\t" /* extend as unsigned */
+		"   mov.b   %2,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (retval),
+		  "+r"  (m)
+		: "r"   (val)
+		: "memory" , "r0", "r1");
+
+	return retval;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+					  unsigned long new)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__ (
+		"   .align  2             \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   nop                   \n\t"
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-8,   r15     \n\t" /* LOGIN */
+		"   mov.l  @%1,   %0      \n\t" /* load  old value */
+		"   cmp/eq  %0,   %2      \n\t"
+		"   bf            1f      \n\t" /* if not equal */
+		"   mov.l   %2,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (retval),
+		  "+r"  (m)
+		: "r"   (new)
+		: "memory" , "r0", "r1", "t");
+
+	return retval;
+}
+
+#endif /* __ASM_SH_CMPXCHG_GRB_H */
diff --git a/include/asm-sh/cmpxchg-irq.h b/include/asm-sh/cmpxchg-irq.h
new file mode 100644
index 000000000000..43049ec0554b
--- /dev/null
+++ b/include/asm-sh/cmpxchg-irq.h
@@ -0,0 +1,40 @@
+#ifndef __ASM_SH_CMPXCHG_IRQ_H
+#define __ASM_SH_CMPXCHG_IRQ_H
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val;
+	local_irq_restore(flags);
+	return retval;
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val & 0xff;
+	local_irq_restore(flags);
+	return retval;
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+	unsigned long new)
+{
+	__u32 retval;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	retval = *m;
+	if (retval == old)
+		*m = new;
+	local_irq_restore(flags);       /* implies memory barrier  */
+	return retval;
+}
+
+#endif /* __ASM_SH_CMPXCHG_IRQ_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index ad3d2a636130..969f3d4afe2a 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -68,27 +68,11 @@
 
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
-static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
-{
-	unsigned long flags, retval;
-
-	local_irq_save(flags);
-	retval = *m;
-	*m = val;
-	local_irq_restore(flags);
-	return retval;
-}
-
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-	unsigned long flags, retval;
-
-	local_irq_save(flags);
-	retval = *m;
-	*m = val & 0xff;
-	local_irq_restore(flags);
-	return retval;
-}
+#ifdef CONFIG_GUSA_RB
+#include <asm/cmpxchg-grb.h>
+#else
+#include <asm/cmpxchg-irq.h>
+#endif
 
 extern void __xchg_called_with_bad_pointer(void);
 
@@ -115,20 +99,6 @@ extern void __xchg_called_with_bad_pointer(void);
 #define xchg(ptr,x)	\
 	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
 
-static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
-	unsigned long new)
-{
-	__u32 retval;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	retval = *m;
-	if (retval == old)
-		*m = new;
-	local_irq_restore(flags);       /* implies memory barrier  */
-	return retval;
-}
-
 /* This function doesn't exist, so you'll get a linker error
  * if something tries to do an invalid cmpxchg(). */
 extern void __cmpxchg_called_with_bad_pointer(void);