author		David Howells <dhowells@redhat.com>	2009-07-02 00:46:16 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-07-01 19:38:09 -0700
commit		00460f41fffc0435dbb6ab4b058a190163d57ce6 (patch)
tree		de37274bc9657fff1a247833e8ead02e38270a2d /arch/frv/include
parent		5a475ce4692f668b2615ae4ea1365c7c2d93f1dd (diff)
download	linux-00460f41fffc0435dbb6ab4b058a190163d57ce6.tar.gz
FRV: Implement atomic64_t
Implement atomic64_t and its ops for FRV.  Tested with the following patch:

	diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
	index 55e4fab..086d50d 100644
	--- a/arch/frv/kernel/setup.c
	+++ b/arch/frv/kernel/setup.c
	@@ -746,6 +746,52 @@ static void __init parse_cmdline_early(char *cmdline)

	 } /* end parse_cmdline_early() */

	+static atomic64_t xxx;
	+
	+static void test_atomic64(void)
	+{
	+	atomic64_set(&xxx, 0x12300000023LL);
	+
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x12300000023LL);
	+	mb();
	+	if (atomic64_inc_return(&xxx) != 0x12300000024LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x12300000024LL);
	+	mb();
	+	if (atomic64_sub_return(0x36900000050LL, &xxx) != -0x2460000002cLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != -0x2460000002cLL);
	+	mb();
	+	if (atomic64_dec_return(&xxx) != -0x2460000002dLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != -0x2460000002dLL);
	+	mb();
	+	if (atomic64_add_return(0x36800000001LL, &xxx) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x121ffffffd4LL);
	+	mb();
	+	if (atomic64_cmpxchg(&xxx, 0x123456789abcdefLL, 0x121ffffffd4LL) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x121ffffffd4LL);
	+	mb();
	+	if (atomic64_cmpxchg(&xxx, 0x121ffffffd4LL, 0x123456789abcdefLL) != 0x121ffffffd4LL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0x123456789abcdefLL);
	+	mb();
	+	if (atomic64_xchg(&xxx, 0xabcdef123456789LL) != 0x123456789abcdefLL)
	+		BUG();
	+	mb();
	+	BUG_ON(atomic64_read(&xxx) != 0xabcdef123456789LL);
	+	mb();
	+}
	+
	 /*****************************************************************************/
	 /*
	  *
	@@ -845,6 +891,8 @@ void __init setup_arch(char **cmdline_p)
	 //	asm volatile("movgs %0,timerd" :: "r"(10000000));
	 //	__set_HSR(0, __get_HSR(0) | HSR0_ETMD);

	+	test_atomic64();
	+
	 } /* end setup_arch() */

	 #if 0

Note that this doesn't cover all the trivial wrappers, but does cover all the
substantial implementations.
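
A similar routine could exercise the trivial wrappers as well.  The sketch
below is not part of the test patch above; it uses only the wrappers defined
in this commit (atomic64_add(), atomic64_sub(), atomic64_inc(), atomic64_dec(),
atomic64_sub_and_test(), atomic64_inc_and_test(), atomic64_dec_and_test() and
atomic64_add_negative()), and the name test_atomic64_wrappers is purely
illustrative:

	static atomic64_t yyy = ATOMIC64_INIT(0x100000000LL);

	static void test_atomic64_wrappers(void)
	{
		/* void-returning arithmetic wrappers */
		atomic64_add(0x7fLL, &yyy);
		BUG_ON(atomic64_read(&yyy) != 0x10000007fLL);
		atomic64_sub(0x80LL, &yyy);
		BUG_ON(atomic64_read(&yyy) != 0xffffffffLL);
		atomic64_inc(&yyy);
		BUG_ON(atomic64_read(&yyy) != 0x100000000LL);
		atomic64_dec(&yyy);
		BUG_ON(atomic64_read(&yyy) != 0xffffffffLL);

		/* predicate wrappers built on the *_return ops */
		BUG_ON(!atomic64_sub_and_test(0xffffffffLL, &yyy));	/* 0xffffffff -> 0 */
		BUG_ON(atomic64_inc_and_test(&yyy));			/* 0 -> 1 */
		BUG_ON(!atomic64_dec_and_test(&yyy));			/* 1 -> 0 */
		BUG_ON(!atomic64_add_negative(-1LL, &yyy));		/* 0 -> -1, negative */
	}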

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/frv/include')
-rw-r--r--	arch/frv/include/asm/atomic.h	68
-rw-r--r--	arch/frv/include/asm/system.h	2
2 files changed, 68 insertions, 2 deletions
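
As a usage sketch (not part of this commit), a 64-bit counterpart to the
existing 32-bit atomic_add_unless() could be layered on the
atomic64_read()/atomic64_cmpxchg() primitives defined in the diff below,
using the usual compare-and-swap retry loop; the name atomic64_add_unless is
used here only for illustration:

	static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
	{
		long long c, old;

		/* Retry until the counter is updated or found to already hold u */
		c = atomic64_read(v);
		while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
			c = old;
		return c != u;
	}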
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 0409d981fd39..00a57af79afc 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -121,10 +121,72 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
 
+/*
+ * 64-bit atomic ops
+ */
+typedef struct {
+	volatile long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+static inline long long atomic64_read(atomic64_t *v)
+{
+	long long counter;
+
+	asm("ldd%I1 %M1,%0"
+	    : "=e"(counter)
+	    : "m"(v->counter));
+	return counter;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+	asm volatile("std%I0 %1,%M0"
+		     : "=m"(v->counter)
+		     : "e"(i));
+}
+
+extern long long atomic64_inc_return(atomic64_t *v);
+extern long long atomic64_dec_return(atomic64_t *v);
+extern long long atomic64_add_return(long long i, atomic64_t *v);
+extern long long atomic64_sub_return(long long i, atomic64_t *v);
+
+static inline long long atomic64_add_negative(long long i, atomic64_t *v)
+{
+	return atomic64_add_return(i, v) < 0;
+}
+
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+	atomic64_add_return(i, v);
+}
+
+static inline void atomic64_sub(long long i, atomic64_t *v)
+{
+	atomic64_sub_return(i, v);
+}
+
+static inline void atomic64_inc(atomic64_t *v)
+{
+	atomic64_inc_return(v);
+}
+
+static inline void atomic64_dec(atomic64_t *v)
+{
+	atomic64_dec_return(v);
+}
+
+#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
+
 /*****************************************************************************/
 /*
  * exchange value with memory
  */
+extern uint64_t __xchg_64(uint64_t i, volatile void *v);
+
 #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
 
 #define xchg(ptr, x)								\
@@ -174,8 +236,10 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
 
 #define tas(ptr) (xchg((ptr), 1))
 
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
+#define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
+#define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
+#define atomic64_xchg(v, new)		(__xchg_64(new, &(v)->counter))
 
 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 {
diff --git a/arch/frv/include/asm/system.h b/arch/frv/include/asm/system.h
index 7742ec000cc4..efd22d9077ac 100644
--- a/arch/frv/include/asm/system.h
+++ b/arch/frv/include/asm/system.h
@@ -208,6 +208,8 @@ extern void free_initmem(void);
  * - if (*ptr == test) then orig = *ptr; *ptr = test;
  * - if (*ptr != test) then orig = *ptr;
  */
+extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v);
+
 #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
 
 #define cmpxchg(ptr, test, new)							\