author		Michal Simek <monstr@monstr.eu>	2010-02-15 16:41:40 +0100
committer	Michal Simek <monstr@monstr.eu>	2010-03-11 14:12:50 +0100
commit		22607a28213068af113b46862eafa785f00a482e (patch)
tree		c16ac26ef2a6c7e4f04673689dafdd836e01a7a5
parent		dcbae4be907488df5e1cc8a89b7df1a0565c257c (diff)
download	linux-22607a28213068af113b46862eafa785f00a482e.tar.gz
microblaze: Add define for ASM_LOOP
It is the default option, but both options must be measured.

Signed-off-by: Michal Simek <monstr@monstr.eu>
-rw-r--r--	arch/microblaze/kernel/cpu/cache.c	204
1 file changed, 161 insertions(+), 43 deletions(-)
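
For reference, the toggle works as sketched below: defining ASM_LOOP (the default) selects the hand-written descending assembler loop macros, while leaving it undefined falls back to an equivalent C loop that issues one cache opcode per line. This is a minimal illustration distilled from the patch, not part of it; the function name __flush_icache_all_example is hypothetical, while CACHE_ALL_LOOP, the cpuinfo fields, and the "wic" instruction are those used in cache.c.

	/* Sketch only: shows how ASM_LOOP picks the loop flavour in cache.c. */
	#define ASM_LOOP	/* default; comment out to benchmark the C loop */

	static void __flush_icache_all_example(void)	/* hypothetical name */
	{
	#ifndef ASM_LOOP
		int i;
	#endif
	#ifdef ASM_LOOP
		/* descending assembler loop: one "wic" per cache line */
		CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
	#else
		/* equivalent C loop over every cache-line-aligned address */
		for (i = 0; i < cpuinfo.icache_size; i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" : : "r" (i));
	#endif
	}

Both variants invalidate the same set of lines; the define only decides whether the loop counter lives in the assembler template or in C, which is the difference the commit message says should be benchmarked.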
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 13f0c1de3234..f04d8a86dead 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -15,25 +15,6 @@
 #include <asm/cpuinfo.h>
 #include <asm/pvr.h>
 
-static inline void __invalidate_flush_icache(unsigned int addr)
-{
-	__asm__ __volatile__ ("wic	%0, r0;"	\
-					: : "r" (addr));
-}
-
-static inline void __flush_dcache(unsigned int addr)
-{
-	__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
-					: : "r" (addr));
-}
-
-static inline void __invalidate_dcache(unsigned int baseaddr,
-						unsigned int offset)
-{
-	__asm__ __volatile__ ("wdc.clear	%0, %1;"	\
-					: : "r" (baseaddr), "r" (offset));
-}
-
 static inline void __enable_icache_msr(void)
 {
 	__asm__ __volatile__ ("	msrset	r0, %0;		\
@@ -148,9 +129,9 @@ do {									\
 	int step = -line_length;					\
 	BUG_ON(step >= 0);						\
 									\
-	__asm__ __volatile__ (" 1:      " #op " r0, %0;			\
-					bgtid   %0, 1b;			\
-					addk    %0, %0, %1;		\
+	__asm__ __volatile__ (" 1:	" #op "	r0, %0;			\
+					bgtid	%0, 1b;			\
+					addk	%0, %0, %1;		\
 					" : : "r" (len), "r" (step)	\
 					: "memory");			\
 } while (0);
@@ -162,9 +143,9 @@ do {									\
 	int count = end - start;					\
 	BUG_ON(count <= 0);						\
 									\
-	__asm__ __volatile__ (" 1:	" #op " %0, %1;			\
-					bgtid   %1, 1b;			\
-					addk    %1, %1, %2;		\
+	__asm__ __volatile__ (" 1:	" #op "	%0, %1;			\
+					bgtid	%1, 1b;			\
+					addk	%1, %1, %2;		\
 					" : : "r" (start), "r" (count),	\
 					"r" (step) : "memory");		\
 } while (0);
@@ -175,7 +156,7 @@ do {									\
 	int volatile temp;						\
 	BUG_ON(end - start <= 0);					\
 									\
-	__asm__ __volatile__ (" 1:	" #op " %1, r0;			\
+	__asm__ __volatile__ (" 1:	" #op "	%1, r0;			\
 					cmpu	%0, %1, %2;		\
 					bgtid	%0, 1b;			\
 					addk	%1, %1, %3;		\
@@ -183,10 +164,14 @@ do {									\
 					"r" (line_length) : "memory");	\
 } while (0);
 
+#define ASM_LOOP
+
 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 				(unsigned int)start, (unsigned int) end);
 
@@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
 	local_irq_save(flags);
 	__disable_icache_msr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
-
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic	%0, r0;"	\
+				: : "r" (i));
+#endif
 	__enable_icache_msr();
 	local_irq_restore(flags);
 }
@@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
 				unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 				(unsigned int)start, (unsigned int) end);
 
@@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
 	local_irq_save(flags);
 	__disable_icache_nomsr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic	%0, r0;"	\
+				: : "r" (i));
+#endif
 
 	__enable_icache_nomsr();
 	local_irq_restore(flags);
@@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
 static void __flush_icache_range_noirq(unsigned long start,
 				unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 				(unsigned int)start, (unsigned int) end);
 
 	CACHE_LOOP_LIMITS(start, end,
 			cpuinfo.icache_line_length, cpuinfo.icache_size);
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wic	%0, r0;"	\
+				: : "r" (i));
+#endif
 }
 
 static void __flush_icache_all_msr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_icache_msr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
-
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+			__asm__ __volatile__ ("wic	%0, r0;" \
+					: : "r" (i));
+#endif
 	__enable_icache_msr();
 	local_irq_restore(flags);
 }
@@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void)
 static void __flush_icache_all_nomsr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_icache_nomsr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
-
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+			__asm__ __volatile__ ("wic	%0, r0;" \
+					: : "r" (i));
+#endif
 	__enable_icache_nomsr();
 	local_irq_restore(flags);
 }
 
 static void __flush_icache_all_noirq(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+	for (i = 0; i < cpuinfo.icache_size;
+		 i += cpuinfo.icache_line_length)
+			__asm__ __volatile__ ("wic	%0, r0;" \
+					: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_all_msr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_dcache_msr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
-
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;" \
+					: : "r" (i));
+#endif
 	__enable_dcache_msr();
 	local_irq_restore(flags);
 }
@@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void)
 static void __invalidate_dcache_all_nomsr_irq(void)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
 
 	local_irq_save(flags);
 	__disable_dcache_nomsr();
-
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
-
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;" \
+					: : "r" (i));
+#endif
 	__enable_dcache_nomsr();
 	local_irq_restore(flags);
 }
 
 static void __invalidate_dcache_all_noirq_wt(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc	%0, r0;" \
+					: : "r" (i));
+#endif
 }
 
 /* FIXME this is weird - should be only wdc but not work
  * MS: I am getting bus errors and other weird things */
 static void __invalidate_dcache_all_wb(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
 					wdc.clear)
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc.clear	%0, r0;" \
+					: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_range_wb(unsigned long start,
 						unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 				(unsigned int)start, (unsigned int) end);
 
 	CACHE_LOOP_LIMITS(start, end,
 			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wdc.clear	%0, r0;"	\
+				: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
 							unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 				(unsigned int)start, (unsigned int) end);
 	CACHE_LOOP_LIMITS(start, end,
 			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wdc	%0, r0;"	\
+				: : "r" (i));
+#endif
 }
 
 static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
 							unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 				(unsigned int)start, (unsigned int) end);
 	CACHE_LOOP_LIMITS(start, end,
@@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
 	local_irq_save(flags);
 	__disable_dcache_msr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wdc	%0, r0;"	\
+				: : "r" (i));
+#endif
 
 	__enable_dcache_msr();
 	local_irq_restore(flags);
@@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
 							unsigned long end)
 {
 	unsigned long flags;
-
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 				(unsigned int)start, (unsigned int) end);
 
@@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
 	local_irq_save(flags);
 	__disable_dcache_nomsr();
 
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wdc	%0, r0;"	\
+				: : "r" (i));
+#endif
 
 	__enable_dcache_nomsr();
 	local_irq_restore(flags);
@@ -377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
 
 static void __flush_dcache_all_wb(void)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
 	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
 				wdc.flush);
+#else
+	for (i = 0; i < cpuinfo.dcache_size;
+		 i += cpuinfo.dcache_line_length)
+			__asm__ __volatile__ ("wdc.flush	%0, r0;" \
+					: : "r" (i));
+#endif
 }
 
 static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
 {
+#ifndef ASM_LOOP
+	int i;
+#endif
 	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
 				(unsigned int)start, (unsigned int) end);
 
 	CACHE_LOOP_LIMITS(start, end,
 			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
 	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+#else
+	for (i = start; i < end; i += cpuinfo.icache_line_length)
+		__asm__ __volatile__ ("wdc.flush	%0, r0;"	\
+				: : "r" (i));
+#endif
 }
 
 /* struct for wb caches and for wt caches */