author		Jim Quinlan <jim2101024@gmail.com>	2012-09-06 11:36:56 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2012-11-09 10:59:21 +0100
commit		e97c5b609880d97313b13eb71830fca62cee50c2 (patch)
tree		d43e3033f37cc9181dfe145b1019237429cfb418 /arch/mips/lib
parent		92d11594f688c8b55b51e80f2eac4417396237a4 (diff)
download	linux-e97c5b609880d97313b13eb71830fca62cee50c2.tar.gz
MIPS: Make irqflags.h functions preempt-safe for non-mipsr2 cpus
For non-MIPSr2 processors, such as the BMIPS 5000, calls to
arch_local_irq_disable() and the other irqflags.h functions may be
preempted, and in doing so a stale value may be restored to c0_status.
This fix disables preemption around the mfc0/mtc0 sequence in those
functions and re-enables it once c0_status has been written.
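
A minimal sketch of the pattern used by the fix (simplified from the
mips-atomic.c code added below; the asm macro body is elided here):

	void arch_local_irq_disable(void)
	{
		preempt_disable();	/* stay on this CPU until c0_status is written */
		__asm__ __volatile__(
			"arch_local_irq_disable"	/* mfc0/ori/xori/mtc0 sequence, see the macro below */
			: /* no outputs */
			: /* no inputs */
			: "memory");
		preempt_enable();
	}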

Those functions that needed this fix have been "outlined" to
mips-atomic.c, as they are no longer good candidates for inlining.

This bug was observed on a BMIPS 5000, occurring once every few hours
in a continuous reboot test.  It was traced to the write_lock_irq()
function, which was being invoked in release_task() in exit.c.
By placing a number of nops between the mfc0/mtc0 pair in
arch_local_irq_disable(), which is called by write_lock_irq(), we
were able to greatly increase the occurrence of this bug.  Conversely,
applying this commit silenced it.
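
For illustration only, a hypothetical version of the pre-patch
non-MIPSr2 disable sequence with the reproduction nops inserted
(function name and exact nop count assumed, not part of this patch;
irq_disable_hazard comes from <asm/hazards.h>):

	static inline void widened_arch_local_irq_disable(void)
	{
		__asm__ __volatile__(
		"	.set	push			\n"
		"	.set	noat			\n"
		"	.set	noreorder		\n"
		"	mfc0	$1, $12			\n"	/* read c0_status */
		"	nop; nop; nop; nop		\n"	/* widened window: preemption here leaves $1 stale */
		"	ori	$1, 0x1f		\n"
		"	xori	$1, 0x1f		\n"	/* clear IE and the other low status bits */
		"	mtc0	$1, $12			\n"	/* may write back a stale c0_status */
		"	irq_disable_hazard		\n"
		"	.set	pop			\n"
		: /* no outputs */
		: /* no inputs */
		: "memory");
	}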

Signed-off-by: Jim Quinlan <jim2101024@gmail.com>
Cc: linux-mips@linux-mips.org
Cc: David Daney <ddaney.cavm@gmail.com>
Cc: Kevin Cernekee <cernekee@gmail.com>
Cc: Jim Quinlan <jim2101024@gmail.com>
Patchwork: https://patchwork.linux-mips.org/patch/4321/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/lib')
-rw-r--r--	arch/mips/lib/Makefile		3
-rw-r--r--	arch/mips/lib/mips-atomic.c	176
2 files changed, 178 insertions(+), 1 deletion(-)
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index a7b893714354..eeddc58802e1 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -3,7 +3,8 @@
 #
 
 lib-y	+= bitops.o csum_partial.o delay.o memcpy.o memset.o \
-	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+	   mips-atomic.o strlen_user.o strncpy_user.o \
+	   strnlen_user.o uncached.o
 
 obj-y			+= iomap.o
 obj-$(CONFIG_PCI)	+= iomap-pci.o
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 000000000000..e091430dbeb1
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <asm/irqflags.h>
+#include <asm/hazards.h>
+#include <linux/compiler.h>
+#include <linux/preempt.h>
+#include <linux/export.h>
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+	"	.macro	arch_local_irq_disable\n"
+	"	.set	push						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1					\n"
+	"	ori	$1, 0x400					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1,$12						\n"
+	"	ori	$1,0x1f						\n"
+	"	xori	$1,0x1f						\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1,$12						\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+void arch_local_irq_disable(void)
+{
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_disable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+	"	.macro	arch_local_irq_save result			\n"
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\result, $2, 1					\n"
+	"	ori	$1, \\result, 0x400				\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $2, 1					\n"
+	"	andi	\\result, \\result, 0x400			\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	\\result, $12					\n"
+	"	ori	$1, \\result, 0x1f				\n"
+	"	xori	$1, 0x1f					\n"
+	"	.set	noreorder					\n"
+	"	mtc0	$1, $12						\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	preempt_disable();
+	asm volatile("arch_local_irq_save\t%0"
+		     : "=r" (flags)
+		     : /* no inputs */
+		     : "memory");
+	preempt_enable();
+	return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+	"	.macro	arch_local_irq_restore flags			\n"
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"mfc0	$1, $2, 1						\n"
+	"andi	\\flags, 0x400						\n"
+	"ori	$1, 0x400						\n"
+	"xori	$1, 0x400						\n"
+	"or	\\flags, $1						\n"
+	"mtc0	\\flags, $2, 1						\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12						\n"
+	"	andi	\\flags, 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	\\flags, $1					\n"
+	"	mtc0	\\flags, $12					\n"
+#endif
+	"	irq_disable_hazard					\n"
+	"	.set	pop						\n"
+	"	.endm							\n");
+
+void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+void __arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */