author		Ralf Baechle <ralf@linux-mips.org>	2008-04-28 17:14:26 +0100
committer	Ralf Baechle <ralf@linux-mips.org>	2008-04-28 17:14:26 +0100
commit		39b8d5254246ac56342b72f812255c8f7a74dca9 (patch)
tree		a9ec6bfb5d09a8367c34cc2067328d1b49bb46c1
parent		308402445e005a039a72b315cd9b5ceeaea0063c (diff)
download	linux-39b8d5254246ac56342b72f812255c8f7a74dca9.tar.gz
[MIPS] Add support for MIPS CMP platform.
Signed-off-by: Chris Dearman <chris@mips.com>
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--	arch/mips/Kconfig	18
-rw-r--r--	arch/mips/kernel/Makefile	3
-rw-r--r--	arch/mips/kernel/cpu-probe.c	5
-rw-r--r--	arch/mips/kernel/irq-gic.c	295
-rw-r--r--	arch/mips/kernel/smp-cmp.c	265
-rw-r--r--	arch/mips/kernel/smp-mt.c	96
-rw-r--r--	arch/mips/kernel/smp.c	4
-rw-r--r--	arch/mips/kernel/smtc.c	3
-rw-r--r--	arch/mips/kernel/sync-r4k.c	159
-rw-r--r--	arch/mips/kernel/traps.c	111
-rw-r--r--	arch/mips/mips-boards/generic/Makefile	1
-rw-r--r--	arch/mips/mips-boards/generic/amon.c	80
-rw-r--r--	arch/mips/mips-boards/generic/init.c	3
-rw-r--r--	arch/mips/mips-boards/generic/time.c	29
-rw-r--r--	arch/mips/mips-boards/malta/Makefile	1
-rw-r--r--	arch/mips/mips-boards/malta/malta_int.c	346
-rw-r--r--	arch/mips/mips-boards/malta/malta_setup.c	6
-rw-r--r--	arch/mips/mm/c-r4k.c	43
-rw-r--r--	arch/mips/mm/init.c	2
-rw-r--r--	arch/mips/oprofile/common.c	1
-rw-r--r--	arch/mips/oprofile/op_model_mipsxx.c	34
-rw-r--r--	include/asm-mips/cmp.h	18
-rw-r--r--	include/asm-mips/cpu.h	7
-rw-r--r--	include/asm-mips/gcmpregs.h	117
-rw-r--r--	include/asm-mips/gic.h	487
-rw-r--r--	include/asm-mips/mips-boards/launch.h	35
-rw-r--r--	include/asm-mips/mips-boards/malta.h	23
-rw-r--r--	include/asm-mips/mips-boards/maltaint.h	27
-rw-r--r--	include/asm-mips/mips-boards/maltasmp.h	36
-rw-r--r--	include/asm-mips/mipsmtregs.h	8
-rw-r--r--	include/asm-mips/r4k-timer.h	30
-rw-r--r--	include/asm-mips/smp-ops.h	1
-rw-r--r--	include/asm-mips/smtc.h	1
-rw-r--r--	include/asm-mips/smvp.h	19
34 files changed, 2191 insertions(+), 123 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8724ed3298d3..89b03775a195 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -221,6 +221,7 @@ config MIPS_MALTA
 	select DMA_NONCOHERENT
 	select GENERIC_ISA_DMA
 	select IRQ_CPU
+	select IRQ_GIC
 	select HW_HAS_PCI
 	select I8253
 	select I8259
@@ -840,6 +841,9 @@ config MIPS_NILE4
 config MIPS_DISABLE_OBSOLETE_IDE
 	bool
 
+config SYNC_R4K
+	bool
+
 config NO_IOPORT
 	def_bool n
 
@@ -909,6 +913,9 @@ config IRQ_TXX9
 config IRQ_GT641XX
 	bool
 
+config IRQ_GIC
+	bool
+
 config MIPS_BOARDS_GEN
 	bool
 
@@ -1811,6 +1818,17 @@ config NR_CPUS
 	  performance should round up your number of processors to the next
 	  power of two.
 
+config MIPS_CMP
+	bool "MIPS CMP framework support"
+	depends on SMP
+	select SYNC_R4K
+	select SYS_SUPPORTS_SCHED_SMT
+	select WEAK_ORDERING
+	default n
+	help
+	  This is a placeholder option for the GCMP work. It will need to
+	  be handled differently...
+
 source "kernel/time/Kconfig"
 
 #
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 67d97fb02c38..d0ca4d41bb74 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_CEVT_TXX9)		+= cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)	+= csrc-bcm1480.o
 obj-$(CONFIG_CSRC_R4K)		+= csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
+obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 
 binfmt_irix-objs	:= irixelf.o irixinv.o irixioctl.o irixsig.o	\
 			   irix5sys.o sysirix.o
@@ -50,6 +51,7 @@ obj-$(CONFIG_MIPS_MT)		+= mips-mt.o
 obj-$(CONFIG_MIPS_MT_FPAFF)	+= mips-mt-fpaff.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o
+obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
 obj-$(CONFIG_CPU_MIPSR2)	+= spram.o
 
 obj-$(CONFIG_MIPS_APSP_KSPD)	+= kspd.o
@@ -63,6 +65,7 @@ obj-$(CONFIG_IRQ_CPU_RM9K)	+= irq-rm9000.o
 obj-$(CONFIG_MIPS_BOARDS_GEN)	+= irq-msc01.o
 obj-$(CONFIG_IRQ_TXX9)		+= irq_txx9.o
 obj-$(CONFIG_IRQ_GT641XX)	+= irq-gt641xx.o
+obj-$(CONFIG_IRQ_GIC)		+= irq-gic.o
 
 obj-$(CONFIG_32BIT)		+= scall32-o32.o
 obj-$(CONFIG_64BIT)		+= scall64-64.o
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index add717dccf77..a742a967169a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -169,6 +169,7 @@ static inline void check_wait(void)
 
 	case CPU_24K:
 	case CPU_34K:
+	case CPU_1004K:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
@@ -717,6 +718,9 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
 	case PRID_IMP_74K:
 		c->cputype = CPU_74K;
 		break;
+	case PRID_IMP_1004K:
+		c->cputype = CPU_1004K;
+		break;
 	}
 
 	spram_config();
@@ -884,6 +888,7 @@ static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
 	case CPU_24K:		name = "MIPS 24K"; break;
 	case CPU_25KF:		name = "MIPS 25Kf"; break;
 	case CPU_34K:		name = "MIPS 34K"; break;
+	case CPU_1004K:		name = "MIPS 1004K"; break;
 	case CPU_74K:		name = "MIPS 74K"; break;
 	case CPU_VR4111:	name = "NEC VR4111"; break;
 	case CPU_VR4121:	name = "NEC VR4121"; break;
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
new file mode 100644
index 000000000000..f0a4bb19e096
--- /dev/null
+++ b/arch/mips/kernel/irq-gic.c
@@ -0,0 +1,295 @@
+#undef DEBUG
+
+#include <linux/bitmap.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/gic.h>
+#include <asm/gcmpregs.h>
+#include <asm/mips-boards/maltaint.h>
+#include <asm/irq.h>
+#include <linux/hardirq.h>
+#include <asm-generic/bitops/find.h>
+
+
+static unsigned long _gic_base;
+static unsigned int _irqbase, _mapsize, numvpes, numintrs;
+static struct gic_intr_map *_intrmap;
+
+static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
+static struct gic_pending_regs pending_regs[NR_CPUS];
+static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
+
+#define gic_wedgeb2bok 0	/*
+				 * Can GIC handle b2b writes to wedge register?
+				 */
+#if gic_wedgeb2bok == 0
+static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
+#endif
+
+void gic_send_ipi(unsigned int intr)
+{
+#if gic_wedgeb2bok == 0
+	unsigned long flags;
+#endif
+	pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
+		 read_c0_status());
+	if (!gic_wedgeb2bok)
+		spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
+	GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
+	if (!gic_wedgeb2bok) {
+		(void) GIC_REG(SHARED, GIC_SH_CONFIG);
+		spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
+	}
+}
+
+/* This is Malta specific and needs to be exported */
+static void vpe_local_setup(unsigned int numvpes)
+{
+	int i;
+	unsigned long timer_interrupt = 5, perf_interrupt = 5;
+	unsigned int vpe_ctl;
+
+	/*
+	 * Setup the default performance counter timer interrupts
+	 * for all VPEs
+	 */
+	for (i = 0; i < numvpes; i++) {
+		GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+
+		/* Are Interrupts locally routable? */
+		GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
+		if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
+			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
+				 GIC_MAP_TO_PIN_MSK | timer_interrupt);
+
+		if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
+			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
+				 GIC_MAP_TO_PIN_MSK | perf_interrupt);
+	}
+}
+
+unsigned int gic_get_int(void)
+{
+	unsigned int i;
+	unsigned long *pending, *intrmask, *pcpu_mask;
+	unsigned long *pending_abs, *intrmask_abs;
+
+	/* Get per-cpu bitmaps */
+	pending = pending_regs[smp_processor_id()].pending;
+	intrmask = intrmask_regs[smp_processor_id()].intrmask;
+	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
+
+	pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+							 GIC_SH_PEND_31_0_OFS);
+	intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+							  GIC_SH_MASK_31_0_OFS);
+
+	for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
+		GICREAD(*pending_abs, pending[i]);
+		GICREAD(*intrmask_abs, intrmask[i]);
+		pending_abs++;
+		intrmask_abs++;
+	}
+
+	bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
+	bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
+
+	i = find_first_bit(pending, GIC_NUM_INTRS);
+
+	pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);
+
+	return i;
+}
+
+static unsigned int gic_irq_startup(unsigned int irq)
+{
+	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+	irq -= _irqbase;
+	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
+		 1 << (irq % 32));
+	return 0;
+}
+
+static void gic_irq_ack(unsigned int irq)
+{
+#if gic_wedgeb2bok == 0
+	unsigned long flags;
+#endif
+	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+	irq -= _irqbase;
+	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
+		 1 << (irq % 32));
+
+	if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
+		if (!gic_wedgeb2bok)
+			spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
+		GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
+		if (!gic_wedgeb2bok) {
+			(void) GIC_REG(SHARED, GIC_SH_CONFIG);
+			spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
+		}
+	}
+}
+
+static void gic_mask_irq(unsigned int irq)
+{
+	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+	irq -= _irqbase;
+	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
+		 1 << (irq % 32));
+}
+
+static void gic_unmask_irq(unsigned int irq)
+{
+	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+	irq -= _irqbase;
+	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
+		 1 << (irq % 32));
+}
+
+#ifdef CONFIG_SMP
+
+static DEFINE_SPINLOCK(gic_lock);
+
+static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+	cpumask_t	tmp = CPU_MASK_NONE;
+	unsigned long	flags;
+	int		i;
+
+	pr_debug(KERN_DEBUG "%s called\n", __func__);
+	irq -= _irqbase;
+
+	cpus_and(tmp, cpumask, cpu_online_map);
+	if (cpus_empty(tmp))
+		return;
+
+	/* Assumption : cpumask refers to a single CPU */
+	spin_lock_irqsave(&gic_lock, flags);
+	for (;;) {
+		/* Re-route this IRQ */
+		GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
+
+		/*
+		 * FIXME: assumption that _intrmap is ordered and has no holes
+		 */
+
+		/* Update the intr_map */
+		_intrmap[irq].cpunum = first_cpu(tmp);
+
+		/* Update the pcpu_masks */
+		for (i = 0; i < NR_CPUS; i++)
+			clear_bit(irq, pcpu_masks[i].pcpu_mask);
+		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+
+	}
+	irq_desc[irq].affinity = cpumask;
+	spin_unlock_irqrestore(&gic_lock, flags);
+
+}
+#endif
+
+static struct irq_chip gic_irq_controller = {
+	.name		=	"MIPS GIC",
+	.startup	=	gic_irq_startup,
+	.ack		=	gic_irq_ack,
+	.mask		=	gic_mask_irq,
+	.mask_ack	=	gic_mask_irq,
+	.unmask		=	gic_unmask_irq,
+	.eoi		=	gic_unmask_irq,
+#ifdef CONFIG_SMP
+	.set_affinity	=	gic_set_affinity,
+#endif
+};
+
+static void __init setup_intr(unsigned int intr, unsigned int cpu,
+	unsigned int pin, unsigned int polarity, unsigned int trigtype)
+{
+	/* Setup Intr to Pin mapping */
+	if (pin & GIC_MAP_TO_NMI_MSK) {
+		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
+		/* FIXME: hack to route NMI to all cpu's */
+		for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
+			GICWRITE(GIC_REG_ADDR(SHARED,
+					  GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
+				 0xffffffff);
+		}
+	} else {
+		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
+			 GIC_MAP_TO_PIN_MSK | pin);
+		/* Setup Intr to CPU mapping */
+		GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
+	}
+
+	/* Setup Intr Polarity */
+	GIC_SET_POLARITY(intr, polarity);
+
+	/* Setup Intr Trigger Type */
+	GIC_SET_TRIGGER(intr, trigtype);
+
+	/* Init Intr Masks */
+	GIC_SET_INTR_MASK(intr, 0);
+}
+
+static void __init gic_basic_init(void)
+{
+	unsigned int i, cpu;
+
+	/* Setup defaults */
+	for (i = 0; i < GIC_NUM_INTRS; i++) {
+		GIC_SET_POLARITY(i, GIC_POL_POS);
+		GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
+		GIC_SET_INTR_MASK(i, 0);
+	}
+
+	/* Setup specifics */
+	for (i = 0; i < _mapsize; i++) {
+		cpu = _intrmap[i].cpunum;
+		if (cpu == X)
+			continue;
+
+		setup_intr(_intrmap[i].intrnum,
+				_intrmap[i].cpunum,
+				_intrmap[i].pin,
+				_intrmap[i].polarity,
+				_intrmap[i].trigtype);
+		/* Initialise per-cpu Interrupt software masks */
+		if (_intrmap[i].ipiflag)
+			set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
+	}
+
+	vpe_local_setup(numvpes);
+
+	for (i = _irqbase; i < (_irqbase + numintrs); i++)
+		set_irq_chip(i, &gic_irq_controller);
+}
+
+void __init gic_init(unsigned long gic_base_addr,
+		     unsigned long gic_addrspace_size,
+		     struct gic_intr_map *intr_map, unsigned int intr_map_size,
+		     unsigned int irqbase)
+{
+	unsigned int gicconfig;
+
+	_gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
+						    gic_addrspace_size);
+	_irqbase = irqbase;
+	_intrmap = intr_map;
+	_mapsize = intr_map_size;
+
+	GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
+	numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
+		   GIC_SH_CONFIG_NUMINTRS_SHF;
+	numintrs = ((numintrs + 1) * 8);
+
+	numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
+		  GIC_SH_CONFIG_NUMVPES_SHF;
+
+	pr_debug("%s called\n", __func__);
+
+	gic_basic_init();
+}
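
gic_get_int() above picks the interrupt to dispatch by reading the shared pending and mask registers into per-CPU bitmaps, ANDing them with each other and with the CPU's pcpu_mask (which carries its IPI and affinity routing), and taking the lowest set bit. A standalone sketch of that selection, with hypothetical 32-bit register snapshots in place of the GIC reads (GIC_NUM_INTRS is assumed to be 32 here; the real value is probed from GIC_SH_CONFIG in gic_init()):

#include <stdio.h>

#define GIC_NUM_INTRS 32	/* assumption for this sketch only */

/* Combine the three words the way gic_get_int() combines its bitmaps. */
static int pick_pending(unsigned int pending, unsigned int mask, unsigned int pcpu_mask)
{
	unsigned int v = pending & mask & pcpu_mask;
	int i;

	for (i = 0; i < GIC_NUM_INTRS; i++)	/* stands in for find_first_bit() */
		if (v & (1u << i))
			return i;
	return GIC_NUM_INTRS;			/* "nothing pending", as find_first_bit() reports */
}

int main(void)
{
	/* hypothetical snapshot: intrs 3 and 18 pending, 18 masked off, both routed here */
	unsigned int pending   = (1u << 3) | (1u << 18);
	unsigned int mask      = (1u << 3);
	unsigned int pcpu_mask = (1u << 3) | (1u << 18);

	printf("dispatch irq %d\n", pick_pending(pending, mask, pcpu_mask));	/* prints 3 */
	return 0;
}

Interrupt 18 is pending but masked, so only 3 survives the three-way AND; that is the number malta_ipi_irqdispatch() later adds to MIPS_GIC_IRQ_BASE.
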
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
new file mode 100644
index 000000000000..ca476c4f62a5
--- /dev/null
+++ b/arch/mips/kernel/smp-cmp.c
@@ -0,0 +1,265 @@
+/*
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ *    Chris Dearman (chris@mips.com)
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+
+/*
+ * Crude manipulation of the CPU masks to control which
+ * CPUs are brought online during initialisation
+ *
+ * Beware... this needs to be called after CPU discovery
+ * but before CPU bringup
+ */
+static int __init allowcpus(char *str)
+{
+	cpumask_t cpu_allow_map;
+	char buf[256];
+	int len;
+
+	cpus_clear(cpu_allow_map);
+	if (cpulist_parse(str, cpu_allow_map) == 0) {
+		cpu_set(0, cpu_allow_map);
+		cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
+		len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map);
+		buf[len] = '\0';
+		pr_debug("Allowable CPUs: %s\n", buf);
+		return 1;
+	} else
+		return 0;
+}
+__setup("allowcpus=", allowcpus);
+
+static void ipi_call_function(unsigned int cpu)
+{
+	unsigned int action = 0;
+
+	pr_debug("CPU%d: %s cpu %d status %08x\n",
+		 smp_processor_id(), __func__, cpu, read_c0_status());
+
+	switch (cpu) {
+	case 0:
+		action = GIC_IPI_EXT_INTR_CALLFNC_VPE0;
+		break;
+	case 1:
+		action = GIC_IPI_EXT_INTR_CALLFNC_VPE1;
+		break;
+	case 2:
+		action = GIC_IPI_EXT_INTR_CALLFNC_VPE2;
+		break;
+	case 3:
+		action = GIC_IPI_EXT_INTR_CALLFNC_VPE3;
+		break;
+	}
+	gic_send_ipi(action);
+}
+
+
+static void ipi_resched(unsigned int cpu)
+{
+	unsigned int action = 0;
+
+	pr_debug("CPU%d: %s cpu %d status %08x\n",
+		 smp_processor_id(), __func__, cpu, read_c0_status());
+
+	switch (cpu) {
+	case 0:
+		action = GIC_IPI_EXT_INTR_RESCHED_VPE0;
+		break;
+	case 1:
+		action = GIC_IPI_EXT_INTR_RESCHED_VPE1;
+		break;
+	case 2:
+		action = GIC_IPI_EXT_INTR_RESCHED_VPE2;
+		break;
+	case 3:
+		action = GIC_IPI_EXT_INTR_RESCHED_VPE3;
+		break;
+	}
+	gic_send_ipi(action);
+}
+
+/*
+ * FIXME: This isn't restricted to CMP
+ * The SMVP kernel could use GIC interrupts if available
+ */
+void cmp_send_ipi_single(int cpu, unsigned int action)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	switch (action) {
+	case SMP_CALL_FUNCTION:
+		ipi_call_function(cpu);
+		break;
+
+	case SMP_RESCHEDULE_YOURSELF:
+		ipi_resched(cpu);
+		break;
+	}
+
+	local_irq_restore(flags);
+}
+
+static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu_mask(i, mask)
+		cmp_send_ipi_single(i, action);
+}
+
+static void cmp_init_secondary(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+
+	/* Assume GIC is present */
+	change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
+				 STATUSF_IP7);
+
+	/* Enable per-cpu interrupts: platform specific */
+
+	c->core = (read_c0_ebase() >> 1) & 0xff;
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+	c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
+#endif
+#ifdef CONFIG_MIPS_MT_SMTC
+	c->tc_id  = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
+#endif
+}
+
+static void cmp_smp_finish(void)
+{
+	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+	/* CDFIXME: remove this? */
+	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+	local_irq_enable();
+}
+
+static void cmp_cpus_done(void)
+{
+	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it running
+ * smp_bootstrap is the place to resume from
+ * __KSTK_TOS(idle) is apparently the stack pointer
+ * (unsigned long)idle->thread_info the gp
+ */
+static void cmp_boot_secondary(int cpu, struct task_struct *idle)
+{
+	struct thread_info *gp = task_thread_info(idle);
+	unsigned long sp = __KSTK_TOS(idle);
+	unsigned long pc = (unsigned long)&smp_bootstrap;
+	unsigned long a0 = 0;
+
+	pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
+		__func__, cpu);
+
+#if 0
+	/* Needed? */
+	flush_icache_range((unsigned long)gp,
+			   (unsigned long)(gp + sizeof(struct thread_info)));
+#endif
+
+	amon_cpu_start(cpu, pc, sp, gp, a0);
+}
+
+/*
+ * Common setup before any secondaries are started
+ */
+void __init cmp_smp_setup(void)
+{
+	int i;
+	int ncpu = 0;
+
+	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(0, mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+	for (i = 1; i < NR_CPUS; i++) {
+		if (amon_cpu_avail(i)) {
+			cpu_set(i, phys_cpu_present_map);
+			__cpu_number_map[i]	= ++ncpu;
+			__cpu_logical_map[ncpu]	= i;
+		}
+	}
+
+	if (cpu_has_mipsmt) {
+		unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
+
+		nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+		smp_num_siblings = nvpe;
+	}
+	pr_info("Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+void __init cmp_prepare_cpus(unsigned int max_cpus)
+{
+	pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
+		 smp_processor_id(), __func__, max_cpus);
+
+	/*
+	 * FIXME: some of these options are per-system, some per-core and
+	 * some per-cpu
+	 */
+	mips_mt_set_cpuoptions();
+}
+
+struct plat_smp_ops cmp_smp_ops = {
+	.send_ipi_single	= cmp_send_ipi_single,
+	.send_ipi_mask		= cmp_send_ipi_mask,
+	.init_secondary		= cmp_init_secondary,
+	.smp_finish		= cmp_smp_finish,
+	.cpus_done		= cmp_cpus_done,
+	.boot_secondary		= cmp_boot_secondary,
+	.smp_setup		= cmp_smp_setup,
+	.prepare_cpus		= cmp_prepare_cpus,
+};
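
These plat_smp_ops are the hooks the generic MIPS SMP code calls into; a cross-CPU call or reschedule lands in cmp_send_ipi_single(), which maps (cpu, action) onto one of eight per-VPE GIC external interrupts and fires it with gic_send_ipi(). The numbering is easier to see in isolation: in the Malta gic_intr_map later in this patch the IPIs occupy GIC external interrupts 16..23, resched/call alternating per VPE. The literal 16/17 base below is an illustrative reading of that table; the kernel uses the GIC_IPI_EXT_INTR_* macros from maltasmp.h rather than these values.

#include <stdio.h>

/* Sketch only: per-VPE IPI numbering implied by the Malta gic_intr_map. */
static unsigned int resched_ipi(unsigned int cpu) { return 16 + 2 * cpu; }
static unsigned int call_ipi(unsigned int cpu)    { return 17 + 2 * cpu; }

int main(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%u: resched -> ext intr %u, call -> ext intr %u\n",
		       cpu, resched_ipi(cpu), call_ipi(cpu));
	return 0;
}
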
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index e9c393a41775..87a1816c1f45 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -36,63 +36,7 @@
 #include <asm/mipsmtregs.h>
 #include <asm/mips_mt.h>
 
-#define MIPS_CPU_IPI_RESCHED_IRQ 0
-#define MIPS_CPU_IPI_CALL_IRQ 1
-
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
-
-#if 0
-static void dump_mtregisters(int vpe, int tc)
-{
-	printk("vpe %d tc %d\n", vpe, tc);
-
-	settc(tc);
-
-	printk("  c0 status  0x%lx\n", read_vpe_c0_status());
-	printk("  vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
-	printk("  vpeconf0    0x%lx\n", read_vpe_c0_vpeconf0());
-	printk("  tcstatus 0x%lx\n", read_tc_c0_tcstatus());
-	printk("  tcrestart 0x%lx\n", read_tc_c0_tcrestart());
-	printk("  tcbind 0x%lx\n", read_tc_c0_tcbind());
-	printk("  tchalt 0x%lx\n", read_tc_c0_tchalt());
-}
-#endif
-
-static void ipi_resched_dispatch(void)
-{
-	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ipi_call_dispatch(void)
-{
-	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
-	smp_call_function_interrupt();
-
-	return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
-	.handler	= ipi_resched_interrupt,
-	.flags		= IRQF_DISABLED|IRQF_PERCPU,
-	.name		= "IPI_resched"
-};
-
-static struct irqaction irq_call = {
-	.handler	= ipi_call_interrupt,
-	.flags		= IRQF_DISABLED|IRQF_PERCPU,
-	.name		= "IPI_call"
-};
-
-static void __init smp_copy_vpe_config(void)
+static void __init smvp_copy_vpe_config(void)
 {
 	write_vpe_c0_status(
 		(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
@@ -109,7 +53,7 @@ static void __init smp_copy_vpe_config(void)
 	write_vpe_c0_count(read_c0_count());
 }
 
-static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
 	unsigned int ncpu)
 {
 	if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
@@ -135,12 +79,12 @@ static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
 	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
 
 	if (tc != 0)
-		smp_copy_vpe_config();
+		smvp_copy_vpe_config();
 
 	return ncpu;
 }
 
-static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
+static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
 {
 	unsigned long tmp;
 
@@ -207,15 +151,20 @@ static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
 
 static void __cpuinit vsmp_init_secondary(void)
 {
-	/* Enable per-cpu interrupts */
+	extern int gic_present;
 
 	/* This is Malta specific: IPI, performance and timer interrupts */
-	write_c0_status((read_c0_status() & ~ST0_IM ) |
-	                (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
+	if (gic_present)
+		change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+					 STATUSF_IP6 | STATUSF_IP7);
+	else
+		change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+					 STATUSF_IP6 | STATUSF_IP7);
 }
 
 static void __cpuinit vsmp_smp_finish(void)
 {
+	/* CDFIXME: remove this? */
 	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
 
 #ifdef CONFIG_MIPS_MT_FPAFF
@@ -276,7 +225,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
 /*
  * Common setup before any secondaries are started
  * Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
+ * secondaries
  */
 static void __init vsmp_smp_setup(void)
 {
@@ -309,8 +258,8 @@ static void __init vsmp_smp_setup(void)
 	for (tc = 0; tc <= ntc; tc++) {
 		settc(tc);
 
-		smp_tc_init(tc, mvpconf0);
-		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+		smvp_tc_init(tc, mvpconf0);
+		ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
 	}
 
 	/* Release config state */
@@ -324,21 +273,6 @@ static void __init vsmp_smp_setup(void)
 static void __init vsmp_prepare_cpus(unsigned int max_cpus)
 {
 	mips_mt_set_cpuoptions();
-
-	/* set up ipi interrupts */
-	if (cpu_has_vint) {
-		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-	}
-
-	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
-
-	setup_irq(cpu_ipi_resched_irq, &irq_resched);
-	setup_irq(cpu_ipi_call_irq, &irq_call);
-
-	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
 
 struct plat_smp_ops vsmp_smp_ops = {
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9d41dab90a80..33780cc61ce9 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -35,6 +35,7 @@
 #include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/r4k-timer.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
@@ -125,6 +126,8 @@ asmlinkage __cpuinit void start_secondary(void)
 
 	cpu_set(cpu, cpu_callin_map);
 
+	synchronise_count_slave();
+
 	cpu_idle();
 }
 
@@ -287,6 +290,7 @@ void smp_send_stop(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	mp_ops->cpus_done();
+	synchronise_count_master();
 }
 
 /* called from main before smp_init() */
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 4705b3c11e5f..3e863186cd22 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -331,7 +331,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 	/* In general, all TCs should have the same cpu_data indications */
 	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
 	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
-	if (cpu_data[0].cputype == CPU_34K)
+	if (cpu_data[0].cputype == CPU_34K ||
+	    cpu_data[0].cputype == CPU_1004K)
 		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
 	cpu_data[cpu].vpe_id = vpe;
 	cpu_data[cpu].tc_id = tc;
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
new file mode 100644
index 000000000000..9021108eb9c1
--- /dev/null
+++ b/arch/mips/kernel/sync-r4k.c
@@ -0,0 +1,159 @@
+/*
+ * Count register synchronisation.
+ *
+ * All CPUs will have their count registers synchronised to the CPU0 expirelo
+ * value. This can cause a small timewarp for CPU0. All other CPUs should
+ * not have done anything significant (but they may have had interrupts
+ * enabled briefly - prom_smp_finish() should not be responsible for enabling
+ * interrupts...)
+ *
+ * FIXME: broken for SMTC
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/r4k-timer.h>
+
+#include <asm/atomic.h>
+#include <asm/barrier.h>
+#include <asm/cpumask.h>
+#include <asm/mipsregs.h>
+
+static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
+static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
+static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);
+
+#define COUNTON	100
+#define NR_LOOPS 5
+
+void __init synchronise_count_master(void)
+{
+	int i;
+	unsigned long flags;
+	unsigned int initcount;
+	int nslaves;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC needs to synchronise per VPE, not per CPU
+	 * ignore for now
+	 */
+	return;
+#endif
+
+	pr_info("Checking COUNT synchronization across %u CPUs: ",
+		num_online_cpus());
+
+	local_irq_save(flags);
+
+	/*
+	 * Notify the slaves that it's time to start
+	 */
+	atomic_set(&count_start_flag, 1);
+	smp_wmb();
+
+	/* Count will be initialised to expirelo for all CPU's */
+	initcount = expirelo;
+
+	/*
+	 * We loop a few times to get a primed instruction cache,
+	 * then the last pass is more or less synchronised and
+	 * the master and slaves each set their cycle counters to a known
+	 * value all at once. This reduces the chance of having random offsets
+	 * between the processors, and guarantees that the maximum
+	 * delay between the cycle counters is never bigger than
+	 * the latency of information-passing (cachelines) between
+	 * two CPUs.
+	 */
+
+	nslaves = num_online_cpus()-1;
+	for (i = 0; i < NR_LOOPS; i++) {
+		/* slaves loop on '!= ncpus' */
+		while (atomic_read(&count_count_start) != nslaves)
+			mb();
+		atomic_set(&count_count_stop, 0);
+		smp_wmb();
+
+		/* this lets the slaves write their count register */
+		atomic_inc(&count_count_start);
+
+		/*
+		 * Everyone initialises count in the last loop:
+		 */
+		if (i == NR_LOOPS-1)
+			write_c0_count(initcount);
+
+		/*
+		 * Wait for all slaves to leave the synchronization point:
+		 */
+		while (atomic_read(&count_count_stop) != nslaves)
+			mb();
+		atomic_set(&count_count_start, 0);
+		smp_wmb();
+		atomic_inc(&count_count_stop);
+	}
+	/* Arrange for an interrupt in a short while */
+	write_c0_compare(read_c0_count() + COUNTON);
+
+	local_irq_restore(flags);
+
+	/*
+	 * i386 code reported the skew here, but the
+	 * count registers were almost certainly out of sync
+	 * so no point in alarming people
+	 */
+	printk("done.\n");
+}
+
+void __init synchronise_count_slave(void)
+{
+	int i;
+	unsigned long flags;
+	unsigned int initcount;
+	int ncpus;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC needs to synchronise per VPE, not per CPU
+	 * ignore for now
+	 */
+	return;
+#endif
+
+	local_irq_save(flags);
+
+	/*
+	 * Not every cpu is online at the time this gets called,
+	 * so we first wait for the master to say everyone is ready
+	 */
+
+	while (!atomic_read(&count_start_flag))
+		mb();
+
+	/* Count will be initialised to expirelo for all CPU's */
+	initcount = expirelo;
+
+	ncpus = num_online_cpus();
+	for (i = 0; i < NR_LOOPS; i++) {
+		atomic_inc(&count_count_start);
+		while (atomic_read(&count_count_start) != ncpus)
+			mb();
+
+		/*
+		 * Everyone initialises count in the last loop:
+		 */
+		if (i == NR_LOOPS-1)
+			write_c0_count(initcount);
+
+		atomic_inc(&count_count_stop);
+		while (atomic_read(&count_count_stop) != ncpus)
+			mb();
+	}
+	/* Arrange for an interrupt in a short while */
+	write_c0_compare(read_c0_count() + COUNTON);
+
+	local_irq_restore(flags);
+}
+#undef NR_LOOPS
+#endif
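
The comment block in synchronise_count_master() above describes the rendezvous: two shared counters (count_count_start/count_count_stop) let master and slaves lock-step through NR_LOOPS passes so that, on the final pass, every CPU writes its Count register within a few cache-line transfers of the others. The same protocol can be exercised in user space with threads standing in for CPUs; the sketch below mirrors the kernel loops (thread count, timestamps and busy-waits are illustrative stand-ins, not part of the patch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NSLAVES  3
#define NCPUS    (NSLAVES + 1)
#define NR_LOOPS 5

static atomic_int count_start_flag, count_count_start, count_count_stop;
static unsigned long stamp[NCPUS];

static unsigned long now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void *slave(void *arg)
{
	long id = (long)arg;
	int i;

	while (!atomic_load(&count_start_flag))		/* wait for the master's go */
		;
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_fetch_add(&count_count_start, 1);
		while (atomic_load(&count_count_start) != NCPUS)
			;
		if (i == NR_LOOPS - 1)
			stamp[id] = now();		/* stands in for write_c0_count(initcount) */
		atomic_fetch_add(&count_count_stop, 1);
		while (atomic_load(&count_count_stop) != NCPUS)
			;
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NSLAVES];
	long i;

	for (i = 0; i < NSLAVES; i++)
		pthread_create(&t[i], NULL, slave, (void *)(i + 1));

	atomic_store(&count_start_flag, 1);		/* "notify the slaves" */
	for (i = 0; i < NR_LOOPS; i++) {
		while (atomic_load(&count_count_start) != NSLAVES)
			;
		atomic_store(&count_count_stop, 0);
		atomic_fetch_add(&count_count_start, 1);	/* lets the slaves proceed */
		if (i == NR_LOOPS - 1)
			stamp[0] = now();
		while (atomic_load(&count_count_stop) != NSLAVES)
			;
		atomic_store(&count_count_start, 0);
		atomic_fetch_add(&count_count_stop, 1);
	}
	for (i = 0; i < NSLAVES; i++)
		pthread_join(t[i], NULL);
	for (i = 0; i < NCPUS; i++)
		printf("cpu%ld stamp %lu\n", i, stamp[i]);
	return 0;
}

The printed timestamps differ only by the cost of bouncing the counter cache line between threads, which is the bound the kernel comment refers to.
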
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d51f4e98455f..88185cd40c3b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -22,6 +22,7 @@
 #include <linux/kallsyms.h>
 #include <linux/bootmem.h>
 #include <linux/interrupt.h>
+#include <linux/ptrace.h>
 
 #include <asm/bootinfo.h>
 #include <asm/branch.h>
@@ -80,19 +81,22 @@ void (*board_bind_eic_interrupt)(int irq, int regset);
 
 static void show_raw_backtrace(unsigned long reg29)
 {
-	unsigned long *sp = (unsigned long *)reg29;
+	unsigned long *sp = (unsigned long *)(reg29 & ~3);
 	unsigned long addr;
 
 	printk("Call Trace:");
 #ifdef CONFIG_KALLSYMS
 	printk("\n");
 #endif
-	while (!kstack_end(sp)) {
-		addr = *sp++;
-		if (__kernel_text_address(addr))
-			print_ip_sym(addr);
+#define IS_KVA01(a) ((((unsigned int)a) & 0xc0000000) == 0x80000000)
+	if (IS_KVA01(sp)) {
+		while (!kstack_end(sp)) {
+			addr = *sp++;
+			if (__kernel_text_address(addr))
+				print_ip_sym(addr);
+		}
+		printk("\n");
 	}
-	printk("\n");
 }
 
 #ifdef CONFIG_KALLSYMS
@@ -192,16 +196,19 @@ EXPORT_SYMBOL(dump_stack);
 static void show_code(unsigned int __user *pc)
 {
 	long i;
+	unsigned short __user *pc16 = NULL;
 
 	printk("\nCode:");
 
+	if ((unsigned long)pc & 1)
+		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 	for(i = -3 ; i < 6 ; i++) {
 		unsigned int insn;
-		if (__get_user(insn, pc + i)) {
+		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
 			printk(" (Bad address in epc)\n");
 			break;
 		}
-		printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
+		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 	}
 }
 
@@ -311,10 +318,21 @@ void show_regs(struct pt_regs *regs)
 
 void show_registers(const struct pt_regs *regs)
 {
+	const int field = 2 * sizeof(unsigned long);
+
 	__show_regs(regs);
 	print_modules();
-	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
-	        current->comm, task_pid_nr(current), current_thread_info(), current);
+	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
+	       current->comm, current->pid, current_thread_info(), current,
+	      field, current_thread_info()->tp_value);
+	if (cpu_has_userlocal) {
+		unsigned long tls;
+
+		tls = read_c0_userlocal();
+		if (tls != current_thread_info()->tp_value)
+			printk("*HwTLS: %0*lx\n", field, tls);
+	}
+
 	show_stacktrace(current, regs);
 	show_code((unsigned int __user *) regs->cp0_epc);
 	printk("\n");
@@ -985,6 +1003,21 @@ asmlinkage void do_reserved(struct pt_regs *regs)
 	      (regs->cp0_cause & 0x7f) >> 2);
 }
 
+static int __initdata l1parity = 1;
+static int __init nol1parity(char *s)
+{
+	l1parity = 0;
+	return 1;
+}
+__setup("nol1par", nol1parity);
+static int __initdata l2parity = 1;
+static int __init nol2parity(char *s)
+{
+	l2parity = 0;
+	return 1;
+}
+__setup("nol2par", nol2parity);
+
 /*
  * Some MIPS CPUs can enable/disable for cache parity detection, but do
  * it different ways.
@@ -994,6 +1027,62 @@ static inline void parity_protection_init(void)
 	switch (current_cpu_type()) {
 	case CPU_24K:
 	case CPU_34K:
+	case CPU_74K:
+	case CPU_1004K:
+		{
+#define ERRCTL_PE	0x80000000
+#define ERRCTL_L2P	0x00800000
+			unsigned long errctl;
+			unsigned int l1parity_present, l2parity_present;
+
+			errctl = read_c0_ecc();
+			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
+
+			/* probe L1 parity support */
+			write_c0_ecc(errctl | ERRCTL_PE);
+			back_to_back_c0_hazard();
+			l1parity_present = (read_c0_ecc() & ERRCTL_PE);
+
+			/* probe L2 parity support */
+			write_c0_ecc(errctl|ERRCTL_L2P);
+			back_to_back_c0_hazard();
+			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
+
+			if (l1parity_present && l2parity_present) {
+				if (l1parity)
+					errctl |= ERRCTL_PE;
+				if (l1parity ^ l2parity)
+					errctl |= ERRCTL_L2P;
+			} else if (l1parity_present) {
+				if (l1parity)
+					errctl |= ERRCTL_PE;
+			} else if (l2parity_present) {
+				if (l2parity)
+					errctl |= ERRCTL_L2P;
+			} else {
+				/* No parity available */
+			}
+
+			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
+
+			write_c0_ecc(errctl);
+			back_to_back_c0_hazard();
+			errctl = read_c0_ecc();
+			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
+
+			if (l1parity_present)
+				printk(KERN_INFO "Cache parity protection %sabled\n",
+				       (errctl & ERRCTL_PE) ? "en" : "dis");
+
+			if (l2parity_present) {
+				if (l1parity_present && l1parity)
+					errctl ^= ERRCTL_L2P;
+				printk(KERN_INFO "L2 cache parity protection %sabled\n",
+				       (errctl & ERRCTL_L2P) ? "en" : "dis");
+			}
+		}
+		break;
+
 	case CPU_5KC:
 		write_c0_ecc(0x80000000);
 		back_to_back_c0_hazard();
@@ -1353,7 +1442,6 @@ void __cpuinit per_cpu_trap_init(void)
 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
 			 status_set);
 
-#ifdef CONFIG_CPU_MIPSR2
 	if (cpu_has_mips_r2) {
 		unsigned int enable = 0x0000000f;
 
@@ -1362,7 +1450,6 @@ void __cpuinit per_cpu_trap_init(void)
 
 		write_c0_hwrena(enable);
 	}
-#endif
 
 #ifdef CONFIG_MIPS_MT_SMTC
 	if (!secondaryTC) {
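
The new parity_protection_init() branch above first probes which enable bits actually stick in ErrCtl (write ERRCTL_PE and ERRCTL_L2P, read them back) and then applies the nol1par/nol2par command-line requests. For a core where both probes succeed, the bit pattern the code writes works out as follows (derived purely from the if/else chain above, not from hardware documentation):

	l1parity  l2parity  ->  ERRCTL_PE  ERRCTL_L2P
	   1         1              1          0
	   1         0              1          1
	   0         1              0          1
	   0         0              0          0

That is, when L1 parity is enabled the L2P bit only records that the two requests differ, which is why the final L2 report XORs ERRCTL_L2P back against the L1 setting before printing.
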
diff --git a/arch/mips/mips-boards/generic/Makefile b/arch/mips/mips-boards/generic/Makefile
index b31d8dfed1be..f7f87fc09d1e 100644
--- a/arch/mips/mips-boards/generic/Makefile
+++ b/arch/mips/mips-boards/generic/Makefile
@@ -20,6 +20,7 @@
 
 obj-y				:= reset.o display.o init.o memory.o \
 				   cmdline.o time.o
+obj-y				+= amon.o
 
 obj-$(CONFIG_EARLY_PRINTK)	+= console.o
 obj-$(CONFIG_PCI)		+= pci.o
diff --git a/arch/mips/mips-boards/generic/amon.c b/arch/mips/mips-boards/generic/amon.c
new file mode 100644
index 000000000000..b7633fda4180
--- /dev/null
+++ b/arch/mips/mips-boards/generic/amon.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2007  MIPS Technologies, Inc.
+ *	All rights reserved.
+
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Arbitrary Monitor interface
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm-mips/addrspace.h>
+#include <asm-mips/mips-boards/launch.h>
+#include <asm-mips/mipsmtregs.h>
+
+int amon_cpu_avail(int cpu)
+{
+	struct cpulaunch *launch = (struct cpulaunch *)KSEG0ADDR(CPULAUNCH);
+
+	if (cpu < 0 || cpu >= NCPULAUNCH) {
+		pr_debug("avail: cpu%d is out of range\n", cpu);
+		return 0;
+	}
+
+	launch += cpu;
+	if (!(launch->flags & LAUNCH_FREADY)) {
+		pr_debug("avail: cpu%d is not ready\n", cpu);
+		return 0;
+	}
+	if (launch->flags & (LAUNCH_FGO|LAUNCH_FGONE)) {
+		pr_debug("avail: too late.. cpu%d is already gone\n", cpu);
+		return 0;
+	}
+
+	return 1;
+}
+
+void amon_cpu_start(int cpu,
+		    unsigned long pc, unsigned long sp,
+		    unsigned long gp, unsigned long a0)
+{
+	volatile struct cpulaunch *launch =
+		(struct cpulaunch  *)KSEG0ADDR(CPULAUNCH);
+
+	if (!amon_cpu_avail(cpu))
+		return;
+	if (cpu == smp_processor_id()) {
+		pr_debug("launch: I am cpu%d!\n", cpu);
+		return;
+	}
+	launch += cpu;
+
+	pr_debug("launch: starting cpu%d\n", cpu);
+
+	launch->pc = pc;
+	launch->gp = gp;
+	launch->sp = sp;
+	launch->a0 = a0;
+
+	/* Make sure target sees parameters before the go bit */
+	smp_mb();
+
+	launch->flags |= LAUNCH_FGO;
+	while ((launch->flags & LAUNCH_FGONE) == 0)
+		;
+	pr_debug("launch: cpu%d gone!\n", cpu);
+}
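
amon_cpu_start() above is one half of a handshake: the kernel fills in pc/sp/gp/a0 in the cpulaunch slot, sets LAUNCH_FGO, and spins until the monitor acknowledges with LAUNCH_FGONE. The other half lives in the boot-monitor firmware and is not part of this patch; the fragment below is a hypothetical C rendering of it, using only the fields amon.c touches. The flag values are illustrative, and a real monitor loads sp/gp/a0 into registers in assembly before jumping rather than calling through a prototype.

/* Hypothetical monitor-side loop for one secondary CPU (sketch only). */
struct cpulaunch {
	unsigned long	pc;
	unsigned long	gp;
	unsigned long	sp;
	unsigned long	a0;
	unsigned long	flags;		/* the real launch.h layout may differ */
};

#define LAUNCH_FREADY	1		/* illustrative bit values only */
#define LAUNCH_FGO	2
#define LAUNCH_FGONE	4

void monitor_secondary_loop(volatile struct cpulaunch *launch)
{
	void (*entry)(void);

	launch->flags |= LAUNCH_FREADY;		/* amon_cpu_avail() checks this */
	while (!(launch->flags & LAUNCH_FGO))
		;				/* spin until amon_cpu_start() sets GO */

	entry = (void (*)(void))launch->pc;
	launch->flags |= LAUNCH_FGONE;		/* lets amon_cpu_start() stop waiting */
	entry();				/* jump to smp_bootstrap with sp/gp/a0 installed */
}
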
diff --git a/arch/mips/mips-boards/generic/init.c b/arch/mips/mips-boards/generic/init.c
index 07671fb9074f..852b19492d8c 100644
--- a/arch/mips/mips-boards/generic/init.c
+++ b/arch/mips/mips-boards/generic/init.c
@@ -424,6 +424,9 @@ void __init prom_init(void)
 #ifdef CONFIG_SERIAL_8250_CONSOLE
 	console_config();
 #endif
+#ifdef CONFIG_MIPS_CMP
+	register_smp_ops(&cmp_smp_ops);
+#endif
 #ifdef CONFIG_MIPS_MT_SMP
 	register_smp_ops(&vsmp_smp_ops);
 #endif
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index b50e0fc406ac..4fe62fca994e 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -55,16 +55,36 @@
 unsigned long cpu_khz;
 
 static int mips_cpu_timer_irq;
+static int mips_cpu_perf_irq;
 extern int cp0_perfcount_irq;
 
+DEFINE_PER_CPU(unsigned int, tickcount);
+#define tickcount_this_cpu __get_cpu_var(tickcount)
+static unsigned long ledbitmask;
+
 static void mips_timer_dispatch(void)
 {
+#if defined(CONFIG_MIPS_MALTA) || defined(CONFIG_MIPS_ATLAS)
+	/*
+	 * Yes, this is very tacky, won't work as expected with SMTC and
+	 * dyntick will break it,
+	 * but it gives me a nice warm feeling during debug
+	 */
+#define LEDBAR 0xbf000408
+	if (tickcount_this_cpu++ >= HZ) {
+		tickcount_this_cpu = 0;
+		change_bit(smp_processor_id(), &ledbitmask);
+		smp_wmb(); /* Make sure every one else sees the change */
+		/* This will pick up any recent changes made by other CPU's */
+		*(unsigned int *)LEDBAR = ledbitmask;
+	}
+#endif
 	do_IRQ(mips_cpu_timer_irq);
 }
 
 static void mips_perf_dispatch(void)
 {
-	do_IRQ(cp0_perfcount_irq);
+	do_IRQ(mips_cpu_perf_irq);
 }
 
 /*
@@ -129,19 +149,18 @@ unsigned long read_persistent_clock(void)
 
 void __init plat_perf_setup(void)
 {
-	cp0_perfcount_irq = -1;
-
 #ifdef MSC01E_INT_BASE
 	if (cpu_has_veic) {
 		set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch);
-		cp0_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
+		mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
 	} else
 #endif
 	if (cp0_perfcount_irq >= 0) {
 		if (cpu_has_vint)
 			set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
+		mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
 #ifdef CONFIG_SMP
-		set_irq_handler(cp0_perfcount_irq, handle_percpu_irq);
+		set_irq_handler(mips_cpu_perf_irq, handle_percpu_irq);
 #endif
 	}
 }
diff --git a/arch/mips/mips-boards/malta/Makefile b/arch/mips/mips-boards/malta/Makefile
index 931ca4600a63..8dc6e2ac4c03 100644
--- a/arch/mips/mips-boards/malta/Makefile
+++ b/arch/mips/mips-boards/malta/Makefile
@@ -22,6 +22,7 @@
 obj-y := malta_int.o malta_platform.o malta_setup.o
 
 obj-$(CONFIG_MTD) += malta_mtd.o
+# FIXME FIXME FIXME
 obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c
index dbe60eb55e29..e1744ae855ce 100644
--- a/arch/mips/mips-boards/malta/malta_int.c
+++ b/arch/mips/mips-boards/malta/malta_int.c
@@ -31,6 +31,7 @@
 #include <linux/kernel.h>
 #include <linux/random.h>
 
+#include <asm/traps.h>
 #include <asm/i8259.h>
 #include <asm/irq_cpu.h>
 #include <asm/irq_regs.h>
@@ -41,6 +42,14 @@
 #include <asm/mips-boards/generic.h>
 #include <asm/mips-boards/msc01_pci.h>
 #include <asm/msc01_ic.h>
+#include <asm/gic.h>
+#include <asm/gcmpregs.h>
+
+int gcmp_present = -1;
+int gic_present;
+static unsigned long _msc01_biu_base;
+static unsigned long _gcmp_base;
+static unsigned int ipi_map[NR_CPUS];
 
 static DEFINE_SPINLOCK(mips_irq_lock);
 
@@ -121,6 +130,17 @@ static void malta_hw0_irqdispatch(void)
 	do_IRQ(MALTA_INT_BASE + irq);
 }
 
+static void malta_ipi_irqdispatch(void)
+{
+	int irq;
+
+	irq = gic_get_int();
+	if (irq < 0)
+		return;  /* interrupt has already been cleared */
+
+	do_IRQ(MIPS_GIC_IRQ_BASE + irq);
+}
+
 static void corehi_irqdispatch(void)
 {
 	unsigned int intedge, intsteer, pcicmd, pcibadaddr;
@@ -257,12 +277,61 @@ asmlinkage void plat_irq_dispatch(void)
 
 	if (irq == MIPSCPU_INT_I8259A)
 		malta_hw0_irqdispatch();
+	else if (gic_present && ((1 << irq) & ipi_map[smp_processor_id()]))
+		malta_ipi_irqdispatch();
 	else if (irq >= 0)
 		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
 	else
 		spurious_interrupt();
 }
 
+#ifdef CONFIG_MIPS_MT_SMP
+
+
+#define GIC_MIPS_CPU_IPI_RESCHED_IRQ	3
+#define GIC_MIPS_CPU_IPI_CALL_IRQ	4
+
+#define MIPS_CPU_IPI_RESCHED_IRQ 0	/* SW int 0 for resched */
+#define C_RESCHED C_SW0
+#define MIPS_CPU_IPI_CALL_IRQ 1		/* SW int 1 for call */
+#define C_CALL C_SW1
+static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
+
+static void ipi_resched_dispatch(void)
+{
+	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
+}
+
+static void ipi_call_dispatch(void)
+{
+	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
+}
+
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+	smp_call_function_interrupt();
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+	.handler	= ipi_resched_interrupt,
+	.flags		= IRQF_DISABLED|IRQF_PERCPU,
+	.name		= "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+	.handler	= ipi_call_interrupt,
+	.flags		= IRQF_DISABLED|IRQF_PERCPU,
+	.name		= "IPI_call"
+};
+#endif /* CONFIG_MIPS_MT_SMP */
+
 static struct irqaction i8259irq = {
 	.handler = no_action,
 	.name = "XT-PIC cascade"
@@ -291,15 +360,90 @@ msc_irqmap_t __initdata msc_eicirqmap[] = {
 	{MSC01E_INT_PERFCTR,		MSC01_IRQ_LEVEL, 0},
 	{MSC01E_INT_CPUCTR,		MSC01_IRQ_LEVEL, 0}
 };
+
 int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
 
+/*
+ * This GIC specific tabular array defines the association between External
+ * Interrupts and CPUs/Core Interrupts. The nature of the External
+ * Interrupts is also defined here - polarity/trigger.
+ */
+static struct gic_intr_map gic_intr_map[] = {
+	{ GIC_EXT_INTR(0), 	X,	X,		X, 		X,		0 },
+	{ GIC_EXT_INTR(1), 	X,	X,		X, 		X,		0 },
+	{ GIC_EXT_INTR(2), 	X,	X,		X, 		X,		0 },
+	{ GIC_EXT_INTR(3), 	0,	GIC_CPU_INT0,	GIC_POL_POS, 	GIC_TRIG_LEVEL,	0 },
+	{ GIC_EXT_INTR(4), 	0,	GIC_CPU_INT1,	GIC_POL_POS, 	GIC_TRIG_LEVEL,	0 },
+	{ GIC_EXT_INTR(5), 	0,	GIC_CPU_INT2,	GIC_POL_POS, 	GIC_TRIG_LEVEL,	0 },
+	{ GIC_EXT_INTR(6), 	0,	GIC_CPU_INT3,	GIC_POL_POS, 	GIC_TRIG_LEVEL,	0 },
+	{ GIC_EXT_INTR(7), 	0,	GIC_CPU_INT4,	GIC_POL_POS, 	GIC_TRIG_LEVEL,	0 },
+	{ GIC_EXT_INTR(8), 	0,	GIC_CPU_INT3,	GIC_POL_POS, 	GIC_TRIG_LEVEL,	0 },
+	{ GIC_EXT_INTR(9), 	0,	GIC_CPU_INT3,	GIC_POL_POS, 	GIC_TRIG_LEVEL,	0 },
+	{ GIC_EXT_INTR(10), 	X,	X,		X, 		X,		0 },
+	{ GIC_EXT_INTR(11), 	X,	X,		X, 		X,		0 },
+	{ GIC_EXT_INTR(12), 	0,	GIC_CPU_INT3,	GIC_POL_POS, 	GIC_TRIG_LEVEL,	0 },
+	{ GIC_EXT_INTR(13), 	0,	GIC_MAP_TO_NMI_MSK,	GIC_POL_POS, GIC_TRIG_LEVEL,	0 },
+	{ GIC_EXT_INTR(14), 	0,	GIC_MAP_TO_NMI_MSK,	GIC_POL_POS, GIC_TRIG_LEVEL,	0 },
+	{ GIC_EXT_INTR(15), 	X,	X,		X, 		X,		0 },
+	{ GIC_EXT_INTR(16), 	0,	GIC_CPU_INT1,	GIC_POL_POS, GIC_TRIG_EDGE,	1 },
+	{ GIC_EXT_INTR(17), 	0,	GIC_CPU_INT2,	GIC_POL_POS, GIC_TRIG_EDGE,	1 },
+	{ GIC_EXT_INTR(18), 	1,	GIC_CPU_INT1,	GIC_POL_POS, GIC_TRIG_EDGE,	1 },
+	{ GIC_EXT_INTR(19), 	1,	GIC_CPU_INT2,	GIC_POL_POS, GIC_TRIG_EDGE,	1 },
+	{ GIC_EXT_INTR(20), 	2,	GIC_CPU_INT1,	GIC_POL_POS, GIC_TRIG_EDGE,	1 },
+	{ GIC_EXT_INTR(21), 	2,	GIC_CPU_INT2,	GIC_POL_POS, GIC_TRIG_EDGE,	1 },
+	{ GIC_EXT_INTR(22), 	3,	GIC_CPU_INT1,	GIC_POL_POS, GIC_TRIG_EDGE,	1 },
+	{ GIC_EXT_INTR(23), 	3,	GIC_CPU_INT2,	GIC_POL_POS, GIC_TRIG_EDGE,	1 },
+};
+
+/*
+ * GCMP needs to be detected before any SMP initialisation
+ */
+int __init gcmp_probe(unsigned long addr, unsigned long size)
+{
+	if (gcmp_present >= 0)
+		return gcmp_present;
+
+	_gcmp_base = (unsigned long) ioremap_nocache(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
+	_msc01_biu_base = (unsigned long) ioremap_nocache(MSC01_BIU_REG_BASE, MSC01_BIU_ADDRSPACE_SZ);
+	gcmp_present = (GCMPGCB(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) == GCMP_BASE_ADDR;
+
+	if (gcmp_present)
+		printk(KERN_DEBUG "GCMP present\n");
+	return gcmp_present;
+}
+
+void __init fill_ipi_map(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(gic_intr_map); i++) {
+		if (gic_intr_map[i].ipiflag && (gic_intr_map[i].cpunum != X))
+			ipi_map[gic_intr_map[i].cpunum] |=
+				(1 << (gic_intr_map[i].pin + 2));
+	}
+}
+
 void __init arch_init_irq(void)
 {
+	int gic_present, gcmp_present;
+
 	init_i8259_irqs();
 
 	if (!cpu_has_veic)
 		mips_cpu_irq_init();
 
+	gcmp_present = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
+	if (gcmp_present)  {
+		GCMPGCB(GICBA) = GIC_BASE_ADDR | GCMP_GCB_GICBA_EN_MSK;
+		gic_present = 1;
+	} else {
+		_msc01_biu_base = (unsigned long) ioremap_nocache(MSC01_BIU_REG_BASE, MSC01_BIU_ADDRSPACE_SZ);
+		gic_present = (REG(_msc01_biu_base, MSC01_SC_CFG) &
+		MSC01_SC_CFG_GICPRES_MSK) >> MSC01_SC_CFG_GICPRES_SHF;
+	}
+	if (gic_present)
+		printk(KERN_DEBUG "GIC present\n");
+
 	switch (mips_revision_sconid) {
 	case MIPS_REVISION_SCON_SOCIT:
 	case MIPS_REVISION_SCON_ROCIT:
@@ -360,4 +504,206 @@ void __init arch_init_irq(void)
 		setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
 						&corehi_irqaction);
 	}
+
+#if defined(CONFIG_MIPS_MT_SMP)
+	if (gic_present) {
+		/* FIXME */
+		int i;
+		struct {
+			unsigned int resched;
+			unsigned int call;
+		} ipiirq[] = {
+			{
+				.resched = GIC_IPI_EXT_INTR_RESCHED_VPE0,
+				.call =  GIC_IPI_EXT_INTR_CALLFNC_VPE0},
+			{
+				.resched = GIC_IPI_EXT_INTR_RESCHED_VPE1,
+				.call =  GIC_IPI_EXT_INTR_CALLFNC_VPE1
+			}, {
+				.resched = GIC_IPI_EXT_INTR_RESCHED_VPE2,
+				.call =  GIC_IPI_EXT_INTR_CALLFNC_VPE2
+			}, {
+				.resched = GIC_IPI_EXT_INTR_RESCHED_VPE3,
+				.call =  GIC_IPI_EXT_INTR_CALLFNC_VPE3
+			}
+		};
+#define NIPI (sizeof(ipiirq)/sizeof(ipiirq[0]))
+		fill_ipi_map();
+		gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map, ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
+		if (!gcmp_present) {
+			/* Enable the GIC */
+			i = REG(_msc01_biu_base, MSC01_SC_CFG);
+			REG(_msc01_biu_base, MSC01_SC_CFG) =
+				(i | (0x1 << MSC01_SC_CFG_GICENA_SHF));
+			pr_debug("GIC Enabled\n");
+		}
+
+		/* set up ipi interrupts */
+		if (cpu_has_vint) {
+			set_vi_handler(MIPSCPU_INT_IPI0, malta_ipi_irqdispatch);
+			set_vi_handler(MIPSCPU_INT_IPI1, malta_ipi_irqdispatch);
+		}
+		/* Argh.. this really needs sorting out.. */
+		printk("CPU%d: status register was %08x\n", smp_processor_id(), read_c0_status());
+		write_c0_status(read_c0_status() | STATUSF_IP3 | STATUSF_IP4);
+		printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status());
+		write_c0_status(0x1100dc00);
+		printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status());
+		for (i = 0; i < NIPI; i++) {
+			setup_irq(MIPS_GIC_IRQ_BASE + ipiirq[i].resched, &irq_resched);
+			setup_irq(MIPS_GIC_IRQ_BASE + ipiirq[i].call, &irq_call);
+
+			set_irq_handler(MIPS_GIC_IRQ_BASE + ipiirq[i].resched, handle_percpu_irq);
+			set_irq_handler(MIPS_GIC_IRQ_BASE + ipiirq[i].call, handle_percpu_irq);
+		}
+	} else {
+		/* set up ipi interrupts */
+		if (cpu_has_veic) {
+			set_vi_handler (MSC01E_INT_SW0, ipi_resched_dispatch);
+			set_vi_handler (MSC01E_INT_SW1, ipi_call_dispatch);
+			cpu_ipi_resched_irq = MSC01E_INT_SW0;
+			cpu_ipi_call_irq = MSC01E_INT_SW1;
+		} else {
+			if (cpu_has_vint) {
+				set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
+				set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
+			}
+			cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
+			cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
+		}
+
+		setup_irq(cpu_ipi_resched_irq, &irq_resched);
+		setup_irq(cpu_ipi_call_irq, &irq_call);
+
+		set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
+		set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
+	}
+#endif
+}
+
+void malta_be_init(void)
+{
+	if (gcmp_present) {
+		/* Could change CM error mask register */
+	}
+}
+
+
+static char *tr[8] = {
+	"mem",	"gcr",	"gic",	"mmio",
+	"0x04",	"0x05",	"0x06",	"0x07"
+};
+
+static char *mcmd[32] = {
+	[0x00] = "0x00",
+	[0x01] = "Legacy Write",
+	[0x02] = "Legacy Read",
+	[0x03] = "0x03",
+	[0x04] = "0x04",
+	[0x05] = "0x05",
+	[0x06] = "0x06",
+	[0x07] = "0x07",
+	[0x08] = "Coherent Read Own",
+	[0x09] = "Coherent Read Share",
+	[0x0a] = "Coherent Read Discard",
+	[0x0b] = "Coherent Read Share Always",
+	[0x0c] = "Coherent Upgrade",
+	[0x0d] = "Coherent Writeback",
+	[0x0e] = "0x0e",
+	[0x0f] = "0x0f",
+	[0x10] = "Coherent Copyback",
+	[0x11] = "Coherent Copyback Invalidate",
+	[0x12] = "Coherent Invalidate",
+	[0x13] = "Coherent Write Invalidate",
+	[0x14] = "Coherent Completion Sync",
+	[0x15] = "0x15",
+	[0x16] = "0x16",
+	[0x17] = "0x17",
+	[0x18] = "0x18",
+	[0x19] = "0x19",
+	[0x1a] = "0x1a",
+	[0x1b] = "0x1b",
+	[0x1c] = "0x1c",
+	[0x1d] = "0x1d",
+	[0x1e] = "0x1e",
+	[0x1f] = "0x1f"
+};
+
+static char *core[8] = {
+	"Invalid/OK", 	"Invalid/Data",
+	"Shared/OK",	"Shared/Data",
+	"Modified/OK",	"Modified/Data",
+	"Exclusive/OK",	"Exclusive/Data"
+};
+
+static char *causes[32] = {
+	"None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
+	"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
+	"0x08", "0x09", "0x0a", "0x0b",
+	"0x0c", "0x0d", "0x0e", "0x0f",
+	"0x10", "0x11", "0x12", "0x13",
+	"0x14", "0x15", "0x16", "INTVN_WR_ERR",
+	"INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+	"0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+int malta_be_handler(struct pt_regs *regs, int is_fixup)
+{
+	/* This duplicates the handling in do_be which seems wrong */
+	int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
+
+	if (gcmp_present) {
+		unsigned long cm_error = GCMPGCB(GCMEC);
+		unsigned long cm_addr = GCMPGCB(GCMEA);
+		unsigned long cm_other = GCMPGCB(GCMEO);
+		unsigned long cause, ocause;
+		char buf[256];
+
+		cause = (cm_error & GCMP_GCB_GMEC_ERROR_TYPE_MSK);
+		if (cause != 0) {
+			cause >>= GCMP_GCB_GMEC_ERROR_TYPE_SHF;
+			if (cause < 16) {
+				unsigned long cca_bits = (cm_error >> 15) & 7;
+				unsigned long tr_bits = (cm_error >> 12) & 7;
+				unsigned long mcmd_bits = (cm_error >> 7) & 0x1f;
+				unsigned long stag_bits = (cm_error >> 3) & 15;
+				unsigned long sport_bits = (cm_error >> 0) & 7;
+
+				snprintf(buf, sizeof(buf),
+					 "CCA=%lu TR=%s MCmd=%s STag=%lu "
+					 "SPort=%lu\n",
+					 cca_bits, tr[tr_bits], mcmd[mcmd_bits],
+					 stag_bits, sport_bits);
+			} else {
+				/* glob state & sresp together */
+				unsigned long c3_bits = (cm_error >> 18) & 7;
+				unsigned long c2_bits = (cm_error >> 15) & 7;
+				unsigned long c1_bits = (cm_error >> 12) & 7;
+				unsigned long c0_bits = (cm_error >> 9) & 7;
+				unsigned long sc_bit = (cm_error >> 8) & 1;
+				unsigned long mcmd_bits = (cm_error >> 3) & 0x1f;
+				unsigned long sport_bits = (cm_error >> 0) & 7;
+				snprintf(buf, sizeof(buf),
+					 "C3=%s C2=%s C1=%s C0=%s SC=%s "
+					 "MCmd=%s SPort=%lu\n",
+					 core[c3_bits], core[c2_bits],
+					 core[c1_bits], core[c0_bits],
+					 sc_bit ? "True" : "False",
+					 mcmd[mcmd_bits], sport_bits);
+			}
+
+			ocause = (cm_other & GCMP_GCB_GMEO_ERROR_2ND_MSK) >>
+				 GCMP_GCB_GMEO_ERROR_2ND_SHF;
+
+			printk("CM_ERROR=%08lx %s <%s>\n", cm_error,
+			       causes[cause], buf);
+			printk("CM_ADDR =%08lx\n", cm_addr);
+			printk("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
+
+			/* reprime cause register */
+			GCMPGCB(GCMEC) = 0;
+		}
+	}
+
+	return retval;
 }
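
fill_ipi_map() above turns the gic_intr_map table into per-CPU masks of Cause/Status IP bits: each IPI entry contributes bit (pin + 2), so VPE0's two IPI entries on GIC_CPU_INT1/GIC_CPU_INT2 become IP3 and IP4. That is the mask plat_irq_dispatch() tests with (1 << irq) before routing to malta_ipi_irqdispatch(), and it matches the STATUSF_IP3 | STATUSF_IP4 unmasking done in the CMP/SMVP init_secondary paths. A small standalone check of that arithmetic (pin numbers taken from the table; the equality GIC_CPU_INTn == n is an assumption of this sketch):

#include <stdio.h>

/* Mirror of the "1 << (pin + 2)" accumulation in fill_ipi_map(). */
static unsigned int ipi_bits_for_cpu(const int *pins, int npins)
{
	unsigned int map = 0;
	int i;

	for (i = 0; i < npins; i++)
		map |= 1u << (pins[i] + 2);
	return map;
}

int main(void)
{
	/* VPE0's IPI entries in gic_intr_map use GIC_CPU_INT1 and GIC_CPU_INT2 */
	int vpe0_pins[] = { 1, 2 };

	printf("ipi_map[0] = 0x%x\n", ipi_bits_for_cpu(vpe0_pins, 2));	/* 0x18 = IP3 | IP4 */
	return 0;
}
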
diff --git a/arch/mips/mips-boards/malta/malta_setup.c b/arch/mips/mips-boards/malta/malta_setup.c
index 2cd8f5734b36..4a0f21c76e7b 100644
--- a/arch/mips/mips-boards/malta/malta_setup.c
+++ b/arch/mips/mips-boards/malta/malta_setup.c
@@ -36,6 +36,9 @@
 #include <linux/console.h>
 #endif
 
+extern void malta_be_init(void);
+extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
+
 struct resource standard_io_resources[] = {
 	{
 		.name = "dma1",
@@ -220,4 +223,7 @@ void __init plat_mem_setup(void)
 	screen_info_setup();
 #endif
 	mips_reboot_setup();
+
+	board_be_init = malta_be_init;
+	board_be_handler = malta_be_handler;
 }
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 3d3e53651341..643c8bcffff3 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -54,6 +54,12 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
 	preempt_enable();
 }
 
+#if defined(CONFIG_MIPS_CMP)
+#define cpu_has_safe_index_cacheops 0
+#else
+#define cpu_has_safe_index_cacheops 1
+#endif
+
 /*
  * Must die.
  */
@@ -482,6 +488,8 @@ static inline void local_r4k_flush_cache_page(void *args)
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
 		r4k_blast_dcache_page(addr);
+		if (exec && !cpu_icache_snoops_remote_store)
+			r4k_blast_scache_page(addr);
 	}
 	if (exec) {
 		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
@@ -584,7 +592,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 	 * subset property so we have to flush the primary caches
 	 * explicitly
 	 */
-	if (size >= dcache_size) {
+	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
 		R4600_HIT_CACHEOP_WAR_IMPL;
@@ -607,7 +615,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 		return;
 	}
 
-	if (size >= dcache_size) {
+	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
 		R4600_HIT_CACHEOP_WAR_IMPL;
@@ -969,6 +977,7 @@ static void __cpuinit probe_pcache(void)
 	case CPU_24K:
 	case CPU_34K:
 	case CPU_74K:
+	case CPU_1004K:
 		if ((read_c0_config7() & (1 << 16))) {
 			/* effectively physically indexed dcache,
 			   thus no virtual aliases. */
@@ -1265,6 +1274,20 @@ static void __cpuinit coherency_setup(void)
 	}
 }
 
+#if defined(CONFIG_DMA_NONCOHERENT)
+
+static int __cpuinitdata coherentio;
+
+static int __init setcoherentio(char *str)
+{
+	coherentio = 1;
+
+	return 1;
+}
+
+__setup("coherentio", setcoherentio);
+#endif
+
 void __cpuinit r4k_cache_init(void)
 {
 	extern void build_clear_page(void);
@@ -1324,14 +1347,22 @@ void __cpuinit r4k_cache_init(void)
 	flush_data_cache_page	= r4k_flush_data_cache_page;
 	flush_icache_range	= r4k_flush_icache_range;
 
-#ifdef CONFIG_DMA_NONCOHERENT
-	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
-	_dma_cache_wback	= r4k_dma_cache_wback_inv;
-	_dma_cache_inv		= r4k_dma_cache_inv;
+#if defined(CONFIG_DMA_NONCOHERENT)
+	if (coherentio) {
+		_dma_cache_wback_inv	= (void *)cache_noop;
+		_dma_cache_wback	= (void *)cache_noop;
+		_dma_cache_inv		= (void *)cache_noop;
+	} else {
+		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
+		_dma_cache_wback	= r4k_dma_cache_wback_inv;
+		_dma_cache_inv		= r4k_dma_cache_inv;
+	}
 #endif
 
 	build_clear_page();
 	build_copy_page();
+#if !defined(CONFIG_MIPS_CMP)
 	local_r4k___flush_cache_all(NULL);
+#endif
 	coherency_setup();
 }
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 235833af3a8b..05ac6c6123ca 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -221,7 +221,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 		copy_page(vto, vfrom);
 		kunmap_atomic(vfrom, KM_USER0);
 	}
-	if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
+	if ((!cpu_has_ic_fills_f_dc) ||
 	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
 		flush_data_cache_page((unsigned long)vto);
 	kunmap_atomic(vto, KM_USER1);
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index aa52aa146cea..b5f6f71b27bc 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -80,6 +80,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	case CPU_24K:
 	case CPU_25KF:
 	case CPU_34K:
+	case CPU_1004K:
 	case CPU_74K:
 	case CPU_SB1:
 	case CPU_SB1A:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index ccbea229a0e6..ca65469d7e30 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -32,8 +32,11 @@
 #define M_COUNTER_OVERFLOW		(1UL      << 31)
 
 #ifdef CONFIG_MIPS_MT_SMP
-#define WHAT		(M_TC_EN_VPE | M_PERFCTL_VPEID(smp_processor_id()))
-#define vpe_id()	smp_processor_id()
+static int cpu_has_mipsmt_pertccounters;
+#define WHAT		(M_TC_EN_VPE | \
+			 M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
+#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
+			0 : cpu_data[smp_processor_id()].vpe_id)
 
 /*
  * The number of bits to shift to convert between counters per core and
@@ -243,11 +246,11 @@ static inline int __n_counters(void)
 {
 	if (!(read_c0_config1() & M_CONFIG1_PC))
 		return 0;
-	if (!(r_c0_perfctrl0() & M_PERFCTL_MORE))
+	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
 		return 1;
-	if (!(r_c0_perfctrl1() & M_PERFCTL_MORE))
+	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
 		return 2;
-	if (!(r_c0_perfctrl2() & M_PERFCTL_MORE))
+	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
 		return 3;
 
 	return 4;
@@ -274,8 +277,9 @@ static inline int n_counters(void)
 	return counters;
 }
 
-static inline void reset_counters(int counters)
+static void reset_counters(void *arg)
 {
+	int counters = (int)arg;
 	switch (counters) {
 	case 4:
 		w_c0_perfctrl3(0);
@@ -302,9 +306,12 @@ static int __init mipsxx_init(void)
 		return -ENODEV;
 	}
 
-	reset_counters(counters);
-
-	counters = counters_total_to_per_cpu(counters);
+#ifdef CONFIG_MIPS_MT_SMP
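+	/* Config7 bit 19 set: each TC has its own performance counters */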
+	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
+	if (!cpu_has_mipsmt_pertccounters)
+		counters = counters_total_to_per_cpu(counters);
+#endif
+	on_each_cpu(reset_counters, (void *)counters, 0, 1);
 
 	op_model_mipsxx_ops.num_counters = counters;
 	switch (current_cpu_type()) {
@@ -320,6 +327,13 @@ static int __init mipsxx_init(void)
 		op_model_mipsxx_ops.cpu_type = "mips/25K";
 		break;
 
+	case CPU_1004K:
+#if 0
+		/* FIXME: report as 34K for now */
+		op_model_mipsxx_ops.cpu_type = "mips/1004K";
+		break;
+#endif
+
 	case CPU_34K:
 		op_model_mipsxx_ops.cpu_type = "mips/34K";
 		break;
@@ -365,7 +379,7 @@ static void mipsxx_exit(void)
 	int counters = op_model_mipsxx_ops.num_counters;
 
 	counters = counters_per_cpu_to_total(counters);
-	reset_counters(counters);
+	on_each_cpu(reset_counters, (void *)counters, 0, 1);
 
 	perf_irq = null_perf_irq;
 }
diff --git a/include/asm-mips/cmp.h b/include/asm-mips/cmp.h
new file mode 100644
index 000000000000..89a73fb93ae6
--- /dev/null
+++ b/include/asm-mips/cmp.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_CMP_H
+#define _ASM_CMP_H
+
+/*
+ * Definitions for CMP multitasking on MIPS cores
+ */
+struct task_struct;
+
+extern void cmp_smp_setup(void);
+extern void cmp_smp_finish(void);
+extern void cmp_boot_secondary(int cpu, struct task_struct *t);
+extern void cmp_init_secondary(void);
+extern void cmp_cpus_done(void);
+extern void cmp_prepare_cpus(unsigned int max_cpus);
+
+/* This is platform specific */
+extern void cmp_send_ipi(int cpu, unsigned int action);
+#endif /*  _ASM_CMP_H */
diff --git a/include/asm-mips/cpu.h b/include/asm-mips/cpu.h
index bf5bbc78a9f7..6d04ea912254 100644
--- a/include/asm-mips/cpu.h
+++ b/include/asm-mips/cpu.h
@@ -89,6 +89,7 @@
 #define PRID_IMP_34K		0x9500
 #define PRID_IMP_24KE		0x9600
 #define PRID_IMP_74K		0x9700
+#define PRID_IMP_1004K		0x9900
 #define PRID_IMP_LOONGSON1      0x4200
 #define PRID_IMP_LOONGSON2      0x6300
 
@@ -194,9 +195,9 @@ enum cpu_type_enum {
 	/*
 	 * MIPS32 class processors
 	 */
-	CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_74K, CPU_AU1000,
-	CPU_AU1100, CPU_AU1200, CPU_AU1210, CPU_AU1250, CPU_AU1500, CPU_AU1550,
-	CPU_PR4450, CPU_BCM3302, CPU_BCM4710,
+	CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
+	CPU_AU1000, CPU_AU1100, CPU_AU1200, CPU_AU1210, CPU_AU1250, CPU_AU1500,
+	CPU_AU1550, CPU_PR4450, CPU_BCM3302, CPU_BCM4710,
 
 	/*
 	 * MIPS64 class processors
diff --git a/include/asm-mips/gcmpregs.h b/include/asm-mips/gcmpregs.h
new file mode 100644
index 000000000000..d74a8a4ca861
--- /dev/null
+++ b/include/asm-mips/gcmpregs.h
@@ -0,0 +1,117 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 07 MIPS Technologies, Inc.
+ *
+ * Multiprocessor Subsystem Register Definitions
+ *
+ */
+#ifndef _ASM_GCMPREGS_H
+#define _ASM_GCMPREGS_H
+
+
+/* Offsets to major blocks within GCMP from GCMP base */
+#define GCMP_GCB_OFS		0x0000 /* Global Control Block */
+#define GCMP_CLCB_OFS		0x2000 /* Core Local Control Block */
+#define GCMP_COCB_OFS		0x4000 /* Core Other Control Block */
+#define GCMP_GDB_OFS		0x8000 /* Global Debug Block */
+
+/* Offsets to individual GCMP registers from GCMP base */
+#define GCMPOFS(block, tag, reg)	(GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS)
+
+#define GCMPGCBOFS(reg)		GCMPOFS(GCB, GCB, reg)
+#define GCMPCLCBOFS(reg)	GCMPOFS(CLCB, CCB, reg)
+#define GCMPCOCBOFS(reg)	GCMPOFS(COCB, CCB, reg)
+#define GCMPGDBOFS(reg)		GCMPOFS(GDB, GDB, reg)
+
+/* GCMP register access */
+#define GCMPGCB(reg)			REGP(_gcmp_base, GCMPGCBOFS(reg))
+#define GCMPCLCB(reg)			REGP(_gcmp_base, GCMPCLCBOFS(reg))
+#define GCMPCOCB(reg)			REGP(_gcmp_base, GCMPCOCBOFS(reg))
+#define GCMPGDB(reg)			REGP(_gcmp_base, GCMPGDBOFS(reg))
+
+/* Mask generation */
+#define GCMPMSK(block, reg, bits)	(MSK(bits)<<GCMP_##block##_##reg##_SHF)
+#define GCMPGCBMSK(reg, bits)		GCMPMSK(GCB, reg, bits)
+#define GCMPCCBMSK(reg, bits)		GCMPMSK(CCB, reg, bits)
+#define GCMPGDBMSK(reg, bits)		GCMPMSK(GDB, reg, bits)
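+/*
+ * Example expansion (illustrative): GCMP_GCB_GC_NUMCORES_MSK below is
+ * GCMPGCBMSK(GC_NUMCORES, 8) == MSK(8) << GCMP_GCB_GC_NUMCORES_SHF
+ * == 0xff << 0 == 0x000000ff, so the NUMCORES field is extracted as
+ * (GCMPGCB(GC) & GCMP_GCB_GC_NUMCORES_MSK) >> GCMP_GCB_GC_NUMCORES_SHF.
+ */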
+
+/* GCB registers */
+#define GCMP_GCB_GC_OFS			0x0000	/* Global Config Register */
+#define  GCMP_GCB_GC_NUMIOCU_SHF	8
+#define  GCMP_GCB_GC_NUMIOCU_MSK	GCMPGCBMSK(GC_NUMIOCU, 4)
+#define  GCMP_GCB_GC_NUMCORES_SHF	0
+#define  GCMP_GCB_GC_NUMCORES_MSK	GCMPGCBMSK(GC_NUMCORES, 8)
+#define GCMP_GCB_GCMPB_OFS		0x0008		/* Global GCMP Base */
+#define  GCMP_GCB_GCMPB_GCMPBASE_SHF	15
+#define  GCMP_GCB_GCMPB_GCMPBASE_MSK	GCMPGCBMSK(GCMPB_GCMPBASE, 17)
+#define  GCMP_GCB_GCMPB_CMDEFTGT_SHF	0
+#define  GCMP_GCB_GCMPB_CMDEFTGT_MSK	GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
+#define  GCMP_GCB_GCMPB_CMDEFTGT_MEM	0
+#define  GCMP_GCB_GCMPB_CMDEFTGT_MEM1	1
+#define  GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
+#define  GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
+#define GCMP_GCB_CCMC_OFS		0x0010	/* Global CM Control */
+#define GCMP_GCB_GCSRAP_OFS		0x0020	/* Global CSR Access Privilege */
+#define  GCMP_GCB_GCSRAP_CMACCESS_SHF	0
+#define  GCMP_GCB_GCSRAP_CMACCESS_MSK	GCMPGCBMSK(GCSRAP_CMACCESS, 8)
+#define GCMP_GCB_GCMPREV_OFS		0x0030	/* GCMP Revision Register */
+#define GCMP_GCB_GCMEM_OFS		0x0040	/* Global CM Error Mask */
+#define GCMP_GCB_GCMEC_OFS		0x0048	/* Global CM Error Cause */
+#define  GCMP_GCB_GMEC_ERROR_TYPE_SHF	27
+#define  GCMP_GCB_GMEC_ERROR_TYPE_MSK	GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
+#define  GCMP_GCB_GMEC_ERROR_INFO_SHF	0
+#define  GCMP_GCB_GMEC_ERROR_INFO_MSK	GCMPGCBMSK(GMEC_ERROR_INFO, 27)
+#define GCMP_GCB_GCMEA_OFS		0x0050	/* Global CM Error Address */
+#define GCMP_GCB_GCMEO_OFS		0x0058	/* Global CM Error Multiple */
+#define  GCMP_GCB_GMEO_ERROR_2ND_SHF	0
+#define  GCMP_GCB_GMEO_ERROR_2ND_MSK	GCMPGCBMSK(GMEO_ERROR_2ND, 5)
+#define GCMP_GCB_GICBA_OFS		0x0080	/* Global Interrupt Controller Base Address */
+#define  GCMP_GCB_GICBA_BASE_SHF	17
+#define  GCMP_GCB_GICBA_BASE_MSK	GCMPGCBMSK(GICBA_BASE, 15)
+#define  GCMP_GCB_GICBA_EN_SHF		0
+#define  GCMP_GCB_GICBA_EN_MSK		GCMPGCBMSK(GICBA_EN, 1)
+
+/* GCB Regions */
+#define GCMP_GCB_CMxBASE_OFS(n)		(0x0090+16*(n))		/* Global Region[0-3] Base Address */
+#define  GCMP_GCB_CMxBASE_BASE_SHF	16
+#define  GCMP_GCB_CMxBASE_BASE_MSK	GCMPGCBMSK(CMxBASE_BASE, 16)
+#define GCMP_GCB_CMxMASK_OFS(n)		(0x0098+16*(n))		/* Global Region[0-3] Address Mask */
+#define  GCMP_GCB_CMxMASK_MASK_SHF	16
+#define  GCMP_GCB_CMxMASK_MASK_MSK	GCMPGCBMSK(CMxMASK_MASK, 16)
+#define  GCMP_GCB_CMxMASK_CMREGTGT_SHF	0
+#define  GCMP_GCB_CMxMASK_CMREGTGT_MSK	GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
+#define  GCMP_GCB_CMxMASK_CMREGTGT_MEM	 0
+#define  GCMP_GCB_CMxMASK_CMREGTGT_MEM1  1
+#define  GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
+#define  GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
+
+
+/* Core local/Core other control block registers */
+#define GCMP_CCB_RESETR_OFS		0x0000			/* Reset Release */
+#define  GCMP_CCB_RESETR_INRESET_SHF	0
+#define  GCMP_CCB_RESETR_INRESET_MSK	GCMPCCBMSK(RESETR_INRESET, 16)
+#define GCMP_CCB_COHCTL_OFS		0x0008			/* Coherence Control */
+#define  GCMP_CCB_COHCTL_DOMAIN_SHF	0
+#define  GCMP_CCB_COHCTL_DOMAIN_MSK	GCMPCCBMSK(COHCTL_DOMAIN, 8)
+#define GCMP_CCB_CFG_OFS		0x0010			/* Config */
+#define  GCMP_CCB_CFG_IOCUTYPE_SHF	10
+#define  GCMP_CCB_CFG_IOCUTYPE_MSK	GCMPCCBMSK(CFG_IOCUTYPE, 2)
+#define   GCMP_CCB_CFG_IOCUTYPE_CPU	0
+#define   GCMP_CCB_CFG_IOCUTYPE_NCIOCU	1
+#define   GCMP_CCB_CFG_IOCUTYPE_CIOCU	2
+#define  GCMP_CCB_CFG_NUMVPE_SHF	0
+#define  GCMP_CCB_CFG_NUMVPE_MSK	GCMPCCBMSK(CFG_NUMVPE, 10)
+#define GCMP_CCB_OTHER_OFS		0x0018		/* Other Address */
+#define  GCMP_CCB_OTHER_CORENUM_SHF	16
+#define  GCMP_CCB_OTHER_CORENUM_MSK	GCMPCCBMSK(OTHER_CORENUM, 16)
+#define GCMP_CCB_RESETBASE_OFS		0x0020		/* Reset Exception Base */
+#define  GCMP_CCB_RESETBASE_BEV_SHF	12
+#define  GCMP_CCB_RESETBASE_BEV_MSK	GCMPCCBMSK(RESETBASE_BEV, 20)
+#define GCMP_CCB_ID_OFS			0x0028		/* Identification */
+#define GCMP_CCB_DINTGROUP_OFS		0x0030		/* DINT Group Participate */
+#define GCMP_CCB_DBGGROUP_OFS		0x0100		/* DebugBreak Group */
+
+#endif /* _ASM_GCMPREGS_H */
diff --git a/include/asm-mips/gic.h b/include/asm-mips/gic.h
new file mode 100644
index 000000000000..01b2f92dc33d
--- /dev/null
+++ b/include/asm-mips/gic.h
@@ -0,0 +1,487 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 07 MIPS Technologies, Inc.
+ *
+ * GIC Register Definitions
+ *
+ */
+#ifndef _ASM_GICREGS_H
+#define _ASM_GICREGS_H
+
+#undef	GICISBYTELITTLEENDIAN
+#define GICISWORDLITTLEENDIAN
+
+/* Constants */
+#define GIC_POL_POS			1
+#define GIC_POL_NEG			0
+#define GIC_TRIG_EDGE			1
+#define GIC_TRIG_LEVEL			0
+
+#define GIC_NUM_INTRS			32
+
+#define MSK(n) ((1 << (n)) - 1)
+#define REG32(addr)		(*(volatile unsigned int *) (addr))
+#define REG(base, offs)		REG32((unsigned int)(base) + offs##_##OFS)
+#define REGP(base, phys)	REG32((unsigned int)(base) + (phys))
+
+/* Accessors */
+#define GIC_REG(segment, offset) \
+	REG32(_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
+#define GIC_REG_ADDR(segment, offset) \
+	REG32(_gic_base + segment##_##SECTION_OFS + offset)
+
+#define GIC_ABS_REG(segment, offset) \
+       (_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
+#define GIC_REG_ABS_ADDR(segment, offset) \
+       (_gic_base + segment##_##SECTION_OFS + offset)
+
+#ifdef GICISBYTELITTLEENDIAN
+#define GICREAD(reg, data)	(data) = (reg), (data) = le32_to_cpu(data)
+#define GICWRITE(reg, data)	(reg) = cpu_to_le32(data)
+#define GICBIS(reg, bits)			\
+	({unsigned int data;			\
+		GICREAD(reg, data);		\
+		data |= bits;			\
+		GICWRITE(reg, data);		\
+	})
+
+#else
+#define GICREAD(reg, data)	(data) = (reg)
+#define GICWRITE(reg, data)	(reg) = (data)
+#define GICBIS(reg, bits)	(reg) |= (bits)
+#endif
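+
+/*
+ * GICBIS() is a read-modify-write OR of bits into a register; the
+ * polarity and trigger helpers below are built on it, while the
+ * set/reset mask registers take plain GICWRITE() stores.
+ */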
+
+
+/* GIC Address Space */
+#define SHARED_SECTION_OFS		0x0000
+#define SHARED_SECTION_SIZE		0x8000
+#define VPE_LOCAL_SECTION_OFS		0x8000
+#define VPE_LOCAL_SECTION_SIZE		0x4000
+#define VPE_OTHER_SECTION_OFS		0xc000
+#define VPE_OTHER_SECTION_SIZE		0x4000
+#define USM_VISIBLE_SECTION_OFS		0x10000
+#define USM_VISIBLE_SECTION_SIZE	0x10000
+
+/* Register Map for Shared Section */
+#if defined(CONFIG_CPU_LITTLE_ENDIAN) || defined(GICISWORDLITTLEENDIAN)
+
+#define	GIC_SH_CONFIG_OFS		0x0000
+
+/* Shared Global Counter */
+#define GIC_SH_COUNTER_31_00_OFS	0x0010
+#define GIC_SH_COUNTER_63_32_OFS	0x0014
+
+/* Interrupt Polarity */
+#define GIC_SH_POL_31_0_OFS		0x0100
+#define GIC_SH_POL_63_32_OFS		0x0104
+#define GIC_SH_POL_95_64_OFS		0x0108
+#define GIC_SH_POL_127_96_OFS		0x010c
+#define GIC_SH_POL_159_128_OFS		0x0110
+#define GIC_SH_POL_191_160_OFS		0x0114
+#define GIC_SH_POL_223_192_OFS		0x0118
+#define GIC_SH_POL_255_224_OFS		0x011c
+
+/* Edge/Level Triggering */
+#define GIC_SH_TRIG_31_0_OFS		0x0180
+#define GIC_SH_TRIG_63_32_OFS		0x0184
+#define GIC_SH_TRIG_95_64_OFS		0x0188
+#define GIC_SH_TRIG_127_96_OFS		0x018c
+#define GIC_SH_TRIG_159_128_OFS		0x0190
+#define GIC_SH_TRIG_191_160_OFS		0x0194
+#define GIC_SH_TRIG_223_192_OFS		0x0198
+#define GIC_SH_TRIG_255_224_OFS		0x019c
+
+/* Dual Edge Triggering */
+#define GIC_SH_DUAL_31_0_OFS		0x0200
+#define GIC_SH_DUAL_63_32_OFS		0x0204
+#define GIC_SH_DUAL_95_64_OFS		0x0208
+#define GIC_SH_DUAL_127_96_OFS		0x020c
+#define GIC_SH_DUAL_159_128_OFS		0x0210
+#define GIC_SH_DUAL_191_160_OFS		0x0214
+#define GIC_SH_DUAL_223_192_OFS		0x0218
+#define GIC_SH_DUAL_255_224_OFS		0x021c
+
+/* Set/Clear corresponding bit in Edge Detect Register */
+#define GIC_SH_WEDGE_OFS		0x0280
+
+/* Reset Mask - Disables Interrupt */
+#define GIC_SH_RMASK_31_0_OFS		0x0300
+#define GIC_SH_RMASK_63_32_OFS		0x0304
+#define GIC_SH_RMASK_95_64_OFS		0x0308
+#define GIC_SH_RMASK_127_96_OFS		0x030c
+#define GIC_SH_RMASK_159_128_OFS	0x0310
+#define GIC_SH_RMASK_191_160_OFS	0x0314
+#define GIC_SH_RMASK_223_192_OFS	0x0318
+#define GIC_SH_RMASK_255_224_OFS	0x031c
+
+/* Set Mask (WO) - Enables Interrupt */
+#define GIC_SH_SMASK_31_0_OFS		0x0380
+#define GIC_SH_SMASK_63_32_OFS		0x0384
+#define GIC_SH_SMASK_95_64_OFS		0x0388
+#define GIC_SH_SMASK_127_96_OFS		0x038c
+#define GIC_SH_SMASK_159_128_OFS	0x0390
+#define GIC_SH_SMASK_191_160_OFS	0x0394
+#define GIC_SH_SMASK_223_192_OFS	0x0398
+#define GIC_SH_SMASK_255_224_OFS	0x039c
+
+/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
+#define GIC_SH_MASK_31_0_OFS		0x0400
+#define GIC_SH_MASK_63_32_OFS		0x0404
+#define GIC_SH_MASK_95_64_OFS		0x0408
+#define GIC_SH_MASK_127_96_OFS		0x040c
+#define GIC_SH_MASK_159_128_OFS		0x0410
+#define GIC_SH_MASK_191_160_OFS		0x0414
+#define GIC_SH_MASK_223_192_OFS		0x0418
+#define GIC_SH_MASK_255_224_OFS		0x041c
+
+/* Pending Global Interrupts (RO) */
+#define GIC_SH_PEND_31_0_OFS		0x0480
+#define GIC_SH_PEND_63_32_OFS		0x0484
+#define GIC_SH_PEND_95_64_OFS		0x0488
+#define GIC_SH_PEND_127_96_OFS		0x048c
+#define GIC_SH_PEND_159_128_OFS		0x0490
+#define GIC_SH_PEND_191_160_OFS		0x0494
+#define GIC_SH_PEND_223_192_OFS		0x0498
+#define GIC_SH_PEND_255_224_OFS		0x049c
+
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS	0x0500
+
+/* Maps Interrupt X to a Pin */
+#define GIC_SH_MAP_TO_PIN(intr) \
+	(GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr))
+
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS	0x2000
+
+/* Maps Interrupt X to a VPE */
+#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
+	(GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + (((vpe) / 32) * 4))
+#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe)	(1 << ((vpe) % 32))
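+
+/*
+ * Worked example (illustrative): GIC_SH_MAP_TO_VPE_REG_OFF(5, 3) is
+ * 0x2000 + 32*5 + (3/32)*4 == 0x20a0, and GIC_SH_MAP_TO_VPE_REG_BIT(3)
+ * is 1 << 3, i.e. interrupt 5 is routed to VPE 3 by setting bit 3 of
+ * the word at offset 0x20a0 in the shared section.
+ */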
+
+/* Polarity : Reset Value is always 0 */
+#define GIC_SH_SET_POLARITY_OFS		0x0100
+#define GIC_SET_POLARITY(intr, pol) \
+	GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + (((intr) / 32) * 4)), (pol) << ((intr) % 32))
+
+/* Triggering : Reset Value is always 0 */
+#define GIC_SH_SET_TRIGGER_OFS		0x0180
+#define GIC_SET_TRIGGER(intr, trig) \
+	GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + (((intr) / 32) * 4)), (trig) << ((intr) % 32))
+
+/* Mask manipulation */
+#define GIC_SH_SMASK_OFS		0x0380
+#define GIC_SET_INTR_MASK(intr, val) \
+	GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_SMASK_OFS + (((intr) / 32) * 4)), ((val) << ((intr) % 32)))
+
+#define GIC_SH_RMASK_OFS		0x0300
+#define GIC_CLR_INTR_MASK(intr, val) \
+	GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + (((intr) / 32) * 4)), ((val) << ((intr) % 32)))
+
+/* Register Map for Local Section */
+#define GIC_VPE_CTL_OFS			0x0000
+#define GIC_VPE_PEND_OFS		0x0004
+#define GIC_VPE_MASK_OFS		0x0008
+#define GIC_VPE_RMASK_OFS		0x000c
+#define GIC_VPE_SMASK_OFS		0x0010
+#define GIC_VPE_WD_MAP_OFS		0x0040
+#define GIC_VPE_COMPARE_MAP_OFS		0x0044
+#define GIC_VPE_TIMER_MAP_OFS		0x0048
+#define GIC_VPE_PERFCTR_MAP_OFS		0x0050
+#define GIC_VPE_SWINT0_MAP_OFS		0x0054
+#define GIC_VPE_SWINT1_MAP_OFS		0x0058
+#define GIC_VPE_OTHER_ADDR_OFS		0x0080
+#define GIC_VPE_WD_CONFIG0_OFS		0x0090
+#define GIC_VPE_WD_COUNT0_OFS		0x0094
+#define GIC_VPE_WD_INITIAL0_OFS		0x0098
+#define GIC_VPE_COMPARE_LO_OFS		0x00a0
+#define GIC_VPE_COMPARE_HI_OFS		0x00a4
+
+#define GIC_VPE_EIC_SHADOW_SET_BASE	0x0100
+#define GIC_VPE_EIC_SS(intr) \
+	(GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
+
+#define GIC_VPE_EIC_VEC_BASE		0x0800
+#define GIC_VPE_EIC_VEC(intr) \
+	(GIC_VPE_EIC_VEC_BASE + (4 * intr))
+
+#define GIC_VPE_TENABLE_NMI_OFS		0x1000
+#define GIC_VPE_TENABLE_YQ_OFS		0x1004
+#define GIC_VPE_TENABLE_INT_31_0_OFS	0x1080
+#define GIC_VPE_TENABLE_INT_63_32_OFS	0x1084
+
+/* User Mode Visible Section Register Map */
+#define GIC_UMV_SH_COUNTER_31_00_OFS	0x0000
+#define GIC_UMV_SH_COUNTER_63_32_OFS	0x0004
+
+#else /* CONFIG_CPU_BIG_ENDIAN */
+
+#define	GIC_SH_CONFIG_OFS		0x0000
+
+/* Shared Global Counter */
+#define GIC_SH_COUNTER_31_00_OFS	0x0014
+#define GIC_SH_COUNTER_63_32_OFS	0x0010
+
+/* Interrupt Polarity */
+#define GIC_SH_POL_31_0_OFS		0x0104
+#define GIC_SH_POL_63_32_OFS		0x0100
+#define GIC_SH_POL_95_64_OFS		0x010c
+#define GIC_SH_POL_127_96_OFS		0x0108
+#define GIC_SH_POL_159_128_OFS		0x0114
+#define GIC_SH_POL_191_160_OFS		0x0110
+#define GIC_SH_POL_223_192_OFS		0x011c
+#define GIC_SH_POL_255_224_OFS		0x0118
+
+/* Edge/Level Triggering */
+#define GIC_SH_TRIG_31_0_OFS		0x0184
+#define GIC_SH_TRIG_63_32_OFS		0x0180
+#define GIC_SH_TRIG_95_64_OFS		0x018c
+#define GIC_SH_TRIG_127_96_OFS		0x0188
+#define GIC_SH_TRIG_159_128_OFS		0x0194
+#define GIC_SH_TRIG_191_160_OFS		0x0190
+#define GIC_SH_TRIG_223_192_OFS		0x019c
+#define GIC_SH_TRIG_255_224_OFS		0x0198
+
+/* Dual Edge Triggering */
+#define GIC_SH_DUAL_31_0_OFS		0x0204
+#define GIC_SH_DUAL_63_32_OFS		0x0200
+#define GIC_SH_DUAL_95_64_OFS		0x020c
+#define GIC_SH_DUAL_127_96_OFS		0x0208
+#define GIC_SH_DUAL_159_128_OFS		0x0214
+#define GIC_SH_DUAL_191_160_OFS		0x0210
+#define GIC_SH_DUAL_223_192_OFS		0x021c
+#define GIC_SH_DUAL_255_224_OFS		0x0218
+
+/* Set/Clear corresponding bit in Edge Detect Register */
+#define GIC_SH_WEDGE_OFS		0x0280
+
+/* Reset Mask - Disables Interrupt */
+#define GIC_SH_RMASK_31_0_OFS		0x0304
+#define GIC_SH_RMASK_63_32_OFS		0x0300
+#define GIC_SH_RMASK_95_64_OFS		0x030c
+#define GIC_SH_RMASK_127_96_OFS		0x0308
+#define GIC_SH_RMASK_159_128_OFS	0x0314
+#define GIC_SH_RMASK_191_160_OFS	0x0310
+#define GIC_SH_RMASK_223_192_OFS	0x031c
+#define GIC_SH_RMASK_255_224_OFS	0x0318
+
+/* Set Mask (WO) - Enables Interrupt */
+#define GIC_SH_SMASK_31_0_OFS		0x0384
+#define GIC_SH_SMASK_63_32_OFS		0x0380
+#define GIC_SH_SMASK_95_64_OFS		0x038c
+#define GIC_SH_SMASK_127_96_OFS		0x0388
+#define GIC_SH_SMASK_159_128_OFS	0x0394
+#define GIC_SH_SMASK_191_160_OFS	0x0390
+#define GIC_SH_SMASK_223_192_OFS	0x039c
+#define GIC_SH_SMASK_255_224_OFS	0x0398
+
+/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
+#define GIC_SH_MASK_31_0_OFS		0x0404
+#define GIC_SH_MASK_63_32_OFS		0x0400
+#define GIC_SH_MASK_95_64_OFS		0x040c
+#define GIC_SH_MASK_127_96_OFS		0x0408
+#define GIC_SH_MASK_159_128_OFS		0x0414
+#define GIC_SH_MASK_191_160_OFS		0x0410
+#define GIC_SH_MASK_223_192_OFS		0x041c
+#define GIC_SH_MASK_255_224_OFS		0x0418
+
+/* Pending Global Interrupts (RO) */
+#define GIC_SH_PEND_31_0_OFS		0x0484
+#define GIC_SH_PEND_63_32_OFS		0x0480
+#define GIC_SH_PEND_95_64_OFS		0x048c
+#define GIC_SH_PEND_127_96_OFS		0x0488
+#define GIC_SH_PEND_159_128_OFS		0x0494
+#define GIC_SH_PEND_191_160_OFS		0x0490
+#define GIC_SH_PEND_223_192_OFS		0x049c
+#define GIC_SH_PEND_255_224_OFS		0x0498
+
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS	0x0500
+
+/* Maps Interrupt X to a Pin */
+#define GIC_SH_MAP_TO_PIN(intr) \
+	(GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr))
+
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS	0x2004
+
+/*
+ * Maps Interrupt X to a VPE.  This is more complex than the LE case, as
+ * odd and even registers need to be transposed.  It does work - trust me!
+ */
+#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
+	(GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + \
+	(((((vpe) / 32) ^ 1) - 1) * 4))
+#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe)	(1 << ((vpe) % 32))
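+
+/*
+ * Worked example (illustrative): for VPEs 0-31 the term
+ * ((((vpe) / 32) ^ 1) - 1) * 4 is 0, giving 0x2004 + 32*(intr); for
+ * VPEs 32-63 it is -4, giving 0x2000 + 32*(intr).  The two 32-bit words
+ * of each map register are therefore simply swapped relative to the LE
+ * layout.
+ */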
+
+/* Polarity */
+#define GIC_SH_SET_POLARITY_OFS		0x0100
+#define GIC_SET_POLARITY(intr, pol) \
+	GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), (pol) << ((intr) % 32))
+
+/* Triggering */
+#define GIC_SH_SET_TRIGGER_OFS		0x0180
+#define GIC_SET_TRIGGER(intr, trig) \
+	GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), (trig) << ((intr) % 32))
+
+/* Mask manipulation */
+#define GIC_SH_SMASK_OFS		0x0380
+#define GIC_SET_INTR_MASK(intr, val) \
+	GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_SMASK_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), ((val) << ((intr) % 32)))
+
+#define GIC_SH_RMASK_OFS		0x0300
+#define GIC_CLR_INTR_MASK(intr, val) \
+	GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), ((val) << ((intr) % 32)))
+
+/* Register Map for Local Section */
+#define GIC_VPE_CTL_OFS			0x0000
+#define GIC_VPE_PEND_OFS		0x0004
+#define GIC_VPE_MASK_OFS		0x0008
+#define GIC_VPE_RMASK_OFS		0x000c
+#define GIC_VPE_SMASK_OFS		0x0010
+#define GIC_VPE_WD_MAP_OFS		0x0040
+#define GIC_VPE_COMPARE_MAP_OFS		0x0044
+#define GIC_VPE_TIMER_MAP_OFS		0x0048
+#define GIC_VPE_PERFCTR_MAP_OFS		0x0050
+#define GIC_VPE_SWINT0_MAP_OFS		0x0054
+#define GIC_VPE_SWINT1_MAP_OFS		0x0058
+#define GIC_VPE_OTHER_ADDR_OFS		0x0080
+#define GIC_VPE_WD_CONFIG0_OFS		0x0090
+#define GIC_VPE_WD_COUNT0_OFS		0x0094
+#define GIC_VPE_WD_INITIAL0_OFS		0x0098
+#define GIC_VPE_COMPARE_LO_OFS		0x00a4
+#define GIC_VPE_COMPARE_HI_OFS		0x00a0
+
+#define GIC_VPE_EIC_SHADOW_SET_BASE	0x0100
+#define GIC_VPE_EIC_SS(intr) \
+	(GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
+
+#define GIC_VPE_EIC_VEC_BASE		0x0800
+#define GIC_VPE_EIC_VEC(intr) \
+	(GIC_VPE_EIC_VEC_BASE + (4 * intr))
+
+#define GIC_VPE_TENABLE_NMI_OFS		0x1000
+#define GIC_VPE_TENABLE_YQ_OFS		0x1004
+#define GIC_VPE_TENABLE_INT_31_0_OFS	0x1080
+#define GIC_VPE_TENABLE_INT_63_32_OFS	0x1084
+
+/* User Mode Visible Section Register Map */
+#define GIC_UMV_SH_COUNTER_31_00_OFS	0x0004
+#define GIC_UMV_SH_COUNTER_63_32_OFS	0x0000
+
+#endif /* !LE */
+
+/* Masks */
+#define GIC_SH_CONFIG_COUNTSTOP_SHF	28
+#define GIC_SH_CONFIG_COUNTSTOP_MSK	(MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF)
+
+#define GIC_SH_CONFIG_COUNTBITS_SHF	24
+#define GIC_SH_CONFIG_COUNTBITS_MSK	(MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF)
+
+#define GIC_SH_CONFIG_NUMINTRS_SHF	16
+#define GIC_SH_CONFIG_NUMINTRS_MSK	(MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF)
+
+#define GIC_SH_CONFIG_NUMVPES_SHF	0
+#define GIC_SH_CONFIG_NUMVPES_MSK	(MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
+
+#define GIC_SH_WEDGE_SET(intr)		(intr | (0x1 << 31))
+#define GIC_SH_WEDGE_CLR(intr)		(intr & ~(0x1 << 31))
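+/*
+ * e.g. GIC_SH_WEDGE_SET(18) == 18 | (0x1 << 31); bit 31 chooses whether
+ * the named interrupt's edge-detect bit is set or cleared.
+ */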
+
+#define GIC_MAP_TO_PIN_SHF		31
+#define GIC_MAP_TO_PIN_MSK		(MSK(1) << GIC_MAP_TO_PIN_SHF)
+#define GIC_MAP_TO_NMI_SHF		30
+#define GIC_MAP_TO_NMI_MSK		(MSK(1) << GIC_MAP_TO_NMI_SHF)
+#define GIC_MAP_TO_YQ_SHF		29
+#define GIC_MAP_TO_YQ_MSK		(MSK(1) << GIC_MAP_TO_YQ_SHF)
+#define GIC_MAP_SHF			0
+#define GIC_MAP_MSK			(MSK(6) << GIC_MAP_SHF)
+
+/* GIC_VPE_CTL Masks */
+#define GIC_VPE_CTL_PERFCNT_RTBL_SHF	2
+#define GIC_VPE_CTL_PERFCNT_RTBL_MSK	(MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
+#define GIC_VPE_CTL_TIMER_RTBL_SHF	1
+#define GIC_VPE_CTL_TIMER_RTBL_MSK	(MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF)
+#define GIC_VPE_CTL_EIC_MODE_SHF	0
+#define GIC_VPE_CTL_EIC_MODE_MSK	(MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF)
+
+/* GIC_VPE_PEND Masks */
+#define GIC_VPE_PEND_WD_SHF		0
+#define GIC_VPE_PEND_WD_MSK		(MSK(1) << GIC_VPE_PEND_WD_SHF)
+#define GIC_VPE_PEND_CMP_SHF		1
+#define GIC_VPE_PEND_CMP_MSK		(MSK(1) << GIC_VPE_PEND_CMP_SHF)
+#define GIC_VPE_PEND_TIMER_SHF		2
+#define GIC_VPE_PEND_TIMER_MSK		(MSK(1) << GIC_VPE_PEND_TIMER_SHF)
+#define GIC_VPE_PEND_PERFCOUNT_SHF	3
+#define GIC_VPE_PEND_PERFCOUNT_MSK	(MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF)
+#define GIC_VPE_PEND_SWINT0_SHF		4
+#define GIC_VPE_PEND_SWINT0_MSK		(MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
+#define GIC_VPE_PEND_SWINT1_SHF		5
+#define GIC_VPE_PEND_SWINT1_MSK		(MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
+
+/* GIC_VPE_RMASK Masks */
+#define GIC_VPE_RMASK_WD_SHF		0
+#define GIC_VPE_RMASK_WD_MSK		(MSK(1) << GIC_VPE_RMASK_WD_SHF)
+#define GIC_VPE_RMASK_CMP_SHF		1
+#define GIC_VPE_RMASK_CMP_MSK		(MSK(1) << GIC_VPE_RMASK_CMP_SHF)
+#define GIC_VPE_RMASK_TIMER_SHF		2
+#define GIC_VPE_RMASK_TIMER_MSK		(MSK(1) << GIC_VPE_RMASK_TIMER_SHF)
+#define GIC_VPE_RMASK_PERFCNT_SHF	3
+#define GIC_VPE_RMASK_PERFCNT_MSK	(MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF)
+#define GIC_VPE_RMASK_SWINT0_SHF	4
+#define GIC_VPE_RMASK_SWINT0_MSK	(MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
+#define GIC_VPE_RMASK_SWINT1_SHF	5
+#define GIC_VPE_RMASK_SWINT1_MSK	(MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
+
+/* GIC_VPE_SMASK Masks */
+#define GIC_VPE_SMASK_WD_SHF		0
+#define GIC_VPE_SMASK_WD_MSK		(MSK(1) << GIC_VPE_SMASK_WD_SHF)
+#define GIC_VPE_SMASK_CMP_SHF		1
+#define GIC_VPE_SMASK_CMP_MSK		(MSK(1) << GIC_VPE_SMASK_CMP_SHF)
+#define GIC_VPE_SMASK_TIMER_SHF		2
+#define GIC_VPE_SMASK_TIMER_MSK		(MSK(1) << GIC_VPE_SMASK_TIMER_SHF)
+#define GIC_VPE_SMASK_PERFCNT_SHF	3
+#define GIC_VPE_SMASK_PERFCNT_MSK	(MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF)
+#define GIC_VPE_SMASK_SWINT0_SHF	4
+#define GIC_VPE_SMASK_SWINT0_MSK	(MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
+#define GIC_VPE_SMASK_SWINT1_SHF	5
+#define GIC_VPE_SMASK_SWINT1_MSK	(MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
+
+/*
+ * Set the Mapping of Interrupt X to a VPE.
+ */
+#define GIC_SH_MAP_TO_VPE_SMASK(intr, vpe) \
+	GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe)), \
+		 GIC_SH_MAP_TO_VPE_REG_BIT(vpe))
+
+struct gic_pcpu_mask {
+       DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS);
+};
+
+struct gic_pending_regs {
+       DECLARE_BITMAP(pending, GIC_NUM_INTRS);
+};
+
+struct gic_intrmask_regs {
+       DECLARE_BITMAP(intrmask, GIC_NUM_INTRS);
+};
+
+/*
+ * Interrupt Meta-data specification. The ipiflag helps
+ * in building ipi_map.
+ */
+struct gic_intr_map {
+	unsigned int intrnum; 	/* Ext Intr Num 	*/
+	unsigned int cpunum;	/* Directed to this CPU */
+	unsigned int pin;	/* Directed to this Pin */
+	unsigned int polarity;	/* Polarity : +/-	*/
+	unsigned int trigtype;	/* Trigger  : Edge/Level */
+	unsigned int ipiflag;	/* Is used for IPI?	*/
+};
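+
+/*
+ * An illustrative entry (board code provides the real table), routing
+ * external interrupt 16 to VPE 0 on pin GIC_CPU_INT1 as a positive-edge
+ * IPI:
+ *	{ GIC_EXT_INTR(16), 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_EDGE, 1 }
+ */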
+
+extern void gic_init(unsigned long gic_base_addr,
+	unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
+	unsigned int intrmap_size, unsigned int irqbase);
+
+extern unsigned int gic_get_int(void);
+extern void gic_send_ipi(unsigned int intr);
+
+#endif /* _ASM_GICREGS_H */
diff --git a/include/asm-mips/mips-boards/launch.h b/include/asm-mips/mips-boards/launch.h
new file mode 100644
index 000000000000..d8ae7f95a522
--- /dev/null
+++ b/include/asm-mips/mips-boards/launch.h
@@ -0,0 +1,35 @@
+/*
+ * Cross-CPU launch area definitions used to start secondary CPUs on
+ * MIPS boards.
+ */
+
+#ifndef _ASSEMBLER_
+
+struct cpulaunch {
+    unsigned long	pc;
+    unsigned long	gp;
+    unsigned long	sp;
+    unsigned long	a0;
+    unsigned long	_pad[3]; /* pad to cache line size to avoid thrashing */
+    unsigned long	flags;
+};
+
+#else
+
+#define LOG2CPULAUNCH	5
+#define	LAUNCH_PC	0
+#define	LAUNCH_GP	4
+#define	LAUNCH_SP	8
+#define	LAUNCH_A0	12
+#define	LAUNCH_FLAGS	28
+
+#endif
+
+#define LAUNCH_FREADY	1
+#define LAUNCH_FGO	2
+#define LAUNCH_FGONE	4
+
+#define CPULAUNCH	0x00000f00
+#define NCPULAUNCH	8
+
+/* Polling period in count cycles for secondary CPUs */
+#define LAUNCHPERIOD	10000
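+
+/*
+ * Rough usage sketch (assumes an amon-style boot monitor): the master
+ * fills in pc/sp/gp/a0 in CPU n's record at CPULAUNCH + (n << LOG2CPULAUNCH)
+ * and sets LAUNCH_FGO; a secondary that has advertised LAUNCH_FREADY polls
+ * its flags every LAUNCHPERIOD count cycles, jumps to pc when it sees
+ * LAUNCH_FGO and then marks LAUNCH_FGONE, e.g.:
+ *
+ *	struct cpulaunch *l = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH) + n;
+ */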
diff --git a/include/asm-mips/mips-boards/malta.h b/include/asm-mips/mips-boards/malta.h
index 93bf4e51b8a4..c1891578fa65 100644
--- a/include/asm-mips/mips-boards/malta.h
+++ b/include/asm-mips/mips-boards/malta.h
@@ -52,6 +52,29 @@ static inline unsigned long get_msc_port_base(unsigned long reg)
 }
 
 /*
+ * GCMP Specific definitions
+ */
+#define GCMP_BASE_ADDR			0x1fbf8000
+#define GCMP_ADDRSPACE_SZ		(256 * 1024)
+
+/*
+ * GIC Specific definitions
+ */
+#define GIC_BASE_ADDR			0x1bdc0000
+#define GIC_ADDRSPACE_SZ		(128 * 1024)
+
+/*
+ * MSC01 BIU Specific definitions
+ * FIXME: These should be elsewhere?
+ */
+#define MSC01_BIU_REG_BASE		0x1bc80000
+#define MSC01_BIU_ADDRSPACE_SZ		(256 * 1024)
+#define MSC01_SC_CFG_OFS		0x0110
+#define MSC01_SC_CFG_GICPRES_MSK	0x00000004
+#define MSC01_SC_CFG_GICPRES_SHF	2
+#define MSC01_SC_CFG_GICENA_SHF		3
+
+/*
  * Malta RTC-device indirect register access.
  */
 #define MALTA_RTC_ADR_REG       0x70
diff --git a/include/asm-mips/mips-boards/maltaint.h b/include/asm-mips/mips-boards/maltaint.h
index 7461318f1cd1..cea872fc6f5c 100644
--- a/include/asm-mips/mips-boards/maltaint.h
+++ b/include/asm-mips/mips-boards/maltaint.h
@@ -39,7 +39,9 @@
 #define MIPSCPU_INT_I8259A	MIPSCPU_INT_MB0
 #define MIPSCPU_INT_MB1		3
 #define MIPSCPU_INT_SMI		MIPSCPU_INT_MB1
+#define MIPSCPU_INT_IPI0	MIPSCPU_INT_MB1	/* GIC IPI */
 #define MIPSCPU_INT_MB2		4
+#define MIPSCPU_INT_IPI1	MIPSCPU_INT_MB2	/* GIC IPI */
 #define MIPSCPU_INT_MB3		5
 #define MIPSCPU_INT_COREHI	MIPSCPU_INT_MB3
 #define MIPSCPU_INT_MB4		6
@@ -76,6 +78,31 @@
 #define MSC01E_INT_PERFCTR	10
 #define MSC01E_INT_CPUCTR	11
 
+/* GIC's Nomenclature for Core Interrupt Pins on the Malta */
+#define GIC_CPU_INT0		0 /* Core Interrupt 2 	*/
+#define GIC_CPU_INT1		1 /* .			*/
+#define GIC_CPU_INT2		2 /* .			*/
+#define GIC_CPU_INT3		3 /* .			*/
+#define GIC_CPU_INT4		4 /* .			*/
+#define GIC_CPU_INT5		5 /* Core Interrupt 5   */
+
+#define GIC_EXT_INTR(x)		x
+
+/* Dummy data */
+#define X			0xdead
+
+/* External Interrupts used for IPI */
+#define GIC_IPI_EXT_INTR_RESCHED_VPE0	16
+#define GIC_IPI_EXT_INTR_CALLFNC_VPE0	17
+#define GIC_IPI_EXT_INTR_RESCHED_VPE1	18
+#define GIC_IPI_EXT_INTR_CALLFNC_VPE1	19
+#define GIC_IPI_EXT_INTR_RESCHED_VPE2	20
+#define GIC_IPI_EXT_INTR_CALLFNC_VPE2	21
+#define GIC_IPI_EXT_INTR_RESCHED_VPE3	22
+#define GIC_IPI_EXT_INTR_CALLFNC_VPE3	23
+
+#define MIPS_GIC_IRQ_BASE	(MIPS_CPU_IRQ_BASE + 8)
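+/*
+ * e.g. with the table above, the VPE1 reschedule IPI is external
+ * interrupt 18, which (assuming gic_init() is handed MIPS_GIC_IRQ_BASE
+ * as its irqbase) ends up as Linux IRQ MIPS_GIC_IRQ_BASE + 18.
+ */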
+
 #ifndef __ASSEMBLY__
 extern void maltaint_init(void);
 #endif
diff --git a/include/asm-mips/mips-boards/maltasmp.h b/include/asm-mips/mips-boards/maltasmp.h
new file mode 100644
index 000000000000..8d7e955d506e
--- /dev/null
+++ b/include/asm-mips/mips-boards/maltasmp.h
@@ -0,0 +1,36 @@
+/*
+ * There are several SMP models supported
+ * SMTC is mutually exclusive with the other options (for now)
+ */
+#if defined(CONFIG_MIPS_MT_SMTC)
+#define malta_smtc	1
+#define malta_cmp	0
+#define malta_smvp	0
+#else
+#define malta_smtc	0
+#if defined(CONFIG_MIPS_CMP)
+extern int gcmp_present;
+#define malta_cmp	gcmp_present
+#else
+#define malta_cmp	0
+#endif
+/* FIXME: should become CONFIG_MIPS_MT_SMVP */
+#if defined(CONFIG_MIPS_MT_SMP)
+#define malta_smvp	1
+#else
+#define malta_smvp	0
+#endif
+#endif
+
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+
+/* malta_smtc */
+#include <asm/smtc.h>
+#include <asm/smtc_ipi.h>
+
+/* malta_cmp */
+#include <asm/cmp.h>
+
+/* malta_smvp */
+#include <asm/smvp.h>
diff --git a/include/asm-mips/mipsmtregs.h b/include/asm-mips/mipsmtregs.h
index 5a2f8a3a6a1f..c9420aa97e32 100644
--- a/include/asm-mips/mipsmtregs.h
+++ b/include/asm-mips/mipsmtregs.h
@@ -197,8 +197,8 @@ static inline void __raw_evpe(void)
 	"	.set	pop						\n");
 }
 
-/* Enable multiMT if previous suggested it should be.
-   EMT_ENABLE to force */
+/* Enable virtual processor execution if previous suggested it should be.
+   EVPE_ENABLE to force */
 
 #define EVPE_ENABLE MVPCONTROL_EVP
 
@@ -238,8 +238,8 @@ static inline void __raw_emt(void)
 	"	.set	reorder");
 }
 
-/* enable multiVPE if previous suggested it should be.
-   EVPE_ENABLE to force */
+/* enable multi-threaded execution if previous suggested it should be.
+   EMT_ENABLE to force */
 
 #define EMT_ENABLE VPECONTROL_TE
 
diff --git a/include/asm-mips/r4k-timer.h b/include/asm-mips/r4k-timer.h
new file mode 100644
index 000000000000..a37d12b3b61c
--- /dev/null
+++ b/include/asm-mips/r4k-timer.h
@@ -0,0 +1,30 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_R4K_TYPES_H
+#define __ASM_R4K_TYPES_H
+
+#include <linux/compiler.h>
+
+#ifdef CONFIG_SYNC_R4K
+
+extern void synchronise_count_master(void);
+extern void synchronise_count_slave(void);
+
+#else
+
+static inline void synchronise_count_master(void)
+{
+}
+
+static inline void synchronise_count_slave(void)
+{
+}
+
+#endif
+
+#endif /* __ASM_R4K_TYPES_H */
diff --git a/include/asm-mips/smp-ops.h b/include/asm-mips/smp-ops.h
index b17fdfb5d818..43c207e72a63 100644
--- a/include/asm-mips/smp-ops.h
+++ b/include/asm-mips/smp-ops.h
@@ -51,6 +51,7 @@ static inline void register_smp_ops(struct plat_smp_ops *ops)
 #endif /* !CONFIG_SMP */
 
 extern struct plat_smp_ops up_smp_ops;
+extern struct plat_smp_ops cmp_smp_ops;
 extern struct plat_smp_ops vsmp_smp_ops;
 
 #endif /* __ASM_SMP_OPS_H */
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
index ff3e8936b493..3639b28f80db 100644
--- a/include/asm-mips/smtc.h
+++ b/include/asm-mips/smtc.h
@@ -44,6 +44,7 @@ extern int mipsmt_build_cpu_map(int startslot);
 extern void mipsmt_prepare_cpus(void);
 extern void smtc_smp_finish(void);
 extern void smtc_boot_secondary(int cpu, struct task_struct *t);
+extern void smtc_cpus_done(void);
 
 /*
  * Sharing the TLB between multiple VPEs means that the
diff --git a/include/asm-mips/smvp.h b/include/asm-mips/smvp.h
new file mode 100644
index 000000000000..0d0e80a39e8a
--- /dev/null
+++ b/include/asm-mips/smvp.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_SMVP_H
+#define _ASM_SMVP_H
+
+/*
+ * Definitions for SMVP multitasking on MIPS MT cores
+ */
+struct task_struct;
+
+extern void smvp_smp_setup(void);
+extern void smvp_smp_finish(void);
+extern void smvp_boot_secondary(int cpu, struct task_struct *t);
+extern void smvp_init_secondary(void);
+extern void smvp_cpus_done(void);
+extern void smvp_prepare_cpus(unsigned int max_cpus);
+
+/* This is platform specific */
+extern void smvp_send_ipi(int cpu, unsigned int action);
+#endif /*  _ASM_SMVP_H */