Diffstat (limited to 'arch/powerpc/kernel')
 arch/powerpc/kernel/entry_64.S       |   3
 arch/powerpc/kernel/firmware.c       |   2
 arch/powerpc/kernel/head_fsl_booke.S |   4
 arch/powerpc/kernel/irq.c            | 140
 arch/powerpc/kernel/kgdb.c           |   2
 arch/powerpc/kernel/kprobes.c        |   4
 arch/powerpc/kernel/lparcfg.c        |  10
 arch/powerpc/kernel/nvram_64.c       |   6
 arch/powerpc/kernel/pci_of_scan.c    |   2
 arch/powerpc/kernel/pmc.c            |  10
 arch/powerpc/kernel/process.c        | 116
 arch/powerpc/kernel/prom_init.c      |  81
 arch/powerpc/kernel/ptrace.c         | 516
 arch/powerpc/kernel/signal.c         |   6
 arch/powerpc/kernel/signal_32.c      |  16
 arch/powerpc/kernel/time.c           |  10
 arch/powerpc/kernel/traps.c          | 128
 17 files changed, 890 insertions, 166 deletions
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index bdcb557d470a..07109d843787 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -791,9 +791,8 @@ _GLOBAL(enter_rtas)
 	
         li      r9,1
         rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
-	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
+	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
 	andc	r6,r0,r9
-	ori	r6,r6,MSR_RI
 	sync				/* disable interrupts so SRR0/1 */
 	mtmsrd	r0			/* don't get trashed */
 
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
index 1679a70bbcad..6b1f4271eb53 100644
--- a/arch/powerpc/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
@@ -17,5 +17,5 @@
 
 #include <asm/firmware.h>
 
-unsigned long powerpc_firmware_features;
+unsigned long powerpc_firmware_features __read_mostly;
 EXPORT_SYMBOL_GPL(powerpc_firmware_features);
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 7f4bd7f3b6af..25793bb0e782 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -214,11 +214,11 @@ skpinv:	addi	r6,r6,1				/* Increment */
 	bl	1f		/* Find our address */
 1:	mflr	r9
 	rlwimi	r7,r9,0,20,31
-	addi	r7,r7,24
+	addi	r7,r7,(2f - 1b)
 	mtspr	SPRN_SRR0,r7
 	mtspr	SPRN_SRR1,r6
 	rfi
-
+2:
 /* 4. Clear out PIDs & Search info */
 	li	r6,0
 	mtspr   SPRN_MAS6,r6
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9040330b0530..64f6f2031c22 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -73,8 +73,10 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace.h>
 
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
 int __irq_offset_value;
-static int ppc_spurious_interrupts;
 
 #ifdef CONFIG_PPC32
 EXPORT_SYMBOL(__irq_offset_value);
@@ -180,30 +182,64 @@ notrace void raw_local_irq_restore(unsigned long en)
 EXPORT_SYMBOL(raw_local_irq_restore);
 #endif /* CONFIG_PPC64 */
 
+static int show_other_interrupts(struct seq_file *p, int prec)
+{
+	int j;
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
+	if (tau_initialized) {
+		seq_printf(p, "%*s: ", prec, "TAU");
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", tau_interrupts(j));
+		seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
+	}
+#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
+
+	seq_printf(p, "%*s: ", prec, "LOC");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
+	seq_printf(p, "  Local timer interrupts\n");
+
+	seq_printf(p, "%*s: ", prec, "SPU");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
+	seq_printf(p, "  Spurious interrupts\n");
+
+	seq_printf(p, "%*s: ", prec, "CNT");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
+	seq_printf(p, "  Performance monitoring interrupts\n");
+
+	seq_printf(p, "%*s: ", prec, "MCE");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
+	seq_printf(p, "  Machine check exceptions\n");
+
+	return 0;
+}
+
 int show_interrupts(struct seq_file *p, void *v)
 {
-	int i = *(loff_t *)v, j;
+	unsigned long flags, any_count = 0;
+	int i = *(loff_t *) v, j, prec;
 	struct irqaction *action;
 	struct irq_desc *desc;
-	unsigned long flags;
 
+	if (i > nr_irqs)
+		return 0;
+
+	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
+		j *= 10;
+
+	if (i == nr_irqs)
+		return show_other_interrupts(p, prec);
+
+	/* print header */
 	if (i == 0) {
-		seq_puts(p, "           ");
+		seq_printf(p, "%*s", prec + 8, "");
 		for_each_online_cpu(j)
-			seq_printf(p, "CPU%d       ", j);
+			seq_printf(p, "CPU%-8d", j);
 		seq_putc(p, '\n');
-	} else if (i == nr_irqs) {
-#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
-		if (tau_initialized){
-			seq_puts(p, "TAU: ");
-			for_each_online_cpu(j)
-				seq_printf(p, "%10u ", tau_interrupts(j));
-			seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
-		}
-#endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/
-		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
-
-		return 0;
 	}
 
 	desc = irq_to_desc(i);
@@ -211,37 +247,48 @@ int show_interrupts(struct seq_file *p, void *v)
 		return 0;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-
+	for_each_online_cpu(j)
+		any_count |= kstat_irqs_cpu(i, j);
 	action = desc->action;
-	if (!action || !action->handler)
-		goto skip;
+	if (!action && !any_count)
+		goto out;
 
-	seq_printf(p, "%3d: ", i);
-#ifdef CONFIG_SMP
+	seq_printf(p, "%*d: ", prec, i);
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#else
-	seq_printf(p, "%10u ", kstat_irqs(i));
-#endif /* CONFIG_SMP */
 
 	if (desc->chip)
-		seq_printf(p, " %s ", desc->chip->name);
+		seq_printf(p, "  %-16s", desc->chip->name);
 	else
-		seq_puts(p, "  None      ");
+		seq_printf(p, "  %-16s", "None");
+	seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
 
-	seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
-	seq_printf(p, "    %s", action->name);
+	if (action) {
+		seq_printf(p, "     %s", action->name);
+		while ((action = action->next) != NULL)
+			seq_printf(p, ", %s", action->name);
+	}
 
-	for (action = action->next; action; action = action->next)
-		seq_printf(p, ", %s", action->name);
 	seq_putc(p, '\n');
-
-skip:
+out:
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-
 	return 0;
 }
 
+/*
+ * /proc/stat helpers
+ */
+u64 arch_irq_stat_cpu(unsigned int cpu)
+{
+	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
+
+	sum += per_cpu(irq_stat, cpu).pmu_irqs;
+	sum += per_cpu(irq_stat, cpu).mce_exceptions;
+	sum += per_cpu(irq_stat, cpu).spurious_irqs;
+
+	return sum;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(cpumask_t map)
 {
@@ -353,8 +400,7 @@ void do_IRQ(struct pt_regs *regs)
 	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
 		handle_one_irq(irq);
 	else if (irq != NO_IRQ_IGNORE)
-		/* That's not SMP safe ... but who cares ? */
-		ppc_spurious_interrupts++;
+		__get_cpu_var(irq_stat).spurious_irqs++;
 
 	irq_exit();
 	set_irq_regs(old_regs);
@@ -474,7 +520,7 @@ void do_softirq(void)
  */
 
 static LIST_HEAD(irq_hosts);
-static DEFINE_SPINLOCK(irq_big_lock);
+static DEFINE_RAW_SPINLOCK(irq_big_lock);
 static unsigned int revmap_trees_allocated;
 static DEFINE_MUTEX(revmap_trees_mutex);
 struct irq_map_entry irq_map[NR_IRQS];
@@ -520,14 +566,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 	if (host->ops->match == NULL)
 		host->ops->match = default_irq_host_match;
 
-	spin_lock_irqsave(&irq_big_lock, flags);
+	raw_spin_lock_irqsave(&irq_big_lock, flags);
 
 	/* If it's a legacy controller, check for duplicates and
 	 * mark it as allocated (we use irq 0 host pointer for that
 	 */
 	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
 		if (irq_map[0].host != NULL) {
-			spin_unlock_irqrestore(&irq_big_lock, flags);
+			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
 			/* If we are early boot, we can't free the structure,
 			 * too bad...
 			 * this will be fixed once slab is made available early
@@ -541,7 +587,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 	}
 
 	list_add(&host->link, &irq_hosts);
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
 
 	/* Additional setups per revmap type */
 	switch(revmap_type) {
@@ -592,13 +638,13 @@ struct irq_host *irq_find_host(struct device_node *node)
 	 * the absence of a device node. This isn't a problem so far
 	 * yet though...
 	 */
-	spin_lock_irqsave(&irq_big_lock, flags);
+	raw_spin_lock_irqsave(&irq_big_lock, flags);
 	list_for_each_entry(h, &irq_hosts, link)
 		if (h->ops->match(h, node)) {
 			found = h;
 			break;
 		}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
 	return found;
 }
 EXPORT_SYMBOL_GPL(irq_find_host);
@@ -967,7 +1013,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
 	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
 		return NO_IRQ;
 
-	spin_lock_irqsave(&irq_big_lock, flags);
+	raw_spin_lock_irqsave(&irq_big_lock, flags);
 
 	/* Use hint for 1 interrupt if any */
 	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
@@ -991,7 +1037,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
 		}
 	}
 	if (found == NO_IRQ) {
-		spin_unlock_irqrestore(&irq_big_lock, flags);
+		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
 		return NO_IRQ;
 	}
  hint_found:
@@ -1000,7 +1046,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
 		smp_wmb();
 		irq_map[i].host = host;
 	}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
 	return found;
 }
 
@@ -1012,7 +1058,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
 	WARN_ON (virq < NUM_ISA_INTERRUPTS);
 	WARN_ON (count == 0 || (virq + count) > irq_virq_count);
 
-	spin_lock_irqsave(&irq_big_lock, flags);
+	raw_spin_lock_irqsave(&irq_big_lock, flags);
 	for (i = virq; i < (virq + count); i++) {
 		struct irq_host *host;
 
@@ -1025,7 +1071,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
 		smp_wmb();
 		irq_map[i].host = NULL;
 	}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
 }
 
 int arch_early_irq_init(void)
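
The per-CPU irq_stat counters introduced above replace the single ppc_spurious_interrupts global and back both the new LOC/SPU/CNT/MCE rows in /proc/interrupts and the arch_irq_stat_cpu() helper (the irq_cpustat_t fields and the prototype live in asm/hardirq.h, outside the "limited to arch/powerpc/kernel" diffstat above). A minimal sketch, not part of this patch, of how a generic consumer such as the /proc/stat code can total the counters across CPUs; the helper name total_arch_irqs() is hypothetical:

#include <linux/cpumask.h>
#include <linux/types.h>
#include <asm/hardirq.h>

/* Sum the powerpc-specific interrupt counters over all CPUs. */
static u64 total_arch_irqs(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += arch_irq_stat_cpu(cpu);	/* helper added by this patch */

	return sum;
}
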
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index b6bd1eaa1c24..41bada0298c8 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -333,7 +333,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 		atomic_set(&kgdb_cpu_doing_single_step, -1);
 		/* set the trace bit if we're stepping */
 		if (remcom_in_buffer[0] == 's') {
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 			mtspr(SPRN_DBCR0,
 			      mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
 			linux_regs->msr |= MSR_DE;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index c9329786073b..3fd1af902112 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -36,7 +36,7 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
-#ifdef CONFIG_BOOKE
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 #define MSR_SINGLESTEP	(MSR_DE)
 #else
 #define MSR_SINGLESTEP	(MSR_SE)
@@ -110,7 +110,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	 * like Decrementer or External Interrupt */
 	regs->msr &= ~MSR_EE;
 	regs->msr |= MSR_SINGLESTEP;
-#ifdef CONFIG_BOOKE
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	regs->msr &= ~MSR_CE;
 	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
 #endif
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 79a00bb9c64c..d09d1c615150 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -359,7 +359,7 @@ static void parse_system_parameter_string(struct seq_file *m)
 
 	unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
 	if (!local_buffer) {
-		printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
+		printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
 		       __FILE__, __func__, __LINE__);
 		return;
 	}
@@ -383,13 +383,13 @@ static void parse_system_parameter_string(struct seq_file *m)
 		int idx, w_idx;
 		char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
 		if (!workbuffer) {
-			printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
+			printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
 			       __FILE__, __func__, __LINE__);
 			kfree(local_buffer);
 			return;
 		}
 #ifdef LPARCFG_DEBUG
-		printk(KERN_INFO "success calling get-system-parameter \n");
+		printk(KERN_INFO "success calling get-system-parameter\n");
 #endif
 		splpar_strlen = local_buffer[0] * 256 + local_buffer[1];
 		local_buffer += 2;	/* step over strlen value */
@@ -440,7 +440,7 @@ static int lparcfg_count_active_processors(void)
 
 	while ((cpus_dn = of_find_node_by_type(cpus_dn, "cpu"))) {
 #ifdef LPARCFG_DEBUG
-		printk(KERN_ERR "cpus_dn %p \n", cpus_dn);
+		printk(KERN_ERR "cpus_dn %p\n", cpus_dn);
 #endif
 		count++;
 	}
@@ -725,7 +725,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 	const unsigned int *lp_index_ptr;
 	unsigned int lp_index = 0;
 
-	seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
+	seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
 
 	rootdn = of_find_node_by_path("/");
 	if (rootdn) {
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index ad461e735aec..9cf197f01e94 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -338,8 +338,8 @@ static int __init nvram_create_os_partition(void)
 
 	rc = nvram_write_header(new_part);
 	if (rc <= 0) {
-		printk(KERN_ERR "nvram_create_os_partition: nvram_write_header \
-				failed (%d)\n", rc);
+		printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
+				"failed (%d)\n", rc);
 		return rc;
 	}
 
@@ -349,7 +349,7 @@ static int __init nvram_create_os_partition(void)
 	rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index);
 	if (rc <= 0) {
 		printk(KERN_ERR "nvram_create_os_partition: nvram_write "
-				"failed (%d)\n", rc);
+		       "failed (%d)\n", rc);
 		return rc;
 	}
 	
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 4aa17401657b..cd11d5ca80df 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -304,7 +304,7 @@ static void __devinit __of_scan_bus(struct device_node *node,
 	int reglen, devfn;
 	struct pci_dev *dev;
 
-	pr_debug("of_scan_bus(%s) bus no %d... \n",
+	pr_debug("of_scan_bus(%s) bus no %d...\n",
 		 node->full_name, bus->number);
 
 	/* Scan direct children */
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index 0516e2d3e02e..461499b43cff 100644
--- a/arch/powerpc/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -37,7 +37,7 @@ static void dummy_perf(struct pt_regs *regs)
 }
 
 
-static DEFINE_SPINLOCK(pmc_owner_lock);
+static DEFINE_RAW_SPINLOCK(pmc_owner_lock);
 static void *pmc_owner_caller; /* mostly for debugging */
 perf_irq_t perf_irq = dummy_perf;
 
@@ -45,7 +45,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq)
 {
 	int err = 0;
 
-	spin_lock(&pmc_owner_lock);
+	raw_spin_lock(&pmc_owner_lock);
 
 	if (pmc_owner_caller) {
 		printk(KERN_WARNING "reserve_pmc_hardware: "
@@ -59,21 +59,21 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq)
 	perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
 
  out:
-	spin_unlock(&pmc_owner_lock);
+	raw_spin_unlock(&pmc_owner_lock);
 	return err;
 }
 EXPORT_SYMBOL_GPL(reserve_pmc_hardware);
 
 void release_pmc_hardware(void)
 {
-	spin_lock(&pmc_owner_lock);
+	raw_spin_lock(&pmc_owner_lock);
 
 	WARN_ON(! pmc_owner_caller);
 
 	pmc_owner_caller = NULL;
 	perf_irq = dummy_perf;
 
-	spin_unlock(&pmc_owner_lock);
+	raw_spin_unlock(&pmc_owner_lock);
 }
 EXPORT_SYMBOL_GPL(release_pmc_hardware);
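
For context on the lock converted above: reserve_pmc_hardware() and release_pmc_hardware() arbitrate ownership of the performance-monitor interrupt, and the raw_spinlock_t keeps pmc_owner_lock a true spinning lock even on kernels where plain spinlock_t may sleep. A minimal caller sketch, illustrative only; my_pmu_interrupt() and the init/exit functions are hypothetical:

#include <asm/pmc.h>
#include <asm/ptrace.h>

/* Hypothetical handler, invoked through the perf_irq pointer. */
static void my_pmu_interrupt(struct pt_regs *regs)
{
	/* read/clear the performance monitor counters, rearm sampling, ... */
}

static int my_pmu_init(void)
{
	int err = reserve_pmc_hardware(my_pmu_interrupt);

	if (err)
		return err;		/* another user already owns the PMCs */

	/* ... program the PMC registers ... */
	return 0;
}

static void my_pmu_exit(void)
{
	release_pmc_hardware();		/* perf_irq falls back to dummy_perf */
}
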
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7b816daf3eba..e4d71ced97ef 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -245,6 +245,24 @@ void discard_lazy_cpu_state(void)
 }
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+void do_send_trap(struct pt_regs *regs, unsigned long address,
+		  unsigned long error_code, int signal_code, int breakpt)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
+			11, SIGSEGV) == NOTIFY_STOP)
+		return;
+
+	/* Deliver the signal to userspace */
+	info.si_signo = SIGTRAP;
+	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
+	info.si_code = signal_code;
+	info.si_addr = (void __user *)address;
+	force_sig_info(SIGTRAP, &info, current);
+}
+#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
 void do_dabr(struct pt_regs *regs, unsigned long address,
 		    unsigned long error_code)
 {
@@ -257,12 +275,6 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
 	if (debugger_dabr_match(regs))
 		return;
 
-	/* Clear the DAC and struct entries.  One shot trigger */
-#if defined(CONFIG_BOOKE)
-	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
-							| DBCR0_IDM));
-#endif
-
 	/* Clear the DABR */
 	set_dabr(0);
 
@@ -273,9 +285,82 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
 	info.si_addr = (void __user *)address;
 	force_sig_info(SIGTRAP, &info, current);
 }
+#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
 
 static DEFINE_PER_CPU(unsigned long, current_dabr);
 
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+/*
+ * Set the debug registers back to their default "safe" values.
+ */
+static void set_debug_reg_defaults(struct thread_struct *thread)
+{
+	thread->iac1 = thread->iac2 = 0;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+	thread->iac3 = thread->iac4 = 0;
+#endif
+	thread->dac1 = thread->dac2 = 0;
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+	thread->dvc1 = thread->dvc2 = 0;
+#endif
+	thread->dbcr0 = 0;
+#ifdef CONFIG_BOOKE
+	/*
+	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
+	 */
+	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |	\
+			DBCR1_IAC3US | DBCR1_IAC4US;
+	/*
+	 * Force Data Address Compare User/Supervisor bits to be User-only
+	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
+	 */
+	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
+#else
+	thread->dbcr1 = 0;
+#endif
+}
+
+static void prime_debug_regs(struct thread_struct *thread)
+{
+	mtspr(SPRN_IAC1, thread->iac1);
+	mtspr(SPRN_IAC2, thread->iac2);
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+	mtspr(SPRN_IAC3, thread->iac3);
+	mtspr(SPRN_IAC4, thread->iac4);
+#endif
+	mtspr(SPRN_DAC1, thread->dac1);
+	mtspr(SPRN_DAC2, thread->dac2);
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+	mtspr(SPRN_DVC1, thread->dvc1);
+	mtspr(SPRN_DVC2, thread->dvc2);
+#endif
+	mtspr(SPRN_DBCR0, thread->dbcr0);
+	mtspr(SPRN_DBCR1, thread->dbcr1);
+#ifdef CONFIG_BOOKE
+	mtspr(SPRN_DBCR2, thread->dbcr2);
+#endif
+}
+/*
+ * If either the old or the new thread is using the debug
+ * registers, load the debug registers with the values stored
+ * in the new thread.
+ */
+static void switch_booke_debug_regs(struct thread_struct *new_thread)
+{
+	if ((current->thread.dbcr0 & DBCR0_IDM)
+		|| (new_thread->dbcr0 & DBCR0_IDM))
+			prime_debug_regs(new_thread);
+}
+#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
+static void set_debug_reg_defaults(struct thread_struct *thread)
+{
+	if (thread->dabr) {
+		thread->dabr = 0;
+		set_dabr(0);
+	}
+}
+#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
+
 int set_dabr(unsigned long dabr)
 {
 	__get_cpu_var(current_dabr) = dabr;
@@ -284,7 +369,7 @@ int set_dabr(unsigned long dabr)
 		return ppc_md.set_dabr(dabr);
 
 	/* XXX should we have a CPU_FTR_HAS_DABR ? */
-#if defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	mtspr(SPRN_DAC1, dabr);
 #elif defined(CONFIG_PPC_BOOK3S)
 	mtspr(SPRN_DABR, dabr);
@@ -371,10 +456,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 #endif /* CONFIG_SMP */
 
-#if defined(CONFIG_BOOKE)
-	/* If new thread DAC (HW breakpoint) is the same then leave it */
-	if (new->thread.dabr)
-		set_dabr(new->thread.dabr);
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+	switch_booke_debug_regs(&new->thread);
 #else
 	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
 		set_dabr(new->thread.dabr);
@@ -514,7 +597,7 @@ void show_regs(struct pt_regs * regs)
 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
 	trap = TRAP(regs);
 	if (trap == 0x300 || trap == 0x600)
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
 #else
 		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
@@ -556,14 +639,7 @@ void flush_thread(void)
 {
 	discard_lazy_cpu_state();
 
-	if (current->thread.dabr) {
-		current->thread.dabr = 0;
-		set_dabr(0);
-
-#if defined(CONFIG_BOOKE)
-		current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
-#endif
-	}
+	set_debug_reg_defaults(&current->thread);
 }
 
 void
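
Seen from userspace, the SIGTRAP raised by do_send_trap() identifies the event through siginfo: si_code carries the signal_code argument (TRAP_HWBKPT from the DebugException path in traps.c below), si_errno the breakpoint/watchpoint number, and si_addr the trapping address. A minimal, illustrative handler; printing from a signal handler is not async-signal-safe and is for demonstration only:

#include <signal.h>
#include <stdio.h>
#include <string.h>

static void trap_handler(int sig, siginfo_t *si, void *ucontext)
{
	if (si->si_code == TRAP_HWBKPT)
		/* si_errno holds the id passed as "breakpt" above (1-6) */
		printf("hw debug event %d at %p\n", si->si_errno, si->si_addr);
}

static void install_sigtrap_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = trap_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGTRAP, &sa, NULL);
}
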
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index bafac2e41ae1..5f306c4946e5 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -654,6 +654,9 @@ static void __init early_cmdline_parse(void)
 #define OV5_CMO			0x00
 #endif
 
+/* Option Vector 6: IBM PAPR hints */
+#define OV6_LINUX		0x02	/* Linux is our OS */
+
 /*
  * The architecture vector has an array of PVR mask/value pairs,
  * followed by # option vectors - 1, followed by the option vectors.
@@ -665,7 +668,7 @@ static unsigned char ibm_architecture_vec[] = {
 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
 	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
 	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
-	5 - 1,				/* 5 option vectors */
+	6 - 1,				/* 6 option vectors */
 
 	/* option vector 1: processor architectures supported */
 	3 - 2,				/* length */
@@ -697,12 +700,29 @@ static unsigned char ibm_architecture_vec[] = {
 	0,				/* don't halt */
 
 	/* option vector 5: PAPR/OF options */
-	5 - 2,				/* length */
+	13 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
 	OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
 	OV5_DONATE_DEDICATE_CPU | OV5_MSI,
 	0,
 	OV5_CMO,
+	0,
+	0,
+	0,
+	0,
+	/* WARNING: The offset of the "number of cores" field below
+	 * must match the macro below. Update the definition if
+	 * the structure layout changes.
+	 */
+#define IBM_ARCH_VEC_NRCORES_OFFSET	100
+	W(NR_CPUS),			/* number of cores supported */
+
+	/* option vector 6: IBM PAPR hints */
+	4 - 2,				/* length */
+	0,
+	0,
+	OV6_LINUX,
+
 };
 
 /* Old method - ELF header with PT_NOTE sections */
@@ -792,13 +812,70 @@ static struct fake_elf {
 	}
 };
 
+static int __init prom_count_smt_threads(void)
+{
+	phandle node;
+	char type[64];
+	unsigned int plen;
+
+	/* Pick up the first CPU node we can find */
+	for (node = 0; prom_next_node(&node); ) {
+		type[0] = 0;
+		prom_getprop(node, "device_type", type, sizeof(type));
+
+		if (strcmp(type, RELOC("cpu")))
+			continue;
+		/*
+		 * There is an entry for each smt thread, each entry being
+		 * 4 bytes long.  All cpus should have the same number of
+		 * smt threads, so return after finding the first.
+		 */
+		plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
+		if (plen == PROM_ERROR)
+			break;
+		plen >>= 2;
+		prom_debug("Found 0x%x smt threads per core\n", (unsigned long)plen);
+
+		/* Sanity check */
+		if (plen < 1 || plen > 64) {
+			prom_printf("Threads per core 0x%x out of bounds, assuming 1\n",
+				    (unsigned long)plen);
+			return 1;
+		}
+		return plen;
+	}
+	prom_debug("No threads found, assuming 1 per core\n");
+
+	return 1;
+
+}
+
+
 static void __init prom_send_capabilities(void)
 {
 	ihandle elfloader, root;
 	prom_arg_t ret;
+	u32 *cores;
 
 	root = call_prom("open", 1, 1, ADDR("/"));
 	if (root != 0) {
+		/* We need to tell the FW about the number of cores we support.
+		 *
+		 * To do that, we count the number of threads on the first core
+		 * (we assume this is the same for all cores) and use it to
+		 * divide NR_CPUS.
+		 */
+		cores = (u32 *)PTRRELOC(&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]);
+		if (*cores != NR_CPUS) {
+			prom_printf("WARNING ! "
+				    "ibm_architecture_vec structure inconsistent: 0x%x !\n",
+				    *cores);
+		} else {
+			*cores = NR_CPUS / prom_count_smt_threads();
+			prom_printf("Max number of cores passed to firmware: 0x%x\n",
+				    (unsigned long)*cores);
+		}
+
 		/* try calling the ibm,client-architecture-support method */
 		prom_printf("Calling ibm,client-architecture-support...");
 		if (call_prom_ret("call-method", 3, 2, &ret,
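
Worked example of the core count handed to firmware (the numbers are hypothetical): "ibm,ppc-interrupt-server#s" has one 4-byte entry per SMT thread, so prom_count_smt_threads() returns the property length shifted right by 2, and prom_send_capabilities() divides NR_CPUS by that value before patching it into the architecture vector:

/* Illustrative arithmetic only; the EXAMPLE_* values are made up. */
#define EXAMPLE_NR_CPUS		128	/* kernel configured for 128 hw threads */
#define EXAMPLE_PROPLEN		16	/* ibm,ppc-interrupt-server#s is 16 bytes */

static unsigned int example_core_count(void)
{
	unsigned int threads_per_core = EXAMPLE_PROPLEN >> 2;	/* 16 / 4 = 4  */

	return EXAMPLE_NR_CPUS / threads_per_core;		/* 128 / 4 = 32 */
}
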
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index ef149880c145..d9b05866615f 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -46,7 +46,7 @@
 /*
  * Set of msr bits that gdb can change on behalf of a process.
  */
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 #define MSR_DEBUGCHANGE	0
 #else
 #define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
@@ -703,7 +703,7 @@ void user_enable_single_step(struct task_struct *task)
 	struct pt_regs *regs = task->thread.regs;
 
 	if (regs != NULL) {
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 		task->thread.dbcr0 &= ~DBCR0_BT;
 		task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
 		regs->msr |= MSR_DE;
@@ -720,7 +720,7 @@ void user_enable_block_step(struct task_struct *task)
 	struct pt_regs *regs = task->thread.regs;
 
 	if (regs != NULL) {
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 		task->thread.dbcr0 &= ~DBCR0_IC;
 		task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
 		regs->msr |= MSR_DE;
@@ -737,17 +737,25 @@ void user_disable_single_step(struct task_struct *task)
 	struct pt_regs *regs = task->thread.regs;
 
 	if (regs != NULL) {
-#if defined(CONFIG_BOOKE)
-		/* If DAC don't clear DBCRO_IDM or MSR_DE */
-		if (task->thread.dabr)
-			task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
-		else {
-			task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+		/*
+		 * The logic to disable single stepping should be as
+		 * simple as turning off the Instruction Complete flag.
+		 * And, after doing so, if all debug flags are off, turn
+		 * off DBCR0(IDM) and MSR(DE) .... Torez
+		 */
+		task->thread.dbcr0 &= ~DBCR0_IC;
+		/*
+		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
+		 */
+		if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
+					task->thread.dbcr1)) {
+			/*
+			 * All debug events were off.....
+			 */
+			task->thread.dbcr0 &= ~DBCR0_IDM;
 			regs->msr &= ~MSR_DE;
 		}
-#elif defined(CONFIG_40x)
-		task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
-		regs->msr &= ~MSR_DE;
 #else
 		regs->msr &= ~(MSR_SE | MSR_BE);
 #endif
@@ -769,8 +777,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 	if ((data & ~0x7UL) >= TASK_SIZE)
 		return -EIO;
 
-#ifndef CONFIG_BOOKE
-
+#ifndef CONFIG_PPC_ADV_DEBUG_REGS
 	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
 	 *  It was assumed, on previous implementations, that 3 bits were
 	 *  passed together with the data address, fitting the design of the
@@ -789,21 +796,22 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 
 	/* Move contents to the DABR register */
 	task->thread.dabr = data;
-
-#endif
-#if defined(CONFIG_BOOKE)
-
+#else /* CONFIG_PPC_ADV_DEBUG_REGS */
 	/* As described above, it was assumed 3 bits were passed with the data
 	 *  address, but we will assume only the mode bits will be passed
 	 *  as to not cause alignment restrictions for DAC-based processors.
 	 */
 
 	/* DAC's hold the whole address without any mode flags */
-	task->thread.dabr = data & ~0x3UL;
-
-	if (task->thread.dabr == 0) {
-		task->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM);
-		task->thread.regs->msr &= ~MSR_DE;
+	task->thread.dac1 = data & ~0x3UL;
+
+	if (task->thread.dac1 == 0) {
+		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
+		if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
+					task->thread.dbcr1)) {
+			task->thread.regs->msr &= ~MSR_DE;
+			task->thread.dbcr0 &= ~DBCR0_IDM;
+		}
 		return 0;
 	}
 
@@ -814,17 +822,17 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 
 	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
 	   register */
-	task->thread.dbcr0 = DBCR0_IDM;
+	task->thread.dbcr0 |= DBCR0_IDM;
 
 	/* Check for write and read flags and set DBCR0
 	   accordingly */
+	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
 	if (data & 0x1UL)
-		task->thread.dbcr0 |= DBSR_DAC1R;
+		dbcr_dac(task) |= DBCR_DAC1R;
 	if (data & 0x2UL)
-		task->thread.dbcr0 |= DBSR_DAC1W;
-
+		dbcr_dac(task) |= DBCR_DAC1W;
 	task->thread.regs->msr |= MSR_DE;
-#endif
+#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
 	return 0;
 }
 
@@ -839,6 +847,394 @@ void ptrace_disable(struct task_struct *child)
 	user_disable_single_step(child);
 }
 
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+static long set_instruction_bp(struct task_struct *child,
+			      struct ppc_hw_breakpoint *bp_info)
+{
+	int slot;
+	int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0);
+	int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0);
+	int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0);
+	int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0);
+
+	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
+		slot2_in_use = 1;
+	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
+		slot4_in_use = 1;
+
+	if (bp_info->addr >= TASK_SIZE)
+		return -EIO;
+
+	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
+
+		/* Make sure range is valid. */
+		if (bp_info->addr2 >= TASK_SIZE)
+			return -EIO;
+
+		/* We need a pair of IAC registers */
+		if ((!slot1_in_use) && (!slot2_in_use)) {
+			slot = 1;
+			child->thread.iac1 = bp_info->addr;
+			child->thread.iac2 = bp_info->addr2;
+			child->thread.dbcr0 |= DBCR0_IAC1;
+			if (bp_info->addr_mode ==
+					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
+				dbcr_iac_range(child) |= DBCR_IAC12X;
+			else
+				dbcr_iac_range(child) |= DBCR_IAC12I;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+		} else if ((!slot3_in_use) && (!slot4_in_use)) {
+			slot = 3;
+			child->thread.iac3 = bp_info->addr;
+			child->thread.iac4 = bp_info->addr2;
+			child->thread.dbcr0 |= DBCR0_IAC3;
+			if (bp_info->addr_mode ==
+					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
+				dbcr_iac_range(child) |= DBCR_IAC34X;
+			else
+				dbcr_iac_range(child) |= DBCR_IAC34I;
+#endif
+		} else
+			return -ENOSPC;
+	} else {
+		/* We only need one.  If possible leave a pair free in
+		 * case a range is needed later
+		 */
+		if (!slot1_in_use) {
+			/*
+			 * Don't use iac1 if iac1-iac2 are free and either
+			 * iac3 or iac4 (but not both) are free
+			 */
+			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
+				slot = 1;
+				child->thread.iac1 = bp_info->addr;
+				child->thread.dbcr0 |= DBCR0_IAC1;
+				goto out;
+			}
+		}
+		if (!slot2_in_use) {
+			slot = 2;
+			child->thread.iac2 = bp_info->addr;
+			child->thread.dbcr0 |= DBCR0_IAC2;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+		} else if (!slot3_in_use) {
+			slot = 3;
+			child->thread.iac3 = bp_info->addr;
+			child->thread.dbcr0 |= DBCR0_IAC3;
+		} else if (!slot4_in_use) {
+			slot = 4;
+			child->thread.iac4 = bp_info->addr;
+			child->thread.dbcr0 |= DBCR0_IAC4;
+#endif
+		} else
+			return -ENOSPC;
+	}
+out:
+	child->thread.dbcr0 |= DBCR0_IDM;
+	child->thread.regs->msr |= MSR_DE;
+
+	return slot;
+}
+
+static int del_instruction_bp(struct task_struct *child, int slot)
+{
+	switch (slot) {
+	case 1:
+		if (child->thread.iac1 == 0)
+			return -ENOENT;
+
+		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
+			/* address range - clear slots 1 & 2 */
+			child->thread.iac2 = 0;
+			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
+		}
+		child->thread.iac1 = 0;
+		child->thread.dbcr0 &= ~DBCR0_IAC1;
+		break;
+	case 2:
+		if (child->thread.iac2 == 0)
+			return -ENOENT;
+
+		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
+			/* used in a range */
+			return -EINVAL;
+		child->thread.iac2 = 0;
+		child->thread.dbcr0 &= ~DBCR0_IAC2;
+		break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+	case 3:
+		if (child->thread.iac3 == 0)
+			return -ENOENT;
+
+		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
+			/* address range - clear slots 3 & 4 */
+			child->thread.iac4 = 0;
+			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
+		}
+		child->thread.iac3 = 0;
+		child->thread.dbcr0 &= ~DBCR0_IAC3;
+		break;
+	case 4:
+		if (child->thread.iac4 == 0)
+			return -ENOENT;
+
+		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
+			/* Used in a range */
+			return -EINVAL;
+		child->thread.iac4 = 0;
+		child->thread.dbcr0 &= ~DBCR0_IAC4;
+		break;
+#endif
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
+{
+	int byte_enable =
+		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
+		& 0xf;
+	int condition_mode =
+		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
+	int slot;
+
+	if (byte_enable && (condition_mode == 0))
+		return -EINVAL;
+
+	if (bp_info->addr >= TASK_SIZE)
+		return -EIO;
+
+	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
+		slot = 1;
+		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+			dbcr_dac(child) |= DBCR_DAC1R;
+		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+			dbcr_dac(child) |= DBCR_DAC1W;
+		child->thread.dac1 = (unsigned long)bp_info->addr;
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+		if (byte_enable) {
+			child->thread.dvc1 =
+				(unsigned long)bp_info->condition_value;
+			child->thread.dbcr2 |=
+				((byte_enable << DBCR2_DVC1BE_SHIFT) |
+				 (condition_mode << DBCR2_DVC1M_SHIFT));
+		}
+#endif
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+	} else if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
+		/* Both dac1 and dac2 are part of a range */
+		return -ENOSPC;
+#endif
+	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
+		slot = 2;
+		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+			dbcr_dac(child) |= DBCR_DAC2R;
+		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+			dbcr_dac(child) |= DBCR_DAC2W;
+		child->thread.dac2 = (unsigned long)bp_info->addr;
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+		if (byte_enable) {
+			child->thread.dvc2 =
+				(unsigned long)bp_info->condition_value;
+			child->thread.dbcr2 |=
+				((byte_enable << DBCR2_DVC2BE_SHIFT) |
+				 (condition_mode << DBCR2_DVC2M_SHIFT));
+		}
+#endif
+	} else
+		return -ENOSPC;
+	child->thread.dbcr0 |= DBCR0_IDM;
+	child->thread.regs->msr |= MSR_DE;
+
+	return slot + 4;
+}
+
+static int del_dac(struct task_struct *child, int slot)
+{
+	if (slot == 1) {
+		if (child->thread.dac1 == 0)
+			return -ENOENT;
+
+		child->thread.dac1 = 0;
+		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+		if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
+			child->thread.dac2 = 0;
+			child->thread.dbcr2 &= ~DBCR2_DAC12MODE;
+		}
+		child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
+#endif
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+		child->thread.dvc1 = 0;
+#endif
+	} else if (slot == 2) {
+		if (child->thread.dac1 == 0)
+			return -ENOENT;
+
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+		if (child->thread.dbcr2 & DBCR2_DAC12MODE)
+			/* Part of a range */
+			return -EINVAL;
+		child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
+#endif
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+		child->thread.dvc2 = 0;
+#endif
+		child->thread.dac2 = 0;
+		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
+
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+static int set_dac_range(struct task_struct *child,
+			 struct ppc_hw_breakpoint *bp_info)
+{
+	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
+
+	/* We don't allow range watchpoints to be used with DVC */
+	if (bp_info->condition_mode)
+		return -EINVAL;
+
+	/*
+	 * Best effort to verify the address range.  The user/supervisor bits
+	 * prevent trapping in kernel space, but let's fail on an obvious bad
+	 * range.  The simple test on the mask is not fool-proof, and any
+	 * exclusive range will spill over into kernel space.
+	 */
+	if (bp_info->addr >= TASK_SIZE)
+		return -EIO;
+	if (mode == PPC_BREAKPOINT_MODE_MASK) {
+		/*
+		 * dac2 is a bitmask.  Don't allow a mask that makes a
+		 * kernel space address from a valid dac1 value
+		 */
+		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
+			return -EIO;
+	} else {
+		/*
+		 * For range breakpoints, addr2 must also be a valid address
+		 */
+		if (bp_info->addr2 >= TASK_SIZE)
+			return -EIO;
+	}
+
+	if (child->thread.dbcr0 &
+	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
+		return -ENOSPC;
+
+	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+		child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
+	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+		child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
+	child->thread.dac1 = bp_info->addr;
+	child->thread.dac2 = bp_info->addr2;
+	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
+		child->thread.dbcr2  |= DBCR2_DAC12M;
+	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
+		child->thread.dbcr2  |= DBCR2_DAC12MX;
+	else	/* PPC_BREAKPOINT_MODE_MASK */
+		child->thread.dbcr2  |= DBCR2_DAC12MM;
+	child->thread.regs->msr |= MSR_DE;
+
+	return 5;
+}
+#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
+
+static long ppc_set_hwdebug(struct task_struct *child,
+		     struct ppc_hw_breakpoint *bp_info)
+{
+	if (bp_info->version != 1)
+		return -ENOTSUPP;
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+	/*
+	 * Check for invalid flags and combinations
+	 */
+	if ((bp_info->trigger_type == 0) ||
+	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
+				       PPC_BREAKPOINT_TRIGGER_RW)) ||
+	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
+	    (bp_info->condition_mode &
+	     ~(PPC_BREAKPOINT_CONDITION_MODE |
+	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
+		return -EINVAL;
+#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
+	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
+		return -EINVAL;
+#endif
+
+	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
+		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
+		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
+			return -EINVAL;
+		return set_instruction_bp(child, bp_info);
+	}
+	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
+		return set_dac(child, bp_info);
+
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+	return set_dac_range(child, bp_info);
+#else
+	return -EINVAL;
+#endif
+#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
+	/*
+	 * We only support one data breakpoint
+	 */
+	if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) ||
+	    ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) ||
+	    (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) ||
+	    (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) ||
+	    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
+		return -EINVAL;
+
+	if (child->thread.dabr)
+		return -ENOSPC;
+
+	if ((unsigned long)bp_info->addr >= TASK_SIZE)
+		return -EIO;
+
+	child->thread.dabr = (unsigned long)bp_info->addr;
+
+	return 1;
+#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
+}
+
+static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
+{
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+	int rc;
+
+	if (data <= 4)
+		rc = del_instruction_bp(child, (int)data);
+	else
+		rc = del_dac(child, (int)data - 4);
+
+	if (!rc) {
+		if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0,
+					child->thread.dbcr1)) {
+			child->thread.dbcr0 &= ~DBCR0_IDM;
+			child->thread.regs->msr &= ~MSR_DE;
+		}
+	}
+	return rc;
+#else
+	if (data != 1)
+		return -EINVAL;
+	if (child->thread.dabr == 0)
+		return -ENOENT;
+
+	child->thread.dabr = 0;
+
+	return 0;
+#endif
+}
+
 /*
  * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
  * we mark them as obsolete now, they will be removed in a future version
@@ -932,13 +1328,77 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 	}
 
+	case PPC_PTRACE_GETHWDBGINFO: {
+		struct ppc_debug_info dbginfo;
+
+		dbginfo.version = 1;
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
+		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
+		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
+		dbginfo.data_bp_alignment = 4;
+		dbginfo.sizeof_condition = 4;
+		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
+				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+		dbginfo.features |=
+				   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
+				   PPC_DEBUG_FEATURE_DATA_BP_MASK;
+#endif
+#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
+		dbginfo.num_instruction_bps = 0;
+		dbginfo.num_data_bps = 1;
+		dbginfo.num_condition_regs = 0;
+#ifdef CONFIG_PPC64
+		dbginfo.data_bp_alignment = 8;
+#else
+		dbginfo.data_bp_alignment = 4;
+#endif
+		dbginfo.sizeof_condition = 0;
+		dbginfo.features = 0;
+#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
+
+		if (!access_ok(VERIFY_WRITE, data,
+			       sizeof(struct ppc_debug_info)))
+			return -EFAULT;
+		ret = __copy_to_user((struct ppc_debug_info __user *)data,
+				     &dbginfo, sizeof(struct ppc_debug_info)) ?
+		      -EFAULT : 0;
+		break;
+	}
+
+	case PPC_PTRACE_SETHWDEBUG: {
+		struct ppc_hw_breakpoint bp_info;
+
+		if (!access_ok(VERIFY_READ, data,
+			       sizeof(struct ppc_hw_breakpoint)))
+			return -EFAULT;
+		ret = __copy_from_user(&bp_info,
+				       (struct ppc_hw_breakpoint __user *)data,
+				       sizeof(struct ppc_hw_breakpoint)) ?
+		      -EFAULT : 0;
+		if (!ret)
+			ret = ppc_set_hwdebug(child, &bp_info);
+		break;
+	}
+
+	case PPC_PTRACE_DELHWDEBUG: {
+		ret = ppc_del_hwdebug(child, addr, data);
+		break;
+	}
+
 	case PTRACE_GET_DEBUGREG: {
 		ret = -EINVAL;
 		/* We only support one DABR and no IABRS at the moment */
 		if (addr > 0)
 			break;
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+		ret = put_user(child->thread.dac1,
+			       (unsigned long __user *)data);
+#else
 		ret = put_user(child->thread.dabr,
 			       (unsigned long __user *)data);
+#endif
 		break;
 	}
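
From a debugger, the new requests are used roughly as sketched below. This is an illustrative userspace fragment, not part of the patch; struct ppc_debug_info, struct ppc_hw_breakpoint and the PPC_BREAKPOINT_* / PPC_PTRACE_* constants are assumed to come from the powerpc asm/ptrace.h of this era, and ptrace() returning the slot handle on success follows from ppc_set_hwdebug() above.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* ppc_debug_info, ppc_hw_breakpoint */

/* Place one exact-address write watchpoint on a stopped tracee. */
static long set_write_watchpoint(pid_t child, void *watch_addr)
{
	struct ppc_debug_info dbginfo;
	struct ppc_hw_breakpoint bp;

	/* Discover what the hardware offers (slot counts, alignment, features) */
	if (ptrace(PPC_PTRACE_GETHWDBGINFO, child, 0, &dbginfo) < 0)
		return -1;
	printf("%u data breakpoint(s), alignment %u\n",
	       dbginfo.num_data_bps, dbginfo.data_bp_alignment);

	memset(&bp, 0, sizeof(bp));
	bp.version        = 1;
	bp.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE;
	bp.addr_mode      = PPC_BREAKPOINT_MODE_EXACT;
	bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
	bp.addr           = (uint64_t)(unsigned long)watch_addr;

	/* Returns a slot handle usable later with PPC_PTRACE_DELHWDEBUG */
	return ptrace(PPC_PTRACE_SETHWDEBUG, child, 0, &bp);
}
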
 
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 00b5078da9a3..a0afb555a7c9 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -140,17 +140,15 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
 		return 0;               /* no signals delivered */
 	}
 
+#ifndef CONFIG_PPC_ADV_DEBUG_REGS
         /*
 	 * Reenable the DABR before delivering the signal to
 	 * user space. The DABR will have been cleared if it
 	 * triggered inside the kernel.
 	 */
-	if (current->thread.dabr) {
+	if (current->thread.dabr)
 		set_dabr(current->thread.dabr);
-#if defined(CONFIG_BOOKE)
-		mtspr(SPRN_DBCR0, current->thread.dbcr0);
 #endif
-	}
 
 	if (is32) {
         	if (ka.sa.sa_flags & SA_SIGINFO)
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d670429a1608..266610119f66 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1078,7 +1078,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 	int i;
 	unsigned char tmp;
 	unsigned long new_msr = regs->msr;
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	unsigned long new_dbcr0 = current->thread.dbcr0;
 #endif
 
@@ -1087,13 +1087,17 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 			return -EFAULT;
 		switch (op.dbg_type) {
 		case SIG_DBG_SINGLE_STEPPING:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 			if (op.dbg_value) {
 				new_msr |= MSR_DE;
 				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
 			} else {
-				new_msr &= ~MSR_DE;
-				new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
+				new_dbcr0 &= ~DBCR0_IC;
+				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
+						current->thread.dbcr1)) {
+					new_msr &= ~MSR_DE;
+					new_dbcr0 &= ~DBCR0_IDM;
+				}
 			}
 #else
 			if (op.dbg_value)
@@ -1103,7 +1107,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 #endif
 			break;
 		case SIG_DBG_BRANCH_TRACING:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 			return -EINVAL;
 #else
 			if (op.dbg_value)
@@ -1124,7 +1128,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 	   failure is a problem, anyway, and it's very unlikely unless
 	   the user is really doing something wrong. */
 	regs->msr = new_msr;
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	current->thread.dbcr0 = new_dbcr0;
 #endif
 
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6c6093d67f30..1b16b9a3e49a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -265,8 +265,8 @@ void account_system_vtime(struct task_struct *tsk)
 		account_system_time(tsk, 0, delta, deltascaled);
 	else
 		account_idle_time(delta);
-	per_cpu(cputime_last_delta, smp_processor_id()) = delta;
-	per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
+	__get_cpu_var(cputime_last_delta) = delta;
+	__get_cpu_var(cputime_scaled_last_delta) = deltascaled;
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
@@ -575,6 +575,8 @@ void timer_interrupt(struct pt_regs * regs)
 
 	trace_timer_interrupt_entry(regs);
 
+	__get_cpu_var(irq_stat).timer_irqs++;
+
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continue to take decrementer exceptions */
 	set_dec(DECREMENTER_MAX);
@@ -935,8 +937,8 @@ static void register_decrementer_clockevent(int cpu)
 	*dec = decrementer_clockevent;
 	dec->cpumask = cpumask_of(cpu);
 
-	printk(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
-	       dec->name, dec->mult, dec->shift, cpu);
+	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
+		    dec->name, dec->mult, dec->shift, cpu);
 
 	clockevents_register_device(dec);
 }
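
The accessor change above is functionally a no-op: __get_cpu_var(v) and per_cpu(v, smp_processor_id()) name the same per-CPU slot, the former just avoids the separate smp_processor_id() lookup. A minimal sketch of the equivalence; my_counter is a hypothetical variable:

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u64, my_counter);

static void bump_this_cpu(void)
{
	/* Both statements bump this CPU's copy; the patch switches the
	 * timekeeping code from the first form to the second. */
	per_cpu(my_counter, smp_processor_id())++;
	__get_cpu_var(my_counter)++;
}
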
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index d069ff8a7e03..696626a2e835 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -60,13 +60,13 @@
 #endif
 
 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
-int (*__debugger)(struct pt_regs *regs);
-int (*__debugger_ipi)(struct pt_regs *regs);
-int (*__debugger_bpt)(struct pt_regs *regs);
-int (*__debugger_sstep)(struct pt_regs *regs);
-int (*__debugger_iabr_match)(struct pt_regs *regs);
-int (*__debugger_dabr_match)(struct pt_regs *regs);
-int (*__debugger_fault_handler)(struct pt_regs *regs);
+int (*__debugger)(struct pt_regs *regs) __read_mostly;
+int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
+int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
+int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
+int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
+int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly;
+int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
 
 EXPORT_SYMBOL(__debugger);
 EXPORT_SYMBOL(__debugger_ipi);
@@ -102,11 +102,11 @@ static inline void pmac_backlight_unblank(void) { }
 int die(const char *str, struct pt_regs *regs, long err)
 {
 	static struct {
-		spinlock_t lock;
+		raw_spinlock_t lock;
 		u32 lock_owner;
 		int lock_owner_depth;
 	} die = {
-		.lock =			__SPIN_LOCK_UNLOCKED(die.lock),
+		.lock =			__RAW_SPIN_LOCK_UNLOCKED(die.lock),
 		.lock_owner =		-1,
 		.lock_owner_depth =	0
 	};
@@ -120,7 +120,7 @@ int die(const char *str, struct pt_regs *regs, long err)
 
 	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
-		spin_lock_irqsave(&die.lock, flags);
+		raw_spin_lock_irqsave(&die.lock, flags);
 		die.lock_owner = smp_processor_id();
 		die.lock_owner_depth = 0;
 		bust_spinlocks(1);
@@ -146,6 +146,11 @@ int die(const char *str, struct pt_regs *regs, long err)
 #endif
 		printk("%s\n", ppc_md.name ? ppc_md.name : "");
 
+		sysfs_printk_last_file();
+		if (notify_die(DIE_OOPS, str, regs, err, 255,
+			       SIGSEGV) == NOTIFY_STOP)
+			return 1;
+
 		print_modules();
 		show_regs(regs);
 	} else {
@@ -155,7 +160,7 @@ int die(const char *str, struct pt_regs *regs, long err)
 	bust_spinlocks(0);
 	die.lock_owner = -1;
 	add_taint(TAINT_DIE);
-	spin_unlock_irqrestore(&die.lock, flags);
+	raw_spin_unlock_irqrestore(&die.lock, flags);
 
 	if (kexec_should_crash(current) ||
 		kexec_sr_activated(smp_processor_id()))
@@ -294,7 +299,7 @@ static inline int check_io_access(struct pt_regs *regs)
 	return 0;
 }
 
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 /* On 4xx, the reason for the machine check or program exception
    is in the ESR. */
 #define get_reason(regs)	((regs)->dsisr)
@@ -478,6 +483,8 @@ void machine_check_exception(struct pt_regs *regs)
 {
 	int recover = 0;
 
+	__get_cpu_var(irq_stat).mce_exceptions++;
+
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
 	 * one returns a positive number. However there is existing code
@@ -960,6 +967,8 @@ void vsx_unavailable_exception(struct pt_regs *regs)
 
 void performance_monitor_exception(struct pt_regs *regs)
 {
+	__get_cpu_var(irq_stat).pmu_irqs++;
+
 	perf_irq(regs);
 }
 
@@ -1024,10 +1033,69 @@ void SoftwareEmulation(struct pt_regs *regs)
 }
 #endif /* CONFIG_8xx */
 
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
+{
+	int changed = 0;
+	/*
+	 * Determine the cause of the debug event, clear the
+	 * event flags and send a trap to the handler. Torez
+	 */
+	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
+		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
+#endif
+		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
+			     5);
+		changed |= 0x01;
+	}  else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
+		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
+		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
+			     6);
+		changed |= 0x01;
+	}  else if (debug_status & DBSR_IAC1) {
+		current->thread.dbcr0 &= ~DBCR0_IAC1;
+		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
+		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
+			     1);
+		changed |= 0x01;
+	}  else if (debug_status & DBSR_IAC2) {
+		current->thread.dbcr0 &= ~DBCR0_IAC2;
+		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
+			     2);
+		changed |= 0x01;
+	}  else if (debug_status & DBSR_IAC3) {
+		current->thread.dbcr0 &= ~DBCR0_IAC3;
+		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
+		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
+			     3);
+		changed |= 0x01;
+	}  else if (debug_status & DBSR_IAC4) {
+		current->thread.dbcr0 &= ~DBCR0_IAC4;
+		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
+			     4);
+		changed |= 0x01;
+	}
+	/*
+	 * At the point this routine was called, the MSR(DE) was turned off.
+	 * Check all other debug flags and see if that bit needs to be turned
+	 * back on or not.
+	 */
+	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
+		regs->msr |= MSR_DE;
+	else
+		/* Make sure the IDM flag is off */
+		current->thread.dbcr0 &= ~DBCR0_IDM;
+
+	if (changed & 0x01)
+		mtspr(SPRN_DBCR0, current->thread.dbcr0);
+}
 
 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 {
+	current->thread.dbsr = debug_status;
+
 	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
 	 * on server, it stops on the target of the branch. In order to simulate
 	 * the server behaviour, we thus restart right away with a single step
@@ -1071,29 +1139,23 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 		if (debugger_sstep(regs))
 			return;
 
-		if (user_mode(regs))
-			current->thread.dbcr0 &= ~(DBCR0_IC);
-
-		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
-	} else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
-		regs->msr &= ~MSR_DE;
-
 		if (user_mode(regs)) {
-			current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W |
-								DBCR0_IDM);
-		} else {
-			/* Disable DAC interupts */
-			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R |
-						DBSR_DAC1W | DBCR0_IDM));
-
-			/* Clear the DAC event */
-			mtspr(SPRN_DBSR, (DBSR_DAC1R | DBSR_DAC1W));
+			current->thread.dbcr0 &= ~DBCR0_IC;
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+			if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
+					       current->thread.dbcr1))
+				regs->msr |= MSR_DE;
+			else
+				/* Make sure the IDM bit is off */
+				current->thread.dbcr0 &= ~DBCR0_IDM;
+#endif
 		}
-		/* Setup and send the trap to the handler */
-		do_dabr(regs, mfspr(SPRN_DAC1), debug_status);
-	}
+
+		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+	} else
+		handle_debug(regs, debug_status);
 }
-#endif /* CONFIG_4xx || CONFIG_BOOKE */
+#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
 
 #if !defined(CONFIG_TAU_INT)
 void TAUException(struct pt_regs *regs)