Diffstat (limited to 'arch/sh/kernel/smp.c')
-rw-r--r--  arch/sh/kernel/smp.c  307
1 file changed, 232 insertions(+), 75 deletions(-)
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 283e1425ced5..94075e1a1e61 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -3,68 +3,40 @@
  *
  * SMP support for the SuperH processors.
  *
- * Copyright (C) 2002, 2003 Paul Mundt
+ * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2006 - 2007 Akio Idehara
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
 #include <linux/err.h>
 #include <linux/cache.h>
 #include <linux/cpumask.h>
 #include <linux/delay.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
 #include <linux/spinlock.h>
-#include <linux/threads.h>
+#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-
+#include <linux/interrupt.h>
 #include <asm/atomic.h>
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
 
-/*
- * This was written with the Sega Saturn (SMP SH-2 7604) in mind,
- * but is designed to be usable regardless if there's an MMU
- * present or not.
- */
-struct sh_cpuinfo cpu_data[NR_CPUS];
-
-extern void per_cpu_trap_init(void);
+int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
+int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 
 cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);
 
 cpumask_t cpu_online_map;
 EXPORT_SYMBOL(cpu_online_map);
-static atomic_t cpus_booted = ATOMIC_INIT(0);
 
-/* These are defined by the board-specific code. */
-
-/*
- * Cause the function described by call_data to be executed on the passed
- * cpu.  When the function has finished, increment the finished field of
- * call_data.
- */
-void __smp_send_ipi(unsigned int cpu, unsigned int action);
-
-/*
- * Find the number of available processors
- */
-unsigned int __smp_probe_cpus(void);
-
-/*
- * Start a particular processor
- */
-void __smp_slave_init(unsigned int cpu);
+static atomic_t cpus_booted = ATOMIC_INIT(0);
 
 /*
  * Run specified function on a particular processor.
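
The __cpu_number_map[] and __cpu_logical_map[] arrays added in the hunk above translate between hardware (physical) CPU identifiers and the kernel's logical CPU numbers. A minimal sketch of how such maps are typically consulted; the accessor names are illustrative, not part of this diff:

/* Illustrative accessors; only the two arrays come from this diff. */
static inline int cpu_number_map(int physid)
{
	return __cpu_number_map[physid];	/* physical id -> logical cpu */
}

static inline int cpu_logical_map(int cpu)
{
	return __cpu_logical_map[cpu];		/* logical cpu -> physical id */
}
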
@@ -73,74 +45,123 @@ void __smp_call_function(unsigned int cpu);
 
 static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
-	cpu_data[cpu].loops_per_jiffy = loops_per_jiffy;
+	struct sh_cpuinfo *c = cpu_data + cpu;
+
+	c->loops_per_jiffy = loops_per_jiffy;
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int cpu = smp_processor_id();
-	int i;
 
-	atomic_set(&cpus_booted, 1);
-	smp_store_cpu_info(cpu);
-	
-	for (i = 0; i < __smp_probe_cpus(); i++)
-		cpu_set(i, cpu_possible_map);
+	init_new_context(current, &init_mm);
+	current_thread_info()->cpu = cpu;
+	plat_prepare_cpus(max_cpus);
+
+#ifndef CONFIG_HOTPLUG_CPU
+	cpu_present_map = cpu_possible_map;
+#endif
 }
 
 void __devinit smp_prepare_boot_cpu(void)
 {
 	unsigned int cpu = smp_processor_id();
 
+	__cpu_number_map[0] = cpu;
+	__cpu_logical_map[0] = cpu;
+
 	cpu_set(cpu, cpu_online_map);
 	cpu_set(cpu, cpu_possible_map);
 }
 
-int __cpu_up(unsigned int cpu)
+asmlinkage void __cpuinit start_secondary(void)
 {
-	struct task_struct *tsk;
+	unsigned int cpu;
+	struct mm_struct *mm = &init_mm;
 
-	tsk = fork_idle(cpu);
+	atomic_inc(&mm->mm_count);
+	atomic_inc(&mm->mm_users);
+	current->active_mm = mm;
+	BUG_ON(current->mm);
+	enter_lazy_tlb(mm, current);
 
-	if (IS_ERR(tsk))
-		panic("Failed forking idle task for cpu %d\n", cpu);
-	
-	task_thread_info(tsk)->cpu = cpu;
+	per_cpu_trap_init();
+
+	preempt_disable();
+
+	local_irq_enable();
+
+	calibrate_delay();
+
+	cpu = smp_processor_id();
+	smp_store_cpu_info(cpu);
 
 	cpu_set(cpu, cpu_online_map);
 
-	return 0;
+	cpu_idle();
 }
 
-int start_secondary(void *unused)
+extern struct {
+	unsigned long sp;
+	unsigned long bss_start;
+	unsigned long bss_end;
+	void *start_kernel_fn;
+	void *cpu_init_fn;
+	void *thread_info;
+} stack_start;
+
+int __cpuinit __cpu_up(unsigned int cpu)
 {
-	unsigned int cpu;
+	struct task_struct *tsk;
+	unsigned long timeout;
 
-	cpu = smp_processor_id();
+	tsk = fork_idle(cpu);
+	if (IS_ERR(tsk)) {
+		printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
+		return PTR_ERR(tsk);
+	}
 
-	atomic_inc(&init_mm.mm_count);
-	current->active_mm = &init_mm;
+	/* Fill in data in head.S for secondary cpus */
+	stack_start.sp = tsk->thread.sp;
+	stack_start.thread_info = tsk->stack;
+	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
+	stack_start.start_kernel_fn = start_secondary;
 
-	smp_store_cpu_info(cpu);
+	flush_cache_all();
 
-	__smp_slave_init(cpu);
-	preempt_disable();
-	per_cpu_trap_init();
-	
-	atomic_inc(&cpus_booted);
+	plat_start_cpu(cpu, (unsigned long)_stext);
 
-	cpu_idle();
-	return 0;
+	timeout = jiffies + HZ;
+	while (time_before(jiffies, timeout)) {
+		if (cpu_online(cpu))
+			break;
+
+		udelay(10);
+	}
+
+	if (cpu_online(cpu))
+		return 0;
+
+	return -ENOENT;
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	smp_mb();
+	unsigned long bogosum = 0;
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		bogosum += cpu_data[cpu].loops_per_jiffy;
+
+	printk(KERN_INFO "SMP: Total of %d processors activated "
+	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
+	       bogosum / (500000/HZ),
+	       (bogosum / (5000/HZ)) % 100);
 }
 
 void smp_send_reschedule(int cpu)
 {
-	__smp_send_ipi(cpu, SMP_MSG_RESCHEDULE);
+	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
 }
 
 static void stop_this_cpu(void *unused)
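
The BogoMIPS summary printed by smp_cpus_done() above is plain integer arithmetic: each loops_per_jiffy value counts calibrated delay-loop iterations per timer tick, and one BogoMIPS corresponds to 500000 iterations per second. A worked example with illustrative values (HZ = 100, two CPUs, bogosum = 1245184):

/*
 * bogosum / (500000 / HZ)       = 1245184 / 5000       = 249  (integer part)
 * (bogosum / (5000 / HZ)) % 100 = (1245184 / 50) % 100 = 3    (two decimals)
 *
 * => "SMP: Total of 2 processors activated (249.03 BogoMIPS)."
 */
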
@@ -157,7 +178,6 @@ void smp_send_stop(void)
 	smp_call_function(stop_this_cpu, 0, 1, 0);
 }
 
-
 struct smp_fn_call_struct smp_fn_call = {
 	.lock		= SPIN_LOCK_UNLOCKED,
 	.finished	= ATOMIC_INIT(0),
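
smp_fn_call carries a cross-call between CPUs: the sender publishes a function and argument, kicks the other CPUs with an SMP_MSG_FUNCTION IPI (see the following hunks), and then polls finished. A sketch of the receiving side; the handler name and the fn/data members are assumptions, since only the lock and finished initializers are visible here:

/* Hypothetical IPI handler; the fn/data members are assumed. */
static void smp_message_function_interrupt(void)
{
	void (*fn)(void *) = smp_fn_call.fn;
	void *data = smp_fn_call.data;

	fn(data);
	atomic_inc(&smp_fn_call.finished);	/* the sender polls this */
}
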
@@ -175,9 +195,6 @@ int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
 	unsigned int nr_cpus = atomic_read(&cpus_booted);
 	int i;
 
-	if (nr_cpus < 2)
-		return 0;
-
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
 
@@ -189,7 +206,7 @@ int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
 
 	for (i = 0; i < nr_cpus; i++)
 		if (i != smp_processor_id())
-			__smp_call_function(i);
+			plat_send_ipi(i, SMP_MSG_FUNCTION);
 
 	if (wait)
 		while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));
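
In this era smp_call_function() takes four arguments (function, argument, retry flag, wait flag), as the stop_this_cpu() call in an earlier hunk also shows. A minimal, hypothetical caller:

static void drain_local_counters(void *info)
{
	/* Runs on every other online cpu, in interrupt context;
	 * it must not sleep. */
}

static void drain_all_counters(void)
{
	/* retry = 0, wait = 1: return only after all other cpus ran it */
	smp_call_function(drain_local_counters, NULL, 0, 1);
	drain_local_counters(NULL);	/* and once on the local cpu */
}
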
@@ -205,3 +222,143 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
+static void flush_tlb_all_ipi(void *info)
+{
+	local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+}
+
+static void flush_tlb_mm_ipi(void *mm)
+{
+	local_flush_tlb_mm((struct mm_struct *)mm);
+}
+
+/*
+ * The following tlb flush calls are invoked when old translations are
+ * being torn down, or pte attributes are changing. For single threaded
+ * address spaces, a new context is obtained on the current cpu, and the
+ * tlb context on other cpus is invalidated to force a new context
+ * allocation at switch_mm time, should the mm ever be used on other cpus.
+ * For multithreaded address spaces, inter-cpu interrupts have to be sent.
+ * Another case where inter-cpu interrupts are required is when the target
+ * mm might be active on another cpu (e.g. debuggers doing the flushes on
+ * behalf of debuggees, kswapd stealing pages from another process, etc).
+ * Kanoj 07/00.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	preempt_disable();
+
+	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+	} else {
+		int i;
+		for (i = 0; i < num_online_cpus(); i++)
+			if (smp_processor_id() != i)
+				cpu_context(i, mm) = 0;
+	}
+	local_flush_tlb_mm(mm);
+
+	preempt_enable();
+}
+
+struct flush_tlb_data {
+	struct vm_area_struct *vma;
+	unsigned long addr1;
+	unsigned long addr2;
+};
+
+static void flush_tlb_range_ipi(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+		     unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	preempt_disable();
+	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+		struct flush_tlb_data fd;
+
+		fd.vma = vma;
+		fd.addr1 = start;
+		fd.addr2 = end;
+		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+	} else {
+		int i;
+		for (i = 0; i < num_online_cpus(); i++)
+			if (smp_processor_id() != i)
+				cpu_context(i, mm) = 0;
+	}
+	local_flush_tlb_range(vma, start, end);
+	preempt_enable();
+}
+
+static void flush_tlb_kernel_range_ipi(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	struct flush_tlb_data fd;
+
+	fd.addr1 = start;
+	fd.addr2 = end;
+	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+}
+
+static void flush_tlb_page_ipi(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+	local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	preempt_disable();
+	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
+	    (current->mm != vma->vm_mm)) {
+		struct flush_tlb_data fd;
+
+		fd.vma = vma;
+		fd.addr1 = page;
+		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+	} else {
+		int i;
+		for (i = 0; i < num_online_cpus(); i++)
+			if (smp_processor_id() != i)
+				cpu_context(i, vma->vm_mm) = 0;
+	}
+	local_flush_tlb_page(vma, page);
+	preempt_enable();
+}
+
+static void flush_tlb_one_ipi(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+	local_flush_tlb_one(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_one(unsigned long asid, unsigned long vaddr)
+{
+	struct flush_tlb_data fd;
+
+	fd.addr1 = asid;
+	fd.addr2 = vaddr;
+
+	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
+	local_flush_tlb_one(asid, vaddr);
+	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
+	local_flush_tlb_one(asid, vaddr);
+}
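
All of the flush_tlb_*() paths above use the same trick for the single-user case: rather than sending an IPI, they zero cpu_context(cpu, mm) on the other CPUs. A zero context is never handed out, so the next switch_mm() on such a CPU must allocate a fresh ASID. The sketch below shows the effect; get_new_mmu_context() stands in for the real allocator, which is not part of this diff:

/*
 * Sketch (allocator name assumed):
 *
 *	if (cpu_context(cpu, mm) == 0)
 *		cpu_context(cpu, mm) = get_new_mmu_context(mm);
 *
 * Forcing a fresh ASID means stale TLB entries tagged with the old
 * ASID can no longer match, which is equivalent to flushing them.
 */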