author     Linus Torvalds <torvalds@linux-foundation.org>  2011-07-24 09:55:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-07-24 09:55:18 -0700
commit     ff0c4ad2c3a75ccfe6adca916e50804eb45bb2d9 (patch)
tree       574f2d38793f7a08af73c40a96d6dc76af9c6f46
parent     fcda12e7f6d58d61997681a9d41779e3fd2ffc94 (diff)
parent     19f9d392365113f74286b1721c7c032c12cf5abd (diff)
download   linux-ff0c4ad2c3a75ccfe6adca916e50804eb45bb2d9.tar.gz
Merge branch 'for-upstream' of git://openrisc.net/jonas/linux
* 'for-upstream' of git://openrisc.net/jonas/linux: (24 commits)
  OpenRISC: Add MAINTAINERS entry
  OpenRISC: Miscellaneous
  OpenRISC: Library routines
  OpenRISC: Headers
  OpenRISC: Traps
  OpenRISC: Module support
  OpenRISC: GPIO
  OpenRISC: Scheduling/Process management
  OpenRISC: Idle/Power management
  OpenRISC: System calls
  OpenRISC: IRQ
  OpenRISC: Timekeeping
  OpenRISC: DMA
  OpenRISC: PTrace
  OpenRISC: Build infrastructure
  OpenRISC: Signal handling
  OpenRISC: Memory management
  OpenRISC: Device tree
  OpenRISC: Boot code
  iomap: make IOPORT/PCI mapping functions conditional
  ...
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  arch/avr32/include/asm/delay.h | 27
-rw-r--r--  arch/m32r/include/asm/delay.h | 27
-rw-r--r--  arch/openrisc/Kconfig | 207
-rw-r--r--  arch/openrisc/Makefile | 55
-rw-r--r--  arch/openrisc/README.openrisc | 99
-rw-r--r--  arch/openrisc/TODO.openrisc | 16
-rw-r--r--  arch/openrisc/boot/Makefile | 15
-rw-r--r--  arch/openrisc/boot/dts/or1ksim.dts | 50
-rw-r--r--  arch/openrisc/configs/or1ksim_defconfig | 65
-rw-r--r--  arch/openrisc/include/asm/Kbuild | 64
-rw-r--r--  arch/openrisc/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/openrisc/include/asm/bitops.h | 59
-rw-r--r--  arch/openrisc/include/asm/bitops/__ffs.h | 33
-rw-r--r--  arch/openrisc/include/asm/bitops/__fls.h | 33
-rw-r--r--  arch/openrisc/include/asm/bitops/ffs.h | 32
-rw-r--r--  arch/openrisc/include/asm/bitops/fls.h | 33
-rw-r--r--  arch/openrisc/include/asm/byteorder.h | 1
-rw-r--r--  arch/openrisc/include/asm/cache.h | 29
-rw-r--r--  arch/openrisc/include/asm/cpuinfo.h | 34
-rw-r--r--  arch/openrisc/include/asm/delay.h | 24
-rw-r--r--  arch/openrisc/include/asm/dma-mapping.h | 134
-rw-r--r--  arch/openrisc/include/asm/elf.h | 108
-rw-r--r--  arch/openrisc/include/asm/fixmap.h | 87
-rw-r--r--  arch/openrisc/include/asm/gpio.h | 65
-rw-r--r--  arch/openrisc/include/asm/io.h | 51
-rw-r--r--  arch/openrisc/include/asm/irq.h | 27
-rw-r--r--  arch/openrisc/include/asm/irqflags.h | 29
-rw-r--r--  arch/openrisc/include/asm/linkage.h | 25
-rw-r--r--  arch/openrisc/include/asm/memblock.h | 24
-rw-r--r--  arch/openrisc/include/asm/mmu.h | 26
-rw-r--r--  arch/openrisc/include/asm/mmu_context.h | 43
-rw-r--r--  arch/openrisc/include/asm/mutex.h | 27
-rw-r--r--  arch/openrisc/include/asm/page.h | 110
-rw-r--r--  arch/openrisc/include/asm/param.h | 26
-rw-r--r--  arch/openrisc/include/asm/pgalloc.h | 102
-rw-r--r--  arch/openrisc/include/asm/pgtable.h | 463
-rw-r--r--  arch/openrisc/include/asm/processor.h | 113
-rw-r--r--  arch/openrisc/include/asm/prom.h | 77
-rw-r--r--  arch/openrisc/include/asm/ptrace.h | 131
-rw-r--r--  arch/openrisc/include/asm/serial.h | 36
-rw-r--r--  arch/openrisc/include/asm/sigcontext.h | 38
-rw-r--r--  arch/openrisc/include/asm/spinlock.h | 24
-rw-r--r--  arch/openrisc/include/asm/spr.h | 42
-rw-r--r--  arch/openrisc/include/asm/spr_defs.h | 604
-rw-r--r--  arch/openrisc/include/asm/syscall.h | 77
-rw-r--r--  arch/openrisc/include/asm/syscalls.h | 27
-rw-r--r--  arch/openrisc/include/asm/system.h | 35
-rw-r--r--  arch/openrisc/include/asm/thread_info.h | 134
-rw-r--r--  arch/openrisc/include/asm/timex.h | 36
-rw-r--r--  arch/openrisc/include/asm/tlb.h | 34
-rw-r--r--  arch/openrisc/include/asm/tlbflush.h | 55
-rw-r--r--  arch/openrisc/include/asm/uaccess.h | 355
-rw-r--r--  arch/openrisc/include/asm/unaligned.h | 51
-rw-r--r--  arch/openrisc/include/asm/unistd.h | 31
-rw-r--r--  arch/openrisc/kernel/Makefile | 14
-rw-r--r--  arch/openrisc/kernel/asm-offsets.c | 70
-rw-r--r--  arch/openrisc/kernel/dma.c | 191
-rw-r--r--  arch/openrisc/kernel/entry.S | 1128
-rw-r--r--  arch/openrisc/kernel/head.S | 1607
-rw-r--r--  arch/openrisc/kernel/idle.c | 77
-rw-r--r--  arch/openrisc/kernel/init_task.c | 41
-rw-r--r--  arch/openrisc/kernel/irq.c | 172
-rw-r--r--  arch/openrisc/kernel/module.c | 72
-rw-r--r--  arch/openrisc/kernel/or32_ksyms.c | 46
-rw-r--r--  arch/openrisc/kernel/process.c | 311
-rw-r--r--  arch/openrisc/kernel/prom.c | 108
-rw-r--r--  arch/openrisc/kernel/ptrace.c | 211
-rw-r--r--  arch/openrisc/kernel/setup.c | 381
-rw-r--r--  arch/openrisc/kernel/signal.c | 396
-rw-r--r--  arch/openrisc/kernel/sys_call_table.c | 28
-rw-r--r--  arch/openrisc/kernel/sys_or32.c | 57
-rw-r--r--  arch/openrisc/kernel/time.c | 181
-rw-r--r--  arch/openrisc/kernel/traps.c | 366
-rw-r--r--  arch/openrisc/kernel/vmlinux.h | 12
-rw-r--r--  arch/openrisc/kernel/vmlinux.lds.S | 115
-rw-r--r--  arch/openrisc/lib/Makefile | 5
-rw-r--r--  arch/openrisc/lib/delay.c | 60
-rw-r--r--  arch/openrisc/lib/string.S | 204
-rw-r--r--  arch/openrisc/mm/Makefile | 5
-rw-r--r--  arch/openrisc/mm/fault.c | 338
-rw-r--r--  arch/openrisc/mm/init.c | 283
-rw-r--r--  arch/openrisc/mm/ioremap.c | 137
-rw-r--r--  arch/openrisc/mm/tlb.c | 193
-rw-r--r--  arch/sh/include/asm/delay.h | 27
-rw-r--r--  arch/x86/include/asm/delay.h | 25
-rw-r--r--  include/asm-generic/delay.h | 37
-rw-r--r--  include/asm-generic/io.h | 9
-rw-r--r--  include/asm-generic/iomap.h | 4
-rw-r--r--  lib/iomap.c | 4
90 files changed, 10970 insertions, 104 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 63524a09ca86..43392c9ef4c3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4688,6 +4688,14 @@ F:	drivers/of
 F:	include/linux/of*.h
 K:	of_get_property
 
+OPENRISC ARCHITECTURE
+M:	Jonas Bonn <jonas@southpole.se>
+W:	http://openrisc.net
+L:	linux@lists.openrisc.net
+S:	Maintained
+T:	git git://openrisc.net/~jonas/linux
+F:	arch/openrisc
+
 OPL4 DRIVER
 M:	Clemens Ladisch <clemens@ladisch.de>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
diff --git a/arch/avr32/include/asm/delay.h b/arch/avr32/include/asm/delay.h
index a0ed9a9839a5..9670e127b7b2 100644
--- a/arch/avr32/include/asm/delay.h
+++ b/arch/avr32/include/asm/delay.h
@@ -1,26 +1 @@
-#ifndef __ASM_AVR32_DELAY_H
-#define __ASM_AVR32_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/avr32/lib/delay.c
- */
-
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
-	__udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
-	__ndelay(n))
-
-#endif /* __ASM_AVR32_DELAY_H */
+#include <asm-generic/delay.h>
diff --git a/arch/m32r/include/asm/delay.h b/arch/m32r/include/asm/delay.h
index 9dd9e999ea69..9670e127b7b2 100644
--- a/arch/m32r/include/asm/delay.h
+++ b/arch/m32r/include/asm/delay.h
@@ -1,26 +1 @@
-#ifndef _ASM_M32R_DELAY_H
-#define _ASM_M32R_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/m32r/lib/delay.c
- */
-
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
-	__udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
-	__ndelay(n))
-
-#endif /* _ASM_M32R_DELAY_H */
+#include <asm-generic/delay.h>
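
Both removed headers rely on the same guard: for a compile-time-constant argument, an out-of-range delay becomes a reference to the never-defined __bad_udelay(), so the mistake surfaces as a link error instead of a silent overflow. A minimal stand-alone sketch of that pattern (not from the commit; the 20000 us limit and the 0x10c7 scaling factor come from the removed m32r header, and the new asm-generic/delay.h is assumed to keep the same structure). Build it with optimization (-O2), as the kernel does, so the dead branch and its __bad_udelay reference are discarded:

extern void __bad_udelay(void);	/* intentionally never defined */

static void __const_udelay(unsigned long xloops) { (void)xloops; /* calibrated spin */ }
static void __udelay(unsigned long usecs) { (void)usecs; /* runtime-scaled spin */ }

#define udelay(n) (__builtin_constant_p(n) ? \
	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
	__udelay(n))

int main(void)
{
	udelay(100);		/* constant, in range: folds to __const_udelay() */
	/* udelay(30000); */	/* constant, too large: link fails on __bad_udelay */
	return 0;
}
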
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
new file mode 100644
index 000000000000..4558bafbd1a2
--- /dev/null
+++ b/arch/openrisc/Kconfig
@@ -0,0 +1,207 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/config-language.txt.
+#
+
+config OPENRISC
+	def_bool y
+	select OF
+	select OF_EARLY_FLATTREE
+	select HAVE_MEMBLOCK
+	select ARCH_WANT_OPTIONAL_GPIOLIB
+        select HAVE_ARCH_TRACEHOOK
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_CHIP
+	select GENERIC_IRQ_PROBE
+	select GENERIC_IRQ_SHOW
+	select GENERIC_IOMAP
+
+config MMU
+	def_bool y
+
+config WISHBONE_BUS_BIG_ENDIAN
+	def_bool y
+
+config SYMBOL_PREFIX
+        string
+        default ""
+
+config HAVE_DMA_ATTRS
+	def_bool y
+
+config UID16
+	def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+	def_bool y
+
+config RWSEM_XCHGADD_ALGORITHM
+	def_bool n
+
+config GENERIC_HWEIGHT
+	def_bool y
+
+config GENERIC_IOMAP
+	def_bool y
+
+config NO_IOPORT
+	def_bool y
+
+config GENERIC_GPIO
+	def_bool y
+
+config GENERIC_CLOCKEVENTS
+	def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+        def_bool y
+
+# For now, use generic checksum functions
+# These can be reimplemented in assembly later if so inclined
+config GENERIC_CSUM
+        def_bool y
+
+config GENERIC_FIND_NEXT_BIT
+	def_bool y
+
+source "init/Kconfig"
+
+
+menu "Processor type and features"
+
+choice
+	prompt "Subarchitecture"
+	default OR1K_1200
+
+config OR1K_1200
+	bool "OR1200"
+	help
+	  Generic OpenRISC 1200 architecture
+
+endchoice
+
+config OPENRISC_BUILTIN_DTB
+        string "Builtin DTB"
+        default ""
+
+menu "Class II Instructions"
+
+config OPENRISC_HAVE_INST_FF1
+	bool "Have instruction l.ff1"
+	default y
+	help
+	  Select this if your implementation has the Class II instruction l.ff1
+
+config OPENRISC_HAVE_INST_FL1
+	bool "Have instruction l.fl1"
+	default y
+	help
+	  Select this if your implementation has the Class II instruction l.fl1
+
+config OPENRISC_HAVE_INST_MUL
+	bool "Have instruction l.mul for hardware multiply"
+	default y
+	help
+	  Select this if your implementation has a hardware multiply instruction
+
+config OPENRISC_HAVE_INST_DIV
+	bool "Have instruction l.div for hardware divide"
+	default y
+	help
+	  Select this if your implementation has a hardware divide instruction
+endmenu
+
+
+source "kernel/time/Kconfig"
+source kernel/Kconfig.hz
+source kernel/Kconfig.preempt
+source "mm/Kconfig"
+
+config OPENRISC_NO_SPR_SR_DSX
+	bool "use SPR_SR_DSX software emulation" if OR1K_1200
+	default y
+	help
+	  SPR_SR_DSX is a status register bit indicating whether the
+	  last exception happened in a delay slot.
+
+	  The OpenRISC architecture makes it optional to implement this
+	  bit in hardware, and the OR1200 does not have it.
+
+	  Say N here if you know that your OpenRISC processor has the
+	  SPR_SR_DSX bit implemented.  Say Y if you are unsure.
+
+config CMDLINE
+        string "Default kernel command string"
+        default ""
+        help
+          On some architectures there is currently no way for the boot loader
+          to pass arguments to the kernel. For these architectures, you should
+          supply some command-line options at build time by entering them
+          here.
+
+menu "Debugging options"
+
+config DEBUG_STACKOVERFLOW
+	bool "Check for kernel stack overflow"
+	default y
+	help
+	  Make extra checks for space available on the stack in some
+	  critical functions.  This will cause the kernel to run a bit
+	  slower, but will catch most kernel stack overruns and exit gracefully.
+
+	  Say Y if you are unsure.
+
+config JUMP_UPON_UNHANDLED_EXCEPTION
+	bool "Try to die gracefully"
+	default y
+	help
+	  This puts the kernel into an infinite loop after the first oops.
+	  Until your kernel crashes, this doesn't have any influence.
+
+	  Say Y if you are unsure.
+
+config OPENRISC_EXCEPTION_DEBUG
+	bool "Print processor state at each exception"
+	default n
+	help
+	  This option will make your kernel unusable for all but kernel
+	  debugging.
+
+	  Say N if you are unsure.
+
+config OPENRISC_ESR_EXCEPTION_BUG_CHECK
+	bool "Check for possible ESR exception bug"
+	default n
+	help
+	  This option enables some checks that might expose problems
+	  in the kernel.
+
+	  Say N if you are unsure.
+
+endmenu
+
+endmenu
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
+
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+endmenu
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
new file mode 100644
index 000000000000..158ae4c0dc6c
--- /dev/null
+++ b/arch/openrisc/Makefile
@@ -0,0 +1,55 @@
+# BK Id: %F% %I% %G% %U% %#%
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+# Modifications for the OpenRISC architecture:
+# Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+# Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+#
+# Based on:
+# arch/i386/Makefile
+
+KBUILD_DEFCONFIG := or1ksim_defconfig
+
+LDFLAGS         :=
+OBJCOPYFLAGS    := -O binary -R .note -R .comment -S
+LDFLAGS_vmlinux :=
+LIBGCC 		:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+
+KBUILD_CFLAGS	+= -pipe -ffixed-r10
+
+ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y)
+	KBUILD_CFLAGS += $(call cc-option,-mhard-mul)
+else
+	KBUILD_CFLAGS += $(call cc-option,-msoft-mul)
+endif
+
+ifeq ($(CONFIG_OPENRISC_HAVE_INST_DIV),y)
+	KBUILD_CFLAGS += $(call cc-option,-mhard-div)
+else
+	KBUILD_CFLAGS += $(call cc-option,-msoft-div)
+endif
+
+head-y 		:= arch/openrisc/kernel/head.o arch/openrisc/kernel/init_task.o
+
+core-y		+= arch/openrisc/lib/ \
+		   arch/openrisc/kernel/ \
+		   arch/openrisc/mm/
+libs-y		+= $(LIBGCC)
+
+ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""'
+BUILTIN_DTB := y
+else
+BUILTIN_DTB := n
+endif
+core-$(BUILTIN_DTB) += arch/openrisc/boot/
+
+all: vmlinux
diff --git a/arch/openrisc/README.openrisc b/arch/openrisc/README.openrisc
new file mode 100644
index 000000000000..c9f7edf2b9a2
--- /dev/null
+++ b/arch/openrisc/README.openrisc
@@ -0,0 +1,99 @@
+OpenRISC Linux
+==============
+
+This is a port of Linux to the OpenRISC class of microprocessors; the initial
+target architecture, specifically, is the 32-bit OpenRISC 1000 family (or1k).
+
+For information about OpenRISC processors and ongoing development:
+
+	website		http://openrisc.net
+
+For more information about Linux on OpenRISC, please contact South Pole AB.
+
+	email:		info@southpole.se
+
+	website:	http://southpole.se
+			http://southpoleconsulting.com
+
+---------------------------------------------------------------------
+
+Build instructions for OpenRISC toolchain and Linux
+===================================================
+
+In order to build and run Linux for OpenRISC, you'll need at least a basic
+toolchain and, perhaps, the architectural simulator.  Steps to get these bits
+in place are outlined here.
+
+1)  The toolchain can be obtained from openrisc.net.  Instructions for building
+a toolchain can be found at:
+
+http://openrisc.net/toolchain-build.html
+
+2) or1ksim (optional)
+
+or1ksim is the architectural simulator which will allow you to actually run
+your OpenRISC Linux kernel if you don't have an OpenRISC processor at hand.
+
+	git clone git://openrisc.net/jonas/or1ksim-svn
+
+	cd or1ksim
+	./configure --prefix=$OPENRISC_PREFIX
+	make
+	make install
+
+3)  Linux kernel
+
+Build the kernel as usual
+
+	make ARCH=openrisc defconfig
+	make ARCH=openrisc
+
+4)  Run in architectural simulator
+
+Grab the or1ksim platform configuration file (from the or1ksim source) and
+together with your freshly built vmlinux, run your kernel with the following
+incantation:
+
+	sim -f arch/openrisc/or1ksim.cfg vmlinux
+
+---------------------------------------------------------------------
+
+Terminology
+===========
+
+In the code, the following prefixes are used on symbols to limit the scope
+to more or less specific processor implementations:
+
+openrisc: the OpenRISC class of processors
+or1k:     the OpenRISC 1000 family of processors
+or1200:   the OpenRISC 1200 processor
+
+---------------------------------------------------------------------
+
+History
+========
+
+18. 11. 2003	Matjaz Breskvar (phoenix@bsemi.com)
+	initial port of Linux to the OpenRISC/or32 architecture.
+	all the core stuff is implemented and seems usable.
+
+08. 12. 2003	Matjaz Breskvar (phoenix@bsemi.com)
+	complete change of TLB miss handling.
+	rewrite of exceptions handling.
+	fully functional sash-3.6 in default initrd.
+	a much improved version with changes all around.
+
+10. 04. 2004	Matjaz Breskvar (phoenix@bsemi.com)
+	a lot of bugfixes all over.
+	ethernet support, functional http and telnet servers.
+	running many standard linux apps.
+
+26. 06. 2004	Matjaz Breskvar (phoenix@bsemi.com)
+	port to 2.6.x
+
+30. 11. 2004	Matjaz Breskvar (phoenix@bsemi.com)
+	lots of bugfixes and enhancements.
+	added opencores framebuffer driver.
+
+09. 10. 2010    Jonas Bonn (jonas@southpole.se)
+	major rewrite to bring up to par with upstream Linux 2.6.36
diff --git a/arch/openrisc/TODO.openrisc b/arch/openrisc/TODO.openrisc
new file mode 100644
index 000000000000..acfeef9c58e3
--- /dev/null
+++ b/arch/openrisc/TODO.openrisc
@@ -0,0 +1,16 @@
+The OpenRISC Linux port is fully functional and has been tracking upstream
+since 2.6.35.  There are, however, remaining items to be completed within
+the coming months.  Here's a list of known-to-be-less-than-stellar items
+that are due for investigation shortly, i.e. our TODO list:
+
+-- Implement the rest of the DMA API... dma_map_sg, etc.
+
+-- Consolidate usage of memblock and bootmem... move everything over to
+   memblock.
+
+-- Finish the renaming cleanup... there are references to or32 in the code
+   which was an older name for the architecture.  The name we've settled on is
+   or1k and this change is slowly trickling through the stack.  For the time
+   being, or32 is equivalent to or1k.
+
+-- Implement optimized version of memcpy and memset
diff --git a/arch/openrisc/boot/Makefile b/arch/openrisc/boot/Makefile
new file mode 100644
index 000000000000..98ca185097a5
--- /dev/null
+++ b/arch/openrisc/boot/Makefile
@@ -0,0 +1,15 @@
+
+
+ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""'
+BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_OPENRISC_BUILTIN_DTB)).dtb.o
+else
+BUILTIN_DTB :=
+endif
+obj-y += $(BUILTIN_DTB)
+
+clean-files := *.dtb.S
+
+#DTC_FLAGS ?= -p 1024
+
+$(obj)/%.dtb: $(src)/dts/%.dts
+	$(call cmd,dtc)
diff --git a/arch/openrisc/boot/dts/or1ksim.dts b/arch/openrisc/boot/dts/or1ksim.dts
new file mode 100644
index 000000000000..5d4f9027afaf
--- /dev/null
+++ b/arch/openrisc/boot/dts/or1ksim.dts
@@ -0,0 +1,50 @@
+/dts-v1/;
+/ {
+	compatible = "opencores,or1ksim";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	interrupt-parent = <&pic>;
+
+	chosen {
+		bootargs = "console=uart,mmio,0x90000000,115200";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x00000000 0x02000000>;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu@0 {
+			compatible = "opencores,or1200-rtlsvn481";
+			reg = <0>;
+			clock-frequency = <20000000>;
+		};
+	};
+
+	/*
+	 * OR1K PIC is built into CPU and accessed via special purpose
+	 * registers.  It is not addressable and, hence, has no 'reg'
+	 * property.
+	 */
+	pic: pic {
+		compatible = "opencores,or1k-pic";
+		#interrupt-cells = <1>;
+		interrupt-controller;
+	};
+
+	serial0: serial@90000000 {
+		compatible = "opencores,uart16550-rtlsvn105", "ns16550a";
+		reg = <0x90000000 0x100>;
+		interrupts = <2>;
+		clock-frequency = <20000000>;
+	};
+
+	enet0: ethoc@92000000 {
+		compatible = "opencores,ethmac-rtlsvn338";
+		reg = <0x92000000 0x100>;
+		interrupts = <4>;
+	};
+};
diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig
new file mode 100644
index 000000000000..ea172bdfa36a
--- /dev/null
+++ b/arch/openrisc/configs/or1ksim_defconfig
@@ -0,0 +1,65 @@
+CONFIG_CROSS_COMPILE="or32-linux-"
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+CONFIG_EXPERT=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_EPOLL is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLOB=y
+CONFIG_MODULES=y
+# CONFIG_BLOCK is not set
+CONFIG_OPENRISC_BUILTIN_DTB="or1ksim"
+CONFIG_NO_HZ=y
+CONFIG_HZ_100=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_CUBIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_PROC_DEVICETREE=y
+CONFIG_NETDEVICES=y
+CONFIG_MICREL_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_ETHOC=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
new file mode 100644
index 000000000000..11162e6c878f
--- /dev/null
+++ b/arch/openrisc/include/asm/Kbuild
@@ -0,0 +1,64 @@
+include include/asm-generic/Kbuild.asm
+
+header-y += spr_defs.h
+
+generic-y += atomic.h
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += cacheflush.h
+generic-y += checksum.h
+generic-y += cmpxchg.h
+generic-y += cmpxchg-local.h
+generic-y += cpumask.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += module.h
+generic-y += msgbuf.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += rmap.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += sembuf.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
+generic-y += stat.h
+generic-y += string.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
diff --git a/arch/openrisc/include/asm/asm-offsets.h b/arch/openrisc/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/openrisc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/openrisc/include/asm/bitops.h b/arch/openrisc/include/asm/bitops.h
new file mode 100644
index 000000000000..a9e11efae14d
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops.h
@@ -0,0 +1,59 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_BITOPS_H
+#define __ASM_OPENRISC_BITOPS_H
+
+/*
+ * Where we haven't written assembly versions yet, we fall back to the
+ * generic implementations.  Otherwise, we pull in our (hopefully)
+ * optimized versions.
+ */
+
+#include <linux/irqflags.h>
+#include <linux/compiler.h>
+
+/*
+ * clear_bit may not imply a memory barrier
+ */
+#ifndef smp_mb__before_clear_bit
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
+#endif
+
+#include <asm/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm/bitops/fls.h>
+#include <asm/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __ASM_GENERIC_BITOPS_H */
diff --git a/arch/openrisc/include/asm/bitops/__ffs.h b/arch/openrisc/include/asm/bitops/__ffs.h
new file mode 100644
index 000000000000..6c8368a34059
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/__ffs.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC___FFS_H
+#define __ASM_OPENRISC___FFS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FF1
+
+static inline unsigned long __ffs(unsigned long x)
+{
+	int ret;
+
+	__asm__ ("l.ff1 %0,%1"
+		 : "=r" (ret)
+		 : "r" (x));
+
+	return ret-1;
+}
+
+#else
+#include <asm-generic/bitops/__ffs.h>
+#endif
+
+#endif /* __ASM_OPENRISC___FFS_H */
diff --git a/arch/openrisc/include/asm/bitops/__fls.h b/arch/openrisc/include/asm/bitops/__fls.h
new file mode 100644
index 000000000000..c4ecdb4c523b
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/__fls.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC___FLS_H
+#define __ASM_OPENRISC___FLS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FL1
+
+static inline unsigned long __fls(unsigned long x)
+{
+	int ret;
+
+	__asm__ ("l.fl1 %0,%1"
+		 : "=r" (ret)
+		 : "r" (x));
+
+	return ret-1;
+}
+
+#else
+#include <asm-generic/bitops/__fls.h>
+#endif
+
+#endif /* __ASM_OPENRISC___FLS_H */
diff --git a/arch/openrisc/include/asm/bitops/ffs.h b/arch/openrisc/include/asm/bitops/ffs.h
new file mode 100644
index 000000000000..9de46246ebc7
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/ffs.h
@@ -0,0 +1,32 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FFS_H
+#define __ASM_OPENRISC_FFS_H
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FF1
+
+static inline int ffs(int x)
+{
+	int ret;
+
+	__asm__ ("l.ff1 %0,%1"
+		 : "=r" (ret)
+		 : "r" (x));
+
+	return ret;
+}
+
+#else
+#include <asm-generic/bitops/ffs.h>
+#endif
+
+#endif /* __ASM_OPENRISC_FFS_H */
diff --git a/arch/openrisc/include/asm/bitops/fls.h b/arch/openrisc/include/asm/bitops/fls.h
new file mode 100644
index 000000000000..9efbf9ad86c4
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/fls.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FLS_H
+#define __ASM_OPENRISC_FLS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FL1
+
+static inline int fls(int x)
+{
+	int ret;
+
+	__asm__ ("l.fl1 %0,%1"
+		 : "=r" (ret)
+		 : "r" (x));
+
+	return ret;
+}
+
+#else
+#include <asm-generic/bitops/fls.h>
+#endif
+
+#endif /* __ASM_OPENRISC_FLS_H */
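
The four bitops headers above differ only in numbering convention: the double-underscore variants are zero-based and assume a non-zero argument, while ffs()/fls() are one-based and return 0 for an all-zero word. A small host-side sketch (not from the commit) that models the expected l.ff1/l.fl1 results with portable GCC builtins; the ref_* names are illustrative only:

#include <assert.h>

/* Reference semantics for the or1k bit-scan helpers, assuming 32-bit
 * unsigned int and non-zero input for the double-underscore variants. */
static unsigned int ref___ffs(unsigned int x) { return __builtin_ctz(x); }
static unsigned int ref___fls(unsigned int x) { return 31 - __builtin_clz(x); }
static int ref_ffs(int x) { return __builtin_ffs(x); }
static int ref_fls(int x) { return x ? 32 - __builtin_clz((unsigned int)x) : 0; }

int main(void)
{
	assert(ref___ffs(0x80) == 7);	/* lowest set bit, zero-based */
	assert(ref_ffs(0x80) == 8);	/* lowest set bit, one-based; ffs(0) == 0 */
	assert(ref___fls(0x80) == 7);	/* highest set bit, zero-based */
	assert(ref_fls(0x80) == 8);	/* highest set bit, one-based; fls(0) == 0 */
	return 0;
}
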
diff --git a/arch/openrisc/include/asm/byteorder.h b/arch/openrisc/include/asm/byteorder.h
new file mode 100644
index 000000000000..60d14f7e14e2
--- /dev/null
+++ b/arch/openrisc/include/asm/byteorder.h
@@ -0,0 +1 @@
+#include <linux/byteorder/big_endian.h>
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
new file mode 100644
index 000000000000..4ce7a01a252d
--- /dev/null
+++ b/arch/openrisc/include/asm/cache.h
@@ -0,0 +1,29 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_CACHE_H
+#define __ASM_OPENRISC_CACHE_H
+
+/* FIXME: How can we replace these with values from the CPU...
+ * they shouldn't be hard-coded!
+ */
+
+#define L1_CACHE_BYTES 16
+#define L1_CACHE_SHIFT 4
+
+#endif /* __ASM_OPENRISC_CACHE_H */
diff --git a/arch/openrisc/include/asm/cpuinfo.h b/arch/openrisc/include/asm/cpuinfo.h
new file mode 100644
index 000000000000..917318b6a970
--- /dev/null
+++ b/arch/openrisc/include/asm/cpuinfo.h
@@ -0,0 +1,34 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_CPUINFO_H
+#define __ASM_OPENRISC_CPUINFO_H
+
+struct cpuinfo {
+	u32 clock_frequency;
+
+	u32 icache_size;
+	u32 icache_block_size;
+
+	u32 dcache_size;
+	u32 dcache_block_size;
+};
+
+extern struct cpuinfo cpuinfo;
+
+#endif /* __ASM_OPENRISC_CPUINFO_H */
diff --git a/arch/openrisc/include/asm/delay.h b/arch/openrisc/include/asm/delay.h
new file mode 100644
index 000000000000..17f8bf5a5ac2
--- /dev/null
+++ b/arch/openrisc/include/asm/delay.h
@@ -0,0 +1,24 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_DELAY_H
+#define __ASM_OPENRISC_DELAY_H
+
+#include <asm-generic/delay.h>
+
+extern unsigned long loops_per_jiffy;
+
+#endif
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..052f877b52a5
--- /dev/null
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -0,0 +1,134 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_DMA_MAPPING_H
+#define __ASM_OPENRISC_DMA_MAPPING_H
+
+/*
+ * See Documentation/PCI/PCI-DMA-mapping.txt and
+ * Documentation/DMA-API.txt for documentation.
+ *
+ * This file is written with the intention of eventually moving over
+ * to largely using asm-generic/dma-mapping-common.h in its place.
+ */
+
+#include <linux/dma-debug.h>
+#include <asm-generic/dma-coherent.h>
+#include <linux/kmemcheck.h>
+
+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
+
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
+			      dma_addr_t *dma_handle, gfp_t flag);
+void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+			    dma_addr_t dma_handle);
+dma_addr_t or1k_map_page(struct device *dev, struct page *page,
+			 unsigned long offset, size_t size,
+			 enum dma_data_direction dir,
+			 struct dma_attrs *attrs);
+void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		     size_t size, enum dma_data_direction dir,
+		     struct dma_attrs *attrs);
+void or1k_sync_single_for_cpu(struct device *dev,
+			      dma_addr_t dma_handle, size_t size,
+			      enum dma_data_direction dir);
+void or1k_sync_single_for_device(struct device *dev,
+			         dma_addr_t dma_handle, size_t size,
+			         enum dma_data_direction dir);
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flag)
+{
+	void *memory;
+
+	memory = or1k_dma_alloc_coherent(dev, size, dma_handle, flag);
+
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+	return memory;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	or1k_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
+					size_t size,
+					enum dma_data_direction dir)
+{
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(ptr, size);
+	BUG_ON(!valid_dma_direction(dir));
+	addr = or1k_map_page(dev, virt_to_page(ptr),
+			     (unsigned long)ptr & ~PAGE_MASK, size,
+			     dir, NULL);
+	debug_dma_map_page(dev, virt_to_page(ptr),
+			   (unsigned long)ptr & ~PAGE_MASK, size,
+			   dir, addr, true);
+	return addr;
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
+					  size_t size,
+					  enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+	or1k_unmap_page(dev, addr, size, dir, NULL);
+	debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+					   size_t size,
+					   enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+	or1k_sync_single_for_cpu(dev, addr, size, dir);
+	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t addr, size_t size,
+					      enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+	or1k_sync_single_for_device(dev, addr, size, dir);
+	debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+	/* Support 32 bit DMA mask exclusively */
+	return dma_mask == 0xffffffffULL;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+#endif	/* __ASM_OPENRISC_DMA_MAPPING_H */
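
A hypothetical driver-side view of the wrappers declared above (example_dma_usage, dev, buf and len are placeholders, not part of the commit): streaming mappings are synced around device accesses, while coherent allocations give the CPU and the device a buffer they can share without explicit syncs.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_dma_usage(struct device *dev, void *buf, size_t len)
{
	dma_addr_t map, handle;
	void *vaddr;

	/* Streaming mapping of an existing buffer. */
	map = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, map))
		return -ENOMEM;
	/* ... hand 'map' to the device and wait for it to finish ... */
	dma_unmap_single(dev, map, len, DMA_TO_DEVICE);

	/* Coherent allocation shared between CPU and device. */
	vaddr = dma_alloc_coherent(dev, len, &handle, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	/* ... program the device with 'handle', access 'vaddr' from the CPU ... */
	dma_free_coherent(dev, len, vaddr, handle);

	return 0;
}
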
diff --git a/arch/openrisc/include/asm/elf.h b/arch/openrisc/include/asm/elf.h
new file mode 100644
index 000000000000..2ce603bbfdd3
--- /dev/null
+++ b/arch/openrisc/include/asm/elf.h
@@ -0,0 +1,108 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_ELF_H
+#define __ASM_OPENRISC_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+
+/* The OR1K relocation types... not all relevant for module loader */
+#define R_OR32_NONE	0
+#define R_OR32_32	1
+#define R_OR32_16	2
+#define R_OR32_8	3
+#define R_OR32_CONST	4
+#define R_OR32_CONSTH	5
+#define R_OR32_JUMPTARG	6
+#define R_OR32_VTINHERIT 7
+#define R_OR32_VTENTRY	8
+
+typedef unsigned long elf_greg_t;
+
+/*
+ * Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is
+ * thus exposed to user-space.
+ */
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/* A placeholder; OR32 does not have fp support yet, so no fp regs for now.  */
+typedef unsigned long elf_fpregset_t;
+
+/* This should be moved to include/linux/elf.h */
+#define EM_OR32         0x8472
+#define EM_OPENRISC     92     /* OpenRISC 32-bit embedded processor */
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_ARCH	EM_OR32
+#define ELF_CLASS	ELFCLASS32
+#define ELF_DATA	ELFDATA2MSB
+
+#ifdef __KERNEL__
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+
+#define elf_check_arch(x) \
+	(((x)->e_machine == EM_OR32) || ((x)->e_machine == EM_OPENRISC))
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE         (0x08000000)
+
+/*
+ * Enable dump using regset.
+ * This covers all of general/DSP/FPU regs.
+ */
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE	8192
+
+extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt);
+#define ELF_CORE_COPY_REGS(dest, regs) dump_elf_thread(dest, regs);
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this cpu supports.  This could be done in userspace,
+   but it's not easy, and we've already done it here.  */
+
+#define ELF_HWCAP	(0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.
+
+   For the moment, we have only optimizations for the Intel generations,
+   but that could change... */
+
+#define ELF_PLATFORM	(NULL)
+
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/openrisc/include/asm/fixmap.h b/arch/openrisc/include/asm/fixmap.h
new file mode 100644
index 000000000000..52733416c1f3
--- /dev/null
+++ b/arch/openrisc/include/asm/fixmap.h
@@ -0,0 +1,87 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FIXMAP_H
+#define __ASM_OPENRISC_FIXMAP_H
+
+/* Why exactly do we need 2 empty pages between the top of the fixed
+ * addresses and the top of virtual memory?  Something is using that
+ * memory space but not sure what right now... If you find it, leave
+ * a comment here.
+ */
+#define FIXADDR_TOP	((unsigned long) (-2*PAGE_SIZE))
+
+#include <linux/kernel.h>
+#include <asm/page.h>
+
+/*
+ * On OpenRISC we use these special fixed_addresses for doing ioremap
+ * early in the boot process before memory initialization is complete.
+ * This is used, in particular, by the early serial console code.
+ *
+ * It's not really 'fixmap', per se, but fits loosely into the same
+ * paradigm.
+ */
+enum fixed_addresses {
+	/*
+	 * FIX_IOREMAP entries are useful for mapping physical address
+	 * space before ioremap() is useable, e.g. really early in boot
+	 * before kmalloc() is working.
+	 */
+#define FIX_N_IOREMAPS  32
+	FIX_IOREMAP_BEGIN,
+	FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS - 1,
+	__end_of_fixed_addresses
+};
+
+#define FIXADDR_SIZE		(__end_of_fixed_addresses << PAGE_SHIFT)
+/* FIXADDR_BOTTOM might be a better name here... */
+#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
+
+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	/*
+	 * this branch gets completely eliminated after inlining,
+	 * except when someone tries to use fixaddr indices in an
+	 * illegal way. (such as mixing up address types or using
+	 * out-of-range indices).
+	 *
+	 * If it doesn't get removed, the linker will complain
+	 * loudly with a reasonably clear error message..
+	 */
+	if (idx >= __end_of_fixed_addresses)
+		BUG();
+
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+#endif
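
The fixmap arithmetic above, worked through for the 8 KiB pages this port uses (PAGE_SHIFT == 13, so __end_of_fixed_addresses == 32 here); a small user-space sketch, not from the commit, with uint32_t standing in for the kernel's 32-bit unsigned long:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	13
#define PAGE_SIZE	((uint32_t)1 << PAGE_SHIFT)		/* 8 KiB */

#define FIXADDR_TOP	((uint32_t)0 - 2 * PAGE_SIZE)		/* 0xffffc000 */
#define FIX_N_IOREMAPS	32
#define FIXADDR_SIZE	((uint32_t)FIX_N_IOREMAPS << PAGE_SHIFT)
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)		/* 0xfffbc000 */

#define __fix_to_virt(x)	(FIXADDR_TOP - ((uint32_t)(x) << PAGE_SHIFT))

int main(void)
{
	printf("fix_to_virt(FIX_IOREMAP_BEGIN) = 0x%08x\n", __fix_to_virt(0));	/* 0xffffc000 */
	printf("fix_to_virt(FIX_IOREMAP_END)   = 0x%08x\n", __fix_to_virt(31));	/* 0xfffbe000 */
	printf("FIXADDR_START                  = 0x%08x\n", FIXADDR_START);
	return 0;
}
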
diff --git a/arch/openrisc/include/asm/gpio.h b/arch/openrisc/include/asm/gpio.h
new file mode 100644
index 000000000000..0b0d174f47cd
--- /dev/null
+++ b/arch/openrisc/include/asm/gpio.h
@@ -0,0 +1,65 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_GPIO_H
+#define __ASM_OPENRISC_GPIO_H
+
+#include <linux/errno.h>
+#include <asm-generic/gpio.h>
+
+#ifdef CONFIG_GPIOLIB
+
+/*
+ * OpenRISC (or1k) does not have on-chip GPIOs, so there is not really
+ * any standardized implementation that makes sense here.  If passing
+ * through gpiolib becomes a bottleneck then it may make sense, on a
+ * case-by-case basis, to implement these inlined/rapid versions.
+ *
+ * Just call gpiolib.
+ */
+static inline int gpio_get_value(unsigned int gpio)
+{
+	return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+	__gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+	return __gpio_cansleep(gpio);
+}
+
+/*
+ * Not implemented, yet.
+ */
+static inline int gpio_to_irq(unsigned int gpio)
+{
+	return -ENOSYS;
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+	return -EINVAL;
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+#endif /* __ASM_OPENRISC_GPIO_H */
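
Since everything here is routed through gpiolib, a consumer just uses the standard gpiolib calls; a hypothetical example (example_led_on, the GPIO number and the label are placeholders, not part of the commit):

#include <linux/gpio.h>

static int example_led_on(unsigned int gpio)
{
	int err;

	err = gpio_request(gpio, "example-led");
	if (err)
		return err;

	err = gpio_direction_output(gpio, 0);	/* claim as output, initially low */
	if (err) {
		gpio_free(gpio);
		return err;
	}

	gpio_set_value(gpio, 1);		/* drive the line high */
	return 0;
}
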
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
new file mode 100644
index 000000000000..07f5299d6c28
--- /dev/null
+++ b/arch/openrisc/include/asm/io.h
@@ -0,0 +1,51 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_IO_H
+#define __ASM_OPENRISC_IO_H
+
+/*
+ * PCI: can we really do 0 here if we have no port IO?
+ */
+#define IO_SPACE_LIMIT		0
+
+/* OpenRISC has no port IO */
+#define HAVE_ARCH_PIO_SIZE	1
+#define PIO_RESERVED		0X0UL
+#define PIO_OFFSET		0
+#define PIO_MASK		0
+
+#include <asm-generic/io.h>
+
+extern void __iomem *__ioremap(phys_addr_t offset, unsigned long size,
+				pgprot_t prot);
+
+static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
+{
+	return __ioremap(offset, size, PAGE_KERNEL);
+}
+
+/* #define _PAGE_CI       0x002 */
+static inline void __iomem *ioremap_nocache(phys_addr_t offset,
+					     unsigned long size)
+{
+	return __ioremap(offset, size,
+			 __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI));
+}
+
+extern void iounmap(void *addr);
+#endif
diff --git a/arch/openrisc/include/asm/irq.h b/arch/openrisc/include/asm/irq.h
new file mode 100644
index 000000000000..eb612b1865d2
--- /dev/null
+++ b/arch/openrisc/include/asm/irq.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_IRQ_H__
+#define __ASM_OPENRISC_IRQ_H__
+
+#define	NR_IRQS		32
+#include <asm-generic/irq.h>
+
+#define NO_IRQ		(-1)
+
+#endif /* __ASM_OPENRISC_IRQ_H__ */
diff --git a/arch/openrisc/include/asm/irqflags.h b/arch/openrisc/include/asm/irqflags.h
new file mode 100644
index 000000000000..dc86c653d70b
--- /dev/null
+++ b/arch/openrisc/include/asm/irqflags.h
@@ -0,0 +1,29 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef ___ASM_OPENRISC_IRQFLAGS_H
+#define ___ASM_OPENRISC_IRQFLAGS_H
+
+#include <asm/spr_defs.h>
+
+#define ARCH_IRQ_DISABLED        0x00
+#define ARCH_IRQ_ENABLED         (SPR_SR_IEE|SPR_SR_TEE)
+
+#include <asm-generic/irqflags.h>
+
+#endif /* ___ASM_OPENRISC_IRQFLAGS_H */
diff --git a/arch/openrisc/include/asm/linkage.h b/arch/openrisc/include/asm/linkage.h
new file mode 100644
index 000000000000..e2638752091a
--- /dev/null
+++ b/arch/openrisc/include/asm/linkage.h
@@ -0,0 +1,25 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_LINKAGE_H
+#define __ASM_OPENRISC_LINKAGE_H
+
+#define __ALIGN      .align 0
+#define __ALIGN_STR ".align 0"
+
+#endif /* __ASM_OPENRISC_LINKAGE_H */
diff --git a/arch/openrisc/include/asm/memblock.h b/arch/openrisc/include/asm/memblock.h
new file mode 100644
index 000000000000..bbe5a1c788cb
--- /dev/null
+++ b/arch/openrisc/include/asm/memblock.h
@@ -0,0 +1,24 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MEMBLOCK_H
+#define __ASM_OPENRISC_MEMBLOCK_H
+
+/* empty */
+
+#endif /* __ASM_OPENRISC_MEMBLOCK_H */
diff --git a/arch/openrisc/include/asm/mmu.h b/arch/openrisc/include/asm/mmu.h
new file mode 100644
index 000000000000..d069bc2ddfa4
--- /dev/null
+++ b/arch/openrisc/include/asm/mmu.h
@@ -0,0 +1,26 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MMU_H
+#define __ASM_OPENRISC_MMU_H
+
+#ifndef __ASSEMBLY__
+typedef unsigned long mm_context_t;
+#endif
+
+#endif
diff --git a/arch/openrisc/include/asm/mmu_context.h b/arch/openrisc/include/asm/mmu_context.h
new file mode 100644
index 000000000000..e94b814d2e3c
--- /dev/null
+++ b/arch/openrisc/include/asm/mmu_context.h
@@ -0,0 +1,43 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MMU_CONTEXT_H
+#define __ASM_OPENRISC_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
+extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+		      struct task_struct *tsk);
+
+#define deactivate_mm(tsk, mm)	do { } while (0)
+
+#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
+
+/* current active pgd - this is similar to other processors' pgd
+ * registers like cr3 on the i386
+ */
+
+extern volatile pgd_t *current_pgd;   /* defined in arch/openrisc/mm/fault.c */
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#endif
diff --git a/arch/openrisc/include/asm/mutex.h b/arch/openrisc/include/asm/mutex.h
new file mode 100644
index 000000000000..b85a0cfa9fc9
--- /dev/null
+++ b/arch/openrisc/include/asm/mutex.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h
new file mode 100644
index 000000000000..b041b344b229
--- /dev/null
+++ b/arch/openrisc/include/asm/page.h
@@ -0,0 +1,110 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PAGE_H
+#define __ASM_OPENRISC_PAGE_H
+
+
+/* PAGE_SHIFT determines the page size */
+
+#define PAGE_SHIFT      13
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE       (1 << PAGE_SHIFT)
+#else
+#define PAGE_SIZE       (1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK       (~(PAGE_SIZE-1))
+
+#define PAGE_OFFSET	0xc0000000
+#define KERNELBASE	PAGE_OFFSET
+
+/* This is not necessarily the right place for this, but it's needed by
+ * drivers/of/fdt.c
+ */
+#include <asm/setup.h>
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr)            __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr)      free_page(addr)
+
+#define clear_page(page)	memset((page), 0, PAGE_SIZE)
+#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg)        clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+	unsigned long pte;
+} pte_t;
+typedef struct {
+	unsigned long pgd;
+} pgd_t;
+typedef struct {
+	unsigned long pgprot;
+} pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x)	((x).pte)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) })
+#define __pgd(x)	((pgd_t) { (x) })
+#define __pgprot(x)	((pgprot_t) { (x) })
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif /* !__ASSEMBLY__ */
+
+
+#ifndef __ASSEMBLY__
+
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
+#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
+
+#define virt_to_pfn(kaddr)      (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)
+
+#define virt_to_page(addr) \
+	(mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
+#define page_to_virt(page) \
+	((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+
+#define page_to_phys(page)      ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+#define pfn_valid(pfn)          ((pfn) < max_mapnr)
+
+#define virt_addr_valid(kaddr)  (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
+				((void *)(kaddr) < (void *)memory_end))
+
+#endif /* __ASSEMBLY__ */
+
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASM_OPENRISC_PAGE_H */
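
As a quick check of the arithmetic these macros encode (PAGE_SHIFT of 13, PAGE_OFFSET of 0xc0000000), here is a hedged sketch; the address 0xc0004000 and the helper name are invented for illustration and are not part of the patch.

/* Illustrative only: 0xc0004000 is a made-up kernel virtual address. */
static inline void page_macro_example(void)
{
	unsigned long kaddr = 0xc0004000UL;		/* hypothetical */
	unsigned long paddr = __pa(kaddr);		/* 0x00004000 */
	unsigned long pfn   = virt_to_pfn(kaddr);	/* 0x4000 >> 13 = 2 */
	void *back          = __va(paddr);		/* 0xc0004000 again */

	/* PAGE_SIZE is 1UL << 13 = 8192, so PAGE_MASK is 0xffffe000. */
	(void)pfn;
	(void)back;
}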
diff --git a/arch/openrisc/include/asm/param.h b/arch/openrisc/include/asm/param.h
new file mode 100644
index 000000000000..c39a336610e2
--- /dev/null
+++ b/arch/openrisc/include/asm/param.h
@@ -0,0 +1,26 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PARAM_H
+#define __ASM_OPENRISC_PARAM_H
+
+#define EXEC_PAGESIZE	8192
+
+#include <asm-generic/param.h>
+
+#endif /* __ASM_OPENRISC_PARAM_H */
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
new file mode 100644
index 000000000000..05c39ecd2efd
--- /dev/null
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -0,0 +1,102 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PGALLOC_H
+#define __ASM_OPENRISC_PGALLOC_H
+
+#include <asm/page.h>
+#include <linux/threads.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
+
+extern int mem_init_done;
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+	set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+				struct page *pte)
+{
+	set_pmd(pmd, __pmd(_KERNPG_TABLE +
+		     ((unsigned long)page_to_pfn(pte) <<
+		     (unsigned long) PAGE_SHIFT)));
+}
+
+/*
+ * Allocate and free page tables.
+ */
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+	if (ret) {
+		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+		memcpy(ret + USER_PTRS_PER_PGD,
+		       swapper_pg_dir + USER_PTRS_PER_PGD,
+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+	}
+	return ret;
+}
+
+#if 0
+/* FIXME: This seems to be the preferred style, but we are using
+ * current_pgd (from mm->pgd) to load kernel pages so we need it
+ * initialized.  This needs to be looked into.
+ */
+extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return (pgd_t *)get_zeroed_page(GFP_KERNEL);
+}
+#endif
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
+}
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long address)
+{
+	struct page *pte;
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	if (pte)
+		clear_page(page_address(pte));
+	return pte;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+	__free_page(pte);
+}
+
+
+#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#define check_pgt_cache()          do { } while (0)
+
+#endif
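
The worked numbers behind pgd_alloc() above, assuming the constants defined in pgtable.h and processor.h elsewhere in this patch (PAGE_SHIFT = 13, TASK_SIZE = 0x80000000); this note is illustrative and not part of the patch.

/*
 *   PGDIR_SHIFT       = 13 + (13 - 2) = 24   ->  PGDIR_SIZE = 16 MB
 *   PTRS_PER_PGD      = 1 << (13 - 2) = 2048 ->  2048 * 4 bytes = one 8 KB page
 *   USER_PTRS_PER_PGD = 0x80000000 / 0x01000000 = 128
 *
 * So pgd_alloc() grabs exactly one page, zeroes the first 128 (user) slots
 * and copies the remaining 2048 - 128 = 1920 slots from swapper_pg_dir,
 * which keeps the kernel mappings (pgd indices 192..255 for addresses at
 * and above PAGE_OFFSET) visible through current_pgd while this mm is live.
 */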
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
new file mode 100644
index 000000000000..043505d7f684
--- /dev/null
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -0,0 +1,463 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* or32 pgtable.h - macros and functions to manipulate page tables
+ *
+ * Based on:
+ * include/asm-cris/pgtable.h
+ */
+
+#ifndef __ASM_OPENRISC_PGTABLE_H
+#define __ASM_OPENRISC_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/mmu.h>
+#include <asm/fixmap.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * or32, we use that, but "fold" the mid level into the top-level page
+ * table. Since the MMU TLB is software loaded through an interrupt, it
+ * supports any page table structure, so we could have used a three-level
+ * setup, but for the amounts of memory we normally use, a two-level is
+ * probably more efficient.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the or32 page table tree.
+ */
+
+extern void paging_init(void);
+
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-2))
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: we use a two-level, so
+ * we don't really have any PMD directory physically.
+ * pointers are 4 bytes so we can use the page size and
+ * divide it by 4 (shift by 2).
+ */
+#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-2))
+
+#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-2))
+
+/* Calculate how many PGD entries a user-level program can use.
+ * The first mappable virtual address is 0
+ * (TASK_SIZE is the maximum virtual address space).
+ */
+
+#define USER_PTRS_PER_PGD       (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS      0
+
+/*
+ * Kernel's own virtual memory area.
+ */
+
+/*
+ * The size and location of the vmalloc area are chosen so that modules
+ * placed in this area aren't more than a 28-bit signed offset from any
+ * kernel functions that they may need.  This greatly simplifies handling
+ * of the relocations for l.j and l.jal instructions as we don't need to
+ * introduce any trampolines for reaching "distant" code.
+ *
+ * 64 MB of vmalloc area is comparable to what's available on other arches.
+ */
+
+#define VMALLOC_START	(PAGE_OFFSET-0x04000000)
+#define VMALLOC_END	(PAGE_OFFSET)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
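
The worked numbers for the vmalloc window described above (illustrative, not part of the patch):

/*
 *   VMALLOC_START = 0xc0000000 - 0x04000000 = 0xbc000000
 *   VMALLOC_END   = 0xc0000000
 *   window        = 0x04000000 = 64 MB
 *
 * l.j/l.jal carry a 26-bit word offset, i.e. a 28-bit signed byte offset,
 * for a reach of roughly +/- 128 MB, so modules loaded in this 64 MB window
 * stay within direct branch range of kernel text just above PAGE_OFFSET.
 */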
+
+/* Define some higher level generic page attributes.
+ *
+ * If you change the _PAGE_CI definition, be sure to change it in
+ * io.h for ioremap_nocache() too.
+ */
+
+/*
+ * An OR32 PTE looks like this:
+ *
+ * |  31 ... 10 |  9  |  8 ... 6  |  5  |  4  |  3  |  2  |  1  |  0  |
+ *  Phys pg.num    L     PP Index    D     A    WOM   WBC   CI    CC
+ *
+ *  L  : link
+ *  PPI: Page protection index
+ *  D  : Dirty
+ *  A  : Accessed
+ *  WOM: Weakly ordered memory
+ *  WBC: Write-back cache
+ *  CI : Cache inhibit
+ *  CC : Cache coherent
+ *
+ * The protection bits below should correspond to the layout of the actual
+ * PTE as per above
+ */
+
+#define _PAGE_CC       0x001 /* software: pte contains a translation */
+#define _PAGE_CI       0x002 /* cache inhibit          */
+#define _PAGE_WBC      0x004 /* write back cache       */
+#define _PAGE_FILE     0x004 /* set: pagecache, unset: swap (when !PRESENT) */
+#define _PAGE_WOM      0x008 /* weakly ordered memory  */
+
+#define _PAGE_A        0x010 /* accessed               */
+#define _PAGE_D        0x020 /* dirty                  */
+#define _PAGE_URE      0x040 /* user read enable       */
+#define _PAGE_UWE      0x080 /* user write enable      */
+
+#define _PAGE_SRE      0x100 /* superuser read enable  */
+#define _PAGE_SWE      0x200 /* superuser write enable */
+#define _PAGE_EXEC     0x400 /* software: page is executable */
+#define _PAGE_U_SHARED 0x800 /* software: page is shared in user space */
+
+/* 0x001 is the cache coherency bit, which should always be set to
+ *       1 - for SMP (when we support it)
+ *       0 - otherwise
+ *
+ * we just reuse this bit in software for _PAGE_PRESENT and
+ * force it to 0 when loading it into the TLB.
+ */
+#define _PAGE_PRESENT  _PAGE_CC
+#define _PAGE_USER     _PAGE_URE
+#define _PAGE_WRITE    (_PAGE_UWE | _PAGE_SWE)
+#define _PAGE_DIRTY    _PAGE_D
+#define _PAGE_ACCESSED _PAGE_A
+#define _PAGE_NO_CACHE _PAGE_CI
+#define _PAGE_SHARED   _PAGE_U_SHARED
+#define _PAGE_READ     (_PAGE_URE | _PAGE_SRE)
+
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_BASE     (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_ALL      (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _KERNPG_TABLE \
+	(_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE       __pgprot(_PAGE_ALL)
+#define PAGE_READONLY   __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
+#define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
+#define PAGE_SHARED \
+	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
+		 | _PAGE_SHARED)
+#define PAGE_SHARED_X \
+	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
+		 | _PAGE_SHARED | _PAGE_EXEC)
+#define PAGE_COPY       __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
+#define PAGE_COPY_X     __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
+
+#define PAGE_KERNEL \
+	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
+		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
+#define PAGE_KERNEL_RO \
+	__pgprot(_PAGE_ALL | _PAGE_SRE \
+		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
+#define PAGE_KERNEL_NOCACHE \
+	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
+		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
+
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY_X
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY_X
+#define __P100	PAGE_READONLY
+#define __P101	PAGE_READONLY_X
+#define __P110	PAGE_COPY
+#define __P111	PAGE_COPY_X
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY_X
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED_X
+#define __S100	PAGE_READONLY
+#define __S101	PAGE_READONLY_X
+#define __S110	PAGE_SHARED
+#define __S111	PAGE_SHARED_X
+
+/* zero page used for uninitialized stuff */
+extern unsigned long empty_zero_page[2048];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+/* number of bits that fit into a memory pointer */
+#define BITS_PER_PTR			(8*sizeof(unsigned long))
+
+/* to align the pointer to a pointer address */
+#define PTR_MASK			(~(sizeof(void *)-1))
+
+/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
+/* 64-bit machines, beware!  SRB. */
+#define SIZEOF_PTR_LOG2			2
+
+/* to find an entry in a page-table */
+#define PAGE_PTR(address) \
+((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
+
+/* to set the page-dir */
+#define SET_PAGE_DIR(tsk, pgdir)
+
+#define pte_none(x)	(!pte_val(x))
+#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)
+
+#define pmd_none(x)	(!pmd_val(x))
+#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK)) != _KERNPG_TABLE)
+#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp)	do { pmd_val(*(xp)) = 0; } while (0)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+
+static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_READ; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_WRITE);
+	return pte;
+}
+
+static inline pte_t pte_rdprotect(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_READ);
+	return pte;
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_EXEC);
+	return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_DIRTY);
+	return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_ACCESSED);
+	return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_WRITE;
+	return pte;
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_READ;
+	return pte;
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_EXEC;
+	return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_ACCESSED;
+	return pte;
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+/* What actually goes as arguments to the various functions is less than
+ * obvious, but a rule of thumb is that pages are passed as struct page *,
+ * physical DRAM addresses as unsigned long, and DRAM "virtual"
+ * addresses (the 0xc0xxxxxx ones) as void *.
+ */
+
+static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
+{
+	pte_t pte;
+	/* the PTE needs a physical address */
+	pte_val(pte) = __pa(page) | pgprot_val(pgprot);
+	return pte;
+}
+
+#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
+
+#define mk_pte_phys(physpage, pgprot) \
+({                                                                      \
+	pte_t __pte;                                                    \
+									\
+	pte_val(__pte) = (physpage) + pgprot_val(pgprot);               \
+	__pte;                                                          \
+})
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	return pte;
+}
+
+
+/*
+ * pte_val refers to a page in the 0x0xxxxxxx physical DRAM interval
+ * __pte_page(pte_val) refers to the "virtual" DRAM interval
+ * pte_pagenr refers to the page-number counted starting from the virtual
+ * DRAM start
+ */
+
+static inline unsigned long __pte_page(pte_t pte)
+{
+	/* the PTE contains a physical address */
+	return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
+}
+
+#define pte_pagenr(pte)         ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+/* permanent address of a page */
+
+#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+#define pte_page(pte)		(mem_map+pte_pagenr(pte))
+
+/*
+ * Only the ptes themselves need to point to physical DRAM (see above);
+ * the pagetable links are handled purely within the kernel SW and thus
+ * don't need the __pa and __va transformations.
+ */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+	pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep;
+}
+
+#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page_kernel(pmd)    ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address)      ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+#define __pgd_offset(address)   pgd_index(address)
+
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define __pmd_offset(address) \
+	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define __pte_offset(address)                   \
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address)         \
+	((pte_t *) pmd_page_kernel(*(dir)) +  __pte_offset(address))
+#define pte_offset_map(dir, address)	        \
+	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
+#define pte_offset_map_nested(dir, address)     \
+	pte_offset_map(dir, address)
+
+#define pte_unmap(pte)          do { } while (0)
+#define pte_unmap_nested(pte)   do { } while (0)
+#define pte_pfn(x)		((unsigned long)(((x).pte)) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot)  __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
+
+#define pte_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pte %p(%08lx).\n", \
+	       __FILE__, __LINE__, &(e), pte_val(e))
+#define pgd_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pgd %p(%08lx).\n", \
+	       __FILE__, __LINE__, &(e), pgd_val(e))
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
+
+/*
+ * or32 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ *
+ * Actually, I am not sure what this could be used for.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+	unsigned long address, pte_t *pte)
+{
+}
+
+/* __PHX__ FIXME, SWAP, this probably doesn't work */
+
+/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
+/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */
+
+#define __swp_type(x)			(((x).val >> 5) & 0x7f)
+#define __swp_offset(x)			((x).val >> 12)
+#define __swp_entry(type, offset) \
+	((swp_entry_t) { ((type) << 5) | ((offset) << 12) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
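
A worked example of the swap encoding above, with invented values (type 3, offset 0x1234); it is not part of the patch.

/*
 *   __swp_entry(3, 0x1234).val = (3 << 5) | (0x1234 << 12) = 0x01234060
 *   __swp_type(entry)          = (0x01234060 >> 5) & 0x7f  = 3
 *   __swp_offset(entry)        = 0x01234060 >> 12          = 0x1234
 *
 * Bits 0..4 stay clear, so the resulting pte is neither pte_none() nor
 * pte_present(), as the encoding requires.
 */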
+
+/* Encode and decode a nonlinear file mapping entry */
+
+#define PTE_FILE_MAX_BITS               26
+#define pte_to_pgoff(x)	                (pte_val(x) >> 6)
+#define pgoff_to_pte(x)	                __pte(((x) << 6) | _PAGE_FILE)
+
+#define kern_addr_valid(addr)           (1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)         \
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()		do { } while (0)
+#define io_remap_page_range		remap_page_range
+
+typedef pte_t *pte_addr_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_OPENRISC_PGTABLE_H */
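
To show how the pieces above combine into the two-level walk, here is a hedged sketch; the helper name is invented, the folded pud/pmd steps follow the generic asm-generic/pgtable-nopmd.h conventions, and nothing here is part of the patch.

/* Hypothetical helper: resolve a kernel virtual address to its pte. */
static pte_t *lookup_kernel_pte(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);	/* bits 31..24 index the pgd */
	pud_t *pud = pud_offset(pgd, vaddr);	/* folded: same entry as pgd */
	pmd_t *pmd = pmd_offset(pud, vaddr);	/* folded: same entry as pud */

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	/* bits 23..13 index the 2048-entry pte page */
	return pte_offset_kernel(pmd, vaddr);
}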
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
new file mode 100644
index 000000000000..bb54c97b9783
--- /dev/null
+++ b/arch/openrisc/include/asm/processor.h
@@ -0,0 +1,113 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PROCESSOR_H
+#define __ASM_OPENRISC_PROCESSOR_H
+
+#include <asm/spr_defs.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+#define STACK_TOP       TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
+/* Kernel and user SR register setting */
+#define KERNEL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
+		   | SPR_SR_DCE | SPR_SR_SM)
+#define USER_SR   (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
+		   | SPR_SR_DCE | SPR_SR_IEE | SPR_SR_TEE)
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+/*
+ * User space process size. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+
+#define TASK_SIZE       (0x80000000UL)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE      (TASK_SIZE / 8 * 3)
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+struct thread_struct {
+};
+
+/*
+ * At user->kernel entry, the pt_regs struct is stacked on top of the
+ * kernel stack.  This macro allows us to find those regs for a task.
+ * Notice that subsequent pt_regs stackings, like recursive interrupts
+ * occurring while we're in the kernel, won't affect this - only the first
+ * user->kernel transition registers are reached by this (i.e. not the regs
+ * of a running signal handler).
+ */
+#define user_regs(thread_info)  (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE - STACK_FRAME_OVERHEAD)) - 1)
+
+/*
+ * Ditto, but for the currently running task
+ */
+
+#define task_pt_regs(task) user_regs(task_thread_info(task))
+#define current_regs() user_regs(current_thread_info())
+
+extern inline void prepare_to_copy(struct task_struct *tsk)
+{
+}
+
+#define INIT_SP         (sizeof(init_stack) + (unsigned long) &init_stack)
+
+#define INIT_THREAD  { }
+
+
+#define KSTK_EIP(tsk)   (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
+
+
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
+void release_thread(struct task_struct *);
+unsigned long get_wchan(struct task_struct *p);
+
+/*
+ * Free current thread data structures etc..
+ */
+
+extern inline void exit_thread(void)
+{
+	/* Nothing needs to be done.  */
+}
+
+/*
+ * Return saved PC of a blocked thread. For now, this is the "user" PC
+ */
+extern unsigned long thread_saved_pc(struct task_struct *t);
+
+#define init_stack      (init_thread_union.stack)
+
+#define cpu_relax()     do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_OPENRISC_PROCESSOR_H */
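
The worked numbers for the address-space layout above (illustrative, not part of the patch):

/*
 *   TASK_SIZE          = 0x80000000
 *   TASK_UNMAPPED_BASE = 0x80000000 / 8 * 3 = 0x30000000
 *
 * user_regs() points one struct pt_regs below
 * (thread_info + THREAD_SIZE - STACK_FRAME_OVERHEAD), i.e. the registers
 * saved at the first user->kernel transition sit just under the 128-byte
 * redzone at the top of the kernel stack.  THREAD_SIZE itself comes from
 * thread_info.h, which is not shown in this hunk.
 */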
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
new file mode 100644
index 000000000000..e1f3fe26606c
--- /dev/null
+++ b/arch/openrisc/include/asm/prom.h
@@ -0,0 +1,77 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/of.h>	/* linux/of.h gets to determine #include ordering */
+
+#ifndef _ASM_OPENRISC_PROM_H
+#define _ASM_OPENRISC_PROM_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/irq.h>
+#include <linux/atomic.h>
+#include <linux/of_irq.h>
+#include <linux/of_fdt.h>
+#include <linux/of_address.h>
+#include <linux/proc_fs.h>
+#include <linux/platform_device.h>
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+/* Other Prototypes */
+extern int early_uartlite_console(void);
+
+/* Parse the ibm,dma-window property of an OF node into the busno, phys and
+ * size parameters.
+ */
+void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
+		unsigned long *busno, unsigned long *phys, unsigned long *size);
+
+extern void kdump_move_device_tree(void);
+
+/* CPU OF node matching */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+
+/* Get the MAC address */
+extern const void *of_get_mac_address(struct device_node *np);
+
+/**
+ * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * @pdev:	the device whose interrupt is to be resolved
+ * @out_irq:	structure of_irq filled by this function
+ *
+ * This function resolves the PCI interrupt for a given PCI device. If a
+ * device-node exists for a given pci_dev, it will use normal OF tree
+ * walking. If not, it will implement standard swizzling and walk up the
+ * PCI tree until a device-node is found, at which point it will finish
+ * resolving using OF tree walking.
+ */
+struct pci_dev;
+extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+
+/* This routine is here to provide compatibility with how powerpc
+ * handles IRQ mapping for OF device nodes.  We precompute and permanently
+ * register them in the platform_device objects, whereas powerpc computes them
+ * on request.
+ */
+static inline void irq_dispose_mapping(unsigned int virq)
+{
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_OPENRISC_PROM_H */
diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h
new file mode 100644
index 000000000000..054537c5f9c9
--- /dev/null
+++ b/arch/openrisc/include/asm/ptrace.h
@@ -0,0 +1,131 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PTRACE_H
+#define __ASM_OPENRISC_PTRACE_H
+
+#include <asm/spr_defs.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * This is the layout of the regset returned by the GETREGSET ptrace call
+ */
+struct user_regs_struct {
+	/* GPR R0-R31... */
+	unsigned long gpr[32];
+	unsigned long pc;
+	unsigned long sr;
+	unsigned long pad1;
+	unsigned long pad2;
+};
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * Make kernel PTrace/register structures opaque to userspace... userspace can
+ * access thread state via the regset mechanism.  This allows us a bit of
+ * flexibility in how we order the registers on the stack, permitting some
+ * optimizations like packing call-clobbered registers together so that
+ * they share a cacheline (not done yet, though... future optimization).
+ */
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct describes how the registers are laid out on the kernel stack
+ * during a syscall or other kernel entry.
+ *
+ * This structure should always be cacheline aligned on the stack.
+ * FIXME: I don't think that's the case right now.  The alignment is
+ * taken care of elsewhere... head.S, process.c, etc.
+ */
+
+struct pt_regs {
+	union {
+		struct {
+			/* Named registers */
+			long  sr;	/* Stored in place of r0 */
+			long  sp;	/* r1 */
+		};
+		struct {
+			/* Old style */
+			long offset[2];
+			long gprs[30];
+		};
+		struct {
+			/* New style */
+			long gpr[32];
+		};
+	};
+	long  pc;
+	long  orig_gpr11;	/* For restarting system calls */
+	long  syscallno;	/* Syscall number (used by strace) */
+	long dummy;		/* Cheap alignment fix */
+};
+#endif /* __ASSEMBLY__ */
+
+/* TODO: Rename this to REDZONE because that's what it is */
+#define STACK_FRAME_OVERHEAD  128  /* size of minimum stack frame */
+
+#define instruction_pointer(regs)	((regs)->pc)
+#define user_mode(regs)			(((regs)->sr & SPR_SR_SM) == 0)
+#define user_stack_pointer(regs)	((unsigned long)(regs)->sp)
+#define profile_pc(regs)		instruction_pointer(regs)
+
+/*
+ * Offsets used by 'ptrace' system call interface.
+ */
+#define PT_SR         0
+#define PT_SP         4
+#define PT_GPR2       8
+#define PT_GPR3       12
+#define PT_GPR4       16
+#define PT_GPR5       20
+#define PT_GPR6       24
+#define PT_GPR7       28
+#define PT_GPR8       32
+#define PT_GPR9       36
+#define PT_GPR10      40
+#define PT_GPR11      44
+#define PT_GPR12      48
+#define PT_GPR13      52
+#define PT_GPR14      56
+#define PT_GPR15      60
+#define PT_GPR16      64
+#define PT_GPR17      68
+#define PT_GPR18      72
+#define PT_GPR19      76
+#define PT_GPR20      80
+#define PT_GPR21      84
+#define PT_GPR22      88
+#define PT_GPR23      92
+#define PT_GPR24      96
+#define PT_GPR25      100
+#define PT_GPR26      104
+#define PT_GPR27      108
+#define PT_GPR28      112
+#define PT_GPR29      116
+#define PT_GPR30      120
+#define PT_GPR31      124
+#define PT_PC	      128
+#define PT_ORIG_GPR11 132
+#define PT_SYSCALLNO  136
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_OPENRISC_PTRACE_H */
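
A hedged cross-check of the PT_* offsets against struct pt_regs above; the helper name is invented for illustration and is not part of the patch.

/*
 * Each field is a 4-byte long, so the 32 union slots occupy offsets
 * 0..124 (PT_SR, PT_SP, PT_GPR2..PT_GPR31) and the named fields follow:
 * PT_PC = 32 * 4 = 128, PT_ORIG_GPR11 = 132, PT_SYSCALLNO = 136.
 */
static inline long regs_syscallno(struct pt_regs *regs)
{
	/* C view; assembly would read *(long *)((char *)regs + PT_SYSCALLNO) */
	return regs->syscallno;
}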
diff --git a/arch/openrisc/include/asm/serial.h b/arch/openrisc/include/asm/serial.h
new file mode 100644
index 000000000000..270a45241639
--- /dev/null
+++ b/arch/openrisc/include/asm/serial.h
@@ -0,0 +1,36 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SERIAL_H
+#define __ASM_OPENRISC_SERIAL_H
+
+#ifdef __KERNEL__
+
+#include <asm/cpuinfo.h>
+
+/* There's a generic version of this file, but it assumes a 1.8432 MHz UART clock...
+ * this, on the other hand, assumes the UART clock is tied to the system
+ * clock... 8250_early.c (early 8250 serial console) actually uses this, so
+ * it needs to be correct to get the early console working.
+ */
+
+#define BASE_BAUD (cpuinfo.clock_frequency/16)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_OPENRISC_SERIAL_H */
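
A worked example of the BASE_BAUD arithmetic; the 50 MHz figure is purely illustrative and not taken from the patch.

/*
 * With cpuinfo.clock_frequency = 50,000,000:
 *
 *   BASE_BAUD = 50,000,000 / 16 = 3,125,000
 *
 * and the 8250 divisor for a 115200 console is 3,125,000 / 115,200 ~= 27,
 * which only comes out right if clock_frequency really is the UART input
 * clock -- hence the note above about the early console depending on it.
 */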
diff --git a/arch/openrisc/include/asm/sigcontext.h b/arch/openrisc/include/asm/sigcontext.h
new file mode 100644
index 000000000000..54a5c50132e3
--- /dev/null
+++ b/arch/openrisc/include/asm/sigcontext.h
@@ -0,0 +1,38 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SIGCONTEXT_H
+#define __ASM_OPENRISC_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/* This struct is saved by setup_frame in signal.c, to keep the current
+   context while a signal handler is executed. It's restored by sys_sigreturn.
+
+   To keep things simple, we use pt_regs here even though normally you just
+   specify the list of regs to save. Then we can use copy_from_user on the
+   entire regs instead of a series of get_user() calls as well...
+*/
+
+struct sigcontext {
+	struct pt_regs regs;  /* needs to be first */
+	unsigned long oldmask;
+	unsigned long usp;    /* usp before stacking this gunk on it */
+};
+
+#endif /* __ASM_OPENRISC_SIGCONTEXT_H */
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
new file mode 100644
index 000000000000..fd00a3a24123
--- /dev/null
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -0,0 +1,24 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SPINLOCK_H
+#define __ASM_OPENRISC_SPINLOCK_H
+
+#error "or32 doesn't do SMP yet"
+
+#endif
diff --git a/arch/openrisc/include/asm/spr.h b/arch/openrisc/include/asm/spr.h
new file mode 100644
index 000000000000..1cccb42dd477
--- /dev/null
+++ b/arch/openrisc/include/asm/spr.h
@@ -0,0 +1,42 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SPR_H
+#define __ASM_OPENRISC_SPR_H
+
+#define mtspr(_spr, _val) __asm__ __volatile__ (		\
+	"l.mtspr r0,%1,%0"					\
+	: : "K" (_spr), "r" (_val))
+#define mtspr_off(_spr, _off, _val) __asm__ __volatile__ (	\
+	"l.mtspr %0,%1,%2"					\
+	: : "r" (_off), "r" (_val), "K" (_spr))
+
+static inline unsigned long mfspr(unsigned long add)
+{
+	unsigned long ret;
+	__asm__ __volatile__ ("l.mfspr %0,r0,%1" : "=r" (ret) : "K" (add));
+	return ret;
+}
+
+static inline unsigned long mfspr_off(unsigned long add, unsigned long offset)
+{
+	unsigned long ret;
+	__asm__ __volatile__ ("l.mfspr %0,%1,%2" : "=r" (ret)
+						 : "r" (offset), "K" (add));
+	return ret;
+}
+
+#endif
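
A hedged usage sketch of the accessors above; the function name is invented, the SPR_* constants come from asm/spr_defs.h elsewhere in this patch, and nothing here is part of the patch itself.

/* Illustrative only. */
static inline void spr_access_example(void)
{
	unsigned long vr  = mfspr(SPR_VR);		/* version register */
	unsigned long rev = vr & SPR_VR_REV;		/* revision field */

	/* Unmask interrupt line 2 in the PIC mask register. */
	mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << 2));

	(void)rev;
}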
diff --git a/arch/openrisc/include/asm/spr_defs.h b/arch/openrisc/include/asm/spr_defs.h
new file mode 100644
index 000000000000..5dbc668865c4
--- /dev/null
+++ b/arch/openrisc/include/asm/spr_defs.h
@@ -0,0 +1,604 @@
+/*
+ * OpenRISC Linux
+ *
+ * SPR Definitions
+ *
+ * Copyright (C) 2000 Damjan Lampret
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2008, 2010 Embecosm Limited
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This file is part of OpenRISC 1000 Architectural Simulator.
+ */
+
+#ifndef SPR_DEFS__H
+#define SPR_DEFS__H
+
+/* Definition of special-purpose registers (SPRs). */
+
+#define MAX_GRPS (32)
+#define MAX_SPRS_PER_GRP_BITS (11)
+#define MAX_SPRS_PER_GRP (1 << MAX_SPRS_PER_GRP_BITS)
+#define MAX_SPRS (0x10000)
+
+/* Base addresses for the groups */
+#define SPRGROUP_SYS	(0 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_DMMU	(1 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_IMMU	(2 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_DC	(3 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_IC	(4 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_MAC	(5 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_D	(6 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PC	(7 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PM	(8 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PIC	(9 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_TT	(10 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_FP	(11 << MAX_SPRS_PER_GRP_BITS)
+
+/* System control and status group */
+#define SPR_VR		(SPRGROUP_SYS + 0)
+#define SPR_UPR		(SPRGROUP_SYS + 1)
+#define SPR_CPUCFGR	(SPRGROUP_SYS + 2)
+#define SPR_DMMUCFGR	(SPRGROUP_SYS + 3)
+#define SPR_IMMUCFGR	(SPRGROUP_SYS + 4)
+#define SPR_DCCFGR	(SPRGROUP_SYS + 5)
+#define SPR_ICCFGR	(SPRGROUP_SYS + 6)
+#define SPR_DCFGR	(SPRGROUP_SYS + 7)
+#define SPR_PCCFGR	(SPRGROUP_SYS + 8)
+#define SPR_NPC         (SPRGROUP_SYS + 16)  /* CZ 21/06/01 */
+#define SPR_SR		(SPRGROUP_SYS + 17)  /* CZ 21/06/01 */
+#define SPR_PPC         (SPRGROUP_SYS + 18)  /* CZ 21/06/01 */
+#define SPR_FPCSR       (SPRGROUP_SYS + 20)  /* CZ 21/06/01 */
+#define SPR_EPCR_BASE	(SPRGROUP_SYS + 32)  /* CZ 21/06/01 */
+#define SPR_EPCR_LAST	(SPRGROUP_SYS + 47)  /* CZ 21/06/01 */
+#define SPR_EEAR_BASE	(SPRGROUP_SYS + 48)
+#define SPR_EEAR_LAST	(SPRGROUP_SYS + 63)
+#define SPR_ESR_BASE	(SPRGROUP_SYS + 64)
+#define SPR_ESR_LAST	(SPRGROUP_SYS + 79)
+#define SPR_GPR_BASE	(SPRGROUP_SYS + 1024)
+
+/* Data MMU group */
+#define SPR_DMMUCR	(SPRGROUP_DMMU + 0)
+#define SPR_DTLBEIR	(SPRGROUP_DMMU + 2)
+#define SPR_DTLBMR_BASE(WAY)	(SPRGROUP_DMMU + 0x200 + (WAY) * 0x100)
+#define SPR_DTLBMR_LAST(WAY)	(SPRGROUP_DMMU + 0x27f + (WAY) * 0x100)
+#define SPR_DTLBTR_BASE(WAY)	(SPRGROUP_DMMU + 0x280 + (WAY) * 0x100)
+#define SPR_DTLBTR_LAST(WAY)	(SPRGROUP_DMMU + 0x2ff + (WAY) * 0x100)
+
+/* Instruction MMU group */
+#define SPR_IMMUCR	(SPRGROUP_IMMU + 0)
+#define SPR_ITLBEIR	(SPRGROUP_IMMU + 2)
+#define SPR_ITLBMR_BASE(WAY)	(SPRGROUP_IMMU + 0x200 + (WAY) * 0x100)
+#define SPR_ITLBMR_LAST(WAY)	(SPRGROUP_IMMU + 0x27f + (WAY) * 0x100)
+#define SPR_ITLBTR_BASE(WAY)	(SPRGROUP_IMMU + 0x280 + (WAY) * 0x100)
+#define SPR_ITLBTR_LAST(WAY)	(SPRGROUP_IMMU + 0x2ff + (WAY) * 0x100)
+
+/* Data cache group */
+#define SPR_DCCR	(SPRGROUP_DC + 0)
+#define SPR_DCBPR	(SPRGROUP_DC + 1)
+#define SPR_DCBFR	(SPRGROUP_DC + 2)
+#define SPR_DCBIR	(SPRGROUP_DC + 3)
+#define SPR_DCBWR	(SPRGROUP_DC + 4)
+#define SPR_DCBLR	(SPRGROUP_DC + 5)
+#define SPR_DCR_BASE(WAY)	(SPRGROUP_DC + 0x200 + (WAY) * 0x200)
+#define SPR_DCR_LAST(WAY)	(SPRGROUP_DC + 0x3ff + (WAY) * 0x200)
+
+/* Instruction cache group */
+#define SPR_ICCR	(SPRGROUP_IC + 0)
+#define SPR_ICBPR	(SPRGROUP_IC + 1)
+#define SPR_ICBIR	(SPRGROUP_IC + 2)
+#define SPR_ICBLR	(SPRGROUP_IC + 3)
+#define SPR_ICR_BASE(WAY)	(SPRGROUP_IC + 0x200 + (WAY) * 0x200)
+#define SPR_ICR_LAST(WAY)	(SPRGROUP_IC + 0x3ff + (WAY) * 0x200)
+
+/* MAC group */
+#define SPR_MACLO	(SPRGROUP_MAC + 1)
+#define SPR_MACHI	(SPRGROUP_MAC + 2)
+
+/* Debug group */
+#define SPR_DVR(N)	(SPRGROUP_D + (N))
+#define SPR_DCR(N)	(SPRGROUP_D + 8 + (N))
+#define SPR_DMR1	(SPRGROUP_D + 16)
+#define SPR_DMR2	(SPRGROUP_D + 17)
+#define SPR_DWCR0	(SPRGROUP_D + 18)
+#define SPR_DWCR1	(SPRGROUP_D + 19)
+#define SPR_DSR		(SPRGROUP_D + 20)
+#define SPR_DRR		(SPRGROUP_D + 21)
+
+/* Performance counters group */
+#define SPR_PCCR(N)	(SPRGROUP_PC + (N))
+#define SPR_PCMR(N)	(SPRGROUP_PC + 8 + (N))
+
+/* Power management group */
+#define SPR_PMR (SPRGROUP_PM + 0)
+
+/* PIC group */
+#define SPR_PICMR (SPRGROUP_PIC + 0)
+#define SPR_PICPR (SPRGROUP_PIC + 1)
+#define SPR_PICSR (SPRGROUP_PIC + 2)
+
+/* Tick Timer group */
+#define SPR_TTMR (SPRGROUP_TT + 0)
+#define SPR_TTCR (SPRGROUP_TT + 1)
+
+/*
+ * Bit definitions for the Version Register
+ *
+ */
+#define SPR_VR_VER	0xff000000  /* Processor version */
+#define SPR_VR_CFG	0x00ff0000  /* Processor configuration */
+#define SPR_VR_RES	0x0000ffc0  /* Reserved */
+#define SPR_VR_REV	0x0000003f  /* Processor revision */
+
+#define SPR_VR_VER_OFF	24
+#define SPR_VR_CFG_OFF	16
+#define SPR_VR_REV_OFF	0
+
+/*
+ * Bit definitions for the Unit Present Register
+ *
+ */
+#define SPR_UPR_UP	   0x00000001  /* UPR present */
+#define SPR_UPR_DCP	   0x00000002  /* Data cache present */
+#define SPR_UPR_ICP	   0x00000004  /* Instruction cache present */
+#define SPR_UPR_DMP	   0x00000008  /* Data MMU present */
+#define SPR_UPR_IMP	   0x00000010  /* Instruction MMU present */
+#define SPR_UPR_MP	   0x00000020  /* MAC present */
+#define SPR_UPR_DUP	   0x00000040  /* Debug unit present */
+#define SPR_UPR_PCUP	   0x00000080  /* Performance counters unit present */
+#define SPR_UPR_PMP	   0x00000100  /* Power management present */
+#define SPR_UPR_PICP	   0x00000200  /* PIC present */
+#define SPR_UPR_TTP	   0x00000400  /* Tick timer present */
+#define SPR_UPR_RES	   0x00fe0000  /* Reserved */
+#define SPR_UPR_CUP	   0xff000000  /* Context units present */
+
+/*
+ * JPB: Bit definitions for the CPU configuration register
+ *
+ */
+#define SPR_CPUCFGR_NSGF   0x0000000f  /* Number of shadow GPR files */
+#define SPR_CPUCFGR_CGF	   0x00000010  /* Custom GPR file */
+#define SPR_CPUCFGR_OB32S  0x00000020  /* ORBIS32 supported */
+#define SPR_CPUCFGR_OB64S  0x00000040  /* ORBIS64 supported */
+#define SPR_CPUCFGR_OF32S  0x00000080  /* ORFPX32 supported */
+#define SPR_CPUCFGR_OF64S  0x00000100  /* ORFPX64 supported */
+#define SPR_CPUCFGR_OV64S  0x00000200  /* ORVDX64 supported */
+#define SPR_CPUCFGR_RES	   0xfffffc00  /* Reserved */
+
+/*
+ * JPB: Bit definitions for the Debug configuration register and other
+ * constants.
+ *
+ */
+
+#define SPR_DCFGR_NDP      0x00000007  /* Number of matchpoints mask */
+#define SPR_DCFGR_NDP1     0x00000000  /* One matchpoint supported */
+#define SPR_DCFGR_NDP2     0x00000001  /* Two matchpoints supported */
+#define SPR_DCFGR_NDP3     0x00000002  /* Three matchpoints supported */
+#define SPR_DCFGR_NDP4     0x00000003  /* Four matchpoints supported */
+#define SPR_DCFGR_NDP5     0x00000004  /* Five matchpoints supported */
+#define SPR_DCFGR_NDP6     0x00000005  /* Six matchpoints supported */
+#define SPR_DCFGR_NDP7     0x00000006  /* Seven matchpoints supported */
+#define SPR_DCFGR_NDP8     0x00000007  /* Eight matchpoints supported */
+#define SPR_DCFGR_WPCI     0x00000008  /* Watchpoint counters implemented */
+
+#define MATCHPOINTS_TO_NDP(n) (1 == n ? SPR_DCFGR_NDP1 : \
+                               2 == n ? SPR_DCFGR_NDP2 : \
+                               3 == n ? SPR_DCFGR_NDP3 : \
+                               4 == n ? SPR_DCFGR_NDP4 : \
+                               5 == n ? SPR_DCFGR_NDP5 : \
+                               6 == n ? SPR_DCFGR_NDP6 : \
+                               7 == n ? SPR_DCFGR_NDP7 : SPR_DCFGR_NDP8)
+#define MAX_MATCHPOINTS  8
+#define MAX_WATCHPOINTS  (MAX_MATCHPOINTS + 2)
+
+/*
+ * Bit definitions for the Supervision Register
+ *
+ */
+#define SPR_SR_SM          0x00000001  /* Supervisor Mode */
+#define SPR_SR_TEE         0x00000002  /* Tick timer Exception Enable */
+#define SPR_SR_IEE         0x00000004  /* Interrupt Exception Enable */
+#define SPR_SR_DCE         0x00000008  /* Data Cache Enable */
+#define SPR_SR_ICE         0x00000010  /* Instruction Cache Enable */
+#define SPR_SR_DME         0x00000020  /* Data MMU Enable */
+#define SPR_SR_IME         0x00000040  /* Instruction MMU Enable */
+#define SPR_SR_LEE         0x00000080  /* Little Endian Enable */
+#define SPR_SR_CE          0x00000100  /* CID Enable */
+#define SPR_SR_F           0x00000200  /* Condition Flag */
+#define SPR_SR_CY          0x00000400  /* Carry flag */
+#define SPR_SR_OV          0x00000800  /* Overflow flag */
+#define SPR_SR_OVE         0x00001000  /* Overflow flag Exception */
+#define SPR_SR_DSX         0x00002000  /* Delay Slot Exception */
+#define SPR_SR_EPH         0x00004000  /* Exception Prefix High */
+#define SPR_SR_FO          0x00008000  /* Fixed one */
+#define SPR_SR_SUMRA       0x00010000  /* Supervisor SPR read access */
+#define SPR_SR_RES         0x0ffe0000  /* Reserved */
+#define SPR_SR_CID         0xf0000000  /* Context ID */
+
+/*
+ * Bit definitions for the Data MMU Control Register
+ *
+ */
+#define SPR_DMMUCR_P2S	   0x0000003e  /* Level 2 Page Size */
+#define SPR_DMMUCR_P1S	   0x000007c0  /* Level 1 Page Size */
+#define SPR_DMMUCR_VADDR_WIDTH	0x0000f800  /* Virtual ADDR Width */
+#define SPR_DMMUCR_PADDR_WIDTH	0x000f0000  /* Physical ADDR Width */
+
+/*
+ * Bit definitions for the Instruction MMU Control Register
+ *
+ */
+#define SPR_IMMUCR_P2S	   0x0000003e  /* Level 2 Page Size */
+#define SPR_IMMUCR_P1S	   0x000007c0  /* Level 1 Page Size */
+#define SPR_IMMUCR_VADDR_WIDTH	0x0000f800  /* Virtual ADDR Width */
+#define SPR_IMMUCR_PADDR_WIDTH	0x000f0000  /* Physical ADDR Width */
+
+/*
+ * Bit definitions for the Data TLB Match Register
+ *
+ */
+#define SPR_DTLBMR_V	   0x00000001  /* Valid */
+#define SPR_DTLBMR_PL1	   0x00000002  /* Page Level 1 (if 0 then PL2) */
+#define SPR_DTLBMR_CID	   0x0000003c  /* Context ID */
+#define SPR_DTLBMR_LRU	   0x000000c0  /* Least Recently Used */
+#define SPR_DTLBMR_VPN	   0xfffff000  /* Virtual Page Number */
+
+/*
+ * Bit definitions for the Data TLB Translate Register
+ *
+ */
+#define SPR_DTLBTR_CC	   0x00000001  /* Cache Coherency */
+#define SPR_DTLBTR_CI	   0x00000002  /* Cache Inhibit */
+#define SPR_DTLBTR_WBC	   0x00000004  /* Write-Back Cache */
+#define SPR_DTLBTR_WOM	   0x00000008  /* Weakly-Ordered Memory */
+#define SPR_DTLBTR_A	   0x00000010  /* Accessed */
+#define SPR_DTLBTR_D	   0x00000020  /* Dirty */
+#define SPR_DTLBTR_URE	   0x00000040  /* User Read Enable */
+#define SPR_DTLBTR_UWE	   0x00000080  /* User Write Enable */
+#define SPR_DTLBTR_SRE	   0x00000100  /* Supervisor Read Enable */
+#define SPR_DTLBTR_SWE	   0x00000200  /* Supervisor Write Enable */
+#define SPR_DTLBTR_PPN	   0xfffff000  /* Physical Page Number */
+
+/*
+ * Bit definitions for the Instruction TLB Match Register
+ *
+ */
+#define SPR_ITLBMR_V	   0x00000001  /* Valid */
+#define SPR_ITLBMR_PL1	   0x00000002  /* Page Level 1 (if 0 then PL2) */
+#define SPR_ITLBMR_CID	   0x0000003c  /* Context ID */
+#define SPR_ITLBMR_LRU	   0x000000c0  /* Least Recently Used */
+#define SPR_ITLBMR_VPN	   0xfffff000  /* Virtual Page Number */
+
+/*
+ * Bit definitions for the Instruction TLB Translate Register
+ *
+ */
+#define SPR_ITLBTR_CC	   0x00000001  /* Cache Coherency */
+#define SPR_ITLBTR_CI	   0x00000002  /* Cache Inhibit */
+#define SPR_ITLBTR_WBC	   0x00000004  /* Write-Back Cache */
+#define SPR_ITLBTR_WOM	   0x00000008  /* Weakly-Ordered Memory */
+#define SPR_ITLBTR_A	   0x00000010  /* Accessed */
+#define SPR_ITLBTR_D	   0x00000020  /* Dirty */
+#define SPR_ITLBTR_SXE	   0x00000040  /* Supervisor Execute Enable */
+#define SPR_ITLBTR_UXE	   0x00000080  /* User Execute Enable */
+#define SPR_ITLBTR_PPN	   0xfffff000  /* Physical Page Number */
+
+/*
+ * Bit definitions for Data Cache Control register
+ *
+ */
+#define SPR_DCCR_EW	   0x000000ff  /* Enable ways */
+
+/*
+ * Bit definitions for Insn Cache Control register
+ *
+ */
+#define SPR_ICCR_EW	   0x000000ff  /* Enable ways */
+
+/*
+ * Bit definitions for Data Cache Configuration Register
+ *
+ */
+
+#define SPR_DCCFGR_NCW		0x00000007
+#define SPR_DCCFGR_NCS		0x00000078
+#define SPR_DCCFGR_CBS		0x00000080
+#define SPR_DCCFGR_CWS		0x00000100
+#define SPR_DCCFGR_CCRI		0x00000200
+#define SPR_DCCFGR_CBIRI	0x00000400
+#define SPR_DCCFGR_CBPRI	0x00000800
+#define SPR_DCCFGR_CBLRI	0x00001000
+#define SPR_DCCFGR_CBFRI	0x00002000
+#define SPR_DCCFGR_CBWBRI	0x00004000
+
+#define SPR_DCCFGR_NCW_OFF      0
+#define SPR_DCCFGR_NCS_OFF      3
+#define SPR_DCCFGR_CBS_OFF	7
+
+/*
+ * Bit definitions for Instruction Cache Configuration Register
+ *
+ */
+#define SPR_ICCFGR_NCW		0x00000007
+#define SPR_ICCFGR_NCS		0x00000078
+#define SPR_ICCFGR_CBS		0x00000080
+#define SPR_ICCFGR_CCRI		0x00000200
+#define SPR_ICCFGR_CBIRI	0x00000400
+#define SPR_ICCFGR_CBPRI	0x00000800
+#define SPR_ICCFGR_CBLRI	0x00001000
+
+#define SPR_ICCFGR_NCW_OFF      0
+#define SPR_ICCFGR_NCS_OFF      3
+#define SPR_ICCFGR_CBS_OFF	7
+
+/*
+ * Bit definitions for Data MMU Configuration Register
+ *
+ */
+
+#define SPR_DMMUCFGR_NTW	0x00000003
+#define SPR_DMMUCFGR_NTS	0x0000001C
+#define SPR_DMMUCFGR_NAE	0x000000E0
+#define SPR_DMMUCFGR_CRI	0x00000100
+#define SPR_DMMUCFGR_PRI        0x00000200
+#define SPR_DMMUCFGR_TEIRI	0x00000400
+#define SPR_DMMUCFGR_HTR	0x00000800
+
+#define SPR_DMMUCFGR_NTW_OFF	0
+#define SPR_DMMUCFGR_NTS_OFF	2
+
+/*
+ * Bit definitions for Instruction MMU Configuration Register
+ *
+ */
+
+#define SPR_IMMUCFGR_NTW	0x00000003
+#define SPR_IMMUCFGR_NTS	0x0000001C
+#define SPR_IMMUCFGR_NAE	0x000000E0
+#define SPR_IMMUCFGR_CRI	0x00000100
+#define SPR_IMMUCFGR_PRI	0x00000200
+#define SPR_IMMUCFGR_TEIRI	0x00000400
+#define SPR_IMMUCFGR_HTR	0x00000800
+
+#define SPR_IMMUCFGR_NTW_OFF	0
+#define SPR_IMMUCFGR_NTS_OFF	2
+
+/*
+ * Bit definitions for Debug Control registers
+ *
+ */
+#define SPR_DCR_DP	0x00000001  /* DVR/DCR present */
+#define SPR_DCR_CC	0x0000000e  /* Compare condition */
+#define SPR_DCR_SC	0x00000010  /* Signed compare */
+#define SPR_DCR_CT	0x000000e0  /* Compare to */
+
+/* Bit results with SPR_DCR_CC mask */
+#define SPR_DCR_CC_MASKED 0x00000000
+#define SPR_DCR_CC_EQUAL  0x00000002
+#define SPR_DCR_CC_LESS   0x00000004
+#define SPR_DCR_CC_LESSE  0x00000006
+#define SPR_DCR_CC_GREAT  0x00000008
+#define SPR_DCR_CC_GREATE 0x0000000a
+#define SPR_DCR_CC_NEQUAL 0x0000000c
+
+/* Bit results with SPR_DCR_CT mask */
+#define SPR_DCR_CT_DISABLED 0x00000000
+#define SPR_DCR_CT_IFEA     0x00000020
+#define SPR_DCR_CT_LEA      0x00000040
+#define SPR_DCR_CT_SEA      0x00000060
+#define SPR_DCR_CT_LD       0x00000080
+#define SPR_DCR_CT_SD       0x000000a0
+#define SPR_DCR_CT_LSEA     0x000000c0
+#define SPR_DCR_CT_LSD	    0x000000e0
+/* SPR_DCR_CT_LSD doesn't seem to be implemented anywhere in or1ksim. 2004-1-30 HP */
+
+/*
+ * Bit definitions for Debug Mode 1 register
+ *
+ */
+#define SPR_DMR1_CW       0x000fffff  /* Chain register pair data */
+#define SPR_DMR1_CW0_AND  0x00000001
+#define SPR_DMR1_CW0_OR   0x00000002
+#define SPR_DMR1_CW0      (SPR_DMR1_CW0_AND | SPR_DMR1_CW0_OR)
+#define SPR_DMR1_CW1_AND  0x00000004
+#define SPR_DMR1_CW1_OR   0x00000008
+#define SPR_DMR1_CW1      (SPR_DMR1_CW1_AND | SPR_DMR1_CW1_OR)
+#define SPR_DMR1_CW2_AND  0x00000010
+#define SPR_DMR1_CW2_OR   0x00000020
+#define SPR_DMR1_CW2      (SPR_DMR1_CW2_AND | SPR_DMR1_CW2_OR)
+#define SPR_DMR1_CW3_AND  0x00000040
+#define SPR_DMR1_CW3_OR   0x00000080
+#define SPR_DMR1_CW3      (SPR_DMR1_CW3_AND | SPR_DMR1_CW3_OR)
+#define SPR_DMR1_CW4_AND  0x00000100
+#define SPR_DMR1_CW4_OR   0x00000200
+#define SPR_DMR1_CW4      (SPR_DMR1_CW4_AND | SPR_DMR1_CW4_OR)
+#define SPR_DMR1_CW5_AND  0x00000400
+#define SPR_DMR1_CW5_OR   0x00000800
+#define SPR_DMR1_CW5      (SPR_DMR1_CW5_AND | SPR_DMR1_CW5_OR)
+#define SPR_DMR1_CW6_AND  0x00001000
+#define SPR_DMR1_CW6_OR   0x00002000
+#define SPR_DMR1_CW6      (SPR_DMR1_CW6_AND | SPR_DMR1_CW6_OR)
+#define SPR_DMR1_CW7_AND  0x00004000
+#define SPR_DMR1_CW7_OR   0x00008000
+#define SPR_DMR1_CW7      (SPR_DMR1_CW7_AND | SPR_DMR1_CW7_OR)
+#define SPR_DMR1_CW8_AND  0x00010000
+#define SPR_DMR1_CW8_OR   0x00020000
+#define SPR_DMR1_CW8      (SPR_DMR1_CW8_AND | SPR_DMR1_CW8_OR)
+#define SPR_DMR1_CW9_AND  0x00040000
+#define SPR_DMR1_CW9_OR   0x00080000
+#define SPR_DMR1_CW9      (SPR_DMR1_CW9_AND | SPR_DMR1_CW9_OR)
+#define SPR_DMR1_RES1      0x00300000  /* Reserved */
+#define SPR_DMR1_ST	  0x00400000  /* Single-step trace */
+#define SPR_DMR1_BT	  0x00800000  /* Branch trace */
+#define SPR_DMR1_RES2	  0xff000000  /* Reserved */
+
+/*
+ * Bit definitions for Debug Mode 2 register. AWTC and WGB corrected by JPB
+ *
+ */
+#define SPR_DMR2_WCE0	   0x00000001  /* Watchpoint counter 0 enable */
+#define SPR_DMR2_WCE1	   0x00000002  /* Watchpoint counter 1 enable */
+#define SPR_DMR2_AWTC	   0x00000ffc  /* Assign watchpoints to counters */
+#define SPR_DMR2_AWTC_OFF           2  /* Bit offset to AWTC field */
+#define SPR_DMR2_WGB	   0x003ff000  /* Watchpoints generating breakpoint */
+#define SPR_DMR2_WGB_OFF           12  /* Bit offset to WGB field */
+#define SPR_DMR2_WBS	   0xffc00000  /* JPB: Watchpoint status */
+#define SPR_DMR2_WBS_OFF           22  /* Bit offset to WBS field */
+
+/*
+ * Bit definitions for Debug watchpoint counter registers
+ *
+ */
+#define SPR_DWCR_COUNT	    0x0000ffff  /* Count */
+#define SPR_DWCR_MATCH	    0xffff0000  /* Match */
+#define SPR_DWCR_MATCH_OFF          16  /* Match bit offset */
+
+/*
+ * Bit definitions for Debug stop register
+ *
+ */
+#define SPR_DSR_RSTE	0x00000001  /* Reset exception */
+#define SPR_DSR_BUSEE	0x00000002  /* Bus error exception */
+#define SPR_DSR_DPFE	0x00000004  /* Data Page Fault exception */
+#define SPR_DSR_IPFE	0x00000008  /* Insn Page Fault exception */
+#define SPR_DSR_TTE	0x00000010  /* Tick Timer exception */
+#define SPR_DSR_AE	0x00000020  /* Alignment exception */
+#define SPR_DSR_IIE	0x00000040  /* Illegal Instruction exception */
+#define SPR_DSR_IE	0x00000080  /* Interrupt exception */
+#define SPR_DSR_DME	0x00000100  /* DTLB miss exception */
+#define SPR_DSR_IME	0x00000200  /* ITLB miss exception */
+#define SPR_DSR_RE	0x00000400  /* Range exception */
+#define SPR_DSR_SCE	0x00000800  /* System call exception */
+#define SPR_DSR_FPE     0x00001000  /* Floating Point Exception */
+#define SPR_DSR_TE	0x00002000  /* Trap exception */
+
+/*
+ * Bit definitions for Debug reason register
+ *
+ */
+#define SPR_DRR_RSTE	0x00000001  /* Reset exception */
+#define SPR_DRR_BUSEE	0x00000002  /* Bus error exception */
+#define SPR_DRR_DPFE	0x00000004  /* Data Page Fault exception */
+#define SPR_DRR_IPFE	0x00000008  /* Insn Page Fault exception */
+#define SPR_DRR_TTE	0x00000010  /* Tick Timer exception */
+#define SPR_DRR_AE	0x00000020  /* Alignment exception */
+#define SPR_DRR_IIE	0x00000040  /* Illegal Instruction exception */
+#define SPR_DRR_IE	0x00000080  /* Interrupt exception */
+#define SPR_DRR_DME	0x00000100  /* DTLB miss exception */
+#define SPR_DRR_IME	0x00000200  /* ITLB miss exception */
+#define SPR_DRR_RE	0x00000400  /* Range exception */
+#define SPR_DRR_SCE	0x00000800  /* System call exception */
+#define SPR_DRR_FPE     0x00001000  /* Floating Point Exception */
+#define SPR_DRR_TE	0x00002000  /* Trap exception */
+
+/*
+ * Bit definitions for Performance counters mode registers
+ *
+ */
+#define SPR_PCMR_CP	0x00000001  /* Counter present */
+#define SPR_PCMR_UMRA	0x00000002  /* User mode read access */
+#define SPR_PCMR_CISM	0x00000004  /* Count in supervisor mode */
+#define SPR_PCMR_CIUM	0x00000008  /* Count in user mode */
+#define SPR_PCMR_LA	0x00000010  /* Load access event */
+#define SPR_PCMR_SA	0x00000020  /* Store access event */
+#define SPR_PCMR_IF	0x00000040  /* Instruction fetch event */
+#define SPR_PCMR_DCM	0x00000080  /* Data cache miss event */
+#define SPR_PCMR_ICM	0x00000100  /* Insn cache miss event */
+#define SPR_PCMR_IFS	0x00000200  /* Insn fetch stall event */
+#define SPR_PCMR_LSUS	0x00000400  /* LSU stall event */
+#define SPR_PCMR_BS	0x00000800  /* Branch stall event */
+#define SPR_PCMR_DTLBM	0x00001000  /* DTLB miss event */
+#define SPR_PCMR_ITLBM	0x00002000  /* ITLB miss event */
+#define SPR_PCMR_DDS	0x00004000  /* Data dependency stall event */
+#define SPR_PCMR_WPE	0x03ff8000  /* Watchpoint events */
+
+/*
+ * Bit definitions for the Power management register
+ *
+ */
+#define SPR_PMR_SDF	0x0000000f  /* Slow down factor */
+#define SPR_PMR_DME	0x00000010  /* Doze mode enable */
+#define SPR_PMR_SME	0x00000020  /* Sleep mode enable */
+#define SPR_PMR_DCGE	0x00000040  /* Dynamic clock gating enable */
+#define SPR_PMR_SUME	0x00000080  /* Suspend mode enable */
+
+/*
+ * Bit definitions for PICMR
+ *
+ */
+#define SPR_PICMR_IUM	0xfffffffc  /* Interrupt unmask */
+
+/*
+ * Bit definitions for PICPR
+ *
+ */
+#define SPR_PICPR_IPRIO	0xfffffffc  /* Interrupt priority */
+
+/*
+ * Bit definitions for PICSR
+ *
+ */
+#define SPR_PICSR_IS	0xffffffff  /* Interrupt status */
+
+/*
+ * Bit definitions for Tick Timer Control Register
+ *
+ */
+
+#define SPR_TTCR_CNT	0xffffffff  /* Count, time period */
+#define SPR_TTMR_TP	0x0fffffff  /* Time period */
+#define SPR_TTMR_IP	0x10000000  /* Interrupt Pending */
+#define SPR_TTMR_IE	0x20000000  /* Interrupt Enable */
+#define SPR_TTMR_DI	0x00000000  /* Disabled */
+#define SPR_TTMR_RT	0x40000000  /* Restart tick */
+#define SPR_TTMR_SR     0x80000000  /* Single run */
+#define SPR_TTMR_CR     0xc0000000  /* Continuous run */
+#define SPR_TTMR_M      0xc0000000  /* Tick mode */
+
+/*
+ * Bit definitions for the FP Control Status Register
+ *
+ */
+#define SPR_FPCSR_FPEE  0x00000001  /* Floating Point Exception Enable */
+#define SPR_FPCSR_RM    0x00000006  /* Rounding Mode */
+#define SPR_FPCSR_OVF   0x00000008  /* Overflow Flag */
+#define SPR_FPCSR_UNF   0x00000010  /* Underflow Flag */
+#define SPR_FPCSR_SNF   0x00000020  /* SNAN Flag */
+#define SPR_FPCSR_QNF   0x00000040  /* QNAN Flag */
+#define SPR_FPCSR_ZF    0x00000080  /* Zero Flag */
+#define SPR_FPCSR_IXF   0x00000100  /* Inexact Flag */
+#define SPR_FPCSR_IVF   0x00000200  /* Invalid Flag */
+#define SPR_FPCSR_INF   0x00000400  /* Infinity Flag */
+#define SPR_FPCSR_DZF   0x00000800  /* Divide By Zero Flag */
+#define SPR_FPCSR_ALLF (SPR_FPCSR_OVF | SPR_FPCSR_UNF | SPR_FPCSR_SNF | \
+			SPR_FPCSR_QNF | SPR_FPCSR_ZF | SPR_FPCSR_IXF |  \
+			SPR_FPCSR_IVF | SPR_FPCSR_INF | SPR_FPCSR_DZF)
+
+#define FPCSR_RM_RN (0<<1)
+#define FPCSR_RM_RZ (1<<1)
+#define FPCSR_RM_RIP (2<<1)
+#define FPCSR_RM_RIN (3<<1)
+
+/*
+ * l.nop constants
+ *
+ */
+#define NOP_NOP          0x0000      /* Normal nop instruction */
+#define NOP_EXIT         0x0001      /* End of simulation */
+#define NOP_REPORT       0x0002      /* Simple report */
+/*#define NOP_PRINTF       0x0003       Simprintf instruction (obsolete)*/
+#define NOP_PUTC         0x0004      /* JPB: Simputc instruction */
+#define NOP_CNT_RESET    0x0005	     /* Reset statistics counters */
+#define NOP_GET_TICKS    0x0006	     /* JPB: Get # ticks running */
+#define NOP_GET_PS       0x0007      /* JPB: Get picosecs/cycle */
+#define NOP_REPORT_FIRST 0x0400      /* Report with number */
+#define NOP_REPORT_LAST  0x03ff      /* Report with number */
+
+#endif	/* SPR_DEFS__H */
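
As a rough, hypothetical sketch (not part of the patch) of how the tick-timer bits above are meant to be combined, a periodic timer interrupt could be armed along these lines, assuming the mtspr() helper from <asm/spr.h> and the SPR_TTMR/SPR_TTCR register numbers defined earlier in this file:

	/* Hypothetical sketch: arm the tick timer for a periodic interrupt. */
	static void example_start_tick(unsigned long period)
	{
		/* continuous run, interrupt enable, period in the low 28 bits */
		mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | (period & SPR_TTMR_TP));
		/* restart counting from zero */
		mtspr(SPR_TTCR, 0);
	}
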
diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h
new file mode 100644
index 000000000000..9f0337055d26
--- /dev/null
+++ b/arch/openrisc/include/asm/syscall.h
@@ -0,0 +1,77 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSCALL_H__
+#define __ASM_OPENRISC_SYSCALL_H__
+
+#include <linux/err.h>
+#include <linux/sched.h>
+
+static inline int
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+	return regs->syscallno ? regs->syscallno : -1;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+	regs->gpr[11] = regs->orig_gpr11;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+	return IS_ERR_VALUE(regs->gpr[11]) ? regs->gpr[11] : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+	return regs->gpr[11];
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+			 int error, long val)
+{
+	if (error)
+		regs->gpr[11] = -error;
+	else
+		regs->gpr[11] = val;
+}
+
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+		      unsigned int i, unsigned int n, unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+
+	memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+}
+
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+		      unsigned int i, unsigned int n, const unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+
+	memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+}
+
+#endif
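
For orientation only (not part of the patch), generic tracing code is the intended consumer of these accessors; a minimal, hypothetical use, assuming the usual kernel headers, might look like:

	static void example_dump_syscall(struct task_struct *task,
					 struct pt_regs *regs)
	{
		unsigned long args[6];
		int nr = syscall_get_nr(task, regs);

		/* fetch all six possible arguments, which live in r3..r8 */
		syscall_get_arguments(task, regs, 0, 6, args);
		pr_debug("syscall %d(%lx, %lx, ...)\n", nr, args[0], args[1]);
	}
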
diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h
new file mode 100644
index 000000000000..84a978af44d7
--- /dev/null
+++ b/arch/openrisc/include/asm/syscalls.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSCALLS_H
+#define __ASM_OPENRISC_SYSCALLS_H
+
+asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1,
+				unsigned long *v2);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __ASM_OPENRISC_SYSCALLS_H */
diff --git a/arch/openrisc/include/asm/system.h b/arch/openrisc/include/asm/system.h
new file mode 100644
index 000000000000..cf658882186b
--- /dev/null
+++ b/arch/openrisc/include/asm/system.h
@@ -0,0 +1,35 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSTEM_H
+#define __ASM_OPENRISC_SYSTEM_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <asm/spr.h>
+#include <asm-generic/system.h>
+
+/* We probably need this definition, but the generic system.h provides it
+ * and it's not used on our arch anyway...
+ */
+/*#define nop() __asm__ __volatile__ ("l.nop"::)*/
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_OPENRISC_SYSTEM_H */
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
new file mode 100644
index 000000000000..07a8bc080ef2
--- /dev/null
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -0,0 +1,134 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/processor.h>
+#endif
+
+
+/* THREAD_SIZE is the size of the task_struct/kernel_stack combo.
+ * Normally, the stack is found by doing something like p + THREAD_SIZE;
+ * in or32, a page is 8192 bytes, which seems like a sane size.
+ */
+
+#define THREAD_SIZE_ORDER 0
+#define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ *   must also be changed
+ */
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+struct thread_info {
+	struct task_struct	*task;		/* main task structure */
+	struct exec_domain	*exec_domain;	/* execution domain */
+	unsigned long		flags;		/* low level flags */
+	__u32			cpu;		/* current CPU */
+	__s32			preempt_count; /* 0 => preemptable, <0 => BUG */
+
+	mm_segment_t		addr_limit; /* thread address space:
+					       0-0x7FFFFFFF for user-thread
+					       0-0xFFFFFFFF for kernel-thread
+					     */
+	struct restart_block    restart_block;
+	__u8			supervisor_stack[0];
+
+	/* saved context data */
+	unsigned long           ksp;
+};
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#ifndef __ASSEMBLY__
+#define INIT_THREAD_INFO(tsk)				\
+{							\
+	.task		= &tsk,				\
+	.exec_domain	= &default_exec_domain,		\
+	.flags		= 0,				\
+	.cpu		= 0,				\
+	.preempt_count	= 1,				\
+	.addr_limit	= KERNEL_DS,			\
+	.restart_block  = {				\
+			  .fn = do_no_restart_syscall,	\
+	},						\
+	.ksp            = 0,                            \
+}
+
+#define init_thread_info	(init_thread_union.thread_info)
+
+/* how to get the thread information struct from C */
+register struct thread_info *current_thread_info_reg asm("r10");
+#define current_thread_info()   (current_thread_info_reg)
+
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ *   these are process state flags that various assembly files may need to
+ *   access
+ *   - pending work-to-be-done flags are in LSW
+ *   - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_SINGLESTEP		4	/* restore singlestep on return to user
+					 * mode
+					 */
+#define TIF_SYSCALL_TRACEPOINT  8       /* for ftrace syscall instrumentation */
+#define TIF_RESTORE_SIGMASK     9
+#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling
+					 * TIF_NEED_RESCHED
+					 */
+#define TIF_MEMDIE              17
+
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
+#define _TIF_RESTORE_SIGMASK     (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+
+
+/* Work to do when returning from interrupt/exception */
+/* For OpenRISC, this is anything in the LSW other than syscall trace */
+#define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP))
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
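
As a hedged illustration of how these pieces fit together (not part of the patch; example_need_resched() is a hypothetical helper and test_bit() comes from <linux/bitops.h>), a flag test via the register-backed current_thread_info() boils down to:

	/* Hypothetical helper: is a reschedule pending for the current task? */
	static inline int example_need_resched(void)
	{
		return test_bit(TIF_NEED_RESCHED, &current_thread_info()->flags);
	}
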
diff --git a/arch/openrisc/include/asm/timex.h b/arch/openrisc/include/asm/timex.h
new file mode 100644
index 000000000000..9935cad1b9b9
--- /dev/null
+++ b/arch/openrisc/include/asm/timex.h
@@ -0,0 +1,36 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TIMEX_H
+#define __ASM_OPENRISC_TIMEX_H
+
+#define get_cycles get_cycles
+
+#include <asm-generic/timex.h>
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+
+static inline cycles_t get_cycles(void)
+{
+	return mfspr(SPR_TTCR);
+}
+
+/* This isn't really used any more */
+#define CLOCK_TICK_RATE 1000
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+
+#endif
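
A minimal sketch of what get_cycles() buys you (illustrative only and not part of the patch; do_work() is a placeholder):

	static void example_time_it(void)
	{
		cycles_t t0, t1;

		t0 = get_cycles();
		do_work();		/* placeholder for the code being measured */
		t1 = get_cycles();
		pr_info("took %lu tick-timer cycles\n", (unsigned long)(t1 - t0));
	}
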
diff --git a/arch/openrisc/include/asm/tlb.h b/arch/openrisc/include/asm/tlb.h
new file mode 100644
index 000000000000..fa4376a4515d
--- /dev/null
+++ b/arch/openrisc/include/asm/tlb.h
@@ -0,0 +1,34 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TLB_H__
+#define __ASM_OPENRISC_TLB_H__
+
+/*
+ * or32 doesn't need any special per-pte or
+ * per-vma handling..
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#endif /* __ASM_OPENRISC_TLB_H__ */
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
new file mode 100644
index 000000000000..6a2accd6cb67
--- /dev/null
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -0,0 +1,55 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TLBFLUSH_H
+#define __ASM_OPENRISC_TLBFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+
+/*
+ *  - flush_tlb() flushes the current mm struct TLBs
+ *  - flush_tlb_all() flushes all processes TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(mm, start, end) flushes a range of pages
+ */
+
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma,
+		     unsigned long start,
+		     unsigned long end);
+
+static inline void flush_tlb(void)
+{
+	flush_tlb_mm(current->mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_range(NULL, start, end);
+}
+
+#endif /* __ASM_OPENRISC_TLBFLUSH_H */
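
By way of a hypothetical example (not part of the patch), the per-page flush is what generic mm code relies on after modifying a user mapping, roughly like this, assuming set_pte_at() from <asm/pgtable.h>:

	/* Hypothetical: install a new PTE and drop any stale translation. */
	static void example_update_pte(struct vm_area_struct *vma,
				       unsigned long addr, pte_t *ptep, pte_t pte)
	{
		set_pte_at(vma->vm_mm, addr, ptep, pte);
		flush_tlb_page(vma, addr);
	}
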
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
new file mode 100644
index 000000000000..c310e45b538e
--- /dev/null
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -0,0 +1,355 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_UACCESS_H
+#define __ASM_OPENRISC_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/errno.h>
+#include <linux/thread_info.h>
+#include <linux/prefetch.h>
+#include <linux/string.h>
+#include <asm/page.h>
+
+#define VERIFY_READ	0
+#define VERIFY_WRITE	1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+/* addr_limit is the maximum accessible address for the task.  We misuse
+ * the KERNEL_DS and USER_DS values to both assign and compare the
+ * addr_limit values through the equally misnamed get/set_fs macros.
+ * (see above)
+ */
+
+#define KERNEL_DS	(~0UL)
+#define get_ds()	(KERNEL_DS)
+
+#define USER_DS		(TASK_SIZE)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b)	((a) == (b))
+
+/* Ensure that the range from addr to addr+size is all within the process'
+ * address space
+ */
+#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs()-size))
+
+/* Ensure that addr is below task's addr_limit */
+#define __addr_ok(addr) ((unsigned long) addr < get_fs())
+
+#define access_ok(type, addr, size) \
+	__range_ok((unsigned long)addr, (unsigned long)size)
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+	unsigned long insn, fixup;
+};
+
+/* Returns 0 if exception not found and fixup otherwise.  */
+extern unsigned long search_exception_table(unsigned long);
+extern void sort_exception_table(void);
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on
+ * OpenRISC, we can just do these as direct assignments.  (Of course, the
+ * exception handling means that it's no longer "just"...)
+ */
+#define get_user(x, ptr) \
+	__get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+extern long __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size)		\
+({							\
+	long __pu_err;					\
+	__put_user_size((x), (ptr), (size), __pu_err);	\
+	__pu_err;					\
+})
+
+#define __put_user_check(x, ptr, size)					\
+({									\
+	long __pu_err = -EFAULT;					\
+	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
+	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
+		__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__pu_err;							\
+})
+
+#define __put_user_size(x, ptr, size, retval)				\
+do {									\
+	retval = 0;							\
+	switch (size) {							\
+	case 1: __put_user_asm(x, ptr, retval, "l.sb"); break;		\
+	case 2: __put_user_asm(x, ptr, retval, "l.sh"); break;		\
+	case 4: __put_user_asm(x, ptr, retval, "l.sw"); break;		\
+	case 8: __put_user_asm2(x, ptr, retval); break;			\
+	default: __put_user_bad();					\
+	}								\
+} while (0)
+
+struct __large_struct {
+	unsigned long buf[100];
+};
+#define __m(x) (*(struct __large_struct *)(x))
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, op)			\
+	__asm__ __volatile__(					\
+		"1:	"op" 0(%2),%1\n"			\
+		"2:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"3:	l.addi %0,r0,%3\n"			\
+		"	l.j 2b\n"				\
+		"	l.nop\n"				\
+		".previous\n"					\
+		".section __ex_table,\"a\"\n"			\
+		"	.align 2\n"				\
+		"	.long 1b,3b\n"				\
+		".previous"					\
+		: "=r"(err)					\
+		: "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __put_user_asm2(x, addr, err)				\
+	__asm__ __volatile__(					\
+		"1:	l.sw 0(%2),%1\n"			\
+		"2:	l.sw 4(%2),%H1\n"			\
+		"3:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"4:	l.addi %0,r0,%3\n"			\
+		"	l.j 3b\n"				\
+		"	l.nop\n"				\
+		".previous\n"					\
+		".section __ex_table,\"a\"\n"			\
+		"	.align 2\n"				\
+		"	.long 1b,4b\n"				\
+		"	.long 2b,4b\n"				\
+		".previous"					\
+		: "=r"(err)					\
+		: "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __get_user_nocheck(x, ptr, size)			\
+({								\
+	long __gu_err, __gu_val;				\
+	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
+	(x) = (__typeof__(*(ptr)))__gu_val;			\
+	__gu_err;						\
+})
+
+#define __get_user_check(x, ptr, size)					\
+({									\
+	long __gu_err = -EFAULT, __gu_val = 0;				\
+	const __typeof__(*(ptr)) * __gu_addr = (ptr);			\
+	if (access_ok(VERIFY_READ, __gu_addr, size))			\
+		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	(x) = (__typeof__(*(ptr)))__gu_val;				\
+	__gu_err;							\
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x, ptr, size, retval)				\
+do {									\
+	retval = 0;							\
+	switch (size) {							\
+	case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;		\
+	case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;		\
+	case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;		\
+	case 8: __get_user_asm2(x, ptr, retval); break;			\
+	default: (x) = __get_user_bad();				\
+	}								\
+} while (0)
+
+#define __get_user_asm(x, addr, err, op)		\
+	__asm__ __volatile__(				\
+		"1:	"op" %1,0(%2)\n"		\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	l.addi %0,r0,%3\n"		\
+		"	l.addi %1,r0,0\n"		\
+		"	l.j 2b\n"			\
+		"	l.nop\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+		"	.align 2\n"			\
+		"	.long 1b,3b\n"			\
+		".previous"				\
+		: "=r"(err), "=r"(x)			\
+		: "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __get_user_asm2(x, addr, err)			\
+	__asm__ __volatile__(				\
+		"1:	l.lwz %1,0(%2)\n"		\
+		"2:	l.lwz %H1,4(%2)\n"		\
+		"3:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"4:	l.addi %0,r0,%3\n"		\
+		"	l.addi %1,r0,0\n"		\
+		"	l.addi %H1,r0,0\n"		\
+		"	l.j 3b\n"			\
+		"	l.nop\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+		"	.align 2\n"			\
+		"	.long 1b,4b\n"			\
+		"	.long 2b,4b\n"			\
+		".previous"				\
+		: "=r"(err), "=&r"(x)			\
+		: "r"(addr), "i"(-EFAULT), "0"(err))
+
+/* more complex routines */
+
+extern unsigned long __must_check
+__copy_tofrom_user(void *to, const void *from, unsigned long size);
+
+#define __copy_from_user(to, from, size) \
+	__copy_tofrom_user(to, from, size)
+#define __copy_to_user(to, from, size) \
+	__copy_tofrom_user(to, from, size)
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+static inline unsigned long
+copy_from_user(void *to, const void *from, unsigned long n)
+{
+	unsigned long over;
+
+	if (access_ok(VERIFY_READ, from, n))
+		return __copy_tofrom_user(to, from, n);
+	if ((unsigned long)from < TASK_SIZE) {
+		over = (unsigned long)from + n - TASK_SIZE;
+		return __copy_tofrom_user(to, from, n - over) + over;
+	}
+	return n;
+}
+
+static inline unsigned long
+copy_to_user(void *to, const void *from, unsigned long n)
+{
+	unsigned long over;
+
+	if (access_ok(VERIFY_WRITE, to, n))
+		return __copy_tofrom_user(to, from, n);
+	if ((unsigned long)to < TASK_SIZE) {
+		over = (unsigned long)to + n - TASK_SIZE;
+		return __copy_tofrom_user(to, from, n - over) + over;
+	}
+	return n;
+}
+
+extern unsigned long __clear_user(void *addr, unsigned long size);
+
+static inline __must_check unsigned long
+clear_user(void *addr, unsigned long size)
+{
+
+	if (access_ok(VERIFY_WRITE, addr, size))
+		return __clear_user(addr, size);
+	if ((unsigned long)addr < TASK_SIZE) {
+		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+		return __clear_user(addr, size - over) + over;
+	}
+	return size;
+}
+
+extern int __strncpy_from_user(char *dst, const char *src, long count);
+
+static inline long strncpy_from_user(char *dst, const char *src, long count)
+{
+	if (access_ok(VERIFY_READ, src, 1))
+		return __strncpy_from_user(dst, src, count);
+	return -EFAULT;
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+
+extern int __strnlen_user(const char *str, long len, unsigned long top);
+
+/*
+ * Returns the length of the string at str (including the null byte),
+ * or 0 if we hit a page we can't access,
+ * or something > len if we didn't find a null byte.
+ *
+ * The `top' parameter to __strnlen_user is to make sure that
+ * we can never overflow from the user area into kernel space.
+ */
+static inline long strnlen_user(const char __user *str, long len)
+{
+	unsigned long top = (unsigned long)get_fs();
+	unsigned long res = 0;
+
+	if (__addr_ok(str))
+		res = __strnlen_user(str, len, top);
+
+	return res;
+}
+
+#define strlen_user(str) strnlen_user(str, TASK_SIZE-1)
+
+#endif /* __ASM_OPENRISC_UACCESS_H */
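
As a hedged usage sketch (not part of the patch; example_write() is hypothetical), a typical consumer checks the copy result rather than calling access_ok() by hand:

	/* Hypothetical write() handler: pull a small buffer in from userspace. */
	static ssize_t example_write(const char __user *ubuf, size_t len)
	{
		char kbuf[64];

		if (len > sizeof(kbuf) - 1)
			len = sizeof(kbuf) - 1;
		if (copy_from_user(kbuf, ubuf, len))
			return -EFAULT;	/* some bytes could not be copied */
		kbuf[len] = '\0';
		/* ... act on kbuf ... */
		return len;
	}
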
diff --git a/arch/openrisc/include/asm/unaligned.h b/arch/openrisc/include/asm/unaligned.h
new file mode 100644
index 000000000000..1141cbd6fd72
--- /dev/null
+++ b/arch/openrisc/include/asm/unaligned.h
@@ -0,0 +1,51 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_UNALIGNED_H
+#define __ASM_OPENRISC_UNALIGNED_H
+
+/*
+ * This is copied from the generic implementation and the C-struct
+ * variant replaced with the memmove variant.  The GCC compiler
+ * for the OR32 arch optimizes too aggressively for the C-struct
+ * variant to work, so use the memmove variant instead.
+ *
+ * It may be worth considering implementing the unaligned access
+ * exception handler and allowing unaligned accesses (access_ok.h)...
+ * not sure if it would be much of a performance win without further
+ * investigation.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_memmove.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_le
+# define put_unaligned	__put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_memmove.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_be
+# define put_unaligned	__put_unaligned_be
+#else
+# error need to define endianess
+#endif
+
+#endif /* __ASM_OPENRISC_UNALIGNED_H */
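
A small, hypothetical example of the resulting API (not part of the patch): reading a 32-bit word from a packed, possibly misaligned buffer without risking an alignment exception:

	static u32 example_read_word(const void *p)
	{
		/* expands to the memmove-based helper, so any alignment is fine */
		return get_unaligned((const u32 *)p);
	}
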
diff --git a/arch/openrisc/include/asm/unistd.h b/arch/openrisc/include/asm/unistd.h
new file mode 100644
index 000000000000..89af3ab5c2e9
--- /dev/null
+++ b/arch/openrisc/include/asm/unistd.h
@@ -0,0 +1,31 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#if !defined(__ASM_OPENRISC_UNISTD_H) || defined(__SYSCALL)
+#define __ASM_OPENRISC_UNISTD_H
+
+#define __ARCH_HAVE_MMU
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define __NR_or1k_atomic __NR_arch_specific_syscall
+__SYSCALL(__NR_or1k_atomic, sys_or1k_atomic)
+
+#endif /* __ASM_OPENRISC_UNISTD_H */
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
new file mode 100644
index 000000000000..9a4c2706d795
--- /dev/null
+++ b/arch/openrisc/kernel/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the linux kernel.
+#
+
+extra-y	:= head.o vmlinux.lds init_task.o
+
+obj-y	:= setup.o idle.o or32_ksyms.o process.o dma.o \
+	   traps.o time.o irq.o entry.o ptrace.o signal.o sys_or32.o \
+	   sys_call_table.o
+
+obj-$(CONFIG_MODULES)		+= module.o
+obj-$(CONFIG_OF)		+= prom.o
+
+clean:
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..1a242a0d7583
--- /dev/null
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -0,0 +1,70 @@
+/*
+ * OpenRISC asm-offsets.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/thread_info.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+
+#define DEFINE(sym, val) \
+		asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+	/* offsets into the task_struct */
+	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+
+	/* offsets into thread_info */
+	DEFINE(TI_TASK, offsetof(struct thread_info, task));
+	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+	DEFINE(TI_KSP, offsetof(struct thread_info, ksp));
+
+	DEFINE(PT_SIZE, sizeof(struct pt_regs));
+
+	/* Interrupt register frame */
+	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
+	DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+
+	DEFINE(NUM_USER_SEGMENTS, TASK_SIZE >> 28);
+	return 0;
+}
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
new file mode 100644
index 000000000000..968d3ee477e3
--- /dev/null
+++ b/arch/openrisc/kernel/dma.c
@@ -0,0 +1,191 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ * DMA mapping callbacks...
+ * As alloc_coherent is the only DMA callback being used currently, that's
+ * the only thing implemented properly.  The rest need looking into...
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-debug.h>
+
+#include <asm/cpuinfo.h>
+#include <asm/spr_defs.h>
+#include <asm/tlbflush.h>
+
+static int page_set_nocache(pte_t *pte, unsigned long addr,
+			    unsigned long next, struct mm_walk *walk)
+{
+	unsigned long cl;
+
+	pte_val(*pte) |= _PAGE_CI;
+
+	/*
+	 * Flush the page out of the TLB so that the new page flags get
+	 * picked up next time there's an access
+	 */
+	flush_tlb_page(NULL, addr);
+
+	/* Flush page out of dcache */
+	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
+		mtspr(SPR_DCBFR, cl);
+
+	return 0;
+}
+
+static int page_clear_nocache(pte_t *pte, unsigned long addr,
+			      unsigned long next, struct mm_walk *walk)
+{
+	pte_val(*pte) &= ~_PAGE_CI;
+
+	/*
+	 * Flush the page out of the TLB so that the new page flags get
+	 * picked up next time there's an access
+	 */
+	flush_tlb_page(NULL, addr);
+
+	return 0;
+}
+
+/*
+ * Alloc "coherent" memory, which for OpenRISC means simply uncached.
+ *
+ * This function effectively just calls __get_free_pages, sets the
+ * cache-inhibit bit on those pages, and makes sure that the pages are
+ * flushed out of the cache before they are used.
+ *
+ */
+void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
+			      dma_addr_t *dma_handle, gfp_t gfp)
+{
+	unsigned long va;
+	void *page;
+	struct mm_walk walk = {
+		.pte_entry = page_set_nocache,
+		.mm = &init_mm
+	};
+
+	page = alloc_pages_exact(size, gfp);
+	if (!page)
+		return NULL;
+
+	/* This gives us the real physical address of the first page. */
+	*dma_handle = __pa(page);
+
+	va = (unsigned long)page;
+
+	/*
+	 * We need to iterate through the pages, clearing the dcache for
+	 * them and setting the cache-inhibit bit.
+	 */
+	if (walk_page_range(va, va + size, &walk)) {
+		free_pages_exact(page, size);
+		return NULL;
+	}
+
+	return (void *)va;
+}
+
+void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+			    dma_addr_t dma_handle)
+{
+	unsigned long va = (unsigned long)vaddr;
+	struct mm_walk walk = {
+		.pte_entry = page_clear_nocache,
+		.mm = &init_mm
+	};
+
+	/* walk_page_range shouldn't be able to fail here */
+	WARN_ON(walk_page_range(va, va + size, &walk));
+
+	free_pages_exact(vaddr, size);
+}
+
+dma_addr_t or1k_map_page(struct device *dev, struct page *page,
+			 unsigned long offset, size_t size,
+			 enum dma_data_direction dir,
+			 struct dma_attrs *attrs)
+{
+	unsigned long cl;
+	dma_addr_t addr = page_to_phys(page) + offset;
+
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		/* Flush the dcache for the requested range */
+		for (cl = addr; cl < addr + size;
+		     cl += cpuinfo.dcache_block_size)
+			mtspr(SPR_DCBFR, cl);
+		break;
+	case DMA_FROM_DEVICE:
+		/* Invalidate the dcache for the requested range */
+		for (cl = addr; cl < addr + size;
+		     cl += cpuinfo.dcache_block_size)
+			mtspr(SPR_DCBIR, cl);
+		break;
+	default:
+		/*
+		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
+		 * flush nor invalidate the cache here as the area will need
+		 * to be manually synced anyway.
+		 */
+		break;
+	}
+
+	return addr;
+}
+
+void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		     size_t size, enum dma_data_direction dir,
+		     struct dma_attrs *attrs)
+{
+	/* Nothing special to do here... */
+}
+
+void or1k_sync_single_for_cpu(struct device *dev,
+			      dma_addr_t dma_handle, size_t size,
+			      enum dma_data_direction dir)
+{
+	unsigned long cl;
+	dma_addr_t addr = dma_handle;
+
+	/* Invalidate the dcache for the requested range */
+	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
+		mtspr(SPR_DCBIR, cl);
+}
+
+void or1k_sync_single_for_device(struct device *dev,
+			         dma_addr_t dma_handle, size_t size,
+			         enum dma_data_direction dir)
+{
+	unsigned long cl;
+	dma_addr_t addr = dma_handle;
+
+	/* Flush the dcache for the requested range */
+	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
+		mtspr(SPR_DCBFR, cl);
+}
+
+/* Number of entries preallocated for DMA-API debugging */
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+	return 0;
+}
+
+fs_initcall(dma_init);
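
For orientation (a hypothetical driver-side sketch, not part of the patch), dma_alloc_coherent() is what eventually lands in or1k_dma_alloc_coherent() above:

	static int example_setup_buffer(struct device *dev, size_t size)
	{
		dma_addr_t handle;
		void *buf;

		buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* CPU accesses 'buf' uncached; the device uses the bus address 'handle' */

		dma_free_coherent(dev, size, buf, handle);
		return 0;
	}
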
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
new file mode 100644
index 000000000000..d5f9c35a583f
--- /dev/null
+++ b/arch/openrisc/kernel/entry.S
@@ -0,0 +1,1128 @@
+/*
+ * OpenRISC entry.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/processor.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/spr_defs.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/asm-offsets.h>
+
+#define DISABLE_INTERRUPTS(t1,t2)			\
+	l.mfspr t2,r0,SPR_SR				;\
+	l.movhi	t1,hi(~(SPR_SR_IEE|SPR_SR_TEE))		;\
+	l.ori	t1,t1,lo(~(SPR_SR_IEE|SPR_SR_TEE))	;\
+	l.and   t2,t2,t1				;\
+	l.mtspr r0,t2,SPR_SR
+
+#define ENABLE_INTERRUPTS(t1)				\
+	l.mfspr	t1,r0,SPR_SR				;\
+	l.ori	t1,t1,lo(SPR_SR_IEE|SPR_SR_TEE)		;\
+	l.mtspr	r0,t1,SPR_SR
+
+/* =========================================================[ macros ]=== */
+
+/*
+ * We need to disable interrupts at beginning of RESTORE_ALL
+ * since interrupt might come in after we've loaded EPC return address
+ * and overwrite EPC with address somewhere in RESTORE_ALL
+ * which is of course wrong!
+ */
+
+#define RESTORE_ALL						\
+	DISABLE_INTERRUPTS(r3,r4)				;\
+	l.lwz   r3,PT_PC(r1)					;\
+	l.mtspr r0,r3,SPR_EPCR_BASE				;\
+	l.lwz   r3,PT_SR(r1)					;\
+	l.mtspr r0,r3,SPR_ESR_BASE				;\
+	l.lwz   r2,PT_GPR2(r1)					;\
+	l.lwz   r3,PT_GPR3(r1)					;\
+	l.lwz   r4,PT_GPR4(r1)					;\
+	l.lwz   r5,PT_GPR5(r1)					;\
+	l.lwz   r6,PT_GPR6(r1)					;\
+	l.lwz   r7,PT_GPR7(r1)					;\
+	l.lwz   r8,PT_GPR8(r1)					;\
+	l.lwz   r9,PT_GPR9(r1)					;\
+	l.lwz   r10,PT_GPR10(r1)					;\
+	l.lwz   r11,PT_GPR11(r1)					;\
+	l.lwz   r12,PT_GPR12(r1)					;\
+	l.lwz   r13,PT_GPR13(r1)					;\
+	l.lwz   r14,PT_GPR14(r1)					;\
+	l.lwz   r15,PT_GPR15(r1)					;\
+	l.lwz   r16,PT_GPR16(r1)					;\
+	l.lwz   r17,PT_GPR17(r1)					;\
+	l.lwz   r18,PT_GPR18(r1)					;\
+	l.lwz   r19,PT_GPR19(r1)					;\
+	l.lwz   r20,PT_GPR20(r1)					;\
+	l.lwz   r21,PT_GPR21(r1)					;\
+	l.lwz   r22,PT_GPR22(r1)					;\
+	l.lwz   r23,PT_GPR23(r1)					;\
+	l.lwz   r24,PT_GPR24(r1)					;\
+	l.lwz   r25,PT_GPR25(r1)					;\
+	l.lwz   r26,PT_GPR26(r1)					;\
+	l.lwz   r27,PT_GPR27(r1)					;\
+	l.lwz   r28,PT_GPR28(r1)					;\
+	l.lwz   r29,PT_GPR29(r1)					;\
+	l.lwz   r30,PT_GPR30(r1)					;\
+	l.lwz   r31,PT_GPR31(r1)					;\
+	l.lwz   r1,PT_SP(r1)					;\
+	l.rfe
+
+
+#define EXCEPTION_ENTRY(handler)				\
+	.global	handler						;\
+handler:							;\
+	/* r1, EPCR, ESR are already saved */			;\
+	l.sw	PT_GPR2(r1),r2					;\
+	l.sw    PT_GPR3(r1),r3					;\
+	l.sw    PT_ORIG_GPR11(r1),r11				;\
+	/* r4 already saved */					;\
+	l.sw    PT_GPR5(r1),r5					;\
+	l.sw    PT_GPR6(r1),r6					;\
+	l.sw    PT_GPR7(r1),r7					;\
+	l.sw    PT_GPR8(r1),r8					;\
+	l.sw    PT_GPR9(r1),r9					;\
+	/* r10 already saved */					;\
+	l.sw    PT_GPR11(r1),r11					;\
+	/* r12 already saved */					;\
+	l.sw    PT_GPR13(r1),r13					;\
+	l.sw    PT_GPR14(r1),r14					;\
+	l.sw    PT_GPR15(r1),r15					;\
+	l.sw    PT_GPR16(r1),r16					;\
+	l.sw    PT_GPR17(r1),r17					;\
+	l.sw    PT_GPR18(r1),r18					;\
+	l.sw    PT_GPR19(r1),r19					;\
+	l.sw    PT_GPR20(r1),r20					;\
+	l.sw    PT_GPR21(r1),r21					;\
+	l.sw    PT_GPR22(r1),r22					;\
+	l.sw    PT_GPR23(r1),r23					;\
+	l.sw    PT_GPR24(r1),r24					;\
+	l.sw    PT_GPR25(r1),r25					;\
+	l.sw    PT_GPR26(r1),r26					;\
+	l.sw    PT_GPR27(r1),r27					;\
+	l.sw    PT_GPR28(r1),r28					;\
+	l.sw    PT_GPR29(r1),r29					;\
+	/* r30 already saved */					;\
+/*        l.sw    PT_GPR30(r1),r30*/					;\
+	l.sw    PT_GPR31(r1),r31					;\
+	l.sw    PT_SYSCALLNO(r1),r0
+
+#define UNHANDLED_EXCEPTION(handler,vector)			\
+	.global	handler						;\
+handler:							;\
+	/* r1, EPCR, ESR already saved */			;\
+	l.sw    PT_GPR2(r1),r2					;\
+	l.sw    PT_GPR3(r1),r3					;\
+	l.sw    PT_ORIG_GPR11(r1),r11				;\
+	l.sw    PT_GPR5(r1),r5					;\
+	l.sw    PT_GPR6(r1),r6					;\
+	l.sw    PT_GPR7(r1),r7					;\
+	l.sw    PT_GPR8(r1),r8					;\
+	l.sw    PT_GPR9(r1),r9					;\
+	/* r10 already saved */					;\
+	l.sw    PT_GPR11(r1),r11					;\
+	/* r12 already saved */					;\
+	l.sw    PT_GPR13(r1),r13					;\
+	l.sw    PT_GPR14(r1),r14					;\
+	l.sw    PT_GPR15(r1),r15					;\
+	l.sw    PT_GPR16(r1),r16					;\
+	l.sw    PT_GPR17(r1),r17					;\
+	l.sw    PT_GPR18(r1),r18					;\
+	l.sw    PT_GPR19(r1),r19					;\
+	l.sw    PT_GPR20(r1),r20					;\
+	l.sw    PT_GPR21(r1),r21					;\
+	l.sw    PT_GPR22(r1),r22					;\
+	l.sw    PT_GPR23(r1),r23					;\
+	l.sw    PT_GPR24(r1),r24					;\
+	l.sw    PT_GPR25(r1),r25					;\
+	l.sw    PT_GPR26(r1),r26					;\
+	l.sw    PT_GPR27(r1),r27					;\
+	l.sw    PT_GPR28(r1),r28					;\
+	l.sw    PT_GPR29(r1),r29					;\
+	/* r31 already saved */					;\
+	l.sw    PT_GPR30(r1),r30					;\
+/*        l.sw    PT_GPR31(r1),r31	*/				;\
+	l.sw    PT_SYSCALLNO(r1),r0				;\
+	l.addi	r3,r1,0						;\
+	/* r4 is exception EA */				;\
+	l.addi	r5,r0,vector					;\
+	l.jal	unhandled_exception				;\
+	 l.nop							;\
+	l.j	_ret_from_exception				;\
+	 l.nop
+
+/*
+ * NOTE: one should never assume that SPR_EPC, SPR_ESR, SPR_EEAR
+ *       contain the same values as when the exception we're handling
+ *	 occurred.  In fact they never do.  If you need them, use the
+ *	 values saved on the stack (for SPR_EPC, SPR_ESR) or the content
+ *       of r4 (for SPR_EEAR).  For details look at EXCEPTION_HANDLE()
+ *       in 'arch/openrisc/kernel/head.S'
+ */
+
+/* =====================================================[ exceptions] === */
+
+/* ---[ 0x100: RESET exception ]----------------------------------------- */
+
+EXCEPTION_ENTRY(_tng_kernel_start)
+	l.jal	_start
+	 l.andi r0,r0,0
+
+/* ---[ 0x200: BUS exception ]------------------------------------------- */
+
+EXCEPTION_ENTRY(_bus_fault_handler)
+	/* r4: EA of fault (set by EXCEPTION_HANDLE) */
+	l.jal   do_bus_fault
+	 l.addi  r3,r1,0 /* pt_regs */
+
+	l.j     _ret_from_exception
+	 l.nop
+
+/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
+
+EXCEPTION_ENTRY(_data_page_fault_handler)
+	/* set up parameters for do_page_fault */
+	l.addi  r3,r1,0                    // pt_regs
+	/* r4 set by EXCEPTION_HANDLE */   // effective address of fault
+	l.ori   r5,r0,0x300                // exception vector
+
+	/*
+	 * __PHX__: TODO
+	 *
+	 * all this can be written much simpler. look at
+	 * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
+	 */
+#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
+	l.lwz   r6,PT_PC(r3)                  // address of an offending insn
+	l.lwz   r6,0(r6)                   // instruction that caused pf
+
+	l.srli  r6,r6,26                   // check opcode for jump insn
+	l.sfeqi r6,0                       // l.j
+	l.bf    8f
+	l.sfeqi r6,1                       // l.jal
+	l.bf    8f
+	l.sfeqi r6,3                       // l.bnf
+	l.bf    8f
+	l.sfeqi r6,4                       // l.bf
+	l.bf    8f
+	l.sfeqi r6,0x11                    // l.jr
+	l.bf    8f
+	l.sfeqi r6,0x12                    // l.jalr
+	l.bf    8f
+
+	l.nop
+
+	l.j     9f
+	l.nop
+8:
+
+	l.lwz   r6,PT_PC(r3)                  // address of an offending insn
+	l.addi  r6,r6,4
+	l.lwz   r6,0(r6)                   // instruction that caused pf
+	l.srli  r6,r6,26                   // get opcode
+9:
+
+#else
+
+	l.mfspr r6,r0,SPR_SR		   // SR
+//	l.lwz	r6,PT_SR(r3)		   // ESR
+	l.andi	r6,r6,SPR_SR_DSX	   // check for delay slot exception
+	l.sfeqi	r6,0x1			   // exception happened in delay slot
+	l.bnf	7f
+	l.lwz	r6,PT_PC(r3)		   // address of an offending insn
+
+	l.addi	r6,r6,4			   // offending insn is in delay slot
+7:
+	l.lwz   r6,0(r6)                   // instruction that caused pf
+	l.srli  r6,r6,26                   // check opcode for write access
+#endif
+
+	l.sfgeui r6,0x34		   // check opcode for write access
+	l.bnf   1f
+	l.sfleui r6,0x37
+	l.bnf   1f
+	l.ori   r6,r0,0x1                  // write access
+	l.j     2f
+	l.nop
+1:	l.ori   r6,r0,0x0                  // !write access
+2:
+
+	/* call fault.c handler in or32/mm/fault.c */
+	l.jal   do_page_fault
+	l.nop
+	l.j     _ret_from_exception
+	l.nop
+
+/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
+
+EXCEPTION_ENTRY(_insn_page_fault_handler)
+	/* set up parameters for do_page_fault */
+	l.addi  r3,r1,0                    // pt_regs
+	/* r4 set by EXCEPTION_HANDLE */   // effective address of fault
+	l.ori   r5,r0,0x400                // exception vector
+	l.ori	r6,r0,0x0		   // !write access
+
+	/* call fault.c handler in or32/mm/fault.c */
+	l.jal   do_page_fault
+	l.nop
+	l.j     _ret_from_exception
+	l.nop
+
+
+/* ---[ 0x500: Timer exception ]----------------------------------------- */
+
+EXCEPTION_ENTRY(_timer_handler)
+	l.jal	timer_interrupt
+	 l.addi r3,r1,0 /* pt_regs */
+
+	l.j    _ret_from_intr
+	 l.nop
+
+/* ---[ 0x600: Alignment exception ]------------------------------------- */
+
+EXCEPTION_ENTRY(_alignment_handler)
+	/* r4: EA of fault (set by EXCEPTION_HANDLE) */
+	l.jal   do_unaligned_access
+	 l.addi  r3,r1,0 /* pt_regs */
+
+	l.j     _ret_from_exception
+	 l.nop
+
+#if 0
+EXCEPTION_ENTRY(_aligment_handler)
+//        l.mfspr r2,r0,SPR_EEAR_BASE     /* Load the effective address */
+	l.addi	r2,r4,0
+//        l.mfspr r5,r0,SPR_EPCR_BASE     /* Load the insn address */
+	l.lwz   r5,PT_PC(r1)
+
+	l.lwz   r3,0(r5)                /* Load insn */
+	l.srli  r4,r3,26                /* Shift left to get the insn opcode */
+
+	l.sfeqi r4,0x00                 /* Check if the load/store insn is in delay slot */
+	l.bf    jmp
+	l.sfeqi r4,0x01
+	l.bf    jmp
+	l.sfeqi r4,0x03
+	l.bf    jmp
+	l.sfeqi r4,0x04
+	l.bf    jmp
+	l.sfeqi r4,0x11
+	l.bf    jr
+	l.sfeqi r4,0x12
+	l.bf    jr
+	l.nop
+	l.j     1f
+	l.addi  r5,r5,4                 /* Increment PC to get return insn address */
+
+jmp:
+	l.slli  r4,r3,6                 /* Get the signed extended jump length */
+	l.srai  r4,r4,4
+
+	l.lwz   r3,4(r5)                /* Load the real load/store insn */
+
+	l.add   r5,r5,r4                /* Calculate jump target address */
+
+	l.j     1f
+	l.srli  r4,r3,26                /* Shift left to get the insn opcode */
+
+jr:
+	l.slli  r4,r3,9                 /* Shift to get the reg nb */
+	l.andi  r4,r4,0x7c
+
+	l.lwz   r3,4(r5)                /* Load the real load/store insn */
+
+	l.add   r4,r4,r1                /* Load the jump register value from the stack */
+	l.lwz   r5,0(r4)
+
+	l.srli  r4,r3,26                /* Shift left to get the insn opcode */
+
+
+1:
+//	  l.mtspr r0,r5,SPR_EPCR_BASE
+	l.sw	PT_PC(r1),r5
+
+	l.sfeqi r4,0x26
+	l.bf    lhs
+	l.sfeqi r4,0x25
+	l.bf    lhz
+	l.sfeqi r4,0x22
+	l.bf    lws
+	l.sfeqi r4,0x21
+	l.bf    lwz
+	l.sfeqi r4,0x37
+	l.bf    sh
+	l.sfeqi r4,0x35
+	l.bf    sw
+	l.nop
+
+1:      l.j     1b                      /* I don't know what to do */
+	l.nop
+
+lhs:    l.lbs   r5,0(r2)
+	l.slli  r5,r5,8
+	l.lbz   r6,1(r2)
+	l.or    r5,r5,r6
+	l.srli  r4,r3,19
+	l.andi  r4,r4,0x7c
+	l.add   r4,r4,r1
+	l.j     align_end
+	l.sw    0(r4),r5
+
+lhz:    l.lbz   r5,0(r2)
+	l.slli  r5,r5,8
+	l.lbz   r6,1(r2)
+	l.or    r5,r5,r6
+	l.srli  r4,r3,19
+	l.andi  r4,r4,0x7c
+	l.add   r4,r4,r1
+	l.j     align_end
+	l.sw    0(r4),r5
+
+lws:    l.lbs   r5,0(r2)
+	l.slli  r5,r5,24
+	l.lbz   r6,1(r2)
+	l.slli  r6,r6,16
+	l.or    r5,r5,r6
+	l.lbz   r6,2(r2)
+	l.slli  r6,r6,8
+	l.or    r5,r5,r6
+	l.lbz   r6,3(r2)
+	l.or    r5,r5,r6
+	l.srli  r4,r3,19
+	l.andi  r4,r4,0x7c
+	l.add   r4,r4,r1
+	l.j     align_end
+	l.sw    0(r4),r5
+
+lwz:    l.lbz   r5,0(r2)
+	l.slli  r5,r5,24
+	l.lbz   r6,1(r2)
+	l.slli  r6,r6,16
+	l.or    r5,r5,r6
+	l.lbz   r6,2(r2)
+	l.slli  r6,r6,8
+	l.or    r5,r5,r6
+	l.lbz   r6,3(r2)
+	l.or    r5,r5,r6
+	l.srli  r4,r3,19
+	l.andi  r4,r4,0x7c
+	l.add   r4,r4,r1
+	l.j     align_end
+	l.sw    0(r4),r5
+
+sh:
+	l.srli  r4,r3,9
+	l.andi  r4,r4,0x7c
+	l.add   r4,r4,r1
+	l.lwz   r5,0(r4)
+	l.sb    1(r2),r5
+	l.srli  r5,r5,8
+	l.j     align_end
+	l.sb    0(r2),r5
+
+sw:
+	l.srli  r4,r3,9
+	l.andi  r4,r4,0x7c
+	l.add   r4,r4,r1
+	l.lwz   r5,0(r4)
+	l.sb    3(r2),r5
+	l.srli  r5,r5,8
+	l.sb    2(r2),r5
+	l.srli  r5,r5,8
+	l.sb    1(r2),r5
+	l.srli  r5,r5,8
+	l.j     align_end
+	l.sb    0(r2),r5
+
+align_end:
+	l.j    _ret_from_intr
+	l.nop
+#endif
+
+/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
+
+EXCEPTION_ENTRY(_illegal_instruction_handler)
+	/* r4: EA of fault (set by EXCEPTION_HANDLE) */
+	l.jal   do_illegal_instruction
+	 l.addi  r3,r1,0 /* pt_regs */
+
+	l.j     _ret_from_exception
+	 l.nop
+
+/* ---[ 0x800: External interrupt exception ]---------------------------- */
+
+EXCEPTION_ENTRY(_external_irq_handler)
+#ifdef CONFIG_OPENRISC_ESR_EXCEPTION_BUG_CHECK
+	l.lwz	r4,PT_SR(r1)		// were interrupts enabled ?
+	l.andi	r4,r4,SPR_SR_IEE
+	l.sfeqi	r4,0
+	l.bnf	1f			// ext irq enabled, all ok.
+	l.nop
+
+	l.addi  r1,r1,-0x8
+	l.movhi r3,hi(42f)
+	l.ori	r3,r3,lo(42f)
+	l.sw    0x0(r1),r3
+	l.jal   printk
+	l.sw    0x4(r1),r4
+	l.addi  r1,r1,0x8
+
+	.section .rodata, "a"
+42:
+		.string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
+		.align 4
+	.previous
+
+	l.ori	r4,r4,SPR_SR_IEE	// fix the bug
+//	l.sw	PT_SR(r1),r4
+1:
+#endif
+	l.addi	r3,r1,0
+	l.movhi	r8,hi(do_IRQ)
+	l.ori	r8,r8,lo(do_IRQ)
+	l.jalr r8
+	l.nop
+	l.j    _ret_from_intr
+	l.nop
+
+/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
+
+
+/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
+
+
+/* ---[ 0xb00: Range exception ]----------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0xb00,0xb00)
+
+/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
+
+/*
+ * Syscalls are a special type of exception in that they are
+ * _explicitly_ invoked by userspace and can therefore be
+ * held to conform to the same ABI as normal functions with
+ * respect to whether registers are preserved across the call
+ * or not.
+ */
+
+/* Upon syscall entry we just save the callee-saved registers
+ * and not the call-clobbered ones.
+ */
+
+_string_syscall_return:
+	.string "syscall return %ld \n\r\0"
+	.align 4
+
+ENTRY(_sys_call_handler)
+	/* syscalls run with interrupts enabled */
+	ENABLE_INTERRUPTS(r29)		// enable interrupts, r29 is temp
+
+	/* r1, EPCR, ESR are already saved */
+	l.sw	PT_GPR2(r1),r2
+	/* r3-r8 must be saved because syscall restart relies
+	 * on us being able to restart the syscall args... technically
+	 * they should be clobbered, otherwise
+	 */
+	l.sw    PT_GPR3(r1),r3
+	/* r4 already saved */
+	/* r4 holds the EEAR address of the fault, load the original r4 */
+	l.lwz	r4,PT_GPR4(r1)
+	l.sw    PT_GPR5(r1),r5
+	l.sw    PT_GPR6(r1),r6
+	l.sw    PT_GPR7(r1),r7
+	l.sw    PT_GPR8(r1),r8
+	l.sw    PT_GPR9(r1),r9
+	/* r10 already saved */
+	l.sw    PT_GPR11(r1),r11
+	l.sw    PT_ORIG_GPR11(r1),r11
+	/* r12,r13 already saved */
+
+	/* r14-r28 (even) aren't touched by the syscall fast path below
+	 * so we don't need to save them.  However, the functions that return
+	 * to userspace via a call to switch() DO need to save these because
+	 * switch() effectively clobbers them... saving these registers for
+	 * such functions is handled in their syscall wrappers (see fork, vfork,
+	 * and clone, below).
+	 * and clone, below).
+	 */
+	/* r30 is the only register we clobber in the fast path */
+	/* r30 already saved */
+/*	l.sw    PT_GPR30(r1),r30 */
+	/* This is used by do_signal to determine whether to check for
+	 * syscall restart or not */
+	l.sw    PT_SYSCALLNO(r1),r11
+
+_syscall_check_trace_enter:
+	/* If TIF_SYSCALL_TRACE is set, then we want to do syscall tracing */
+	l.lwz	r30,TI_FLAGS(r10)
+	l.andi	r30,r30,_TIF_SYSCALL_TRACE
+	l.sfne	r30,r0
+	l.bf	_syscall_trace_enter
+	 l.nop
+
+_syscall_check:
+	/* Ensure that the syscall number is reasonable */
+	l.sfgeui r11,__NR_syscalls
+	l.bf	_syscall_badsys
+	 l.nop
+
+_syscall_call:
+	l.movhi r29,hi(sys_call_table)
+	l.ori   r29,r29,lo(sys_call_table)
+	l.slli  r11,r11,2
+	l.add   r29,r29,r11
+	l.lwz   r29,0(r29)
+
+	l.jalr  r29
+	 l.nop
+
+_syscall_return:
+	/* All syscalls return here... just pay attention to ret_from_fork
+	 * which does it in a round-about way.
+	 */
+	l.sw    PT_GPR11(r1),r11           // save return value
+
+#if 0
+_syscall_debug:
+	l.movhi r3,hi(_string_syscall_return)
+	l.ori   r3,r3,lo(_string_syscall_return)
+	l.ori   r27,r0,1
+	l.sw    -4(r1),r27
+	l.sw    -8(r1),r11
+	l.addi  r1,r1,-8
+	l.movhi r27,hi(printk)
+	l.ori   r27,r27,lo(printk)
+	l.jalr  r27
+	 l.nop
+	l.addi  r1,r1,8
+#endif
+
+_syscall_check_trace_leave:
+	/* r30 is a callee-saved register so this should still hold the
+	 * _TIF_SYSCALL_TRACE flag from _syscall_check_trace_enter above...
+	 * _syscall_trace_leave expects syscall result to be in pt_regs->r11.
+	 */
+	l.sfne	r30,r0
+	l.bf	_syscall_trace_leave
+	 l.nop
+
+/* This is where the exception-return code begins... interrupts need to be
+ * disabled the rest of the way here because we can't afford to miss any
+ * interrupts that set NEED_RESCHED or SIGNALPENDING... really true? */
+
+_syscall_check_work:
+	/* Here we need to disable interrupts */
+	DISABLE_INTERRUPTS(r27,r29)
+	l.lwz	r30,TI_FLAGS(r10)
+	l.andi	r30,r30,_TIF_WORK_MASK
+	l.sfne	r30,r0
+
+	l.bnf	_syscall_resume_userspace
+	 l.nop
+
+	/* Work pending follows a different return path, so we need to
+	 * make sure that all the call-saved registers get into pt_regs
+	 * before branching...
+	 */
+	l.sw    PT_GPR14(r1),r14
+	l.sw    PT_GPR16(r1),r16
+	l.sw    PT_GPR18(r1),r18
+	l.sw    PT_GPR20(r1),r20
+	l.sw    PT_GPR22(r1),r22
+	l.sw    PT_GPR24(r1),r24
+	l.sw    PT_GPR26(r1),r26
+	l.sw    PT_GPR28(r1),r28
+
+	/* _work_pending needs to be called with interrupts disabled */
+	l.j	_work_pending
+	 l.nop
+
+_syscall_resume_userspace:
+//	ENABLE_INTERRUPTS(r29)
+
+
+/* This is the hot path for returning to userspace from a syscall.  If there's
+ * work to be done and the branch to _work_pending was taken above, then the
+ * return to userspace will be done via the normal exception return path...
+ * that path restores _all_ registers and will overwrite the "clobbered"
+ * registers with whatever garbage is in pt_regs -- that's OK because those
+ * registers are clobbered anyway and because the extra restore work is
+ * insignificant compared to the work that _work_pending is already doing.
+ */
+
+/* Once again, syscalls are special and only guarantee to preserve the
+ * same registers as a normal function call */
+
+/* The assumption here is that the registers r14-r28 (even) are untouched and
+ * don't need to be restored... be sure that that's really the case!
+ */
+
+/* This is still too much... we should only be restoring what we actually
+ * clobbered... we should even be using 'scratch' (odd) regs above so that
+ * we hardly need to restore anything at all...
+ */
+
+	l.lwz	r2,PT_GPR2(r1)
+
+	/* Restore args */
+	/* r3-r8 are technically clobbered, but syscall restart needs these
+	 * to be restored...
+	 */
+	l.lwz	r3,PT_GPR3(r1)
+	l.lwz	r4,PT_GPR4(r1)
+	l.lwz	r5,PT_GPR5(r1)
+	l.lwz	r6,PT_GPR6(r1)
+	l.lwz	r7,PT_GPR7(r1)
+	l.lwz	r8,PT_GPR8(r1)
+
+	l.lwz	r9,PT_GPR9(r1)
+	l.lwz	r10,PT_GPR10(r1)
+	l.lwz	r11,PT_GPR11(r1)
+
+	/* r30 is the only register we clobber in the fast path */
+	l.lwz	r30,PT_GPR30(r1)
+
+	/* Here we use r13-r19 (odd) as scratch regs */
+	l.lwz   r13,PT_PC(r1)
+	l.lwz   r15,PT_SR(r1)
+	l.lwz	r1,PT_SP(r1)
+	/* Interrupts need to be disabled for setting EPCR and ESR
+	 * so that another interrupt doesn't come in here and clobber
+	 * them before we can use them for our l.rfe */
+	DISABLE_INTERRUPTS(r17,r19)
+	l.mtspr r0,r13,SPR_EPCR_BASE
+	l.mtspr r0,r15,SPR_ESR_BASE
+	l.rfe
+
+/* End of hot path!
+ * Keep the below tracing and error handling out of the hot path...
+*/
+
+_syscall_trace_enter:
+	/* Here we pass pt_regs to do_syscall_trace_enter.  Make sure
+	 * that function is really getting all the info it needs as
+	 * pt_regs isn't a complete set of userspace regs, just the
+	 * ones relevant to the syscall...
+	 *
+	 * Note use of delay slot for setting argument.
+	 */
+	l.jal	do_syscall_trace_enter
+	 l.addi	r3,r1,0
+
+	/* Restore arguments (not preserved across do_syscall_trace_enter)
+	 * so that we can do the syscall for real and return to the syscall
+	 * hot path.
+	 */
+	l.lwz	r11,PT_SYSCALLNO(r1)
+	l.lwz	r3,PT_GPR3(r1)
+	l.lwz	r4,PT_GPR4(r1)
+	l.lwz	r5,PT_GPR5(r1)
+	l.lwz	r6,PT_GPR6(r1)
+	l.lwz	r7,PT_GPR7(r1)
+
+	l.j	_syscall_check
+	 l.lwz	r8,PT_GPR8(r1)
+
+_syscall_trace_leave:
+	l.jal	do_syscall_trace_leave
+	 l.addi	r3,r1,0
+
+	l.j	_syscall_check_work
+	 l.nop
+
+_syscall_badsys:
+	/* Here we effectively pretend to have executed an imaginary
+	 * syscall that returns -ENOSYS and then return to the regular
+	 * syscall hot path.
+	 * Note that "return value" is set in the delay slot...
+	 */
+	l.j	_syscall_return
+	 l.addi	r11,r0,-ENOSYS
+
+/******* END SYSCALL HANDLING *******/
+
+/* ---[ 0xd00: Trap exception ]------------------------------------------ */
+
+UNHANDLED_EXCEPTION(_vector_0xd00,0xd00)
+
+/* ---[ 0xe00: Trap exception ]------------------------------------------ */
+
+EXCEPTION_ENTRY(_trap_handler)
+	/* r4: EA of fault (set by EXCEPTION_HANDLE) */
+	l.jal   do_trap
+	 l.addi  r3,r1,0 /* pt_regs */
+
+	l.j     _ret_from_exception
+	 l.nop
+
+/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0xf00,0xf00)
+
+/* ---[ 0x1000: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1000,0x1000)
+
+/* ---[ 0x1100: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1100,0x1100)
+
+/* ---[ 0x1200: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1200,0x1200)
+
+/* ---[ 0x1300: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1300,0x1300)
+
+/* ---[ 0x1400: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1400,0x1400)
+
+/* ---[ 0x1500: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1500,0x1500)
+
+/* ---[ 0x1600: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1600,0x1600)
+
+/* ---[ 0x1700: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1700,0x1700)
+
+/* ---[ 0x1800: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1800,0x1800)
+
+/* ---[ 0x1900: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1900,0x1900)
+
+/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1a00,0x1a00)
+
+/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1b00,0x1b00)
+
+/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1c00,0x1c00)
+
+/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1d00,0x1d00)
+
+/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1e00,0x1e00)
+
+/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
+
+/* ========================================================[ return ] === */
+
+_work_pending:
+	/*
+	 * if (current_thread_info->flags & _TIF_NEED_RESCHED)
+	 *     schedule();
+	 */
+	l.lwz   r5,TI_FLAGS(r10)
+	l.andi	r3,r5,_TIF_NEED_RESCHED
+	l.sfnei r3,0
+	l.bnf   _work_notifysig
+	 l.nop
+	l.jal   schedule
+	 l.nop
+	l.j	_resume_userspace
+	 l.nop
+
+/* Handle pending signals and notify-resume requests.
+ * do_notify_resume must be passed the latest pushed pt_regs, not
+ * necessarily the "userspace" ones.  Also, pt_regs->syscallno
+ * must be set so that the syscall restart functionality works.
+ */
+_work_notifysig:
+	l.jal	do_notify_resume
+	 l.ori	r3,r1,0		  /* pt_regs */
+
+_resume_userspace:
+	DISABLE_INTERRUPTS(r3,r4)
+	l.lwz	r3,TI_FLAGS(r10)
+	l.andi	r3,r3,_TIF_WORK_MASK
+	l.sfnei	r3,0
+	l.bf	_work_pending
+	 l.nop
+
+_restore_all:
+	RESTORE_ALL
+	/* This returns to userspace code */
+
+
+ENTRY(_ret_from_intr)
+ENTRY(_ret_from_exception)
+	l.lwz	r4,PT_SR(r1)
+	l.andi	r3,r4,SPR_SR_SM
+	l.sfeqi	r3,0
+	l.bnf	_restore_all
+	 l.nop
+	l.j	_resume_userspace
+	 l.nop
+
+ENTRY(ret_from_fork)
+	l.jal	schedule_tail
+	 l.nop
+
+	/* _syscall_return expects r11 to contain the return value */
+	l.lwz	r11,PT_GPR11(r1)
+
+	/* The syscall fast path return expects call-saved registers
+	 * r12-r28 to be untouched, so we restore them here as they
+	 * will have been effectively clobbered when arriving here
+	 * via the call to switch()
+	 */
+	l.lwz	r12,PT_GPR12(r1)
+	l.lwz	r14,PT_GPR14(r1)
+	l.lwz	r16,PT_GPR16(r1)
+	l.lwz	r18,PT_GPR18(r1)
+	l.lwz	r20,PT_GPR20(r1)
+	l.lwz	r22,PT_GPR22(r1)
+	l.lwz	r24,PT_GPR24(r1)
+	l.lwz	r26,PT_GPR26(r1)
+	l.lwz	r28,PT_GPR28(r1)
+
+	l.j	_syscall_return
+	 l.nop
+
+/* Since syscalls don't save call-clobbered registers, the args to
+ * kernel_thread_helper will need to be passed through callee-saved
+ * registers and copied to the parameter registers when the thread
+ * begins running.
+ *
+ * See arch/openrisc/kernel/process.c:
+ * The args are passed as follows:
+ *   arg1 (r3) : passed in r20
+ *   arg2 (r4) : passed in r22
+ */
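+
+/* In C terms (a sketch; the real setup presumably happens in copy_thread()
+ * in process.c): the parent stashes the new kernel thread's two arguments
+ * in the child's saved r20 and r22, and the child starts executing here,
+ * moving them into the argument registers before jumping to
+ * kernel_thread_helper().
+ */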
+
+ENTRY(_kernel_thread_helper)
+	l.or	r3,r20,r0
+	l.or	r4,r22,r0
+	l.movhi	r31,hi(kernel_thread_helper)
+	l.ori	r31,r31,lo(kernel_thread_helper)
+	l.jr	r31
+	 l.nop
+
+
+/* ========================================================[ switch ] === */
+
+/*
+ * This routine switches between two different tasks.  The process
+ * state of one is saved on its kernel stack.  Then the state
+ * of the other is restored from its kernel stack.  The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process, via the 'return'.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path.  If you change this (or in particular, the
+ * SAVE_REGS macro), you'll have to change the fork code also.
+ */
+
+
+/* _switch MUST never lie on a page boundary, because it runs from
+ * effective addresses and being interrupted by an iTLB miss would kill it.
+ * A dTLB miss seems never to occur in the bad place since data accesses
+ * are from task structures which are always page aligned.
+ *
+ * The problem happens in RESTORE_ALL_NO_R11 where we first set the EPCR
+ * register, then load the previous register values and only at the end call
+ * the l.rfe instruction. If we get a TLB miss in between, the EPCR register
+ * gets garbled and we end up calling l.rfe with the wrong EPCR. (the same
+ * probably holds for ESR)
+ *
+ * To avoid this problem it is sufficient to align _switch to
+ * some nice round number smaller than its size...
+ */
+
+/* ABI rules apply here... we either enter _switch via schedule() or via
+ * an imaginary call to which we shall return at ret_from_fork.  Either
+ * way, we are a function call and only need to preserve the callee-saved
+ * registers when we return.  As such, we don't need to save the registers
+ * on the stack that we won't be returning as they were...
+ */
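+
+/* Viewed from C, this is roughly (an illustrative sketch; the real
+ * declaration lives in the switch_to() glue):
+ *
+ *   struct thread_info *_switch(struct thread_info *old_ti,   -- r3
+ *                               struct thread_info *new_ti);  -- r4
+ *
+ * The old thread_info pointer (the incoming r10) is handed back in r11 as
+ * the 'last' return value.
+ */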
+
+	.align 0x400
+ENTRY(_switch)
+	/* We don't store SR as _switch only gets called in a context where
+	 * the SR will be the same going in and coming out... */
+
+	/* Set up new pt_regs struct for saving task state */
+	l.addi  r1,r1,-(INT_FRAME_SIZE)
+
+	/* No need to store r1/PT_SP as it goes into KSP below */
+	l.sw    PT_GPR2(r1),r2
+	l.sw    PT_GPR9(r1),r9
+	/* This is wrong, r12 shouldn't be here... but GCC is broken for the time being
+	 * and expects r12 to be callee-saved... */
+	l.sw    PT_GPR12(r1),r12
+	l.sw    PT_GPR14(r1),r14
+	l.sw    PT_GPR16(r1),r16
+	l.sw    PT_GPR18(r1),r18
+	l.sw    PT_GPR20(r1),r20
+	l.sw    PT_GPR22(r1),r22
+	l.sw    PT_GPR24(r1),r24
+	l.sw    PT_GPR26(r1),r26
+	l.sw    PT_GPR28(r1),r28
+	l.sw    PT_GPR30(r1),r30
+
+	l.addi	r11,r10,0			/* Save old 'current' to 'last' return value*/
+
+	/* We use thread_info->ksp for storing the address of the above
+	 * structure so that we can get back to it later... we don't want
+	 * to lose the value of thread_info->ksp, though, so store it as
+	 * pt_regs->sp so that we can easily restore it when we are made
+	 * live again...
+	 */
+
+	/* Save the old value of thread_info->ksp as pt_regs->sp */
+	l.lwz	r29,TI_KSP(r10)
+	l.sw	PT_SP(r1),r29
+
+	/* Swap kernel stack pointers */
+	l.sw    TI_KSP(r10),r1			/* Save old stack pointer */
+	l.or	r10,r4,r0			/* Set up new current_thread_info */
+	l.lwz   r1,TI_KSP(r10)			/* Load new stack pointer */
+
+	/* Restore the old value of thread_info->ksp */
+	l.lwz	r29,PT_SP(r1)
+	l.sw	TI_KSP(r10),r29
+
+	/* ...and restore the registers, except r11 because the return value
+	 * has already been set above.
+	 */
+	l.lwz   r2,PT_GPR2(r1)
+	l.lwz   r9,PT_GPR9(r1)
+	/* No need to restore r10 */
+	/* ...and do not restore r11 */
+
+	/* This is wrong, r12 shouldn't be here... but GCC is broken for the time being
+	 * and expects r12 to be callee-saved... */
+	l.lwz   r12,PT_GPR12(r1)
+	l.lwz   r14,PT_GPR14(r1)
+	l.lwz   r16,PT_GPR16(r1)
+	l.lwz   r18,PT_GPR18(r1)
+	l.lwz   r20,PT_GPR20(r1)
+	l.lwz   r22,PT_GPR22(r1)
+	l.lwz   r24,PT_GPR24(r1)
+	l.lwz   r26,PT_GPR26(r1)
+	l.lwz   r28,PT_GPR28(r1)
+	l.lwz   r30,PT_GPR30(r1)
+
+	/* Unwind stack to pre-switch state */
+	l.addi  r1,r1,(INT_FRAME_SIZE)
+
+	/* Return via the link-register back to where we 'came from', where that can be
+	 * either schedule() or ret_from_fork()... */
+	l.jr	r9
+	 l.nop
+
+/* ==================================================================== */
+
+/* These all use the delay slot for setting the argument register, so the
+ * jump is always happening after the l.addi instruction.
+ *
+ * These are all just wrappers that don't touch the link-register r9, so the
+ * return from the "real" syscall function will return back to the syscall
+ * code that did the l.jal that brought us here.
+ */
+
+/* fork requires that we save all the callee-saved registers because they
+ * are all effectively clobbered by the call to _switch.  Here we store
+ * all the registers that aren't touched by the syscall fast path and thus
+ * weren't saved there.
+ */
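+
+/* The common pattern for the wrappers below: append a pointer to the
+ * current pt_regs frame as an extra trailing argument (set up in the delay
+ * slot) and tail-call the real handler, leaving r9 untouched so that the
+ * handler returns straight back into the syscall return path.  fork and
+ * clone additionally dump the remaining callee-saved registers into
+ * pt_regs first, via _fork_save_extra_regs_and_call below.
+ */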
+
+_fork_save_extra_regs_and_call:
+	l.sw    PT_GPR14(r1),r14
+	l.sw    PT_GPR16(r1),r16
+	l.sw    PT_GPR18(r1),r18
+	l.sw    PT_GPR20(r1),r20
+	l.sw    PT_GPR22(r1),r22
+	l.sw    PT_GPR24(r1),r24
+	l.sw    PT_GPR26(r1),r26
+	l.jr	r29
+	 l.sw    PT_GPR28(r1),r28
+
+ENTRY(sys_clone)
+	l.movhi	r29,hi(_sys_clone)
+	l.ori	r29,r29,lo(_sys_clone)
+	l.j	_fork_save_extra_regs_and_call
+	 l.addi	r7,r1,0
+
+ENTRY(sys_fork)
+	l.movhi	r29,hi(_sys_fork)
+	l.ori	r29,r29,lo(_sys_fork)
+	l.j	_fork_save_extra_regs_and_call
+	 l.addi	r3,r1,0
+
+ENTRY(sys_execve)
+	l.j	_sys_execve
+	 l.addi	r6,r1,0
+
+ENTRY(sys_sigaltstack)
+	l.j	_sys_sigaltstack
+	 l.addi	r5,r1,0
+
+ENTRY(sys_rt_sigreturn)
+	l.j	_sys_rt_sigreturn
+	 l.addi	r3,r1,0
+
+/* This is a catch-all syscall for atomic instructions for the OpenRISC 1000.
+ * The function takes a variable number of parameters depending on which
+ * particular flavour of atomic you want... parameter 1 is a flag identifying
+ * the atomic in question.  Currently, this function implements the
+ * following variants:
+ *
+ * XCHG:
+ *  @flag: 1
+ *  @ptr1:
+ *  @ptr2:
+ * Atomically exchange the values in pointers 1 and 2.
+ *
+ */
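+
+/* Seen from userspace, this is roughly (an illustrative sketch; the
+ * userspace-side declaration is not part of this file):
+ *
+ *   long or1k_atomic(long flag, unsigned long *ptr1, unsigned long *ptr2);
+ *
+ * where flag == 1 requests XCHG.  On this uniprocessor implementation,
+ * atomicity comes simply from disabling interrupts around the two loads
+ * and two stores below.
+ */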
+
+ENTRY(sys_or1k_atomic)
+	/* FIXME: This ignores r3 and always does an XCHG */
+	DISABLE_INTERRUPTS(r17,r19)
+	l.lwz	r30,0(r4)
+	l.lwz	r28,0(r5)
+	l.sw	0(r4),r28
+	l.sw	0(r5),r30
+	ENABLE_INTERRUPTS(r17)
+	l.jr	r9
+	 l.or	r11,r0,r0
+
+/* ============================================================[ EOF ]=== */
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
new file mode 100644
index 000000000000..c75018d22644
--- /dev/null
+++ b/arch/openrisc/kernel/head.S
@@ -0,0 +1,1607 @@
+/*
+ * OpenRISC head.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cache.h>
+#include <asm/spr_defs.h>
+#include <asm/asm-offsets.h>
+
+#define tophys(rd,rs)				\
+	l.movhi	rd,hi(-KERNELBASE)		;\
+	l.add	rd,rd,rs
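+
+/* tophys() converts a kernel virtual address to its physical counterpart
+ * by adding -KERNELBASE, i.e. subtracting the kernel linear-mapping offset
+ * (presumably 0xc0000000 on this port).  It is used wherever the code
+ * below runs with the MMU still disabled.
+ */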
+
+#define CLEAR_GPR(gpr)				\
+	l.or    gpr,r0,r0
+
+#define LOAD_SYMBOL_2_GPR(gpr,symbol)		\
+	l.movhi gpr,hi(symbol)			;\
+	l.ori   gpr,gpr,lo(symbol)
+
+
+#define UART_BASE_ADD      0x90000000
+
+#define EXCEPTION_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
+#define SYSCALL_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
+
+/* ============================================[ tmp store locations ]=== */
+
+/*
+ * emergency_print temporary stores
+ */
+#define EMERGENCY_PRINT_STORE_GPR4	l.sw    0x20(r0),r4
+#define EMERGENCY_PRINT_LOAD_GPR4	l.lwz   r4,0x20(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR5	l.sw    0x24(r0),r5
+#define EMERGENCY_PRINT_LOAD_GPR5	l.lwz   r5,0x24(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR6	l.sw    0x28(r0),r6
+#define EMERGENCY_PRINT_LOAD_GPR6	l.lwz   r6,0x28(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR7	l.sw    0x2c(r0),r7
+#define EMERGENCY_PRINT_LOAD_GPR7	l.lwz   r7,0x2c(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR8	l.sw    0x30(r0),r8
+#define EMERGENCY_PRINT_LOAD_GPR8	l.lwz   r8,0x30(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR9	l.sw    0x34(r0),r9
+#define EMERGENCY_PRINT_LOAD_GPR9	l.lwz   r9,0x34(r0)
+
+
+/*
+ * TLB miss handlers temporary stores
+ */
+#define EXCEPTION_STORE_GPR9		l.sw    0x10(r0),r9
+#define EXCEPTION_LOAD_GPR9		l.lwz   r9,0x10(r0)
+
+#define EXCEPTION_STORE_GPR2		l.sw    0x64(r0),r2
+#define EXCEPTION_LOAD_GPR2		l.lwz   r2,0x64(r0)
+
+#define EXCEPTION_STORE_GPR3		l.sw    0x68(r0),r3
+#define EXCEPTION_LOAD_GPR3		l.lwz   r3,0x68(r0)
+
+#define EXCEPTION_STORE_GPR4		l.sw    0x6c(r0),r4
+#define EXCEPTION_LOAD_GPR4		l.lwz   r4,0x6c(r0)
+
+#define EXCEPTION_STORE_GPR5		l.sw    0x70(r0),r5
+#define EXCEPTION_LOAD_GPR5		l.lwz   r5,0x70(r0)
+
+#define EXCEPTION_STORE_GPR6		l.sw    0x74(r0),r6
+#define EXCEPTION_LOAD_GPR6		l.lwz   r6,0x74(r0)
+
+
+/*
+ * EXCEPTION_HANDLE temporary stores
+ */
+
+#define EXCEPTION_T_STORE_GPR30		l.sw    0x78(r0),r30
+#define EXCEPTION_T_LOAD_GPR30(reg)	l.lwz   reg,0x78(r0)
+
+#define EXCEPTION_T_STORE_GPR10		l.sw    0x7c(r0),r10
+#define EXCEPTION_T_LOAD_GPR10(reg)	l.lwz   reg,0x7c(r0)
+
+#define EXCEPTION_T_STORE_SP		l.sw	0x80(r0),r1
+#define EXCEPTION_T_LOAD_SP(reg)	l.lwz   reg,0x80(r0)
+
+/*
+ * For UNHANDLED_EXCEPTION
+ */
+
+#define EXCEPTION_T_STORE_GPR31		l.sw    0x84(r0),r31
+#define EXCEPTION_T_LOAD_GPR31(reg)	l.lwz   reg,0x84(r0)
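+
+/* All of the slots above are absolute addresses in the first page of
+ * physical memory.  Exception entry runs with the MMU off and without a
+ * usable stack, so a handful of fixed low-memory words stand in as scratch
+ * space until a proper pt_regs frame has been set up.
+ */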
+
+/* =========================================================[ macros ]=== */
+
+
+#define GET_CURRENT_PGD(reg,t1)					\
+	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
+	tophys  (t1,reg)					;\
+	l.lwz   reg,0(t1)
+
+
+/*
+ * DSCR: this is a common hook for handling exceptions. it will save
+ *       the needed registers, set up stack and pointer to current
+ *	 then jump to the handler while enabling MMU
+ *
+ * PRMS: handler	- a function to jump to. it has to save the
+ *			remaining registers to kernel stack, call
+ *			appropriate arch-independent exception handler
+ *			and finally jump to ret_from_except
+ *
+ * PREQ: unchanged state from the time exception happened
+ *
+ * POST: SAVED the following registers original value
+ *	       to the new created exception frame pointed to by r1
+ *
+ *	 r1  - ksp	pointing to the new (exception) frame
+ *	 r4  - EEAR     exception EA
+ *	 r10 - current	pointing to current_thread_info struct
+ *	 r12 - syscall  0, since we didn't come from syscall
+ *	 r13 - temp	it actually contains new SR, not needed anymore
+ *	 r31 - handler	address of the handler we'll jump to
+ *
+ *	 handler has to save remaining registers to the exception
+ *	 ksp frame *before* tainting them!
+ *
+ * NOTE: this function is not reentrant per se. reentrancy is guaranteed
+ *       by the processor disabling all exceptions/interrupts when an
+ *	 exception occurs.
+ *
+ * OPTM: no need to make it so wasteful to extract ksp when in user mode
+ */
+
+#define EXCEPTION_HANDLE(handler)				\
+	EXCEPTION_T_STORE_GPR30					;\
+	l.mfspr r30,r0,SPR_ESR_BASE				;\
+	l.andi  r30,r30,SPR_SR_SM				;\
+	l.sfeqi r30,0						;\
+	EXCEPTION_T_STORE_GPR10					;\
+	l.bnf   2f                            /* kernel_mode */	;\
+	 EXCEPTION_T_STORE_SP                 /* delay slot */	;\
+1: /* user_mode:   */						;\
+	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
+	tophys  (r30,r1)					;\
+	/* r10: current_thread_info  */				;\
+	l.lwz   r10,0(r30)					;\
+	tophys  (r30,r10)					;\
+	l.lwz   r1,(TI_KSP)(r30)				;\
+	/* fall through */					;\
+2: /* kernel_mode: */						;\
+	/* create new stack frame, save only needed gprs */	;\
+	/* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */	;\
+	/* r12:	temp, syscall indicator */			;\
+	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
+	/* r1 is KSP, r30 is __pa(KSP) */			;\
+	tophys  (r30,r1)					;\
+	l.sw    PT_GPR12(r30),r12				;\
+	l.mfspr r12,r0,SPR_EPCR_BASE				;\
+	l.sw    PT_PC(r30),r12					;\
+	l.mfspr r12,r0,SPR_ESR_BASE				;\
+	l.sw    PT_SR(r30),r12					;\
+	/* save r30 */						;\
+	EXCEPTION_T_LOAD_GPR30(r12)				;\
+	l.sw	PT_GPR30(r30),r12				;\
+	/* save r10 as was prior to exception */		;\
+	EXCEPTION_T_LOAD_GPR10(r12)				;\
+	l.sw	PT_GPR10(r30),r12				;\
+	/* save PT_SP as was prior to exception */		;\
+	EXCEPTION_T_LOAD_SP(r12)				;\
+	l.sw	PT_SP(r30),r12					;\
+	/* save exception r4, set r4 = EA */			;\
+	l.sw	PT_GPR4(r30),r4					;\
+	l.mfspr r4,r0,SPR_EEAR_BASE				;\
+	/* r12 == 1 if we come from syscall */			;\
+	CLEAR_GPR(r12)						;\
+	/* ----- turn on MMU ----- */				;\
+	l.ori	r30,r0,(EXCEPTION_SR)				;\
+	l.mtspr	r0,r30,SPR_ESR_BASE				;\
+	/* r30:	EA address of handler */			;\
+	LOAD_SYMBOL_2_GPR(r30,handler)				;\
+	l.mtspr r0,r30,SPR_EPCR_BASE				;\
+	l.rfe
+
+/*
+ * this doesn't work
+ *
+ *
+ * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
+ * #define UNHANDLED_EXCEPTION(handler)				\
+ *	l.ori   r3,r0,0x1					;\
+ *	l.mtspr r0,r3,SPR_SR					;\
+ *      l.movhi r3,hi(0xf0000100)				;\
+ *      l.ori   r3,r3,lo(0xf0000100)				;\
+ *	l.jr	r3						;\
+ *	l.nop	1
+ *
+ * #endif
+ */
+
+/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
+ *       a bit more careful (if we have a PT_SP or current pointer
+ *       corruption) and set them up from 'current_set'
+ *
+ */
+#define UNHANDLED_EXCEPTION(handler)				\
+	EXCEPTION_T_STORE_GPR31					;\
+	EXCEPTION_T_STORE_GPR10					;\
+	EXCEPTION_T_STORE_SP					;\
+	/* temporary store r3, r9 into r1, r10 */		;\
+	l.addi	r1,r3,0x0					;\
+	l.addi	r10,r9,0x0					;\
+	/* the string referenced by r3 must be low enough */	;\
+	l.jal	_emergency_print				;\
+	l.ori	r3,r0,lo(_string_unhandled_exception)		;\
+	l.mfspr	r3,r0,SPR_NPC					;\
+	l.jal	_emergency_print_nr				;\
+	l.andi	r3,r3,0x1f00					;\
+	/* the string referenced by r3 must be low enough */	;\
+	l.jal	_emergency_print				;\
+	l.ori	r3,r0,lo(_string_epc_prefix)			;\
+	l.jal	_emergency_print_nr				;\
+	l.mfspr	r3,r0,SPR_EPCR_BASE				;\
+	l.jal	_emergency_print				;\
+	l.ori	r3,r0,lo(_string_nl)				;\
+	/* end of printing */					;\
+	l.addi	r3,r1,0x0					;\
+	l.addi	r9,r10,0x0					;\
+	/* extract current, ksp from current_set */		;\
+	LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)		;\
+	LOAD_SYMBOL_2_GPR(r10,init_thread_union)		;\
+	/* create new stack frame, save only needed gprs */	;\
+	/* r1: KSP, r10: current, r31: __pa(KSP) */		;\
+	/* r12:	temp, syscall indicator, r13 temp */		;\
+	l.addi  r1,r1,-(INT_FRAME_SIZE)				;\
+	/* r1 is KSP, r31 is __pa(KSP) */			;\
+	tophys  (r31,r1)					;\
+	l.sw    PT_GPR12(r31),r12					;\
+	l.mfspr r12,r0,SPR_EPCR_BASE				;\
+	l.sw    PT_PC(r31),r12					;\
+	l.mfspr r12,r0,SPR_ESR_BASE				;\
+	l.sw    PT_SR(r31),r12					;\
+	/* save r31 */						;\
+	EXCEPTION_T_LOAD_GPR31(r12)				;\
+	l.sw	PT_GPR31(r31),r12					;\
+	/* save r10 as was prior to exception */		;\
+	EXCEPTION_T_LOAD_GPR10(r12)				;\
+	l.sw	PT_GPR10(r31),r12					;\
+	/* save PT_SP as was prior to exception */			;\
+	EXCEPTION_T_LOAD_SP(r12)				;\
+	l.sw	PT_SP(r31),r12					;\
+	l.sw    PT_GPR13(r31),r13					;\
+	/* --> */						;\
+	/* save exception r4, set r4 = EA */			;\
+	l.sw	PT_GPR4(r31),r4					;\
+	l.mfspr r4,r0,SPR_EEAR_BASE				;\
+	/* r12 == 1 if we come from syscall */			;\
+	CLEAR_GPR(r12)						;\
+	/* ----- play a MMU trick ----- */			;\
+	l.ori	r31,r0,(EXCEPTION_SR)				;\
+	l.mtspr	r0,r31,SPR_ESR_BASE				;\
+	/* r31:	EA address of handler */			;\
+	LOAD_SYMBOL_2_GPR(r31,handler)				;\
+	l.mtspr r0,r31,SPR_EPCR_BASE				;\
+	l.rfe
+
+/* =====================================================[ exceptions] === */
+
+/* ---[ 0x100: RESET exception ]----------------------------------------- */
+    .org 0x100
+	/* Jump to .init code at _start which lives in the .head section
+	 * and will be discarded after boot.
+	 */
+	LOAD_SYMBOL_2_GPR(r4, _start)
+	tophys	(r3,r4)			/* MMU disabled */
+	l.jr	r3
+	 l.nop
+
+/* ---[ 0x200: BUS exception ]------------------------------------------- */
+    .org 0x200
+_dispatch_bus_fault:
+	EXCEPTION_HANDLE(_bus_fault_handler)
+
+/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
+    .org 0x300
+_dispatch_do_dpage_fault:
+//      totally disable timer interrupt
+// 	l.mtspr	r0,r0,SPR_TTMR
+//	DEBUG_TLB_PROBE(0x300)
+//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
+	EXCEPTION_HANDLE(_data_page_fault_handler)
+
+/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
+    .org 0x400
+_dispatch_do_ipage_fault:
+//      totally disable timer interrupt
+//	l.mtspr	r0,r0,SPR_TTMR
+//	DEBUG_TLB_PROBE(0x400)
+//	EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
+	EXCEPTION_HANDLE(_insn_page_fault_handler)
+
+/* ---[ 0x500: Timer exception ]----------------------------------------- */
+    .org 0x500
+	EXCEPTION_HANDLE(_timer_handler)
+
+/* ---[ 0x600: Alignment exception ]------------------------------------- */
+    .org 0x600
+	EXCEPTION_HANDLE(_alignment_handler)
+
+/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
+    .org 0x700
+	EXCEPTION_HANDLE(_illegal_instruction_handler)
+
+/* ---[ 0x800: External interrupt exception ]---------------------------- */
+    .org 0x800
+	EXCEPTION_HANDLE(_external_irq_handler)
+
+/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
+    .org 0x900
+	l.j	boot_dtlb_miss_handler
+	l.nop
+
+/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
+    .org 0xa00
+	l.j	boot_itlb_miss_handler
+	l.nop
+
+/* ---[ 0xb00: Range exception ]----------------------------------------- */
+    .org 0xb00
+	UNHANDLED_EXCEPTION(_vector_0xb00)
+
+/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
+    .org 0xc00
+	EXCEPTION_HANDLE(_sys_call_handler)
+
+/* ---[ 0xd00: Trap exception ]------------------------------------------ */
+    .org 0xd00
+	UNHANDLED_EXCEPTION(_vector_0xd00)
+
+/* ---[ 0xe00: Trap exception ]------------------------------------------ */
+    .org 0xe00
+//	UNHANDLED_EXCEPTION(_vector_0xe00)
+	EXCEPTION_HANDLE(_trap_handler)
+
+/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
+    .org 0xf00
+	UNHANDLED_EXCEPTION(_vector_0xf00)
+
+/* ---[ 0x1000: Reserved exception ]------------------------------------- */
+    .org 0x1000
+	UNHANDLED_EXCEPTION(_vector_0x1000)
+
+/* ---[ 0x1100: Reserved exception ]------------------------------------- */
+    .org 0x1100
+	UNHANDLED_EXCEPTION(_vector_0x1100)
+
+/* ---[ 0x1200: Reserved exception ]------------------------------------- */
+    .org 0x1200
+	UNHANDLED_EXCEPTION(_vector_0x1200)
+
+/* ---[ 0x1300: Reserved exception ]------------------------------------- */
+    .org 0x1300
+	UNHANDLED_EXCEPTION(_vector_0x1300)
+
+/* ---[ 0x1400: Reserved exception ]------------------------------------- */
+    .org 0x1400
+	UNHANDLED_EXCEPTION(_vector_0x1400)
+
+/* ---[ 0x1500: Reserved exception ]------------------------------------- */
+    .org 0x1500
+	UNHANDLED_EXCEPTION(_vector_0x1500)
+
+/* ---[ 0x1600: Reserved exception ]------------------------------------- */
+    .org 0x1600
+	UNHANDLED_EXCEPTION(_vector_0x1600)
+
+/* ---[ 0x1700: Reserved exception ]------------------------------------- */
+    .org 0x1700
+	UNHANDLED_EXCEPTION(_vector_0x1700)
+
+/* ---[ 0x1800: Reserved exception ]------------------------------------- */
+    .org 0x1800
+	UNHANDLED_EXCEPTION(_vector_0x1800)
+
+/* ---[ 0x1900: Reserved exception ]------------------------------------- */
+    .org 0x1900
+	UNHANDLED_EXCEPTION(_vector_0x1900)
+
+/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
+    .org 0x1a00
+	UNHANDLED_EXCEPTION(_vector_0x1a00)
+
+/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
+    .org 0x1b00
+	UNHANDLED_EXCEPTION(_vector_0x1b00)
+
+/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
+    .org 0x1c00
+	UNHANDLED_EXCEPTION(_vector_0x1c00)
+
+/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
+    .org 0x1d00
+	UNHANDLED_EXCEPTION(_vector_0x1d00)
+
+/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
+    .org 0x1e00
+	UNHANDLED_EXCEPTION(_vector_0x1e00)
+
+/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
+    .org 0x1f00
+	UNHANDLED_EXCEPTION(_vector_0x1f00)
+
+    .org 0x2000
+/* ===================================================[ kernel start ]=== */
+
+/*    .text*/
+
+/* This early stuff belongs in HEAD, but some of the functions below definitely
+ * don't... */
+
+	__HEAD
+	.global _start
+_start:
+	/*
+	 * ensure a deterministic start
+	 */
+
+	l.ori	r3,r0,0x1
+	l.mtspr	r0,r3,SPR_SR
+
+	CLEAR_GPR(r1)
+	CLEAR_GPR(r2)
+	CLEAR_GPR(r3)
+	CLEAR_GPR(r4)
+	CLEAR_GPR(r5)
+	CLEAR_GPR(r6)
+	CLEAR_GPR(r7)
+	CLEAR_GPR(r8)
+	CLEAR_GPR(r9)
+	CLEAR_GPR(r10)
+	CLEAR_GPR(r11)
+	CLEAR_GPR(r12)
+	CLEAR_GPR(r13)
+	CLEAR_GPR(r14)
+	CLEAR_GPR(r15)
+	CLEAR_GPR(r16)
+	CLEAR_GPR(r17)
+	CLEAR_GPR(r18)
+	CLEAR_GPR(r19)
+	CLEAR_GPR(r20)
+	CLEAR_GPR(r21)
+	CLEAR_GPR(r22)
+	CLEAR_GPR(r23)
+	CLEAR_GPR(r24)
+	CLEAR_GPR(r25)
+	CLEAR_GPR(r26)
+	CLEAR_GPR(r27)
+	CLEAR_GPR(r28)
+	CLEAR_GPR(r29)
+	CLEAR_GPR(r30)
+	CLEAR_GPR(r31)
+
+	/*
+	 * set up initial ksp and current
+	 */
+	LOAD_SYMBOL_2_GPR(r1,init_thread_union+0x2000)	// setup kernel stack
+	LOAD_SYMBOL_2_GPR(r10,init_thread_union)	// setup current
+	tophys	(r31,r10)
+	l.sw	TI_KSP(r31), r1
+
+	l.ori	r4,r0,0x0
+
+
+	/*
+	 * .data contains initialized data,
+	 * .bss contains uninitialized data - clear it up
+	 */
+clear_bss:
+	LOAD_SYMBOL_2_GPR(r24, __bss_start)
+	LOAD_SYMBOL_2_GPR(r26, _end)
+	tophys(r28,r24)
+	tophys(r30,r26)
+	CLEAR_GPR(r24)
+	CLEAR_GPR(r26)
+1:
+	l.sw    (0)(r28),r0
+	l.sfltu r28,r30
+	l.bf    1b
+	l.addi  r28,r28,4
+
+enable_ic:
+	l.jal	_ic_enable
+	 l.nop
+
+enable_dc:
+	l.jal	_dc_enable
+	 l.nop
+
+flush_tlb:
+	/*
+	 *  I N V A L I D A T E   T L B   e n t r i e s
+	 */
+	LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
+	LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
+	l.addi	r7,r0,128 /* Maximum number of sets */
+1:
+	l.mtspr	r5,r0,0x0
+	l.mtspr	r6,r0,0x0
+
+	l.addi	r5,r5,1
+	l.addi	r6,r6,1
+	l.sfeq	r7,r0
+	l.bnf	1b
+	 l.addi	r7,r7,-1
+
+
+/* The MMU needs to be enabled before or32_early_setup is called */
+
+enable_mmu:
+	/*
+	 * enable dmmu & immu
+	 * SR[5] (DME) = 1, SR[6] (IME) = 1, i.e. the 6th and 7th bits of SR are set
+	 */
+	l.mfspr	r30,r0,SPR_SR
+	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
+	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
+	l.or	r30,r30,r28
+	l.mtspr	r0,r30,SPR_SR
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+
+	// reset the simulation counters
+	l.nop 5
+
+	LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
+	l.jalr r24
+	 l.nop
+
+clear_regs:
+	/*
+	 * clear all GPRS to increase determinism
+	 */
+	CLEAR_GPR(r2)
+	CLEAR_GPR(r3)
+	CLEAR_GPR(r4)
+	CLEAR_GPR(r5)
+	CLEAR_GPR(r6)
+	CLEAR_GPR(r7)
+	CLEAR_GPR(r8)
+	CLEAR_GPR(r9)
+	CLEAR_GPR(r11)
+	CLEAR_GPR(r12)
+	CLEAR_GPR(r13)
+	CLEAR_GPR(r14)
+	CLEAR_GPR(r15)
+	CLEAR_GPR(r16)
+	CLEAR_GPR(r17)
+	CLEAR_GPR(r18)
+	CLEAR_GPR(r19)
+	CLEAR_GPR(r20)
+	CLEAR_GPR(r21)
+	CLEAR_GPR(r22)
+	CLEAR_GPR(r23)
+	CLEAR_GPR(r24)
+	CLEAR_GPR(r25)
+	CLEAR_GPR(r26)
+	CLEAR_GPR(r27)
+	CLEAR_GPR(r28)
+	CLEAR_GPR(r29)
+	CLEAR_GPR(r30)
+	CLEAR_GPR(r31)
+
+jump_start_kernel:
+	/*
+	 * jump to kernel entry (start_kernel)
+	 */
+	LOAD_SYMBOL_2_GPR(r30, start_kernel)
+	l.jr    r30
+	 l.nop
+
+/* ========================================[ cache ]=== */
+
+	/* alignment here so we don't change memory offsets with
+	 * the memory controller defined
+	 */
+	.align 0x2000
+
+_ic_enable:
+	/* Check if IC present and skip enabling otherwise */
+	l.mfspr r24,r0,SPR_UPR
+	l.andi  r26,r24,SPR_UPR_ICP
+	l.sfeq  r26,r0
+	l.bf	9f
+	l.nop
+
+	/* Disable IC */
+	l.mfspr r6,r0,SPR_SR
+	l.addi  r5,r0,-1
+	l.xori  r5,r5,SPR_SR_ICE
+	l.and   r5,r6,r5
+	l.mtspr r0,r5,SPR_SR
+
+	/* Establish cache block size
+	   If BS=0, 16;
+	   If BS=1, 32;
+	   r14 contains the block size
+	*/
+	l.mfspr r24,r0,SPR_ICCFGR
+	l.andi	r26,r24,SPR_ICCFGR_CBS
+	l.srli	r28,r26,7
+	l.ori	r30,r0,16
+	l.sll	r14,r30,r28
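+	/* i.e. r14 = 16 << ICCFGR.CBS, giving a 16- or 32-byte block */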
+
+	/* Establish number of cache sets
+	   r16 contains number of cache sets
+	   r28 contains log(# of cache sets)
+	*/
+	l.andi  r26,r24,SPR_ICCFGR_NCS
+	l.srli 	r28,r26,3
+	l.ori   r30,r0,1
+	l.sll   r16,r30,r28
+
+	/* Invalidate IC */
+	l.addi  r6,r0,0
+	l.sll   r5,r14,r28
+//        l.mul   r5,r14,r16
+//	l.trap  1
+//	l.addi  r5,r0,IC_SIZE
+1:
+	l.mtspr r0,r6,SPR_ICBIR
+	l.sfne  r6,r5
+	l.bf    1b
+	l.add   r6,r6,r14
+ //       l.addi   r6,r6,IC_LINE
+
+	/* Enable IC */
+	l.mfspr r6,r0,SPR_SR
+	l.ori   r6,r6,SPR_SR_ICE
+	l.mtspr r0,r6,SPR_SR
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+	l.nop
+9:
+	l.jr    r9
+	l.nop
+
+_dc_enable:
+	/* Check if DC present and skip enabling otherwise */
+	l.mfspr r24,r0,SPR_UPR
+	l.andi  r26,r24,SPR_UPR_DCP
+	l.sfeq  r26,r0
+	l.bf	9f
+	l.nop
+
+	/* Disable DC */
+	l.mfspr r6,r0,SPR_SR
+	l.addi  r5,r0,-1
+	l.xori  r5,r5,SPR_SR_DCE
+	l.and   r5,r6,r5
+	l.mtspr r0,r5,SPR_SR
+
+	/* Establish cache block size
+	   If BS=0, 16;
+	   If BS=1, 32;
+	   r14 contains the block size
+	*/
+	l.mfspr r24,r0,SPR_DCCFGR
+	l.andi	r26,r24,SPR_DCCFGR_CBS
+	l.srli	r28,r26,7
+	l.ori	r30,r0,16
+	l.sll	r14,r30,r28
+
+	/* Establish number of cache sets
+	   r16 contains number of cache sets
+	   r28 contains log(# of cache sets)
+	*/
+	l.andi  r26,r24,SPR_DCCFGR_NCS
+	l.srli 	r28,r26,3
+	l.ori   r30,r0,1
+	l.sll   r16,r30,r28
+
+	/* Invalidate DC */
+	l.addi  r6,r0,0
+	l.sll   r5,r14,r28
+1:
+	l.mtspr r0,r6,SPR_DCBIR
+	l.sfne  r6,r5
+	l.bf    1b
+	l.add   r6,r6,r14
+
+	/* Enable DC */
+	l.mfspr r6,r0,SPR_SR
+	l.ori   r6,r6,SPR_SR_DCE
+	l.mtspr r0,r6,SPR_SR
+9:
+	l.jr    r9
+	l.nop
+
+/* ===============================================[ page table masks ]=== */
+
+/* bit 4 is used in hardware as write back cache bit. we never use this bit
+ * explicitly, so we can reuse it as _PAGE_FILE bit and mask it out when
+ * writing into hardware pte's
+ */
+
+#define DTLB_UP_CONVERT_MASK  0x3fa
+#define ITLB_UP_CONVERT_MASK  0x3a
+
+/* for SMP we'd have (this is a bit subtle, CC must be always set
+ * for SMP, but since we have _PAGE_PRESENT bit always defined
+ * we can just modify the mask)
+ */
+#define DTLB_SMP_CONVERT_MASK  0x3fb
+#define ITLB_SMP_CONVERT_MASK  0x3b
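+
+/* The point of the convert masks: the software pte bit layout is arranged
+ * so that it lines up with the hardware DTLBTR/ITLBTR bits, which lets the
+ * TLB miss handlers below turn a pte into a translate-register value with
+ * a single AND against (PAGE_MASK | DTLB_UP_CONVERT_MASK) or
+ * (PAGE_MASK | ITLB_UP_CONVERT_MASK).
+ */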
+
+/* ---[ boot dtlb miss handler ]----------------------------------------- */
+
+boot_dtlb_miss_handler:
+
+/* mask for DTLB_MR register: - (0) sets V (valid) bit,
+ *                            - (31-12) sets bits belonging to VPN (31-12)
+ */
+#define DTLB_MR_MASK 0xfffff001
+
+/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
+ *			      - (4) sets A (access) bit,
+ *                            - (5) sets D (dirty) bit,
+ *                            - (8) sets SRE (superuser read) bit
+ *                            - (9) sets SWE (superuser write) bit
+ *                            - (31-12) sets bits belonging to VPN (31-12)
+ */
+#define DTLB_TR_MASK 0xfffff332
+
+/* These are for masking out the VPN/PPN value from the MR/TR registers...
+ * it's not the same as the PFN */
+#define VPN_MASK 0xfffff000
+#define PPN_MASK 0xfffff000
+
+
+	EXCEPTION_STORE_GPR6
+
+#if 0
+	l.mfspr r6,r0,SPR_ESR_BASE	   //
+	l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
+	l.sfeqi r6,0                       // r6 == 0x1 --> SM
+	l.bf    exit_with_no_dtranslation  //
+	l.nop
+#endif
+
+	/* this could be optimized by moving storing of
+	 * non r6 registers here, and jumping r6 restore
+	 * if not in supervisor mode
+	 */
+
+	EXCEPTION_STORE_GPR2
+	EXCEPTION_STORE_GPR3
+	EXCEPTION_STORE_GPR4
+	EXCEPTION_STORE_GPR5
+
+	l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA
+
+immediate_translation:
+	CLEAR_GPR(r6)
+
+	l.srli	r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))
+
+	l.mfspr r6, r0, SPR_DMMUCFGR
+	l.andi	r6, r6, SPR_DMMUCFGR_NTS
+	l.srli	r6, r6, SPR_DMMUCFGR_NTS_OFF
+	l.ori	r5, r0, 0x1
+	l.sll	r5, r5, r6 	// r5 = number DMMU sets
+	l.addi	r6, r5, -1  	// r6 = nsets mask
+	l.and	r2, r3, r6	// r2 <- r3 % NSETS_MASK
+
+	l.or    r6,r6,r4                   // r6 <- r4
+	l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
+	l.movhi r5,hi(DTLB_MR_MASK)        // r5 <- ffff:0000.x000
+	l.ori   r5,r5,lo(DTLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
+	l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have DTLBMR entry
+	l.mtspr r2,r5,SPR_DTLBMR_BASE(0)   // set DTLBMR
+
+	/* set up DTLB with no translation for EA <= 0xbfffffff */
+	LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
+	l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0xbfffffff >= EA)
+	l.bf     1f                        // goto out
+	l.and    r3,r4,r4                  // delay slot :: r3 <- r4 (if flag==1)
+
+	tophys(r3,r4)                      // r3 <- PA
+1:
+	l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
+	l.movhi r5,hi(DTLB_TR_MASK)        // r5 <- ffff:0000.x000
+	l.ori   r5,r5,lo(DTLB_TR_MASK)     // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK
+	l.and   r5,r5,r3                   // r5 <- PPN :PPN .x330 - we have DTLBTR entry
+	l.mtspr r2,r5,SPR_DTLBTR_BASE(0)   // set DTLBTR
+
+	EXCEPTION_LOAD_GPR6
+	EXCEPTION_LOAD_GPR5
+	EXCEPTION_LOAD_GPR4
+	EXCEPTION_LOAD_GPR3
+	EXCEPTION_LOAD_GPR2
+
+	l.rfe                              // SR <- ESR, PC <- EPC
+
+exit_with_no_dtranslation:
+	/* EA out of memory or not in supervisor mode */
+	EXCEPTION_LOAD_GPR6
+	EXCEPTION_LOAD_GPR4
+	l.j	_dispatch_bus_fault
+
+/* ---[ boot itlb miss handler ]----------------------------------------- */
+
+boot_itlb_miss_handler:
+
+/* mask for ITLB_MR register: - sets V (valid) bit,
+ *                            - sets bits belonging to VPN (15-12)
+ */
+#define ITLB_MR_MASK 0xfffff001
+
+/* mask for ITLB_TR register: - sets A (access) bit,
+ *                            - sets SXE (superuser execute) bit
+ *                            - sets bits belonging to VPN (15-12)
+ */
+#define ITLB_TR_MASK 0xfffff050
+
+/*
+#define VPN_MASK 0xffffe000
+#define PPN_MASK 0xffffe000
+*/
+
+
+
+	EXCEPTION_STORE_GPR2
+	EXCEPTION_STORE_GPR3
+	EXCEPTION_STORE_GPR4
+	EXCEPTION_STORE_GPR5
+	EXCEPTION_STORE_GPR6
+
+#if 0
+	l.mfspr r6,r0,SPR_ESR_BASE         //
+	l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
+	l.sfeqi r6,0                       // r6 == 0x1 --> SM
+	l.bf    exit_with_no_itranslation
+	l.nop
+#endif
+
+
+	l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA
+
+earlyearly:
+	CLEAR_GPR(r6)
+
+	l.srli  r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))
+
+	l.mfspr r6, r0, SPR_IMMUCFGR
+	l.andi	r6, r6, SPR_IMMUCFGR_NTS
+	l.srli	r6, r6, SPR_IMMUCFGR_NTS_OFF
+	l.ori	r5, r0, 0x1
+	l.sll	r5, r5, r6 	// r5 = number IMMU sets from IMMUCFGR
+	l.addi	r6, r5, -1  	// r6 = nsets mask
+	l.and	r2, r3, r6	// r2 <- r3 % NSETS_MASK
+
+	l.or    r6,r6,r4                   // r6 <- r4
+	l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
+	l.movhi r5,hi(ITLB_MR_MASK)        // r5 <- ffff:0000.x000
+	l.ori   r5,r5,lo(ITLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
+	l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have ITLBMR entry
+	l.mtspr r2,r5,SPR_ITLBMR_BASE(0)   // set ITLBMR
+
+	/*
+	 * set up ITLB with no translation for EA <= 0x0fffffff
+	 *
+	 * we need this for head.S mapping (EA = PA). if we move all functions
+	 * which run with mmu enabled into entry.S, we might be able to eliminate this.
+	 *
+	 */
+	LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
+	l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0x0fffffff >= EA)
+	l.bf     1f                        // goto out
+	l.and    r3,r4,r4                  // delay slot :: r3 <- r4 (if flag==1)
+
+	tophys(r3,r4)                      // r3 <- PA
+1:
+	l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
+	l.movhi r5,hi(ITLB_TR_MASK)        // r5 <- ffff:0000.x000
+	l.ori   r5,r5,lo(ITLB_TR_MASK)     // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK
+	l.and   r5,r5,r3                   // r5 <- PPN :PPN .x050 - we have ITLBTR entry
+	l.mtspr r2,r5,SPR_ITLBTR_BASE(0)   // set ITLBTR
+
+	EXCEPTION_LOAD_GPR6
+	EXCEPTION_LOAD_GPR5
+	EXCEPTION_LOAD_GPR4
+	EXCEPTION_LOAD_GPR3
+	EXCEPTION_LOAD_GPR2
+
+	l.rfe                              // SR <- ESR, PC <- EPC
+
+exit_with_no_itranslation:
+	EXCEPTION_LOAD_GPR4
+	EXCEPTION_LOAD_GPR6
+	l.j    _dispatch_bus_fault
+	l.nop
+
+/* ====================================================================== */
+/*
+ * Stuff below here shouldn't go into .head section... maybe this stuff
+ * can be moved to entry.S ???
+ */
+
+/* ==============================================[ DTLB miss handler ]=== */
+
+/*
+ * Comments:
+ *   Exception handlers are entered with MMU off so the following handler
+ *   needs to use physical addressing
+ *
+ */
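+
+/* Roughly, in C (an illustrative sketch; the real helpers live in
+ * asm/pgtable.h):
+ *
+ *	vaddr = EEAR;
+ *	pmd = current_pgd + pgd_index(vaddr);
+ *	if (*pmd == 0)
+ *		goto page_fault;
+ *	pte = (pte_t *)(*pmd & PAGE_MASK) + pte_index(vaddr);
+ *	if (!(*pte & _PAGE_PRESENT))
+ *		goto page_fault;
+ *	set = (vaddr >> PAGE_SHIFT) & (number_of_DMMU_sets - 1);
+ *	DTLBTR(set) = *pte & (PAGE_MASK | DTLB_UP_CONVERT_MASK);
+ *	DTLBMR(set) = (vaddr & PAGE_MASK) | 0x1;	-- valid bit
+ *
+ * The ITLB miss handler further below is the same walk using the ITLB
+ * masks and registers, plus an execute-permission check.
+ */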
+
+	.text
+ENTRY(dtlb_miss_handler)
+	EXCEPTION_STORE_GPR2
+	EXCEPTION_STORE_GPR3
+	EXCEPTION_STORE_GPR4
+	EXCEPTION_STORE_GPR5
+	EXCEPTION_STORE_GPR6
+	/*
+	 * get EA of the miss
+	 */
+	l.mfspr	r2,r0,SPR_EEAR_BASE
+	/*
+	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
+	 */
+	GET_CURRENT_PGD(r3,r5)		// r3 is current_pgd, r5 is temp
+	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
+	l.slli	r4,r4,0x2		// to get address << 2
+	l.add	r5,r4,r3		// r4 is pgd_index(daddr)
+	/*
+	 * if (pmd_none(*pmd))
+	 *   goto pmd_none:
+	 */
+	tophys	(r4,r5)
+	l.lwz	r3,0x0(r4)		// get *pmd value
+	l.sfne	r3,r0
+	l.bnf	d_pmd_none
+	 l.andi	r3,r3,~PAGE_MASK //0x1fff		// ~PAGE_MASK
+	/*
+	 * if (pmd_bad(*pmd))
+	 *   pmd_clear(pmd)
+	 *   goto pmd_bad:
+	 */
+//	l.sfeq	r3,r0			// check *pmd value
+//	l.bf	d_pmd_good
+	l.addi	r3,r0,0xffffe000	// PAGE_MASK
+//	l.j	d_pmd_bad
+//	l.sw	0x0(r4),r0		// clear pmd
+d_pmd_good:
+	/*
+	 * pte = *pte_offset(pmd, daddr);
+	 */
+	l.lwz	r4,0x0(r4)		// get **pmd value
+	l.and	r4,r4,r3		// & PAGE_MASK
+	l.srli	r5,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
+	l.andi	r3,r5,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
+	l.slli	r3,r3,0x2		// to get address << 2
+	l.add	r3,r3,r4
+	l.lwz	r2,0x0(r3)		// this is pte at last
+	/*
+	 * if (!pte_present(pte))
+	 */
+	l.andi	r4,r2,0x1
+	l.sfne	r4,r0			// is pte present
+	l.bnf	d_pte_not_present
+	l.addi	r3,r0,0xffffe3fa	// PAGE_MASK | DTLB_UP_CONVERT_MASK
+	/*
+	 * fill DTLB TR register
+	 */
+	l.and	r4,r2,r3		// apply the mask
+	// Determine number of DMMU sets
+	l.mfspr r6, r0, SPR_DMMUCFGR
+	l.andi	r6, r6, SPR_DMMUCFGR_NTS
+	l.srli	r6, r6, SPR_DMMUCFGR_NTS_OFF
+	l.ori	r3, r0, 0x1
+	l.sll	r3, r3, r6 	// r3 = number DMMU sets DMMUCFGR
+	l.addi	r6, r3, -1  	// r6 = nsets mask
+	l.and	r5, r5, r6	// calc offset:	 & (NUM_TLB_ENTRIES-1)
+	                                                   //NUM_TLB_ENTRIES
+	l.mtspr	r5,r4,SPR_DTLBTR_BASE(0)
+	/*
+	 * fill DTLB MR register
+	 */
+	l.mfspr	r2,r0,SPR_EEAR_BASE
+	l.addi	r3,r0,0xffffe000	// PAGE_MASK
+	l.and	r4,r2,r3		// apply PAGE_MASK to EA (__PHX__ do we really need this?)
+	l.ori	r4,r4,0x1		// set hardware valid bit: DTLB_MR entry
+	l.mtspr	r5,r4,SPR_DTLBMR_BASE(0)
+
+	EXCEPTION_LOAD_GPR2
+	EXCEPTION_LOAD_GPR3
+	EXCEPTION_LOAD_GPR4
+	EXCEPTION_LOAD_GPR5
+	EXCEPTION_LOAD_GPR6
+	l.rfe
+d_pmd_bad:
+	l.nop	1
+	EXCEPTION_LOAD_GPR2
+	EXCEPTION_LOAD_GPR3
+	EXCEPTION_LOAD_GPR4
+	EXCEPTION_LOAD_GPR5
+	EXCEPTION_LOAD_GPR6
+	l.rfe
+d_pmd_none:
+d_pte_not_present:
+	EXCEPTION_LOAD_GPR2
+	EXCEPTION_LOAD_GPR3
+	EXCEPTION_LOAD_GPR4
+	EXCEPTION_LOAD_GPR5
+	EXCEPTION_LOAD_GPR6
+	l.j	_dispatch_do_dpage_fault
+	l.nop
+
+/* ==============================================[ ITLB miss handler ]=== */
+ENTRY(itlb_miss_handler)
+	EXCEPTION_STORE_GPR2
+	EXCEPTION_STORE_GPR3
+	EXCEPTION_STORE_GPR4
+	EXCEPTION_STORE_GPR5
+	EXCEPTION_STORE_GPR6
+	/*
+	 * get EA of the miss
+	 */
+	l.mfspr	r2,r0,SPR_EEAR_BASE
+
+	/*
+	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
+	 *
+	 */
+	GET_CURRENT_PGD(r3,r5)		// r3 is current_pgd, r5 is temp
+	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2)
+	l.slli	r4,r4,0x2		// to get address << 2
+	l.add	r5,r4,r3		// r4 is pgd_index(daddr)
+	/*
+	 * if (pmd_none(*pmd))
+	 *   goto pmd_none:
+	 */
+	tophys	(r4,r5)
+	l.lwz	r3,0x0(r4)		// get *pmd value
+	l.sfne	r3,r0
+	l.bnf	i_pmd_none
+	l.andi	r3,r3,0x1fff		// ~PAGE_MASK
+	/*
+	 * if (pmd_bad(*pmd))
+	 *   pmd_clear(pmd)
+	 *   goto pmd_bad:
+	 */
+
+//	l.sfeq	r3,r0			// check *pmd value
+//	l.bf	i_pmd_good
+	l.addi	r3,r0,0xffffe000	// PAGE_MASK
+//	l.j	i_pmd_bad
+//	l.sw	0x0(r4),r0		// clear pmd
+
+i_pmd_good:
+	/*
+	 * pte = *pte_offset(pmd, iaddr);
+	 *
+	 */
+	l.lwz	r4,0x0(r4)		// get **pmd value
+	l.and	r4,r4,r3		// & PAGE_MASK
+	l.srli	r5,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
+	l.andi	r3,r5,0x7ff		// (1UL << PAGE_SHIFT - 2) - 1
+	l.slli	r3,r3,0x2		// to get address << 2
+	l.add	r3,r3,r4
+	l.lwz	r2,0x0(r3)		// this is pte at last
+	/*
+	 * if (!pte_present(pte))
+	 *
+	 */
+	l.andi	r4,r2,0x1
+	l.sfne	r4,r0			// is pte present
+	l.bnf	i_pte_not_present
+	l.addi	r3,r0,0xffffe03a	// PAGE_MASK | ITLB_UP_CONVERT_MASK
+	/*
+	 * fill ITLB TR register
+	 */
+	l.and	r4,r2,r3		// apply the mask
+	l.andi	r3,r2,0x7c0		// _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE |  _PAGE_URE | _PAGE_UWE
+//	l.andi	r3,r2,0x400		// _PAGE_EXEC
+	l.sfeq	r3,r0
+	l.bf	itlb_tr_fill //_workaround
+	// Determine number of IMMU sets
+	l.mfspr r6, r0, SPR_IMMUCFGR
+	l.andi	r6, r6, SPR_IMMUCFGR_NTS
+	l.srli	r6, r6, SPR_IMMUCFGR_NTS_OFF
+	l.ori	r3, r0, 0x1
+	l.sll	r3, r3, r6 	// r3 = number IMMU sets IMMUCFGR
+	l.addi	r6, r3, -1  	// r6 = nsets mask
+	l.and	r5, r5, r6	// calc offset:	 & (NUM_TLB_ENTRIES-1)
+
+/*
+ * __PHX__ :: fixme
+ * we should not just blindly set executable flags,
+ * but it does help with ping. the clean way would be to find out
+ * (and fix it) why stack doesn't have execution permissions
+ */
+
+itlb_tr_fill_workaround:
+	l.ori	r4,r4,0xc0		// | (SPR_ITLBTR_UXE | ITLBTR_SXE)
+itlb_tr_fill:
+	l.mtspr	r5,r4,SPR_ITLBTR_BASE(0)
+	/*
+	 * fill ITLB MR register
+	 */
+	l.mfspr	r2,r0,SPR_EEAR_BASE
+	l.addi	r3,r0,0xffffe000	// PAGE_MASK
+	l.and	r4,r2,r3		// apply PAGE_MASK to EA (__PHX__ do we really need this?)
+	l.ori	r4,r4,0x1		// set hardware valid bit: ITLB_MR entry
+	l.mtspr	r5,r4,SPR_ITLBMR_BASE(0)
+
+	EXCEPTION_LOAD_GPR2
+	EXCEPTION_LOAD_GPR3
+	EXCEPTION_LOAD_GPR4
+	EXCEPTION_LOAD_GPR5
+	EXCEPTION_LOAD_GPR6
+	l.rfe
+
+i_pmd_bad:
+	l.nop	1
+	EXCEPTION_LOAD_GPR2
+	EXCEPTION_LOAD_GPR3
+	EXCEPTION_LOAD_GPR4
+	EXCEPTION_LOAD_GPR5
+	EXCEPTION_LOAD_GPR6
+	l.rfe
+i_pmd_none:
+i_pte_not_present:
+	EXCEPTION_LOAD_GPR2
+	EXCEPTION_LOAD_GPR3
+	EXCEPTION_LOAD_GPR4
+	EXCEPTION_LOAD_GPR5
+	EXCEPTION_LOAD_GPR6
+	l.j	_dispatch_do_ipage_fault
+	l.nop
+
+/* ==============================================[ boot tlb handlers ]=== */
+
+
+/* =================================================[ debugging aids ]=== */
+
+	.align 64
+_immu_trampoline:
+	.space 64
+_immu_trampoline_top:
+
+#define TRAMP_SLOT_0		(0x0)
+#define TRAMP_SLOT_1		(0x4)
+#define TRAMP_SLOT_2		(0x8)
+#define TRAMP_SLOT_3		(0xc)
+#define TRAMP_SLOT_4		(0x10)
+#define TRAMP_SLOT_5		(0x14)
+#define TRAMP_FRAME_SIZE	(0x18)
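+
+/* How the code below fills the trampoline in (a summary of the code, not a
+ * spec):
+ *   slots 0/1 - for l.jal/l.jalr, a synthesized l.movhi/l.ori pair that
+ *               rebuilds the return address (EEA + 4) in r9
+ *   slot 2    - the jump/branch copied from EEA - 4, patched with an
+ *               offset recalculated for its new position
+ *   slot 3    - the original instruction from EEA (the delay-slot insn)
+ *   slot 4    - for l.bf/l.bnf, an added jump back towards EEA + 8
+ *   slots 0, 1, 4 and 5 are pre-filled with 0x15000000 before the
+ *   per-opcode fixups are applied
+ */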
+
+ENTRY(_immu_trampoline_workaround)
+	// r2 EEA
+	// r6 is physical EEA
+	tophys(r6,r2)
+
+	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
+	tophys	(r3,r5)			// r3 is trampoline (physical)
+
+	LOAD_SYMBOL_2_GPR(r4,0x15000000)
+	l.sw	TRAMP_SLOT_0(r3),r4
+	l.sw	TRAMP_SLOT_1(r3),r4
+	l.sw	TRAMP_SLOT_4(r3),r4
+	l.sw	TRAMP_SLOT_5(r3),r4
+
+					// EPC = EEA - 0x4
+	l.lwz	r4,0x0(r6)		// load op @ EEA + 0x0 (fc address)
+	l.sw	TRAMP_SLOT_3(r3),r4	// store it to _immu_trampoline_data
+	l.lwz	r4,-0x4(r6)		// load op @ EEA - 0x4 (f8 address)
+	l.sw	TRAMP_SLOT_2(r3),r4	// store it to _immu_trampoline_data
+
+	l.srli  r5,r4,26                // check opcode for write access
+	l.sfeqi r5,0                    // l.j
+	l.bf    0f
+	l.sfeqi r5,0x11                 // l.jr
+	l.bf    1f
+	l.sfeqi r5,1                    // l.jal
+	l.bf    2f
+	l.sfeqi r5,0x12                 // l.jalr
+	l.bf    3f
+	l.sfeqi r5,3                    // l.bnf
+	l.bf    4f
+	l.sfeqi r5,4                    // l.bf
+	l.bf    5f
+99:
+	l.nop
+	l.j	99b			// should never happen
+	l.nop	1
+
+	// r2 is EEA
+	// r3 is trampoline address (physical)
+	// r4 is instruction
+	// r6 is physical(EEA)
+	//
+	// r5
+
+2:	// l.jal
+
+	/* 19 20 aa aa	l.movhi r9,0xaaaa
+	 * a9 29 bb bb  l.ori	r9,0xbbbb
+	 *
+	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
+	 */
+
+	l.addi	r6,r2,0x4		// this is 0xaaaabbbb
+
+					// l.movhi r9,0xaaaa
+	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
+	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
+	l.srli	r5,r6,16
+	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5
+
+					// l.ori   r9,0xbbbb
+	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
+	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
+	l.andi	r5,r6,0xffff
+	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5
+
+	/* fallthrough, need to set up new jump offset */
+
+
+0:	// l.j
+	l.slli	r6,r4,6			// original offset shifted left 6 - 2
+//	l.srli	r6,r6,6			// original offset shifted right 2
+
+	l.slli	r4,r2,4			// old jump position: EEA shifted left 4
+//	l.srli	r4,r4,6			// old jump position: shifted right 2
+
+	l.addi	r5,r3,0xc		// new jump position (physical)
+	l.slli	r5,r5,4			// new jump position: shifted left 4
+
+	// calculate new jump offset
+	// new_off = old_off + (old_jump - new_jump)
+
+	l.sub	r5,r4,r5		// old_jump - new_jump
+	l.add	r5,r6,r5		// orig_off + (old_jump - new_jump)
+	l.srli	r5,r5,6			// new offset shifted right 2
+
+	// r5 is new jump offset
+					// l.j has opcode 0x0...
+	l.sw	TRAMP_SLOT_2(r3),r5	// write it back
+
+	l.j	trampoline_out
+	l.nop
+
+/* ----------------------------- */
+
+3:	// l.jalr
+
+	/* 19 20 aa aa	l.movhi r9,0xaaaa
+	 * a9 29 bb bb  l.ori	r9,0xbbbb
+	 *
+	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
+	 */
+
+	l.addi	r6,r2,0x4		// this is 0xaaaabbbb
+
+					// l.movhi r9,0xaaaa
+	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
+	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
+	l.srli	r5,r6,16
+	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5
+
+					// l.ori   r9,0xbbbb
+	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
+	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
+	l.andi	r5,r6,0xffff
+	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5
+
+	l.lhz	r5,(TRAMP_SLOT_2+0x0)(r3)	// load hi part of jump instruction
+	l.andi	r5,r5,0x3ff		// clear out opcode part
+	l.ori	r5,r5,0x4400		// opcode changed from l.jalr -> l.jr
+	l.sh	(TRAMP_SLOT_2+0x0)(r3),r5 // write it back
+
+	/* fallthrough */
+
+1:	// l.jr
+	l.j	trampoline_out
+	l.nop
+
+/* ----------------------------- */
+
+4:	// l.bnf
+5:	// l.bf
+	l.slli	r6,r4,6			// original offset shifted left 6 - 2
+//	l.srli	r6,r6,6			// original offset shifted right 2
+
+	l.slli	r4,r2,4			// old jump position: EEA shifted left 4
+//	l.srli	r4,r4,6			// old jump position: shifted right 2
+
+	l.addi	r5,r3,0xc		// new jump position (physical)
+	l.slli	r5,r5,4			// new jump position: shifted left 4
+
+	// calculate new jump offset
+	// new_off = old_off + (old_jump - new_jump)
+
+	l.add	r6,r6,r4		// (orig_off + old_jump)
+	l.sub	r6,r6,r5		// (orig_off + old_jump) - new_jump
+	l.srli	r6,r6,6			// new offset shifted right 2
+
+	// r6 is new jump offset
+	l.lwz	r4,(TRAMP_SLOT_2+0x0)(r3)	// load jump instruction
+	l.srli	r4,r4,16
+	l.andi	r4,r4,0xfc00		// get opcode part
+	l.slli	r4,r4,16
+	l.or	r6,r4,r6		// l.b(n)f new offset
+	l.sw	TRAMP_SLOT_2(r3),r6	// write it back
+
+	/* we need to add l.j to EEA + 0x8 */
+	tophys	(r4,r2)			// may not be needed (due to the shifts below)
+	l.addi	r4,r4,(0x8 - 0x8)	// jump target = r2 + 0x8 (compensate for 0x8)
+					// jump position = r5 + 0x8 (0x8 compensated)
+	l.sub	r4,r4,r5		// jump offset = target - new_position + 0x8
+
+	l.slli	r4,r4,4			// the amount of info in the immediate of the jump
+	l.srli	r4,r4,6			// jump instruction with offset
+	l.sw	TRAMP_SLOT_4(r3),r4	// write it to 4th slot
+
+	/* fallthrough */
+
+trampoline_out:
+	// set up new EPC to point to our trampoline code
+	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
+	l.mtspr	r0,r5,SPR_EPCR_BASE
+
+	// immu_trampoline is (4x) CACHE_LINE aligned
+	// and only 6 instructions long,
+	// so we need to invalidate only 2 lines
+
+	/* Establish cache block size
+	   If BS=0, 16;
+	   If BS=1, 32;
+	   r14 contains the block size
+	*/
+	l.mfspr r21,r0,SPR_ICCFGR
+	l.andi	r21,r21,SPR_ICCFGR_CBS
+	l.srli	r21,r21,7
+	l.ori	r23,r0,16
+	l.sll	r14,r23,r21
+
+	l.mtspr	r0,r5,SPR_ICBIR
+	l.add	r5,r5,r14
+	l.mtspr	r0,r5,SPR_ICBIR
+
+	l.jr	r9
+	l.nop
+
+
+/*
+ * DSCR: prints a string referenced by r3.
+ *
+ * PRMS: r3     	- address of the first character of null
+ *			terminated string to be printed
+ *
+ * PREQ: UART at UART_BASE_ADD has to be initialized
+ *
+ * POST: caller should be aware that r3, r9 are changed
+ */
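+/* The putc loops below assume a 16550-compatible UART: offset 5 is the
+ * line status register, bit 0x20 means "transmit holding register empty",
+ * 0x60 additionally waits for "transmitter empty", and offset 0 is the
+ * transmit holding register itself.
+ */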
+ENTRY(_emergency_print)
+	EMERGENCY_PRINT_STORE_GPR4
+	EMERGENCY_PRINT_STORE_GPR5
+	EMERGENCY_PRINT_STORE_GPR6
+	EMERGENCY_PRINT_STORE_GPR7
+2:
+	l.lbz	r7,0(r3)
+	l.sfeq	r7,r0
+	l.bf	9f
+	l.nop
+
+// putc:
+	l.movhi r4,hi(UART_BASE_ADD)
+
+	l.addi  r6,r0,0x20
+1:      l.lbz   r5,5(r4)
+	l.andi  r5,r5,0x20
+	l.sfeq  r5,r6
+	l.bnf   1b
+	l.nop
+
+	l.sb    0(r4),r7
+
+	l.addi  r6,r0,0x60
+1:      l.lbz   r5,5(r4)
+	l.andi  r5,r5,0x60
+	l.sfeq  r5,r6
+	l.bnf   1b
+	l.nop
+
+	/* next character */
+	l.j	2b
+	l.addi	r3,r3,0x1
+
+9:
+	EMERGENCY_PRINT_LOAD_GPR7
+	EMERGENCY_PRINT_LOAD_GPR6
+	EMERGENCY_PRINT_LOAD_GPR5
+	EMERGENCY_PRINT_LOAD_GPR4
+	l.jr	r9
+	l.nop
+
+ENTRY(_emergency_print_nr)
+	EMERGENCY_PRINT_STORE_GPR4
+	EMERGENCY_PRINT_STORE_GPR5
+	EMERGENCY_PRINT_STORE_GPR6
+	EMERGENCY_PRINT_STORE_GPR7
+	EMERGENCY_PRINT_STORE_GPR8
+
+	l.addi	r8,r0,32		// shift register
+
+1:	/* remove leading zeros */
+	l.addi	r8,r8,-0x4
+	l.srl	r7,r3,r8
+	l.andi	r7,r7,0xf
+
+	/* don't skip the last zero if number == 0x0 */
+	l.sfeqi	r8,0x4
+	l.bf	2f
+	l.nop
+
+	l.sfeq	r7,r0
+	l.bf	1b
+	l.nop
+
+2:
+	l.srl	r7,r3,r8
+
+	l.andi	r7,r7,0xf
+	l.sflts	r8,r0
+	l.bf	9f
+
+	l.sfgtui r7,0x9
+	l.bnf	8f
+	l.nop
+	l.addi	r7,r7,0x27
+
+8:
+	l.addi	r7,r7,0x30
+// putc:
+	l.movhi r4,hi(UART_BASE_ADD)
+
+	l.addi  r6,r0,0x20
+1:      l.lbz   r5,5(r4)
+	l.andi  r5,r5,0x20
+	l.sfeq  r5,r6
+	l.bnf   1b
+	l.nop
+
+	l.sb    0(r4),r7
+
+	l.addi  r6,r0,0x60
+1:      l.lbz   r5,5(r4)
+	l.andi  r5,r5,0x60
+	l.sfeq  r5,r6
+	l.bnf   1b
+	l.nop
+
+	/* next character */
+	l.j	2b
+	l.addi	r8,r8,-0x4
+
+9:
+	EMERGENCY_PRINT_LOAD_GPR8
+	EMERGENCY_PRINT_LOAD_GPR7
+	EMERGENCY_PRINT_LOAD_GPR6
+	EMERGENCY_PRINT_LOAD_GPR5
+	EMERGENCY_PRINT_LOAD_GPR4
+	l.jr	r9
+	l.nop
+
+
+/*
+ * This should be used for debugging only.
+ * It messes up the Linux early serial output
+ * somehow, so use it sparingly and essentially
+ * only if you need to debug something that goes wrong
+ * before Linux gets the early serial going.
+ *
+ * Furthermore, you'll have to make sure you set the
+ * UART_DIVISOR correctly according to the system
+ * clock rate.
+ *
+ *
+ */
+
+
+
+#define SYS_CLK            20000000
+//#define SYS_CLK            1843200
+#define OR32_CONSOLE_BAUD  115200
+#define UART_DIVISOR       SYS_CLK/(16*OR32_CONSOLE_BAUD)
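+
+/* For example, with the 20 MHz SYS_CLK above:
+ *   UART_DIVISOR = 20000000 / (16 * 115200) = 10  (integer division)
+ * so _early_uart_init below programs the divisor latch with 10.  If the
+ * board's real clock differs, SYS_CLK must be changed to match or the
+ * resulting baud rate will be off.
+ */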
+
+ENTRY(_early_uart_init)
+	l.movhi	r3,hi(UART_BASE_ADD)
+
+	l.addi	r4,r0,0x7
+	l.sb	0x2(r3),r4
+
+	l.addi	r4,r0,0x0
+	l.sb	0x1(r3),r4
+
+	l.addi	r4,r0,0x3
+	l.sb	0x3(r3),r4
+
+	l.lbz	r5,3(r3)
+	l.ori	r4,r5,0x80
+	l.sb	0x3(r3),r4
+	l.addi	r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
+	l.sb	UART_DLM(r3),r4
+	l.addi  r4,r0,((UART_DIVISOR) & 0x000000ff)
+	l.sb	UART_DLL(r3),r4
+	l.sb	0x3(r3),r5
+
+	l.jr	r9
+	l.nop
+
+_string_copying_linux:
+	.string "\n\n\n\n\n\rCopying Linux... \0"
+
+_string_ok_booting:
+	.string "Ok, booting the kernel.\n\r\0"
+
+_string_unhandled_exception:
+	.string "\n\rRunarunaround: Unhandled exception 0x\0"
+
+_string_epc_prefix:
+	.string ": EPC=0x\0"
+
+_string_nl:
+	.string "\n\r\0"
+
+	.global	_string_esr_irq_bug
+_string_esr_irq_bug:
+	.string "\n\rESR external interrupt bug, for details look into entry.S\n\r\0"
+
+
+
+/* ========================================[ page aligned structures ]=== */
+
+/*
+ * .data section should be page aligned
+ *	(look into arch/or32/kernel/vmlinux.lds)
+ */
+	.section .data,"aw"
+	.align	8192
+	.global  empty_zero_page
+empty_zero_page:
+	.space  8192
+
+	.global  swapper_pg_dir
+swapper_pg_dir:
+	.space  8192
+
+	.global	_unhandled_stack
+_unhandled_stack:
+	.space	8192
+_unhandled_stack_top:
+
+/* ============================================================[ EOF ]=== */
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
new file mode 100644
index 000000000000..d5bc5f813e89
--- /dev/null
+++ b/arch/openrisc/kernel/idle.c
@@ -0,0 +1,77 @@
+/*
+ * OpenRISC idle.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ * Idle daemon for or32.  Idle daemon will handle any action
+ * that needs to be taken when the system becomes idle.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <asm/cache.h>
+#include <asm/pgalloc.h>
+
+void (*powersave) (void) = NULL;
+
+static inline void pm_idle(void)
+{
+	barrier();
+}
+
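+/*
+ * Rough outline of the loop below: while nothing is runnable we stay in
+ * polling mode; before actually going to sleep we drop TIF_POLLING_NRFLAG,
+ * disable interrupts, re-check need_resched() and only then invoke the
+ * optional powersave() hook.
+ */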
+void cpu_idle(void)
+{
+	set_thread_flag(TIF_POLLING_NRFLAG);
+
+	/* endless idle loop with no priority at all */
+	while (1) {
+		tick_nohz_stop_sched_tick(1);
+
+		while (!need_resched()) {
+			check_pgt_cache();
+			rmb();
+
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+
+			local_irq_disable();
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+			if (!need_resched() && powersave != NULL)
+				powersave();
+			start_critical_timings();
+			local_irq_enable();
+			set_thread_flag(TIF_POLLING_NRFLAG);
+		}
+
+		tick_nohz_restart_sched_tick();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+	}
+}
diff --git a/arch/openrisc/kernel/init_task.c b/arch/openrisc/kernel/init_task.c
new file mode 100644
index 000000000000..45744a384927
--- /dev/null
+++ b/arch/openrisc/kernel/init_task.c
@@ -0,0 +1,41 @@
+/*
+ * OpenRISC init_task.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is THREAD_SIZE aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union __init_task_data = {
+	INIT_THREAD_INFO(init_task)
+};
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c
new file mode 100644
index 000000000000..59b302338331
--- /dev/null
+++ b/arch/openrisc/kernel/irq.c
@@ -0,0 +1,172 @@
+/*
+ * OpenRISC irq.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/ftrace.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/kernel_stat.h>
+
+#include <linux/irqflags.h>
+
+/* read interrupt enabled status */
+unsigned long arch_local_save_flags(void)
+{
+	return mfspr(SPR_SR) & (SPR_SR_IEE|SPR_SR_TEE);
+}
+EXPORT_SYMBOL(arch_local_save_flags);
+
+/* set interrupt enabled status */
+void arch_local_irq_restore(unsigned long flags)
+{
+	mtspr(SPR_SR, ((mfspr(SPR_SR) & ~(SPR_SR_IEE|SPR_SR_TEE)) | flags));
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+/* OR1K PIC implementation */
+
+/* We're a couple of cycles faster than the generic implementations with
+ * these 'fast' versions.
+ */
+
+static void or1k_pic_mask(struct irq_data *data)
+{
+	mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->irq));
+}
+
+static void or1k_pic_unmask(struct irq_data *data)
+{
+	mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->irq));
+}
+
+static void or1k_pic_ack(struct irq_data *data)
+{
+	/* EDGE-triggered interrupts need to be ack'ed in order to clear
+	 * the latch.
+	 * LEVEL-triggered interrupts do not need to be ack'ed; however,
+	 * ack'ing the interrupt has no ill-effect and is quicker than
+	 * trying to figure out what type it is...
+	 */
+
+	/* The OpenRISC 1000 spec says to write a 1 to the bit to ack the
+	 * interrupt, but the OR1200 does this backwards and requires a 0
+	 * to be written...
+	 */
+
+#ifdef CONFIG_OR1K_1200
+	/* There are two oddities with the OR1200 PIC implementation:
+	 * i)  LEVEL-triggered interrupts are latched and need to be cleared
+	 * ii) the interrupt latch is cleared by writing a 0 to the bit,
+	 *     as opposed to a 1 as mandated by the spec
+	 */
+
+	mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->irq));
+#else
+	WARN(1, "Interrupt handling possibly broken\n");
+	mtspr(SPR_PICSR, (1UL << data->irq));
+#endif
+}
+
+static void or1k_pic_mask_ack(struct irq_data *data)
+{
+	/* Comments for pic_ack apply here, too */
+
+#ifdef CONFIG_OR1K_1200
+	mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->irq));
+#else
+	WARN(1, "Interrupt handling possibly broken\n");
+	mtspr(SPR_PICSR, (1UL << data->irq));
+#endif
+}
+
+static int or1k_pic_set_type(struct irq_data *data, unsigned int flow_type)
+{
+	/* There's nothing to do in the PIC configuration when changing
+	 * flow type.  Level and edge-triggered interrupts are both
+	 * supported, but it's PIC-implementation specific which type
+	 * is handled. */
+
+	return irq_setup_alt_chip(data, flow_type);
+}
+
+static inline int pic_get_irq(int first)
+{
+	int irq;
+
+	irq = ffs(mfspr(SPR_PICSR) >> first);
+
+	return irq ? irq + first - 1 : NO_IRQ;
+}
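+/* Worked example for pic_get_irq() above (values assumed): with
+ * PICSR == 0x14 (bits 2 and 4 set), pic_get_irq(0) returns 2, a
+ * subsequent pic_get_irq(3) returns 4, and pic_get_irq(5) returns NO_IRQ,
+ * which is exactly how do_IRQ() below walks the pending sources.
+ */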
+
+static void __init or1k_irq_init(void)
+{
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+
+	/* Disable all interrupts until explicitly requested */
+	mtspr(SPR_PICMR, (0UL));
+
+	gc = irq_alloc_generic_chip("or1k-PIC", 1, 0, 0, handle_level_irq);
+	ct = gc->chip_types;
+
+	ct->chip.irq_unmask = or1k_pic_unmask;
+	ct->chip.irq_mask = or1k_pic_mask;
+	ct->chip.irq_ack = or1k_pic_ack;
+	ct->chip.irq_mask_ack = or1k_pic_mask_ack;
+	ct->chip.irq_set_type = or1k_pic_set_type;
+
+	/* The OR1K PIC can handle both level and edge-triggered
+	 * interrupts in roughly the same manner
+	 */
+#if 0
+	/* FIXME: chip.type??? */
+	ct->chip.type = IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_MASK;
+#endif
+
+	irq_setup_generic_chip(gc, IRQ_MSK(NR_IRQS), 0,
+			       IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+}
+
+void __init init_IRQ(void)
+{
+	or1k_irq_init();
+}
+
+void __irq_entry do_IRQ(struct pt_regs *regs)
+{
+	int irq = -1;
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+
+	while ((irq = pic_get_irq(irq + 1)) != NO_IRQ)
+		generic_handle_irq(irq);
+
+	irq_exit();
+	set_irq_regs(old_regs);
+}
+
+unsigned int irq_create_of_mapping(struct device_node *controller,
+				   const u32 *intspec, unsigned int intsize)
+{
+	return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/openrisc/kernel/module.c b/arch/openrisc/kernel/module.c
new file mode 100644
index 000000000000..10ff50f0202a
--- /dev/null
+++ b/arch/openrisc/kernel/module.c
@@ -0,0 +1,72 @@
+/*
+ * OpenRISC module.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *me)
+{
+	unsigned int i;
+	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+	Elf32_Sym *sym;
+	uint32_t *location;
+	uint32_t value;
+
+	pr_debug("Applying relocate section %u to %u\n", relsec,
+		 sechdrs[relsec].sh_info);
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rel[i].r_offset;
+
+		/* This is the symbol it is referring to.  Note that all
+		   undefined symbols have been resolved.  */
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rel[i].r_info);
+		value = sym->st_value + rel[i].r_addend;
+
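+		/* Illustration (numbers assumed): for R_OR32_JUMPTARG a
+		 * branch at location 0x1000 whose target resolves to 0x1010
+		 * stores (0x1010 - 0x1000) >> 2 = 4 in the low 26 bits of
+		 * the instruction, while the opcode bits selected by
+		 * 0xfc000000 are preserved.
+		 */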
+		switch (ELF32_R_TYPE(rel[i].r_info)) {
+		case R_OR32_32:
+			*location = value;
+			break;
+		case R_OR32_CONST:
+			/* patch only the low halfword of the instruction */
+			*((uint16_t *)location + 1) = (uint16_t) value;
+			break;
+		case R_OR32_CONSTH:
+			*((uint16_t *)location + 1) = (uint16_t) (value >> 16);
+			break;
+		case R_OR32_JUMPTARG:
+			value -= (uint32_t)location;
+			value >>= 2;
+			value &= 0x03ffffff;
+			value |= *location & 0xfc000000;
+			*location = value;
+			break;
+		default:
+			pr_err("module %s: Unknown relocation: %u\n",
+			       me->name, ELF32_R_TYPE(rel[i].r_info));
+			break;
+		}
+	}
+
+	return 0;
+}
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
new file mode 100644
index 000000000000..83ccf7c0c58d
--- /dev/null
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -0,0 +1,46 @@
+/*
+ * OpenRISC or32_ksyms.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/elfcore.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/semaphore.h>
+
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+#include <asm/delay.h>
+#include <asm/pgalloc.h>
+
+#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
+
+/* compiler generated symbols */
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__divsi3);
+DECLARE_EXPORT(__umodsi3);
+DECLARE_EXPORT(__modsi3);
+DECLARE_EXPORT(__muldi3);
+DECLARE_EXPORT(__ashrdi3);
+DECLARE_EXPORT(__ashldi3);
+DECLARE_EXPORT(__lshrdi3);
+
+EXPORT_SYMBOL(__copy_tofrom_user);
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
new file mode 100644
index 000000000000..e4209af879ec
--- /dev/null
+++ b/arch/openrisc/kernel/process.c
@@ -0,0 +1,311 @@
+/*
+ * OpenRISC process.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ * This file handles the architecture-dependent parts of process handling...
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/elfcore.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+#include <linux/fs.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/spr_defs.h>
+
+#include <linux/smp.h>
+
+/*
+ * Pointer to the current thread_info structure (one entry per CPU).
+ *
+ * Used at user space -> kernel transitions.
+ */
+struct thread_info *current_thread_info_set[NR_CPUS] = { &init_thread_info, };
+
+void machine_restart(void)
+{
+	printk(KERN_INFO "*** MACHINE RESTART ***\n");
+	__asm__("l.nop 1");
+}
+
+/*
+ * Similar to machine_power_off, but doesn't shut off power.  Add code
+ * here to freeze the system for e.g. post-mortem debugging purposes when
+ * possible.  This halt has nothing to do with the idle halt.
+ */
+void machine_halt(void)
+{
+	printk(KERN_INFO "*** MACHINE HALT ***\n");
+	__asm__("l.nop 1");
+}
+
+/* If or when software power-off is implemented, add code here.  */
+void machine_power_off(void)
+{
+	printk(KERN_INFO "*** MACHINE POWER OFF ***\n");
+	__asm__("l.nop 1");
+}
+
+void (*pm_power_off) (void) = machine_power_off;
+
+/*
+ * When a process does an "exec", machine state like FPU and debug
+ * registers need to be reset.  This is a hook function for that.
+ * Currently we don't have any such state to reset, so this is empty.
+ */
+void flush_thread(void)
+{
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	extern void show_registers(struct pt_regs *regs);
+
+	/* __PHX__ cleanup this mess */
+	show_registers(regs);
+}
+
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+	return (unsigned long)user_regs(t->stack)->pc;
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+/*
+ * Copy the thread-specific (arch specific) info from the current
+ * process to the new one p
+ */
+extern asmlinkage void ret_from_fork(void);
+
+int
+copy_thread(unsigned long clone_flags, unsigned long usp,
+	    unsigned long unused, struct task_struct *p, struct pt_regs *regs)
+{
+	struct pt_regs *childregs;
+	struct pt_regs *kregs;
+	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+	struct thread_info *ti;
+	unsigned long top_of_kernel_stack;
+
+	top_of_kernel_stack = sp;
+
+	p->set_child_tid = p->clear_child_tid = NULL;
+
+	/* Copy registers */
+	/* redzone */
+	sp -= STACK_FRAME_OVERHEAD;
+	sp -= sizeof(struct pt_regs);
+	childregs = (struct pt_regs *)sp;
+
+	/* Copy parent registers */
+	*childregs = *regs;
+
+	if ((childregs->sr & SPR_SR_SM) == 1) {
+		/* for kernel thread, set `current_thread_info'
+		 * and stackptr in new task
+		 */
+		childregs->sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+		childregs->gpr[10] = (unsigned long)task_thread_info(p);
+	} else {
+		childregs->sp = usp;
+	}
+
+	childregs->gpr[11] = 0;	/* Result from fork() */
+
+	/*
+	 * The way this works is that at some point in the future
+	 * some task will call _switch to switch to the new task.
+	 * That will pop off the stack frame created below and start
+	 * the new task running at ret_from_fork.  The new task will
+	 * do some house keeping and then return from the fork or clone
+	 * system call, using the stack frame created above.
+	 */
+	/* redzone */
+	sp -= STACK_FRAME_OVERHEAD;
+	sp -= sizeof(struct pt_regs);
+	kregs = (struct pt_regs *)sp;
+
+	ti = task_thread_info(p);
+	ti->ksp = sp;
+
+	/* kregs->sp must store the location of the 'pre-switch' kernel stack
+	 * pointer... for a newly forked process, this is simply the top of
+	 * the kernel stack.
+	 */
+	kregs->sp = top_of_kernel_stack;
+	kregs->gpr[3] = (unsigned long)current;	/* arg to schedule_tail */
+	kregs->gpr[10] = (unsigned long)task_thread_info(p);
+	kregs->gpr[9] = (unsigned long)ret_from_fork;
+
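+	/* Sketch of the kernel stack this leaves behind (top of the stack
+	 * first; purely descriptive):
+	 *
+	 *	top_of_kernel_stack
+	 *	[ STACK_FRAME_OVERHEAD redzone ]
+	 *	[ childregs (struct pt_regs)   ]  user context copied from parent
+	 *	[ STACK_FRAME_OVERHEAD redzone ]
+	 *	[ kregs (struct pt_regs)       ]  ti->ksp points here; consumed
+	 *					  by _switch()/ret_from_fork
+	 */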
+	return 0;
+}
+
+/*
+ * Set up a thread for executing a new program
+ */
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+	unsigned long sr = regs->sr & ~SPR_SR_SM;
+
+	set_fs(USER_DS);
+	memset(regs->gpr, 0, sizeof(regs->gpr));
+
+	regs->pc = pc;
+	regs->sr = sr;
+	regs->sp = sp;
+
+/*	printk("start thread, ksp = %lx\n", current_thread_info()->ksp);*/
+}
+
+/* Fill in the fpu structure for a core dump.  */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu)
+{
+	/* TODO */
+	return 0;
+}
+
+extern struct thread_info *_switch(struct thread_info *old_ti,
+				   struct thread_info *new_ti);
+
+struct task_struct *__switch_to(struct task_struct *old,
+				struct task_struct *new)
+{
+	struct task_struct *last;
+	struct thread_info *new_ti, *old_ti;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	/* current_thread_info_set is an array of saved thread_info pointers
+	 * (one for each cpu).  We need them at user->kernel transitions,
+	 * while we save them at kernel->user transitions.
+	 */
+	new_ti = new->stack;
+	old_ti = old->stack;
+
+	current_thread_info_set[smp_processor_id()] = new_ti;
+	last = (_switch(old_ti, new_ti))->task;
+
+	local_irq_restore(flags);
+
+	return last;
+}
+
+/*
+ * Write out registers in core dump format, as defined by the
+ * struct user_regs_struct
+ */
+void dump_elf_thread(elf_greg_t *dest, struct pt_regs* regs)
+{
+	dest[0] = 0; /* r0 */
+	memcpy(dest+1, regs->gpr+1, 31*sizeof(unsigned long));
+	dest[32] = regs->pc;
+	dest[33] = regs->sr;
+	dest[34] = 0;
+	dest[35] = 0;
+}
+
+extern void _kernel_thread_helper(void);
+
+void __noreturn kernel_thread_helper(int (*fn) (void *), void *arg)
+{
+	do_exit(fn(arg));
+}
+
+/*
+ * Create a kernel thread.
+ */
+int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
+{
+	struct pt_regs regs;
+
+	memset(&regs, 0, sizeof(regs));
+
+	regs.gpr[20] = (unsigned long)fn;
+	regs.gpr[22] = (unsigned long)arg;
+	regs.sr = mfspr(SPR_SR);
+	regs.pc = (unsigned long)_kernel_thread_helper;
+
+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
+		       0, &regs, 0, NULL, NULL);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage long _sys_execve(const char __user *name,
+			    const char __user * const __user *argv,
+			    const char __user * const __user *envp,
+			    struct pt_regs *regs)
+{
+	int error;
+	char *filename;
+
+	filename = getname(name);
+	error = PTR_ERR(filename);
+
+	if (IS_ERR(filename))
+		goto out;
+
+	error = do_execve(filename, argv, envp, regs);
+	putname(filename);
+
+out:
+	return error;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	/* TODO */
+
+	return 0;
+}
+
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+{
+	register long __res asm("r11") = __NR_execve;
+	register long __a asm("r3") = (long)(filename);
+	register long __b asm("r4") = (long)(argv);
+	register long __c asm("r5") = (long)(envp);
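+	/* This appears to follow the OpenRISC syscall convention: r11 holds
+	 * the syscall number and the return value, r3-r5 carry the first
+	 * three arguments, and "l.sys 1" raises the system call exception;
+	 * the remaining caller-clobbered registers are listed below.
+	 */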
+	__asm__ volatile ("l.sys 1"
+			  : "=r" (__res), "=r"(__a), "=r"(__b), "=r"(__c)
+			  : "0"(__res), "1"(__a), "2"(__b), "3"(__c)
+			  : "r6", "r7", "r8", "r12", "r13", "r15",
+			    "r17", "r19", "r21", "r23", "r25", "r27",
+			    "r29", "r31");
+	__asm__ volatile ("l.nop");
+	return __res;
+}
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
new file mode 100644
index 000000000000..1bb58ba89afa
--- /dev/null
+++ b/arch/openrisc/kernel/prom.c
@@ -0,0 +1,108 @@
+/*
+ * OpenRISC prom.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ * Architecture specific procedures for creating, accessing and
+ * interpreting the device tree.
+ *
+ */
+
+#include <stdarg.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/initrd.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/debugfs.h>
+#include <linux/irq.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+
+#include <asm/prom.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <linux/io.h>
+#include <asm/system.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+extern char cmd_line[COMMAND_LINE_SIZE];
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+	size &= PAGE_MASK;
+	memblock_add(base, size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+	return __va(memblock_alloc(size, align));
+}
+
+void __init early_init_devtree(void *params)
+{
+	void *alloc;
+
+	/* Setup flat device-tree pointer */
+	initial_boot_params = params;
+
+
+	/* Retrieve various information from the /chosen node of the
+	 * device-tree, including the platform type, initrd location and
+	 * size, TCE reserve, and more ...
+	 */
+	of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
+
+	/* Scan memory nodes and rebuild MEMBLOCKs */
+	memblock_init();
+	of_scan_flat_dt(early_init_dt_scan_root, NULL);
+	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+	/* Save command line for /proc/cmdline and then parse parameters */
+	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
+
+	memblock_analyze();
+
+	/* We must copy the flattened device tree from init memory to regular
+	 * memory because the device tree references the strings in it
+	 * directly.
+	 */
+
+	alloc = __va(memblock_alloc(initial_boot_params->totalsize, PAGE_SIZE));
+
+	memcpy(alloc, initial_boot_params, initial_boot_params->totalsize);
+
+	initial_boot_params = alloc;
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+		unsigned long end)
+{
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
+}
+#endif
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c
new file mode 100644
index 000000000000..656b94beab89
--- /dev/null
+++ b/arch/openrisc/kernel/ptrace.c
@@ -0,0 +1,211 @@
+/*
+ * OpenRISC ptrace.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <stddef.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/audit.h>
+#include <linux/regset.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+
+#include <asm/thread_info.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+/*
+ * Copy the thread state to a regset that can be interpreted by userspace.
+ *
+ * It doesn't matter what our internal pt_regs structure looks like.  The
+ * important thing is that we export a consistent view of the thread state
+ * to userspace.  As such, we need to make sure that the regset remains
+ * ABI compatible as defined by the struct user_regs_struct:
+ *
+ * (Each item is a 32-bit word)
+ * r0 = 0 (exported for clarity)
+ * 31 GPRS r1-r31
+ * PC (Program counter)
+ * SR (Supervision register)
+ */
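+/*
+ * In byte offsets that layout means r1..r31 occupy offsets 4..124, the PC
+ * sits at 128 (4*32) and SR at 132 (4*33), which is what the copyout and
+ * copyin ranges below encode.
+ */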
+static int genregs_get(struct task_struct *target,
+		       const struct user_regset *regset,
+		       unsigned int pos, unsigned int count,
+		       void *kbuf, void __user * ubuf)
+{
+	const struct pt_regs *regs = task_pt_regs(target);
+	int ret;
+
+	/* r0 */
+	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 0, 4);
+
+	if (!ret)
+		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					  regs->gpr+1, 4, 4*32);
+	if (!ret)
+		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				  &regs->pc, 4*32, 4*33);
+	if (!ret)
+		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					  &regs->sr, 4*33, 4*34);
+	if (!ret)
+		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+					       4*34, -1);
+
+	return ret;
+}
+
+/*
+ * Set the thread state from a regset passed in via ptrace
+ */
+static int genregs_set(struct task_struct *target,
+		       const struct user_regset *regset,
+		       unsigned int pos, unsigned int count,
+		       const void *kbuf, const void __user * ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	int ret;
+
+	/* ignore r0 */
+	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4);
+	/* r1 - r31 */
+	if (!ret)
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 regs->gpr+1, 4, 4*32);
+	/* PC */
+	if (!ret)
+		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &regs->pc, 4*32, 4*33);
+	/*
+	 * Skip SR and padding... userspace isn't allowed to change bits in
+	 * the Supervision register
+	 */
+	if (!ret)
+		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+						4*33, -1);
+
+	return ret;
+}
+
+/*
+ * Define the register sets available on OpenRISC under Linux
+ */
+enum or1k_regset {
+	REGSET_GENERAL,
+};
+
+static const struct user_regset or1k_regsets[] = {
+	[REGSET_GENERAL] = {
+			    .core_note_type = NT_PRSTATUS,
+			    .n = ELF_NGREG,
+			    .size = sizeof(long),
+			    .align = sizeof(long),
+			    .get = genregs_get,
+			    .set = genregs_set,
+			    },
+};
+
+static const struct user_regset_view user_or1k_native_view = {
+	.name = "or1k",
+	.e_machine = EM_OPENRISC,
+	.regsets = or1k_regsets,
+	.n = ARRAY_SIZE(or1k_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+	return &user_or1k_native_view;
+}
+
+/*
+ * We do not yet catch signals sent when the child dies
+ * (from exit.c or signal.c).
+ */
+
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ *
+ * Make sure the single step bit is not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+	pr_debug("ptrace_disable(): TODO\n");
+
+	user_disable_single_step(child);
+	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+}
+
+long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
+		 unsigned long data)
+{
+	int ret;
+
+	switch (request) {
+	default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
+{
+	long ret = 0;
+
+	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+	    tracehook_report_syscall_entry(regs))
+		/*
+		 * Tracing decided this syscall should not happen.
+		 * We'll return a bogus call number to get an ENOSYS
+		 * error, but leave the original number in <something>.
+		 */
+		ret = -1L;
+
+	/* Are these regs right??? */
+	if (unlikely(current->audit_context))
+		audit_syscall_entry(audit_arch(), regs->syscallno,
+				    regs->gpr[3], regs->gpr[4],
+				    regs->gpr[5], regs->gpr[6]);
+
+	return ret ? : regs->syscallno;
+}
+
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
+{
+	int step;
+
+	if (unlikely(current->audit_context))
+		audit_syscall_exit(AUDITSC_RESULT(regs->gpr[11]),
+				   regs->gpr[11]);
+
+	step = test_thread_flag(TIF_SINGLESTEP);
+	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(regs, step);
+}
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
new file mode 100644
index 000000000000..1422f747f52b
--- /dev/null
+++ b/arch/openrisc/kernel/setup.c
@@ -0,0 +1,381 @@
+/*
+ * OpenRISC setup.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/serial.h>
+#include <linux/initrd.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+#include <linux/memblock.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/types.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/cpuinfo.h>
+#include <asm/delay.h>
+
+#include "vmlinux.h"
+
+char __initdata cmd_line[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+
+static unsigned long __init setup_memory(void)
+{
+	unsigned long bootmap_size;
+	unsigned long ram_start_pfn;
+	unsigned long free_ram_start_pfn;
+	unsigned long ram_end_pfn;
+	phys_addr_t memory_start, memory_end;
+	struct memblock_region *region;
+
+	memory_end = memory_start = 0;
+
+	/* Find main memory where is the kernel */
+	for_each_memblock(memory, region) {
+		memory_start = region->base;
+		memory_end = region->base + region->size;
+		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
+		       memory_start, memory_end);
+	}
+
+	if (!memory_end) {
+		panic("No memory!");
+	}
+
+	ram_start_pfn = PFN_UP(memory_start);
+	/* free_ram_start_pfn is first page after kernel */
+	free_ram_start_pfn = PFN_UP(__pa(&_end));
+	ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
+
+	max_pfn = ram_end_pfn;
+
+	/*
+	 * initialize the boot-time allocator (with low memory only).
+	 *
+	 * This makes the memory from the end of the kernel to the end of
+	 * RAM usable.
+	 * init_bootmem sets the global values min_low_pfn, max_low_pfn.
+	 */
+	bootmap_size = init_bootmem(free_ram_start_pfn,
+				    ram_end_pfn - ram_start_pfn);
+	free_bootmem(PFN_PHYS(free_ram_start_pfn),
+		     (ram_end_pfn - free_ram_start_pfn) << PAGE_SHIFT);
+	reserve_bootmem(PFN_PHYS(free_ram_start_pfn), bootmap_size,
+			BOOTMEM_DEFAULT);
+
+	for_each_memblock(reserved, region) {
+		printk(KERN_INFO "Reserved - 0x%08x-0x%08x\n",
+		       (u32) region->base, (u32) region->size);
+		reserve_bootmem(region->base, region->size, BOOTMEM_DEFAULT);
+	}
+
+	return ram_end_pfn;
+}
+
+struct cpuinfo cpuinfo;
+
+static void print_cpuinfo(void)
+{
+	unsigned long upr = mfspr(SPR_UPR);
+	unsigned long vr = mfspr(SPR_VR);
+	unsigned int version;
+	unsigned int revision;
+
+	version = (vr & SPR_VR_VER) >> 24;
+	revision = (vr & SPR_VR_REV);
+
+	printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n",
+	       version, revision, cpuinfo.clock_frequency / 1000000);
+
+	if (!(upr & SPR_UPR_UP)) {
+		printk(KERN_INFO
+		       "-- no UPR register... unable to detect configuration\n");
+		return;
+	}
+
+	if (upr & SPR_UPR_DCP)
+		printk(KERN_INFO
+		       "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n",
+		       cpuinfo.dcache_size, cpuinfo.dcache_block_size, 1);
+	else
+		printk(KERN_INFO "-- dcache disabled\n");
+	if (upr & SPR_UPR_ICP)
+		printk(KERN_INFO
+		       "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n",
+		       cpuinfo.icache_size, cpuinfo.icache_block_size, 1);
+	else
+		printk(KERN_INFO "-- icache disabled\n");
+
+	if (upr & SPR_UPR_DMP)
+		printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n",
+		       1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
+		       1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
+	if (upr & SPR_UPR_IMP)
+		printk(KERN_INFO "-- immu: %4d entries, %lu way(s)\n",
+		       1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
+		       1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW));
+
+	printk(KERN_INFO "-- additional features:\n");
+	if (upr & SPR_UPR_DUP)
+		printk(KERN_INFO "-- debug unit\n");
+	if (upr & SPR_UPR_PCUP)
+		printk(KERN_INFO "-- performance counters\n");
+	if (upr & SPR_UPR_PMP)
+		printk(KERN_INFO "-- power management\n");
+	if (upr & SPR_UPR_PICP)
+		printk(KERN_INFO "-- PIC\n");
+	if (upr & SPR_UPR_TTP)
+		printk(KERN_INFO "-- timer\n");
+	if (upr & SPR_UPR_CUP)
+		printk(KERN_INFO "-- custom unit(s)\n");
+}
+
+void __init setup_cpuinfo(void)
+{
+	struct device_node *cpu;
+	unsigned long iccfgr, dccfgr;
+	unsigned long cache_set_size, cache_ways;
+
+	cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
+	if (!cpu)
+		panic("No compatible CPU found in device tree...\n");
+
+	iccfgr = mfspr(SPR_ICCFGR);
+	cache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
+	cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
+	cpuinfo.icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
+	cpuinfo.icache_size =
+	    cache_set_size * cache_ways * cpuinfo.icache_block_size;
+
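+	/* Example of the arithmetic above (geometry assumed): 512 sets *
+	 * 1 way * 16-byte lines gives an 8192-byte instruction cache.
+	 */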
+	dccfgr = mfspr(SPR_DCCFGR);
+	cache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
+	cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
+	cpuinfo.dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
+	cpuinfo.dcache_size =
+	    cache_set_size * cache_ways * cpuinfo.dcache_block_size;
+
+	if (of_property_read_u32(cpu, "clock-frequency",
+				 &cpuinfo.clock_frequency)) {
+		printk(KERN_WARNING
+		       "Device tree missing CPU 'clock-frequency' parameter. "
+		       "Assuming frequency 25MHz. "
+		       "This is probably not what you want.\n");
+	}
+
+	of_node_put(cpu);
+
+	print_cpuinfo();
+}
+
+/**
+ * or32_early_setup
+ *
+ * Handles the pointer to the device tree that this kernel is to use
+ * for establishing the available platform devices.
+ *
+ * For now, this is limited to using the built-in device tree.  In the future,
+ * it is intended that this function will take a pointer to the device tree
+ * that is potentially built-in, but potentially also passed in by the
+ * bootloader, or discovered by some equally clever means...
+ */
+
+void __init or32_early_setup(void)
+{
+
+	early_init_devtree(__dtb_start);
+
+	printk(KERN_INFO "Compiled-in FDT at 0x%p\n", __dtb_start);
+}
+
+static int __init openrisc_device_probe(void)
+{
+	of_platform_populate(NULL, NULL, NULL, NULL);
+
+	return 0;
+}
+
+device_initcall(openrisc_device_probe);
+
+static inline unsigned long extract_value_bits(unsigned long reg,
+					       short bit_nr, short width)
+{
+	return (reg >> bit_nr) & ((1 << width) - 1);
+}
+
+static inline unsigned long extract_value(unsigned long reg, unsigned long mask)
+{
+	while (!(mask & 0x1)) {
+		reg = reg >> 1;
+		mask = mask >> 1;
+	}
+	return mask & reg;
+}
+
+void __init detect_unit_config(unsigned long upr, unsigned long mask,
+			       char *text, void (*func) (void))
+{
+	if (text != NULL)
+		printk("%s", text);
+
+	if (upr & mask) {
+		if (func != NULL)
+			func();
+		else
+			printk("present\n");
+	} else
+		printk("not present\n");
+}
+
+/*
+ * calibrate_delay
+ *
+ * Lightweight calibrate_delay implementation that calculates loops_per_jiffy
+ * from the clock frequency passed in via the device tree
+ *
+ */
+
+void __cpuinit calibrate_delay(void)
+{
+	const int *val;
+	struct device_node *cpu = NULL;
+	cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
+	val = of_get_property(cpu, "clock-frequency", NULL);
+	if (!val)
+		panic("no cpu 'clock-frequency' parameter in device tree");
+	loops_per_jiffy = *val / HZ;
+	pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
+		loops_per_jiffy / (500000 / HZ),
+		(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
+}
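+/*
+ * Example of the calculation above (values assumed): a "clock-frequency"
+ * of 50000000 with HZ == 100 gives loops_per_jiffy = 500000, reported as
+ * "100.00 BogoMIPS (lpj=500000)".
+ */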
+
+void __init setup_arch(char **cmdline_p)
+{
+	unsigned long max_low_pfn;
+
+	unflatten_device_tree();
+
+	setup_cpuinfo();
+
+	/* process 1's initial memory region is the kernel code/data */
+	init_mm.start_code = (unsigned long)&_stext;
+	init_mm.end_code = (unsigned long)&_etext;
+	init_mm.end_data = (unsigned long)&_edata;
+	init_mm.brk = (unsigned long)&_end;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	initrd_start = (unsigned long)&__initrd_start;
+	initrd_end = (unsigned long)&__initrd_end;
+	if (initrd_start == initrd_end) {
+		initrd_start = 0;
+		initrd_end = 0;
+	}
+	initrd_below_start_ok = 1;
+#endif
+
+	/* setup bootmem allocator */
+	max_low_pfn = setup_memory();
+
+	/* paging_init() sets up the MMU and marks all pages as reserved */
+	paging_init();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+	if (!conswitchp)
+		conswitchp = &dummy_con;
+#endif
+
+	*cmdline_p = cmd_line;
+
+	printk(KERN_INFO "OpenRISC Linux -- http://openrisc.net\n");
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+	unsigned long vr;
+	int version, revision;
+
+	vr = mfspr(SPR_VR);
+	version = (vr & SPR_VR_VER) >> 24;
+	revision = vr & SPR_VR_REV;
+
+	return seq_printf(m,
+			  "cpu\t\t: OpenRISC-%x\n"
+			  "revision\t: %d\n"
+			  "frequency\t: %ld\n"
+			  "dcache size\t: %d bytes\n"
+			  "dcache block size\t: %d bytes\n"
+			  "icache size\t: %d bytes\n"
+			  "icache block size\t: %d bytes\n"
+			  "immu\t\t: %d entries, %lu ways\n"
+			  "dmmu\t\t: %d entries, %lu ways\n"
+			  "bogomips\t: %lu.%02lu\n",
+			  version,
+			  revision,
+			  loops_per_jiffy * HZ,
+			  cpuinfo.dcache_size,
+			  cpuinfo.dcache_block_size,
+			  cpuinfo.icache_size,
+			  cpuinfo.icache_block_size,
+			  1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
+			  1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW),
+			  1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
+			  1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW),
+			  (loops_per_jiffy * HZ) / 500000,
+			  ((loops_per_jiffy * HZ) / 5000) % 100);
+}
+
+static void *c_start(struct seq_file *m, loff_t * pos)
+{
+	/* We only have one CPU... */
+	return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t * pos)
+{
+	++*pos;
+	return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+	.start = c_start,
+	.next = c_next,
+	.stop = c_stop,
+	.show = show_cpuinfo,
+};
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
new file mode 100644
index 000000000000..5f759c76834e
--- /dev/null
+++ b/arch/openrisc/kernel/signal.c
@@ -0,0 +1,396 @@
+/*
+ * OpenRISC signal.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tracehook.h>
+
+#include <asm/processor.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage long
+_sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs)
+{
+	return do_sigaltstack(uss, uoss, regs->sp);
+}
+
+struct rt_sigframe {
+	struct siginfo *pinfo;
+	void *puc;
+	struct siginfo info;
+	struct ucontext uc;
+	unsigned char retcode[16];	/* trampoline code */
+};
+
+static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+{
+	unsigned int err = 0;
+	unsigned long old_usp;
+
+	/* Always make any pending restarted system call return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	/* restore the regs from &sc->regs (same as sc, since regs is first)
+	 * (sc is already checked for VERIFY_READ since the sigframe was
+	 *  checked in sys_sigreturn previously)
+	 */
+
+	if (__copy_from_user(regs, sc, sizeof(struct pt_regs)))
+		goto badframe;
+
+	/* make sure the SM-bit is cleared so user-mode cannot fool us */
+	regs->sr &= ~SPR_SR_SM;
+
+	/* restore the old USP as it was before we stacked the sc etc.
+	 * (we cannot just pop the sigcontext since we aligned the sp and
+	 *  stuff after pushing it)
+	 */
+
+	err |= __get_user(old_usp, &sc->usp);
+
+	regs->sp = old_usp;
+
+	/* TODO: the other ports use regs->orig_XX to disable syscall checks
+	 * after this completes, but we don't use that mechanism. maybe we can
+	 * use it now ?
+	 */
+
+	return err;
+
+badframe:
+	return 1;
+}
+
+asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
+{
+	struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp;
+	sigset_t set;
+	stack_t st;
+
+	/*
+	 * Since we stacked the signal on a dword boundary,
+	 * then frame should be dword aligned here.  If it's
+	 * not, then the user is trying to mess with us.
+	 */
+	if (((long)frame) & 3)
+		goto badframe;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+		goto badframe;
+
+	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+		goto badframe;
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  */
+	do_sigaltstack(&st, NULL, regs->sp);
+
+	return regs->gpr[11];
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+			    unsigned long mask)
+{
+	int err = 0;
+	unsigned long usp = regs->sp;
+
+	/* copy the regs. they are first in sc so we can use sc directly */
+
+	err |= __copy_to_user(sc, regs, sizeof(struct pt_regs));
+
+	/* then some other stuff */
+
+	err |= __put_user(mask, &sc->oldmask);
+
+	err |= __put_user(usp, &sc->usp);
+
+	return err;
+}
+
+static inline unsigned long align_sigframe(unsigned long sp)
+{
+	return sp & ~3UL;
+}
+
+/*
+ * Work out where the signal frame should go.  It's either on the user stack
+ * or the alternate stack.
+ */
+
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+					struct pt_regs *regs, size_t frame_size)
+{
+	unsigned long sp = regs->sp;
+	int onsigstack = on_sig_stack(sp);
+
+	/* redzone */
+	sp -= STACK_FRAME_OVERHEAD;
+
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if ((ka->sa.sa_flags & SA_ONSTACK) && !onsigstack) {
+		if (current->sas_ss_size)
+			sp = current->sas_ss_sp + current->sas_ss_size;
+	}
+
+	sp = align_sigframe(sp - frame_size);
+
+	/*
+	 * If we are on the alternate signal stack and would overflow it, don't.
+	 * Return an always-bogus address instead so we will die with SIGSEGV.
+	 */
+	if (onsigstack && !likely(on_sig_stack(sp)))
+		return (void __user *)-1L;
+
+	return (void __user *)sp;
+}
+
+/* grab and setup a signal frame.
+ *
+ * basically we stack a lot of state info, and arrange for the
+ * user-mode program to return to the kernel using either a
+ * trampoline which performs the syscall sigreturn, or a provided
+ * user-mode trampoline.
+ */
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+			   sigset_t *set, struct pt_regs *regs)
+{
+	struct rt_sigframe *frame;
+	unsigned long return_ip;
+	int err = 0;
+
+	frame = get_sigframe(ka, regs, sizeof(*frame));
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+		goto give_sigsegv;
+
+	err |= __put_user(&frame->info, &frame->pinfo);
+	err |= __put_user(&frame->uc, &frame->puc);
+
+	if (ka->sa.sa_flags & SA_SIGINFO)
+		err |= copy_siginfo_to_user(&frame->info, info);
+	if (err)
+		goto give_sigsegv;
+
+	/* Clear all the bits of the ucontext we don't use.  */
+	err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(NULL, &frame->uc.uc_link);
+	err |= __put_user((void *)current->sas_ss_sp,
+			  &frame->uc.uc_stack.ss_sp);
+	err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+	if (err)
+		goto give_sigsegv;
+
+	/* trampoline - the desired return ip is the retcode itself */
+	return_ip = (unsigned long)&frame->retcode;
+	/* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1; l.nop */
+	err |= __put_user(0xa960, (short *)(frame->retcode + 0));
+	err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
+	err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
+	err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
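+	/* Byte layout of retcode[] after the stores above (descriptive):
+	 *   bytes  0-3 : 0xa960 + __NR_rt_sigreturn  (l.ori r11,r0,nr)
+	 *   bytes  4-7 : 0x20000001                  (l.sys 1)
+	 *   bytes 8-11 : 0x15000000                  (l.nop)
+	 * the remaining bytes of the 16-byte buffer are unused.
+	 */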
+
+	if (err)
+		goto give_sigsegv;
+
+	/* TODO what is the current->exec_domain stuff and invmap ? */
+
+	/* Set up registers for signal handler */
+	regs->pc = (unsigned long)ka->sa.sa_handler; /* what we enter NOW */
+	regs->gpr[9] = (unsigned long)return_ip;     /* what we enter LATER */
+	regs->gpr[3] = (unsigned long)sig;           /* arg 1: signo */
+	regs->gpr[4] = (unsigned long)&frame->info;  /* arg 2: (siginfo_t*) */
+	regs->gpr[5] = (unsigned long)&frame->uc;    /* arg 3: ucontext */
+
+	/* actually move the usp to reflect the stacked frame */
+	regs->sp = (unsigned long)frame;
+
+	return;
+
+give_sigsegv:
+	if (sig == SIGSEGV)
+		ka->sa.sa_handler = SIG_DFL;
+	force_sig(SIGSEGV, current);
+}
+
+static inline void
+handle_signal(unsigned long sig,
+	      siginfo_t *info, struct k_sigaction *ka,
+	      sigset_t *oldset, struct pt_regs *regs)
+{
+	setup_rt_frame(sig, ka, info, oldset, regs);
+
+	if (ka->sa.sa_flags & SA_ONESHOT)
+		ka->sa.sa_handler = SIG_DFL;
+
+	spin_lock_irq(&current->sighand->siglock);
+	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&current->blocked, sig);
+	recalc_sigpending();
+
+	spin_unlock_irq(&current->sighand->siglock);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL, not even
+ * by mistake.
+ *
+ * Also note that the regs structure given here as an argument, is the latest
+ * pushed pt_regs. It may or may not be the same as the first pushed registers
+ * when the initial usermode->kernelmode transition took place. Therefore
+ * we can use user_mode(regs) to see if we came directly from kernel or user
+ * mode below.
+ */
+
+void do_signal(struct pt_regs *regs)
+{
+	siginfo_t info;
+	int signr;
+	struct k_sigaction ka;
+
+	/*
+	 * We want the common case to go fast, which
+	 * is why we may in certain cases get here from
+	 * kernel mode. Just return without doing anything
+	 * if so.
+	 */
+	if (!user_mode(regs))
+		return;
+
+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+	/* If we are coming out of a syscall then we need
+	 * to check if the syscall was interrupted and wants to be
+	 * restarted after handling the signal.  If so, the original
+	 * syscall number is put back into r11 and the PC rewound to
+	 * point at the l.sys instruction that resulted in the
+	 * original syscall.  Syscall results other than the four
+	 * below mean that the syscall executed to completion and no
+	 * restart is necessary.
+	 */
+	if (regs->syscallno) {
+		int restart = 0;
+
+		switch (regs->gpr[11]) {
+		case -ERESTART_RESTARTBLOCK:
+		case -ERESTARTNOHAND:
+			/* Restart if there is no signal handler */
+			restart = (signr <= 0);
+			break;
+		case -ERESTARTSYS:
+			/* Restart if there no signal handler or
+			 * SA_RESTART flag is set */
+			restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART));
+			break;
+		case -ERESTARTNOINTR:
+			/* Always restart */
+			restart = 1;
+			break;
+		}
+
+		if (restart) {
+			if (regs->gpr[11] == -ERESTART_RESTARTBLOCK)
+				regs->gpr[11] = __NR_restart_syscall;
+			else
+				regs->gpr[11] = regs->orig_gpr11;
+			regs->pc -= 4;
+		} else {
+			regs->gpr[11] = -EINTR;
+		}
+	}
+
+	if (signr <= 0) {
+		/* no signal to deliver so we just put the saved sigmask
+		 * back */
+		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+			clear_thread_flag(TIF_RESTORE_SIGMASK);
+			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+		}
+
+	} else {		/* signr > 0 */
+		sigset_t *oldset;
+
+		if (current_thread_info()->flags & _TIF_RESTORE_SIGMASK)
+			oldset = &current->saved_sigmask;
+		else
+			oldset = &current->blocked;
+
+		/* Whee!  Actually deliver the signal.  */
+		handle_signal(signr, &info, &ka, oldset, regs);
+		/* a signal was successfully delivered; the saved
+		 * sigmask will have been stored in the signal frame,
+		 * and will be restored by sigreturn, so we can simply
+		 * clear the TIF_RESTORE_SIGMASK flag */
+		if (test_thread_flag(TIF_RESTORE_SIGMASK))
+			clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+		tracehook_signal_handler(signr, &info, &ka, regs,
+					 test_thread_flag(TIF_SINGLESTEP));
+	}
+
+	return;
+}
+
+asmlinkage void do_notify_resume(struct pt_regs *regs)
+{
+	if (current_thread_info()->flags & _TIF_SIGPENDING)
+		do_signal(regs);
+
+	if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
+}
diff --git a/arch/openrisc/kernel/sys_call_table.c b/arch/openrisc/kernel/sys_call_table.c
new file mode 100644
index 000000000000..e1f8ce8c72a8
--- /dev/null
+++ b/arch/openrisc/kernel/sys_call_table.c
@@ -0,0 +1,28 @@
+/*
+ * OpenRISC sys_call_table.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
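+/*
+ * Including <asm/unistd.h> below with __SYSCALL redefined turns every
+ * syscall declaration into a designated initializer, roughly
+ *	[__NR_exit] = sys_exit,
+ * so the table is generated directly from the unistd list.
+ */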
+void *sys_call_table[__NR_syscalls] = {
+#include <asm/unistd.h>
+};
diff --git a/arch/openrisc/kernel/sys_or32.c b/arch/openrisc/kernel/sys_or32.c
new file mode 100644
index 000000000000..57060084c0cc
--- /dev/null
+++ b/arch/openrisc/kernel/sys_or32.c
@@ -0,0 +1,57 @@
+/*
+ * OpenRISC sys_or32.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on some platforms.
+ * Since we don't have to do any backwards compatibility, our
+ * versions are done in the most "normal" way possible.
+ */
+
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/mm.h>
+
+#include <asm/syscalls.h>
+
+/* These are secondary entry points as the primary entry points are defined in
+ * entry.S where we add the 'regs' parameter value
+ */
+
+asmlinkage long _sys_clone(unsigned long clone_flags, unsigned long newsp,
+			   int __user *parent_tid, int __user *child_tid,
+			   struct pt_regs *regs)
+{
+	long ret;
+
+	/* FIXME: Is alignment necessary? */
+	/* newsp = ALIGN(newsp, 4); */
+
+	if (!newsp)
+		newsp = regs->sp;
+
+	ret = do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
+
+	return ret;
+}
+
+asmlinkage int _sys_fork(struct pt_regs *regs)
+{
+#ifdef CONFIG_MMU
+	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
+#else
+	return -EINVAL;
+#endif
+}
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
new file mode 100644
index 000000000000..bd946ef1623d
--- /dev/null
+++ b/arch/openrisc/kernel/time.c
@@ -0,0 +1,181 @@
+/*
+ * OpenRISC time.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/interrupt.h>
+#include <linux/ftrace.h>
+
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/cpuinfo.h>
+
+static int openrisc_timer_set_next_event(unsigned long delta,
+					 struct clock_event_device *dev)
+{
+	u32 c;
+
+	/* Read 32-bit counter value, add delta, mask off the low 28 bits.
+	 * We're guaranteed delta won't be bigger than 28 bits because the
+	 * generic timekeeping code ensures that for us.
+	 */
+	c = mfspr(SPR_TTCR);
+	c += delta;
+	c &= SPR_TTMR_TP;
+
+	/* Set counter and enable interrupt.
+	 * Keep timer in continuous mode always.
+	 */
+	mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | c);
+
+	return 0;
+}
+
+static void openrisc_timer_set_mode(enum clock_event_mode mode,
+				    struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		pr_debug("%s: periodic\n", __func__);
+		BUG();
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		pr_debug("%s: oneshot\n", __func__);
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+		pr_debug("%s: unused\n", __func__);
+		break;
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		pr_debug("%s: shutdown\n", __func__);
+		break;
+	case CLOCK_EVT_MODE_RESUME:
+		pr_debug("%s: resume\n", __func__);
+		break;
+	}
+}
+
+/* This is the clock event device based on the OR1K tick timer.
+ * As the timer is being used as a continuous clock-source (required for HR
+ * timers) we cannot enable the PERIODIC feature.  The tick timer can run using
+ * one-shot events, so no problem.
+ */
+
+static struct clock_event_device clockevent_openrisc_timer = {
+	.name = "openrisc_timer_clockevent",
+	.features = CLOCK_EVT_FEAT_ONESHOT,
+	.rating = 300,
+	.set_next_event = openrisc_timer_set_next_event,
+	.set_mode = openrisc_timer_set_mode,
+};
+
+static inline void timer_ack(void)
+{
+	/* Clear the IP bit and disable further interrupts */
+	/* This can be done very simply... we just need to keep the timer
+	   running, so just maintain the CR bits while clearing the rest
+	   of the register
+	 */
+	mtspr(SPR_TTMR, SPR_TTMR_CR);
+}
+
+/*
+ * The timer interrupt is mostly handled in generic code nowadays... this
+ * function just acknowledges the interrupt and fires the event handler that
+ * has been set on the clockevent device by the generic time management code.
+ *
+ * This function needs to be called by the timer exception handler and that's
+ * all the exception handler needs to do.
+ */
+
+irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct clock_event_device *evt = &clockevent_openrisc_timer;
+
+	timer_ack();
+
+	/*
+	 * update_process_times() expects us to have called irq_enter().
+	 */
+	irq_enter();
+	evt->event_handler(evt);
+	irq_exit();
+
+	set_irq_regs(old_regs);
+
+	return IRQ_HANDLED;
+}
+
+static __init void openrisc_clockevent_init(void)
+{
+	clockevents_calc_mult_shift(&clockevent_openrisc_timer,
+				    cpuinfo.clock_frequency, 4);
+
+	/* We only have 28 bits */
+	clockevent_openrisc_timer.max_delta_ns =
+	    clockevent_delta2ns((u32) 0x0fffffff, &clockevent_openrisc_timer);
+	clockevent_openrisc_timer.min_delta_ns =
+	    clockevent_delta2ns(1, &clockevent_openrisc_timer);
+	clockevent_openrisc_timer.cpumask = cpumask_of(0);
+	clockevents_register_device(&clockevent_openrisc_timer);
+}
+
+/**
+ * Clocksource: Based on OpenRISC timer/counter
+ *
+ * This sets up the OpenRISC Tick Timer as a clock source.  The tick timer
+ * is 32 bits wide and runs at the CPU clock frequency.
+ */
+
+static cycle_t openrisc_timer_read(struct clocksource *cs)
+{
+	return (cycle_t) mfspr(SPR_TTCR);
+}
+
+static struct clocksource openrisc_timer = {
+	.name = "openrisc_timer",
+	.rating = 200,
+	.read = openrisc_timer_read,
+	.mask = CLOCKSOURCE_MASK(32),
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init openrisc_timer_init(void)
+{
+	if (clocksource_register_hz(&openrisc_timer, cpuinfo.clock_frequency))
+		panic("failed to register clocksource");
+
+	/* Enable the incrementer: 'continuous' mode with interrupt disabled */
+	mtspr(SPR_TTMR, SPR_TTMR_CR);
+
+	return 0;
+}
+
+void __init time_init(void)
+{
+	u32 upr;
+
+	upr = mfspr(SPR_UPR);
+	if (!(upr & SPR_UPR_TTP))
+		panic("Linux not supported on devices without tick timer");
+
+	openrisc_timer_init();
+	openrisc_clockevent_init();
+}
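[Note: a sketch of the arithmetic in openrisc_timer_set_next_event() above. The bit masks are assumptions chosen to match the usual OR1K tick-timer layout (28-bit time period, mode and interrupt-enable bits in the top nibble); the real values come from <asm/spr_defs.h>.]

#include <stdint.h>
#include <stdio.h>

#define TTMR_TP	0x0fffffffu	/* time period: low 28 bits (assumed)      */
#define TTMR_IE	0x20000000u	/* interrupt enable (assumed bit position) */
#define TTMR_CR	0xc0000000u	/* continuous run (assumed bit position)   */

static uint32_t program_next_event(uint32_t ttcr_now, uint32_t delta)
{
	/* Match value = current counter + delta, truncated to 28 bits,
	 * exactly as in the clockevent handler above. */
	uint32_t match = (ttcr_now + delta) & TTMR_TP;

	/* Keep the timer free-running and interrupt when TTCR reaches match. */
	return TTMR_CR | TTMR_IE | match;
}

int main(void)
{
	printf("TTMR = 0x%08x\n", program_next_event(0x0ffffff0u, 0x40u));
	return 0;
}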
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
new file mode 100644
index 000000000000..a4ec44a052b2
--- /dev/null
+++ b/arch/openrisc/kernel/traps.c
@@ -0,0 +1,366 @@
+/*
+ * OpenRISC traps.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ *  Here we handle the break vectors not used by the system call
+ *  mechanism, as well as some general stack/register dumping
+ *  things.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/kallsyms.h>
+#include <asm/uaccess.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+extern char _etext, _stext;
+
+int kstack_depth_to_print = 0x180;
+
+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+{
+	return p > (void *)tinfo && p < (void *)tinfo + THREAD_SIZE - 3;
+}
+
+void show_trace(struct task_struct *task, unsigned long *stack)
+{
+	struct thread_info *context;
+	unsigned long addr;
+
+	context = (struct thread_info *)
+	    ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+
+	while (valid_stack_ptr(context, stack)) {
+		addr = *stack++;
+		if (__kernel_text_address(addr)) {
+			printk(" [<%08lx>]", addr);
+			print_symbol(" %s", addr);
+			printk("\n");
+		}
+	}
+	printk(" =======================\n");
+}
+
+/* displays a short stack trace */
+void show_stack(struct task_struct *task, unsigned long *esp)
+{
+	unsigned long addr, *stack;
+	int i;
+
+	if (esp == NULL)
+		esp = (unsigned long *)&esp;
+
+	stack = esp;
+
+	printk("Stack dump [0x%08lx]:\n", (unsigned long)esp);
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (kstack_end(stack))
+			break;
+		if (__get_user(addr, stack)) {
+			/* This message matches the "failing address"
+			   marker used by s390 in ksymoops, so lines
+			   containing it will not be filtered out.  */
+			printk("Failing address 0x%lx\n", (unsigned long)stack);
+			break;
+		}
+		stack++;
+
+		printk("sp + %02d: 0x%08lx\n", i * 4, addr);
+	}
+	printk("\n");
+
+	show_trace(task, esp);
+
+	return;
+}
+
+void show_trace_task(struct task_struct *tsk)
+{
+	/*
+	 * TODO: SysRq-T trace dump...
+	 */
+}
+
+/*
+ * The architecture-independent backtrace generator
+ */
+void dump_stack(void)
+{
+	unsigned long stack;
+
+	show_stack(current, &stack);
+}
+
+void show_registers(struct pt_regs *regs)
+{
+	int i;
+	int in_kernel = 1;
+	unsigned long esp;
+
+	esp = (unsigned long)(&regs->sp);
+	if (user_mode(regs))
+		in_kernel = 0;
+
+	printk("CPU #: %d\n"
+	       "   PC: %08lx    SR: %08lx    SP: %08lx\n",
+	       smp_processor_id(), regs->pc, regs->sr, regs->sp);
+	printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
+	       0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
+	printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
+	       regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
+	printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
+	       regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
+	printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
+	       regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
+	printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
+	       regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
+	printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
+	       regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
+	printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
+	       regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
+	printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
+	       regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
+	printk("  RES: %08lx oGPR11: %08lx syscallno: %08lx\n",
+	       regs->gpr[11], regs->orig_gpr11, regs->syscallno);
+
+	printk("Process %s (pid: %d, stackpage=%08lx)\n",
+	       current->comm, current->pid, (unsigned long)current);
+	/*
+	 * When in-kernel, we also print out the stack and code at the
+	 * time of the fault..
+	 */
+	if (in_kernel) {
+
+		printk("\nStack: ");
+		show_stack(NULL, (unsigned long *)esp);
+
+		printk("\nCode: ");
+		if (regs->pc < PAGE_OFFSET)
+			goto bad;
+
+		for (i = -24; i < 24; i++) {
+			unsigned char c;
+			if (__get_user(c, &((unsigned char *)regs->pc)[i])) {
+bad:
+				printk(" Bad PC value.");
+				break;
+			}
+
+			if (i == 0)
+				printk("(%02x) ", c);
+			else
+				printk("%02x ", c);
+		}
+	}
+	printk("\n");
+}
+
+void nommu_dump_state(struct pt_regs *regs,
+		      unsigned long ea, unsigned long vector)
+{
+	int i;
+	unsigned long addr, stack = regs->sp;
+
+	printk("\n\r[nommu_dump_state] :: ea %lx, vector %lx\n\r", ea, vector);
+
+	printk("CPU #: %d\n"
+	       "   PC: %08lx    SR: %08lx    SP: %08lx\n",
+	       0, regs->pc, regs->sr, regs->sp);
+	printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
+	       0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
+	printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
+	       regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
+	printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
+	       regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
+	printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
+	       regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
+	printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
+	       regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
+	printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
+	       regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
+	printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
+	       regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
+	printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
+	       regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
+	printk("  RES: %08lx oGPR11: %08lx syscallno: %08lx\n",
+	       regs->gpr[11], regs->orig_gpr11, regs->syscallno);
+
+	printk("Process %s (pid: %d, stackpage=%08lx)\n",
+	       ((struct task_struct *)(__pa(current)))->comm,
+	       ((struct task_struct *)(__pa(current)))->pid,
+	       (unsigned long)current);
+
+	printk("\nStack: ");
+	printk("Stack dump [0x%08lx]:\n", (unsigned long)stack);
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (((long)stack & (THREAD_SIZE - 1)) == 0)
+			break;
+		stack++;
+
+		printk("%lx :: sp + %02d: 0x%08lx\n", stack, i * 4,
+		       *((unsigned long *)(__pa(stack))));
+	}
+	printk("\n");
+
+	printk("Call Trace:   ");
+	i = 1;
+	while (((long)stack & (THREAD_SIZE - 1)) != 0) {
+		addr = *((unsigned long *)__pa(stack));
+		stack++;
+
+		if (kernel_text_address(addr)) {
+			if (i && ((i % 6) == 0))
+				printk("\n ");
+			printk(" [<%08lx>]", addr);
+			i++;
+		}
+	}
+	printk("\n");
+
+	printk("\nCode: ");
+
+	for (i = -24; i < 24; i++) {
+		unsigned char c;
+		c = ((unsigned char *)(__pa(regs->pc)))[i];
+
+		if (i == 0)
+			printk("(%02x) ", c);
+		else
+			printk("%02x ", c);
+	}
+	printk("\n");
+}
+
+/* This is normally the 'Oops' routine */
+void die(const char *str, struct pt_regs *regs, long err)
+{
+
+	console_verbose();
+	printk("\n%s#: %04lx\n", str, err & 0xffff);
+	show_registers(regs);
+#ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
+	printk("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n");
+
+	/* shut down interrupts */
+	local_irq_disable();
+
+	__asm__ __volatile__("l.nop   1");
+	do {} while (1);
+#endif
+	do_exit(SIGSEGV);
+}
+
+/* Calls die() only if the fault happened in kernel mode */
+void die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+	if (user_mode(regs))
+		return;
+
+	die(str, regs, err);
+}
+
+void unhandled_exception(struct pt_regs *regs, int ea, int vector)
+{
+	printk("Unable to handle exception at EA =0x%x, vector 0x%x",
+	       ea, vector);
+	die("Oops", regs, 9);
+}
+
+void __init trap_init(void)
+{
+	/* Nothing needs to be done */
+}
+
+asmlinkage void do_trap(struct pt_regs *regs, unsigned long address)
+{
+	siginfo_t info;
+	memset(&info, 0, sizeof(info));
+	info.si_signo = SIGTRAP;
+	info.si_code = TRAP_TRACE;
+	info.si_addr = (void *)address;
+	force_sig_info(SIGTRAP, &info, current);
+
+	regs->pc += 4;
+}
+
+asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
+{
+	siginfo_t info;
+
+	if (user_mode(regs)) {
+		/* Send a SIGSEGV */
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = 0;	/* no more specific si_code available here */
+		info.si_addr = (void *)address;
+		force_sig_info(SIGSEGV, &info, current);
+	} else {
+		printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
+		show_registers(regs);
+		die("Die:", regs, address);
+	}
+
+}
+
+asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address)
+{
+	siginfo_t info;
+
+	if (user_mode(regs)) {
+		/* Send a SIGBUS */
+		info.si_signo = SIGBUS;
+		info.si_errno = 0;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void *)address;
+		force_sig_info(SIGBUS, &info, current);
+	} else {		/* Kernel mode */
+		printk("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address);
+		show_registers(regs);
+		die("Die:", regs, address);
+	}
+}
+
+asmlinkage void do_illegal_instruction(struct pt_regs *regs,
+				       unsigned long address)
+{
+	siginfo_t info;
+
+	if (user_mode(regs)) {
+		/* Send a SIGILL */
+		info.si_signo = SIGILL;
+		info.si_errno = 0;
+		info.si_code = ILL_ILLOPC;
+		info.si_addr = (void *)address;
+		force_sig_info(SIGILL, &info, current);
+	} else {		/* Kernel mode */
+		printk("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n",
+		       address);
+		show_registers(regs);
+		die("Die:", regs, address);
+	}
+}
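[Note: show_trace() above has no frame pointers to follow, so it scans every word on the kernel stack and prints the ones that fall inside the kernel text. A stand-alone sketch of that heuristic follows; the text-segment bounds and the sample stack are made up for the illustration.]

#include <stdio.h>

#define TEXT_START	0xc0000000ul	/* assumed kernel text range */
#define TEXT_END	0xc0200000ul

static int looks_like_text(unsigned long addr)
{
	return addr >= TEXT_START && addr < TEXT_END;
}

static void scan_stack(const unsigned long *sp, const unsigned long *top)
{
	/* Conservative: anything that looks like a text address is
	 * reported, whether or not it really is a return address. */
	while (sp < top) {
		unsigned long word = *sp++;
		if (looks_like_text(word))
			printf(" [<%08lx>]\n", word);
	}
}

int main(void)
{
	unsigned long fake_stack[] = {
		0x12345678ul, 0xc0001234ul, 0xdeadbeeful, 0xc01ffff0ul,
	};

	scan_stack(fake_stack, fake_stack + 4);
	return 0;
}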
diff --git a/arch/openrisc/kernel/vmlinux.h b/arch/openrisc/kernel/vmlinux.h
new file mode 100644
index 000000000000..ee842a2d3f36
--- /dev/null
+++ b/arch/openrisc/kernel/vmlinux.h
@@ -0,0 +1,12 @@
+#ifndef __OPENRISC_VMLINUX_H_
+#define __OPENRISC_VMLINUX_H_
+
+extern char _stext, _etext, _edata, _end;
+#ifdef CONFIG_BLK_DEV_INITRD
+extern char __initrd_start, __initrd_end;
+extern char __initramfs_start;
+#endif
+
+extern u32 __dtb_start[];
+
+#endif
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..2d69a853b742
--- /dev/null
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -0,0 +1,115 @@
+/*
+ * OpenRISC vmlinux.lds.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ *
+ * ld script for OpenRISC architecture
+ */
+
+/* TODO
+ *		- clean up __offset & stuff
+ *		- change all 8192 alignment to PAGE !!!
+ *		- recheck if all alignments are really needed
+ */
+
+#  define LOAD_OFFSET  PAGE_OFFSET
+#  define LOAD_BASE    PAGE_OFFSET
+
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf32-or32", "elf32-or32", "elf32-or32")
+jiffies = jiffies_64 + 4;
+
+SECTIONS
+{
+        /* Read-only sections, merged into text segment: */
+        . = LOAD_BASE ;
+
+	/* _s_kernel_ro must be page aligned */
+	. = ALIGN(PAGE_SIZE);
+	_s_kernel_ro = .;
+
+        .text                   : AT(ADDR(.text) - LOAD_OFFSET)
+	{
+          _stext = .;
+	  TEXT_TEXT
+	  SCHED_TEXT
+	  LOCK_TEXT
+	  KPROBES_TEXT
+	  IRQENTRY_TEXT
+	  *(.fixup)
+	  *(.text.__*)
+	  _etext = .;
+	}
+	/* TODO: Check if fixup and text.__* are really necessary
+	 * fixup is definitely necessary
+	 */
+
+	_sdata = .;
+
+	/* Page alignment required for RO_DATA_SECTION */
+	RO_DATA_SECTION(PAGE_SIZE)
+	_e_kernel_ro = .;
+
+	/* Whatever comes after _e_kernel_ro had better be page-aligned, too */
+
+	/* 32 here is cacheline size... recheck this */
+	RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE)
+
+        _edata  =  .;
+
+	EXCEPTION_TABLE(4)
+	NOTES
+
+	/* Init code and data */
+	. = ALIGN(PAGE_SIZE);
+	__init_begin = .;
+
+	HEAD_TEXT_SECTION
+
+	/* Page aligned */
+	INIT_TEXT_SECTION(PAGE_SIZE)
+
+	/* Align __setup_start on 16 byte boundary */
+	INIT_DATA_SECTION(16)
+
+	PERCPU_SECTION(L1_CACHE_BYTES)
+
+        __init_end = .;
+
+	. = ALIGN(PAGE_SIZE);
+	.initrd			: AT(ADDR(.initrd) - LOAD_OFFSET)
+	{
+		__initrd_start = .;
+		*(.initrd)
+		__initrd_end = .;
+		FILL (0);
+                . = ALIGN (PAGE_SIZE);
+	}
+
+        __vmlinux_end = .;            /* last address of the physical file */
+
+	BSS_SECTION(0, 0, 0x20)
+
+        _end = .;
+
+	/* Throw in the debugging sections */
+	STABS_DEBUG
+	DWARF_DEBUG
+
+        /* Sections to be discarded -- must be last */
+	DISCARDS
+}
diff --git a/arch/openrisc/lib/Makefile b/arch/openrisc/lib/Makefile
new file mode 100644
index 000000000000..966f65dbc6f0
--- /dev/null
+++ b/arch/openrisc/lib/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for or32 specific library files..
+#
+
+obj-y  = string.o delay.o
diff --git a/arch/openrisc/lib/delay.c b/arch/openrisc/lib/delay.c
new file mode 100644
index 000000000000..01d9740ae6f3
--- /dev/null
+++ b/arch/openrisc/lib/delay.c
@@ -0,0 +1,60 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      version 2 as published by the Free Software Foundation
+ *
+ * Precise Delay Loops
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/delay.h>
+#include <asm/timex.h>
+#include <asm/processor.h>
+
+int __devinit read_current_timer(unsigned long *timer_value)
+{
+	*timer_value = mfspr(SPR_TTCR);
+	return 0;
+}
+
+void __delay(unsigned long cycles)
+{
+	cycles_t target = get_cycles() + cycles;
+
+	while (get_cycles() < target)
+		cpu_relax();
+}
+EXPORT_SYMBOL(__delay);
+
+inline void __const_udelay(unsigned long xloops)
+{
+	unsigned long long loops;
+
+	loops = (unsigned long long)xloops * loops_per_jiffy * HZ;
+
+	__delay(loops >> 32);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+void __udelay(unsigned long usecs)
+{
+	__const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long nsecs)
+{
+	__const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
+}
+EXPORT_SYMBOL(__ndelay);
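[Note: a worked example of the fixed-point math in __udelay()/__const_udelay() above: microseconds are scaled by 0x10c7 (roughly 2^32 / 10^6), multiplied by loops_per_jiffy * HZ (timer ticks per second) and shifted right by 32 to give a tick count. The loops_per_jiffy and HZ values below are assumptions for the example.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t usecs = 100;
	uint64_t lpj   = 500000;	/* assumed loops_per_jiffy */
	uint64_t hz    = 100;		/* assumed HZ              */

	uint64_t xloops = usecs * 0x10c7;		/* ~ usecs * 2^32 / 10^6 */
	uint64_t ticks  = (xloops * lpj * hz) >> 32;	/* back down to ticks    */

	/* 100 us at lpj * HZ = 50 MHz should be about 5000 ticks. */
	printf("%llu ticks\n", (unsigned long long)ticks);
	return 0;
}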
diff --git a/arch/openrisc/lib/string.S b/arch/openrisc/lib/string.S
new file mode 100644
index 000000000000..465f04bc7deb
--- /dev/null
+++ b/arch/openrisc/lib/string.S
@@ -0,0 +1,204 @@
+/*
+ * OpenRISC string.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+
+	/*
+	 * this can be optimized by using gcc inline assembly with
+	 * proper constraints (no need to save the argument registers...)
+	 *
+	 */
+
+
+/*
+ *
+ * int __copy_tofrom_user(void *to, const void *from, unsigned long size);
+ *
+ * NOTE: it returns number of bytes NOT copied !!!
+ *
+ */
+	.global	__copy_tofrom_user
+__copy_tofrom_user:
+	l.addi  r1,r1,-12
+	l.sw    0(r1),r6
+	l.sw    4(r1),r4
+	l.sw    8(r1),r3
+
+	l.addi  r11,r5,0
+2:  	l.sfeq  r11,r0
+	l.bf    1f
+	l.addi  r11,r11,-1
+8:    	l.lbz   r6,0(r4)
+9:    	l.sb    0(r3),r6
+	l.addi  r3,r3,1
+	l.j     2b
+	l.addi  r4,r4,1
+1:
+	l.addi  r11,r11,1               // r11 holds the return value
+
+	l.lwz   r6,0(r1)
+	l.lwz   r4,4(r1)
+	l.lwz   r3,8(r1)
+	l.jr    r9
+	l.addi  r1,r1,12
+
+	.section .fixup, "ax"
+99:
+		l.j     1b
+		l.nop
+	.previous
+
+	.section __ex_table, "a"
+		.long 8b, 99b		// read fault
+		.long 9b, 99b		// write fault
+	.previous
+
+/*
+ * unsigned long clear_user(void *addr, unsigned long size);
+ *
+ * NOTE: it returns number of bytes NOT cleared !!!
+ */
+	.global	__clear_user
+__clear_user:
+	l.addi  r1,r1,-8
+	l.sw    0(r1),r4
+	l.sw    4(r1),r3
+
+2:	l.sfeq	r4,r0
+	l.bf	1f
+	l.addi	r4,r4,-1
+9:	l.sb	0(r3),r0
+	l.j	2b
+	l.addi  r3,r3,1
+
+1:
+	l.addi  r11,r4,1
+
+	l.lwz	r4,0(r1)
+	l.lwz	r3,4(r1)
+	l.jr	r9
+	l.addi	r1,r1,8
+
+	.section .fixup, "ax"
+99:
+		l.j     1b
+		l.nop
+	.previous
+
+	.section __ex_table, "a"
+		.long 9b, 99b		// write fault
+	.previous
+
+/*
+ * long strncpy_from_user(char *dst, const char *src, long count)
+ *
+ *
+ */
+	.global	__strncpy_from_user
+__strncpy_from_user:
+	l.addi  r1,r1,-16
+	l.sw    0(r1),r6
+	l.sw    4(r1),r5
+	l.sw    8(r1),r4
+	l.sw    12(r1),r3
+
+	l.addi  r11,r5,0
+2:  	l.sfeq  r5,r0
+	l.bf    1f
+	l.addi  r5,r5,-1
+8:    	l.lbz   r6,0(r4)
+	l.sfeq  r6,r0
+	l.bf    1f
+9:    	l.sb    0(r3),r6
+	l.addi  r3,r3,1
+	l.j     2b
+	l.addi  r4,r4,1
+1:
+	l.lwz   r6,0(r1)
+	l.addi  r5,r5,1
+	l.sub   r11,r11,r5              // r11 holds the return value
+
+	l.lwz   r6,0(r1)
+	l.lwz   r5,4(r1)
+	l.lwz   r4,8(r1)
+	l.lwz   r3,12(r1)
+	l.jr    r9
+	l.addi  r1,r1,16
+
+	.section .fixup, "ax"
+99:
+		l.movhi r11,hi(-EFAULT)
+		l.ori   r11,r11,lo(-EFAULT)
+
+		l.lwz   r6,0(r1)
+		l.lwz   r5,4(r1)
+		l.lwz   r4,8(r1)
+		l.lwz   r3,12(r1)
+		l.jr	r9
+		l.addi  r1,r1,16
+	.previous
+
+	.section __ex_table, "a"
+		.long 8b, 99b		// read fault
+	.previous
+
+/*
+ * extern int __strnlen_user(const char *str, long len, unsigned long top);
+ *
+ *
+ * RTRN: - length of a string including NUL termination character
+ *       - on page fault 0
+ */
+
+	.global __strnlen_user
+__strnlen_user:
+	l.addi  r1,r1,-8
+	l.sw    0(r1),r6
+	l.sw    4(r1),r3
+
+	l.addi  r11,r0,0
+2:  	l.sfeq  r11,r4
+	l.bf    1f
+	l.addi  r11,r11,1
+8:    	l.lbz   r6,0(r3)
+	l.sfeq  r6,r0
+	l.bf    1f
+	l.sfgeu r3,r5                  // are we over the top ?
+	l.bf    99f
+	l.j     2b
+	l.addi  r3,r3,1
+
+1:
+	l.lwz   r6,0(r1)
+	l.lwz	r3,4(r1)
+	l.jr    r9
+	l.addi  r1,r1,8
+
+	.section .fixup, "ax"
+99:
+		l.addi  r11,r0,0
+
+		l.lwz   r6,0(r1)
+		l.lwz	r3,4(r1)
+		l.jr	r9
+		l.addi  r1,r1,8
+	.previous
+
+	.section __ex_table, "a"
+		.long 8b, 99b		// read fault
+	.previous
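[Note: the assembly above implements __copy_tofrom_user(), whose contract is to return the number of bytes NOT copied (0 means complete success); a fault in the middle simply stops the copy via the exception-table fixup. The C sketch below mimics that contract, with the fault simulated by a cut-off index instead of a real page fault.]

#include <stddef.h>
#include <stdio.h>

static size_t copy_tofrom(void *to, const void *from, size_t size,
			  size_t fault_after)	/* (size_t)-1 = no fault */
{
	unsigned char *d = to;
	const unsigned char *s = from;
	size_t done = 0;

	while (done < size) {
		if (done == fault_after)	/* simulated page fault */
			break;
		d[done] = s[done];
		done++;
	}

	return size - done;			/* bytes NOT copied */
}

int main(void)
{
	char src[8] = "abcdefg";
	char dst[8] = { 0 };

	printf("left: %zu\n", copy_tofrom(dst, src, 8, (size_t)-1));	/* 0 */
	printf("left: %zu\n", copy_tofrom(dst, src, 8, 3));		/* 5 */
	return 0;
}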
diff --git a/arch/openrisc/mm/Makefile b/arch/openrisc/mm/Makefile
new file mode 100644
index 000000000000..324ba2634529
--- /dev/null
+++ b/arch/openrisc/mm/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux openrisc-specific parts of the memory manager.
+#
+
+obj-y	:= fault.o tlb.o init.o ioremap.o
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
new file mode 100644
index 000000000000..a5dce82f864b
--- /dev/null
+++ b/arch/openrisc/mm/fault.c
@@ -0,0 +1,338 @@
+/*
+ * OpenRISC fault.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/uaccess.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+#define NUM_TLB_ENTRIES 64
+#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
+
+unsigned long pte_misses;	/* updated by do_page_fault() */
+unsigned long pte_errors;	/* updated by do_page_fault() */
+
+/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
+ *            - also look into include/asm-or32/mmu_context.h
+ */
+volatile pgd_t *current_pgd;
+
+extern void die(const char *, struct pt_regs *, long);
+
+/*
+ * This routine handles page faults.  It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * If this routine detects a bad access, it returns 1, otherwise it
+ * returns 0.
+ */
+
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
+			      unsigned long vector, int write_acc)
+{
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	siginfo_t info;
+	int fault;
+
+	tsk = current;
+
+	/*
+	 * We fault-in kernel-space virtual memory on-demand. The
+	 * 'reference' page table is init_mm.pgd.
+	 *
+	 * NOTE! We MUST NOT take any locks for this case. We may
+	 * be in an interrupt or a critical region, and should
+	 * only copy the information from the master page table,
+	 * nothing more.
+	 *
+	 * NOTE2: This is done so that, when updating the vmalloc
+	 * mappings we don't have to walk all processes pgdirs and
+	 * add the high mappings all at once. Instead we do it as they
+	 * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
+	 * bit set so sometimes the TLB can use a lingering entry.
+	 *
+	 * This verifies that the fault happens in kernel space
+	 * and that the fault was not a protection error.
+	 */
+
+	if (address >= VMALLOC_START &&
+	    (vector != 0x300 && vector != 0x400) &&
+	    !user_mode(regs))
+		goto vmalloc_fault;
+
+	/* If exceptions were enabled, we can reenable them here */
+	if (user_mode(regs)) {
+		/* Exception was in userspace: reenable interrupts */
+		local_irq_enable();
+	} else {
+		/* If exception was in a syscall, then IRQ's may have
+		 * been enabled or disabled.  If they were enabled,
+		 * reenable them.
+		 */
+		if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
+			local_irq_enable();
+	}
+
+	mm = tsk->mm;
+	info.si_code = SEGV_MAPERR;
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+
+	if (in_interrupt() || !mm)
+		goto no_context;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, address);
+
+	if (!vma)
+		goto bad_area;
+
+	if (vma->vm_start <= address)
+		goto good_area;
+
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+
+	if (user_mode(regs)) {
+		/*
+		 * accessing the stack below usp is always a bug.
+		 * we get page-aligned addresses so we can only check
+		 * if we're within a page from usp, but that might be
+		 * enough to catch brutal errors at least.
+		 */
+		if (address + PAGE_SIZE < regs->sp)
+			goto bad_area;
+	}
+	if (expand_stack(vma, address))
+		goto bad_area;
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+
+good_area:
+	info.si_code = SEGV_ACCERR;
+
+	/* first do some preliminary protection checks */
+
+	if (write_acc) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else {
+		/* not present */
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+
+	/* are we trying to execute nonexecutable area */
+	if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
+		goto bad_area;
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+
+	fault = handle_mm_fault(mm, vma, address, write_acc);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
+	}
+	/*RGD modeled on Cris */
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
+
+	up_read(&mm->mmap_sem);
+	return;
+
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+
+bad_area:
+	up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+
+	/* User mode accesses just cause a SIGSEGV */
+
+	if (user_mode(regs)) {
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		/* info.si_code has been set above */
+		info.si_addr = (void *)address;
+		force_sig_info(SIGSEGV, &info, tsk);
+		return;
+	}
+
+no_context:
+
+	/* Are we prepared to handle this kernel fault?
+	 *
+	 * (The kernel has valid exception-points in the source
+	 *  when it accesses user memory.  When it fails in one
+	 *  of those points, we find it in a table and do a jump
+	 *  to some fixup code that loads an appropriate error
+	 *  code)
+	 */
+
+	{
+		const struct exception_table_entry *entry;
+
+		__asm__ __volatile__("l.nop 42");
+
+		if ((entry = search_exception_tables(regs->pc)) != NULL) {
+			/* Adjust the instruction pointer in the stackframe */
+			regs->pc = entry->fixup;
+			return;
+		}
+	}
+
+	/*
+	 * Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+
+	if ((unsigned long)(address) < PAGE_SIZE)
+		printk(KERN_ALERT
+		       "Unable to handle kernel NULL pointer dereference");
+	else
+		printk(KERN_ALERT "Unable to handle kernel access");
+	printk(" at virtual address 0x%08lx\n", address);
+
+	die("Oops", regs, write_acc);
+
+	do_exit(SIGKILL);
+
+	/*
+	 * We ran out of memory, or some other thing happened to us that made
+	 * us unable to handle the page fault gracefully.
+	 */
+
+out_of_memory:
+	__asm__ __volatile__("l.nop 42");
+	__asm__ __volatile__("l.nop 1");
+
+	up_read(&mm->mmap_sem);
+	printk("VM: killing process %s\n", tsk->comm);
+	if (user_mode(regs))
+		do_exit(SIGKILL);
+	goto no_context;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+
+	/*
+	 * Send a sigbus, regardless of whether we were in kernel
+	 * or user mode.
+	 */
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRERR;
+	info.si_addr = (void *)address;
+	force_sig_info(SIGBUS, &info, tsk);
+
+	/* Kernel mode? Handle exceptions or die */
+	if (!user_mode(regs))
+		goto no_context;
+	return;
+
+vmalloc_fault:
+	{
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Use current_pgd instead of tsk->active_mm->pgd
+		 * since the latter might be unavailable if this
+		 * code is executed in a misfortunately run irq
+		 * (like inside schedule() between switch_mm and
+		 *  switch_to...).
+		 */
+
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+		pte_t *pte_k;
+
+/*
+		phx_warn("do_page_fault(): vmalloc_fault will not work, "
+			 "since current_pgd assign a proper value somewhere\n"
+			 "anyhow we don't need this at the moment\n");
+
+		phx_mmu("vmalloc_fault");
+*/
+		pgd = (pgd_t *)current_pgd + offset;
+		pgd_k = init_mm.pgd + offset;
+
+		/* Since we're two-level, we don't need to do both
+		 * set_pgd and set_pmd (they do the same thing). If
+		 * we go three-level at some point, do the right thing
+		 * with pgd_present and set_pgd here.
+		 *
+		 * Also, since the vmalloc area is global, we don't
+		 * need to copy individual PTE's, it is enough to
+		 * copy the pgd pointer into the pte page of the
+		 * root task. If that is there, we'll find our pte if
+		 * it exists.
+		 */
+
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (!pud_present(*pud_k))
+			goto no_context;
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
+
+		if (!pmd_present(*pmd_k))
+			goto bad_area_nosemaphore;
+
+		set_pmd(pmd, *pmd_k);
+
+		/* Make sure the actual PTE exists as well to
+		 * catch kernel vmalloc-area accesses to non-mapped
+		 * addresses. If we don't do this, this will just
+		 * silently loop forever.
+		 */
+
+		pte_k = pte_offset_kernel(pmd_k, address);
+		if (!pte_present(*pte_k))
+			goto no_context;
+
+		return;
+	}
+}
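[Note: the vmalloc_fault path above copies one top-level entry from the reference page directory (init_mm.pgd) into the faulting task's directory, so both then share the same second-level table. The sketch below models that with plain arrays; the sizes and types are simplified assumptions, not the kernel's real page-table layout.]

#include <stdio.h>

#define PTRS_PER_DIR	4
#define PTRS_PER_TBL	4

typedef unsigned long pte;

struct dir {
	pte *tables[PTRS_PER_DIR];	/* top level: pointers to PTE pages */
};

static pte *lookup(struct dir *d, unsigned int hi, unsigned int lo)
{
	return d->tables[hi] ? &d->tables[hi][lo] : NULL;
}

int main(void)
{
	static pte kernel_tbl[PTRS_PER_TBL] = { 0, 0x1000, 0, 0 };
	struct dir reference = { .tables = { [2] = kernel_tbl } };
	struct dir current_d = { { 0 } };

	/* "vmalloc fault": slot 2 is missing in the current directory,
	 * so copy the top-level entry from the reference directory. */
	if (!lookup(&current_d, 2, 1))
		current_d.tables[2] = reference.tables[2];

	printf("pte = 0x%lx\n", (unsigned long)*lookup(&current_d, 2, 1));
	return 0;
}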
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
new file mode 100644
index 000000000000..359dcb20fe85
--- /dev/null
+++ b/arch/openrisc/mm/init.c
@@ -0,0 +1,283 @@
+/*
+ * OpenRISC idle.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>	/* for initrd_* */
+#include <linux/pagemap.h>
+#include <linux/memblock.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+
+int mem_init_done;
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static void __init zone_sizes_init(void)
+{
+	unsigned long zones_size[MAX_NR_ZONES];
+
+	/* Clear the zone sizes */
+	memset(zones_size, 0, sizeof(zones_size));
+
+	/*
+	 * We use only ZONE_NORMAL
+	 */
+	zones_size[ZONE_NORMAL] = max_low_pfn;
+
+	free_area_init(zones_size);
+}
+
+extern const char _s_kernel_ro[], _e_kernel_ro[];
+
+/*
+ * Map all physical memory into kernel's address space.
+ *
+ * This is explicitly coded for two-level page tables, so if you need
+ * something else then this needs to change.
+ */
+static void __init map_ram(void)
+{
+	unsigned long v, p, e;
+	pgprot_t prot;
+	pgd_t *pge;
+	pud_t *pue;
+	pmd_t *pme;
+	pte_t *pte;
+	/* These mark extents of read-only kernel pages...
+	 * ...from vmlinux.lds.S
+	 */
+	struct memblock_region *region;
+
+	v = PAGE_OFFSET;
+
+	for_each_memblock(memory, region) {
+		p = (u32) region->base & PAGE_MASK;
+		e = p + (u32) region->size;
+
+		v = (u32) __va(p);
+		pge = pgd_offset_k(v);
+
+		while (p < e) {
+			int j;
+			pue = pud_offset(pge, v);
+			pme = pmd_offset(pue, v);
+
+			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
+				panic("%s: OR1K kernel hardcoded for "
+				      "two-level page tables",
+				     __func__);
+			}
+
+			/* Alloc one page for holding PTE's... */
+			pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
+
+			/* Fill the newly allocated page with PTE'S */
+			for (j = 0; p < e && j < PTRS_PER_PGD;
+			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
+				if (v >= (u32) _e_kernel_ro ||
+				    v < (u32) _s_kernel_ro)
+					prot = PAGE_KERNEL;
+				else
+					prot = PAGE_KERNEL_RO;
+
+				set_pte(pte, mk_pte_phys(p, prot));
+			}
+
+			pge++;
+		}
+
+		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
+		       region->base, region->base + region->size);
+	}
+}
+
+void __init paging_init(void)
+{
+	extern void tlb_init(void);
+
+	unsigned long end;
+	int i;
+
+	printk(KERN_INFO "Setting up paging and PTEs.\n");
+
+	/* clear out the init_mm.pgd that will contain the kernel's mappings */
+
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		swapper_pg_dir[i] = __pgd(0);
+
+	/* make sure the current pgd table points to something sane
+	 * (even if it is most probably not used until the next
+	 *  switch_mm)
+	 */
+	current_pgd = init_mm.pgd;
+
+	end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
+
+	map_ram();
+
+	zone_sizes_init();
+
+	/* self modifying code ;) */
+	/* Since the old TLB miss handler has been running up until now,
+	 * the kernel pages are still all RW, so we can still modify the
+	 * text directly... after this change and a TLB flush, the kernel
+	 * pages will become RO.
+	 */
+	{
+		extern unsigned long dtlb_miss_handler;
+		extern unsigned long itlb_miss_handler;
+
+		unsigned long *dtlb_vector = __va(0x900);
+		unsigned long *itlb_vector = __va(0xa00);
+
+		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
+		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
+				(unsigned long)dtlb_vector) >> 2;
+
+		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
+		*itlb_vector = ((unsigned long)&itlb_miss_handler -
+				(unsigned long)itlb_vector) >> 2;
+	}
+
+	/* Invalidate instruction caches after code modification */
+	mtspr(SPR_ICBIR, 0x900);
+	mtspr(SPR_ICBIR, 0xa00);
+
+	/* The new TLB miss handlers and kernel page tables are now in place.
+	 * Make sure that page flags get updated for all pages in TLB by
+	 * flushing the TLB and forcing all TLB entries to be recreated
+	 * from their page table flags.
+	 */
+	flush_tlb_all();
+}
+
+/* References to section boundaries */
+
+extern char _stext, _etext, _edata, __bss_start, _end;
+extern char __init_begin, __init_end;
+
+static int __init free_pages_init(void)
+{
+	int reservedpages, pfn;
+
+	/* this will put all low memory onto the freelists */
+	totalram_pages = free_all_bootmem();
+
+	reservedpages = 0;
+	for (pfn = 0; pfn < max_low_pfn; pfn++) {
+		/*
+		 * Only count reserved RAM pages
+		 */
+		if (PageReserved(mem_map + pfn))
+			reservedpages++;
+	}
+
+	return reservedpages;
+}
+
+static void __init set_max_mapnr_init(void)
+{
+	max_mapnr = num_physpages = max_low_pfn;
+}
+
+void __init mem_init(void)
+{
+	int codesize, reservedpages, datasize, initsize;
+
+	if (!mem_map)
+		BUG();
+
+	set_max_mapnr_init();
+
+	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
+
+	/* clear the zero-page */
+	memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+	reservedpages = free_pages_init();
+
+	codesize = (unsigned long)&_etext - (unsigned long)&_stext;
+	datasize = (unsigned long)&_edata - (unsigned long)&_etext;
+	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
+
+	printk(KERN_INFO
+	       "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+	       (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
+	       max_mapnr << (PAGE_SHIFT - 10), codesize >> 10,
+	       reservedpages << (PAGE_SHIFT - 10), datasize >> 10,
+	       initsize >> 10, (unsigned long)(0 << (PAGE_SHIFT - 10))
+	    );
+
+	printk("mem_init_done ...........................................\n");
+	mem_init_done = 1;
+	return;
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
+	       (end - start) >> 10);
+
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		init_page_count(virt_to_page(start));
+		free_page(start);
+		totalram_pages++;
+	}
+}
+#endif
+
+void free_initmem(void)
+{
+	unsigned long addr;
+
+	addr = (unsigned long)(&__init_begin);
+	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(addr));
+		init_page_count(virt_to_page(addr));
+		free_page(addr);
+		totalram_pages++;
+	}
+	printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
+	       ((unsigned long)&__init_end -
+		(unsigned long)&__init_begin) >> 10);
+}
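[Note: paging_init() above patches the DTLB/ITLB miss vectors by writing (handler - vector) >> 2 into the vector word. On OR1K the l.j opcode occupies the top bits and happens to be zero, so a bare in-range word offset is itself a valid unconditional jump; that encoding detail is stated here as an assumption about the ISA, and the addresses below are made up.]

#include <stdio.h>

int main(void)
{
	unsigned long vector  = 0x900ul;	/* DTLB miss vector (example) */
	unsigned long handler = 0x2400ul;	/* assumed handler address    */

	/* PC-relative offset in 32-bit words, as written by paging_init(). */
	unsigned long offset_words = (handler - vector) >> 2;

	printf("patched vector word = 0x%08lx\n", offset_words);
	return 0;
}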
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
new file mode 100644
index 000000000000..62b08ef392be
--- /dev/null
+++ b/arch/openrisc/mm/ioremap.c
@@ -0,0 +1,137 @@
+/*
+ * OpenRISC ioremap.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <asm/pgalloc.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/bug.h>
+#include <asm/pgtable.h>
+#include <linux/sched.h>
+#include <asm/tlbflush.h>
+
+extern int mem_init_done;
+
+static unsigned int fixmaps_used __initdata;
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *__init_refok
+__ioremap(phys_addr_t addr, unsigned long size, pgprot_t prot)
+{
+	phys_addr_t p;
+	unsigned long v;
+	unsigned long offset, last_addr;
+	struct vm_struct *area = NULL;
+
+	/* Don't allow wraparound or zero size */
+	last_addr = addr + size - 1;
+	if (!size || last_addr < addr)
+		return NULL;
+
+	/*
+	 * Mappings have to be page-aligned
+	 */
+	offset = addr & ~PAGE_MASK;
+	p = addr & PAGE_MASK;
+	size = PAGE_ALIGN(last_addr + 1) - p;
+
+	if (likely(mem_init_done)) {
+		area = get_vm_area(size, VM_IOREMAP);
+		if (!area)
+			return NULL;
+		v = (unsigned long)area->addr;
+	} else {
+		if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
+			return NULL;
+		v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
+		fixmaps_used += (size >> PAGE_SHIFT);
+	}
+
+	if (ioremap_page_range(v, v + size, p, prot)) {
+		if (likely(mem_init_done))
+			vfree(area->addr);
+		else
+			fixmaps_used -= (size >> PAGE_SHIFT);
+		return NULL;
+	}
+
+	return (void __iomem *)(offset + (char *)v);
+}
+
+void iounmap(void *addr)
+{
+	/* If the page is from the fixmap pool then we just clear out
+	 * the fixmap mapping.
+	 */
+	if (unlikely((unsigned long)addr > FIXADDR_START)) {
+		/* This is a bit broken... we don't really know
+		 * how big the area is so it's difficult to know
+		 * how many fixed pages to invalidate...
+		 * just flush tlb and hope for the best...
+		 * consider this a FIXME
+		 *
+		 * Really we should be clearing out one or more page
+		 * table entries for these virtual addresses so that
+		 * future references cause a page fault... for now, we
+		 * rely on two things:
+		 *   i)  this code never gets called on known boards
+		 *   ii) invalid accesses to the freed areas aren't made
+		 */
+		flush_tlb_all();
+		return;
+	}
+
+	return vfree((void *)(PAGE_MASK & (unsigned long)addr));
+}
+
+/**
+ * OK, this one's a bit tricky... ioremap can get called before memory is
+ * initialized (early serial console does this) and will want to alloc a page
+ * for its mapping.  No userspace pages will ever get allocated before memory
+ * is initialized so this applies only to kernel pages.  In the event that
+ * this is called before memory is initialized we allocate the page from
+ * the boot memory allocator (switching this to memblock is a FIXME).
+ */
+
+pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
+					 unsigned long address)
+{
+	pte_t *pte;
+
+	if (likely(mem_init_done)) {
+		pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+	} else {
+		pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+#if 0
+		/* FIXME: use memblock... */
+		pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+#endif
+	}
+
+	if (pte)
+		clear_page(pte);
+	return pte;
+}
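[Note: __ioremap() above splits the requested physical address into a page-aligned base plus an in-page offset, maps the whole covering range, and returns a cookie that points 'offset' bytes into the new mapping. The arithmetic is sketched below; the 8 KiB page size is an assumption matching OpenRISC's usual configuration.]

#include <stdio.h>

#define PAGE_SIZE	8192ul
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x90000123ul, size = 0x300ul;
	unsigned long last = addr + size - 1;

	unsigned long offset = addr & ~PAGE_MASK;	/* byte offset within the page */
	unsigned long base   = addr & PAGE_MASK;	/* page-aligned start          */
	unsigned long length = ((last + PAGE_SIZE) & PAGE_MASK) - base;	/* PAGE_ALIGN(last + 1) - base */

	printf("map 0x%08lx + 0x%lx, return mapping + 0x%lx\n",
	       base, length, offset);
	return 0;
}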
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
new file mode 100644
index 000000000000..56b0b89624af
--- /dev/null
+++ b/arch/openrisc/mm/tlb.c
@@ -0,0 +1,193 @@
+/*
+ * OpenRISC tlb.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/spr_defs.h>
+
+#define NO_CONTEXT -1
+
+#define NUM_DTLB_SETS (1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> \
+			    SPR_DMMUCFGR_NTS_OFF))
+#define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
+			    SPR_IMMUCFGR_NTS_OFF))
+#define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1))
+#define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1))
+/*
+ * Invalidate all TLB entries.
+ *
+ * This comes down to setting the 'valid' bit for all xTLBMR registers to 0.
+ * Easiest way to accomplish this is to just zero out the xTLBMR register
+ * completely.
+ *
+ */
+
+void flush_tlb_all(void)
+{
+	int i;
+	unsigned long num_tlb_sets;
+
+	/* Determine number of sets for IMMU. */
+	/* FIXME: Assumption is I & D nsets equal. */
+	num_tlb_sets = NUM_ITLB_SETS;
+
+	for (i = 0; i < num_tlb_sets; i++) {
+		mtspr_off(SPR_DTLBMR_BASE(0), i, 0);
+		mtspr_off(SPR_ITLBMR_BASE(0), i, 0);
+	}
+}
+
+#define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI)
+#define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI)
+
+/*
+ * Invalidate a single page.  This is what the xTLBEIR register is for.
+ *
+ * There's no point in checking the vma for PAGE_EXEC to determine whether it's
+ * the data or instruction TLB that should be flushed... that would take more
+ * than the few instructions that the following compiles down to!
+ *
+ * The case where we don't have the xTLBEIR register really only works for
+ * MMU's with a single way and is hard-coded that way.
+ */
+
+#define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr)
+#define flush_dtlb_page_no_eir(addr) \
+	mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0);
+
+#define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr)
+#define flush_itlb_page_no_eir(addr) \
+	mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0);
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	if (have_dtlbeir)
+		flush_dtlb_page_eir(addr);
+	else
+		flush_dtlb_page_no_eir(addr);
+
+	if (have_itlbeir)
+		flush_itlb_page_eir(addr);
+	else
+		flush_itlb_page_no_eir(addr);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+		     unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+	bool dtlbeir;
+	bool itlbeir;
+
+	dtlbeir = have_dtlbeir;
+	itlbeir = have_itlbeir;
+
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		if (dtlbeir)
+			flush_dtlb_page_eir(addr);
+		else
+			flush_dtlb_page_no_eir(addr);
+
+		if (itlbeir)
+			flush_itlb_page_eir(addr);
+		else
+			flush_itlb_page_no_eir(addr);
+	}
+}
+
+/*
+ * Invalidate the selected mm context only.
+ *
+ * FIXME: Due to some bug here, we're flushing everything for now.
+ * This should be changed to loop over the mm and call flush_tlb_range.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+
+	/* Was seeing bugs with the mm struct passed to us. Scrapped most of
+	   this function. */
+	/* Several architectures do this */
+	flush_tlb_all();
+}
+
+/* called in schedule() just before actually doing the switch_to */
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	       struct task_struct *next_tsk)
+{
+	/* remember the pgd for the fault handlers
+	 * this is similar to the pgd register in some other CPU's.
+	 * we need our own copy of it because current and active_mm
+	 * might be invalid at points where we still need to dereference
+	 * the pgd.
+	 */
+	current_pgd = next->pgd;
+
+	/* We don't have context support implemented, so flush all
+	 * entries belonging to previous map
+	 */
+
+	if (prev != next)
+		flush_tlb_mm(prev);
+
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	mm->context = NO_CONTEXT;
+	return 0;
+}
+
+/* called by __exit_mm to destroy the used MMU context if any before
+ * destroying the mm itself. this is only called when the last user of the mm
+ * drops it.
+ */
+
+void destroy_context(struct mm_struct *mm)
+{
+	flush_tlb_mm(mm);
+
+}
+
+/* called once during VM initialization, from init.c */
+
+void __init tlb_init(void)
+{
+	/* Do nothing... */
+	/* invalidate the entire TLB */
+	/* flush_tlb_all(); */
+}
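[Note: when there is no xTLBEIR register, the code above invalidates a single page by zeroing the match register of the TLB set that the page hashes to; DTLB_OFFSET()/ITLB_OFFSET() are just "virtual page number modulo number of sets". A small sketch of that index math follows; the 8 KiB page size and 64-set TLB are assumptions for the example.]

#include <stdio.h>

#define PAGE_SHIFT	13	/* 8 KiB pages (assumed)  */
#define NUM_SETS	64	/* assumed TLB set count  */

static unsigned int tlb_set(unsigned long addr)
{
	return (addr >> PAGE_SHIFT) & (NUM_SETS - 1);
}

int main(void)
{
	printf("set for 0x30002000 = %u\n", tlb_set(0x30002000ul));
	printf("set for 0x30004000 = %u\n", tlb_set(0x30004000ul));
	return 0;
}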
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h
index 4b16bf9b56bd..9670e127b7b2 100644
--- a/arch/sh/include/asm/delay.h
+++ b/arch/sh/include/asm/delay.h
@@ -1,26 +1 @@
-#ifndef __ASM_SH_DELAY_H
-#define __ASM_SH_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/sh/lib/delay.c
- */
-
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
-	__udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
-	__ndelay(n))
-
-#endif /* __ASM_SH_DELAY_H */
+#include <asm-generic/delay.h>
diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h
index 409a649204aa..9b3b4f2754c7 100644
--- a/arch/x86/include/asm/delay.h
+++ b/arch/x86/include/asm/delay.h
@@ -1,30 +1,7 @@
 #ifndef _ASM_X86_DELAY_H
 #define _ASM_X86_DELAY_H
 
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/x86/lib/delay.c
- */
-
-/* Undefined functions to get compile-time errors */
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-/* 0x10c7 is 2**32 / 1000000 (rounded up) */
-#define udelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
-	__udelay(n))
-
-/* 0x5 is 2**32 / 1000000000 (rounded up) */
-#define ndelay(n) (__builtin_constant_p(n) ? \
-	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
-	__ndelay(n))
+#include <asm-generic/delay.h>
 
 void use_tsc_delay(void);
 
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
index 4586fec75ddb..0f79054ce7cd 100644
--- a/include/asm-generic/delay.h
+++ b/include/asm-generic/delay.h
@@ -1,9 +1,44 @@
 #ifndef __ASM_GENERIC_DELAY_H
 #define __ASM_GENERIC_DELAY_H
 
+/* Undefined functions to get compile-time errors */
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
 extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
+extern void __const_udelay(unsigned long xloops);
 extern void __delay(unsigned long loops);
 
-#define udelay(n) __udelay(n)
+/*
+ * The weird n/20000 thing suppresses a "comparison is always false due to
+ * limited range of data type" warning with non-const 8-bit arguments.
+ */
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
+#define udelay(n)							\
+	({								\
+		if (__builtin_constant_p(n)) {				\
+			if ((n) / 20000 >= 1)				\
+				 __bad_udelay();			\
+			else						\
+				__const_udelay((n) * 0x10c7ul);		\
+		} else {						\
+			__udelay(n);					\
+		}							\
+	})
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
+#define ndelay(n)							\
+	({								\
+		if (__builtin_constant_p(n)) {				\
+			if ((n) / 20000 >= 1)				\
+				__bad_ndelay();				\
+			else						\
+				__const_udelay((n) * 5ul);		\
+		} else {						\
+			__ndelay(n);					\
+		}							\
+	})
 
 #endif /* __ASM_GENERIC_DELAY_H */
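[Note: the udelay()/ndelay() macros added above turn an out-of-range constant argument into a call to an undefined function, so the build fails instead of silently overflowing. A stand-alone mock-up of that pattern follows (my_delay, __bad_delay and do_delay are invented names); it relies on the optimizer removing the dead branch, so compile with -O2.]

extern void __bad_delay(void);			/* deliberately never defined */
static void do_delay(unsigned long n) { (void)n; }

#define my_delay(n)						\
	({							\
		if (__builtin_constant_p(n) && (n) / 20000 >= 1)\
			__bad_delay();				\
		else						\
			do_delay(n);				\
	})

int main(void)
{
	my_delay(100);		/* constant below the limit: links fine */
	/* my_delay(50000); */	/* constant too large: link error       */
	return 0;
}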
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index e0ffa3ddb02a..912088773a69 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -307,7 +307,11 @@ static inline void *phys_to_virt(unsigned long address)
 
 /*
  * Change "struct page" to physical address.
+ *
+ * This implementation is for the no-MMU case only... if you have an MMU
+ * you'll need to provide your own definitions.
  */
+#ifndef CONFIG_MMU
 static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
 {
 	return (void __iomem*) (unsigned long)offset;
@@ -326,7 +330,9 @@ static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
 static inline void iounmap(void *addr)
 {
 }
+#endif /* CONFIG_MMU */
 
+#ifdef CONFIG_HAS_IOPORT
 #ifndef CONFIG_GENERIC_IOMAP
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
@@ -340,9 +346,10 @@ static inline void ioport_unmap(void __iomem *p)
 extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
 extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_GENERIC_IOMAP */
+#endif /* CONFIG_HAS_IOPORT */
 
 #define xlate_dev_kmem_ptr(p)	p
-#define xlate_dev_mem_ptr(p)	((void *) (p))
+#define xlate_dev_mem_ptr(p)	__va(p)
 
 #ifndef virt_to_bus
 static inline unsigned long virt_to_bus(volatile void *address)
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 76b0cc5637f8..c74ef2c6e633 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -56,17 +56,21 @@ extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long coun
 extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
 extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
 
+#ifdef CONFIG_HAS_IOPORT
 /* Create a virtual mapping cookie for an IO port range */
 extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
 extern void ioport_unmap(void __iomem *);
+#endif
 
 #ifndef ARCH_HAS_IOREMAP_WC
 #define ioremap_wc ioremap_nocache
 #endif
 
+#ifdef CONFIG_PCI
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
 extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
 extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+#endif
 
 #endif
diff --git a/lib/iomap.c b/lib/iomap.c
index d32229385151..5dbcb4b2d864 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -224,6 +224,7 @@ EXPORT_SYMBOL(iowrite8_rep);
 EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 
+#ifdef CONFIG_HAS_IOPORT
 /* Create a virtual mapping cookie for an IO port range */
 void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
@@ -238,7 +239,9 @@ void ioport_unmap(void __iomem *addr)
 }
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
+#endif /* CONFIG_HAS_IOPORT */
 
+#ifdef CONFIG_PCI
 /**
  * pci_iomap - create a virtual mapping cookie for a PCI BAR
  * @dev: PCI device that owns the BAR
@@ -280,3 +283,4 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 }
 EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
+#endif /* CONFIG_PCI */