author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/s390/kernel
download  linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.gz
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile                 31
-rw-r--r--  arch/s390/kernel/asm-offsets.c            49
-rw-r--r--  arch/s390/kernel/binfmt_elf32.c          210
-rw-r--r--  arch/s390/kernel/bitmap.S                 56
-rw-r--r--  arch/s390/kernel/compat_exec_domain.c     30
-rw-r--r--  arch/s390/kernel/compat_ioctl.c           73
-rw-r--r--  arch/s390/kernel/compat_linux.c         1045
-rw-r--r--  arch/s390/kernel/compat_linux.h          197
-rw-r--r--  arch/s390/kernel/compat_ptrace.h          83
-rw-r--r--  arch/s390/kernel/compat_signal.c         648
-rw-r--r--  arch/s390/kernel/compat_wrapper.S       1443
-rw-r--r--  arch/s390/kernel/cpcmd.c                 111
-rw-r--r--  arch/s390/kernel/debug.c                1286
-rw-r--r--  arch/s390/kernel/ebcdic.c                400
-rw-r--r--  arch/s390/kernel/entry.S                 868
-rw-r--r--  arch/s390/kernel/entry64.S               881
-rw-r--r--  arch/s390/kernel/head.S                  772
-rw-r--r--  arch/s390/kernel/head64.S                769
-rw-r--r--  arch/s390/kernel/init_task.c              44
-rw-r--r--  arch/s390/kernel/irq.c                   105
-rw-r--r--  arch/s390/kernel/module.c                405
-rw-r--r--  arch/s390/kernel/process.c               416
-rw-r--r--  arch/s390/kernel/profile.c                20
-rw-r--r--  arch/s390/kernel/ptrace.c                738
-rw-r--r--  arch/s390/kernel/reipl.S                  78
-rw-r--r--  arch/s390/kernel/reipl64.S                96
-rw-r--r--  arch/s390/kernel/s390_ext.c              135
-rw-r--r--  arch/s390/kernel/s390_ksyms.c             65
-rw-r--r--  arch/s390/kernel/semaphore.c             108
-rw-r--r--  arch/s390/kernel/setup.c                 632
-rw-r--r--  arch/s390/kernel/signal.c                527
-rw-r--r--  arch/s390/kernel/smp.c                   840
-rw-r--r--  arch/s390/kernel/sys_s390.c              270
-rw-r--r--  arch/s390/kernel/syscalls.S              292
-rw-r--r--  arch/s390/kernel/time.c                  382
-rw-r--r--  arch/s390/kernel/traps.c                 738
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S           130
-rw-r--r--  arch/s390/kernel/vtime.c                 565
38 files changed, 15538 insertions, 0 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
new file mode 100644
index 000000000000..b41e0e199a7c
--- /dev/null
+++ b/arch/s390/kernel/Makefile
@@ -0,0 +1,31 @@
+#
+# Makefile for the linux kernel.
+#
+
+EXTRA_AFLAGS	:= -traditional
+
+obj-y	:=  bitmap.o traps.o time.o process.o \
+            setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
+            semaphore.o s390_ext.o debug.o profile.o irq.o
+
+extra-$(CONFIG_ARCH_S390_31)	+= head.o 
+extra-$(CONFIG_ARCH_S390X)	+= head64.o 
+extra-y				+= init_task.o vmlinux.lds
+
+obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
+obj-$(CONFIG_SMP)		+= smp.o
+
+obj-$(CONFIG_S390_SUPPORT)	+= compat_linux.o compat_signal.o \
+					compat_ioctl.o compat_wrapper.o \
+					compat_exec_domain.o
+obj-$(CONFIG_BINFMT_ELF32)	+= binfmt_elf32.o
+
+obj-$(CONFIG_ARCH_S390_31)	+= entry.o reipl.o
+obj-$(CONFIG_ARCH_S390X)	+= entry64.o reipl64.o
+
+obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
+
+#
+# This is just to get the dependencies...
+#
+binfmt_elf32.o:	$(TOPDIR)/fs/binfmt_elf.c
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
new file mode 100644
index 000000000000..3f7018e9dbe4
--- /dev/null
+++ b/arch/s390/kernel/asm-offsets.c
@@ -0,0 +1,49 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+
+/* Use marker if you need to separate the values later */
+
+#define DEFINE(sym, val, marker) \
+	asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+	DEFINE(__THREAD_info, offsetof(struct task_struct, thread_info),);
+	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),);
+	DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),);
+	DEFINE(__THREAD_mm_segment,
+	       offsetof(struct task_struct, thread.mm_segment),);
+	BLANK();
+	DEFINE(__TASK_pid, offsetof(struct task_struct, pid),);
+	BLANK();
+	DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid),);
+	DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address),);
+	DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id),);
+	BLANK();
+	DEFINE(__TI_task, offsetof(struct thread_info, task),);
+	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain),);
+	DEFINE(__TI_flags, offsetof(struct thread_info, flags),);
+	DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),);
+	DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),);
+	BLANK();
+	DEFINE(__PT_ARGS, offsetof(struct pt_regs, args),);
+	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw),);
+	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs),);
+	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2),);
+	DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc),);
+	DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap),);
+	DEFINE(__PT_SIZE, sizeof(struct pt_regs),);
+	BLANK();
+	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain),);
+	DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs),);
+	DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1),);
+	return 0;
+}
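
Each DEFINE() in the file above leans on the "i" asm constraint: the compiler prints the constant into its .s output on a line tagged with "->", and the build then extracts those markers and rewrites them as preprocessor constants that entry.S and the other assembly files can include. A minimal sketch of the round trip; the concrete offset value and header name are illustrative, not taken from this commit:

/* A line of compiler .s output produced by DEFINE(__PT_PSW, ...):
 *
 *   ->__PT_PSW 8 offsetof(struct pt_regs, psw)
 *
 * which the build rewrites into a generated header, e.g. asm-offsets.h:
 */
#define __PT_PSW 8	/* offset of psw in struct pt_regs; value illustrative */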
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
new file mode 100644
index 000000000000..03ba5893f17b
--- /dev/null
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -0,0 +1,210 @@
+/*
+ * Support for 32-bit Linux for S390 ELF binaries.
+ *
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Gerhard Tonn (ton@de.ibm.com)
+ *
+ * Heavily inspired by the 32-bit Sparc compat code which is
+ * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek   (jj@ultra.linux.cz)
+ */
+
+#define __ASMS390_ELF_H
+
+#include <linux/time.h>
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS32
+#define ELF_DATA	ELFDATA2MSB
+#define ELF_ARCH	EM_S390
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+	(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
+         && (x)->e_ident[EI_CLASS] == ELF_CLASS)
+
+/* ELF register definitions */
+#define NUM_GPRS      16
+#define NUM_FPRS      16
+#define NUM_ACRS      16    
+
+/* For SVR4/S390 the function pointer to be registered with `atexit` is
+   passed in R14. */
+#define ELF_PLAT_INIT(_r, load_addr) \
+	do { \
+		_r->gprs[14] = 0; \
+	} while(0)
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE       4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
+
+/* Wow, the "main" arch needs arch dependent functions too.. :) */
+
+/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+   now struct_user_regs, they are different) */
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg);
+
+#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs)
+
+#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this CPU supports. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.
+
+   For the moment, we have only optimizations for the Intel generations,
+   but that could change... */
+
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex, ibcs2)			\
+do {							\
+	if (ibcs2)                                      \
+		set_personality(PER_SVR4);              \
+	else if (current->personality != PER_LINUX32)   \
+		set_personality(PER_LINUX);             \
+	set_thread_flag(TIF_31BIT);			\
+} while (0)
+
+#include "compat_linux.h"
+
+typedef _s390_fp_regs32 elf_fpregset_t;
+
+typedef struct
+{
+	
+	_psw_t32	psw;
+	__u32		gprs[__NUM_GPRS]; 
+	__u32		acrs[__NUM_ACRS]; 
+	__u32		orig_gpr2;
+} s390_regs32;
+typedef s390_regs32 elf_gregset_t;
+
+static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
+{
+	int i;
+
+	memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
+	memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
+	for (i = 0; i < NUM_GPRS; i++)
+		regs->gprs[i] = ptregs->gprs[i];
+	save_access_regs(regs->acrs);
+	regs->orig_gpr2 = ptregs->orig_gpr2;
+	return 1;
+}
+
+static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
+{
+	struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
+	int i;
+
+	memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
+	memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
+	for (i = 0; i < NUM_GPRS; i++)
+		regs->gprs[i] = ptregs->gprs[i];
+	memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
+	regs->orig_gpr2 = ptregs->orig_gpr2;
+	return 1;
+}
+
+static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+{
+	if (tsk == current)
+		save_fp_regs((s390_fp_regs *) fpregs);
+	else
+		memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
+	return 1;
+}
+
+#include <asm/processor.h>
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/elfcore.h>
+#include <linux/binfmts.h>
+#include <linux/compat.h>
+
+#define elf_prstatus elf_prstatus32
+struct elf_prstatus32
+{
+	struct elf_siginfo pr_info;	/* Info associated with signal */
+	short	pr_cursig;		/* Current signal */
+	u32	pr_sigpend;	/* Set of pending signals */
+	u32	pr_sighold;	/* Set of held signals */
+	pid_t	pr_pid;
+	pid_t	pr_ppid;
+	pid_t	pr_pgrp;
+	pid_t	pr_sid;
+	struct compat_timeval pr_utime;	/* User time */
+	struct compat_timeval pr_stime;	/* System time */
+	struct compat_timeval pr_cutime;	/* Cumulative user time */
+	struct compat_timeval pr_cstime;	/* Cumulative system time */
+	elf_gregset_t pr_reg;	/* GP registers */
+	int pr_fpvalid;		/* True if math co-processor being used.  */
+};
+
+#define elf_prpsinfo elf_prpsinfo32
+struct elf_prpsinfo32
+{
+	char	pr_state;	/* numeric process state */
+	char	pr_sname;	/* char for pr_state */
+	char	pr_zomb;	/* zombie */
+	char	pr_nice;	/* nice val */
+	u32	pr_flag;	/* flags */
+	u16	pr_uid;
+	u16	pr_gid;
+	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
+	/* Lots missing */
+	char	pr_fname[16];	/* filename of executable */
+	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
+};
+
+#include <linux/highuid.h>
+
+#undef NEW_TO_OLD_UID
+#undef NEW_TO_OLD_GID
+#define NEW_TO_OLD_UID(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
+#define NEW_TO_OLD_GID(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid) 
+
+#define elf_addr_t	u32
+/*
+#define init_elf_binfmt init_elf32_binfmt
+*/
+
+#undef start_thread
+#define start_thread                    start_thread31 
+
+MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
+                   " Copyright 2000 IBM Corporation"); 
+MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
+
+#undef MODULE_DESCRIPTION
+#undef MODULE_AUTHOR
+
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+	value->tv_usec = cputime % 1000000;
+	value->tv_sec = cputime / 1000000;
+}
+
+#include "../../../fs/binfmt_elf.c"
+
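The whole file is built around one trick: override the ELF types and arch hooks with 31-bit variants, then textually include the generic loader (the final #include of fs/binfmt_elf.c above) so its code is recompiled against the overridden definitions. A minimal, self-contained sketch of that pattern, with hypothetical file and macro names:

/* generic_body.c (stand-in for fs/binfmt_elf.c) would contain code
 * that compiles against whatever the including file defined first:
 *
 *     void load_binary(void) { printf("%s\n", LOADER_NAME); }
 *
 * compat.c (stand-in for binfmt_elf32.c): override, then include. */
#include <stdio.h>
#define LOADER_NAME "31-bit compat loader"	/* hypothetical override */
#include "generic_body.c"			/* generic code recompiled
						   against the override */
int main(void) { load_binary(); return 0; }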
diff --git a/arch/s390/kernel/bitmap.S b/arch/s390/kernel/bitmap.S
new file mode 100644
index 000000000000..dfb41f946e23
--- /dev/null
+++ b/arch/s390/kernel/bitmap.S
@@ -0,0 +1,56 @@
+/*
+ *  arch/s390/kernel/bitmap.S
+ *    Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
+ *    See include/asm-s390/{bitops.h|posix_types.h} for details
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+         .globl _oi_bitmap
+_oi_bitmap:
+         .byte  0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80
+
+         .globl _ni_bitmap
+_ni_bitmap:
+         .byte  0xFE,0xFD,0xFB,0xF7,0xEF,0xDF,0xBF,0x7F
+
+         .globl _zb_findmap
+_zb_findmap:
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4 
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
+         .byte  0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8
+
+         .globl _sb_findmap
+_sb_findmap:
+         .byte  8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+         .byte  4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+
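The four tables encode byte-wide bit operations: _oi_bitmap and _ni_bitmap hold the masks for the OI (or immediate) and NI (and immediate) instructions behind set_bit and clear_bit, while _zb_findmap and _sb_findmap map a byte value to the index of its lowest zero or set bit so find_first_zero_bit and friends can scan one byte at a time. A small userspace sketch that regenerates _zb_findmap for cross-checking:

#include <stdio.h>

/* Regenerate the _zb_findmap table above.  For a byte value b, the
 * entry is the index of the lowest zero bit, or 8 when b == 0xff (no
 * zero bit).  _sb_findmap is the same idea for the lowest set bit,
 * with 8 for b == 0x00. */
int main(void)
{
	for (int b = 0; b < 256; b++) {
		int i = 0;
		while (i < 8 && (b & (1 << i)))
			i++;
		printf("%d%s", i, (b % 16 == 15) ? "\n" : ",");
	}
	return 0;
}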
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
new file mode 100644
index 000000000000..71d27c493568
--- /dev/null
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -0,0 +1,30 @@
+/*
+ * Support for 32-bit Linux for S390 personality.
+ *
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Gerhard Tonn (ton@de.ibm.com)
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/personality.h>
+#include <linux/sched.h>
+
+struct exec_domain s390_exec_domain;
+
+static int __init
+s390_init (void)
+{
+	s390_exec_domain.name = "Linux/s390";
+	s390_exec_domain.handler = NULL;
+	s390_exec_domain.pers_low = PER_LINUX32;
+	s390_exec_domain.pers_high = PER_LINUX32;
+	s390_exec_domain.signal_map = default_exec_domain.signal_map;
+	s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
+	register_exec_domain(&s390_exec_domain);
+	return 0;
+}
+
+__initcall(s390_init);
diff --git a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c
new file mode 100644
index 000000000000..96571ff7115d
--- /dev/null
+++ b/arch/s390/kernel/compat_ioctl.c
@@ -0,0 +1,73 @@
+/*
+ * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
+ *
+ *  S390 version
+ *    Copyright (C) 2000-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Gerhard Tonn (ton@de.ibm.com)
+ *               Arnd Bergmann (arndb@de.ibm.com)
+ *
+ * Original implementation from 32-bit Sparc compat code which is
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Written by Ulf Carlsson (ulfc@engr.sgi.com) 
+ */
+
+#include "compat_linux.h"
+#define INCLUDES
+#define CODE
+#include "../../../fs/compat_ioctl.c"
+#include <asm/dasd.h>
+#include <asm/tape390.h>
+
+static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
+				unsigned long arg, struct file *f)
+{
+	return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
+}
+
+static int do_ioctl32_ulong(unsigned int fd, unsigned int cmd,
+				unsigned long arg, struct file *f)
+{
+	return sys_ioctl(fd, cmd, arg);
+}
+
+#define COMPATIBLE_IOCTL(cmd)		HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_pointer)
+#define ULONG_IOCTL(cmd)		HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_ulong)
+#define HANDLE_IOCTL(cmd,handler)	{ (cmd), (ioctl_trans_handler_t)(handler), NULL },
+
+struct ioctl_trans ioctl_start[] = {
+/* architecture independent ioctls */
+#include <linux/compat_ioctl.h>
+#define DECLARES
+#include "../../../fs/compat_ioctl.c"
+
+/* s390 only ioctls */
+#if defined(CONFIG_DASD) || defined(CONFIG_DASD_MODULE)
+COMPATIBLE_IOCTL(DASDAPIVER)
+COMPATIBLE_IOCTL(BIODASDDISABLE)
+COMPATIBLE_IOCTL(BIODASDENABLE)
+COMPATIBLE_IOCTL(BIODASDRSRV)
+COMPATIBLE_IOCTL(BIODASDRLSE)
+COMPATIBLE_IOCTL(BIODASDSLCK)
+COMPATIBLE_IOCTL(BIODASDINFO)
+COMPATIBLE_IOCTL(BIODASDINFO2)
+COMPATIBLE_IOCTL(BIODASDFMT)
+COMPATIBLE_IOCTL(BIODASDPRRST)
+COMPATIBLE_IOCTL(BIODASDQUIESCE)
+COMPATIBLE_IOCTL(BIODASDRESUME)
+COMPATIBLE_IOCTL(BIODASDPRRD)
+COMPATIBLE_IOCTL(BIODASDPSRD)
+COMPATIBLE_IOCTL(BIODASDGATTR)
+COMPATIBLE_IOCTL(BIODASDSATTR)
+
+#endif
+
+#if defined(CONFIG_S390_TAPE) || defined(CONFIG_S390_TAPE_MODULE)
+COMPATIBLE_IOCTL(TAPE390_DISPLAY)
+#endif
+
+/* s390 doesn't need handlers here */
+COMPATIBLE_IOCTL(TIOCGSERIAL)
+COMPATIBLE_IOCTL(TIOCSSERIAL)
+};
+
+int ioctl_table_size = ARRAY_SIZE(ioctl_start);
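
Each COMPATIBLE_IOCTL()/HANDLE_IOCTL() line above expands to one struct ioctl_trans initializer, so ioctl_start[] is assembled entirely at compile time and the table size falls out of ARRAY_SIZE(). A reduced sketch of the same table-building idiom, with hypothetical types and command values:

typedef int (*handler_t)(unsigned int, unsigned int, unsigned long, void *);

struct trans { unsigned int cmd; handler_t handler; void *next; };

static int ptr_handler(unsigned int fd, unsigned int cmd,
		       unsigned long arg, void *file)
{
	return 0;	/* would forward to the 64-bit ioctl path */
}

#define HANDLE(cmd, h)	{ (cmd), (handler_t)(h), NULL },
#define COMPAT(cmd)	HANDLE((cmd), ptr_handler)

static struct trans table[] = {
	COMPAT(0x1234)	/* stands in for e.g. COMPATIBLE_IOCTL(DASDAPIVER) */
	COMPAT(0x5678)
};

static const unsigned table_size = sizeof(table) / sizeof(table[0]);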
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
new file mode 100644
index 000000000000..614056222875
--- /dev/null
+++ b/arch/s390/kernel/compat_linux.c
@@ -0,0 +1,1045 @@
+/*
+ *  arch/s390x/kernel/linux32.c
+ *
+ *  S390 version
+ *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Gerhard Tonn (ton@de.ibm.com)   
+ *               Thomas Spatzier (tspat@de.ibm.com)
+ *
+ *  Conversion between 31bit and 64bit native syscalls.
+ *
+ * Heavily inspired by the 32-bit Sparc compat code which is 
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h> 
+#include <linux/mm.h> 
+#include <linux/file.h> 
+#include <linux/signal.h>
+#include <linux/resource.h>
+#include <linux/times.h>
+#include <linux/utsname.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/nfs_fs.h>
+#include <linux/quota.h>
+#include <linux/module.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfsd/xdr.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/poll.h>
+#include <linux/personality.h>
+#include <linux/stat.h>
+#include <linux/filter.h>
+#include <linux/highmem.h>
+#include <linux/highuid.h>
+#include <linux/mman.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/icmpv6.h>
+#include <linux/syscalls.h>
+#include <linux/sysctl.h>
+#include <linux/binfmts.h>
+#include <linux/compat.h>
+#include <linux/vfs.h>
+#include <linux/ptrace.h>
+
+#include <asm/types.h>
+#include <asm/ipc.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include <net/scm.h>
+#include <net/sock.h>
+
+#include "compat_linux.h"
+
+ 
+/* For this source file, we want overflow handling. */
+
+#undef high2lowuid
+#undef high2lowgid
+#undef low2highuid
+#undef low2highgid
+#undef SET_UID16
+#undef SET_GID16
+#undef NEW_TO_OLD_UID
+#undef NEW_TO_OLD_GID
+#undef SET_OLDSTAT_UID
+#undef SET_OLDSTAT_GID
+#undef SET_STAT_UID
+#undef SET_STAT_GID
+
+#define high2lowuid(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
+#define high2lowgid(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
+#define low2highuid(uid) ((uid) == (u16)-1) ? (uid_t)-1 : (uid_t)(uid)
+#define low2highgid(gid) ((gid) == (u16)-1) ? (gid_t)-1 : (gid_t)(gid)
+#define SET_UID16(var, uid)	var = high2lowuid(uid)
+#define SET_GID16(var, gid)	var = high2lowgid(gid)
+#define NEW_TO_OLD_UID(uid)	high2lowuid(uid)
+#define NEW_TO_OLD_GID(gid)	high2lowgid(gid)
+#define SET_OLDSTAT_UID(stat, uid)	(stat).st_uid = high2lowuid(uid)
+#define SET_OLDSTAT_GID(stat, gid)	(stat).st_gid = high2lowgid(gid)
+#define SET_STAT_UID(stat, uid)		(stat).st_uid = high2lowuid(uid)
+#define SET_STAT_GID(stat, gid)		(stat).st_gid = high2lowgid(gid)
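+
+/* Example: with the default overflowuid of 65534, high2lowuid(70000)
+ * yields 65534 because 70000 does not fit in 16 bits, while
+ * high2lowuid(500) passes through unchanged; low2highuid(0xffff) maps
+ * the old 16-bit -1 sentinel back to (uid_t)-1. */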
+
+asmlinkage long sys32_chown16(const char * filename, u16 user, u16 group)
+{
+	return sys_chown(filename, low2highuid(user), low2highgid(group));
+}
+
+asmlinkage long sys32_lchown16(const char * filename, u16 user, u16 group)
+{
+	return sys_lchown(filename, low2highuid(user), low2highgid(group));
+}
+
+asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
+{
+	return sys_fchown(fd, low2highuid(user), low2highgid(group));
+}
+
+asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
+{
+	return sys_setregid(low2highgid(rgid), low2highgid(egid));
+}
+
+asmlinkage long sys32_setgid16(u16 gid)
+{
+	return sys_setgid((gid_t)gid);
+}
+
+asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
+{
+	return sys_setreuid(low2highuid(ruid), low2highuid(euid));
+}
+
+asmlinkage long sys32_setuid16(u16 uid)
+{
+	return sys_setuid((uid_t)uid);
+}
+
+asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
+{
+	return sys_setresuid(low2highuid(ruid), low2highuid(euid),
+		low2highuid(suid));
+}
+
+asmlinkage long sys32_getresuid16(u16 *ruid, u16 *euid, u16 *suid)
+{
+	int retval;
+
+	if (!(retval = put_user(high2lowuid(current->uid), ruid)) &&
+	    !(retval = put_user(high2lowuid(current->euid), euid)))
+		retval = put_user(high2lowuid(current->suid), suid);
+
+	return retval;
+}
+
+asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
+{
+	return sys_setresgid(low2highgid(rgid), low2highgid(egid),
+		low2highgid(sgid));
+}
+
+asmlinkage long sys32_getresgid16(u16 *rgid, u16 *egid, u16 *sgid)
+{
+	int retval;
+
+	if (!(retval = put_user(high2lowgid(current->gid), rgid)) &&
+	    !(retval = put_user(high2lowgid(current->egid), egid)))
+		retval = put_user(high2lowgid(current->sgid), sgid);
+
+	return retval;
+}
+
+asmlinkage long sys32_setfsuid16(u16 uid)
+{
+	return sys_setfsuid((uid_t)uid);
+}
+
+asmlinkage long sys32_setfsgid16(u16 gid)
+{
+	return sys_setfsgid((gid_t)gid);
+}
+
+static int groups16_to_user(u16 *grouplist, struct group_info *group_info)
+{
+	int i;
+	u16 group;
+
+	for (i = 0; i < group_info->ngroups; i++) {
+		group = (u16)GROUP_AT(group_info, i);
+		if (put_user(group, grouplist+i))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int groups16_from_user(struct group_info *group_info, u16 *grouplist)
+{
+	int i;
+	u16 group;
+
+	for (i = 0; i < group_info->ngroups; i++) {
+		if (get_user(group, grouplist+i))
+			return  -EFAULT;
+		GROUP_AT(group_info, i) = (gid_t)group;
+	}
+
+	return 0;
+}
+
+asmlinkage long sys32_getgroups16(int gidsetsize, u16 *grouplist)
+{
+	int i;
+
+	if (gidsetsize < 0)
+		return -EINVAL;
+
+	get_group_info(current->group_info);
+	i = current->group_info->ngroups;
+	if (gidsetsize) {
+		if (i > gidsetsize) {
+			i = -EINVAL;
+			goto out;
+		}
+		if (groups16_to_user(grouplist, current->group_info)) {
+			i = -EFAULT;
+			goto out;
+		}
+	}
+out:
+	put_group_info(current->group_info);
+	return i;
+}
+
+asmlinkage long sys32_setgroups16(int gidsetsize, u16 *grouplist)
+{
+	struct group_info *group_info;
+	int retval;
+
+	if (!capable(CAP_SETGID))
+		return -EPERM;
+	if ((unsigned)gidsetsize > NGROUPS_MAX)
+		return -EINVAL;
+
+	group_info = groups_alloc(gidsetsize);
+	if (!group_info)
+		return -ENOMEM;
+	retval = groups16_from_user(group_info, grouplist);
+	if (retval) {
+		put_group_info(group_info);
+		return retval;
+	}
+
+	retval = set_current_groups(group_info);
+	put_group_info(group_info);
+
+	return retval;
+}
+
+asmlinkage long sys32_getuid16(void)
+{
+	return high2lowuid(current->uid);
+}
+
+asmlinkage long sys32_geteuid16(void)
+{
+	return high2lowuid(current->euid);
+}
+
+asmlinkage long sys32_getgid16(void)
+{
+	return high2lowgid(current->gid);
+}
+
+asmlinkage long sys32_getegid16(void)
+{
+	return high2lowgid(current->egid);
+}
+
+/* 32-bit timeval and related flotsam.  */
+
+static inline long get_tv32(struct timeval *o, struct compat_timeval *i)
+{
+	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+		(__get_user(o->tv_sec, &i->tv_sec) ||
+		 __get_user(o->tv_usec, &i->tv_usec)));
+}
+
+static inline long put_tv32(struct compat_timeval *o, struct timeval *i)
+{
+	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+		(__put_user(i->tv_sec, &o->tv_sec) ||
+		 __put_user(i->tv_usec, &o->tv_usec)));
+}
+
+/*
+ * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
+{
+	if (call >> 16)		/* hack for backward compatibility */
+		return -EINVAL;
+
+	call &= 0xffff;
+
+	switch (call) {
+	case SEMTIMEDOP:
+		return compat_sys_semtimedop(first, compat_ptr(ptr),
+					     second, compat_ptr(third));
+	case SEMOP:
+		/* struct sembuf is the same on 32 and 64bit :)) */
+		return sys_semtimedop(first, compat_ptr(ptr),
+				      second, NULL);
+	case SEMGET:
+		return sys_semget(first, second, third);
+	case SEMCTL:
+		return compat_sys_semctl(first, second, third,
+					 compat_ptr(ptr));
+	case MSGSND:
+		return compat_sys_msgsnd(first, second, third,
+					 compat_ptr(ptr));
+	case MSGRCV:
+		return compat_sys_msgrcv(first, second, 0, third,
+					 0, compat_ptr(ptr));
+	case MSGGET:
+		return sys_msgget((key_t) first, second);
+	case MSGCTL:
+		return compat_sys_msgctl(first, second, compat_ptr(ptr));
+	case SHMAT:
+		return compat_sys_shmat(first, second, third,
+					0, compat_ptr(ptr));
+	case SHMDT:
+		return sys_shmdt(compat_ptr(ptr));
+	case SHMGET:
+		return sys_shmget(first, (unsigned)second, third);
+	case SHMCTL:
+		return compat_sys_shmctl(first, second, compat_ptr(ptr));
+	}
+
+	return -ENOSYS;
+}
+
+asmlinkage long sys32_truncate64(const char * path, unsigned long high, unsigned long low)
+{
+	if ((int)high < 0)
+		return -EINVAL;
+	else
+		return sys_truncate(path, (high << 32) | low);
+}
+
+asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
+{
+	if ((int)high < 0)
+		return -EINVAL;
+	else
+		return sys_ftruncate(fd, (high << 32) | low);
+}
+
+int cp_compat_stat(struct kstat *stat, struct compat_stat *statbuf)
+{
+	int err;
+
+	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
+		return -EOVERFLOW;
+
+	err = put_user(old_encode_dev(stat->dev), &statbuf->st_dev);
+	err |= put_user(stat->ino, &statbuf->st_ino);
+	err |= put_user(stat->mode, &statbuf->st_mode);
+	err |= put_user(stat->nlink, &statbuf->st_nlink);
+	err |= put_user(high2lowuid(stat->uid), &statbuf->st_uid);
+	err |= put_user(high2lowgid(stat->gid), &statbuf->st_gid);
+	err |= put_user(old_encode_dev(stat->rdev), &statbuf->st_rdev);
+	err |= put_user(stat->size, &statbuf->st_size);
+	err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
+	err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
+	err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
+	err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
+	err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
+	err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
+	err |= put_user(stat->blksize, &statbuf->st_blksize);
+	err |= put_user(stat->blocks, &statbuf->st_blocks);
+/* fixme
+	err |= put_user(0, &statbuf->__unused4[0]);
+	err |= put_user(0, &statbuf->__unused4[1]);
+*/
+	return err;
+}
+
+struct sysinfo32 {
+        s32 uptime;
+        u32 loads[3];
+        u32 totalram;
+        u32 freeram;
+        u32 sharedram;
+        u32 bufferram;
+        u32 totalswap;
+        u32 freeswap;
+        unsigned short procs;
+	unsigned short pads;
+	u32 totalhigh;
+	u32 freehigh;
+	unsigned int mem_unit;
+        char _f[8];
+};
+
+asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
+{
+	struct sysinfo s;
+	int ret, err;
+	mm_segment_t old_fs = get_fs ();
+	
+	set_fs (KERNEL_DS);
+	ret = sys_sysinfo(&s);
+	set_fs (old_fs);
+	err = put_user (s.uptime, &info->uptime);
+	err |= __put_user (s.loads[0], &info->loads[0]);
+	err |= __put_user (s.loads[1], &info->loads[1]);
+	err |= __put_user (s.loads[2], &info->loads[2]);
+	err |= __put_user (s.totalram, &info->totalram);
+	err |= __put_user (s.freeram, &info->freeram);
+	err |= __put_user (s.sharedram, &info->sharedram);
+	err |= __put_user (s.bufferram, &info->bufferram);
+	err |= __put_user (s.totalswap, &info->totalswap);
+	err |= __put_user (s.freeswap, &info->freeswap);
+	err |= __put_user (s.procs, &info->procs);
+	err |= __put_user (s.totalhigh, &info->totalhigh);
+	err |= __put_user (s.freehigh, &info->freehigh);
+	err |= __put_user (s.mem_unit, &info->mem_unit);
+	if (err)
+		return -EFAULT;
+	return ret;
+}
+
+asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
+				struct compat_timespec __user *interval)
+{
+	struct timespec t;
+	int ret;
+	mm_segment_t old_fs = get_fs ();
+	
+	set_fs (KERNEL_DS);
+	ret = sys_sched_rr_get_interval(pid, &t);
+	set_fs (old_fs);
+	if (put_compat_timespec(&t, interval))
+		return -EFAULT;
+	return ret;
+}
+
+asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+			compat_sigset_t __user *oset, size_t sigsetsize)
+{
+	sigset_t s;
+	compat_sigset_t s32;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+	
+	if (set) {
+		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
+			return -EFAULT;
+		switch (_NSIG_WORDS) {
+		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
+		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
+		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
+		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+		}
+	}
+	set_fs (KERNEL_DS);
+	ret = sys_rt_sigprocmask(how, set ? &s : NULL, oset ? &s : NULL, sigsetsize);
+	set_fs (old_fs);
+	if (ret) return ret;
+	if (oset) {
+		switch (_NSIG_WORDS) {
+		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
+		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+		}
+		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
+			return -EFAULT;
+	}
+	return 0;
+}
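+
+/* Worked example of the word split above: a compat sigset with
+ * sig[0] = 0x00000001 and sig[1] = 0x80000000 becomes the single
+ * 64-bit word s.sig[0] = 0x8000000000000001 -- each even-indexed
+ * 32-bit word supplies the low half of one native sigset word. */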
+
+asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
+				size_t sigsetsize)
+{
+	sigset_t s;
+	compat_sigset_t s32;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+		
+	set_fs (KERNEL_DS);
+	ret = sys_rt_sigpending(&s, sigsetsize);
+	set_fs (old_fs);
+	if (!ret) {
+		switch (_NSIG_WORDS) {
+		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
+		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+		}
+		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
+			return -EFAULT;
+	}
+	return ret;
+}
+
+asmlinkage long
+sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
+{
+	siginfo_t info;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+	
+	if (copy_siginfo_from_user32(&info, uinfo))
+		return -EFAULT;
+	set_fs (KERNEL_DS);
+	ret = sys_rt_sigqueueinfo(pid, sig, &info);
+	set_fs (old_fs);
+	return ret;
+}
+
+/*
+ * sys32_execve() executes a new program after the asm stub has set
+ * things up for us.  This should basically do what I want it to.
+ */
+asmlinkage long
+sys32_execve(struct pt_regs regs)
+{
+        int error;
+        char * filename;
+
+        filename = getname(compat_ptr(regs.orig_gpr2));
+        error = PTR_ERR(filename);
+        if (IS_ERR(filename))
+                goto out;
+        error = compat_do_execve(filename, compat_ptr(regs.gprs[3]),
+				 compat_ptr(regs.gprs[4]), &regs);
+	if (error == 0)
+	{
+		task_lock(current);
+		current->ptrace &= ~PT_DTRACE;
+		task_unlock(current);
+		current->thread.fp_regs.fpc=0;
+		__asm__ __volatile__
+		        ("sr  0,0\n\t"
+		         "sfpc 0,0\n\t"
+			 : : :"0");
+	}
+        putname(filename);
+out:
+        return error;
+}
+
+
+#ifdef CONFIG_MODULES
+
+asmlinkage long
+sys32_init_module(void __user *umod, unsigned long len,
+		const char __user *uargs)
+{
+	return sys_init_module(umod, len, uargs);
+}
+
+asmlinkage long
+sys32_delete_module(const char __user *name_user, unsigned int flags)
+{
+	return sys_delete_module(name_user, flags);
+}
+
+#else /* CONFIG_MODULES */
+
+asmlinkage long
+sys32_init_module(void __user *umod, unsigned long len,
+		const char __user *uargs)
+{
+	return -ENOSYS;
+}
+
+asmlinkage long
+sys32_delete_module(const char __user *name_user, unsigned int flags)
+{
+	return -ENOSYS;
+}
+
+#endif  /* CONFIG_MODULES */
+
+/* Translations due to time_t size differences, which affect all
+   sorts of things, like timeval and itimerval.  */
+
+extern struct timezone sys_tz;
+
+asmlinkage long sys32_gettimeofday(struct compat_timeval *tv, struct timezone *tz)
+{
+	if (tv) {
+		struct timeval ktv;
+		do_gettimeofday(&ktv);
+		if (put_tv32(tv, &ktv))
+			return -EFAULT;
+	}
+	if (tz) {
+		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static inline long get_ts32(struct timespec *o, struct compat_timeval *i)
+{
+	long usec;
+
+	if (!access_ok(VERIFY_READ, i, sizeof(*i)))
+		return -EFAULT;
+	if (__get_user(o->tv_sec, &i->tv_sec))
+		return -EFAULT;
+	if (__get_user(usec, &i->tv_usec))
+		return -EFAULT;
+	o->tv_nsec = usec * 1000;
+	return 0;
+}
+
+asmlinkage long sys32_settimeofday(struct compat_timeval *tv, struct timezone *tz)
+{
+	struct timespec kts;
+	struct timezone ktz;
+
+ 	if (tv) {
+		if (get_ts32(&kts, tv))
+			return -EFAULT;
+	}
+	if (tz) {
+		if (copy_from_user(&ktz, tz, sizeof(ktz)))
+			return -EFAULT;
+	}
+
+	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
+}
+
+/* These are here just in case some old sparc32 binary calls it. */
+asmlinkage long sys32_pause(void)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	return -ERESTARTNOHAND;
+}
+
+asmlinkage long sys32_pread64(unsigned int fd, char *ubuf,
+				size_t count, u32 poshi, u32 poslo)
+{
+	if ((compat_ssize_t) count < 0)
+		return -EINVAL;
+	return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
+}
+
+asmlinkage long sys32_pwrite64(unsigned int fd, const char *ubuf,
+				size_t count, u32 poshi, u32 poslo)
+{
+	if ((compat_ssize_t) count < 0)
+		return -EINVAL;
+	return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
+}
+
+asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count)
+{
+	return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count);
+}
+
+asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t *offset, size_t count)
+{
+	mm_segment_t old_fs = get_fs();
+	int ret;
+	off_t of;
+	
+	if (offset && get_user(of, offset))
+		return -EFAULT;
+		
+	set_fs(KERNEL_DS);
+	ret = sys_sendfile(out_fd, in_fd, offset ? &of : NULL, count);
+	set_fs(old_fs);
+	
+	if (!ret && offset && put_user(of, offset))
+		return -EFAULT;
+		
+	return ret;
+}
+
+asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
+				compat_loff_t *offset, s32 count)
+{
+	mm_segment_t old_fs = get_fs();
+	int ret;
+	loff_t lof;
+	
+	if (offset && get_user(lof, offset))
+		return -EFAULT;
+		
+	set_fs(KERNEL_DS);
+	ret = sys_sendfile64(out_fd, in_fd, offset ? &lof : NULL, count);
+	set_fs(old_fs);
+	
+	if (offset && put_user(lof, offset))
+		return -EFAULT;
+		
+	return ret;
+}
+
+/* Handle adjtimex compatibility. */
+
+struct timex32 {
+	u32 modes;
+	s32 offset, freq, maxerror, esterror;
+	s32 status, constant, precision, tolerance;
+	struct compat_timeval time;
+	s32 tick;
+	s32 ppsfreq, jitter, shift, stabil;
+	s32 jitcnt, calcnt, errcnt, stbcnt;
+	s32  :32; s32  :32; s32  :32; s32  :32;
+	s32  :32; s32  :32; s32  :32; s32  :32;
+	s32  :32; s32  :32; s32  :32; s32  :32;
+};
+
+extern int do_adjtimex(struct timex *);
+
+asmlinkage long sys32_adjtimex(struct timex32 *utp)
+{
+	struct timex txc;
+	int ret;
+
+	memset(&txc, 0, sizeof(struct timex));
+
+	if(get_user(txc.modes, &utp->modes) ||
+	   __get_user(txc.offset, &utp->offset) ||
+	   __get_user(txc.freq, &utp->freq) ||
+	   __get_user(txc.maxerror, &utp->maxerror) ||
+	   __get_user(txc.esterror, &utp->esterror) ||
+	   __get_user(txc.status, &utp->status) ||
+	   __get_user(txc.constant, &utp->constant) ||
+	   __get_user(txc.precision, &utp->precision) ||
+	   __get_user(txc.tolerance, &utp->tolerance) ||
+	   __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+	   __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+	   __get_user(txc.tick, &utp->tick) ||
+	   __get_user(txc.ppsfreq, &utp->ppsfreq) ||
+	   __get_user(txc.jitter, &utp->jitter) ||
+	   __get_user(txc.shift, &utp->shift) ||
+	   __get_user(txc.stabil, &utp->stabil) ||
+	   __get_user(txc.jitcnt, &utp->jitcnt) ||
+	   __get_user(txc.calcnt, &utp->calcnt) ||
+	   __get_user(txc.errcnt, &utp->errcnt) ||
+	   __get_user(txc.stbcnt, &utp->stbcnt))
+		return -EFAULT;
+
+	ret = do_adjtimex(&txc);
+
+	if(put_user(txc.modes, &utp->modes) ||
+	   __put_user(txc.offset, &utp->offset) ||
+	   __put_user(txc.freq, &utp->freq) ||
+	   __put_user(txc.maxerror, &utp->maxerror) ||
+	   __put_user(txc.esterror, &utp->esterror) ||
+	   __put_user(txc.status, &utp->status) ||
+	   __put_user(txc.constant, &utp->constant) ||
+	   __put_user(txc.precision, &utp->precision) ||
+	   __put_user(txc.tolerance, &utp->tolerance) ||
+	   __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+	   __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+	   __put_user(txc.tick, &utp->tick) ||
+	   __put_user(txc.ppsfreq, &utp->ppsfreq) ||
+	   __put_user(txc.jitter, &utp->jitter) ||
+	   __put_user(txc.shift, &utp->shift) ||
+	   __put_user(txc.stabil, &utp->stabil) ||
+	   __put_user(txc.jitcnt, &utp->jitcnt) ||
+	   __put_user(txc.calcnt, &utp->calcnt) ||
+	   __put_user(txc.errcnt, &utp->errcnt) ||
+	   __put_user(txc.stbcnt, &utp->stbcnt))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+#ifdef CONFIG_SYSCTL
+struct __sysctl_args32 {
+	u32 name;
+	int nlen;
+	u32 oldval;
+	u32 oldlenp;
+	u32 newval;
+	u32 newlen;
+	u32 __unused[4];
+};
+
+asmlinkage long sys32_sysctl(struct __sysctl_args32 *args)
+{
+	struct __sysctl_args32 tmp;
+	int error;
+	size_t oldlen, *oldlenp = NULL;
+	unsigned long addr = (((long)&args->__unused[0]) + 7) & ~7;
+
+	if (copy_from_user(&tmp, args, sizeof(tmp)))
+		return -EFAULT;
+
+	if (tmp.oldval && tmp.oldlenp) {
+		/* Duh, this is ugly and might not work if sysctl_args
+		   is in read-only memory, but do_sysctl does indirectly
+		   a lot of uaccess in both directions and we'd have to
+		   basically copy the whole sysctl.c here, and
+		   glibc's __sysctl uses rw memory for the structure
+		   anyway.  */
+		if (get_user(oldlen, (u32 *)A(tmp.oldlenp)) ||
+		    put_user(oldlen, (size_t *)addr))
+			return -EFAULT;
+		oldlenp = (size_t *)addr;
+	}
+
+	lock_kernel();
+	error = do_sysctl((int *)A(tmp.name), tmp.nlen, (void *)A(tmp.oldval),
+			  oldlenp, (void *)A(tmp.newval), tmp.newlen);
+	unlock_kernel();
+	if (oldlenp) {
+		if (!error) {
+			if (get_user(oldlen, (size_t *)addr) ||
+			    put_user(oldlen, (u32 *)A(tmp.oldlenp)))
+				error = -EFAULT;
+		}
+		copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
+	}
+	return error;
+}
+#endif
+
+struct stat64_emu31 {
+	unsigned long long  st_dev;
+	unsigned int    __pad1;
+#define STAT64_HAS_BROKEN_ST_INO        1
+	u32             __st_ino;
+	unsigned int    st_mode;
+	unsigned int    st_nlink;
+	u32             st_uid;
+	u32             st_gid;
+	unsigned long long  st_rdev;
+	unsigned int    __pad3;
+	long            st_size;
+	u32             st_blksize;
+	unsigned char   __pad4[4];
+	u32             __pad5;     /* future possible st_blocks high bits */
+	u32             st_blocks;  /* Number 512-byte blocks allocated. */
+	u32             st_atime;
+	u32             __pad6;
+	u32             st_mtime;
+	u32             __pad7;
+	u32             st_ctime;
+	u32             __pad8;     /* will be high 32 bits of ctime someday */
+	unsigned long   st_ino;
+};	
+
+static int cp_stat64(struct stat64_emu31 *ubuf, struct kstat *stat)
+{
+	struct stat64_emu31 tmp;
+
+	memset(&tmp, 0, sizeof(tmp));
+
+	tmp.st_dev = huge_encode_dev(stat->dev);
+	tmp.st_ino = stat->ino;
+	tmp.__st_ino = (u32)stat->ino;
+	tmp.st_mode = stat->mode;
+	tmp.st_nlink = (unsigned int)stat->nlink;
+	tmp.st_uid = stat->uid;
+	tmp.st_gid = stat->gid;
+	tmp.st_rdev = huge_encode_dev(stat->rdev);
+	tmp.st_size = stat->size;
+	tmp.st_blksize = (u32)stat->blksize;
+	tmp.st_blocks = (u32)stat->blocks;
+	tmp.st_atime = (u32)stat->atime.tv_sec;
+	tmp.st_mtime = (u32)stat->mtime.tv_sec;
+	tmp.st_ctime = (u32)stat->ctime.tv_sec;
+
+	return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; 
+}
+
+asmlinkage long sys32_stat64(char * filename, struct stat64_emu31 * statbuf)
+{
+	struct kstat stat;
+	int ret = vfs_stat(filename, &stat);
+	if (!ret)
+		ret = cp_stat64(statbuf, &stat);
+	return ret;
+}
+
+asmlinkage long sys32_lstat64(char * filename, struct stat64_emu31 * statbuf)
+{
+	struct kstat stat;
+	int ret = vfs_lstat(filename, &stat);
+	if (!ret)
+		ret = cp_stat64(statbuf, &stat);
+	return ret;
+}
+
+asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 * statbuf)
+{
+	struct kstat stat;
+	int ret = vfs_fstat(fd, &stat);
+	if (!ret)
+		ret = cp_stat64(statbuf, &stat);
+	return ret;
+}
+
+/*
+ * Linux/i386 didn't use to be able to handle more than
+ * 4 system call parameters, so these system calls used a memory
+ * block for parameter passing..
+ */
+
+struct mmap_arg_struct_emu31 {
+	u32	addr;
+	u32	len;
+	u32	prot;
+	u32	flags;
+	u32	fd;
+	u32	offset;
+};
+
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+	unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags,
+	unsigned long fd, unsigned long pgoff)
+{
+	struct file * file = NULL;
+	unsigned long error = -EBADF;
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	if (!(flags & MAP_ANONYMOUS)) {
+		file = fget(fd);
+		if (!file)
+			goto out;
+	}
+
+	down_write(&current->mm->mmap_sem);
+	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) {
+		/* Result is out of bounds.  */
+		do_munmap(current->mm, addr, len);
+		error = -ENOMEM;
+	}
+	up_write(&current->mm->mmap_sem);
+
+	if (file)
+		fput(file);
+out:    
+	return error;
+}
+
+
+asmlinkage unsigned long
+old32_mmap(struct mmap_arg_struct_emu31 *arg)
+{
+	struct mmap_arg_struct_emu31 a;
+	int error = -EFAULT;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		goto out;
+
+	error = -EINVAL;
+	if (a.offset & ~PAGE_MASK)
+		goto out;
+
+	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); 
+out:
+	return error;
+}
+
+asmlinkage long 
+sys32_mmap2(struct mmap_arg_struct_emu31 *arg)
+{
+	struct mmap_arg_struct_emu31 a;
+	int error = -EFAULT;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		goto out;
+	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+out:
+	return error;
+}
+
+asmlinkage long sys32_read(unsigned int fd, char * buf, size_t count)
+{
+	if ((compat_ssize_t) count < 0)
+		return -EINVAL; 
+
+	return sys_read(fd, buf, count);
+}
+
+asmlinkage long sys32_write(unsigned int fd, char * buf, size_t count)
+{
+	if ((compat_ssize_t) count < 0)
+		return -EINVAL; 
+
+	return sys_write(fd, buf, count);
+}
+
+asmlinkage long sys32_clone(struct pt_regs regs)
+{
+        unsigned long clone_flags;
+        unsigned long newsp;
+	int *parent_tidptr, *child_tidptr;
+
+        clone_flags = regs.gprs[3] & 0xffffffffUL;
+        newsp = regs.orig_gpr2 & 0x7fffffffUL;
+	parent_tidptr = (int *) (regs.gprs[4] & 0x7fffffffUL);
+	child_tidptr = (int *) (regs.gprs[5] & 0x7fffffffUL);
+        if (!newsp)
+                newsp = regs.gprs[15];
+        return do_fork(clone_flags, newsp, &regs, 0,
+		       parent_tidptr, child_tidptr);
+}
+
+/*
+ * Wrapper function for sys_timer_create.
+ */
+extern asmlinkage long
+sys_timer_create(clockid_t, struct sigevent *, timer_t *);
+
+asmlinkage long
+sys32_timer_create(clockid_t which_clock, struct compat_sigevent *se32,
+		timer_t *timer_id)
+{
+	struct sigevent se;
+	timer_t ktimer_id;
+	mm_segment_t old_fs;
+	long ret;
+
+	if (se32 == NULL)
+		return sys_timer_create(which_clock, NULL, timer_id);
+
+	if (get_compat_sigevent(&se, se32))
+		return -EFAULT;
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = sys_timer_create(which_clock, &se, &ktimer_id);
+	set_fs(old_fs);
+
+	if (!ret)
+		ret = put_user (ktimer_id, timer_id);
+
+	return ret;
+}
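
A pattern repeated all through this file (sys32_sysinfo, sys32_sched_rr_get_interval, sys32_timer_create, ...): convert the 32-bit user structure into its 64-bit form in a kernel buffer, widen the address-space check with set_fs(KERNEL_DS) so the native syscall accepts a kernel pointer, call it, restore the old segment, and convert the result back to the compat layout. A stripped-down sketch of that shape — every frob* name is hypothetical:

long sys32_frobnicate(struct compat_frob __user *u32p)	/* hypothetical */
{
	struct frob k;				/* native 64-bit form */
	mm_segment_t old_fs;
	long ret;

	if (frob_from_user32(&k, u32p))		/* widen 32 -> 64 */
		return -EFAULT;

	old_fs = get_fs();
	set_fs(KERNEL_DS);			/* let sys_frobnicate take &k */
	ret = sys_frobnicate(&k);
	set_fs(old_fs);

	if (!ret && frob_to_user32(u32p, &k))	/* narrow 64 -> 32 */
		return -EFAULT;
	return ret;
}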
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
new file mode 100644
index 000000000000..bf33dcfec7db
--- /dev/null
+++ b/arch/s390/kernel/compat_linux.h
@@ -0,0 +1,197 @@
+#ifndef _ASM_S390X_S390_H
+#define _ASM_S390X_S390_H
+
+#include <linux/config.h>
+#include <linux/compat.h>
+#include <linux/socket.h>
+#include <linux/syscalls.h>
+#include <linux/nfs_fs.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/export.h>
+
+/* Macro that masks the high-order bit of a 32-bit pointer and
+   converts it to a 64-bit pointer. */
+#define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL))
+#define AA(__x)				\
+	((unsigned long)(__x))
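+
+/* Example: A(0x80001000) drops the 31-bit addressing tag and yields
+ * 0x00001000, while AA() only widens the value, so AA(0xfffff000)
+ * stays 0xfffff000 as an unsigned long. */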
+
+/* Now 32bit compatibility types */
+struct ipc_kludge_32 {
+        __u32   msgp;                           /* pointer              */
+        __s32   msgtyp;
+};
+
+struct old_sigaction32 {
+       __u32			sa_handler;	/* Really a pointer, but need to deal with 32 bits */
+       compat_old_sigset_t	sa_mask;	/* A 32 bit mask */
+       __u32			sa_flags;
+       __u32			sa_restorer;	/* Another 32 bit pointer */
+};
+ 
+typedef struct compat_siginfo {
+	int	si_signo;
+	int	si_errno;
+	int	si_code;
+
+	union {
+		int _pad[((128/sizeof(int)) - 3)];
+
+		/* kill() */
+		struct {
+			pid_t	_pid;	/* sender's pid */
+			uid_t	_uid;	/* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			timer_t _tid;		/* timer id */
+			int _overrun;		/* overrun count */
+			compat_sigval_t _sigval;	/* same as below */
+			int _sys_private;       /* not to be passed to user */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			pid_t			_pid;	/* sender's pid */
+			uid_t			_uid;	/* sender's uid */
+			compat_sigval_t		_sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			pid_t			_pid;	/* which child */
+			uid_t			_uid;	/* sender's uid */
+			int			_status;/* exit code */
+			compat_clock_t		_utime;
+			compat_clock_t		_stime;
+		} _sigchld;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+		struct {
+			__u32	_addr;	/* faulting insn/memory ref. - pointer */
+		} _sigfault;
+                          
+		/* SIGPOLL */
+		struct {
+			int	_band;	/* POLL_IN, POLL_OUT, POLL_MSG */
+			int	_fd;
+		} _sigpoll;
+	} _sifields;
+} compat_siginfo_t;
+
+/*
+ * How these fields are to be accessed.
+ */
+#define si_pid		_sifields._kill._pid
+#define si_uid		_sifields._kill._uid
+#define si_status	_sifields._sigchld._status
+#define si_utime	_sifields._sigchld._utime
+#define si_stime	_sifields._sigchld._stime
+#define si_value	_sifields._rt._sigval
+#define si_int		_sifields._rt._sigval.sival_int
+#define si_ptr		_sifields._rt._sigval.sival_ptr
+#define si_addr		_sifields._sigfault._addr
+#define si_band		_sifields._sigpoll._band
+#define si_fd		_sifields._sigpoll._fd    
+#define si_tid		_sifields._timer._tid
+#define si_overrun	_sifields._timer._overrun
+
+/* asm/sigcontext.h */
+typedef union
+{
+	__u64   d;
+	__u32   f; 
+} freg_t32;
+
+typedef struct
+{
+	unsigned int	fpc;
+	freg_t32	fprs[__NUM_FPRS];              
+} _s390_fp_regs32;
+
+typedef struct 
+{
+        __u32   mask;
+        __u32	addr;
+} _psw_t32 __attribute__ ((aligned(8)));
+
+#define PSW32_MASK_PER		0x40000000UL
+#define PSW32_MASK_DAT		0x04000000UL
+#define PSW32_MASK_IO		0x02000000UL
+#define PSW32_MASK_EXT		0x01000000UL
+#define PSW32_MASK_KEY		0x00F00000UL
+#define PSW32_MASK_MCHECK	0x00040000UL
+#define PSW32_MASK_WAIT		0x00020000UL
+#define PSW32_MASK_PSTATE	0x00010000UL
+#define PSW32_MASK_ASC		0x0000C000UL
+#define PSW32_MASK_CC		0x00003000UL
+#define PSW32_MASK_PM		0x00000f00UL
+
+#define PSW32_ADDR_AMODE31	0x80000000UL
+#define PSW32_ADDR_INSN		0x7FFFFFFFUL
+
+#define PSW32_BASE_BITS		0x00080000UL
+
+#define PSW32_ASC_PRIMARY	0x00000000UL
+#define PSW32_ASC_ACCREG	0x00004000UL
+#define PSW32_ASC_SECONDARY	0x00008000UL
+#define PSW32_ASC_HOME		0x0000C000UL
+
+#define PSW32_USER_BITS	(PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \
+			 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \
+			 PSW32_MASK_PSTATE)
+
+#define PSW32_MASK_MERGE(CURRENT,NEW) \
+        (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
+         ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
+
+
+typedef struct
+{
+	_psw_t32	psw;
+	__u32		gprs[__NUM_GPRS];
+	__u32		acrs[__NUM_ACRS];
+} _s390_regs_common32;
+
+typedef struct
+{
+	_s390_regs_common32 regs;
+	_s390_fp_regs32     fpregs;
+} _sigregs32;
+
+#define _SIGCONTEXT_NSIG32	64
+#define _SIGCONTEXT_NSIG_BPW32	32
+#define __SIGNAL_FRAMESIZE32	96
+#define _SIGMASK_COPY_SIZE32	(sizeof(u32)*2)
+
+struct sigcontext32
+{
+	__u32	oldmask[_COMPAT_NSIG_WORDS];
+	__u32	sregs;				/* pointer */
+};
+
+/* asm/signal.h */
+struct sigaction32 {
+	__u32		sa_handler;		/* pointer */
+	__u32		sa_flags;
+        __u32		sa_restorer;		/* pointer */
+	compat_sigset_t	sa_mask;        /* mask last for extensibility */
+};
+
+typedef struct {
+	__u32			ss_sp;		/* pointer */
+	int			ss_flags;
+	compat_size_t		ss_size;
+} stack_t32;
+
+/* asm/ucontext.h */
+struct ucontext32 {
+	__u32			uc_flags;
+	__u32			uc_link;	/* pointer */	
+	stack_t32		uc_stack;
+	_sigregs32		uc_mcontext;
+	compat_sigset_t		uc_sigmask;	/* mask last for extensibility */
+};
+
+#endif /* _ASM_S390X_S390_H */
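
PSW32_MASK_MERGE above is used when merging a user-supplied PSW mask: only the condition code and program mask are taken from the new value, everything else is kept from the current one. A tiny standalone check with illustrative mask values:

#include <stdio.h>
#include <stdint.h>

#define PSW32_MASK_CC	0x00003000UL
#define PSW32_MASK_PM	0x00000f00UL
#define PSW32_MASK_MERGE(CURRENT,NEW) \
        (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
         ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))

int main(void)
{
	uint32_t cur = 0x070d1000;	/* illustrative mask, CC = 1 */
	uint32_t new = 0x00002000;	/* incoming value with CC = 2 */

	/* everything except CC and PM comes from cur -> prints 070d2000 */
	printf("%08x\n", (unsigned int)PSW32_MASK_MERGE(cur, new));
	return 0;
}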
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
new file mode 100644
index 000000000000..419aef913ee1
--- /dev/null
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -0,0 +1,83 @@
+#ifndef _PTRACE32_H
+#define _PTRACE32_H
+
+#include "compat_linux.h"  /* needed for _psw_t32 */
+
+typedef struct {
+	__u32 cr[3];
+} per_cr_words32;
+
+typedef struct {
+	__u16          perc_atmid;          /* 0x096 */
+	__u32          address;             /* 0x098 */
+	__u8           access_id;           /* 0x0a1 */
+} per_lowcore_words32;
+
+typedef struct {
+	union {
+		per_cr_words32   words;
+	} control_regs;
+	/*
+	 * Use these flags instead of setting em_instruction_fetch
+	 * directly they are used so that single stepping can be
+	 * switched on & off while not affecting other tracing
+	 */
+	unsigned  single_step       : 1;
+	unsigned  instruction_fetch : 1;
+	unsigned                    : 30;
+	/*
+	 * These addresses are copied into cr10 & cr11 if single
+	 * stepping is switched off
+	 */
+	__u32     starting_addr;
+	__u32     ending_addr;
+	union {
+		per_lowcore_words32 words;
+	} lowcore; 
+} per_struct32;
+
+struct user_regs_struct32
+{
+	_psw_t32 psw;
+	u32 gprs[NUM_GPRS];
+	u32 acrs[NUM_ACRS];
+	u32 orig_gpr2;
+	s390_fp_regs fp_regs;
+	/*
+	 * These per registers are in here so that gdb can modify them
+	 * itself as there is no "official" ptrace interface for hardware
+	 * watchpoints. This is the way intel does it.
+	 */
+	per_struct32 per_info;
+	u32  ieee_instruction_pointer; 
+	/* Used to give failing instruction back to user for ieee exceptions */
+};
+
+struct user32 {
+	/* We start with the registers, to mimic the way that "memory"
+	   is returned from the ptrace(3,...) function.  */
+	struct user_regs_struct32 regs; /* Where the registers are actually stored */
+	/* The rest of this junk is to help gdb figure out what goes where */
+	u32 u_tsize;		/* Text segment size (pages). */
+	u32 u_dsize;	        /* Data segment size (pages). */
+	u32 u_ssize;	        /* Stack segment size (pages). */
+	u32 start_code;         /* Starting virtual address of text. */
+	u32 start_stack;	/* Starting virtual address of stack area.
+				   This is actually the bottom of the stack,
+				   the top of the stack is always found in the
+				   esp register.  */
+	s32 signal;     	 /* Signal that caused the core dump. */
+	u32 u_ar0;               /* Used by gdb to help find the values for */
+	                         /* the registers. */
+	u32 magic;		 /* To uniquely identify a core file */
+	char u_comm[32];	 /* User command that was responsible */
+};
+
+typedef struct
+{
+	__u32   len;
+	__u32   kernel_addr;
+	__u32   process_addr;
+} ptrace_area_emu31;
+
+#endif /* _PTRACE32_H */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
new file mode 100644
index 000000000000..d05d65ac9694
--- /dev/null
+++ b/arch/s390/kernel/compat_signal.c
@@ -0,0 +1,648 @@
+/*
+ *  arch/s390/kernel/signal32.c
+ *
+ *  S390 version
+ *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *               Gerhard Tonn (ton@de.ibm.com)                  
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+ */
+
+#include <linux/config.h>
+#include <linux/compat.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tty.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/lowcore.h>
+#include "compat_linux.h"
+#include "compat_ptrace.h"
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+typedef struct 
+{
+	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
+	struct sigcontext32 sc;
+	_sigregs32 sregs;
+	int signo;
+	__u8 retcode[S390_SYSCALL_SIZE];
+} sigframe32;
+
+typedef struct 
+{
+	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
+	__u8 retcode[S390_SYSCALL_SIZE];
+	compat_siginfo_t info;
+	struct ucontext32 uc;
+} rt_sigframe32;
+
+asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
+
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+{
+	int err;
+
+	if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	/* If you change siginfo_t structure, please be sure
+	   this code is fixed accordingly.
+	   It should never copy any pad contained in the structure
+	   to avoid security leaks, but must copy the generic
+	   3 ints plus the relevant union member.  
+	   This routine must convert siginfo from 64bit to 32bit as well
+	   at the same time.  */
+	err = __put_user(from->si_signo, &to->si_signo);
+	err |= __put_user(from->si_errno, &to->si_errno);
+	err |= __put_user((short)from->si_code, &to->si_code);
+	if (from->si_code < 0)
+		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+	else {
+		switch (from->si_code >> 16) {
+		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
+		case __SI_MESGQ >> 16:
+			err |= __put_user(from->si_int, &to->si_int);
+			/* fallthrough */
+		case __SI_KILL >> 16:
+			err |= __put_user(from->si_pid, &to->si_pid);
+			err |= __put_user(from->si_uid, &to->si_uid);
+			break;
+		case __SI_CHLD >> 16:
+			err |= __put_user(from->si_pid, &to->si_pid);
+			err |= __put_user(from->si_uid, &to->si_uid);
+			err |= __put_user(from->si_utime, &to->si_utime);
+			err |= __put_user(from->si_stime, &to->si_stime);
+			err |= __put_user(from->si_status, &to->si_status);
+			break;
+		case __SI_FAULT >> 16:
+			err |= __put_user((unsigned long) from->si_addr,
+					  &to->si_addr);
+			break;
+		case __SI_POLL >> 16:
+			err |= __put_user(from->si_band, &to->si_band);
+			err |= __put_user(from->si_fd, &to->si_fd);
+			break;
+		case __SI_TIMER >> 16:
+			err |= __put_user(from->si_tid, &to->si_tid);
+			err |= __put_user(from->si_overrun, &to->si_overrun);
+			err |= __put_user(from->si_int, &to->si_int);
+			break;
+		default:
+			break;
+		}
+	}
+	return err;
+}
+
+int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+{
+	int err;
+	u32 tmp;
+
+	if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	err = __get_user(to->si_signo, &from->si_signo);
+	err |= __get_user(to->si_errno, &from->si_errno);
+	err |= __get_user(to->si_code, &from->si_code);
+
+	if (to->si_code < 0)
+		err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+	else {
+		switch (to->si_code >> 16) {
+		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
+		case __SI_MESGQ >> 16:
+			err |= __get_user(to->si_int, &from->si_int);
+			/* fallthrough */
+		case __SI_KILL >> 16:
+			err |= __get_user(to->si_pid, &from->si_pid);
+			err |= __get_user(to->si_uid, &from->si_uid);
+			break;
+		case __SI_CHLD >> 16:
+			err |= __get_user(to->si_pid, &from->si_pid);
+			err |= __get_user(to->si_uid, &from->si_uid);
+			err |= __get_user(to->si_utime, &from->si_utime);
+			err |= __get_user(to->si_stime, &from->si_stime);
+			err |= __get_user(to->si_status, &from->si_status);
+			break;
+		case __SI_FAULT >> 16:
+			err |= __get_user(tmp, &from->si_addr);
+			to->si_addr = (void *)(u64) (tmp & PSW32_ADDR_INSN);
+			break;
+		case __SI_POLL >> 16:
+			err |= __get_user(to->si_band, &from->si_band);
+			err |= __get_user(to->si_fd, &from->si_fd);
+			break;
+		case __SI_TIMER >> 16:
+			err |= __get_user(to->si_tid, &from->si_tid);
+			err |= __get_user(to->si_overrun, &from->si_overrun);
+			err |= __get_user(to->si_int, &from->si_int);
+			break;
+		default:
+			break;
+		}
+	}
+	return err;
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys32_sigsuspend(struct pt_regs * regs,int history0, int history1, old_sigset_t mask)
+{
+	sigset_t saveset;
+
+	mask &= _BLOCKABLE;
+	spin_lock_irq(&current->sighand->siglock);
+	saveset = current->blocked;
+	siginitset(&current->blocked, mask);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	regs->gprs[2] = -EINTR;
+
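+	/*
+	 * Sleep until a signal is delivered; do_signal() sets up the
+	 * handler frame, and the -EINTR planted in gpr 2 above is what
+	 * the interrupted call reports as its result.
+	 */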
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		if (do_signal(regs, &saveset))
+			return -EINTR;
+	}
+}
+
+asmlinkage int
+sys32_rt_sigsuspend(struct pt_regs * regs, compat_sigset_t __user *unewset,
+								size_t sigsetsize)
+{
+	sigset_t saveset, newset;
+	compat_sigset_t set32;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+
+	if (copy_from_user(&set32, unewset, sizeof(set32)))
+		return -EFAULT;
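+	/*
+	 * Assemble each 64-bit sigset word from two consecutive 32-bit
+	 * compat words, low half first; the cases fall through so every
+	 * word up to _NSIG_WORDS gets converted.
+	 */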
+	switch (_NSIG_WORDS) {
+	case 4: newset.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32);
+	case 3: newset.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32);
+	case 2: newset.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32);
+	case 1: newset.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32);
+	}
+	sigdelsetmask(&newset, ~_BLOCKABLE);
+
+	spin_lock_irq(&current->sighand->siglock);
+	saveset = current->blocked;
+	current->blocked = newset;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	regs->gprs[2] = -EINTR;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		if (do_signal(regs, &saveset))
+			return -EINTR;
+	}
+}
+
+asmlinkage long
+sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
+		 struct old_sigaction32 __user *oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	unsigned long sa_handler, sa_restorer;
+	int ret;
+
+	if (act) {
+		compat_old_sigset_t mask;
+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+		    __get_user(sa_handler, &act->sa_handler) ||
+		    __get_user(sa_restorer, &act->sa_restorer))
+			return -EFAULT;
+		new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
+		new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer;
+		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		__get_user(mask, &act->sa_mask);
+		siginitset(&new_ka.sa.sa_mask, mask);
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		sa_handler = (unsigned long) old_ka.sa.sa_handler;
+		sa_restorer = (unsigned long) old_ka.sa.sa_restorer;
+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+		    __put_user(sa_handler, &oact->sa_handler) ||
+		    __put_user(sa_restorer, &oact->sa_restorer))
+			return -EFAULT;
+		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+	}
+
+	return ret;
+}
+
+int
+do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact);
+
+asmlinkage long
+sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
+	   struct sigaction32 __user *oact,  size_t sigsetsize)
+{
+	struct k_sigaction new_ka, old_ka;
+	unsigned long sa_handler;
+	int ret;
+	compat_sigset_t set32;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(compat_sigset_t))
+		return -EINVAL;
+
+	if (act) {
+		ret = get_user(sa_handler, &act->sa_handler);
+		ret |= __copy_from_user(&set32, &act->sa_mask,
+					sizeof(compat_sigset_t));
+		switch (_NSIG_WORDS) {
+		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
+				| (((long)set32.sig[7]) << 32);
+		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
+				| (((long)set32.sig[5]) << 32);
+		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
+				| (((long)set32.sig[3]) << 32);
+		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
+				| (((long)set32.sig[1]) << 32);
+		}
+		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		
+		if (ret)
+			return -EFAULT;
+		new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		switch (_NSIG_WORDS) {
+		case 4:
+			set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
+			set32.sig[6] = old_ka.sa.sa_mask.sig[3];
+		case 3:
+			set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
+			set32.sig[4] = old_ka.sa.sa_mask.sig[2];
+		case 2:
+			set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
+			set32.sig[2] = old_ka.sa.sa_mask.sig[1];
+		case 1:
+			set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
+			set32.sig[0] = old_ka.sa.sa_mask.sig[0];
+		}
+		ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler);
+		ret |= __copy_to_user(&oact->sa_mask, &set32,
+				      sizeof(compat_sigset_t));
+		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+	}
+
+	return ret;
+}
+
+asmlinkage long
+sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
+							struct pt_regs *regs)
+{
+	stack_t kss, koss;
+	unsigned long ss_sp;
+	int ret, err = 0;
+	mm_segment_t old_fs = get_fs();
+
+	if (uss) {
+		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
+			return -EFAULT;
+		err |= __get_user(ss_sp, &uss->ss_sp);
+		err |= __get_user(kss.ss_size, &uss->ss_size);
+		err |= __get_user(kss.ss_flags, &uss->ss_flags);
+		if (err)
+			return -EFAULT;
+		kss.ss_sp = (void *) ss_sp;
+	}
+
+	set_fs (KERNEL_DS);
+	ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL),
+			     (stack_t __user *) (uoss ? &koss : NULL),
+			     regs->gprs[15]);
+	set_fs (old_fs);
+
+	if (!ret && uoss) {
+		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
+			return -EFAULT;
+		ss_sp = (unsigned long) koss.ss_sp;
+		err |= __put_user(ss_sp, &uoss->ss_sp);
+		err |= __put_user(koss.ss_size, &uoss->ss_size);
+		err |= __put_user(koss.ss_flags, &uoss->ss_flags);
+		if (err)
+			return -EFAULT;
+	}
+	return ret;
+}
+
+static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
+{
+	_s390_regs_common32 regs32;
+	int err, i;
+
+	regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS,
+					   (__u32)(regs->psw.mask >> 32));
+	regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
+	for (i = 0; i < NUM_GPRS; i++)
+		regs32.gprs[i] = (__u32) regs->gprs[i];
+	save_access_regs(current->thread.acrs);
+	memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs));
+	err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32));
+	if (err)
+		return err;
+	save_fp_regs(&current->thread.fp_regs);
+	/* s390_fp_regs and _s390_fp_regs32 are the same! */
+	return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
+			      sizeof(_s390_fp_regs32));
+}
+
+static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
+{
+	_s390_regs_common32 regs32;
+	int err, i;
+
+	/* Always make any pending restarted system call return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
+	if (err)
+		return err;
+	regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask,
+				        (__u64)regs32.psw.mask << 32);
+	regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
+	for (i = 0; i < NUM_GPRS; i++)
+		regs->gprs[i] = (__u64) regs32.gprs[i];
+	memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs));
+	restore_access_regs(current->thread.acrs);
+
+	err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
+			       sizeof(_s390_fp_regs32));
+	current->thread.fp_regs.fpc &= FPC_VALID_MASK;
+	if (err)
+		return err;
+
+	restore_fp_regs(&current->thread.fp_regs);
+	regs->trap = -1;	/* disable syscall checks */
+	return 0;
+}
+
+asmlinkage long sys32_sigreturn(struct pt_regs *regs)
+{
+	sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
+	sigset_t set;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigregs32(regs, &frame->sregs))
+		goto badframe;
+
+	return regs->gprs[2];
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
+{
+	rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
+	sigset_t set;
+	stack_t st;
+	__u32 ss_sp;
+	int err;
+	mm_segment_t old_fs = get_fs();
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
+		goto badframe;
+
+	err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
+	st.ss_sp = (void *) A((unsigned long)ss_sp);
+	err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
+	err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
+	if (err)
+		goto badframe; 
+
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  */
+	set_fs (KERNEL_DS);
+	do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]);
+	set_fs (old_fs);
+
+	return regs->gprs[2];
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+
+/*
+ * Determine which stack to use.
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+{
+	unsigned long sp;
+
+	/* Default to using normal stack */
+	sp = (unsigned long) A(regs->gprs[15]);
+
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		if (! on_sig_stack(sp))
+			sp = current->sas_ss_sp + current->sas_ss_size;
+	}
+
+	/* This is the legacy signal stack switching. */
+	else if (!user_mode(regs) &&
+		 !(ka->sa.sa_flags & SA_RESTORER) &&
+		 ka->sa.sa_restorer) {
+		sp = (unsigned long) ka->sa.sa_restorer;
+	}
+
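+	/* round the frame address down to the 8-byte alignment the ABI requires */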
+	return (void __user *)((sp - frame_size) & -8ul);
+}
+
+static inline int map_signal(int sig)
+{
+	if (current_thread_info()->exec_domain
+	    && current_thread_info()->exec_domain->signal_invmap
+	    && sig < 32)
+		return current_thread_info()->exec_domain->signal_invmap[sig];
+	else
+		return sig;
+}
+
+static void setup_frame32(int sig, struct k_sigaction *ka,
+			sigset_t *set, struct pt_regs * regs)
+{
+	sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(sigframe32));
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
+		goto give_sigsegv;
+
+	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
+		goto give_sigsegv;
+
+	if (save_sigregs32(regs, &frame->sregs))
+		goto give_sigsegv;
+	if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs))
+		goto give_sigsegv;
+
+	/* Set up to return from userspace.  If provided, use a stub
+	   already in userspace.  */
+	if (ka->sa.sa_flags & SA_RESTORER) {
+		regs->gprs[14] = (__u64) ka->sa.sa_restorer;
+	} else {
+		regs->gprs[14] = (__u64) frame->retcode;
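+		/* plant an "svc __NR_sigreturn" instruction in the frame */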
+		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
+		               (u16 __user *)(frame->retcode)))
+			goto give_sigsegv;
+	}
+
+	/* Set up backchain. */
+	if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
+		goto give_sigsegv;
+
+	/* Set up registers for signal handler */
+	regs->gprs[15] = (__u64) frame;
+	regs->psw.addr = (__u64) ka->sa.sa_handler;
+
+	regs->gprs[2] = map_signal(sig);
+	regs->gprs[3] = (__u64) &frame->sc;
+
+	/* We forgot to include these in the sigcontext.
+	   To avoid breaking binary compatibility, they are passed as args. */
+	regs->gprs[4] = current->thread.trap_no;
+	regs->gprs[5] = current->thread.prot_addr;
+
+	/* Place signal number on stack to allow backtrace from handler.  */
+	if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
+		goto give_sigsegv;
+	return;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+}
+
+static void setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
+			   sigset_t *set, struct pt_regs * regs)
+{
+	int err = 0;
+	rt_sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(rt_sigframe32));
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
+		goto give_sigsegv;
+
+	if (copy_siginfo_to_user32(&frame->info, info))
+		goto give_sigsegv;
+
+	/* Create the ucontext.  */
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(0, &frame->uc.uc_link);
+	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+	err |= __put_user(sas_ss_flags(regs->gprs[15]),
+	                  &frame->uc.uc_stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+	err |= save_sigregs32(regs, &frame->uc.uc_mcontext);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+	if (err)
+		goto give_sigsegv;
+
+	/* Set up to return from userspace.  If provided, use a stub
+	   already in userspace.  */
+	if (ka->sa.sa_flags & SA_RESTORER) {
+		regs->gprs[14] = (__u64) ka->sa.sa_restorer;
+	} else {
+		regs->gprs[14] = (__u64) frame->retcode;
+		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
+		                  (u16 __user *)(frame->retcode));
+	}
+
+	/* Set up backchain. */
+	if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
+		goto give_sigsegv;
+
+	/* Set up registers for signal handler */
+	regs->gprs[15] = (__u64) frame;
+	regs->psw.addr = (__u64) ka->sa.sa_handler;
+
+	regs->gprs[2] = map_signal(sig);
+	regs->gprs[3] = (__u64) &frame->info;
+	regs->gprs[4] = (__u64) &frame->uc;
+	return;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */	
+
+void
+handle_signal32(unsigned long sig, struct k_sigaction *ka,
+		siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+{
+	/* Set up the stack frame */
+	if (ka->sa.sa_flags & SA_SIGINFO)
+		setup_rt_frame32(sig, ka, info, oldset, regs);
+	else
+		setup_frame32(sig, ka, oldset, regs);
+
+	if (!(ka->sa.sa_flags & SA_NODEFER)) {
+		spin_lock_irq(&current->sighand->siglock);
+		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+		sigaddset(&current->blocked,sig);
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+}
+
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
new file mode 100644
index 000000000000..7a607b1d0380
--- /dev/null
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -0,0 +1,1443 @@
+/*
+*  arch/s390/kernel/compat_wrapper.S
+*    wrapper for 31 bit compatible system calls.
+*
+*  S390 version
+*    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+*    Author(s): Gerhard Tonn (ton@de.ibm.com),
+*               Thomas Spatzier (tspat@de.ibm.com)
+*/ 
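+
+# Argument-conversion idioms used by the wrappers below (annotation):
+#	llgtr	zero-extends and clears the high-order bit, turning a 31-bit
+#		user pointer into a clean 64-bit address
+#	llgfr	zero-extends a 32-bit unsigned value to 64 bits
+#	lgfr	sign-extends a 32-bit signed value to 64 bits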
+
+	.globl  sys32_exit_wrapper 
+sys32_exit_wrapper:
+	lgfr	%r2,%r2			# int
+	jg	sys_exit		# branch to sys_exit
+    
+	.globl  sys32_read_wrapper 
+sys32_read_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# char *
+	llgfr	%r4,%r4			# size_t
+	jg	sys32_read		# branch to system call
+
+	.globl  sys32_write_wrapper 
+sys32_write_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# const char *
+	llgfr	%r4,%r4			# size_t
+	jg	sys32_write		# branch to system call
+
+	.globl  sys32_open_wrapper 
+sys32_open_wrapper:
+	llgtr	%r2,%r2			# const char *
+	lgfr	%r3,%r3			# int
+	lgfr	%r4,%r4			# int
+	jg	sys_open		# branch to system call
+
+	.globl  sys32_close_wrapper 
+sys32_close_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	jg	sys_close		# branch to system call
+
+	.globl  sys32_creat_wrapper 
+sys32_creat_wrapper:
+	llgtr	%r2,%r2			# const char *
+	lgfr	%r3,%r3			# int
+	jg	sys_creat		# branch to system call
+
+	.globl  sys32_link_wrapper 
+sys32_link_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgtr	%r3,%r3			# const char *
+	jg	sys_link		# branch to system call
+
+	.globl  sys32_unlink_wrapper 
+sys32_unlink_wrapper:
+	llgtr	%r2,%r2			# const char *
+	jg	sys_unlink		# branch to system call
+
+	.globl  sys32_chdir_wrapper 
+sys32_chdir_wrapper:
+	llgtr	%r2,%r2			# const char *
+	jg	sys_chdir		# branch to system call
+
+	.globl  sys32_time_wrapper 
+sys32_time_wrapper:
+	llgtr	%r2,%r2			# int *
+	jg	compat_sys_time		# branch to system call
+
+	.globl  sys32_mknod_wrapper 
+sys32_mknod_wrapper:
+	llgtr	%r2,%r2			# const char *
+	lgfr	%r3,%r3			# int 
+	llgfr	%r4,%r4			# dev
+	jg	sys_mknod		# branch to system call
+
+	.globl  sys32_chmod_wrapper 
+sys32_chmod_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgfr	%r3,%r3			# mode_t
+	jg	sys_chmod		# branch to system call
+
+	.globl  sys32_lchown16_wrapper 
+sys32_lchown16_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
+	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t 
+	jg	sys32_lchown16		# branch to system call
+
+	.globl  sys32_lseek_wrapper 
+sys32_lseek_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	lgfr	%r3,%r3			# off_t
+	llgfr	%r4,%r4			# unsigned int
+	jg	sys_lseek		# branch to system call
+
+#sys32_getpid_wrapper				# void 
+
+	.globl  sys32_mount_wrapper 
+sys32_mount_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# char *
+	llgtr	%r4,%r4			# char *
+	llgfr	%r5,%r5			# unsigned long
+	llgtr	%r6,%r6			# void *
+	jg	compat_sys_mount	# branch to system call
+
+	.globl  sys32_oldumount_wrapper 
+sys32_oldumount_wrapper:
+	llgtr	%r2,%r2			# char *
+	jg	sys_oldumount		# branch to system call
+
+	.globl  sys32_setuid16_wrapper 
+sys32_setuid16_wrapper:
+	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
+	jg	sys32_setuid16		# branch to system call
+
+#sys32_getuid16_wrapper			# void 
+
+	.globl  sys32_ptrace_wrapper 
+sys32_ptrace_wrapper:
+	lgfr	%r2,%r2			# long
+	lgfr	%r3,%r3			# long
+	llgtr	%r4,%r4			# long
+	llgfr	%r5,%r5			# long
+	jg	sys_ptrace		# branch to system call
+
+	.globl  sys32_alarm_wrapper 
+sys32_alarm_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	jg	sys_alarm		# branch to system call
+
+#sys32_pause_wrapper			# void 
+
+	.globl  compat_sys_utime_wrapper 
+compat_sys_utime_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# struct compat_utimbuf *
+	jg	compat_sys_utime	# branch to system call
+
+	.globl  sys32_access_wrapper 
+sys32_access_wrapper:
+	llgtr	%r2,%r2			# const char *
+	lgfr	%r3,%r3			# int
+	jg	sys_access		# branch to system call
+
+	.globl  sys32_nice_wrapper 
+sys32_nice_wrapper:
+	lgfr	%r2,%r2			# int
+	jg	sys_nice		# branch to system call
+
+#sys32_sync_wrapper			# void 
+
+	.globl  sys32_kill_wrapper 
+sys32_kill_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	jg	sys_kill		# branch to system call
+
+	.globl  sys32_rename_wrapper 
+sys32_rename_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgtr	%r3,%r3			# const char *
+	jg	sys_rename		# branch to system call
+
+	.globl  sys32_mkdir_wrapper 
+sys32_mkdir_wrapper:
+	llgtr	%r2,%r2			# const char *
+	lgfr	%r3,%r3			# int
+	jg	sys_mkdir		# branch to system call
+
+	.globl  sys32_rmdir_wrapper 
+sys32_rmdir_wrapper:
+	llgtr	%r2,%r2			# const char *
+	jg	sys_rmdir		# branch to system call
+
+	.globl  sys32_dup_wrapper 
+sys32_dup_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	jg	sys_dup			# branch to system call
+
+	.globl  sys32_pipe_wrapper 
+sys32_pipe_wrapper:
+	llgtr	%r2,%r2			# u32 *
+	jg	sys_pipe		# branch to system call
+
+	.globl  compat_sys_times_wrapper 
+compat_sys_times_wrapper:
+	llgtr	%r2,%r2			# struct compat_tms *
+	jg	compat_sys_times	# branch to system call
+
+	.globl  sys32_brk_wrapper 
+sys32_brk_wrapper:
+	llgtr	%r2,%r2			# unsigned long
+	jg	sys_brk			# branch to system call
+
+	.globl  sys32_setgid16_wrapper 
+sys32_setgid16_wrapper:
+	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
+	jg	sys32_setgid16		# branch to system call
+
+#sys32_getgid16_wrapper			# void 
+
+	.globl sys32_signal_wrapper
+sys32_signal_wrapper:
+	lgfr	%r2,%r2			# int 
+	llgtr	%r3,%r3			# __sighandler_t
+	jg	sys_signal
+
+#sys32_geteuid16_wrapper		# void 
+
+#sys32_getegid16_wrapper		# void 
+
+	.globl  sys32_acct_wrapper 
+sys32_acct_wrapper:
+	llgtr	%r2,%r2			# char *
+	jg	sys_acct		# branch to system call
+
+	.globl  sys32_umount_wrapper 
+sys32_umount_wrapper:
+	llgtr	%r2,%r2			# char *
+	lgfr	%r3,%r3			# int
+	jg	sys_umount		# branch to system call
+
+	.globl  compat_sys_ioctl_wrapper
+compat_sys_ioctl_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# unsigned int
+	llgfr	%r4,%r4			# unsigned int
+	jg	compat_sys_ioctl	# branch to system call
+
+	.globl  compat_sys_fcntl_wrapper 
+compat_sys_fcntl_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# unsigned int 
+	llgfr	%r4,%r4			# unsigned long
+	jg	compat_sys_fcntl	# branch to system call
+
+	.globl  sys32_setpgid_wrapper 
+sys32_setpgid_wrapper:
+	lgfr	%r2,%r2			# pid_t
+	lgfr	%r3,%r3			# pid_t
+	jg	sys_setpgid		# branch to system call
+
+	.globl  sys32_umask_wrapper 
+sys32_umask_wrapper:
+	lgfr	%r2,%r2			# int
+	jg	sys_umask		# branch to system call
+
+	.globl  sys32_chroot_wrapper 
+sys32_chroot_wrapper:
+	llgtr	%r2,%r2			# char *
+	jg	sys_chroot		# branch to system call
+
+	.globl sys32_ustat_wrapper
+sys32_ustat_wrapper:
+	llgfr	%r2,%r2			# dev_t 
+	llgtr	%r3,%r3			# struct ustat *
+	jg	sys_ustat
+
+	.globl  sys32_dup2_wrapper 
+sys32_dup2_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# unsigned int
+	jg	sys_dup2		# branch to system call
+
+#sys32_getppid_wrapper			# void 
+
+#sys32_getpgrp_wrapper			# void 
+
+#sys32_setsid_wrapper			# void 
+
+	.globl  sys32_sigaction_wrapper
+sys32_sigaction_wrapper:
+	lgfr	%r2,%r2			# int 
+	llgtr	%r3,%r3			# const struct old_sigaction32 *
+	llgtr	%r4,%r4			# struct old_sigaction32 *
+	jg	sys32_sigaction		# branch to system call
+
+	.globl  sys32_setreuid16_wrapper 
+sys32_setreuid16_wrapper:
+	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
+	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
+	jg	sys32_setreuid16	# branch to system call
+
+	.globl  sys32_setregid16_wrapper 
+sys32_setregid16_wrapper:
+	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
+	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t 
+	jg	sys32_setregid16	# branch to system call
+
+#sys32_sigsuspend_wrapper		# done in sigsuspend_glue 
+
+	.globl  compat_sys_sigpending_wrapper 
+compat_sys_sigpending_wrapper:
+	llgtr	%r2,%r2			# compat_old_sigset_t *
+	jg	compat_sys_sigpending	# branch to system call
+
+	.globl  sys32_sethostname_wrapper 
+sys32_sethostname_wrapper:
+	llgtr	%r2,%r2			# char *
+	lgfr	%r3,%r3			# int
+	jg	sys_sethostname		# branch to system call
+
+	.globl  compat_sys_setrlimit_wrapper 
+compat_sys_setrlimit_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# struct rlimit_emu31 *
+	jg	compat_sys_setrlimit	# branch to system call
+
+	.globl  compat_sys_old_getrlimit_wrapper 
+compat_sys_old_getrlimit_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# struct rlimit_emu31 *
+	jg	compat_sys_old_getrlimit # branch to system call
+
+	.globl  compat_sys_getrlimit_wrapper 
+compat_sys_getrlimit_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# struct rlimit_emu31 *
+	jg	compat_sys_getrlimit	# branch to system call
+
+	.globl  sys32_mmap2_wrapper 
+sys32_mmap2_wrapper:
+	llgtr	%r2,%r2			# struct mmap_arg_struct_emu31 *
+	jg	sys32_mmap2			# branch to system call
+
+	.globl  compat_sys_getrusage_wrapper 
+compat_sys_getrusage_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# struct rusage_emu31 *
+	jg	compat_sys_getrusage	# branch to system call
+
+	.globl  sys32_gettimeofday_wrapper 
+sys32_gettimeofday_wrapper:
+	llgtr	%r2,%r2			# struct timeval_emu31 *
+	llgtr	%r3,%r3			# struct timezone *
+	jg	sys32_gettimeofday	# branch to system call
+
+	.globl  sys32_settimeofday_wrapper 
+sys32_settimeofday_wrapper:
+	llgtr	%r2,%r2			# struct timeval_emu31 *
+	llgtr	%r3,%r3			# struct timezone *
+	jg	sys32_settimeofday	# branch to system call
+
+	.globl  sys32_getgroups16_wrapper 
+sys32_getgroups16_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
+	jg	sys32_getgroups16	# branch to system call
+
+	.globl  sys32_setgroups16_wrapper 
+sys32_setgroups16_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
+	jg	sys32_setgroups16	# branch to system call
+
+	.globl  sys32_symlink_wrapper 
+sys32_symlink_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgtr	%r3,%r3			# const char *
+	jg	sys_symlink		# branch to system call
+
+	.globl  sys32_readlink_wrapper 
+sys32_readlink_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgtr	%r3,%r3			# char *
+	lgfr	%r4,%r4			# int
+	jg	sys_readlink		# branch to system call
+
+	.globl  sys32_uselib_wrapper 
+sys32_uselib_wrapper:
+	llgtr	%r2,%r2			# const char *
+	jg	sys_uselib		# branch to system call
+
+	.globl  sys32_swapon_wrapper 
+sys32_swapon_wrapper:
+	llgtr	%r2,%r2			# const char *
+	lgfr	%r3,%r3			# int
+	jg	sys_swapon		# branch to system call
+
+	.globl  sys32_reboot_wrapper 
+sys32_reboot_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	llgfr	%r4,%r4			# unsigned int
+	llgtr	%r5,%r5			# void *
+	jg	sys_reboot		# branch to system call
+
+	.globl  old32_readdir_wrapper 
+old32_readdir_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# void *
+	llgfr	%r4,%r4			# unsigned int
+	jg	compat_sys_old_readdir	# branch to system call
+
+	.globl  old32_mmap_wrapper 
+old32_mmap_wrapper:
+	llgtr	%r2,%r2			# struct mmap_arg_struct_emu31 *
+	jg	old32_mmap		# branch to system call
+
+	.globl  sys32_munmap_wrapper 
+sys32_munmap_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgfr	%r3,%r3			# size_t 
+	jg	sys_munmap		# branch to system call
+
+	.globl  sys32_truncate_wrapper 
+sys32_truncate_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgfr	%r3,%r3			# unsigned long
+	jg	sys_truncate		# branch to system call
+
+	.globl  sys32_ftruncate_wrapper 
+sys32_ftruncate_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# unsigned long
+	jg	sys_ftruncate		# branch to system call
+
+	.globl  sys32_fchmod_wrapper 
+sys32_fchmod_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# mode_t
+	jg	sys_fchmod		# branch to system call
+
+	.globl  sys32_fchown16_wrapper 
+sys32_fchown16_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# compat_uid_t
+	llgfr	%r4,%r4			# compat_uid_t
+	jg	sys32_fchown16		# branch to system call
+
+	.globl  sys32_getpriority_wrapper 
+sys32_getpriority_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	jg	sys_getpriority		# branch to system call
+
+	.globl  sys32_setpriority_wrapper 
+sys32_setpriority_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	lgfr	%r4,%r4			# int
+	jg	sys_setpriority		# branch to system call
+
+	.globl  compat_sys_statfs_wrapper 
+compat_sys_statfs_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# struct compat_statfs *
+	jg	compat_sys_statfs	# branch to system call
+
+	.globl  compat_sys_fstatfs_wrapper 
+compat_sys_fstatfs_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# struct compat_statfs *
+	jg	compat_sys_fstatfs	# branch to system call
+
+	.globl  compat_sys_socketcall_wrapper 
+compat_sys_socketcall_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# u32 *
+	jg	compat_sys_socketcall	# branch to system call
+
+	.globl  sys32_syslog_wrapper 
+sys32_syslog_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# char *
+	lgfr	%r4,%r4			# int
+	jg	sys_syslog		# branch to system call
+
+	.globl  compat_sys_setitimer_wrapper 
+compat_sys_setitimer_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# struct itimerval_emu31 *
+	llgtr	%r4,%r4			# struct itimerval_emu31 *
+	jg	compat_sys_setitimer	# branch to system call
+
+	.globl  compat_sys_getitimer_wrapper 
+compat_sys_getitimer_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# struct itimerval_emu31 *
+	jg	compat_sys_getitimer	# branch to system call
+
+	.globl  compat_sys_newstat_wrapper 
+compat_sys_newstat_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# struct stat_emu31 *
+	jg	compat_sys_newstat	# branch to system call
+
+	.globl  compat_sys_newlstat_wrapper 
+compat_sys_newlstat_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# struct stat_emu31 *
+	jg	compat_sys_newlstat	# branch to system call
+
+	.globl  compat_sys_newfstat_wrapper 
+compat_sys_newfstat_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# struct stat_emu31 *
+	jg	compat_sys_newfstat	# branch to system call
+
+#sys32_vhangup_wrapper			# void 
+
+	.globl  compat_sys_wait4_wrapper 
+compat_sys_wait4_wrapper:
+	lgfr	%r2,%r2			# pid_t
+	llgtr	%r3,%r3			# unsigned int *
+	lgfr	%r4,%r4			# int
+	llgtr	%r5,%r5			# struct rusage *
+	jg	compat_sys_wait4	# branch to system call
+
+	.globl  sys32_swapoff_wrapper 
+sys32_swapoff_wrapper:
+	llgtr	%r2,%r2			# const char *
+	jg	sys_swapoff		# branch to system call
+
+	.globl  sys32_sysinfo_wrapper 
+sys32_sysinfo_wrapper:
+	llgtr	%r2,%r2			# struct sysinfo_emu31 *
+	jg	sys32_sysinfo		# branch to system call
+
+	.globl  sys32_ipc_wrapper 
+sys32_ipc_wrapper:
+	llgfr	%r2,%r2			# uint
+	lgfr	%r3,%r3			# int
+	lgfr	%r4,%r4			# int
+	lgfr	%r5,%r5			# int
+	llgfr	%r6,%r6			# u32
+	jg	sys32_ipc		# branch to system call
+
+	.globl  sys32_fsync_wrapper 
+sys32_fsync_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	jg	sys_fsync		# branch to system call
+
+#sys32_sigreturn_wrapper		# done in sigreturn_glue 
+
+#sys32_clone_wrapper			# done in clone_glue 
+
+	.globl  sys32_setdomainname_wrapper 
+sys32_setdomainname_wrapper:
+	llgtr	%r2,%r2			# char *
+	lgfr	%r3,%r3			# int
+	jg	sys_setdomainname	# branch to system call
+
+	.globl  sys32_newuname_wrapper 
+sys32_newuname_wrapper:
+	llgtr	%r2,%r2			# struct new_utsname *
+	jg	s390x_newuname		# branch to system call
+
+	.globl  sys32_adjtimex_wrapper 
+sys32_adjtimex_wrapper:
+	llgtr	%r2,%r2			# struct timex_emu31 *
+	jg	sys32_adjtimex		# branch to system call
+
+	.globl  sys32_mprotect_wrapper 
+sys32_mprotect_wrapper:
+	llgtr	%r2,%r2			# unsigned long (actually pointer)
+	llgfr	%r3,%r3			# size_t
+	llgfr	%r4,%r4			# unsigned long
+	jg	sys_mprotect		# branch to system call
+
+	.globl  compat_sys_sigprocmask_wrapper 
+compat_sys_sigprocmask_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# compat_old_sigset_t *
+	llgtr	%r4,%r4			# compat_old_sigset_t *
+	jg	compat_sys_sigprocmask		# branch to system call
+
+	.globl  sys32_init_module_wrapper 
+sys32_init_module_wrapper:
+	llgtr	%r2,%r2			# void *
+	llgfr	%r3,%r3			# unsigned long
+	llgtr	%r4,%r4			# char *
+	jg	sys32_init_module	# branch to system call
+
+	.globl  sys32_delete_module_wrapper 
+sys32_delete_module_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgfr	%r3,%r3			# unsigned int
+	jg	sys32_delete_module	# branch to system call
+
+	.globl  sys32_quotactl_wrapper 
+sys32_quotactl_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# const char *
+	llgfr	%r4,%r4			# qid_t
+	llgtr	%r5,%r5			# caddr_t
+	jg	sys_quotactl		# branch to system call
+
+	.globl  sys32_getpgid_wrapper 
+sys32_getpgid_wrapper:
+	lgfr	%r2,%r2			# pid_t
+	jg	sys_getpgid		# branch to system call
+
+	.globl  sys32_fchdir_wrapper 
+sys32_fchdir_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	jg	sys_fchdir		# branch to system call
+
+	.globl  sys32_bdflush_wrapper 
+sys32_bdflush_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# long
+	jg	sys_bdflush		# branch to system call
+
+	.globl  sys32_sysfs_wrapper 
+sys32_sysfs_wrapper:
+	lgfr	%r2,%r2			# int
+	llgfr	%r3,%r3			# unsigned long
+	llgfr	%r4,%r4			# unsigned long
+	jg	sys_sysfs		# branch to system call
+
+	.globl  sys32_personality_wrapper 
+sys32_personality_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	jg	s390x_personality	# branch to system call
+
+	.globl  sys32_setfsuid16_wrapper 
+sys32_setfsuid16_wrapper:
+	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
+	jg	sys32_setfsuid16	# branch to system call
+
+	.globl  sys32_setfsgid16_wrapper 
+sys32_setfsgid16_wrapper:
+	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
+	jg	sys32_setfsgid16	# branch to system call
+
+	.globl  sys32_llseek_wrapper 
+sys32_llseek_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# unsigned long
+	llgfr	%r4,%r4			# unsigned long
+	llgtr	%r5,%r5			# loff_t *
+	llgfr	%r6,%r6			# unsigned int
+	jg	sys_llseek		# branch to system call
+
+	.globl  sys32_getdents_wrapper 
+sys32_getdents_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# void *
+	llgfr	%r4,%r4			# unsigned int
+	jg	compat_sys_getdents	# branch to system call
+
+	.globl  compat_sys_select_wrapper
+compat_sys_select_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# compat_fd_set *
+	llgtr	%r4,%r4			# compat_fd_set *
+	llgtr	%r5,%r5			# compat_fd_set *
+	llgtr	%r6,%r6			# struct compat_timeval *
+	jg	compat_sys_select	# branch to system call
+
+	.globl  sys32_flock_wrapper 
+sys32_flock_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# unsigned int
+	jg	sys_flock		# branch to system call
+
+	.globl  sys32_msync_wrapper 
+sys32_msync_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgfr	%r3,%r3			# size_t
+	lgfr	%r4,%r4			# int
+	jg	sys_msync		# branch to system call
+
+	.globl  compat_sys_readv_wrapper
+compat_sys_readv_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# const struct compat_iovec *
+	llgfr	%r4,%r4			# unsigned long
+	jg	compat_sys_readv	# branch to system call
+
+	.globl  compat_sys_writev_wrapper
+compat_sys_writev_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# const struct compat_iovec *
+	llgfr	%r4,%r4			# unsigned long
+	jg	compat_sys_writev	# branch to system call
+
+	.globl  sys32_getsid_wrapper 
+sys32_getsid_wrapper:
+	lgfr	%r2,%r2			# pid_t
+	jg	sys_getsid		# branch to system call
+
+	.globl  sys32_fdatasync_wrapper 
+sys32_fdatasync_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	jg	sys_fdatasync		# branch to system call
+
+#sys32_sysctl_wrapper			# implemented below
+
+	.globl  sys32_mlock_wrapper 
+sys32_mlock_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgfr	%r3,%r3			# size_t
+	jg	sys_mlock		# branch to system call
+
+	.globl  sys32_munlock_wrapper 
+sys32_munlock_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgfr	%r3,%r3			# size_t
+	jg	sys_munlock		# branch to system call
+
+	.globl  sys32_mlockall_wrapper 
+sys32_mlockall_wrapper:
+	lgfr	%r2,%r2			# int
+	jg	sys_mlockall		# branch to system call
+
+#sys32_munlockall_wrapper		# void 
+
+	.globl  sys32_sched_setparam_wrapper 
+sys32_sched_setparam_wrapper:
+	lgfr	%r2,%r2			# pid_t
+	llgtr	%r3,%r3			# struct sched_param *
+	jg	sys_sched_setparam	# branch to system call
+
+	.globl  sys32_sched_getparam_wrapper 
+sys32_sched_getparam_wrapper:
+	lgfr	%r2,%r2			# pid_t
+	llgtr	%r3,%r3			# struct sched_param *
+	jg	sys_sched_getparam	# branch to system call
+
+	.globl  sys32_sched_setscheduler_wrapper 
+sys32_sched_setscheduler_wrapper:
+	lgfr	%r2,%r2			# pid_t
+	lgfr	%r3,%r3			# int
+	llgtr	%r4,%r4			# struct sched_param *
+	jg	sys_sched_setscheduler	# branch to system call
+
+	.globl  sys32_sched_getscheduler_wrapper 
+sys32_sched_getscheduler_wrapper:
+	lgfr	%r2,%r2			# pid_t
+	jg	sys_sched_getscheduler	# branch to system call
+
+#sys32_sched_yield_wrapper		# void 
+
+	.globl  sys32_sched_get_priority_max_wrapper 
+sys32_sched_get_priority_max_wrapper:
+	lgfr	%r2,%r2			# int
+	jg	sys_sched_get_priority_max	# branch to system call
+
+	.globl  sys32_sched_get_priority_min_wrapper 
+sys32_sched_get_priority_min_wrapper:
+	lgfr	%r2,%r2			# int
+	jg	sys_sched_get_priority_min	# branch to system call
+
+	.globl  sys32_sched_rr_get_interval_wrapper 
+sys32_sched_rr_get_interval_wrapper:
+	lgfr	%r2,%r2			# pid_t
+	llgtr	%r3,%r3			# struct compat_timespec *
+	jg	sys32_sched_rr_get_interval	# branch to system call
+
+	.globl  compat_sys_nanosleep_wrapper 
+compat_sys_nanosleep_wrapper:
+	llgtr	%r2,%r2			# struct compat_timespec *
+	llgtr	%r3,%r3			# struct compat_timespec *
+	jg	compat_sys_nanosleep		# branch to system call
+
+	.globl  sys32_mremap_wrapper 
+sys32_mremap_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgfr	%r3,%r3			# unsigned long
+	llgfr	%r4,%r4			# unsigned long
+	llgfr	%r5,%r5			# unsigned long
+	llgfr	%r6,%r6			# unsigned long
+	jg	sys_mremap		# branch to system call
+
+	.globl  sys32_setresuid16_wrapper 
+sys32_setresuid16_wrapper:
+	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
+	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
+	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t 
+	jg	sys32_setresuid16	# branch to system call
+
+	.globl  sys32_getresuid16_wrapper 
+sys32_getresuid16_wrapper:
+	llgtr	%r2,%r2			# __kernel_old_uid_emu31_t *
+	llgtr	%r3,%r3			# __kernel_old_uid_emu31_t *
+	llgtr	%r4,%r4			# __kernel_old_uid_emu31_t *
+	jg	sys32_getresuid16	# branch to system call
+
+	.globl  sys32_poll_wrapper 
+sys32_poll_wrapper:
+	llgtr	%r2,%r2			# struct pollfd * 
+	llgfr	%r3,%r3			# unsigned int 
+	lgfr	%r4,%r4			# long 
+	jg	sys_poll		# branch to system call
+
+	.globl  compat_sys_nfsservctl_wrapper
+compat_sys_nfsservctl_wrapper:
+	lgfr	%r2,%r2			# int 
+	llgtr	%r3,%r3			# struct compat_nfsctl_arg*
+	llgtr	%r4,%r4			# union compat_nfsctl_res*
+	jg	compat_sys_nfsservctl	# branch to system call
+
+	.globl  sys32_setresgid16_wrapper 
+sys32_setresgid16_wrapper:
+	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
+	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t 
+	llgfr	%r4,%r4			# __kernel_old_gid_emu31_t 
+	jg	sys32_setresgid16	# branch to system call
+
+	.globl  sys32_getresgid16_wrapper 
+sys32_getresgid16_wrapper:
+	llgtr	%r2,%r2			# __kernel_old_gid_emu31_t *
+	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
+	llgtr	%r4,%r4			# __kernel_old_gid_emu31_t *
+	jg	sys32_getresgid16	# branch to system call
+
+	.globl  sys32_prctl_wrapper 
+sys32_prctl_wrapper:
+	lgfr	%r2,%r2			# int
+	llgfr	%r3,%r3			# unsigned long
+	llgfr	%r4,%r4			# unsigned long
+	llgfr	%r5,%r5			# unsigned long
+	llgfr	%r6,%r6			# unsigned long
+	jg	sys_prctl		# branch to system call
+
+#sys32_rt_sigreturn_wrapper		# done in rt_sigreturn_glue 
+
+	.globl  sys32_rt_sigaction_wrapper 
+sys32_rt_sigaction_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# const struct sigaction_emu31 *
+	llgtr	%r4,%r4			# const struct sigaction_emu31 *
+	llgfr	%r5,%r5			# size_t
+	jg	sys32_rt_sigaction	# branch to system call
+
+	.globl  sys32_rt_sigprocmask_wrapper 
+sys32_rt_sigprocmask_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# old_sigset_emu31 *
+	llgtr	%r4,%r4			# old_sigset_emu31 *
+	llgfr	%r5,%r5			# size_t
+	jg	sys32_rt_sigprocmask	# branch to system call
+
+	.globl  sys32_rt_sigpending_wrapper 
+sys32_rt_sigpending_wrapper:
+	llgtr	%r2,%r2			# sigset_emu31 *
+	llgfr	%r3,%r3			# size_t
+	jg	sys32_rt_sigpending	# branch to system call
+
+	.globl  compat_sys_rt_sigtimedwait_wrapper
+compat_sys_rt_sigtimedwait_wrapper:
+	llgtr	%r2,%r2			# const sigset_emu31_t *
+	llgtr	%r3,%r3			# siginfo_emu31_t *
+	llgtr	%r4,%r4			# const struct compat_timespec *
+	llgfr	%r5,%r5			# size_t
+	jg	compat_sys_rt_sigtimedwait	# branch to system call
+
+	.globl  sys32_rt_sigqueueinfo_wrapper 
+sys32_rt_sigqueueinfo_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	llgtr	%r4,%r4			# siginfo_emu31_t *
+	jg	sys32_rt_sigqueueinfo	# branch to system call
+
+#sys32_rt_sigsuspend_wrapper		# done in rt_sigsuspend_glue 
+
+	.globl  sys32_pread64_wrapper 
+sys32_pread64_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# char *
+	llgfr	%r4,%r4			# size_t
+	llgfr	%r5,%r5			# u32
+	llgfr	%r6,%r6			# u32
+	jg	sys32_pread64		# branch to system call
+
+	.globl  sys32_pwrite64_wrapper 
+sys32_pwrite64_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# const char *
+	llgfr	%r4,%r4			# size_t
+	llgfr	%r5,%r5			# u32
+	llgfr	%r6,%r6			# u32
+	jg	sys32_pwrite64		# branch to system call
+
+	.globl  sys32_chown16_wrapper 
+sys32_chown16_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
+	llgfr	%r4,%r4			# __kernel_old_gid_emu31_t 
+	jg	sys32_chown16		# branch to system call
+
+	.globl  sys32_getcwd_wrapper 
+sys32_getcwd_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgfr	%r3,%r3			# unsigned long
+	jg	sys_getcwd		# branch to system call
+
+	.globl  sys32_capget_wrapper 
+sys32_capget_wrapper:
+	llgtr	%r2,%r2			# cap_user_header_t
+	llgtr	%r3,%r3			# cap_user_data_t
+	jg	sys_capget		# branch to system call
+
+	.globl  sys32_capset_wrapper 
+sys32_capset_wrapper:
+	llgtr	%r2,%r2			# cap_user_header_t
+	llgtr	%r3,%r3			# const cap_user_data_t
+	jg	sys_capset		# branch to system call
+
+	.globl sys32_sigaltstack_wrapper
+sys32_sigaltstack_wrapper:
+	llgtr	%r2,%r2			# const stack_emu31_t * 
+	llgtr	%r3,%r3			# stack_emu31_t * 
+	jg	sys32_sigaltstack
+
+	.globl  sys32_sendfile_wrapper 
+sys32_sendfile_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	llgtr	%r4,%r4			# __kernel_off_emu31_t *
+	llgfr	%r5,%r5			# size_t
+	jg	sys32_sendfile		# branch to system call
+
+#sys32_vfork_wrapper			# done in vfork_glue 
+
+	.globl  sys32_truncate64_wrapper 
+sys32_truncate64_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgfr	%r3,%r3			# unsigned long
+	llgfr	%r4,%r4			# unsigned long
+	jg	sys32_truncate64	# branch to system call
+
+	.globl  sys32_ftruncate64_wrapper 
+sys32_ftruncate64_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# unsigned long
+	llgfr	%r4,%r4			# unsigned long
+	jg	sys32_ftruncate64	# branch to system call
+
+	.globl sys32_lchown_wrapper	
+sys32_lchown_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgfr	%r3,%r3			# uid_t
+	llgfr	%r4,%r4			# gid_t
+	jg	sys_lchown		# branch to system call
+
+#sys32_getuid_wrapper			# void			 
+#sys32_getgid_wrapper			# void 
+#sys32_geteuid_wrapper			# void 
+#sys32_getegid_wrapper			# void 
+
+	.globl sys32_setreuid_wrapper
+sys32_setreuid_wrapper:
+	llgfr	%r2,%r2			# uid_t
+	llgfr	%r3,%r3			# uid_t
+	jg	sys_setreuid		# branch to system call
+
+	.globl sys32_setregid_wrapper
+sys32_setregid_wrapper:
+	llgfr	%r2,%r2			# gid_t
+	llgfr	%r3,%r3			# gid_t
+	jg	sys_setregid		# branch to system call
+
+	.globl  sys32_getgroups_wrapper 
+sys32_getgroups_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# gid_t *
+	jg	sys_getgroups		# branch to system call
+
+	.globl  sys32_setgroups_wrapper 
+sys32_setgroups_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# gid_t *
+	jg	sys_setgroups		# branch to system call
+
+	.globl sys32_fchown_wrapper	
+sys32_fchown_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# uid_t
+	llgfr	%r4,%r4			# gid_t
+	jg	sys_fchown		# branch to system call
+
+	.globl sys32_setresuid_wrapper	
+sys32_setresuid_wrapper:
+	llgfr	%r2,%r2			# uid_t
+	llgfr	%r3,%r3			# uid_t
+	llgfr	%r4,%r4			# uid_t
+	jg	sys_setresuid		# branch to system call
+
+	.globl sys32_getresuid_wrapper	
+sys32_getresuid_wrapper:
+	llgtr	%r2,%r2			# uid_t *
+	llgtr	%r3,%r3			# uid_t *
+	llgtr	%r4,%r4			# uid_t *
+	jg	sys_getresuid		# branch to system call
+
+	.globl sys32_setresgid_wrapper	
+sys32_setresgid_wrapper:
+	llgfr	%r2,%r2			# gid_t
+	llgfr	%r3,%r3			# gid_t
+	llgfr	%r4,%r4			# gid_t
+	jg	sys_setresgid		# branch to system call
+
+	.globl sys32_getresgid_wrapper	
+sys32_getresgid_wrapper:
+	llgtr	%r2,%r2			# gid_t *
+	llgtr	%r3,%r3			# gid_t *
+	llgtr	%r4,%r4			# gid_t *
+	jg	sys_getresgid		# branch to system call
+
+	.globl sys32_chown_wrapper	
+sys32_chown_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgfr	%r3,%r3			# uid_t
+	llgfr	%r4,%r4			# gid_t
+	jg	sys_chown		# branch to system call
+
+	.globl sys32_setuid_wrapper	
+sys32_setuid_wrapper:
+	llgfr	%r2,%r2			# uid_t
+	jg	sys_setuid		# branch to system call
+
+	.globl sys32_setgid_wrapper	
+sys32_setgid_wrapper:
+	llgfr	%r2,%r2			# gid_t
+	jg	sys_setgid		# branch to system call
+
+	.globl sys32_setfsuid_wrapper	
+sys32_setfsuid_wrapper:
+	llgfr	%r2,%r2			# uid_t
+	jg	sys_setfsuid		# branch to system call
+
+	.globl sys32_setfsgid_wrapper	
+sys32_setfsgid_wrapper:
+	llgfr	%r2,%r2			# gid_t
+	jg	sys_setfsgid		# branch to system call
+
+	.globl  sys32_pivot_root_wrapper 
+sys32_pivot_root_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgtr	%r3,%r3			# const char *
+	jg	sys_pivot_root		# branch to system call
+
+	.globl  sys32_mincore_wrapper 
+sys32_mincore_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgfr	%r3,%r3			# size_t
+	llgtr	%r4,%r4			# unsigned char *
+	jg	sys_mincore		# branch to system call
+
+	.globl  sys32_madvise_wrapper 
+sys32_madvise_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgfr	%r3,%r3			# size_t
+	lgfr	%r4,%r4			# int
+	jg	sys_madvise		# branch to system call
+
+	.globl  sys32_getdents64_wrapper 
+sys32_getdents64_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# void *
+	llgfr	%r4,%r4			# unsigned int
+	jg	sys_getdents64		# branch to system call
+
+	.globl  compat_sys_fcntl64_wrapper 
+compat_sys_fcntl64_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgfr	%r3,%r3			# unsigned int 
+	llgfr	%r4,%r4			# unsigned long
+	jg	compat_sys_fcntl64	# branch to system call
+
+	.globl	sys32_stat64_wrapper
+sys32_stat64_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# struct stat64 *
+	jg	sys32_stat64		# branch to system call
+
+	.globl	sys32_lstat64_wrapper
+sys32_lstat64_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# struct stat64 *
+	jg	sys32_lstat64		# branch to system call
+
+	.globl	sys32_stime_wrapper
+sys32_stime_wrapper:
+	llgtr	%r2,%r2			# long *
+	jg	compat_sys_stime	# branch to system call
+
+	.globl  sys32_sysctl_wrapper
+sys32_sysctl_wrapper:
+	llgtr	%r2,%r2			# struct __sysctl_args32 *
+	jg	sys32_sysctl
+
+	.globl	sys32_fstat64_wrapper
+sys32_fstat64_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgtr	%r3,%r3			# struct stat64 *
+	jg	sys32_fstat64		# branch to system call
+
+	.globl  compat_sys_futex_wrapper 
+compat_sys_futex_wrapper:
+	llgtr	%r2,%r2			# u32 *
+	lgfr	%r3,%r3			# int
+	lgfr	%r4,%r4			# int
+	llgtr	%r5,%r5			# struct compat_timespec *
+	llgtr	%r6,%r6			# u32 *
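+	# the sixth argument was spilled to the stack; big-endian puts the
+	# 32-bit int in the low half (164) of the 8-byte slot at 160, so
+	# sign-extend it and store a full doubleword for the 64-bit callee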
+	lgf	%r0,164(%r15)		# int
+	stg	%r0,160(%r15)
+	jg	compat_sys_futex	# branch to system call
+
+	.globl	sys32_setxattr_wrapper
+sys32_setxattr_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# char *
+	llgtr	%r4,%r4			# void *
+	llgfr	%r5,%r5			# size_t
+	lgfr	%r6,%r6			# int
+	jg	sys_setxattr
+
+	.globl	sys32_lsetxattr_wrapper
+sys32_lsetxattr_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# char *
+	llgtr	%r4,%r4			# void *
+	llgfr	%r5,%r5			# size_t
+	lgfr	%r6,%r6			# int
+	jg	sys_lsetxattr
+
+	.globl	sys32_fsetxattr_wrapper
+sys32_fsetxattr_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# char *
+	llgtr	%r4,%r4			# void *
+	llgfr	%r5,%r5			# size_t
+	lgfr	%r6,%r6			# int
+	jg	sys_fsetxattr
+
+	.globl	sys32_getxattr_wrapper
+sys32_getxattr_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# char *
+	llgtr	%r4,%r4			# void *
+	llgfr	%r5,%r5			# size_t
+	jg	sys_getxattr
+
+	.globl	sys32_lgetxattr_wrapper
+sys32_lgetxattr_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# char *
+	llgtr	%r4,%r4			# void *
+	llgfr	%r5,%r5			# size_t
+	jg	sys_lgetxattr
+
+	.globl	sys32_fgetxattr_wrapper
+sys32_fgetxattr_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# char *
+	llgtr	%r4,%r4			# void *
+	llgfr	%r5,%r5			# size_t
+	jg	sys_fgetxattr
+
+	.globl	sys32_listxattr_wrapper
+sys32_listxattr_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# char *
+	llgfr	%r4,%r4			# size_t
+	jg	sys_listxattr
+
+	.globl	sys32_llistxattr_wrapper
+sys32_llistxattr_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# char *
+	llgfr	%r4,%r4			# size_t
+	jg	sys_llistxattr
+
+	.globl	sys32_flistxattr_wrapper
+sys32_flistxattr_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# char *
+	llgfr	%r4,%r4			# size_t
+	jg	sys_flistxattr
+
+	.globl	sys32_removexattr_wrapper
+sys32_removexattr_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# char *
+	jg	sys_removexattr
+
+	.globl	sys32_lremovexattr_wrapper
+sys32_lremovexattr_wrapper:
+	llgtr	%r2,%r2			# char *
+	llgtr	%r3,%r3			# char *
+	jg	sys_lremovexattr
+
+	.globl	sys32_fremovexattr_wrapper
+sys32_fremovexattr_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# char *
+	jg	sys_fremovexattr
+
+	.globl	sys32_sched_setaffinity_wrapper
+sys32_sched_setaffinity_wrapper:
+	lgfr	%r2,%r2			# int
+	llgfr	%r3,%r3			# unsigned int
+	llgtr	%r4,%r4			# unsigned long *
+	jg	compat_sys_sched_setaffinity
+
+	.globl	sys32_sched_getaffinity_wrapper
+sys32_sched_getaffinity_wrapper:
+	lgfr	%r2,%r2			# int
+	llgfr	%r3,%r3			# unsigned int
+	llgtr	%r4,%r4			# unsigned long *
+	jg	compat_sys_sched_getaffinity
+
+	.globl  sys32_exit_group_wrapper
+sys32_exit_group_wrapper:
+	lgfr	%r2,%r2			# int
+	jg	sys_exit_group		# branch to system call
+
+	.globl  sys32_set_tid_address_wrapper
+sys32_set_tid_address_wrapper:
+	llgtr	%r2,%r2			# int *
+	jg	sys_set_tid_address	# branch to system call
+
+	.globl  sys_epoll_create_wrapper
+sys_epoll_create_wrapper:
+	lgfr	%r2,%r2			# int
+	jg	sys_epoll_create	# branch to system call
+
+	.globl  sys_epoll_ctl_wrapper
+sys_epoll_ctl_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	lgfr	%r4,%r4			# int
+	llgtr	%r5,%r5			# struct epoll_event *
+	jg	sys_epoll_ctl		# branch to system call
+
+	.globl  sys_epoll_wait_wrapper
+sys_epoll_wait_wrapper:
+	lgfr	%r2,%r2			# int
+	llgtr	%r3,%r3			# struct epoll_event *
+	lgfr	%r4,%r4			# int
+	lgfr	%r5,%r5			# int
+	jg	sys_epoll_wait		# branch to system call
+
+	.globl	sys32_lookup_dcookie_wrapper
+sys32_lookup_dcookie_wrapper:
+	sllg	%r2,%r2,32		# get high word of 64bit dcookie
+	or	%r2,%r3			# get low word of 64bit dcookie
+	llgtr	%r3,%r4			# char *
+	llgfr	%r4,%r5			# size_t
+	jg	sys_lookup_dcookie
+
+	.globl	sys32_fadvise64_wrapper
+sys32_fadvise64_wrapper:
+	lgfr	%r2,%r2			# int
+	sllg	%r3,%r3,32		# get high word of 64bit loff_t
+	or	%r3,%r4			# get low word of 64bit loff_t
+	llgfr	%r4,%r5			# size_t (unsigned long)
+	lgfr	%r5,%r6			# int
+	jg	sys_fadvise64
+
+	.globl	sys32_fadvise64_64_wrapper
+sys32_fadvise64_64_wrapper:
+	llgtr	%r2,%r2			# struct fadvise64_64_args *
+	jg	s390_fadvise64_64
+
+	.globl	sys32_clock_settime_wrapper
+sys32_clock_settime_wrapper:
+	lgfr	%r2,%r2			# clockid_t (int)
+	llgtr	%r3,%r3			# struct compat_timespec *
+	jg	compat_sys_clock_settime
+
+	.globl	sys32_clock_gettime_wrapper
+sys32_clock_gettime_wrapper:
+	lgfr	%r2,%r2			# clockid_t (int)
+	llgtr	%r3,%r3			# struct compat_timespec *
+	jg	compat_sys_clock_gettime
+
+	.globl	sys32_clock_getres_wrapper
+sys32_clock_getres_wrapper:
+	lgfr	%r2,%r2			# clockid_t (int)
+	llgtr	%r3,%r3			# struct compat_timespec *
+	jg	compat_sys_clock_getres
+
+	.globl	sys32_clock_nanosleep_wrapper
+sys32_clock_nanosleep_wrapper:
+	lgfr	%r2,%r2			# clockid_t (int)
+	lgfr	%r3,%r3			# int
+	llgtr	%r4,%r4			# struct compat_timespec *
+	llgtr	%r5,%r5			# struct compat_timespec *
+	jg	compat_sys_clock_nanosleep
+
+	.globl	sys32_timer_create_wrapper
+sys32_timer_create_wrapper:
+	lgfr	%r2,%r2			# timer_t (int)
+	llgtr	%r3,%r3			# struct compat_sigevent *
+	llgtr	%r4,%r4			# timer_t *
+	jg	sys32_timer_create
+
+	.globl	sys32_timer_settime_wrapper
+sys32_timer_settime_wrapper:
+	lgfr	%r2,%r2			# timer_t (int)
+	lgfr	%r3,%r3			# int
+	llgtr	%r4,%r4			# struct compat_itimerspec *
+	llgtr	%r5,%r5			# struct compat_itimerspec *
+	jg	compat_sys_timer_settime
+
+	.globl	sys32_timer_gettime_wrapper
+sys32_timer_gettime_wrapper:
+	lgfr	%r2,%r2			# timer_t (int)
+	llgtr	%r3,%r3			# struct compat_itimerspec *
+	jg	compat_sys_timer_gettime
+
+	.globl	sys32_timer_getoverrun_wrapper
+sys32_timer_getoverrun_wrapper:
+	lgfr	%r2,%r2			# timer_t (int)
+	jg	sys_timer_getoverrun
+
+	.globl	sys32_timer_delete_wrapper
+sys32_timer_delete_wrapper:
+	lgfr	%r2,%r2			# timer_t (int)
+	jg	sys_timer_delete
+
+	.globl	sys32_io_setup_wrapper
+sys32_io_setup_wrapper:
+	llgfr	%r2,%r2			# unsigned int
+	llgtr	%r3,%r3			# u32 *
+	jg	compat_sys_io_setup
+
+	.globl	sys32_io_destroy_wrapper
+sys32_io_destroy_wrapper:
+	llgfr	%r2,%r2			# (aio_context_t) u32
+	jg	sys_io_destroy
+
+	.globl	sys32_io_getevents_wrapper
+sys32_io_getevents_wrapper:
+	llgfr	%r2,%r2			# (aio_context_t) u32
+	lgfr	%r3,%r3			# long
+	lgfr	%r4,%r4			# long
+	llgtr	%r5,%r5			# struct io_event *
+	llgtr	%r6,%r6			# struct compat_timespec *
+	jg	compat_sys_io_getevents
+
+	.globl	sys32_io_submit_wrapper
+sys32_io_submit_wrapper:
+	llgfr	%r2,%r2			# (aio_context_t) u32
+	lgfr	%r3,%r3			# long
+	llgtr	%r4,%r4			# struct iocb **
+	jg	compat_sys_io_submit
+
+	.globl	sys32_io_cancel_wrapper
+sys32_io_cancel_wrapper:
+	llgfr	%r2,%r2			# (aio_context_t) u32
+	llgtr	%r3,%r3			# struct iocb *
+	llgtr	%r4,%r4			# struct io_event *
+	jg	sys_io_cancel
+
+	.globl compat_sys_statfs64_wrapper
+compat_sys_statfs64_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgfr	%r3,%r3			# compat_size_t
+	llgtr	%r4,%r4			# struct compat_statfs64 *
+	jg	compat_sys_statfs64
+
+	.globl compat_sys_fstatfs64_wrapper
+compat_sys_fstatfs64_wrapper:
+	llgfr	%r2,%r2			# unsigned int fd
+	llgfr	%r3,%r3			# compat_size_t
+	llgtr	%r4,%r4			# struct compat_statfs64 *
+	jg	compat_sys_fstatfs64
+
+	.globl	compat_sys_mq_open_wrapper
+compat_sys_mq_open_wrapper:
+	llgtr	%r2,%r2			# const char *
+	lgfr	%r3,%r3			# int
+	llgfr	%r4,%r4			# mode_t
+	llgtr	%r5,%r5			# struct compat_mq_attr *
+	jg	compat_sys_mq_open
+
+	.globl	sys32_mq_unlink_wrapper
+sys32_mq_unlink_wrapper:
+	llgtr	%r2,%r2			# const char *
+	jg	sys_mq_unlink
+
+	.globl	compat_sys_mq_timedsend_wrapper
+compat_sys_mq_timedsend_wrapper:
+	lgfr	%r2,%r2			# mqd_t
+	llgtr	%r3,%r3			# const char *
+	llgfr	%r4,%r4			# size_t
+	llgfr	%r5,%r5			# unsigned int
+	llgtr	%r6,%r6			# const struct compat_timespec *
+	jg	compat_sys_mq_timedsend
+
+	.globl	compat_sys_mq_timedreceive_wrapper
+compat_sys_mq_timedreceive_wrapper:
+	lgfr	%r2,%r2			# mqd_t
+	llgtr	%r3,%r3			# char *
+	llgfr	%r4,%r4			# size_t
+	llgtr	%r5,%r5			# unsigned int *
+	llgtr	%r6,%r6			# const struct compat_timespec *
+	jg	compat_sys_mq_timedreceive
+
+	.globl	compat_sys_mq_notify_wrapper
+compat_sys_mq_notify_wrapper:
+	lgfr	%r2,%r2			# mqd_t
+	llgtr	%r3,%r3			# struct compat_sigevent *
+	jg	compat_sys_mq_notify
+
+	.globl	compat_sys_mq_getsetattr_wrapper
+compat_sys_mq_getsetattr_wrapper:
+	lgfr	%r2,%r2			# mqd_t
+	llgtr	%r3,%r3			# struct compat_mq_attr *
+	llgtr	%r4,%r4			# struct compat_mq_attr *
+	jg	compat_sys_mq_getsetattr
+
+	.globl	compat_sys_add_key_wrapper
+compat_sys_add_key_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgtr	%r3,%r3			# const char *
+	llgtr	%r4,%r4			# const void *
+	llgfr	%r5,%r5			# size_t
+	llgfr	%r6,%r6			# (key_serial_t) u32
+	jg	sys_add_key
+
+	.globl	compat_sys_request_key_wrapper
+compat_sys_request_key_wrapper:
+	llgtr	%r2,%r2			# const char *
+	llgtr	%r3,%r3			# const char *
+	llgtr	%r4,%r4			# const void *
+	llgfr	%r5,%r5			# (key_serial_t) u32
+	jg	sys_request_key
+
+	.globl	sys32_remap_file_pages_wrapper
+sys32_remap_file_pages_wrapper:
+	llgfr	%r2,%r2			# unsigned long
+	llgfr	%r3,%r3			# unsigned long
+	llgfr	%r4,%r4			# unsigned long
+	llgfr	%r5,%r5			# unsigned long
+	llgfr	%r6,%r6			# unsigned long
+	jg	sys_remap_file_pages
+
+	.globl	compat_sys_waitid_wrapper
+compat_sys_waitid_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# pid_t
+	llgtr	%r4,%r4			# siginfo_emu31_t *
+	lgfr	%r5,%r5			# int
+	llgtr	%r6,%r6			# struct rusage_emu31 *
+	jg	compat_sys_waitid
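+
+# The pattern throughout this file, in C terms (illustrative only):
+#	llgfr	%rN,%rN   ~  reg = (u64)(u32)reg;            zero-extend
+#	lgfr	%rN,%rN   ~  reg = (s64)(s32)reg;            sign-extend
+#	llgtr	%rN,%rN   ~  reg = (u64)(reg & 0x7fffffff);  31-bit address
+# Each 31-bit user argument is widened like this before branching
+# ("jg") to the native 64-bit system call entry point.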
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
new file mode 100644
index 000000000000..44df8dc07c59
--- /dev/null
+++ b/arch/s390/kernel/cpcmd.c
@@ -0,0 +1,111 @@
+/*
+ *  arch/s390/kernel/cpcmd.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Christian Borntraeger (cborntra@de.ibm.com),
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <asm/ebcdic.h>
+#include <asm/cpcmd.h>
+#include <asm/system.h>
+
+static DEFINE_SPINLOCK(cpcmd_lock);
+static char cpcmd_buf[240];
+
+/*
+ * the caller of __cpcmd has to ensure that the response buffer is below 2 GB
+ */
+void __cpcmd(char *cmd, char *response, int rlen)
+{
+	const int mask = 0x40000000L;
+	unsigned long flags;
+	int cmdlen;
+
+	spin_lock_irqsave(&cpcmd_lock, flags);
+	cmdlen = strlen(cmd);
+	BUG_ON(cmdlen >= sizeof(cpcmd_buf));	/* leave room for the NUL */
+	strcpy(cpcmd_buf, cmd);
+	ASCEBC(cpcmd_buf, cmdlen);
+
+	if (response != NULL && rlen > 0) {
+		memset(response, 0, rlen);
+#ifndef CONFIG_ARCH_S390X
+		asm volatile ("LRA   2,0(%0)\n\t"
+                              "LR    4,%1\n\t"
+                              "O     4,%4\n\t"
+                              "LRA   3,0(%2)\n\t"
+                              "LR    5,%3\n\t"
+                              ".long 0x83240008 # Diagnose X'08'\n\t"
+                              : /* no output */
+                              : "a" (cpcmd_buf), "d" (cmdlen),
+                                "a" (response), "d" (rlen), "m" (mask)
+                              : "cc", "2", "3", "4", "5" );
+#else /* CONFIG_ARCH_S390X */
+                asm volatile ("   lrag  2,0(%0)\n"
+                              "   lgr   4,%1\n"
+                              "   o     4,%4\n"
+                              "   lrag  3,0(%2)\n"
+                              "   lgr   5,%3\n"
+                              "   sam31\n"
+                              "   .long 0x83240008 # Diagnose X'08'\n"
+                              "   sam64"
+                              : /* no output */
+                              : "a" (cpcmd_buf), "d" (cmdlen),
+                                "a" (response), "d" (rlen), "m" (mask)
+                              : "cc", "2", "3", "4", "5" );
+#endif /* CONFIG_ARCH_S390X */
+                EBCASC(response, rlen);
+        } else {
+#ifndef CONFIG_ARCH_S390X
+                asm volatile ("LRA   2,0(%0)\n\t"
+                              "LR    3,%1\n\t"
+                              ".long 0x83230008 # Diagnose X'08'\n\t"
+                              : /* no output */
+                              : "a" (cpcmd_buf), "d" (cmdlen)
+                              : "cc", "2", "3" );
+#else /* CONFIG_ARCH_S390X */
+                asm volatile ("   lrag  2,0(%0)\n"
+                              "   lgr   3,%1\n"
+                              "   sam31\n"
+                              "   .long 0x83230008 # Diagnose X'08'\n"
+                              "   sam64"
+                              : /* no output */
+                              : "a" (cpcmd_buf), "d" (cmdlen)
+                              : "cc", "2", "3" );
+#endif /* CONFIG_ARCH_S390X */
+        }
+	spin_unlock_irqrestore(&cpcmd_lock, flags);
+}
+
+EXPORT_SYMBOL(__cpcmd);
+
+#ifdef CONFIG_ARCH_S390X
+void cpcmd(char *cmd, char *response, int rlen)
+{
+	char *lowbuf;
+	if ((rlen == 0) || (response == NULL)
+	    || !((unsigned long)response >> 31))
+		__cpcmd(cmd, response, rlen);
+	else {
+		lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
+		if (!lowbuf) {
+			printk(KERN_WARNING
+				"cpcmd: could not allocate response buffer\n");
+			return;
+		}
+		__cpcmd(cmd, lowbuf, rlen);
+		memcpy(response, lowbuf, rlen);
+		kfree(lowbuf);
+	}
+}
+
+EXPORT_SYMBOL(cpcmd);
+#endif		/* CONFIG_ARCH_S390X */
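+
+/*
+ * Usage sketch (not built): how a 64-bit caller might issue a CP
+ * command.  The command string and buffer size are illustrative; the
+ * point is that cpcmd() hides the "response buffer below 2 GB"
+ * requirement of __cpcmd by bounce-buffering through GFP_DMA memory.
+ * On 31-bit builds, where all memory is below 2 GB, __cpcmd can be
+ * called directly.
+ */
+#if 0
+static void example_query_userid(void)
+{
+	char response[128];
+
+	cpcmd("QUERY USERID", response, sizeof(response));
+	printk(KERN_INFO "cpcmd response: %s\n", response);
+}
+#endif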
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
new file mode 100644
index 000000000000..91f8ce5543d3
--- /dev/null
+++ b/arch/s390/kernel/debug.c
@@ -0,0 +1,1286 @@
+/*
+ *  arch/s390/kernel/debug.c
+ *   S/390 debug facility
+ *
+ *    Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
+ *                             IBM Corporation
+ *    Author(s): Michael Holzheu (holzheu@de.ibm.com),
+ *               Holger Smolinski (Holger.Smolinski@de.ibm.com)
+ *
+ *    Bugreports to: <Linux390@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/debug.h>
+
+#define DEBUG_PROLOG_ENTRY -1
+
+/* typedefs */
+
+typedef struct file_private_info {
+	loff_t offset;			/* offset of last read in file */
+	int    act_area;                /* number of last formatted area */
+	int    act_entry;               /* last formatted entry (offset */
+                                        /* relative to beginning of last */
+                                        /* formatted area) */
+	size_t act_entry_offset;        /* up to this offset we copied */
+					/* in last read the last formatted */
+					/* entry to userland */
+	char   temp_buf[2048];		/* buffer for output */
+	debug_info_t *debug_info_org;   /* original debug information */
+	debug_info_t *debug_info_snap;	/* snapshot of debug information */
+	struct debug_view *view;	/* used view of debug info */
+} file_private_info_t;
+
+typedef struct
+{
+	char *string;
+	/*
+	 * This assumes that all args are converted into longs;
+	 * on S/390 this is the case for all parameter types
+	 * except floats and long long (32 bit).
+	 */
+	long args[0];
+} debug_sprintf_entry_t;
+
+
+extern void tod_to_timeval(uint64_t todval, struct timeval *xtime);
+
+/* internal function prototypes */
+
+static int debug_init(void);
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+			    size_t user_len, loff_t * offset);
+static ssize_t debug_input(struct file *file, const char __user *user_buf,
+			   size_t user_len, loff_t * offset);
+static int debug_open(struct inode *inode, struct file *file);
+static int debug_close(struct inode *inode, struct file *file);
+static debug_info_t*  debug_info_create(char *name, int page_order, int nr_areas, int buf_size);
+static void debug_info_get(debug_info_t *);
+static void debug_info_put(debug_info_t *);
+static int debug_prolog_level_fn(debug_info_t * id,
+				 struct debug_view *view, char *out_buf);
+static int debug_input_level_fn(debug_info_t * id, struct debug_view *view,
+				struct file *file, const char __user *user_buf,
+				size_t user_buf_size, loff_t * offset);
+static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
+                                struct file *file, const char __user *user_buf,
+                                size_t user_buf_size, loff_t * offset);
+static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
+                                char *out_buf, const char *in_buf);
+static int debug_raw_format_fn(debug_info_t * id,
+				 struct debug_view *view, char *out_buf,
+				 const char *in_buf);
+static int debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
+                         int area, debug_entry_t * entry, char *out_buf);
+
+static int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
+				   char *out_buf, debug_sprintf_entry_t *curr_event);
+
+/* globals */
+
+struct debug_view debug_raw_view = {
+	"raw",
+	NULL,
+	&debug_raw_header_fn,
+	&debug_raw_format_fn,
+	NULL,
+	NULL
+};
+
+struct debug_view debug_hex_ascii_view = {
+	"hex_ascii",
+	NULL,
+	&debug_dflt_header_fn,
+	&debug_hex_ascii_format_fn,
+	NULL,
+	NULL
+};
+
+struct debug_view debug_level_view = {
+	"level",
+	&debug_prolog_level_fn,
+	NULL,
+	NULL,
+	&debug_input_level_fn,
+	NULL
+};
+
+struct debug_view debug_flush_view = {
+        "flush",
+        NULL,
+        NULL,
+        NULL,
+        &debug_input_flush_fn,
+        NULL
+};
+
+struct debug_view debug_sprintf_view = {
+	"sprintf",
+	NULL,
+	&debug_dflt_header_fn,
+	(debug_format_proc_t*)&debug_sprintf_format_fn,
+	NULL,
+	NULL
+};
+
+
+unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
+
+/* static globals */
+
+static debug_info_t *debug_area_first = NULL;
+static debug_info_t *debug_area_last = NULL;
+DECLARE_MUTEX(debug_lock);
+
+static int initialized;
+
+static struct file_operations debug_file_ops = {
+	.owner	 = THIS_MODULE,
+	.read    = debug_output,
+	.write   = debug_input,	
+	.open    = debug_open,
+	.release = debug_close,
+};
+
+static struct proc_dir_entry *debug_proc_root_entry;
+
+/* functions */
+
+/*
+ * debug_info_alloc
+ * - alloc new debug-info
+ */
+
+static debug_info_t*  debug_info_alloc(char *name, int page_order,
+                                        int nr_areas, int buf_size)
+{
+	debug_info_t* rc;
+	int i;
+
+	/* alloc everything */
+
+	rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_ATOMIC);
+	if(!rc)
+		goto fail_malloc_rc;
+	rc->active_entry = (int*)kmalloc(nr_areas * sizeof(int), GFP_ATOMIC);
+	if(!rc->active_entry)
+		goto fail_malloc_active_entry;
+	memset(rc->active_entry, 0, nr_areas * sizeof(int));
+	rc->areas = (debug_entry_t **) kmalloc(nr_areas *
+						sizeof(debug_entry_t *),
+						GFP_ATOMIC);
+	if (!rc->areas)
+		goto fail_malloc_areas;
+	for (i = 0; i < nr_areas; i++) {
+		rc->areas[i] = (debug_entry_t *) __get_free_pages(GFP_ATOMIC,
+								page_order);
+		if (!rc->areas[i]) {
+			for (i--; i >= 0; i--) {
+				free_pages((unsigned long) rc->areas[i],
+						page_order);
+			}
+			goto fail_malloc_areas2;
+		} else {
+			memset(rc->areas[i], 0, PAGE_SIZE << page_order);
+		}
+	}
+
+	/* initialize members */
+
+	spin_lock_init(&rc->lock);
+	rc->page_order  = page_order;
+	rc->nr_areas    = nr_areas;
+	rc->active_area = 0;
+	rc->level       = DEBUG_DEFAULT_LEVEL;
+	rc->buf_size    = buf_size;
+	rc->entry_size  = sizeof(debug_entry_t) + buf_size;
+	strlcpy(rc->name, name, sizeof(rc->name));
+	memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
+#ifdef CONFIG_PROC_FS
+	memset(rc->proc_entries, 0 ,DEBUG_MAX_VIEWS *
+		sizeof(struct proc_dir_entry*));
+#endif /* CONFIG_PROC_FS */
+	atomic_set(&(rc->ref_count), 0);
+
+	return rc;
+
+fail_malloc_areas2:
+	kfree(rc->areas);
+fail_malloc_areas:
+	kfree(rc->active_entry);
+fail_malloc_active_entry:
+	kfree(rc);
+fail_malloc_rc:
+	return NULL;
+}
+
+/*
+ * debug_info_free
+ * - free memory debug-info
+ */
+
+static void debug_info_free(debug_info_t* db_info){
+	int i;
+	for (i = 0; i < db_info->nr_areas; i++) {
+		free_pages((unsigned long) db_info->areas[i],
+		db_info->page_order);
+	}
+	kfree(db_info->areas);
+	kfree(db_info->active_entry);
+	kfree(db_info);
+}
+
+/*
+ * debug_info_create
+ * - create new debug-info
+ */
+
+static debug_info_t*  debug_info_create(char *name, int page_order, 
+                                        int nr_areas, int buf_size)
+{
+	debug_info_t* rc;
+
+        rc = debug_info_alloc(name, page_order, nr_areas, buf_size);
+        if(!rc) 
+		goto out;
+
+
+	/* create proc root directory */
+        rc->proc_root_entry = proc_mkdir(rc->name, debug_proc_root_entry);
+
+	/* append new element to linked list */
+        if (debug_area_first == NULL) {
+                /* first element in list */
+                debug_area_first = rc;
+                rc->prev = NULL;
+        } else {
+                /* append element to end of list */
+                debug_area_last->next = rc;
+                rc->prev = debug_area_last;
+        }
+        debug_area_last = rc;
+        rc->next = NULL;
+
+	debug_info_get(rc);
+out:
+	return rc;
+}
+
+/*
+ * debug_info_copy
+ * - copy debug-info
+ */
+
+static debug_info_t* debug_info_copy(debug_info_t* in)
+{
+        int i;
+        debug_info_t* rc;
+        rc = debug_info_alloc(in->name, in->page_order, 
+                                in->nr_areas, in->buf_size);
+        if(!rc)
+                goto out;
+
+        for(i = 0; i < in->nr_areas; i++){
+                memcpy(rc->areas[i],in->areas[i], PAGE_SIZE << in->page_order);
+        }
+out:
+        return rc;
+}
+
+/*
+ * debug_info_get
+ * - increments reference count for debug-info
+ */
+
+static void debug_info_get(debug_info_t * db_info)
+{
+	if (db_info)
+		atomic_inc(&db_info->ref_count);
+}
+
+/*
+ * debug_info_put:
+ * - decreases reference count for debug-info and frees it if necessary
+ */
+
+static void debug_info_put(debug_info_t *db_info)
+{
+	int i;
+
+	if (!db_info)
+		return;
+	if (atomic_dec_and_test(&db_info->ref_count)) {
+#ifdef DEBUG
+		printk(KERN_INFO "debug: freeing debug area %p (%s)\n",
+		       db_info, db_info->name);
+#endif
+		for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+			if (db_info->views[i] == NULL)
+				continue;
+#ifdef CONFIG_PROC_FS
+			remove_proc_entry(db_info->proc_entries[i]->name,
+					  db_info->proc_root_entry);
+#endif
+		}
+#ifdef CONFIG_PROC_FS
+		remove_proc_entry(db_info->proc_root_entry->name,
+				  debug_proc_root_entry);
+#endif
+		if(db_info == debug_area_first)
+			debug_area_first = db_info->next;
+		if(db_info == debug_area_last)
+			debug_area_last = db_info->prev;
+		if(db_info->prev) db_info->prev->next = db_info->next;
+		if(db_info->next) db_info->next->prev = db_info->prev;
+		debug_info_free(db_info);
+	}
+}
+
+/*
+ * debug_format_entry:
+ * - format one debug entry and return size of formatted data
+ */
+
+static int debug_format_entry(file_private_info_t *p_info)
+{
+	debug_info_t *id_org    = p_info->debug_info_org;
+	debug_info_t *id_snap   = p_info->debug_info_snap;
+	struct debug_view *view = p_info->view;
+	debug_entry_t *act_entry;
+	size_t len = 0;
+	if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
+		/* print prolog */
+        	if (view->prolog_proc)
+                	len += view->prolog_proc(id_org, view,p_info->temp_buf);
+		goto out;
+	}
+
+	act_entry = (debug_entry_t *) ((char*)id_snap->areas[p_info->act_area] +
+					p_info->act_entry);
+                        
+	if (act_entry->id.stck == 0LL)
+			goto out;  /* empty entry */
+	if (view->header_proc)
+		len += view->header_proc(id_org, view, p_info->act_area, 
+					act_entry, p_info->temp_buf + len);
+	if (view->format_proc)
+		len += view->format_proc(id_org, view, p_info->temp_buf + len,
+						DEBUG_DATA(act_entry));
+      out:
+        return len;
+}
+
+/*
+ * debug_next_entry:
+ * - advance to the next entry in p_info
+ */
+
+static inline int debug_next_entry(file_private_info_t *p_info)
+{
+	debug_info_t *id = p_info->debug_info_snap;
+	if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
+		p_info->act_entry = 0;
+		goto out;
+	}
+	if ((p_info->act_entry += id->entry_size)
+		> ((PAGE_SIZE << (id->page_order)) 
+		- id->entry_size)){
+
+		/* next area */
+		p_info->act_entry = 0;
+        	p_info->act_area++;
+        	if(p_info->act_area >= id->nr_areas)
+			return 1;
+	}
+out:
+	return 0;	
+}
+
+/*
+ * debug_output:
+ * - called for user read()
+ * - copies formatted debug entries to the user buffer
+ */
+
+static ssize_t debug_output(struct file *file,		/* file descriptor */
+			    char __user *user_buf,	/* user buffer */
+			    size_t  len,		/* length of buffer */
+			    loff_t *offset)	      /* offset in the file */
+{
+	size_t count = 0;
+	size_t entry_offset, size = 0;
+	file_private_info_t *p_info;
+
+	p_info = ((file_private_info_t *) file->private_data);
+	if (*offset != p_info->offset) 
+		return -EPIPE;
+	if(p_info->act_area >= p_info->debug_info_snap->nr_areas)
+		return 0;
+
+	entry_offset = p_info->act_entry_offset;
+
+	while(count < len){
+		size = debug_format_entry(p_info);
+		size = min((len - count), (size - entry_offset));
+
+		if(size){
+			if (copy_to_user(user_buf + count, 
+					p_info->temp_buf + entry_offset, size))
+			return -EFAULT;
+		}
+		count += size;
+		entry_offset = 0;
+		if(count != len)
+			if(debug_next_entry(p_info)) 
+				goto out;
+	}
+out:
+	p_info->offset           = *offset + count;
+	p_info->act_entry_offset = size;	
+	*offset = p_info->offset;
+	return count;
+}
+
+/*
+ * debug_input:
+ * - called for user write()
+ * - calls input function of view
+ */
+
+static ssize_t debug_input(struct file *file,
+			   const char __user *user_buf, size_t length,
+			   loff_t *offset)
+{
+	int rc = 0;
+	file_private_info_t *p_info;
+
+	down(&debug_lock);
+	p_info = ((file_private_info_t *) file->private_data);
+	if (p_info->view->input_proc)
+		rc = p_info->view->input_proc(p_info->debug_info_org,
+					      p_info->view, file, user_buf,
+					      length, offset);
+	else
+		rc = -EPERM;
+	up(&debug_lock);
+	return rc;		/* number of input characters */
+}
+
+/*
+ * debug_open:
+ * - called for user open()
+ * - attaches a consistent snapshot of the debug areas to the
+ *   private_data area of the file handle
+ */
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	int i = 0, rc = 0;
+	file_private_info_t *p_info;
+	debug_info_t *debug_info, *debug_info_snapshot;
+
+#ifdef DEBUG
+	printk("debug_open\n");
+#endif
+	down(&debug_lock);
+
+	/* find debug log and view */
+
+	debug_info = debug_area_first;
+	while(debug_info != NULL){
+		for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+			if (debug_info->views[i] == NULL)
+				continue;
+			else if (debug_info->proc_entries[i] ==
+				 PDE(file->f_dentry->d_inode)) {
+				goto found;	/* found view ! */
+			}
+		}
+		debug_info = debug_info->next;
+	}
+	/* no entry found */
+	rc = -EINVAL;
+	goto out;
+
+      found:
+
+	/* make snapshot of current debug areas to get it consistent */
+
+	debug_info_snapshot = debug_info_copy(debug_info);
+
+	if(!debug_info_snapshot){
+#ifdef DEBUG
+		printk(KERN_ERR "debug_open: debug_info_copy failed (out of mem)\n");
+#endif
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if ((file->private_data =
+	     kmalloc(sizeof(file_private_info_t), GFP_ATOMIC)) == 0) {
+#ifdef DEBUG
+		printk(KERN_ERR "debug_open: kmalloc failed\n");
+#endif
+		debug_info_free(debug_info_snapshot);	
+		rc = -ENOMEM;
+		goto out;
+	}
+	p_info = (file_private_info_t *) file->private_data;
+	p_info->offset = 0;
+	p_info->debug_info_snap = debug_info_snapshot;
+	p_info->debug_info_org  = debug_info;
+	p_info->view = debug_info->views[i];
+	p_info->act_area = 0;
+	p_info->act_entry = DEBUG_PROLOG_ENTRY;
+	p_info->act_entry_offset = 0;
+
+	debug_info_get(debug_info);
+
+      out:
+	up(&debug_lock);
+	return rc;
+}
+
+/*
+ * debug_close:
+ * - called for user close()
+ * - deletes the private_data area of the file handle
+ */
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+	file_private_info_t *p_info;
+#ifdef DEBUG
+	printk("debug_close\n");
+#endif
+	p_info = (file_private_info_t *) file->private_data;
+	debug_info_free(p_info->debug_info_snap);
+	debug_info_put(p_info->debug_info_org);
+	kfree(file->private_data);
+	return 0;		/* success */
+}
+
+/*
+ * debug_register:
+ * - creates and initializes debug area for the caller
+ * - returns handle for debug area
+ */
+
+debug_info_t *debug_register
+    (char *name, int page_order, int nr_areas, int buf_size) 
+{
+	debug_info_t *rc = NULL;
+
+	if (!initialized)
+		BUG();
+	down(&debug_lock);
+
+        /* create new debug_info */
+
+	rc = debug_info_create(name, page_order, nr_areas, buf_size);
+	if(!rc) 
+		goto out;
+	debug_register_view(rc, &debug_level_view);
+        debug_register_view(rc, &debug_flush_view);
+#ifdef DEBUG
+	printk(KERN_INFO
+	       "debug: reserved %d areas of %d pages for debugging %s\n",
+	       nr_areas, 1 << page_order, rc->name);
+#endif
+      out:
+        if (rc == NULL){
+		printk(KERN_ERR "debug: debug_register failed for %s\n",name);
+        }
+	up(&debug_lock);
+	return rc;
+}
+
+/*
+ * debug_unregister:
+ * - give back debug area
+ */
+
+void debug_unregister(debug_info_t * id)
+{
+	if (!id)
+		goto out;
+	down(&debug_lock);
+#ifdef DEBUG
+	printk(KERN_INFO "debug: unregistering %s\n", id->name);
+#endif
+	debug_info_put(id);
+	up(&debug_lock);
+
+      out:
+	return;
+}
+
+/*
+ * debug_set_level:
+ * - set the debug level of a debug area
+ */
+
+void debug_set_level(debug_info_t* id, int new_level)
+{
+	unsigned long flags;
+	if(!id)
+		return;	
+	spin_lock_irqsave(&id->lock,flags);
+        if(new_level == DEBUG_OFF_LEVEL){
+                id->level = DEBUG_OFF_LEVEL;
+                printk(KERN_INFO "debug: %s: switched off\n",id->name);
+        } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
+                printk(KERN_INFO
+                        "debug: %s: level %i is out of range (%i - %i)\n",
+                        id->name, new_level, 0, DEBUG_MAX_LEVEL);
+        } else {
+                id->level = new_level;
+#ifdef DEBUG
+                printk(KERN_INFO 
+			"debug: %s: new level %i\n",id->name,id->level);
+#endif
+        }
+	spin_unlock_irqrestore(&id->lock,flags);
+}
+
+
+/*
+ * proceed_active_entry:
+ * - set active entry to next in the ring buffer
+ */
+
+static inline void proceed_active_entry(debug_info_t * id)
+{
+	if ((id->active_entry[id->active_area] += id->entry_size)
+	    > ((PAGE_SIZE << (id->page_order)) - id->entry_size))
+		id->active_entry[id->active_area] = 0;
+}
+
+/*
+ * proceed_active_area:
+ * - set active area to next in the ring buffer
+ */
+
+static inline void proceed_active_area(debug_info_t * id)
+{
+	id->active_area++;
+	id->active_area = id->active_area % id->nr_areas;
+}
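+
+/*
+ * Worked example of the ring geometry above (numbers are illustrative):
+ * with page_order 0 (4096-byte areas) and an entry_size of 64 bytes,
+ * each area holds 4096 / 64 = 64 entries.  proceed_active_entry wraps
+ * active_entry back to offset 0 once it would move past 4096 - 64, and
+ * proceed_active_area steps active_area round-robin through nr_areas.
+ */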
+
+/*
+ * get_active_entry:
+ */
+
+static inline debug_entry_t *get_active_entry(debug_info_t * id)
+{
+	return (debug_entry_t *) ((char *) id->areas[id->active_area] +
+				  id->active_entry[id->active_area]);
+}
+
+/*
+ * debug_finish_entry:
+ * - set timestamp, caller address, cpu number etc.
+ */
+
+static inline void debug_finish_entry(debug_info_t * id, debug_entry_t* active,
+		int level, int exception)
+{
+	STCK(active->id.stck);
+	active->id.fields.cpuid = smp_processor_id();
+	active->caller = __builtin_return_address(0);
+	active->id.fields.exception = exception;
+	active->id.fields.level     = level;
+	proceed_active_entry(id);
+	if(exception)
+		proceed_active_area(id);
+}
+
+static int debug_stoppable=1;
+static int debug_active=1;
+
+#define CTL_S390DBF 5677
+#define CTL_S390DBF_STOPPABLE 5678
+#define CTL_S390DBF_ACTIVE 5679
+
+/*
+ * proc handler for the debug_active sysctl:
+ * always allow read, allow write only if debug_stoppable is set or
+ * if debug_active is already off
+ */
+static int s390dbf_procactive(ctl_table *table, int write, struct file *filp,
+                     void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	if (!write || debug_stoppable || !debug_active)
+		return proc_dointvec(table, write, filp, buffer, lenp, ppos);
+	else
+		return 0;
+}
+
+
+static struct ctl_table s390dbf_table[] = {
+	{
+		.ctl_name       = CTL_S390DBF_STOPPABLE,
+		.procname       = "debug_stoppable",
+		.data		= &debug_stoppable,
+		.maxlen		= sizeof(int),
+		.mode           = S_IRUGO | S_IWUSR,
+		.proc_handler   = &proc_dointvec,
+		.strategy	= &sysctl_intvec,
+	},
+	 {
+		.ctl_name       = CTL_S390DBF_ACTIVE,
+		.procname       = "debug_active",
+		.data		= &debug_active,
+		.maxlen		= sizeof(int),
+		.mode           = S_IRUGO | S_IWUSR,
+		.proc_handler   = &s390dbf_procactive,
+		.strategy	= &sysctl_intvec,
+	},
+	{ .ctl_name = 0 }
+};
+
+static struct ctl_table s390dbf_dir_table[] = {
+	{
+		.ctl_name       = CTL_S390DBF,
+		.procname       = "s390dbf",
+		.maxlen         = 0,
+		.mode           = S_IRUGO | S_IXUGO,
+		.child          = s390dbf_table,
+	},
+	{ .ctl_name = 0 }
+};
+
+struct ctl_table_header *s390dbf_sysctl_header;
+
+void debug_stop_all(void)
+{
+	if (debug_stoppable)
+		debug_active = 0;
+}
+
+
+/*
+ * debug_event_common:
+ * - write debug entry with given size
+ */
+
+debug_entry_t *debug_event_common(debug_info_t * id, int level, const void *buf,
+			          int len)
+{
+	unsigned long flags;
+	debug_entry_t *active;
+
+	if (!debug_active)
+		return NULL;
+	spin_lock_irqsave(&id->lock, flags);
+	active = get_active_entry(id);
+	memset(DEBUG_DATA(active), 0, id->buf_size);
+	memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
+	debug_finish_entry(id, active, level, 0);
+	spin_unlock_irqrestore(&id->lock, flags);
+
+	return active;
+}
+
+/*
+ * debug_exception_common:
+ * - write debug entry with given size and switch to next debug area
+ */
+
+debug_entry_t *debug_exception_common(debug_info_t * id, int level, 
+                                      const void *buf, int len)
+{
+	unsigned long flags;
+	debug_entry_t *active;
+
+	if (!debug_active)
+		return NULL;
+	spin_lock_irqsave(&id->lock, flags);
+	active = get_active_entry(id);
+	memset(DEBUG_DATA(active), 0, id->buf_size);
+	memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
+	debug_finish_entry(id, active, level, 1);
+	spin_unlock_irqrestore(&id->lock, flags);
+
+	return active;
+}
+
+/*
+ * counts arguments in format string for sprintf view
+ */
+
+static inline int debug_count_numargs(char *string)
+{
+	int numargs=0;
+
+	while(*string) {
+		if(*string++=='%')
+			numargs++;
+	}
+	return(numargs);
+}
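+
+/*
+ * Note that every '%' is counted, so a literal "%%" in the format
+ * string costs one stored (but unused) argument slot.  With at most
+ * buf_size / sizeof(long) - 1 slots per entry this is harmless.
+ */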
+
+/*
+ * debug_sprintf_event:
+ */
+
+debug_entry_t *debug_sprintf_event(debug_info_t* id,
+                                   int level,char *string,...)
+{
+	va_list   ap;
+	int numargs,idx;
+	unsigned long flags;
+	debug_sprintf_entry_t *curr_event;
+	debug_entry_t *active;
+
+	if((!id) || (level > id->level))
+		return NULL;
+	if (!debug_active)
+		return NULL;
+	numargs=debug_count_numargs(string);
+
+	spin_lock_irqsave(&id->lock, flags);
+	active = get_active_entry(id);
+	curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active);
+	va_start(ap,string);
+	curr_event->string=string;
+	for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
+		curr_event->args[idx]=va_arg(ap,long);
+	va_end(ap);
+	debug_finish_entry(id, active, level, 0);
+	spin_unlock_irqrestore(&id->lock, flags);
+
+	return active;
+}
+
+/*
+ * debug_sprintf_exception:
+ */
+
+debug_entry_t *debug_sprintf_exception(debug_info_t* id,
+                                       int level,char *string,...)
+{
+	va_list   ap;
+	int numargs,idx;
+	unsigned long flags;
+	debug_sprintf_entry_t *curr_event;
+	debug_entry_t *active;
+
+	if((!id) || (level > id->level))
+		return NULL;
+	if (!debug_active)
+		return NULL;
+
+	numargs=debug_count_numargs(string);
+
+	spin_lock_irqsave(&id->lock, flags);
+	active = get_active_entry(id);
+	curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active);
+	va_start(ap,string);
+	curr_event->string=string;
+	for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
+		curr_event->args[idx]=va_arg(ap,long);
+	va_end(ap);
+	debug_finish_entry(id, active, level, 1);
+	spin_unlock_irqrestore(&id->lock, flags);
+
+	return active;
+}
+
+/*
+ * debug_init:
+ * - is called exactly once to initialize the debug feature
+ */
+
+static int __init debug_init(void)
+{
+	int rc = 0;
+
+	s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table, 1);
+	down(&debug_lock);
+#ifdef CONFIG_PROC_FS
+	debug_proc_root_entry = proc_mkdir(DEBUG_DIR_ROOT, NULL);
+#endif /* CONFIG_PROC_FS */
+	printk(KERN_INFO "debug: Initialization complete\n");
+	initialized = 1;
+	up(&debug_lock);
+
+	return rc;
+}
+
+/*
+ * debug_register_view:
+ */
+
+int debug_register_view(debug_info_t * id, struct debug_view *view)
+{
+	int rc = 0;
+	int i;
+	unsigned long flags;
+	mode_t mode = S_IFREG;
+	struct proc_dir_entry *pde;
+
+	if (!id)
+		goto out;
+	if (view->prolog_proc || view->format_proc || view->header_proc)
+		mode |= S_IRUSR;
+	if (view->input_proc)
+		mode |= S_IWUSR;
+	pde = create_proc_entry(view->name, mode, id->proc_root_entry);
+	if (!pde){
+		printk(KERN_WARNING "debug: create_proc_entry() failed! Cannot register view %s/%s\n", id->name,view->name);
+		rc = -1;
+		goto out;
+	}
+
+	spin_lock_irqsave(&id->lock, flags);
+	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+		if (id->views[i] == NULL)
+			break;
+	}
+	if (i == DEBUG_MAX_VIEWS) {
+		printk(KERN_WARNING "debug: cannot register view %s/%s\n",
+			id->name,view->name);
+		printk(KERN_WARNING 
+			"debug: maximum number of views reached (%i)!\n", i);
+		remove_proc_entry(pde->name, id->proc_root_entry);
+		rc = -1;
+	}
+	else {
+		id->views[i] = view;
+		pde->proc_fops = &debug_file_ops;
+		id->proc_entries[i] = pde;
+	}
+	spin_unlock_irqrestore(&id->lock, flags);
+      out:
+	return rc;
+}
+
+/*
+ * debug_unregister_view:
+ */
+
+int debug_unregister_view(debug_info_t * id, struct debug_view *view)
+{
+	int rc = 0;
+	int i;
+	unsigned long flags;
+
+	if (!id)
+		goto out;
+	spin_lock_irqsave(&id->lock, flags);
+	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+		if (id->views[i] == view)
+			break;
+	}
+	if (i == DEBUG_MAX_VIEWS)
+		rc = -1;
+	else {
+#ifdef CONFIG_PROC_FS
+		remove_proc_entry(id->proc_entries[i]->name,
+				  id->proc_root_entry);
+#endif
+		id->views[i] = NULL;
+		rc = 0;
+	}
+	spin_unlock_irqrestore(&id->lock, flags);
+      out:
+	return rc;
+}
+
+/*
+ * functions for debug views
+ */
+
+/*
+ * prints out the current debug level
+ */
+
+static int debug_prolog_level_fn(debug_info_t * id,
+				 struct debug_view *view, char *out_buf)
+{
+	int rc = 0;
+
+	if(id->level == -1) rc = sprintf(out_buf,"-\n");
+	else rc = sprintf(out_buf, "%i\n", id->level);
+	return rc;
+}
+
+/*
+ * reads new debug level
+ */
+
+static int debug_input_level_fn(debug_info_t * id, struct debug_view *view,
+				struct file *file, const char __user *user_buf,
+				size_t in_buf_size, loff_t * offset)
+{
+	char input_buf[1];
+	int rc = in_buf_size;
+
+	if (*offset != 0)
+		goto out;
+	if (copy_from_user(input_buf, user_buf, 1)){
+		rc = -EFAULT;
+		goto out;
+	}
+	if (isdigit(input_buf[0])) {
+		int new_level = ((int) input_buf[0] - (int) '0');
+		debug_set_level(id, new_level);
+	} else if(input_buf[0] == '-') {
+		debug_set_level(id, DEBUG_OFF_LEVEL);
+	} else {
+		printk(KERN_INFO "debug: level `%c` is not valid\n",
+		       input_buf[0]);
+	}
+      out:
+	*offset += in_buf_size;
+	return rc;		/* number of input characters */
+}
+
+
+/*
+ * flushes debug areas
+ */
+ 
+void debug_flush(debug_info_t* id, int area)
+{
+        unsigned long flags;
+        int i;
+
+        if(!id)
+                return;
+        spin_lock_irqsave(&id->lock,flags);
+        if(area == DEBUG_FLUSH_ALL){
+                id->active_area = 0;
+                memset(id->active_entry, 0, id->nr_areas * sizeof(int));
+                for (i = 0; i < id->nr_areas; i++) 
+                        memset(id->areas[i], 0, PAGE_SIZE << id->page_order);
+                printk(KERN_INFO "debug: %s: all areas flushed\n",id->name);
+        } else if(area >= 0 && area < id->nr_areas) {
+                id->active_entry[area] = 0;
+                memset(id->areas[area], 0, PAGE_SIZE << id->page_order);
+                printk(KERN_INFO
+                        "debug: %s: area %i has been flushed\n",
+                        id->name, area);
+        } else {
+                printk(KERN_INFO
+                        "debug: %s: area %i cannot be flushed (range: %i - %i)\n",
+                        id->name, area, 0, id->nr_areas-1);
+        }
+        spin_unlock_irqrestore(&id->lock,flags);
+}
+
+/*
+ * view function: flushes debug areas 
+ */
+
+static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
+                                struct file *file, const char __user *user_buf,
+                                size_t in_buf_size, loff_t * offset)
+{
+        char input_buf[1];
+        int rc = in_buf_size;
+ 
+        if (*offset != 0)
+                goto out;
+        if (copy_from_user(input_buf, user_buf, 1)){
+                rc = -EFAULT;
+                goto out;
+        }
+        if(input_buf[0] == '-') { 
+                debug_flush(id, DEBUG_FLUSH_ALL);
+                goto out;
+        }
+        if (isdigit(input_buf[0])) {
+                int area = ((int) input_buf[0] - (int) '0');
+                debug_flush(id, area);
+                goto out;
+        }
+
+        printk(KERN_INFO "debug: area `%c` is not valid\n", input_buf[0]);
+
+      out:
+        *offset += in_buf_size;
+        return rc;              /* number of input characters */
+}
+
+/*
+ * prints debug header in raw format
+ */
+
+int debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
+                         int area, debug_entry_t * entry, char *out_buf)
+{
+        int rc;
+
+	rc = sizeof(debug_entry_t);
+	memcpy(out_buf,entry,sizeof(debug_entry_t));
+        return rc;
+}
+
+/*
+ * prints debug data in raw format
+ */
+
+static int debug_raw_format_fn(debug_info_t * id, struct debug_view *view,
+			       char *out_buf, const char *in_buf)
+{
+	int rc;
+
+	rc = id->buf_size;
+	memcpy(out_buf, in_buf, id->buf_size);
+	return rc;
+}
+
+/*
+ * prints debug data in hex/ascii format
+ */
+
+static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
+		    		  char *out_buf, const char *in_buf)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < id->buf_size; i++) {
+                rc += sprintf(out_buf + rc, "%02x ",
+                              ((unsigned char *) in_buf)[i]);
+        }
+	rc += sprintf(out_buf + rc, "| ");
+	for (i = 0; i < id->buf_size; i++) {
+		unsigned char c = in_buf[i];
+		if (!isprint(c))
+			rc += sprintf(out_buf + rc, ".");
+		else
+			rc += sprintf(out_buf + rc, "%c", c);
+	}
+	rc += sprintf(out_buf + rc, "\n");
+	return rc;
+}
+
+/*
+ * prints header for debug entry
+ */
+
+int debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
+			 int area, debug_entry_t * entry, char *out_buf)
+{
+	struct timeval time_val;
+	unsigned long long time;
+	char *except_str;
+	unsigned long caller;
+	int rc = 0;
+	unsigned int level;
+
+	level = entry->id.fields.level;
+	time = entry->id.stck;
+	/* adjust the TOD clock to the Unix epoch (1970) */
+	time -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
+	tod_to_timeval(time, &time_val);
+
+	if (entry->id.fields.exception)
+		except_str = "*";
+	else
+		except_str = "-";
+	caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN;
+	rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %02i %p  ",
+		      area, time_val.tv_sec, time_val.tv_usec, level,
+		      except_str, entry->id.fields.cpuid, (void *) caller);
+	return rc;
+}
+
+/*
+ * prints debug data sprintf-formatted:
+ * debug_sprintf_event/exception calls must be used together with this view
+ */
+
+#define DEBUG_SPRINTF_MAX_ARGS 10
+
+int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
+                            char *out_buf, debug_sprintf_entry_t *curr_event)
+{
+	int num_longs, num_used_args = 0,i, rc = 0;
+	int index[DEBUG_SPRINTF_MAX_ARGS];
+
+	/* number of longs that fit into one entry */
+	num_longs = id->buf_size / sizeof(long);
+
+	if(num_longs < 1)
+		goto out; /* bufsize of entry too small */
+	if(num_longs == 1) {
+		/* no args, we use only the string */
+		strcpy(out_buf, curr_event->string);
+		rc = strlen(curr_event->string);
+		goto out;
+	}
+
+	/* number of arguments used for sprintf (without the format string) */
+	num_used_args   = min(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));
+
+	memset(index,0, DEBUG_SPRINTF_MAX_ARGS * sizeof(int));
+
+	for(i = 0; i < num_used_args; i++)
+		index[i] = i;
+
+	rc =  sprintf(out_buf, curr_event->string, curr_event->args[index[0]],
+		curr_event->args[index[1]], curr_event->args[index[2]],
+		curr_event->args[index[3]], curr_event->args[index[4]],
+		curr_event->args[index[5]], curr_event->args[index[6]],
+		curr_event->args[index[7]], curr_event->args[index[8]],
+		curr_event->args[index[9]]);
+
+out:
+
+	return rc;
+}
+
+/*
+ * clean up module
+ */
+void __exit debug_exit(void)
+{
+#ifdef DEBUG
+	printk("debug_cleanup_module: \n");
+#endif
+#ifdef CONFIG_PROC_FS
+	remove_proc_entry(debug_proc_root_entry->name, NULL);
+#endif /* CONFIG_PROC_FS */
+	unregister_sysctl_table(s390dbf_sysctl_header);
+	return;
+}
+
+/*
+ * module definitions
+ */
+core_initcall(debug_init);
+module_exit(debug_exit);
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(debug_register);
+EXPORT_SYMBOL(debug_unregister); 
+EXPORT_SYMBOL(debug_set_level);
+EXPORT_SYMBOL(debug_stop_all);
+EXPORT_SYMBOL(debug_register_view);
+EXPORT_SYMBOL(debug_unregister_view);
+EXPORT_SYMBOL(debug_event_common);
+EXPORT_SYMBOL(debug_exception_common);
+EXPORT_SYMBOL(debug_hex_ascii_view);
+EXPORT_SYMBOL(debug_raw_view);
+EXPORT_SYMBOL(debug_dflt_header_fn);
+EXPORT_SYMBOL(debug_sprintf_view);
+EXPORT_SYMBOL(debug_sprintf_exception);
+EXPORT_SYMBOL(debug_sprintf_event);
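+
+/*
+ * Usage sketch (not built): how a driver typically uses this facility.
+ * The area name, sizes, level and message below are illustrative.
+ */
+#if 0
+static debug_info_t *my_dbf;
+
+static int __init my_driver_init(void)
+{
+	/* 2 areas of 1 page each (page_order 0), 16 bytes per entry */
+	my_dbf = debug_register("mydriver", 0, 2, 16);
+	if (!my_dbf)
+		return -ENOMEM;
+	debug_register_view(my_dbf, &debug_sprintf_view);
+	debug_set_level(my_dbf, 3);
+	debug_sprintf_event(my_dbf, 2, "init done, rc=%d", 0);
+	return 0;
+}
+
+static void __exit my_driver_exit(void)
+{
+	debug_unregister(my_dbf);
+}
+#endif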
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
new file mode 100644
index 000000000000..bb0f973137f0
--- /dev/null
+++ b/arch/s390/kernel/ebcdic.c
@@ -0,0 +1,400 @@
+/*
+ *  arch/s390/kernel/ebcdic.c
+ *    EBCDIC -> ASCII, ASCII -> EBCDIC,
+ *    upper to lower case (EBCDIC) conversion tables.
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *               Martin Peschke <peschke@fh-brandenburg.de>
+ */
+
+#include <linux/module.h>
+#include <asm/types.h>
+
+/*
+ * ASCII (IBM PC 437)  -> EBCDIC 037
+ */
+__u8 _ascebc[256] =
+{
+ /*00 NUL   SOH   STX   ETX   EOT   ENQ   ACK   BEL */
+     0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
+ /*08  BS    HT    LF    VT    FF    CR    SO    SI */
+ /*              ->NL                               */
+     0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ /*10 DLE   DC1   DC2   DC3   DC4   NAK   SYN   ETB */
+     0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
+ /*18 CAN    EM   SUB   ESC    FS    GS    RS    US */
+ /*                               ->IGS ->IRS ->IUS */
+     0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
+ /*20  SP     !     "     #     $     %     &     ' */
+     0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
+ /*28   (     )     *     +     ,     -    .      / */
+     0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
+ /*30   0     1     2     3     4     5     6     7 */
+     0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ /*38   8     9     :     ;     <     =     >     ? */
+     0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
+ /*40   @     A     B     C     D     E     F     G */
+     0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ /*48   H     I     J     K     L     M     N     O */
+     0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
+ /*50   P     Q     R     S     T     U     V     W */
+     0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
+ /*58   X     Y     Z     [     \     ]     ^     _ */
+     0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D,
+ /*60   `     a     b     c     d     e     f     g */
+     0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ /*68   h     i     j     k     l     m     n     o */
+     0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
+ /*70   p     q     r     s     t     u     v     w */
+     0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
+ /*78   x     y     z     {     |     }     ~    DL */
+     0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07,
+ /*80*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*88*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*90*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*98*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*A0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*A8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*B0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*B8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*C0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*C8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*D0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*D8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*E0        sz						*/
+     0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*E8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*F0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*F8*/
+     0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
+};
+
+/*
+ * EBCDIC 037 -> ASCII (IBM PC 437)
+ */
+__u8 _ebcasc[256] =
+{
+ /* 0x00   NUL   SOH   STX   ETX  *SEL    HT  *RNL   DEL */
+          0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
+ /* 0x08   -GE  -SPS  -RPT    VT    FF    CR    SO    SI */
+          0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ /* 0x10   DLE   DC1   DC2   DC3  -RES   -NL    BS  -POC
+                                  -ENP  ->LF             */
+          0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
+ /* 0x18   CAN    EM  -UBS  -CU1  -IFS  -IGS  -IRS  -ITB
+                                                    -IUS */
+          0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ /* 0x20   -DS  -SOS    FS  -WUS  -BYP    LF   ETB   ESC
+                                  -INP                   */
+          0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
+ /* 0x28   -SA  -SFE   -SM  -CSP  -MFA   ENQ   ACK   BEL
+                       -SW                               */ 
+          0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
+ /* 0x30  ----  ----   SYN   -IR   -PP  -TRN  -NBS   EOT */
+          0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
+ /* 0x38  -SBS   -IT  -RFF  -CU3   DC4   NAK  ----   SUB */
+          0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
+ /* 0x40    SP   RSP           ä              ----       */
+          0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
+ /* 0x48                       .     <     (     +     | */
+          0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
+ /* 0x50     &                                      ---- */
+          0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
+ /* 0x58           ß     !     $     *     )     ;       */
+          0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
+ /* 0x60     -     /  ----     Ä  ----  ----  ----       */
+          0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
+ /* 0x68              ----     ,     %     _     >     ? */ 
+          0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
+ /* 0x70  ----        ----  ----  ----  ----  ----  ---- */
+          0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ /* 0x78     *     `     :     #     @     '     =     " */
+          0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
+ /* 0x80     *     a     b     c     d     e     f     g */
+          0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ /* 0x88     h     i              ----  ----  ----       */
+          0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
+ /* 0x90     °     j     k     l     m     n     o     p */
+          0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
+ /* 0x98     q     r                    ----        ---- */
+          0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
+ /* 0xA0           ~     s     t     u     v     w     x */
+          0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ /* 0xA8     y     z              ----  ----  ----  ---- */
+          0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
+ /* 0xB0     ^                    ----     §  ----       */
+          0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
+ /* 0xB8        ----     [     ]  ----  ----  ----  ---- */
+          0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
+ /* 0xC0     {     A     B     C     D     E     F     G */
+          0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ /* 0xC8     H     I  ----           ö              ---- */
+          0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
+ /* 0xD0     }     J     K     L     M     N     O     P */
+          0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
+ /* 0xD8     Q     R  ----           ü                   */
+          0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
+ /* 0xE0     \           S     T     U     V     W     X */
+          0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ /* 0xE8     Y     Z        ----     Ö  ----  ----  ---- */
+          0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
+ /* 0xF0     0     1     2     3     4     5     6     7 */
+          0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ /* 0xF8     8     9  ----  ----     Ü  ----  ----  ---- */
+          0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
+};
+
+
+/*
+ * ASCII (IBM PC 437)  -> EBCDIC 500
+ */
+__u8 _ascebc_500[256] =
+{
+ /*00 NUL   SOH   STX   ETX   EOT   ENQ   ACK   BEL */
+     0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
+ /*08  BS    HT    LF    VT    FF    CR    SO    SI */
+ /*              ->NL                               */
+     0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ /*10 DLE   DC1   DC2   DC3   DC4   NAK   SYN   ETB */
+     0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
+ /*18 CAN    EM   SUB   ESC    FS    GS    RS    US */
+ /*                               ->IGS ->IRS ->IUS */
+     0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
+ /*20  SP     !     "     #     $     %     &     ' */
+     0x40, 0x4F, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
+ /*28   (     )     *     +     ,     -    .      / */
+     0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
+ /*30   0     1     2     3     4     5     6     7 */
+     0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ /*38   8     9     :     ;     <     =     >     ? */
+     0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
+ /*40   @     A     B     C     D     E     F     G */
+     0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ /*48   H     I     J     K     L     M     N     O */
+     0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
+ /*50   P     Q     R     S     T     U     V     W */
+     0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
+ /*58   X     Y     Z     [     \     ]     ^     _ */
+     0xE7, 0xE8, 0xE9, 0x4A, 0xE0, 0x5A, 0x5F, 0x6D,
+ /*60   `     a     b     c     d     e     f     g */
+     0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ /*68   h     i     j     k     l     m     n     o */
+     0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
+ /*70   p     q     r     s     t     u     v     w */
+     0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
+ /*78   x     y     z     {     |     }     ~    DL */
+     0xA7, 0xA8, 0xA9, 0xC0, 0xBB, 0xD0, 0xA1, 0x07,
+ /*80*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*88*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*90*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*98*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*A0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*A8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*B0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*B8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*C0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*C8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*D0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*D8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*E0        sz						*/
+     0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*E8*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*F0*/
+     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
+ /*F8*/
+     0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
+};
+
+/*
+ * EBCDIC 500 -> ASCII (IBM PC 437)
+ */
+__u8 _ebcasc_500[256] =
+{
+ /* 0x00   NUL   SOH   STX   ETX  *SEL    HT  *RNL   DEL */
+          0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
+ /* 0x08   -GE  -SPS  -RPT    VT    FF    CR    SO    SI */
+          0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ /* 0x10   DLE   DC1   DC2   DC3  -RES   -NL    BS  -POC
+                                  -ENP  ->LF             */
+          0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
+ /* 0x18   CAN    EM  -UBS  -CU1  -IFS  -IGS  -IRS  -ITB
+                                                    -IUS */
+          0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ /* 0x20   -DS  -SOS    FS  -WUS  -BYP    LF   ETB   ESC
+                                  -INP                   */
+          0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
+ /* 0x28   -SA  -SFE   -SM  -CSP  -MFA   ENQ   ACK   BEL
+                       -SW                               */ 
+          0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
+ /* 0x30  ----  ----   SYN   -IR   -PP  -TRN  -NBS   EOT */
+          0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
+ /* 0x38  -SBS   -IT  -RFF  -CU3   DC4   NAK  ----   SUB */
+          0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
+ /* 0x40    SP   RSP           ä              ----       */
+          0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
+ /* 0x48                 [     .     <     (     +     ! */
+          0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21,
+ /* 0x50     &                                      ---- */
+          0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
+ /* 0x58           ß     ]     $     *     )     ;     ^ */
+          0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E,
+ /* 0x60     -     /  ----     Ä  ----  ----  ----       */
+          0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
+ /* 0x68              ----     ,     %     _     >     ? */ 
+          0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
+ /* 0x70  ----        ----  ----  ----  ----  ----  ---- */
+          0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ /* 0x78     *     `     :     #     @     '     =     " */
+          0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
+ /* 0x80     *     a     b     c     d     e     f     g */
+          0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ /* 0x88     h     i              ----  ----  ----       */
+          0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
+ /* 0x90     °     j     k     l     m     n     o     p */
+          0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
+ /* 0x98     q     r                    ----        ---- */
+          0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
+ /* 0xA0           ~     s     t     u     v     w     x */
+          0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ /* 0xA8     y     z              ----  ----  ----  ---- */
+          0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
+ /* 0xB0                          ----     §  ----       */
+          0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
+ /* 0xB8        ----           |  ----  ----  ----  ---- */
+          0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07,
+ /* 0xC0     {     A     B     C     D     E     F     G */
+          0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ /* 0xC8     H     I  ----           ö              ---- */
+          0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
+ /* 0xD0     }     J     K     L     M     N     O     P */
+          0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
+ /* 0xD8     Q     R  ----           ü                   */
+          0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
+ /* 0xE0     \           S     T     U     V     W     X */
+          0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ /* 0xE8     Y     Z        ----     Ö  ----  ----  ---- */
+          0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
+ /* 0xF0     0     1     2     3     4     5     6     7 */
+          0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ /* 0xF8     8     9  ----  ----     Ü  ----  ----  ---- */
+          0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
+};
+
+
+/*
+ * EBCDIC 037/500 conversion table:
+ * from upper to lower case
+ */
+__u8 _ebc_tolower[256] =
+{
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+	0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+	0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+	0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+	0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+	0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+	0x60, 0x61, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+	0x48, 0x49, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+	0x70, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+	0x58, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+	0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+	0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9C, 0x9F,
+	0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+	0xA8, 0xA9, 0xAA, 0xAB, 0x8C, 0x8D, 0x8E, 0xAF,
+	0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
+	0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+	0xC0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+	0x88, 0x89, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+	0xD0, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+	0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+	0xE0, 0xE1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+	0xA8, 0xA9, 0xEA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
+	0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+	0xF8, 0xF9, 0xFA, 0xDB, 0xDC, 0xDD, 0xDE, 0xFF
+};
+
+
+/*
+ * EBCDIC 037/500 conversion table:
+ * from lower to upper case
+ */
+__u8 _ebc_toupper[256] =
+{
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+	0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+	0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+	0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
+	0x40, 0x41, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+	0x68, 0x69, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
+	0x50, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+	0x78, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+	0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+	0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
+	0x80, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+	0xC8, 0xC9, 0x8A, 0x8B, 0xAC, 0xAD, 0xAE, 0x8F,
+	0x90, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+	0xD8, 0xD9, 0x9A, 0x9B, 0x9E, 0x9D, 0x9E, 0x9F,
+	0xA0, 0xA1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+	0xE8, 0xE9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
+	0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
+	0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+	0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+	0xC8, 0xC9, 0xCA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+	0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
+	0xD8, 0xD9, 0xDA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
+	0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+	0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+	0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+	0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
+};
+
+EXPORT_SYMBOL(_ascebc_500);
+EXPORT_SYMBOL(_ebcasc_500);
+EXPORT_SYMBOL(_ascebc);
+EXPORT_SYMBOL(_ebcasc);
+EXPORT_SYMBOL(_ebc_tolower);
+EXPORT_SYMBOL(_ebc_toupper);
+
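+/*
+ * Usage sketch (not built): these tables are normally driven through
+ * the ASCEBC()/EBCASC() helpers from <asm/ebcdic.h>, as cpcmd.c above
+ * does around Diagnose X'08'.  Buffer contents are illustrative.
+ */
+#if 0
+static void example_roundtrip(void)
+{
+	char buf[16];
+
+	strcpy(buf, "QUERY TIME");
+	ASCEBC(buf, strlen(buf));	/* ASCII -> EBCDIC, in place */
+	/* ... hand buf to CP or another EBCDIC consumer ... */
+	EBCASC(buf, strlen(buf));	/* EBCDIC -> ASCII, in place */
+}
+#endif
+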
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
new file mode 100644
index 000000000000..c0e09b33febe
--- /dev/null
+++ b/arch/s390/kernel/entry.S
@@ -0,0 +1,868 @@
+/*
+ *  arch/s390/kernel/entry.S
+ *    S390 low-level entry points.
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Hartmut Penner (hp@de.ibm.com),
+ *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <linux/config.h>
+#include <asm/cache.h>
+#include <asm/lowcore.h>
+#include <asm/errno.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/offsets.h>
+#include <asm/unistd.h>
+#include <asm/page.h>
+
+/*
+ * Stack layout for the system_call stack entry.
+ * The first few entries are identical to the user_regs_struct.
+ */
+SP_PTREGS    =  STACK_FRAME_OVERHEAD
+SP_ARGS      =  STACK_FRAME_OVERHEAD + __PT_ARGS
+SP_PSW       =  STACK_FRAME_OVERHEAD + __PT_PSW
+SP_R0        =  STACK_FRAME_OVERHEAD + __PT_GPRS
+SP_R1        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 4
+SP_R2        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 8
+SP_R3        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 12
+SP_R4        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 16
+SP_R5        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 20
+SP_R6        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 24
+SP_R7        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 28
+SP_R8        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 32
+SP_R9        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 36
+SP_R10       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 40
+SP_R11       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 44
+SP_R12       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 48
+SP_R13       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 52
+SP_R14       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 56
+SP_R15       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 60
+SP_ORIG_R2   =  STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
+SP_ILC       =  STACK_FRAME_OVERHEAD + __PT_ILC
+SP_TRAP      =  STACK_FRAME_OVERHEAD + __PT_TRAP
+SP_SIZE      =  STACK_FRAME_OVERHEAD + __PT_SIZE
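+
+# Each SP_Rn above is the pt_regs slot of gpr n relative to the stack
+# pointer; in the 31 bit kernel every gpr is 4 bytes wide, hence the
+# stride of 4.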
+
+_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+		 _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
+
+STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SIZE  = 1 << STACK_SHIFT
+
+#define BASED(name) name-system_call(%r13)
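+
+# %r13 always holds the address of system_call (loaded in SAVE_ALL_BASE),
+# so BASED(name) addresses "name" as a %r13-relative operand into the
+# literal pool.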
+
+/*
+ * Register usage in interrupt handlers:
+ *    R9  - pointer to current task structure
+ *    R13 - pointer to literal pool
+ *    R14 - return register for function calls
+ *    R15 - kernel stack pointer
+ */
+
+	.macro  STORE_TIMER lc_offset
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	stpt	\lc_offset
+#endif
+	.endm
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.macro  UPDATE_VTIME lc_from,lc_to,lc_sum
+	lm	%r10,%r11,\lc_from
+	sl	%r10,\lc_to
+	sl	%r11,\lc_to+4
+	bc	3,BASED(0f)
+	sl	%r10,BASED(.Lc_1)
+0:	al	%r10,\lc_sum
+	al	%r11,\lc_sum+4
+	bc	12,BASED(1f)
+	al	%r10,BASED(.Lc_1)
+1:	stm	%r10,%r11,\lc_sum
+	.endm
+#endif
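+
+# Roughly equivalent C for UPDATE_VTIME (a sketch; the 64 bit arithmetic
+# is done with a 32 bit register pair, and the bc/sl and bc/al pairs
+# propagate the borrow and carry into the high word):
+#	*(u64 *) lc_sum += *(u64 *) lc_from - *(u64 *) lc_to;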
+
+	.macro	SAVE_ALL_BASE savearea
+	stm	%r12,%r15,\savearea
+	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
+	.endm
+
+	.macro	SAVE_ALL psworg,savearea,sync
+	la	%r12,\psworg
+	.if	\sync
+	tm	\psworg+1,0x01		# test problem state bit
+	bz	BASED(2f)		# skip stack setup
+	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
+	.else
+	tm	\psworg+1,0x01		# test problem state bit
+	bnz	BASED(1f)		# from user -> load async stack
+	clc	\psworg+4(4),BASED(.Lcritical_end)
+	bhe	BASED(0f)
+	clc	\psworg+4(4),BASED(.Lcritical_start)
+	bl	BASED(0f)
+	l	%r14,BASED(.Lcleanup_critical)
+	basr	%r14,%r14
+	tm	0(%r12),0x01		# retest problem state after cleanup
+	bnz	BASED(1f)
+0:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
+	slr	%r14,%r15
+	sra	%r14,STACK_SHIFT
+	be	BASED(2f)
+1:	l	%r15,__LC_ASYNC_STACK
+	.endif
+#ifdef CONFIG_CHECK_STACK
+	b	BASED(3f)
+2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
+	bz	BASED(stack_overflow)
+3:
+#endif
+2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
+	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
+	la	%r12,\psworg
+	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
+	icm	%r12,12,__LC_SVC_ILC
+	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+	st	%r12,SP_ILC(%r15)
+	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
+	la	%r12,0
+	st	%r12,__SF_BACKCHAIN(%r15)	# clear back chain
+	.endm
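+
+# After SAVE_ALL the frame at %r15 contains a complete struct pt_regs at
+# offset SP_PTREGS (user PSW, gprs 0-15, orig_gpr2, ilc) and a zeroed back
+# chain, ready to be handed to C handlers via "la %r2,SP_PTREGS(%r15)".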
+
+	.macro  RESTORE_ALL sync
+	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15) # move user PSW to lowcore
+	.if !\sync
+	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
+	.endif
+	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
+	STORE_TIMER __LC_EXIT_TIMER
+	lpsw	__LC_RETURN_PSW		# back to caller
+	.endm
+
+/*
+ * Scheduler resume function, called by switch_to
+ *  gpr2 = (task_struct *) prev
+ *  gpr3 = (task_struct *) next
+ * Returns:
+ *  gpr2 = prev
+ */
+        .globl  __switch_to
+__switch_to:
+        basr    %r1,0
+__switch_to_base:
+	tm	__THREAD_per(%r3),0xe8		# new process is using per ?
+	bz	__switch_to_noper-__switch_to_base(%r1)	# if not we're fine
+        stctl   %c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
+        clc     __THREAD_per(12,%r3),__SF_EMPTY(%r15)
+        be      __switch_to_noper-__switch_to_base(%r1)	# we got away without bashing TLB's
+        lctl    %c9,%c11,__THREAD_per(%r3)	# Nope we didn't
+__switch_to_noper:
+        stm     %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
+	st	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
+	l	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
+	lm	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
+	st	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
+	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
+	l	%r3,__THREAD_info(%r3)  # load thread_info from task struct
+	st	%r3,__LC_THREAD_INFO
+	ahi	%r3,STACK_SIZE
+	st	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
+	br	%r14
+
+__critical_start:
+/*
+ * SVC interrupt handler routine. System calls are synchronous events and
+ * are executed with interrupts enabled.
+ */
+
+	.globl  system_call
+system_call:
+	STORE_TIMER __LC_SYNC_ENTER_TIMER
+sysc_saveall:
+	SAVE_ALL_BASE __LC_SAVE_AREA
+        SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	lh	%r7,0x8a	  # get svc number from lowcore
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+sysc_vtime:
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(sysc_do_svc)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+sysc_stime:
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+sysc_update:
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+#endif
+sysc_do_svc:
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	sla	%r7,2             # *4 and test for svc 0
+	bnz	BASED(sysc_nr_ok) # svc number > 0
+	# svc 0: system call number in %r1
+	cl	%r1,BASED(.Lnr_syscalls)
+	bnl	BASED(sysc_nr_ok)
+	lr	%r7,%r1           # copy svc number to %r7
+	sla	%r7,2             # *4
+sysc_nr_ok:
+	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
+sysc_do_restart:
+	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+        l       %r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
+        bnz     BASED(sysc_tracesys)
+        basr    %r14,%r8          # call sys_xxxx
+        st      %r2,SP_R2(%r15)   # store return value (change R2 on stack)
+                                  # ATTENTION: check sys_execve_glue before
+                                  # changing anything here !!
+
+sysc_return:
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+	bno	BASED(sysc_leave)
+	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
+	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
+sysc_leave:
+        RESTORE_ALL 1
+
+#
+# recheck if there is more work to do
+#
+sysc_work_loop:
+	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
+	bz	BASED(sysc_leave)      # there is no work to do
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work:
+	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	bo	BASED(sysc_reschedule)
+	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
+	bo	BASED(sysc_sigpending)
+	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
+	bo	BASED(sysc_restart)
+	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
+	bo	BASED(sysc_singlestep)
+	b	BASED(sysc_leave)
+
+#
+# _TIF_NEED_RESCHED is set, call schedule
+#	
+sysc_reschedule:        
+        l       %r1,BASED(.Lschedule)
+	la      %r14,BASED(sysc_work_loop)
+	br      %r1		       # call scheduler
+
+#
+# _TIF_SIGPENDING is set, call do_signal
+#
+sysc_sigpending:     
+	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        sr      %r3,%r3                # clear *oldset
+        l       %r1,BASED(.Ldo_signal)
+	basr	%r14,%r1               # call do_signal
+	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
+	bo	BASED(sysc_restart)
+	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
+	bo	BASED(sysc_singlestep)
+	b	BASED(sysc_leave)      # out of here, do NOT recheck
+
+#
+# _TIF_RESTART_SVC is set, set up registers and restart svc
+#
+sysc_restart:
+	ni	__TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
+	l	%r7,SP_R2(%r15)        # load new svc number
+	sla	%r7,2
+	mvc	SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
+	lm	%r2,%r6,SP_R2(%r15)    # load svc arguments
+	b	BASED(sysc_do_restart) # restart svc
+
+#
+# _TIF_SINGLE_STEP is set, call do_single_step
+#
+sysc_singlestep:
+	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
+	la	%r14,BASED(sysc_return)	# load adr. of system return
+	br	%r1			# branch to do_single_step
+
+__critical_end:
+
+#
+# call trace before and after sys_call
+#
+sysc_tracesys:
+        l       %r1,BASED(.Ltrace)
+	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	la	%r3,0
+	srl	%r7,2
+	st	%r7,SP_R2(%r15)
+	basr	%r14,%r1
+	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
+	bnl	BASED(sysc_tracenogo)
+	l	%r7,SP_R2(%r15)        # strace might have changed the 
+	sll	%r7,2                  #  system call
+	l	%r8,sys_call_table-system_call(%r7,%r13)
+sysc_tracego:
+	lm	%r3,%r6,SP_R3(%r15)
+	l	%r2,SP_ORIG_R2(%r15)
+	basr	%r14,%r8          # call sys_xxx
+	st	%r2,SP_R2(%r15)   # store return value
+sysc_tracenogo:
+	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+        bz      BASED(sysc_return)
+	l	%r1,BASED(.Ltrace)
+	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	la	%r3,1
+	la	%r14,BASED(sysc_return)
+	br	%r1
+
+#
+# a new process exits the kernel with ret_from_fork
+#
+        .globl  ret_from_fork
+ret_from_fork:
+	l	%r13,__LC_SVC_NEW_PSW+4
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
+	bo	BASED(0f)
+	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
+0:	l       %r1,BASED(.Lschedtail)
+	basr    %r14,%r1
+        stosm   __SF_EMPTY(%r15),0x03     # reenable interrupts
+	b	BASED(sysc_return)
+
+#
+# clone, fork, vfork, exec and sigreturn need glue,
+# because they all expect pt_regs as parameter,
+# but are called with different parameters.
+# return-address is set up above
+#
+sys_clone_glue: 
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        l       %r1,BASED(.Lclone)
+        br      %r1                   # branch to sys_clone
+
+sys_fork_glue:  
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        l       %r1,BASED(.Lfork)
+        br      %r1                   # branch to sys_fork
+
+sys_vfork_glue: 
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        l       %r1,BASED(.Lvfork)
+        br      %r1                   # branch to sys_vfork
+
+sys_execve_glue:        
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs
+        l       %r1,BASED(.Lexecve)
+	lr      %r12,%r14             # save return address
+        basr    %r14,%r1              # call sys_execve
+        ltr     %r2,%r2               # check if execve failed
+        bnz     0(%r12)               # it did fail -> store result in gpr2
+        b       4(%r12)               # SKIP ST 2,SP_R2(15) after BASR 14,8
+                                      # in system_call/sysc_tracesys
+
+sys_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        l       %r1,BASED(.Lsigreturn)
+        br      %r1                   # branch to sys_sigreturn
+
+sys_rt_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        l       %r1,BASED(.Lrt_sigreturn)
+        br      %r1                   # branch to sys_rt_sigreturn
+
+#
+# sigsuspend and rt_sigsuspend need pt_regs as an additional
+# parameter and they have to skip the store of %r2 into the
+# user register %r2 because the return value was set in 
+# sigsuspend and rt_sigsuspend already and must not be overwritten!
+#
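+# (the "la %r14,4(%r14)" below simply advances the return address past the
+# 4 byte "st %r2,SP_R2(%r15)" instruction in the system call path)
+#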
+
+sys_sigsuspend_glue:    
+        lr      %r5,%r4               # move mask back
+        lr      %r4,%r3               # move history1 parameter
+        lr      %r3,%r2               # move history0 parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+        l       %r1,BASED(.Lsigsuspend)
+	la      %r14,4(%r14)          # skip store of return value
+        br      %r1                   # branch to sys_sigsuspend
+
+sys_rt_sigsuspend_glue: 
+        lr      %r4,%r3               # move sigsetsize parameter
+        lr      %r3,%r2               # move unewset parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+        l       %r1,BASED(.Lrt_sigsuspend)
+	la      %r14,4(%r14)          # skip store of return value
+        br      %r1                   # branch to sys_rt_sigsuspend
+
+sys_sigaltstack_glue:
+        la      %r4,SP_PTREGS(%r15)   # load pt_regs as parameter
+        l       %r1,BASED(.Lsigaltstack)
+        br      %r1                   # branch to sys_sigaltstack
+
+
+/*
+ * Program check handler routine
+ */
+
+        .globl  pgm_check_handler
+pgm_check_handler:
+/*
+ * First we need to check for a special case:
+ * Single stepping an instruction that disables the PER event mask will
+ * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
+ * For a single stepped SVC the program check handler gets control after
+ * the SVC new PSW has been loaded. But we want to execute the SVC first and
+ * then handle the PER event. Therefore we update the SVC old PSW to point
+ * to the pgm_check_handler and branch to the SVC handler after we checked
+ * if we have to load the kernel stack register.
+ * For every other possible cause for PER event without the PER mask set
+ * we just ignore the PER event (FIXME: is there anything we have to do
+ * for LPSW?).
+ */
+	STORE_TIMER __LC_SYNC_ENTER_TIMER
+	SAVE_ALL_BASE __LC_SAVE_AREA
+        tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
+        bnz     BASED(pgm_per)           # got per exception -> special case
+	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(pgm_no_vtime)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime:
+#endif
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+        l       %r3,__LC_PGM_ILC         # load program interruption code
+	la	%r8,0x7f
+	nr	%r8,%r3
+pgm_do_call:
+        l       %r7,BASED(.Ljump_table)
+        sll     %r8,2
+        l       %r7,0(%r8,%r7)		 # load address of handler routine
+        la      %r2,SP_PTREGS(%r15)	 # address of register-save area
+	la      %r14,BASED(sysc_return)
+	br      %r7			 # branch to interrupt-handler
+
+#
+# handle per exception
+#
+pgm_per:
+        tm      __LC_PGM_OLD_PSW,0x40    # test if per event recording is on
+        bnz     BASED(pgm_per_std)       # ok, normal per event from user space
+# ok, it's one of the special cases; now we need to find out which one
+        clc     __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
+        be      BASED(pgm_svcper)
+# no interesting special case, ignore PER event
+        lm      %r12,%r15,__LC_SAVE_AREA
+	lpsw    0x28
+
+#
+# Normal per exception
+#
+pgm_per_std:
+	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(pgm_no_vtime2)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime2:
+#endif
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	l	%r1,__TI_task(%r9)
+	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
+	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	l	%r3,__LC_PGM_ILC	 # load program interruption code
+	la	%r8,0x7f
+	nr	%r8,%r3                  # clear per-event-bit and ilc
+	be	BASED(sysc_return)       # only per or per+check ?
+	b	BASED(pgm_do_call)
+
+#
+# it was a single stepped SVC that is causing all the trouble
+#
+pgm_svcper:
+	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(pgm_no_vtime3)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime3:
+#endif
+	lh	%r7,0x8a		# get svc number from lowcore
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	l	%r1,__TI_task(%r9)
+	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
+	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	b	BASED(sysc_do_svc)
+
+/*
+ * IO interrupt handler routine
+ */
+
+        .globl io_int_handler
+io_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stck	__LC_INT_CLOCK
+	SAVE_ALL_BASE __LC_SAVE_AREA+16
+        SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(io_no_vtime)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+io_no_vtime:
+#endif
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+        l       %r1,BASED(.Ldo_IRQ)        # load address of do_IRQ
+        la      %r2,SP_PTREGS(%r15) # address of register-save area
+        basr    %r14,%r1          # branch to standard irq handler
+
+io_return:
+        tm      SP_PSW+1(%r15),0x01    # returning to user ?
+#ifdef CONFIG_PREEMPT
+	bno     BASED(io_preempt)      # no -> check for preemptive scheduling
+#else
+        bno     BASED(io_leave)        # no -> skip resched & signal
+#endif
+	tm	__TI_flags+3(%r9),_TIF_WORK_INT
+	bnz	BASED(io_work)         # there is work to do (signals etc.)
+io_leave:
+        RESTORE_ALL 0
+
+#ifdef CONFIG_PREEMPT
+io_preempt:
+	icm	%r0,15,__TI_precount(%r9)
+	bnz     BASED(io_leave)
+	l	%r1,SP_R15(%r15)
+	s	%r1,BASED(.Lc_spsize)
+	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
+        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	lr	%r15,%r1
+io_resume_loop:
+	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	bno	BASED(io_leave)
+	mvc     __TI_precount(4,%r9),BASED(.Lc_pactive)
+        stosm   __SF_EMPTY(%r15),0x03  # reenable interrupts
+        l       %r1,BASED(.Lschedule)
+	basr	%r14,%r1	       # call schedule
+        stnsm   __SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
+	xc      __TI_precount(4,%r9),__TI_precount(%r9)
+	b	BASED(io_resume_loop)
+#endif
+
+#
+# switch to kernel stack, then check the TIF bits
+#
+io_work:
+	l	%r1,__LC_KERNEL_STACK
+	s	%r1,BASED(.Lc_spsize)
+	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
+        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	lr	%r15,%r1
+#
+# One of the work bits is on. Find out which one.
+# The bits checked are _TIF_SIGPENDING and _TIF_NEED_RESCHED.
+#
+io_work_loop:
+	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	bo	BASED(io_reschedule)
+	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
+	bo	BASED(io_sigpending)
+	b	BASED(io_leave)
+
+#
+# _TIF_NEED_RESCHED is set, call schedule
+#	
+io_reschedule:        
+        l       %r1,BASED(.Lschedule)
+        stosm   __SF_EMPTY(%r15),0x03  # reenable interrupts
+	basr    %r14,%r1	       # call scheduler
+        stnsm   __SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
+	tm	__TI_flags+3(%r9),_TIF_WORK_INT
+	bz	BASED(io_leave)        # there is no work to do
+	b	BASED(io_work_loop)
+
+#
+# _TIF_SIGPENDING is set, call do_signal
+#
+io_sigpending:     
+        stosm   __SF_EMPTY(%r15),0x03  # reenable interrupts
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        sr      %r3,%r3                # clear *oldset
+        l       %r1,BASED(.Ldo_signal)
+	basr    %r14,%r1	       # call do_signal
+        stnsm   __SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
+	b	BASED(io_leave)        # out of here, do NOT recheck
+
+/*
+ * External interrupt handler routine
+ */
+
+        .globl  ext_int_handler
+ext_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stck	__LC_INT_CLOCK
+	SAVE_ALL_BASE __LC_SAVE_AREA+16
+        SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(ext_no_vtime)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ext_no_vtime:
+#endif
+	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	la	%r2,SP_PTREGS(%r15)    # address of register-save area
+	lh	%r3,__LC_EXT_INT_CODE  # get interruption code
+	l	%r1,BASED(.Ldo_extint)
+	basr	%r14,%r1
+	b	BASED(io_return)
+
+/*
+ * Machine check handler routines
+ */
+
+        .globl mcck_int_handler
+mcck_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	SAVE_ALL_BASE __LC_SAVE_AREA+32
+        SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(mcck_no_vtime)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+mcck_no_vtime:
+#endif
+	l       %r1,BASED(.Ls390_mcck)
+	basr    %r14,%r1	  # call machine check handler
+mcck_return:
+        RESTORE_ALL 0
+
+#ifdef CONFIG_SMP
+/*
+ * Restart interruption handler, kick starter for additional CPUs
+ */
+        .globl restart_int_handler
+restart_int_handler:
+        l       %r15,__LC_SAVE_AREA+60 # load ksp
+        lctl    %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
+        lam     %a0,%a15,__LC_AREGS_SAVE_AREA
+        lm      %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+        stosm   __SF_EMPTY(%r15),0x04    # now we can turn dat on
+        basr    %r14,0
+        l       %r14,restart_addr-.(%r14)
+        br      %r14                   # branch to start_secondary
+restart_addr:
+        .long   start_secondary
+#else
+/*
+ * If we do not run with SMP enabled, let the new CPU crash ...
+ */
+        .globl restart_int_handler
+restart_int_handler:
+        basr    %r1,0
+restart_base:
+        lpsw    restart_crash-restart_base(%r1)
+        .align 8
+restart_crash:
+        .long  0x000a0000,0x00000000
+restart_go:
+#endif
+
+#ifdef CONFIG_CHECK_STACK
+/*
+ * The synchronous or the asynchronous stack overflowed. We are dead.
+ * No need to properly save the registers, we are going to panic anyway.
+ * Set up a pt_regs so that show_trace can provide a good call trace.
+ */
+stack_overflow:
+	l	%r15,__LC_PANIC_STACK	# change to panic stack
+	sl	%r15,BASED(.Lc_spsize)
+	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
+	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+	la	%r1,__LC_SAVE_AREA
+	ch	%r12,BASED(.L0x020)	# old psw addr == __LC_SVC_OLD_PSW ?
+	be	BASED(0f)
+	ch	%r12,BASED(.L0x028)	# old psw addr == __LC_PGM_OLD_PSW ?
+	be	BASED(0f)
+	la	%r1,__LC_SAVE_AREA+16
+0:	mvc	SP_R12(16,%r15),0(%r1)	# move %r12-%r15 to stack
+        xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
+	l	%r1,BASED(1f)		# branch to kernel_stack_overflow
+        la      %r2,SP_PTREGS(%r15)	# load pt_regs
+	br	%r1
+1:	.long  kernel_stack_overflow
+#endif
+
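+#
+# Each cleanup_table_* pair below brackets a range of the critical section
+# above.  When an asynchronous interrupt arrives while the old PSW points
+# into such a range, SAVE_ALL calls cleanup_critical, which advances the
+# interrupted context to a safe restart point before a new stack frame is
+# built on top of the half-finished one.
+#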
+cleanup_table_system_call:
+	.long	system_call + 0x80000000, sysc_do_svc + 0x80000000
+cleanup_table_sysc_return:
+	.long	sysc_return + 0x80000000, sysc_leave + 0x80000000
+cleanup_table_sysc_leave:
+	.long	sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
+cleanup_table_sysc_work_loop:
+	.long	sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
+
+cleanup_critical:
+	clc	4(4,%r12),BASED(cleanup_table_system_call)
+	bl	BASED(0f)
+	clc	4(4,%r12),BASED(cleanup_table_system_call+4)
+	bl	BASED(cleanup_system_call)
+0:
+	clc	4(4,%r12),BASED(cleanup_table_sysc_return)
+	bl	BASED(0f)
+	clc	4(4,%r12),BASED(cleanup_table_sysc_return+4)
+	bl	BASED(cleanup_sysc_return)
+0:
+	clc	4(4,%r12),BASED(cleanup_table_sysc_leave)
+	bl	BASED(0f)
+	clc	4(4,%r12),BASED(cleanup_table_sysc_leave+4)
+	bl	BASED(cleanup_sysc_leave)
+0:
+	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop)
+	bl	BASED(0f)
+	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
+	bl	BASED(cleanup_sysc_leave)
+0:
+	br	%r14
+
+cleanup_system_call:
+	mvc	__LC_RETURN_PSW(8),0(%r12)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
+	bh	BASED(0f)
+	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0:	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
+	bhe	BASED(cleanup_vtime)
+#endif
+	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
+	bh	BASED(0f)
+	mvc	__LC_SAVE_AREA(16),__LC_SAVE_AREA+16
+0:	st	%r13,__LC_SAVE_AREA+20
+	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	st	%r15,__LC_SAVE_AREA+28
+	lh	%r7,0x8a
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+cleanup_vtime:
+	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
+	bhe	BASED(cleanup_stime)
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	bz	BASED(cleanup_novtime)
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+cleanup_stime:
+	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
+	bh	BASED(cleanup_update)
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+cleanup_update:
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+cleanup_novtime:
+#endif
+	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
+	la	%r12,__LC_RETURN_PSW
+	br	%r14
+cleanup_system_call_insn:
+	.long	sysc_saveall + 0x80000000
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.long   system_call + 0x80000000
+	.long   sysc_vtime + 0x80000000
+	.long   sysc_stime + 0x80000000
+	.long   sysc_update + 0x80000000
+#endif
+
+cleanup_sysc_return:
+	mvc	__LC_RETURN_PSW(4),0(%r12)
+	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
+	la	%r12,__LC_RETURN_PSW
+	br	%r14
+
+cleanup_sysc_leave:
+	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn)
+	be	BASED(0f)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
+	be	BASED(0f)
+#endif
+	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
+	mvc	__LC_SAVE_AREA+16(16),SP_R12(%r15)
+	lm	%r0,%r11,SP_R0(%r15)
+	l	%r15,SP_R15(%r15)
+0:	la	%r12,__LC_RETURN_PSW
+	br	%r14
+cleanup_sysc_leave_insn:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.long	sysc_leave + 14 + 0x80000000
+#endif
+	.long	sysc_leave + 10 + 0x80000000
+
+/*
+ * Integer constants
+ */
+               .align 4
+.Lc_spsize:    .long  SP_SIZE
+.Lc_overhead:  .long  STACK_FRAME_OVERHEAD
+.Lc_pactive:   .long  PREEMPT_ACTIVE
+.Lnr_syscalls: .long  NR_syscalls
+.L0x018:       .short 0x018
+.L0x020:       .short 0x020
+.L0x028:       .short 0x028
+.L0x030:       .short 0x030
+.L0x038:       .short 0x038
+.Lc_1:         .long  1
+
+/*
+ * Symbol constants
+ */
+.Ls390_mcck:   .long  s390_do_machine_check
+.Ldo_IRQ:      .long  do_IRQ
+.Ldo_extint:   .long  do_extint
+.Ldo_signal:   .long  do_signal
+.Lhandle_per:  .long  do_single_step
+.Ljump_table:  .long  pgm_check_table
+.Lschedule:    .long  schedule
+.Lclone:       .long  sys_clone
+.Lexecve:      .long  sys_execve
+.Lfork:        .long  sys_fork
+.Lrt_sigreturn:.long  sys_rt_sigreturn
+.Lrt_sigsuspend:
+               .long  sys_rt_sigsuspend
+.Lsigreturn:   .long  sys_sigreturn
+.Lsigsuspend:  .long  sys_sigsuspend
+.Lsigaltstack: .long  sys_sigaltstack
+.Ltrace:       .long  syscall_trace
+.Lvfork:       .long  sys_vfork
+.Lschedtail:   .long  schedule_tail
+
+.Lcritical_start:
+               .long  __critical_start + 0x80000000
+.Lcritical_end:
+               .long  __critical_end + 0x80000000
+.Lcleanup_critical:
+               .long  cleanup_critical
+
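+# Every system call appears once in syscalls.S as SYSCALL(esa,esame,emu);
+# this 31 bit build keeps the first column.  A line there looks like, e.g.,
+#	SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)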
+#define SYSCALL(esa,esame,emu)	.long esa
+	.globl  sys_call_table
+sys_call_table:
+#include "syscalls.S"
+#undef SYSCALL
+
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
new file mode 100644
index 000000000000..51527ab8c8f9
--- /dev/null
+++ b/arch/s390/kernel/entry64.S
@@ -0,0 +1,881 @@
+/*
+ *  arch/s390/kernel/entry64.S
+ *    S390 low-level entry points.
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Hartmut Penner (hp@de.ibm.com),
+ *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <linux/config.h>
+#include <asm/cache.h>
+#include <asm/lowcore.h>
+#include <asm/errno.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/offsets.h>
+#include <asm/unistd.h>
+#include <asm/page.h>
+
+/*
+ * Stack layout for the system_call stack entry.
+ * The first few entries are identical to the user_regs_struct.
+ */
+SP_PTREGS    =  STACK_FRAME_OVERHEAD
+SP_ARGS      =  STACK_FRAME_OVERHEAD + __PT_ARGS
+SP_PSW       =  STACK_FRAME_OVERHEAD + __PT_PSW
+SP_R0        =  STACK_FRAME_OVERHEAD + __PT_GPRS
+SP_R1        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 8
+SP_R2        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 16
+SP_R3        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 24
+SP_R4        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 32
+SP_R5        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 40
+SP_R6        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 48
+SP_R7        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 56
+SP_R8        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 64
+SP_R9        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 72
+SP_R10       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 80
+SP_R11       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 88
+SP_R12       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 96
+SP_R13       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 104
+SP_R14       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 112
+SP_R15       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 120
+SP_ORIG_R2   =  STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
+SP_ILC       =  STACK_FRAME_OVERHEAD + __PT_ILC
+SP_TRAP      =  STACK_FRAME_OVERHEAD + __PT_TRAP
+SP_SIZE      =  STACK_FRAME_OVERHEAD + __PT_SIZE
+
+STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SIZE  = 1 << STACK_SHIFT
+
+_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+		 _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
+
+#define BASED(name) name-system_call(%r13)
+
+	.macro  STORE_TIMER lc_offset
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	stpt	\lc_offset
+#endif
+	.endm
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.macro  UPDATE_VTIME lc_from,lc_to,lc_sum
+	lg	%r10,\lc_from
+	slg	%r10,\lc_to
+	alg	%r10,\lc_sum
+	stg	%r10,\lc_sum
+	.endm
+#endif
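+
+# The 64 bit variant needs no register pair: lg/slg/alg/stg implement the
+# same *lc_sum += *lc_from - *lc_to update directly.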
+
+/*
+ * Register usage in interrupt handlers:
+ *    R9  - pointer to current task structure
+ *    R13 - pointer to literal pool
+ *    R14 - return register for function calls
+ *    R15 - kernel stack pointer
+ */
+
+        .macro  SAVE_ALL_BASE savearea
+	stmg	%r12,%r15,\savearea
+	larl	%r13,system_call
+	.endm
+
+        .macro  SAVE_ALL psworg,savearea,sync
+	la	%r12,\psworg
+	.if	\sync
+	tm	\psworg+1,0x01		# test problem state bit
+	jz	2f			# skip stack setup
+	lg	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
+	.else
+	tm	\psworg+1,0x01		# test problem state bit
+	jnz	1f			# from user -> load async stack
+	clc	\psworg+8(8),BASED(.Lcritical_end)
+	jhe	0f
+	clc	\psworg+8(8),BASED(.Lcritical_start)
+	jl	0f
+	brasl	%r14,cleanup_critical
+	tm	0(%r12),0x01		# retest problem state after cleanup
+	jnz	1f
+0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async. stack ?
+	slgr	%r14,%r15
+	srag	%r14,%r14,STACK_SHIFT
+	jz	2f
+1:	lg	%r15,__LC_ASYNC_STACK	# load async stack
+	.endif
+#ifdef CONFIG_CHECK_STACK
+	j	3f
+2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
+	jz	stack_overflow
+3:
+#endif
+2:	aghi    %r15,-SP_SIZE		# make room for registers & psw
+	mvc     SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
+	la	%r12,\psworg
+	stg	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
+	icm	%r12,12,__LC_SVC_ILC
+	stmg	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+	st	%r12,SP_ILC(%r15)
+	mvc	SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
+	la	%r12,0
+	stg	%r12,__SF_BACKCHAIN(%r15)
+        .endm
+
+	.macro	RESTORE_ALL sync
+	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15) # move user PSW to lowcore
+	.if !\sync
+	ni	__LC_RETURN_PSW+1,0xfd	# clear wait state bit
+	.endif
+	lmg	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
+	STORE_TIMER __LC_EXIT_TIMER
+	lpswe	__LC_RETURN_PSW		# back to caller
+	.endm
+
+/*
+ * Scheduler resume function, called by switch_to
+ *  gpr2 = (task_struct *) prev
+ *  gpr3 = (task_struct *) next
+ * Returns:
+ *  gpr2 = prev
+ */
+        .globl  __switch_to
+__switch_to:
+	tm	__THREAD_per+4(%r3),0xe8 # is the new process using per ?
+	jz	__switch_to_noper		# if not we're fine
+        stctg   %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
+        clc     __THREAD_per(24,%r3),__SF_EMPTY(%r15)
+        je      __switch_to_noper            # we got away without bashing TLB's
+        lctlg   %c9,%c11,__THREAD_per(%r3)	# Nope we didn't
+__switch_to_noper:
+        stmg    %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
+	stg	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
+	lg	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
+        lmg     %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
+	stg	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
+	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
+	lg	%r3,__THREAD_info(%r3)  # load thread_info from task struct
+	stg	%r3,__LC_THREAD_INFO
+	aghi	%r3,STACK_SIZE
+	stg	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
+	br	%r14
+
+__critical_start:
+/*
+ * SVC interrupt handler routine. System calls are synchronous events and
+ * are executed with interrupts enabled.
+ */
+
+	.globl  system_call
+system_call:
+	STORE_TIMER __LC_SYNC_ENTER_TIMER
+sysc_saveall:
+	SAVE_ALL_BASE __LC_SAVE_AREA
+        SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	llgh    %r7,__LC_SVC_INT_CODE # get svc number from lowcore
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+sysc_vtime:
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	sysc_do_svc
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+sysc_stime:
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+sysc_update:
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+#endif
+sysc_do_svc:
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+        slag    %r7,%r7,2         # *4 and test for svc 0
+	jnz	sysc_nr_ok
+	# svc 0: system call number in %r1
+	cl	%r1,BASED(.Lnr_syscalls)
+	jnl	sysc_nr_ok
+	lgfr	%r7,%r1           # copy svc number, clearing the high word
+	slag    %r7,%r7,2         # *4
+sysc_nr_ok:
+	mvc	SP_ARGS(8,%r15),SP_R7(%r15)
+sysc_do_restart:
+	larl    %r10,sys_call_table
+#ifdef CONFIG_S390_SUPPORT
+        tm      SP_PSW+3(%r15),0x01  # are we running in 31 bit mode ?
+        jo      sysc_noemu
+	larl    %r10,sys_call_table_emu  # use 31 bit emulation system calls
+sysc_noemu:
+#endif
+	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+        lgf     %r8,0(%r7,%r10)   # load address of system call routine
+        jnz     sysc_tracesys
+        basr    %r14,%r8          # call sys_xxxx
+        stg     %r2,SP_R2(%r15)   # store return value (change R2 on stack)
+                                  # ATTENTION: check sys_execve_glue before
+                                  # changing anything here !!
+
+sysc_return:
+        tm      SP_PSW+1(%r15),0x01    # returning to user ?
+        jno     sysc_leave
+	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
+	jnz	sysc_work         # there is work to do (signals etc.)
+sysc_leave:
+        RESTORE_ALL 1
+
+#
+# recheck if there is more work to do
+#
+sysc_work_loop:
+	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
+	jz	sysc_leave        # there is no work to do
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work:
+	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
+	jo	sysc_reschedule
+	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
+	jo	sysc_sigpending
+	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
+	jo	sysc_restart
+	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
+	jo	sysc_singlestep
+	j	sysc_leave
+
+#
+# _TIF_NEED_RESCHED is set, call schedule
+#	
+sysc_reschedule:        
+	larl    %r14,sysc_work_loop
+        jg      schedule            # return point is sysc_work_loop
+
+#
+# _TIF_SIGPENDING is set, call do_signal
+#
+sysc_sigpending:     
+	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+        la      %r2,SP_PTREGS(%r15) # load pt_regs
+        sgr     %r3,%r3           # clear *oldset
+	brasl	%r14,do_signal    # call do_signal
+	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
+	jo	sysc_restart
+	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
+	jo	sysc_singlestep
+	j	sysc_leave        # out of here, do NOT recheck
+
+#
+# _TIF_RESTART_SVC is set, set up registers and restart svc
+#
+sysc_restart:
+	ni	__TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
+	lg	%r7,SP_R2(%r15)        # load new svc number
+        slag    %r7,%r7,2              # *4
+	mvc	SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
+	lmg	%r2,%r6,SP_R2(%r15)    # load svc arguments
+	j	sysc_do_restart        # restart svc
+
+#
+# _TIF_SINGLE_STEP is set, call do_single_step
+#
+sysc_singlestep:
+	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	lhi	%r0,__LC_PGM_OLD_PSW
+	sth	%r0,SP_TRAP(%r15)	# set trap indication to pgm check
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	larl	%r14,sysc_return	# load adr. of system return
+	jg	do_single_step		# branch to do_single_step
+
+
+__critical_end:
+
+#
+# call syscall_trace before and after system call
+# special linkage: %r12 contains the return address for trace_svc
+#
+sysc_tracesys:
+	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	la	%r3,0
+	srl	%r7,2
+	stg     %r7,SP_R2(%r15)
+        brasl   %r14,syscall_trace
+	lghi	%r0,NR_syscalls
+	clg	%r0,SP_R2(%r15)
+	jnh	sysc_tracenogo
+	lg	%r7,SP_R2(%r15)   # strace might have changed the
+	sll     %r7,2             #  system call
+	lgf	%r8,0(%r7,%r10)
+sysc_tracego:
+	lmg     %r3,%r6,SP_R3(%r15)
+	lg      %r2,SP_ORIG_R2(%r15)
+        basr    %r14,%r8            # call sys_xxx
+        stg     %r2,SP_R2(%r15)     # store return value
+sysc_tracenogo:
+	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+        jz      sysc_return
+	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	la	%r3,1
+	larl	%r14,sysc_return    # return point is sysc_return
+	jg	syscall_trace
+
+#
+# a new process exits the kernel with ret_from_fork
+#
+        .globl  ret_from_fork
+ret_from_fork:
+	lg	%r13,__LC_SVC_NEW_PSW+8
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
+	jo	0f
+	stg	%r15,SP_R15(%r15)	# store stack pointer for new kthread
+0:	brasl   %r14,schedule_tail
+        stosm   24(%r15),0x03     # reenable interrupts
+	j	sysc_return
+
+#
+# clone, fork, vfork, exec and sigreturn need glue,
+# because they all expect pt_regs as parameter,
+# but are called with different parameters.
+# return-address is set up above
+#
+sys_clone_glue: 
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        jg      sys_clone              # branch to sys_clone
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_clone_glue: 
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        jg      sys32_clone            # branch to sys32_clone
+#endif
+
+sys_fork_glue:  
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        jg      sys_fork               # branch to sys_fork
+
+sys_vfork_glue: 
+        la      %r2,SP_PTREGS(%r15)    # load pt_regs
+        jg      sys_vfork              # branch to sys_vfork
+
+sys_execve_glue:        
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs
+	lgr     %r12,%r14             # save return address
+        brasl   %r14,sys_execve       # call sys_execve
+        ltgr    %r2,%r2               # check if execve failed
+        bnz     0(%r12)               # it did fail -> store result in gpr2
+        b       6(%r12)               # SKIP STG 2,SP_R2(15) in
+                                      # system_call/sysc_tracesys
+#ifdef CONFIG_S390_SUPPORT
+sys32_execve_glue:        
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs
+	lgr     %r12,%r14             # save return address
+        brasl   %r14,sys32_execve     # call sys32_execve
+        ltgr    %r2,%r2               # check if execve failed
+        bnz     0(%r12)               # it did fail -> store result in gpr2
+        b       6(%r12)               # SKIP STG 2,SP_R2(15) in
+                                      # system_call/sysc_tracesys
+#endif
+
+sys_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys_sigreturn         # branch to sys_sigreturn
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys32_sigreturn       # branch to sys32_sigreturn
+#endif
+
+sys_rt_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys_rt_sigreturn      # branch to sys_rt_sigreturn
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_rt_sigreturn_glue:     
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys32_rt_sigreturn    # branch to sys32_rt_sigreturn
+#endif
+
+#
+# sigsuspend and rt_sigsuspend need pt_regs as an additional
+# parameter and they have to skip the store of %r2 into the
+# user register %r2 because the return value was set in 
+# sigsuspend and rt_sigsuspend already and must not be overwritten!
+#
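+# (here "la %r14,6(%r14)" skips the 6 byte "stg %r2,SP_R2(%r15)" in the
+# system call path)
+#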
+
+sys_sigsuspend_glue:    
+        lgr     %r5,%r4               # move mask back
+        lgr     %r4,%r3               # move history1 parameter
+        lgr     %r3,%r2               # move history0 parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+	la      %r14,6(%r14)          # skip store of return value
+        jg      sys_sigsuspend        # branch to sys_sigsuspend
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_sigsuspend_glue:    
+	llgfr	%r4,%r4               # unsigned long			
+        lgr     %r5,%r4               # move mask back
+	lgfr	%r3,%r3               # int			
+        lgr     %r4,%r3               # move history1 parameter
+	lgfr	%r2,%r2               # int			
+        lgr     %r3,%r2               # move history0 parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+	la      %r14,6(%r14)          # skip store of return value
+        jg      sys32_sigsuspend      # branch to sys32_sigsuspend
+#endif
+
+sys_rt_sigsuspend_glue: 
+        lgr     %r4,%r3               # move sigsetsize parameter
+        lgr     %r3,%r2               # move unewset parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+	la      %r14,6(%r14)          # skip store of return value
+        jg      sys_rt_sigsuspend     # branch to sys_rt_sigsuspend
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_rt_sigsuspend_glue: 
+	llgfr	%r3,%r3               # size_t			
+        lgr     %r4,%r3               # move sigsetsize parameter
+	llgtr	%r2,%r2               # sigset_emu31_t *
+        lgr     %r3,%r2               # move unewset parameter
+        la      %r2,SP_PTREGS(%r15)   # load pt_regs as first parameter
+	la      %r14,6(%r14)          # skip store of return value
+        jg      sys32_rt_sigsuspend   # branch to sys32_rt_sigsuspend
+#endif
+
+sys_sigaltstack_glue:
+        la      %r4,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys_sigaltstack       # branch to sys_sigaltstack
+
+#ifdef CONFIG_S390_SUPPORT
+sys32_sigaltstack_glue:
+        la      %r4,SP_PTREGS(%r15)   # load pt_regs as parameter
+        jg      sys32_sigaltstack_wrapper # branch to sys32_sigaltstack_wrapper
+#endif
+
+/*
+ * Program check handler routine
+ */
+
+        .globl  pgm_check_handler
+pgm_check_handler:
+/*
+ * First we need to check for a special case:
+ * Single stepping an instruction that disables the PER event mask will
+ * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
+ * For a single stepped SVC the program check handler gets control after
+ * the SVC new PSW has been loaded. But we want to execute the SVC first and
+ * then handle the PER event. Therefore we update the SVC old PSW to point
+ * to the pgm_check_handler and branch to the SVC handler after we checked
+ * if we have to load the kernel stack register.
+ * For every other possible cause for PER event without the PER mask set
+ * we just ignore the PER event (FIXME: is there anything we have to do
+ * for LPSW?).
+ */
+	STORE_TIMER __LC_SYNC_ENTER_TIMER
+	SAVE_ALL_BASE __LC_SAVE_AREA
+        tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
+        jnz     pgm_per                  # got per exception -> special case
+	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	pgm_no_vtime
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime:
+#endif
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	lgf     %r3,__LC_PGM_ILC	 # load program interruption code
+	lghi	%r8,0x7f
+	ngr	%r8,%r3
+pgm_do_call:
+        sll     %r8,3
+        larl    %r1,pgm_check_table
+        lg      %r1,0(%r8,%r1)		 # load address of handler routine
+        la      %r2,SP_PTREGS(%r15)	 # address of register-save area
+	larl	%r14,sysc_return
+        br      %r1			 # branch to interrupt-handler
+
+#
+# handle per exception
+#
+pgm_per:
+        tm      __LC_PGM_OLD_PSW,0x40    # test if per event recording is on
+        jnz     pgm_per_std              # ok, normal per event from user space
+# ok, it's one of the special cases; now we need to find out which one
+        clc     __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
+        je      pgm_svcper
+# no interesting special case, ignore PER event
+	lmg	%r12,%r15,__LC_SAVE_AREA
+	lpswe   __LC_PGM_OLD_PSW
+
+#
+# Normal per exception
+#
+pgm_per_std:
+	SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	pgm_no_vtime2
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime2:
+#endif
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	lg	%r1,__TI_task(%r9)
+	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
+	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+	oi	__TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	lgf     %r3,__LC_PGM_ILC	 # load program interruption code
+	lghi	%r8,0x7f
+	ngr	%r8,%r3			 # clear per-event-bit and ilc
+	je	sysc_return
+	j	pgm_do_call
+
+#
+# it was a single stepped SVC that is causing all the trouble
+#
+pgm_svcper:
+	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	pgm_no_vtime3
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+pgm_no_vtime3:
+#endif
+	llgh    %r7,__LC_SVC_INT_CODE	# get svc number from lowcore
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	lg	%r1,__TI_task(%r9)
+	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
+	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
+	oi	__TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	j	sysc_do_svc
+
+/*
+ * IO interrupt handler routine
+ */
+        .globl io_int_handler
+io_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stck	__LC_INT_CLOCK
+	SAVE_ALL_BASE __LC_SAVE_AREA+32
+        SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	io_no_vtime
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+io_no_vtime:
+#endif
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+        la      %r2,SP_PTREGS(%r15)    # address of register-save area
+	brasl   %r14,do_IRQ            # call standard irq handler
+
+io_return:
+        tm      SP_PSW+1(%r15),0x01    # returning to user ?
+#ifdef CONFIG_PREEMPT
+	jno     io_preempt             # no -> check for preemptive scheduling
+#else
+        jno     io_leave               # no -> skip resched & signal
+#endif
+	tm	__TI_flags+7(%r9),_TIF_WORK_INT
+	jnz	io_work                # there is work to do (signals etc.)
+io_leave:
+        RESTORE_ALL 0
+
+#ifdef CONFIG_PREEMPT
+io_preempt:
+	icm	%r0,15,__TI_precount(%r9)	
+	jnz     io_leave
+	# switch to kernel stack
+	lg	%r1,SP_R15(%r15)
+	aghi	%r1,-SP_SIZE
+	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
+        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	lgr	%r15,%r1
+io_resume_loop:
+	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
+	jno	io_leave
+	larl    %r1,.Lc_pactive
+	mvc     __TI_precount(4,%r9),0(%r1)
+        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+	brasl   %r14,schedule          # call schedule
+        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
+	xc      __TI_precount(4,%r9),__TI_precount(%r9)
+	j	io_resume_loop
+#endif
+
+#
+# switch to kernel stack, then check TIF bits
+#
+io_work:
+	lg	%r1,__LC_KERNEL_STACK
+	aghi	%r1,-SP_SIZE
+	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
+        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	lgr	%r15,%r1
+#
+# One of the work bits is on. Find out which one.
+# The bits checked are _TIF_SIGPENDING and _TIF_NEED_RESCHED.
+#
+io_work_loop:
+	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
+	jo	io_reschedule
+	tm	__TI_flags+7(%r9),_TIF_SIGPENDING
+	jo	io_sigpending
+	j	io_leave
+
+#
+# _TIF_NEED_RESCHED is set, call schedule
+#	
+io_reschedule:        
+	stosm   __SF_EMPTY(%r15),0x03	# reenable interrupts
+	brasl   %r14,schedule		# call scheduler
+	stnsm   __SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	tm	__TI_flags+7(%r9),_TIF_WORK_INT
+	jz	io_leave		# there is no work to do
+	j	io_work_loop
+
+#
+# _TIF_SIGPENDING is set, call do_signal
+#
+io_sigpending:     
+	stosm   __SF_EMPTY(%r15),0x03	# reenable interrupts
+	la      %r2,SP_PTREGS(%r15)	# load pt_regs
+	slgr    %r3,%r3			# clear *oldset
+	brasl	%r14,do_signal		# call do_signal
+	stnsm   __SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	j	sysc_leave		# out of here, do NOT recheck
+
+/*
+ * External interrupt handler routine
+ */
+        .globl  ext_int_handler
+ext_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	stck	__LC_INT_CLOCK
+	SAVE_ALL_BASE __LC_SAVE_AREA+32
+        SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	ext_no_vtime
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ext_no_vtime:
+#endif
+	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	la	%r2,SP_PTREGS(%r15)    # address of register-save area
+	llgh	%r3,__LC_EXT_INT_CODE  # get interruption code
+	brasl   %r14,do_extint
+	j	io_return
+
+/*
+ * Machine check handler routines
+ */
+        .globl mcck_int_handler
+mcck_int_handler:
+	STORE_TIMER __LC_ASYNC_ENTER_TIMER
+	SAVE_ALL_BASE __LC_SAVE_AREA+64
+        SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	mcck_no_vtime
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+mcck_no_vtime:
+#endif
+	brasl   %r14,s390_do_machine_check
+mcck_return:
+        RESTORE_ALL 0
+
+#ifdef CONFIG_SMP
+/*
+ * Restart interruption handler, kick starter for additional CPUs
+ */
+        .globl restart_int_handler
+restart_int_handler:
+        lg      %r15,__LC_SAVE_AREA+120 # load ksp
+        lghi    %r10,__LC_CREGS_SAVE_AREA
+        lctlg   %c0,%c15,0(%r10) # get new ctl regs
+        lghi    %r10,__LC_AREGS_SAVE_AREA
+        lam     %a0,%a15,0(%r10)
+        lmg     %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+        stosm   __SF_EMPTY(%r15),0x04    # now we can turn dat on
+	jg      start_secondary
+#else
+/*
+ * If we do not run with SMP enabled, let the new CPU crash ...
+ */
+        .globl restart_int_handler
+restart_int_handler:
+        basr    %r1,0
+restart_base:
+        lpswe   restart_crash-restart_base(%r1)
+        .align 8
+restart_crash:
+        .long  0x000a0000,0x00000000,0x00000000,0x00000000
+restart_go:
+#endif
+
+#ifdef CONFIG_CHECK_STACK
+/*
+ * The synchronous or the asynchronous stack overflowed. We are dead.
+ * No need to properly save the registers, we are going to panic anyway.
+ * Set up a pt_regs so that show_trace can provide a good call trace.
+ */
+stack_overflow:
+	lg	%r15,__LC_PANIC_STACK	# change to panic stack
+	aghi	%r15,-SP_SIZE		# make room for registers & psw
+	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
+	stmg	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+	la	%r1,__LC_SAVE_AREA
+	chi	%r12,__LC_SVC_OLD_PSW
+	je	0f
+	chi	%r12,__LC_PGM_OLD_PSW
+	je	0f
+	la	%r1,__LC_SAVE_AREA+16
+0:	mvc	SP_R12(32,%r15),0(%r1)  # move %r12-%r15 to stack
+        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
+        la      %r2,SP_PTREGS(%r15)	# load pt_regs
+	jg	kernel_stack_overflow
+#endif
+
+cleanup_table_system_call:
+	.quad	system_call, sysc_do_svc
+cleanup_table_sysc_return:
+	.quad	sysc_return, sysc_leave
+cleanup_table_sysc_leave:
+	.quad	sysc_leave, sysc_work_loop
+cleanup_table_sysc_work_loop:
+	.quad	sysc_work_loop, sysc_reschedule
+
+cleanup_critical:
+	clc	8(8,%r12),BASED(cleanup_table_system_call)
+	jl	0f
+	clc	8(8,%r12),BASED(cleanup_table_system_call+8)
+	jl	cleanup_system_call
+0:
+	clc	8(8,%r12),BASED(cleanup_table_sysc_return)
+	jl	0f
+	clc	8(8,%r12),BASED(cleanup_table_sysc_return+8)
+	jl	cleanup_sysc_return
+0:
+	clc	8(8,%r12),BASED(cleanup_table_sysc_leave)
+	jl	0f
+	clc	8(8,%r12),BASED(cleanup_table_sysc_leave+8)
+	jl	cleanup_sysc_leave
+0:
+	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop)
+	jl	0f
+	clc	8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
+	jl	cleanup_sysc_leave
+0:
+	br	%r14
+
+cleanup_system_call:
+	mvc	__LC_RETURN_PSW(16),0(%r12)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
+	jh	0f
+	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0:	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
+	jhe	cleanup_vtime
+#endif
+	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
+	jh	0f
+	mvc	__LC_SAVE_AREA(32),__LC_SAVE_AREA+32
+0:	stg	%r13,__LC_SAVE_AREA+40
+	SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1
+	stg	%r15,__LC_SAVE_AREA+56
+	llgh	%r7,__LC_SVC_INT_CODE
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+cleanup_vtime:
+	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
+	jhe	cleanup_stime
+	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
+	jz	cleanup_novtime
+	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
+cleanup_stime:
+	clc	__LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
+	jh	cleanup_update
+	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
+cleanup_update:
+	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+cleanup_novtime:
+#endif
+	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
+	la	%r12,__LC_RETURN_PSW
+	br	%r14
+cleanup_system_call_insn:
+	.quad	sysc_saveall
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.quad   system_call
+	.quad   sysc_vtime
+	.quad   sysc_stime
+	.quad   sysc_update
+#endif
+
+cleanup_sysc_return:
+	mvc	__LC_RETURN_PSW(8),0(%r12)
+	mvc	__LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
+	la	%r12,__LC_RETURN_PSW
+	br	%r14
+
+cleanup_sysc_leave:
+	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn)
+	je	0f
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+	clc	8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
+	je	0f
+#endif
+	mvc	__LC_RETURN_PSW(16),SP_PSW(%r15)
+	mvc	__LC_SAVE_AREA+32(32),SP_R12(%r15)
+	lmg	%r0,%r11,SP_R0(%r15)
+	lg	%r15,SP_R15(%r15)
+0:	la	%r12,__LC_RETURN_PSW
+	br	%r14
+cleanup_sysc_leave_insn:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.quad	sysc_leave + 16
+#endif
+	.quad	sysc_leave + 12
+
+/*
+ * Integer constants
+ */
+               .align 4
+.Lconst:
+.Lc_pactive:   .long  PREEMPT_ACTIVE
+.Lnr_syscalls: .long  NR_syscalls
+.L0x0130:      .short 0x130
+.L0x0140:      .short 0x140
+.L0x0150:      .short 0x150
+.L0x0160:      .short 0x160
+.L0x0170:      .short 0x170
+.Lcritical_start:
+               .quad  __critical_start
+.Lcritical_end:
+               .quad  __critical_end
+
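+#
+# The system call tables are generated from syscalls.S: each SYSCALL
+# entry there names the 31-bit, 64-bit and compat handler for one
+# system call, and redefining the SYSCALL macro before each #include
+# selects which of the three columns is emitted into the table.
+#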
+#define SYSCALL(esa,esame,emu)	.long esame
+	.globl  sys_call_table
+sys_call_table:
+#include "syscalls.S"
+#undef SYSCALL
+
+#ifdef CONFIG_S390_SUPPORT
+
+#define SYSCALL(esa,esame,emu)	.long emu
+	.globl  sys_call_table_emu
+sys_call_table_emu:
+#include "syscalls.S"
+#undef SYSCALL
+#endif
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
new file mode 100644
index 000000000000..b804c55bd919
--- /dev/null
+++ b/arch/s390/kernel/head.S
@@ -0,0 +1,772 @@
+/*
+ *  arch/s390/kernel/head.S
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Rob van der Heij (rvdhei@iae.nl)
+ *
+ * There are 5 different IPL methods
+ *  1) load the image directly into ram at address 0 and do a PSW restart
+ *  2) linload will load the image from address 0x10000 to memory 0x10000
+ *     and start the code through LPSW 0x0008000080010000 (VM only, deprecated)
+ *  3) generate the tape ipl header, store the generated image on a tape
+ *     and ipl from it
+ *     In case of SL tape you need to IPL 5 times to get past VOL1 etc
+ *  4) generate the vm reader ipl header, move the generated image to the
+ *     VM reader (use option NOH!) and do an IPL from the reader (VM only)
+ *  5) direct call of start by the SALIPL loader
+ *  We use the cpuid to distinguish between VM and native ipl
+ *  params for kernel are pushed to 0x10400 (see setup.h)
+
+    Changes: 
+    Oct 25 2000 <rvdheij@iae.nl>
+	added code to skip HDR and EOF to allow SL tape IPL (5 retries)
+	changed first CCW from rewind to backspace block
+
+ */
+
+#include <linux/config.h>
+#include <asm/setup.h>
+#include <asm/lowcore.h>
+#include <asm/offsets.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+#ifndef CONFIG_IPL
+        .org   0
+        .long  0x00080000,0x80000000+startup   # Just a restart PSW
+#else
+#ifdef CONFIG_IPL_TAPE
+#define IPL_BS 1024
+        .org   0
+        .long  0x00080000,0x80000000+iplstart  # The first 24 bytes are loaded
+        .long  0x27000000,0x60000001           # by ipl to addresses 0-23.
+        .long  0x02000000,0x20000000+IPL_BS    # (a PSW and two CCWs).
+        .long  0x00000000,0x00000000           # external old psw
+        .long  0x00000000,0x00000000           # svc old psw
+        .long  0x00000000,0x00000000           # program check old psw
+        .long  0x00000000,0x00000000           # machine check old psw
+        .long  0x00000000,0x00000000           # io old psw
+        .long  0x00000000,0x00000000
+        .long  0x00000000,0x00000000
+        .long  0x00000000,0x00000000
+        .long  0x000a0000,0x00000058           # external new psw
+        .long  0x000a0000,0x00000060           # svc new psw
+        .long  0x000a0000,0x00000068           # program check new psw
+        .long  0x000a0000,0x00000070           # machine check new psw
+        .long  0x00080000,0x80000000+.Lioint   # io new psw
+
+        .org   0x100
+#
+# subroutine for loading from tape
+# Parameters:
+#  R1 = device number
+#  R2 = load address
+.Lloader:	
+        st    %r14,.Lldret
+        la    %r3,.Lorbread                    # r3 = address of orb 
+	la    %r5,.Lirb                        # r5 = address of irb
+        st    %r2,.Lccwread+4                  # initialize CCW data addresses
+        lctl  %c6,%c6,.Lcr6               
+        slr   %r2,%r2
+.Lldlp:
+        la    %r6,3                            # 3 retries
+.Lssch:
+        ssch  0(%r3)                           # load chunk of IPL_BS bytes
+        bnz   .Llderr
+.Lw4end:
+        bas   %r14,.Lwait4io
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Lrecov
+        slr   %r7,%r7
+        icm   %r7,3,10(%r5)                    # get residual count
+        lcr   %r7,%r7
+        la    %r7,IPL_BS(%r7)                  # IPL_BS-residual=#bytes read
+        ar    %r2,%r7                          # add to total size
+        tm    8(%r5),0x01                      # found a tape mark ?
+        bnz   .Ldone
+        l     %r0,.Lccwread+4                  # update CCW data addresses
+        ar    %r0,%r7
+        st    %r0,.Lccwread+4                
+        b     .Lldlp
+.Ldone:
+        l     %r14,.Lldret
+        br    %r14                             # r2 contains the total size
+.Lrecov:
+        bas   %r14,.Lsense                     # do the sensing
+        bct   %r6,.Lssch                       # dec. retry count & branch
+        b     .Llderr
+#
+# Sense subroutine
+#
+.Lsense:
+        st    %r14,.Lsnsret
+        la    %r7,.Lorbsense              
+        ssch  0(%r7)                           # start sense command
+        bnz   .Llderr
+        bas   %r14,.Lwait4io
+        l     %r14,.Lsnsret
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Llderr
+        br    %r14
+#
+# Wait for interrupt subroutine
+#
+.Lwait4io:
+        lpsw  .Lwaitpsw                 
+.Lioint:
+        c     %r1,0xb8                         # compare subchannel number
+        bne   .Lwait4io
+        tsch  0(%r5)
+        slr   %r0,%r0
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Lwtexit
+        tm    8(%r5),0x04                      # got device end ?
+        bz    .Lwait4io
+.Lwtexit:
+        br    %r14
+.Llderr:
+        lpsw  .Lcrash              
+
+        .align 8
+.Lorbread:
+	.long  0x00000000,0x0080ff00,.Lccwread
+        .align 8
+.Lorbsense:
+        .long  0x00000000,0x0080ff00,.Lccwsense
+        .align 8
+.Lccwread:
+        .long  0x02200000+IPL_BS,0x00000000
+.Lccwsense:
+        .long  0x04200001,0x00000000
+.Lwaitpsw:
+	.long  0x020a0000,0x80000000+.Lioint
+
+.Lirb:	.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:  .long  0xff000000
+        .align 8
+.Lcrash:.long  0x000a0000,0x00000000
+.Lldret:.long  0
+.Lsnsret: .long 0
+#endif  /* CONFIG_IPL_TAPE */
+
+#ifdef CONFIG_IPL_VM
+#define IPL_BS 0x730
+        .org   0
+        .long  0x00080000,0x80000000+iplstart  # The first 24 bytes are loaded
+        .long  0x02000018,0x60000050           # by ipl to addresses 0-23.
+        .long  0x02000068,0x60000050           # (a PSW and two CCWs).
+        .fill  80-24,1,0x40                    # bytes 24-79 are discarded !!
+        .long  0x020000f0,0x60000050           # The next 160 byte are loaded
+        .long  0x02000140,0x60000050           # to addresses 0x18-0xb7
+        .long  0x02000190,0x60000050           # They form the continuation
+        .long  0x020001e0,0x60000050           # of the CCW program started
+        .long  0x02000230,0x60000050           # by ipl and load the range
+        .long  0x02000280,0x60000050           # 0x0f0-0x730 from the image
+        .long  0x020002d0,0x60000050           # to the range 0x0f0-0x730
+        .long  0x02000320,0x60000050           # in memory. At the end of
+        .long  0x02000370,0x60000050           # the channel program the PSW
+        .long  0x020003c0,0x60000050           # at location 0 is loaded.
+        .long  0x02000410,0x60000050           # Initial processing starts
+        .long  0x02000460,0x60000050           # at 0xf0 = iplstart.
+        .long  0x020004b0,0x60000050
+        .long  0x02000500,0x60000050
+        .long  0x02000550,0x60000050
+        .long  0x020005a0,0x60000050
+        .long  0x020005f0,0x60000050
+        .long  0x02000640,0x60000050
+        .long  0x02000690,0x60000050
+        .long  0x020006e0,0x20000050
+
+        .org   0xf0
+#
+# subroutine for loading cards from the reader
+#
+.Lloader:	
+	la    %r3,.Lorb                        # r3 = address of orb
+	la    %r5,.Lirb                        # r5 = address of irb
+        la    %r6,.Lccws              
+        la    %r7,20
+.Linit:
+        st    %r2,4(%r6)                       # initialize CCW data addresses
+        la    %r2,0x50(%r2)
+        la    %r6,8(%r6)
+        bct   7,.Linit
+
+        lctl  %c6,%c6,.Lcr6                    # set IO subclass mask
+	slr   %r2,%r2
+.Lldlp:
+        ssch  0(%r3)                           # load chunk of 1600 bytes
+        bnz   .Llderr
+.Lwait4irq:
+        mvc   __LC_IO_NEW_PSW(8),.Lnewpsw      # set up IO interrupt psw
+        lpsw  .Lwaitpsw              
+.Lioint:
+        c     %r1,0xb8                         # compare subchannel number
+	bne   .Lwait4irq
+	tsch  0(%r5)
+
+	slr   %r0,%r0
+	ic    %r0,8(%r5)                       # get device status
+	chi   %r0,8                            # channel end ?
+	be    .Lcont
+	chi   %r0,12                           # channel end + device end ?
+	be    .Lcont
+
+        l     %r0,4(%r5)
+        s     %r0,8(%r3)                       # r0/8 = number of ccws executed
+        mhi   %r0,10                           # *10 = number of bytes in ccws
+        lh    %r3,10(%r5)                      # get residual count
+        sr    %r0,%r3                          # #ccws*80-residual=#bytes read
+	ar    %r2,%r0
+	
+        br    %r14                             # r2 contains the total size
+
+.Lcont:
+	ahi   %r2,0x640                        # add 0x640 to total size
+        la    %r6,.Lccws             
+        la    %r7,20
+.Lincr:
+        l     %r0,4(%r6)                       # update CCW data addresses
+        ahi   %r0,0x640
+        st    %r0,4(%r6)
+        ahi   %r6,8
+        bct   7,.Lincr
+
+        b     .Lldlp
+.Llderr:
+        lpsw  .Lcrash              
+
+        .align 8
+.Lorb:	.long  0x00000000,0x0080ff00,.Lccws
+.Lirb:	.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:  .long  0xff000000
+.Lloadp:.long  0,0
+        .align 8
+.Lcrash:.long  0x000a0000,0x00000000
+.Lnewpsw:
+        .long  0x00080000,0x80000000+.Lioint
+.Lwaitpsw:
+        .long  0x020a0000,0x80000000+.Lioint
+
+        .align 8
+.Lccws: .rept  19
+        .long  0x02600050,0x00000000
+        .endr
+        .long  0x02200050,0x00000000
+#endif  /* CONFIG_IPL_VM */
+
+iplstart:
+        lh    %r1,0xb8                         # test if subchannel number
+        bct   %r1,.Lnoload                     #  is valid
+	l     %r1,0xb8                         # load ipl subchannel number
+        la    %r2,IPL_BS                       # load start address
+        bas   %r14,.Lloader                    # load rest of ipl image
+        l     %r12,.Lparm                      # pointer to parameter area
+        st    %r1,IPL_DEVICE-PARMAREA(%r12)    # store ipl device number
+
+#
+# load parameter file from ipl device
+#
+.Lagain1:
+ 	l     %r2,INITRD_START-PARMAREA(%r12)  # use ramdisk location as temp
+        bas   %r14,.Lloader                    # load parameter file
+        ltr   %r2,%r2                          # got anything ?
+        bz    .Lnopf
+	chi   %r2,895
+	bnh   .Lnotrunc
+	la    %r2,895
+.Lnotrunc:
+	l     %r4,INITRD_START-PARMAREA(%r12)
+	clc   0(3,%r4),.L_hdr		       # if it is HDRx
+	bz    .Lagain1			       # skip dataset header
+	clc   0(3,%r4),.L_eof		       # if it is EOFx
+	bz    .Lagain1			       # skip dataset trailer
+        la    %r5,0(%r4,%r2)
+        lr    %r3,%r2
+.Lidebc:
+        tm    0(%r5),0x80                      # high order bit set ?
+        bo    .Ldocv                           #  yes -> convert from EBCDIC
+        ahi   %r5,-1
+        bct   %r3,.Lidebc
+        b     .Lnocv
+.Ldocv:
+        l     %r3,.Lcvtab
+        tr    0(256,%r4),0(%r3)                # convert parameters to ascii
+        tr    256(256,%r4),0(%r3)
+        tr    512(256,%r4),0(%r3)
+        tr    768(122,%r4),0(%r3)
+.Lnocv: la    %r3,COMMAND_LINE-PARMAREA(%r12)  # load adr. of command line
+	mvc   0(256,%r3),0(%r4)
+	mvc   256(256,%r3),256(%r4)
+	mvc   512(256,%r3),512(%r4)
+	mvc   768(122,%r3),768(%r4)
+        slr   %r0,%r0
+        b     .Lcntlp
+.Ldelspc:
+        ic    %r0,0(%r2,%r3)
+        chi   %r0,0x20                         # is it a space ?
+        be    .Lcntlp
+        ahi   %r2,1
+        b     .Leolp
+.Lcntlp:
+        brct  %r2,.Ldelspc
+.Leolp:
+        slr   %r0,%r0
+        stc   %r0,0(%r2,%r3)                   # terminate buffer
+.Lnopf:
+
+#
+# load ramdisk from ipl device
+#	
+.Lagain2:
+ 	l     %r2,INITRD_START-PARMAREA(%r12)  # load adr. of ramdisk
+        bas   %r14,.Lloader                    # load ramdisk
+ 	st    %r2,INITRD_SIZE-PARMAREA(%r12)   # store size of ramdisk
+        ltr   %r2,%r2
+        bnz   .Lrdcont
+        st    %r2,INITRD_START-PARMAREA(%r12)  # no ramdisk found, null it
+.Lrdcont:
+	l     %r2,INITRD_START-PARMAREA(%r12)
+
+	clc   0(3,%r2),.L_hdr		       # skip HDRx and EOFx 
+	bz    .Lagain2
+	clc   0(3,%r2),.L_eof
+	bz    .Lagain2
+
+#ifdef CONFIG_IPL_VM
+#
+# reset files in VM reader
+#
+        stidp __LC_CPUID                       # store cpuid
+	tm    __LC_CPUID,0xff                  # running VM ?
+	bno   .Lnoreset
+        la    %r2,.Lreset              
+        lhi   %r3,26
+        .long 0x83230008
+.Lnoreset:
+#endif
+	
+#
+# everything loaded, go for it
+#
+.Lnoload:
+        l     %r1,.Lstartup
+        br    %r1
+
+.Lparm:	.long  PARMAREA
+.Lstartup: .long startup
+.Lcvtab:.long  _ebcasc                         # ebcdic to ascii table
+.Lreset:.byte  0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
+        .byte  0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
+        .byte  0xc8,0xd6,0xd3,0xc4             # "change rdr all keep nohold"
+.L_eof: .long  0xc5d6c600       /* C'EOF' */
+.L_hdr: .long  0xc8c4d900       /* C'HDR' */
+
+#endif  /* CONFIG_IPL */
+
+#
+# SALIPL loader support. Based on a patch by Rob van der Heij.
+# This entry point is called directly from the SALIPL loader and
+# doesn't need a builtin ipl record.
+#
+        .org  0x800
+	.globl start
+start:
+	stm   %r0,%r15,0x07b0		# store registers
+	basr  %r12,%r0
+.base:
+	l     %r11,.parm
+	l     %r8,.cmd			# pointer to command buffer
+
+	ltr   %r9,%r9			# do we have SALIPL parameters?
+	bp    .sk8x8
+
+	mvc   0(64,%r8),0x00b0		# copy saved registers
+	xc    64(240-64,%r8),0(%r8)	# remainder of buffer
+	tr    0(64,%r8),.lowcase	
+	b     .gotr
+.sk8x8:
+	mvc   0(240,%r8),0(%r9)		# copy iplparms into buffer
+.gotr:
+	l     %r10,.tbl			# EBCDIC to ASCII table
+	tr    0(240,%r8),0(%r10)
+	stidp __LC_CPUID		# are we running under VM ?
+	cli   __LC_CPUID,0xff
+	bnz   .test
+	.long 0x83300060		# diag 3,0,x'0060' - storage size
+	b     .done
+.test:
+	mvc   0x68(8),.pgmnw		# set up pgm check handler
+	l     %r2,.fourmeg
+	lr    %r3,%r2
+	bctr  %r3,%r0			# 4M-1
+.loop:  iske  %r0,%r3
+	ar    %r3,%r2
+.pgmx:
+	sr    %r3,%r2
+	la    %r3,1(%r3)
+.done:
+        l     %r1,.memsize
+	st    %r3,0(%r1)
+	slr   %r0,%r0
+	st    %r0,INITRD_SIZE-PARMAREA(%r11)
+	st    %r0,INITRD_START-PARMAREA(%r11)
+	j     startup                   # continue with startup
+.tbl:	.long _ebcasc			# translate table
+.cmd:	.long COMMAND_LINE		# address of command line buffer
+.parm:	.long PARMAREA
+.memsize: .long memory_size
+.fourmeg: .long 0x00400000      	# 4M
+.pgmnw:	.long 0x00080000,.pgmx
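+# EBCDIC translate table: maps upper-case letters to lower case and
+# leaves all other byte values unchanged.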
+.lowcase:
+	.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 
+	.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
+	.byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 
+	.byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
+	.byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 
+	.byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
+	.byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 
+	.byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
+	.byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 
+	.byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
+	.byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 
+	.byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
+	.byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 
+	.byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
+	.byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 
+	.byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
+
+	.byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 
+	.byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
+	.byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 
+	.byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
+	.byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 
+	.byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
+	.byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 
+	.byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
+	.byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87	# .abcdefg 
+	.byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf	# hi
+	.byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 	# .jklmnop
+	.byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf	# qr
+	.byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7	# ..stuvwx
+	.byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef	# yz
+	.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 
+	.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
+
+#
+# startup-code at 0x10000, running in real mode
+# this is called either by the ipl loader or directly by PSW restart
+# or linload or SALIPL
+#
+        .org  0x10000
+startup:basr  %r13,0                     # get base
+.LPG1:  lctl  %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
+	la    %r12,_pstart-.LPG1(%r13)   # pointer to parameter area
+					 # move IPL device to lowcore
+        mvc   __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
+	
+#
+# clear bss memory
+#
+        l     %r2,.Lbss_bgn-.LPG1(%r13) # start of bss
+        l     %r3,.Lbss_end-.LPG1(%r13) # end of bss
+        sr    %r3,%r2                   # length of bss
+        sr    %r4,%r4                   #
+        sr    %r5,%r5                   # set src,length and pad to zero
+        sr    %r0,%r0                   #
+        mvcle %r2,%r4,0                 # clear mem
+        jo    .-4                       # branch back, if not finished
+
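+#
+# Determine the memory size via the SCLP service call: issue a
+# "read SCP info forced" command first and fall back to the plain
+# "read SCP info" command on error code 0x01f0. The returned number
+# of storage increments times the increment size gives the memory
+# size in MB, which is then converted to bytes.
+#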
+	l     %r2,.Lrcp-.LPG1(%r13)	# Read SCP forced command word
+.Lservicecall:
+	stosm .Lpmask-.LPG1(%r13),0x01	# authorize ext interrupts
+
+	stctl %r0, %r0,.Lcr-.LPG1(%r13)	# get cr0
+	la    %r1,0x200			# set bit 22
+	o     %r1,.Lcr-.LPG1(%r13)	# or old cr0 with r1
+	st    %r1,.Lcr-.LPG1(%r13)
+	lctl  %r0, %r0,.Lcr-.LPG1(%r13)	# load modified cr0
+
+	mvc   __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw
+	la    %r1, .Lsclph-.LPG1(%r13)
+	a     %r1,__LC_EXT_NEW_PSW+4	# set handler
+	st    %r1,__LC_EXT_NEW_PSW+4
+
+	la    %r4,_pstart-.LPG1(%r13)	# %r4 is our index for sccb stuff
+	la    %r1, .Lsccb-PARMAREA(%r4)	# our sccb
+	.insn rre,0xb2200000,%r2,%r1	# service call
+	ipm   %r1
+	srl   %r1,28			# get cc code
+	xr    %r3, %r3
+	chi   %r1,3
+	be    .Lfchunk-.LPG1(%r13)	# leave
+	chi   %r1,2
+	be    .Lservicecall-.LPG1(%r13)
+	lpsw  .Lwaitsclp-.LPG1(%r13)
+.Lsclph:
+	lh    %r1,.Lsccbr-PARMAREA(%r4)
+	chi   %r1,0x10			# 0x0010 is the success code
+	je    .Lprocsccb		# let's process the sccb
+	chi   %r1,0x1f0
+	bne   .Lfchunk-.LPG1(%r13)	# unhandled error code
+	c     %r2, .Lrcp-.LPG1(%r13)	# Did we try Read SCP forced
+	bne   .Lfchunk-.LPG1(%r13)	# if no, give up
+	l     %r2, .Lrcp2-.LPG1(%r13)	# try with Read SCP
+	b     .Lservicecall-.LPG1(%r13)
+.Lprocsccb:
+	lh    %r1,.Lscpincr1-PARMAREA(%r4) # use this one if != 0
+	chi   %r1,0x00
+	jne   .Lscnd
+	l     %r1,.Lscpincr2-PARMAREA(%r4) # otherwise use this one
+.Lscnd:
+	xr    %r3,%r3			# same logic
+	ic    %r3,.Lscpa1-PARMAREA(%r4)
+	chi   %r3,0x00
+	jne   .Lcompmem
+	l     %r3,.Lscpa2-PARMAREA(%r13)
+.Lcompmem:
+	mr    %r2,%r1			# mem in MB, 64-bit product in %r2/%r3
+	l     %r1,.Lonemb-.LPG1(%r13)
+	mr    %r2,%r1			# mem size in bytes in %r3
+	b     .Lfchunk-.LPG1(%r13)
+
+.Lpmask:
+	.byte 0
+.align 8
+.Lpcext:.long  0x00080000,0x80000000
+.Lcr:
+	.long 0x00			# place holder for cr0
+.Lwaitsclp:
+	.long 0x020A0000
+	.long .Lsclph
+.Lrcp:
+	.int 0x00120001			# Read SCP forced code
+.Lrcp2:
+	.int 0x00020001			# Read SCP code
+.Lonemb:
+	.int 0x100000
+.Lfchunk:
+
+#
+# find memory chunks.
+#
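+# Storage is probed with tprot in 128KB steps. As long as the access
+# code (condition code) stays the same we are still in the same chunk;
+# when it changes, or tprot raises a program check, the chunk is
+# recorded in the memory_chunk array and a new one is started.
+#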
+	lr    %r9,%r3			 # end of mem
+	mvc   __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13)
+	la    %r1,1                      # test in increments of 128KB
+	sll   %r1,17
+	l     %r3,.Lmchunk-.LPG1(%r13)   # get pointer to memory_chunk array
+	slr   %r4,%r4                    # set start of chunk to zero
+	slr   %r5,%r5                    # set end of chunk to zero
+	slr   %r6,%r6			 # set access code to zero
+	la    %r10, MEMORY_CHUNKS	 # number of chunks
+.Lloop:
+	tprot 0(%r5),0			 # test protection of first byte
+	ipm   %r7
+	srl   %r7,28
+	clr   %r6,%r7			 # compare cc with last access code
+	be    .Lsame-.LPG1(%r13)
+	b     .Lchkmem-.LPG1(%r13)
+.Lsame:
+	ar    %r5,%r1			 # add 128KB to end of chunk
+	bno   .Lloop-.LPG1(%r13)	 # r1 < 0x80000000 -> loop
+.Lchkmem:				 # > 2GB or tprot got a program check
+	clr   %r4,%r5			 # chunk size > 0?
+	be    .Lchkloop-.LPG1(%r13)
+	st    %r4,0(%r3)		 # store start address of chunk
+	lr    %r0,%r5
+	slr   %r0,%r4
+	st    %r0,4(%r3)		 # store size of chunk
+	st    %r6,8(%r3)		 # store type of chunk
+	la    %r3,12(%r3)
+	l     %r4,.Lmemsize-.LPG1(%r13)	 # address of variable memory_size
+	st    %r5,0(%r4)		 # store last end to memory size
+	ahi   %r10,-1			 # update chunk number
+.Lchkloop:
+	lr    %r6,%r7			 # set access code to last cc
+	# we got an exception or we're starting a new chunk.
+	# check whether we should keep looking for valid memory
+	# (only if we detected the total amount of storage) and
+	# whether we have chunks left
+	xr    %r0,%r0
+	clr   %r0,%r9			 # did we detect memory?
+	je    .Ldonemem			 # if not, leave
+	chi   %r10,0			 # do we have chunks left?
+	je    .Ldonemem
+	alr   %r5,%r1			 # add 128KB to end of chunk
+	lr    %r4,%r5			 # potential new chunk
+	clr    %r5,%r9			 # should we go on?
+	jl     .Lloop
+.Ldonemem:		
+        l      %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
+#
+# find out if we are running under VM
+#
+        stidp  __LC_CPUID               # store cpuid
+	tm     __LC_CPUID,0xff          # running under VM ?
+	bno    .Lnovm-.LPG1(%r13)
+        oi     3(%r12),1                # set VM flag
+.Lnovm:
+        lh     %r0,__LC_CPUID+4         # get cpu version
+        chi    %r0,0x7490               # running on a P/390 ?
+        bne    .Lnop390-.LPG1(%r13)
+        oi     3(%r12),4                # set P/390 flag
+.Lnop390:
+
+#
+# find out if we have an IEEE fpu
+#
+        mvc    __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
+	efpc   %r0,0                    # test IEEE extract fpc instruction
+        oi     3(%r12),2                # set IEEE fpu flag
+.Lchkfpu:
+
+#
+# find out if we have the CSP instruction
+#
+       mvc    __LC_PGM_NEW_PSW(8),.Lpccsp-.LPG1(%r13)
+       la     %r0,0
+       lr     %r1,%r0
+       la     %r2,4
+       csp    %r0,%r2                   # Test CSP instruction
+       oi     3(%r12),8                 # set CSP flag
+.Lchkcsp:
+
+#
+# find out if we have the MVPG instruction
+#
+       mvc    __LC_PGM_NEW_PSW(8),.Lpcmvpg-.LPG1(%r13)
+       sr     %r0,%r0
+       la     %r1,0
+       la     %r2,0
+       mvpg   %r1,%r2                   # Test MVPG instruction
+       oi     3(%r12),16                # set MVPG flag
+.Lchkmvpg:
+
+#
+# find out if we have the IDTE instruction
+#
+	mvc	__LC_PGM_NEW_PSW(8),.Lpcidte-.LPG1(%r13)
+	.long	0xb2b10000		# store facility list
+	tm	0xc8,0x08		# check bit for clearing-by-ASCE
+	bno	.Lchkidte-.LPG1(%r13)
+	lhi	%r1,2094
+	lhi	%r2,0
+	.long	0xb98e2001
+	oi	3(%r12),0x80		# set IDTE flag
+.Lchkidte:
+
+        lpsw  .Lentry-.LPG1(13)         # jump to _stext in primary-space,
+                                        # virtual and never return ...
+        .align 8
+.Lentry:.long  0x00080000,0x80000000 + _stext
+.Lctl:  .long  0x04b50002               # cr0: various things
+        .long  0                        # cr1: primary space segment table
+        .long  .Lduct                   # cr2: dispatchable unit control table
+        .long  0                        # cr3: instruction authorization
+        .long  0                        # cr4: instruction authorization
+        .long  0xffffffff               # cr5: primary-aste origin
+        .long  0                        # cr6:  I/O interrupts
+        .long  0                        # cr7:  secondary space segment table
+        .long  0                        # cr8:  access registers translation
+        .long  0                        # cr9:  tracing off
+        .long  0                        # cr10: tracing off
+        .long  0                        # cr11: tracing off
+        .long  0                        # cr12: tracing off
+        .long  0                        # cr13: home space segment table
+        .long  0xc0000000               # cr14: machine check handling off
+        .long  0                        # cr15: linkage stack operations
+.Lpcmem:.long  0x00080000,0x80000000 + .Lchkmem
+.Lpcfpu:.long  0x00080000,0x80000000 + .Lchkfpu
+.Lpccsp:.long  0x00080000,0x80000000 + .Lchkcsp
+.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
+.Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte
+.Lmemsize:.long memory_size
+.Lmchunk:.long memory_chunk
+.Lmflags:.long machine_flags
+.Lbss_bgn:  .long  __bss_start
+.Lbss_end:  .long  _end
+
+	.org PARMAREA-64
+.Lduct:	.long 0,0,0,0,0,0,0,0
+	.long 0,0,0,0,0,0,0,0
+
+#
+# params at 10400 (setup.h)
+#
+	.org   PARMAREA
+	.global _pstart
+_pstart:	
+        .long  0,0                      # IPL_DEVICE
+        .long  0,RAMDISK_ORIGIN         # INITRD_START
+        .long  0,RAMDISK_SIZE           # INITRD_SIZE
+
+        .org   COMMAND_LINE
+    	.byte  "root=/dev/ram0 ro"
+        .byte  0
+	.org   0x11000
+.Lsccb:
+	.hword 0x1000			# length, one page
+	.byte 0x00,0x00,0x00
+	.byte 0x80			# variable response bit set
+.Lsccbr:
+	.hword 0x00			# response code
+.Lscpincr1:
+	.hword 0x00
+.Lscpa1:
+	.byte 0x00
+	.fill 89,1,0
+.Lscpa2:
+	.int 0x00
+.Lscpincr2:
+	.quad 0x00
+	.fill 3984,1,0
+	.org 0x12000
+	.global _pend
+_pend:	
+
+#ifdef CONFIG_SHARED_KERNEL
+	.org   0x100000
+#endif
+
+#
+# startup-code, running in virtual mode
+#
+        .globl _stext
+_stext:	basr  %r13,0                    # get base
+.LPG2:
+#
+# Setup stack
+#
+        l     %r15,.Linittu-.LPG2(%r13)
+	mvc   __LC_CURRENT(4),__TI_task(%r15)
+        ahi   %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+        st    %r15,__LC_KERNEL_STACK    # set end of kernel stack
+        ahi   %r15,-96
+        xc    __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
+
+# check control registers
+        stctl  %c0,%c15,0(%r15)
+	oi     2(%r15),0x20             # enable sigp external interrupts
+	oi     0(%r15),0x10             # switch on low address protection
+        lctl   %c0,%c15,0(%r15)
+
+#
+        lam    0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess
+        l      %r14,.Lstart-.LPG2(%r13)
+        basr   %r14,%r14                # call start_kernel
+#
+# We returned from start_kernel ?!? PANIC
+#
+        basr  %r13,0
+	lpsw  .Ldw-.(%r13)           # load disabled wait psw
+#
+            .align 8
+.Ldw:	    .long  0x000a0000,0x00000000
+.Linittu:   .long  init_thread_union
+.Lstart:    .long  start_kernel
+.Laregs:    .long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
new file mode 100644
index 000000000000..8366793bc371
--- /dev/null
+++ b/arch/s390/kernel/head64.S
@@ -0,0 +1,769 @@
+/*
+ *  arch/s390/kernel/head64.S
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Rob van der Heij (rvdhei@iae.nl)
+ *
+ * There are 5 different IPL methods
+ *  1) load the image directly into ram at address 0 and do a PSW restart
+ *  2) linload will load the image from address 0x10000 to memory 0x10000
+ *     and start the code through LPSW 0x0008000080010000 (VM only, deprecated)
+ *  3) generate the tape ipl header, store the generated image on a tape
+ *     and ipl from it
+ *     In case of SL tape you need to IPL 5 times to get past VOL1 etc
+ *  4) generate the vm reader ipl header, move the generated image to the
+ *     VM reader (use option NOH!) and do an IPL from the reader (VM only)
+ *  5) direct call of start by the SALIPL loader
+ *  We use the cpuid to distinguish between VM and native ipl
+ *  params for kernel are pushed to 0x10400 (see setup.h)
+
+    Changes: 
+    Oct 25 2000 <rvdheij@iae.nl>
+	added code to skip HDR and EOF to allow SL tape IPL (5 retries)
+	changed first CCW from rewind to backspace block
+
+ */
+
+#include <linux/config.h>
+#include <asm/setup.h>
+#include <asm/lowcore.h>
+#include <asm/offsets.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+#ifndef CONFIG_IPL
+        .org   0
+        .long  0x00080000,0x80000000+startup   # Just a restart PSW
+#else
+#ifdef CONFIG_IPL_TAPE
+#define IPL_BS 1024
+        .org   0
+        .long  0x00080000,0x80000000+iplstart  # The first 24 bytes are loaded
+        .long  0x27000000,0x60000001           # by ipl to addresses 0-23.
+        .long  0x02000000,0x20000000+IPL_BS    # (a PSW and two CCWs).
+        .long  0x00000000,0x00000000           # external old psw
+        .long  0x00000000,0x00000000           # svc old psw
+        .long  0x00000000,0x00000000           # program check old psw
+        .long  0x00000000,0x00000000           # machine check old psw
+        .long  0x00000000,0x00000000           # io old psw
+        .long  0x00000000,0x00000000
+        .long  0x00000000,0x00000000
+        .long  0x00000000,0x00000000
+        .long  0x000a0000,0x00000058           # external new psw
+        .long  0x000a0000,0x00000060           # svc new psw
+        .long  0x000a0000,0x00000068           # program check new psw
+        .long  0x000a0000,0x00000070           # machine check new psw
+        .long  0x00080000,0x80000000+.Lioint   # io new psw
+
+        .org   0x100
+#
+# subroutine for loading from tape
+# Parameters:
+#  R1 = device number
+#  R2 = load address
+.Lloader:	
+        st    %r14,.Lldret
+        la    %r3,.Lorbread                    # r3 = address of orb 
+	la    %r5,.Lirb                        # r5 = address of irb
+        st    %r2,.Lccwread+4                  # initialize CCW data addresses
+        lctl  %c6,%c6,.Lcr6               
+        slr   %r2,%r2
+.Lldlp:
+        la    %r6,3                            # 3 retries
+.Lssch:
+        ssch  0(%r3)                           # load chunk of IPL_BS bytes
+        bnz   .Llderr
+.Lw4end:
+        bas   %r14,.Lwait4io
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Lrecov
+        slr   %r7,%r7
+        icm   %r7,3,10(%r5)                    # get residual count
+        lcr   %r7,%r7
+        la    %r7,IPL_BS(%r7)                  # IPL_BS-residual=#bytes read
+        ar    %r2,%r7                          # add to total size
+        tm    8(%r5),0x01                      # found a tape mark ?
+        bnz   .Ldone
+        l     %r0,.Lccwread+4                  # update CCW data addresses
+        ar    %r0,%r7
+        st    %r0,.Lccwread+4                
+        b     .Lldlp
+.Ldone:
+        l     %r14,.Lldret
+        br    %r14                             # r2 contains the total size
+.Lrecov:
+        bas   %r14,.Lsense                     # do the sensing
+        bct   %r6,.Lssch                       # dec. retry count & branch
+        b     .Llderr
+#
+# Sense subroutine
+#
+.Lsense:
+        st    %r14,.Lsnsret
+        la    %r7,.Lorbsense              
+        ssch  0(%r7)                           # start sense command
+        bnz   .Llderr
+        bas   %r14,.Lwait4io
+        l     %r14,.Lsnsret
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Llderr
+        br    %r14
+#
+# Wait for interrupt subroutine
+#
+.Lwait4io:
+        lpsw  .Lwaitpsw                 
+.Lioint:
+        c     %r1,0xb8                         # compare subchannel number
+        bne   .Lwait4io
+        tsch  0(%r5)
+        slr   %r0,%r0
+        tm    8(%r5),0x82                      # do we have a problem ?
+        bnz   .Lwtexit
+        tm    8(%r5),0x04                      # got device end ?
+        bz    .Lwait4io
+.Lwtexit:
+        br    %r14
+.Llderr:
+        lpsw  .Lcrash              
+
+        .align 8
+.Lorbread:
+	.long  0x00000000,0x0080ff00,.Lccwread
+        .align 8
+.Lorbsense:
+        .long  0x00000000,0x0080ff00,.Lccwsense
+        .align 8
+.Lccwread:
+        .long  0x02200000+IPL_BS,0x00000000
+.Lccwsense:
+        .long  0x04200001,0x00000000
+.Lwaitpsw:
+	.long  0x020a0000,0x80000000+.Lioint
+
+.Lirb:	.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:  .long  0xff000000
+        .align 8
+.Lcrash:.long  0x000a0000,0x00000000
+.Lldret:.long  0
+.Lsnsret: .long 0
+#endif  /* CONFIG_IPL_TAPE */
+
+#ifdef CONFIG_IPL_VM
+#define IPL_BS 0x730
+        .org   0
+        .long  0x00080000,0x80000000+iplstart  # The first 24 bytes are loaded
+        .long  0x02000018,0x60000050           # by ipl to addresses 0-23.
+        .long  0x02000068,0x60000050           # (a PSW and two CCWs).
+        .fill  80-24,1,0x40                    # bytes 24-79 are discarded !!
+        .long  0x020000f0,0x60000050           # The next 160 byte are loaded
+        .long  0x02000140,0x60000050           # to addresses 0x18-0xb7
+        .long  0x02000190,0x60000050           # They form the continuation
+        .long  0x020001e0,0x60000050           # of the CCW program started
+        .long  0x02000230,0x60000050           # by ipl and load the range
+        .long  0x02000280,0x60000050           # 0x0f0-0x730 from the image
+        .long  0x020002d0,0x60000050           # to the range 0x0f0-0x730
+        .long  0x02000320,0x60000050           # in memory. At the end of
+        .long  0x02000370,0x60000050           # the channel program the PSW
+        .long  0x020003c0,0x60000050           # at location 0 is loaded.
+        .long  0x02000410,0x60000050           # Initial processing starts
+        .long  0x02000460,0x60000050           # at 0xf0 = iplstart.
+        .long  0x020004b0,0x60000050
+        .long  0x02000500,0x60000050
+        .long  0x02000550,0x60000050
+        .long  0x020005a0,0x60000050
+        .long  0x020005f0,0x60000050
+        .long  0x02000640,0x60000050
+        .long  0x02000690,0x60000050
+        .long  0x020006e0,0x20000050
+
+        .org   0xf0
+#
+# subroutine for loading cards from the reader
+#
+.Lloader:	
+	la    %r3,.Lorb                        # r3 = address of orb
+	la    %r5,.Lirb                        # r5 = address of irb
+        la    %r6,.Lccws              
+        la    %r7,20
+.Linit:
+        st    %r2,4(%r6)                       # initialize CCW data addresses
+        la    %r2,0x50(%r2)
+        la    %r6,8(%r6)
+        bct   7,.Linit
+
+        lctl  %c6,%c6,.Lcr6                    # set IO subclass mask
+	slr   %r2,%r2
+.Lldlp:
+        ssch  0(%r3)                           # load chunk of 1600 bytes
+        bnz   .Llderr
+.Lwait4irq:
+        mvc   0x78(8),.Lnewpsw                 # set up IO interrupt psw
+        lpsw  .Lwaitpsw              
+.Lioint:
+        c     %r1,0xb8                         # compare subchannel number
+	bne   .Lwait4irq
+	tsch  0(%r5)
+
+	slr   %r0,%r0
+	ic    %r0,8(%r5)                       # get device status
+	chi   %r0,8                            # channel end ?
+	be    .Lcont
+	chi   %r0,12                           # channel end + device end ?
+	be    .Lcont
+
+        l     %r0,4(%r5)
+        s     %r0,8(%r3)                       # r0/8 = number of ccws executed
+        mhi   %r0,10                           # *10 = number of bytes in ccws
+        lh    %r3,10(%r5)                      # get residual count
+        sr    %r0,%r3                          # #ccws*80-residual=#bytes read
+	ar    %r2,%r0
+	
+        br    %r14                             # r2 contains the total size
+
+.Lcont:
+	ahi   %r2,0x640                        # add 0x640 to total size
+        la    %r6,.Lccws             
+        la    %r7,20
+.Lincr:
+        l     %r0,4(%r6)                       # update CCW data addresses
+        ahi   %r0,0x640
+        st    %r0,4(%r6)
+        ahi   %r6,8
+        bct   7,.Lincr
+
+        b     .Lldlp
+.Llderr:
+        lpsw  .Lcrash              
+
+        .align 8
+.Lorb:	.long  0x00000000,0x0080ff00,.Lccws
+.Lirb:	.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:  .long  0xff000000
+.Lloadp:.long  0,0
+        .align 8
+.Lcrash:.long  0x000a0000,0x00000000
+.Lnewpsw:
+        .long  0x00080000,0x80000000+.Lioint
+.Lwaitpsw:
+        .long  0x020a0000,0x80000000+.Lioint
+
+        .align 8
+.Lccws: .rept  19
+        .long  0x02600050,0x00000000
+        .endr
+        .long  0x02200050,0x00000000
+#endif  /* CONFIG_IPL_VM */
+
+iplstart:
+        lh    %r1,0xb8                         # test if subchannel number
+        bct   %r1,.Lnoload                     #  is valid
+	l     %r1,0xb8                         # load ipl subchannel number
+        la    %r2,IPL_BS                       # load start address
+        bas   %r14,.Lloader                    # load rest of ipl image
+        larl  %r12,_pstart                     # pointer to parameter area
+        st    %r1,IPL_DEVICE+4-PARMAREA(%r12)  # store ipl device number
+
+#
+# load parameter file from ipl device
+#
+.Lagain1:
+ 	l     %r2,INITRD_START+4-PARMAREA(%r12)# use ramdisk location as temp
+        bas   %r14,.Lloader                    # load parameter file
+        ltr   %r2,%r2                          # got anything ?
+        bz    .Lnopf
+	chi   %r2,895
+	bnh   .Lnotrunc
+	la    %r2,895
+.Lnotrunc:
+	l     %r4,INITRD_START+4-PARMAREA(%r12)
+ 	clc   0(3,%r4),.L_hdr		       # if it is HDRx
+ 	bz    .Lagain1			       # skip dataset header
+ 	clc   0(3,%r4),.L_eof		       # if it is EOFx
+ 	bz    .Lagain1			       # skip dataset trailer
+        la    %r5,0(%r4,%r2)
+        lr    %r3,%r2
+.Lidebc:
+        tm    0(%r5),0x80                      # high order bit set ?
+        bo    .Ldocv                           #  yes -> convert from EBCDIC
+        ahi   %r5,-1
+        bct   %r3,.Lidebc
+        b     .Lnocv
+.Ldocv:
+        l     %r3,.Lcvtab
+        tr    0(256,%r4),0(%r3)                # convert parameters to ascii
+        tr    256(256,%r4),0(%r3)
+        tr    512(256,%r4),0(%r3)
+        tr    768(122,%r4),0(%r3)
+.Lnocv: la    %r3,COMMAND_LINE-PARMAREA(%r12)  # load adr. of command line
+	mvc   0(256,%r3),0(%r4)
+	mvc   256(256,%r3),256(%r4)
+	mvc   512(256,%r3),512(%r4)
+	mvc   768(122,%r3),768(%r4)
+        slr   %r0,%r0
+        b     .Lcntlp
+.Ldelspc:
+        ic    %r0,0(%r2,%r3)
+        chi   %r0,0x20                         # is it a space ?
+        be    .Lcntlp
+        ahi   %r2,1
+        b     .Leolp
+.Lcntlp:
+        brct  %r2,.Ldelspc
+.Leolp:
+        slr   %r0,%r0
+        stc   %r0,0(%r2,%r3)                   # terminate buffer
+.Lnopf:
+
+#
+# load ramdisk from ipl device
+#
+.Lagain2:
+ 	l     %r2,INITRD_START+4-PARMAREA(%r12)# load adr. of ramdisk
+        bas   %r14,.Lloader                    # load ramdisk
+ 	st    %r2,INITRD_SIZE+4-PARMAREA(%r12) # store size of ramdisk
+        ltr   %r2,%r2
+        bnz   .Lrdcont
+        st    %r2,INITRD_START+4-PARMAREA(%r12)# no ramdisk found, null it
+.Lrdcont:
+	l     %r2,INITRD_START+4-PARMAREA(%r12)
+	clc   0(3,%r2),.L_hdr		       # skip HDRx and EOFx 
+	bz    .Lagain2
+	clc   0(3,%r2),.L_eof
+	bz    .Lagain2
+
+#ifdef CONFIG_IPL_VM
+#
+# reset files in VM reader
+#
+        stidp __LC_CPUID                       # store cpuid
+	tm    __LC_CPUID,0xff                  # running VM ?
+	bno   .Lnoreset
+        la    %r2,.Lreset              
+        lhi   %r3,26
+        .long 0x83230008
+.Lnoreset:
+#endif
+	
+#
+# everything loaded, go for it
+#
+.Lnoload:
+        l     %r1,.Lstartup
+        br    %r1
+
+.Lstartup: .long startup
+.Lcvtab:.long  _ebcasc                         # ebcdic to ascii table
+.Lreset:.byte  0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
+        .byte  0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
+        .byte  0xc8,0xd6,0xd3,0xc4             # "change rdr all keep nohold"
+.L_eof: .long  0xc5d6c600       /* C'EOF' */
+.L_hdr: .long  0xc8c4d900       /* C'HDR' */
+#endif  /* CONFIG_IPL */
+
+#
+# SALIPL loader support. Based on a patch by Rob van der Heij.
+# This entry point is called directly from the SALIPL loader and
+# doesn't need a builtin ipl record.
+#
+        .org  0x800
+	.globl start
+start:
+	stm   %r0,%r15,0x07b0		# store registers
+	basr  %r12,%r0
+.base:
+	l     %r11,.parm
+	l     %r8,.cmd			# pointer to command buffer
+
+	ltr   %r9,%r9			# do we have SALIPL parameters?
+	bp    .sk8x8
+
+	mvc   0(64,%r8),0x00b0		# copy saved registers
+	xc    64(240-64,%r8),0(%r8)	# remainder of buffer
+	tr    0(64,%r8),.lowcase	
+	b     .gotr
+.sk8x8:
+	mvc   0(240,%r8),0(%r9)		# copy iplparms into buffer
+.gotr:
+	l     %r10,.tbl			# EBCDIC to ASCII table
+	tr    0(240,%r8),0(%r10)
+	stidp __LC_CPUID		# are we running under VM ?
+	cli   __LC_CPUID,0xff
+	bnz   .test
+	.long 0x83300060		# diag 3,0,x'0060' - storage size
+	b     .done
+.test:
+	mvc   0x68(8),.pgmnw		# set up pgm check handler
+	l     %r2,.fourmeg
+	lr    %r3,%r2
+	bctr  %r3,%r0			# 4M-1
+.loop:  iske  %r0,%r3
+	ar    %r3,%r2
+.pgmx:
+	sr    %r3,%r2
+	la    %r3,1(%r3)
+.done:
+	l     %r1,.memsize
+	st    %r3,4(%r1)
+	slr   %r0,%r0
+	st    %r0,INITRD_SIZE+4-PARMAREA(%r11)
+	st    %r0,INITRD_START+4-PARMAREA(%r11)
+	j     startup                   # continue with startup
+.tbl:	.long _ebcasc			# translate table
+.cmd:	.long COMMAND_LINE		# address of command line buffer
+.parm:	.long PARMAREA
+.fourmeg: .long 0x00400000      	# 4M
+.pgmnw:	.long 0x00080000,.pgmx
+.memsize: .long memory_size
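+# EBCDIC translate table: maps upper-case letters to lower case and
+# leaves all other byte values unchanged.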
+.lowcase:
+	.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 
+	.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
+	.byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 
+	.byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
+	.byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 
+	.byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
+	.byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 
+	.byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
+	.byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 
+	.byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
+	.byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 
+	.byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
+	.byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 
+	.byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
+	.byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 
+	.byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
+
+	.byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 
+	.byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
+	.byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 
+	.byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
+	.byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 
+	.byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
+	.byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 
+	.byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
+	.byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87	# .abcdefg 
+	.byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf	# hi
+	.byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 	# .jklmnop
+	.byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf	# qr
+	.byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7	# ..stuvwx
+	.byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef	# yz
+	.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 
+	.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
+
+#
+# startup-code at 0x10000, running in real mode
+# this is called either by the ipl loader or directly by PSW restart
+# or linload or SALIPL
+#
+        .org  0x10000
+startup:basr  %r13,0                     # get base
+.LPG1:  sll   %r13,1                     # remove high order bit
+        srl   %r13,1
+        lhi   %r1,1                      # mode 1 = esame
+        slr   %r0,%r0                    # set cpuid to zero
+        sigp  %r1,%r0,0x12               # switch to esame mode
+	sam64				 # switch to 64 bit mode
+	lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
+	larl  %r12,_pstart               # pointer to parameter area
+					 # move IPL device to lowcore
+        mvc   __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
+
+#
+# clear bss memory
+#
+	larl  %r2,__bss_start           # start of bss segment
+        larl  %r3,_end                  # end of bss segment
+        sgr   %r3,%r2                   # length of bss
+        sgr   %r4,%r4                   #
+        sgr   %r5,%r5                   # set src,length and pad to zero
+        mvcle %r2,%r4,0                 # clear mem
+        jo    .-4                       # branch back, if not finished
+
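+#
+# Determine the memory size via the SCLP service call: issue a
+# "read SCP info forced" command first and fall back to the plain
+# "read SCP info" command on error code 0x01f0. The returned number
+# of storage increments times the increment size gives the memory
+# size in MB, which is then converted to bytes.
+#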
+	l     %r2,.Lrcp-.LPG1(%r13)	# Read SCP forced command word
+.Lservicecall:
+	stosm .Lpmask-.LPG1(%r13),0x01	# authorize ext interrupts
+
+	stctg %r0,%r0,.Lcr-.LPG1(%r13)	# get cr0
+	la    %r1,0x200			# set bit 22
+	og    %r1,.Lcr-.LPG1(%r13)	# or old cr0 with r1
+	stg   %r1,.Lcr-.LPG1(%r13)
+	lctlg %r0,%r0,.Lcr-.LPG1(%r13)	# load modified cr0
+
+	mvc   __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
+	larl  %r1,.Lsclph
+	stg   %r1,__LC_EXT_NEW_PSW+8	# set handler
+
+	larl  %r4,_pstart		# %r4 is our index for sccb stuff
+	la    %r1,.Lsccb-PARMAREA(%r4)	# our sccb
+	.insn rre,0xb2200000,%r2,%r1	# service call
+	ipm   %r1
+	srl   %r1,28			# get cc code
+	xr    %r3,%r3
+	chi   %r1,3
+	be    .Lfchunk-.LPG1(%r13)	# leave
+	chi   %r1,2
+	be    .Lservicecall-.LPG1(%r13)
+	lpsw  .Lwaitsclp-.LPG1(%r13)
+.Lsclph:
+	lh    %r1,.Lsccbr-PARMAREA(%r4)
+	chi   %r1,0x10			# 0x0010 is the success code
+	je    .Lprocsccb		# let's process the sccb
+	chi   %r1,0x1f0
+	bne   .Lfchunk-.LPG1(%r13)	# unhandled error code
+	c     %r2,.Lrcp-.LPG1(%r13)	# Did we try Read SCP forced
+	bne   .Lfchunk-.LPG1(%r13)	# if no, give up
+	l     %r2,.Lrcp2-.LPG1(%r13)	# try with Read SCP
+	b     .Lservicecall-.LPG1(%r13)
+.Lprocsccb:
+	lh    %r1,.Lscpincr1-PARMAREA(%r4) # use this one if != 0
+	chi   %r1,0x00
+	jne   .Lscnd
+	lg    %r1,.Lscpincr2-PARMAREA(%r4) # otherwise use this one
+.Lscnd:
+	xr    %r3,%r3			# same logic
+	ic    %r3,.Lscpa1-PARMAREA(%r4)
+	chi   %r3,0x00
+	jne   .Lcompmem
+	l     %r3,.Lscpa2-PARMAREA(%r13)
+.Lcompmem:
+	mlgr  %r2,%r1			# mem in MB, 128-bit product in %r2/%r3
+	l     %r1,.Lonemb-.LPG1(%r13)
+	mlgr  %r2,%r1			# mem size in bytes in %r3
+	b     .Lfchunk-.LPG1(%r13)
+
+.Lpmask:
+	.byte 0
+	.align 8
+.Lcr:
+	.quad 0x00  # place holder for cr0
+.Lwaitsclp:
+	.long 0x020A0000
+	.quad .Lsclph
+.Lrcp:
+	.int 0x00120001 # Read SCP forced code
+.Lrcp2:
+	.int 0x00020001 # Read SCP code
+.Lonemb:
+	.int 0x100000
+
+.Lfchunk:
+					 # set program check new psw mask
+	mvc   __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
+
+#
+# find memory chunks.
+#
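+# Storage is probed with tprot in 128KB steps. As long as the access
+# code (condition code) stays the same we are still in the same chunk;
+# when it changes, or tprot raises a program check, the chunk is
+# recorded in the memory_chunk array and a new one is started.
+#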
+	lgr   %r9,%r3			 # end of mem
+	larl  %r1,.Lchkmem               # set program check address
+	stg   %r1,__LC_PGM_NEW_PSW+8
+	la    %r1,1                      # test in increments of 128KB
+	sllg  %r1,%r1,17
+	larl  %r3,memory_chunk
+	slgr  %r4,%r4                    # set start of chunk to zero
+	slgr  %r5,%r5                    # set end of chunk to zero
+	slr   %r6,%r6			 # set access code to zero
+	la    %r10,MEMORY_CHUNKS	 # number of chunks
+.Lloop:
+	tprot 0(%r5),0			 # test protection of first byte
+	ipm   %r7
+	srl   %r7,28
+	clr   %r6,%r7			 # compare cc with last access code
+	je    .Lsame
+	j     .Lchkmem
+.Lsame:
+	algr  %r5,%r1			 # add 128KB to end of chunk
+					 # no need to check here,
+	brc   12,.Lloop			 # this is the same chunk
+.Lchkmem:				 # > 16EB or tprot got a program check
+	clgr  %r4,%r5			 # chunk size > 0?
+	je    .Lchkloop
+	stg   %r4,0(%r3)		 # store start address of chunk
+	lgr   %r0,%r5
+	slgr  %r0,%r4
+	stg   %r0,8(%r3)		 # store size of chunk
+	st    %r6,20(%r3)		 # store type of chunk
+	la    %r3,24(%r3)
+	larl  %r8,memory_size
+	stg   %r5,0(%r8)                 # store memory size
+	ahi   %r10,-1			 # update chunk number
+.Lchkloop:
+	lr    %r6,%r7			 # set access code to last cc
+	# we got an exception or we're starting a new chunk.
+	# check whether we should keep looking for valid memory
+	# (only if we detected the total amount of storage) and
+	# whether we have chunks left
+	lghi  %r4,1
+	sllg  %r4,%r4,31
+	clgr  %r5,%r4
+	je    .Lhsaskip
+	xr    %r0, %r0
+	clgr  %r0, %r9			 # did we detect memory?
+	je    .Ldonemem			 # if not, leave
+	chi   %r10, 0			 # do we have chunks left?
+	je    .Ldonemem
+.Lhsaskip:
+	algr  %r5,%r1			 # add 128KB to end of chunk
+	lgr   %r4,%r5			 # potential new chunk
+	clgr  %r5,%r9			 # should we go on?
+	jl    .Lloop
+.Ldonemem:		
+
+	larl  %r12,machine_flags
+#
+# find out if we are running under VM
+#
+        stidp  __LC_CPUID               # store cpuid
+	tm     __LC_CPUID,0xff          # running under VM ?
+	bno    0f-.LPG1(%r13)
+        oi     7(%r12),1                # set VM flag
+0:      lh     %r0,__LC_CPUID+4         # get cpu version
+        chi    %r0,0x7490               # running on a P/390 ?
+        bne    1f-.LPG1(%r13)
+        oi     7(%r12),4                # set P/390 flag
+1:
+
+#
+# find out if we have the MVPG instruction
+#
+	la     %r1,0f-.LPG1(%r13)       # set program check address
+	stg    %r1,__LC_PGM_NEW_PSW+8
+	sgr    %r0,%r0
+	lghi   %r1,0
+	lghi   %r2,0
+	mvpg   %r1,%r2                  # test MVPG instruction
+	oi     7(%r12),16               # set MVPG flag
+0:
+
+#
+# find out if the diag 0x44 works in 64 bit mode
+#
+	la     %r1,0f-.LPG1(%r13)	# set program check address
+	stg    %r1,__LC_PGM_NEW_PSW+8
+	mvc    __LC_DIAG44_OPCODE(8),.Lnop-.LPG1(%r13)
+	diag   0,0,0x44			# test diag 0x44
+	oi     7(%r12),32		# set diag44 flag
+	mvc    __LC_DIAG44_OPCODE(8),.Ldiag44-.LPG1(%r13)
+0:	
+
+#
+# find out if we have the IDTE instruction
+#
+	la     %r1,0f-.LPG1(%r13)	# set program check address
+	stg    %r1,__LC_PGM_NEW_PSW+8
+	.long	0xb2b10000		# store facility list
+	tm	0xc8,0x08		# check bit for clearing-by-ASCE
+	bno	0f-.LPG1(%r13)
+	lhi	%r1,2094
+	lhi	%r2,0
+	.long	0xb98e2001
+	oi	7(%r12),0x80		# set IDTE flag
+0:
+
+        lpswe .Lentry-.LPG1(13)         # jump to _stext in primary-space,
+                                        # virtual and never return ...
+        .align 16
+.Lentry:.quad  0x0000000180000000,_stext
+.Lctl:  .quad  0x04b50002               # cr0: various things
+        .quad  0                        # cr1: primary space segment table
+        .quad  .Lduct                   # cr2: dispatchable unit control table
+        .quad  0                        # cr3: instruction authorization
+        .quad  0                        # cr4: instruction authorization
+        .quad  0xffffffffffffffff       # cr5: primary-aste origin
+        .quad  0                        # cr6:  I/O interrupts
+        .quad  0                        # cr7:  secondary space segment table
+        .quad  0                        # cr8:  access registers translation
+        .quad  0                        # cr9:  tracing off
+        .quad  0                        # cr10: tracing off
+        .quad  0                        # cr11: tracing off
+        .quad  0                        # cr12: tracing off
+        .quad  0                        # cr13: home space segment table
+        .quad  0xc0000000               # cr14: machine check handling off
+        .quad  0                        # cr15: linkage stack operations
+.Lpcmsk:.quad  0x0000000180000000
+.L4malign:.quad 0xffffffffffc00000
+.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
+.Lnop:	.long  0x07000700
+.Ldiag44:.long 0x83000044
+
+	.org PARMAREA-64
+.Lduct:	.long 0,0,0,0,0,0,0,0
+	.long 0,0,0,0,0,0,0,0
+
+#
+# params at 10400 (setup.h)
+#
+	.org   PARMAREA
+	.global _pstart
+_pstart:
+	.quad  0                        # IPL_DEVICE
+        .quad  RAMDISK_ORIGIN           # INITRD_START
+        .quad  RAMDISK_SIZE             # INITRD_SIZE
+
+        .org   COMMAND_LINE
+    	.byte  "root=/dev/ram0 ro"
+        .byte  0
+	.org   0x11000
+.Lsccb:
+	.hword 0x1000			# length, one page
+	.byte 0x00,0x00,0x00
+	.byte 0x80			# variable response bit set
+.Lsccbr:
+	.hword 0x00			# response code
+.Lscpincr1:
+	.hword 0x00
+.Lscpa1:
+	.byte 0x00
+	.fill 89,1,0
+.Lscpa2:
+	.int 0x00
+.Lscpincr2:
+	.quad 0x00
+	.fill 3984,1,0
+	.org 0x12000
+	.global _pend
+_pend:	
+
+#ifdef CONFIG_SHARED_KERNEL
+	.org   0x100000
+#endif
+	
+#
+# startup-code, running in virtual mode
+#
+        .globl _stext
+_stext:	basr  %r13,0                    # get base
+.LPG2:
+#
+# Setup stack
+#
+	larl  %r15,init_thread_union
+	lg    %r14,__TI_task(%r15)      # cache current in lowcore
+	stg   %r14,__LC_CURRENT
+        aghi  %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+        stg   %r15,__LC_KERNEL_STACK    # set end of kernel stack
+        aghi  %r15,-160
+        xc    __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear backchain
+
+# check control registers
+        stctg  %c0,%c15,0(%r15)
+	oi     6(%r15),0x20             # enable sigp external interrupts
+	oi     4(%r15),0x10             # switch on low address protection
+        lctlg  %c0,%c15,0(%r15)
+
+#
+        lam    0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess
+        brasl  %r14,start_kernel        # go to C code
+#
+# We returned from start_kernel ?!? PANIC
+#
+        basr  %r13,0
+	lpswe .Ldw-.(%r13)           # load disabled wait psw
+#
+            .align 8
+.Ldw:       .quad  0x0002000180000000,0x0000000000000000
+.Laregs:    .long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+
diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c
new file mode 100644
index 000000000000..d73a74013e73
--- /dev/null
+++ b/arch/s390/kernel/init_task.c
@@ -0,0 +1,44 @@
+/*
+ *  arch/s390/kernel/init_task.c
+ *
+ *  S390 version
+ *
+ *  Derived from "arch/i386/kernel/init_task.c"
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union 
+	__attribute__((__section__(".data.init_task"))) =
+		{ INIT_THREAD_INFO(init_task) };
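+
+/*
+ * A minimal sketch of the linker script side of this (the real
+ * placement lives in vmlinux.lds.S; shown here for illustration only):
+ *
+ *	. = ALIGN(8192);
+ *	.data.init_task : { *(.data.init_task) }
+ */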
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
new file mode 100644
index 000000000000..480b6a5fef3a
--- /dev/null
+++ b/arch/s390/kernel/irq.c
@@ -0,0 +1,105 @@
+/*
+ *  arch/s390/kernel/irq.c
+ *
+ *  S390 version
+ *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *
+ * This file contains interrupt related functions.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/cpu.h>
+
+/*
+ * show_interrupts is needed by /proc/interrupts.
+ */
+int show_interrupts(struct seq_file *p, void *v)
+{
+	static const char *intrclass_names[] = { "EXT", "I/O", };
+	int i = *(loff_t *) v, j;
+
+	if (i == 0) {
+		seq_puts(p, "           ");
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
+		seq_putc(p, '\n');
+	}
+
+	if (i < NR_IRQS) {
+		seq_printf(p, "%s: ", intrclass_names[i]);
+#ifndef CONFIG_SMP
+		seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#endif
+                seq_putc(p, '\n');
+
+        }
+
+        return 0;
+}
+
+/*
+ * For compatibility only. S/390 specific setup of interrupts et al. is done
+ * much later in init_channel_subsystem().
+ */
+void __init
+init_IRQ(void)
+{
+	/* nothing... */
+}
+
+/*
+ * Switch to the asynchronous interrupt stack for softirq execution.
+ */
+extern void __do_softirq(void);
+
+asmlinkage void do_softirq(void)
+{
+	unsigned long flags, old, new;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+
+	account_system_vtime(current);
+
+	local_bh_disable();
+
+	if (local_softirq_pending()) {
+		/* Get current stack pointer. */
+		asm volatile("la %0,0(15)" : "=a" (old));
+		/* Check against async. stack address range. */
+		new = S390_lowcore.async_stack;
+		if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+			/* Need to switch to the async. stack. */
+			new -= STACK_FRAME_OVERHEAD;
+			((struct stack_frame *) new)->back_chain = old;
+
+			asm volatile("   la    15,0(%0)\n"
+				     "   basr  14,%2\n"
+				     "   la    15,0(%1)\n"
+				     : : "a" (new), "a" (old),
+				         "a" (__do_softirq)
+				     : "0", "1", "2", "3", "4", "5", "14",
+				       "cc", "memory" );
+		} else
+			/* We are already on the async stack. */
+			__do_softirq();
+	}
+
+	account_system_vtime(current);
+
+	__local_bh_enable();
+
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(do_softirq);
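The range check in do_softirq() above is compact: with THREAD_SIZE equal
to 1 << (PAGE_SHIFT + THREAD_ORDER), the shifted difference is zero
exactly when the current stack pointer already lies within the async
stack. A self-contained sketch with assumed constants (PAGE_SHIFT 12 and
THREAD_ORDER 2 are illustrative values, not taken from this patch):

	#include <stdbool.h>

	#define PAGE_SHIFT_SKETCH	12	/* assumed 4 KB pages */
	#define THREAD_ORDER_SKETCH	2	/* assumed two-page order */

	static bool on_async_stack(unsigned long sp, unsigned long async_stack)
	{
		/* zero iff sp is in [async_stack - THREAD_SIZE, async_stack) */
		return ((async_stack - sp) >>
			(PAGE_SHIFT_SKETCH + THREAD_ORDER_SKETCH)) == 0;
	}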
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
new file mode 100644
index 000000000000..607d506689c8
--- /dev/null
+++ b/arch/s390/kernel/module.c
@@ -0,0 +1,405 @@
+/*
+ *  arch/s390/kernel/module.c - Kernel module help for s390.
+ *
+ *  S390 version
+ *    Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
+ *			       IBM Corporation
+ *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ *  based on i386 version
+ *    Copyright (C) 2001 Rusty Russell.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/module.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt , ...)
+#endif
+
+#ifndef CONFIG_ARCH_S390X
+#define PLT_ENTRY_SIZE 12
+#else /* CONFIG_ARCH_S390X */
+#define PLT_ENTRY_SIZE 20
+#endif /* CONFIG_ARCH_S390X */
+
+void *module_alloc(unsigned long size)
+{
+	if (size == 0)
+		return NULL;
+	return vmalloc(size);
+}
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+	vfree(module_region);
+	/* FIXME: If module_region == mod->init_region, trim exception
+           table entries. */
+}
+
+static inline void
+check_rela(Elf_Rela *rela, struct module *me)
+{
+	struct mod_arch_syminfo *info;
+
+	info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
+	switch (ELF_R_TYPE (rela->r_info)) {
+	case R_390_GOT12:	/* 12 bit GOT offset.  */
+	case R_390_GOT16:	/* 16 bit GOT offset.  */
+	case R_390_GOT20:	/* 20 bit GOT offset.  */
+	case R_390_GOT32:	/* 32 bit GOT offset.  */
+	case R_390_GOT64:	/* 64 bit GOT offset.  */
+	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
+	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.	*/
+	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
+	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
+	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
+	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.	*/
+	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
+		if (info->got_offset == -1UL) {
+			info->got_offset = me->arch.got_size;
+			me->arch.got_size += sizeof(void*);
+		}
+		break;
+	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
+	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
+	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
+	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
+	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
+	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
+	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
+		if (info->plt_offset == -1UL) {
+			info->plt_offset = me->arch.plt_size;
+			me->arch.plt_size += PLT_ENTRY_SIZE;
+		}
+		break;
+	case R_390_COPY:
+	case R_390_GLOB_DAT:
+	case R_390_JMP_SLOT:
+	case R_390_RELATIVE:
+		/* Only needed if we want to support loading of 
+		   modules linked with -shared. */
+		break;
+	}
+}
+
+/*
+ * Account for GOT and PLT relocations. We can't add sections for
+ * got and plt but we can increase the core module size.
+ */
+int
+module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+			  char *secstrings, struct module *me)
+{
+	Elf_Shdr *symtab;
+	Elf_Sym *symbols;
+	Elf_Rela *rela;
+	char *strings;
+	int nrela, i, j;
+
+	/* Find symbol table and string table. */
+	symtab = 0;
+	for (i = 0; i < hdr->e_shnum; i++)
+		switch (sechdrs[i].sh_type) {
+		case SHT_SYMTAB:
+			symtab = sechdrs + i;
+			break;
+		}
+	if (!symtab) {
+		printk(KERN_ERR "module %s: no symbol table\n", me->name);
+		return -ENOEXEC;
+	}
+
+	/* Allocate one syminfo structure per symbol. */
+	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
+	me->arch.syminfo = vmalloc(me->arch.nsyms *
+				   sizeof(struct mod_arch_syminfo));
+	if (!me->arch.syminfo)
+		return -ENOMEM;
+	symbols = (void *) hdr + symtab->sh_offset;
+	strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
+	for (i = 0; i < me->arch.nsyms; i++) {
+		if (symbols[i].st_shndx == SHN_UNDEF &&
+		    strcmp(strings + symbols[i].st_name,
+			   "_GLOBAL_OFFSET_TABLE_") == 0)
+			/* "Define" it as absolute. */
+			symbols[i].st_shndx = SHN_ABS;
+		me->arch.syminfo[i].got_offset = -1UL;
+		me->arch.syminfo[i].plt_offset = -1UL;
+		me->arch.syminfo[i].got_initialized = 0;
+		me->arch.syminfo[i].plt_initialized = 0;
+	}
+
+	/* Search for got/plt relocations. */
+	me->arch.got_size = me->arch.plt_size = 0;
+	for (i = 0; i < hdr->e_shnum; i++) {
+		if (sechdrs[i].sh_type != SHT_RELA)
+			continue;
+		nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
+		rela = (void *) hdr + sechdrs[i].sh_offset;
+		for (j = 0; j < nrela; j++)
+			check_rela(rela + j, me);
+	}
+
+	/* Increase core size by size of got & plt and set start
+	   offsets for got and plt. */
+	me->core_size = ALIGN(me->core_size, 4);
+	me->arch.got_offset = me->core_size;
+	me->core_size += me->arch.got_size;
+	me->arch.plt_offset = me->core_size;
+	me->core_size += me->arch.plt_size;
+	return 0;
+}
+
+int
+apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
+	       unsigned int relsec, struct module *me)
+{
+	printk(KERN_ERR "module %s: RELOCATION unsupported\n",
+	       me->name);
+	return -ENOEXEC;
+}
+
+static inline int
+apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, 
+	   struct module *me)
+{
+	struct mod_arch_syminfo *info;
+	Elf_Addr loc, val;
+	int r_type, r_sym;
+
+	/* This is where to make the change */
+	loc = base + rela->r_offset;
+	/* This is the symbol it is referring to.  Note that all
+	   undefined symbols have been resolved.  */
+	r_sym = ELF_R_SYM(rela->r_info);
+	r_type = ELF_R_TYPE(rela->r_info);
+	info = me->arch.syminfo + r_sym;
+	val = symtab[r_sym].st_value;
+
+	switch (r_type) {
+	case R_390_8:		/* Direct 8 bit.   */
+	case R_390_12:		/* Direct 12 bit.  */
+	case R_390_16:		/* Direct 16 bit.  */
+	case R_390_20:		/* Direct 20 bit.  */
+	case R_390_32:		/* Direct 32 bit.  */
+	case R_390_64:		/* Direct 64 bit.  */
+		val += rela->r_addend;
+		if (r_type == R_390_8)
+			*(unsigned char *) loc = val;
+		else if (r_type == R_390_12)
+			*(unsigned short *) loc = (val & 0xfff) |
+				(*(unsigned short *) loc & 0xf000);
+		else if (r_type == R_390_16)
+			*(unsigned short *) loc = val;
+		else if (r_type == R_390_20)
+			*(unsigned int *) loc =
+				(*(unsigned int *) loc & 0xf00000ff) |
+				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
+		else if (r_type == R_390_32)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_64)
+			*(unsigned long *) loc = val;
+		break;
+	case R_390_PC16:	/* PC relative 16 bit.  */
+	case R_390_PC16DBL:	/* PC relative 16 bit shifted by 1.  */
+	case R_390_PC32DBL:	/* PC relative 32 bit shifted by 1.  */
+	case R_390_PC32:	/* PC relative 32 bit.  */
+	case R_390_PC64:	/* PC relative 64 bit.	*/
+		val += rela->r_addend - loc;
+		if (r_type == R_390_PC16)
+			*(unsigned short *) loc = val;
+		else if (r_type == R_390_PC16DBL)
+			*(unsigned short *) loc = val >> 1;
+		else if (r_type == R_390_PC32DBL)
+			*(unsigned int *) loc = val >> 1;
+		else if (r_type == R_390_PC32)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_PC64)
+			*(unsigned long *) loc = val;
+		break;
+	case R_390_GOT12:	/* 12 bit GOT offset.  */
+	case R_390_GOT16:	/* 16 bit GOT offset.  */
+	case R_390_GOT20:	/* 20 bit GOT offset.  */
+	case R_390_GOT32:	/* 32 bit GOT offset.  */
+	case R_390_GOT64:	/* 64 bit GOT offset.  */
+	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
+	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.	*/
+	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
+	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
+	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
+	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.	*/
+	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
+		if (info->got_initialized == 0) {
+			Elf_Addr *gotent;
+
+			gotent = me->module_core + me->arch.got_offset +
+				info->got_offset;
+			*gotent = val;
+			info->got_initialized = 1;
+		}
+		val = info->got_offset + rela->r_addend;
+		if (r_type == R_390_GOT12 ||
+		    r_type == R_390_GOTPLT12)
+			*(unsigned short *) loc = (val & 0xfff) |
+				(*(unsigned short *) loc & 0xf000);
+		else if (r_type == R_390_GOT16 ||
+			 r_type == R_390_GOTPLT16)
+			*(unsigned short *) loc = val;
+		else if (r_type == R_390_GOT20 ||
+			 r_type == R_390_GOTPLT20)
+			*(unsigned int *) loc =
+				(*(unsigned int *) loc & 0xf00000ff) |
+				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
+		else if (r_type == R_390_GOT32 ||
+			 r_type == R_390_GOTPLT32)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_GOTENT ||
+			 r_type == R_390_GOTPLTENT)
+			*(unsigned int *) loc =
+				(val + (Elf_Addr) me->module_core - loc) >> 1;
+		else if (r_type == R_390_GOT64 ||
+			 r_type == R_390_GOTPLT64)
+			*(unsigned long *) loc = val;
+		break;
+	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
+	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
+	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
+	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
+	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
+	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
+	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
+		if (info->plt_initialized == 0) {
+			unsigned int *ip;
+			ip = me->module_core + me->arch.plt_offset +
+				info->plt_offset;
+#ifndef CONFIG_ARCH_S390X
+			ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+			ip[1] = 0x100607f1;
+			ip[2] = val;
+#else /* CONFIG_ARCH_S390X */
+			ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+			ip[1] = 0x100a0004;
+			ip[2] = 0x07f10000;
+			ip[3] = (unsigned int) (val >> 32);
+			ip[4] = (unsigned int) val;
+#endif /* CONFIG_ARCH_S390X */
+			info->plt_initialized = 1;
+		}
+		if (r_type == R_390_PLTOFF16 ||
+		    r_type == R_390_PLTOFF32
+		    || r_type == R_390_PLTOFF64
+			)
+			val = me->arch.plt_offset - me->arch.got_offset +
+				info->plt_offset + rela->r_addend;
+		else
+			val =  (Elf_Addr) me->module_core +
+				me->arch.plt_offset + info->plt_offset + 
+				rela->r_addend - loc;
+		if (r_type == R_390_PLT16DBL)
+			*(unsigned short *) loc = val >> 1;
+		else if (r_type == R_390_PLTOFF16)
+			*(unsigned short *) loc = val;
+		else if (r_type == R_390_PLT32DBL)
+			*(unsigned int *) loc = val >> 1;
+		else if (r_type == R_390_PLT32 ||
+			 r_type == R_390_PLTOFF32)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_PLT64 ||
+			 r_type == R_390_PLTOFF64)
+			*(unsigned long *) loc = val;
+		break;
+	case R_390_GOTOFF16:	/* 16 bit offset to GOT.  */
+	case R_390_GOTOFF32:	/* 32 bit offset to GOT.  */
+	case R_390_GOTOFF64:	/* 64 bit offset to GOT. */
+		val = val + rela->r_addend -
+			((Elf_Addr) me->module_core + me->arch.got_offset);
+		if (r_type == R_390_GOTOFF16)
+			*(unsigned short *) loc = val;
+		else if (r_type == R_390_GOTOFF32)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_GOTOFF64)
+			*(unsigned long *) loc = val;
+		break;
+	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
+	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
+		val = (Elf_Addr) me->module_core + me->arch.got_offset +
+			rela->r_addend - loc;
+		if (r_type == R_390_GOTPC)
+			*(unsigned int *) loc = val;
+		else if (r_type == R_390_GOTPCDBL)
+			*(unsigned int *) loc = val >> 1;
+		break;
+	case R_390_COPY:
+	case R_390_GLOB_DAT:	/* Create GOT entry.  */
+	case R_390_JMP_SLOT:	/* Create PLT entry.  */
+	case R_390_RELATIVE:	/* Adjust by program base.  */
+		/* Only needed if we want to support loading of 
+		   modules linked with -shared. */
+		break;
+	default:
+		printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+		       me->name, r_type);
+		return -ENOEXEC;
+	}
+	return 0;
+}
+
+int
+apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+		   unsigned int symindex, unsigned int relsec,
+		   struct module *me)
+{
+	Elf_Addr base;
+	Elf_Sym *symtab;
+	Elf_Rela *rela;
+	unsigned long i, n;
+	int rc;
+
+	DEBUGP("Applying relocate section %u to %u\n",
+	       relsec, sechdrs[relsec].sh_info);
+	base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+	symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
+	rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
+	n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);
+
+	for (i = 0; i < n; i++, rela++) {
+		rc = apply_rela(rela, base, symtab, me);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *me)
+{
+	vfree(me->arch.syminfo);
+	return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+}
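For the "DBL" relocation types handled in apply_rela() above, the s390
branch-relative instructions encode a halfword offset, so the byte
distance is stored shifted right by one. A minimal sketch of the
R_390_PC32DBL case (hypothetical standalone function, not kernel code):

	#include <stdint.h>

	static void apply_pc32dbl(uint8_t *loc, uint64_t sym, int64_t addend)
	{
		/* byte distance from the patched location to the target */
		int64_t val = (int64_t)sym + addend - (int64_t)(uintptr_t)loc;
		*(uint32_t *)loc = (uint32_t)(val >> 1);	/* halfword offset */
	}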
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
new file mode 100644
index 000000000000..7aea25d6e300
--- /dev/null
+++ b/arch/s390/kernel/process.c
@@ -0,0 +1,416 @@
+/*
+ *  arch/s390/kernel/process.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Hartmut Penner (hp@de.ibm.com),
+ *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *
+ *  Derived from "arch/i386/kernel/process.c"
+ *    Copyright (C) 1995, Linus Torvalds
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <linux/config.h>
+#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/timer.h>
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+/*
+ * Return saved PC of a blocked thread. Used in kernel/sched.c.
+ * resume in entry.S does not create a new stack frame, it
+ * just stores the registers %r6-%r15 to the frame given by
+ * schedule. We want to return the address of the caller of
+ * schedule, so we have to walk the backchain one time to
+ * find the frame in which schedule() stored its return address.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+	struct stack_frame *sf;
+
+	sf = (struct stack_frame *) tsk->thread.ksp;
+	sf = (struct stack_frame *) sf->back_chain;
+	return sf->gprs[8];
+}
+
+/*
+ * Need to know about CPUs going idle?
+ */
+static struct notifier_block *idle_chain;
+
+int register_idle_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_register(&idle_chain, nb);
+}
+EXPORT_SYMBOL(register_idle_notifier);
+
+int unregister_idle_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_unregister(&idle_chain, nb);
+}
+EXPORT_SYMBOL(unregister_idle_notifier);
+
+void do_monitor_call(struct pt_regs *regs, long interruption_code)
+{
+	/* disable monitor call class 0 */
+	__ctl_clear_bit(8, 15);
+
+	notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
+			    (void *)(long) smp_processor_id());
+}
+
+/*
+ * The idle loop on a S390...
+ */
+void default_idle(void)
+{
+	psw_t wait_psw;
+	unsigned long reg;
+	int cpu, rc;
+
+	local_irq_disable();
+        if (need_resched()) {
+		local_irq_enable();
+                schedule();
+                return;
+        }
+
+	/* CPU is going idle. */
+	cpu = smp_processor_id();
+	rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu);
+	if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
+		BUG();
+	if (rc != NOTIFY_OK) {
+		local_irq_enable();
+		return;
+	}
+
+	/* enable monitor call class 0 */
+	__ctl_set_bit(8, 15);
+
+#ifdef CONFIG_HOTPLUG_CPU
+	if (cpu_is_offline(smp_processor_id()))
+		cpu_die();
+#endif
+
+	/* 
+	 * Wait for external, I/O or machine check interrupt and
+	 * switch off machine check bit after the wait has ended.
+	 */
+	wait_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK | PSW_MASK_WAIT |
+		PSW_MASK_IO | PSW_MASK_EXT;
+#ifndef CONFIG_ARCH_S390X
+	asm volatile (
+		"    basr %0,0\n"
+		"0:  la   %0,1f-0b(%0)\n"
+		"    st   %0,4(%1)\n"
+		"    oi   4(%1),0x80\n"
+		"    lpsw 0(%1)\n"
+		"1:  la   %0,2f-1b(%0)\n"
+		"    st   %0,4(%1)\n"
+		"    oi   4(%1),0x80\n"
+		"    ni   1(%1),0xf9\n"
+		"    lpsw 0(%1)\n"
+		"2:"
+		: "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
+#else /* CONFIG_ARCH_S390X */
+	asm volatile (
+		"    larl  %0,0f\n"
+		"    stg   %0,8(%1)\n"
+		"    lpswe 0(%1)\n"
+		"0:  larl  %0,1f\n"
+		"    stg   %0,8(%1)\n"
+		"    ni    1(%1),0xf9\n"
+		"    lpswe 0(%1)\n"
+		"1:"
+		: "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
+#endif /* CONFIG_ARCH_S390X */
+}
+
+void cpu_idle(void)
+{
+	for (;;)
+		default_idle();
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	struct task_struct *tsk = current;
+
+        printk("CPU:    %d    %s\n", tsk->thread_info->cpu, print_tainted());
+        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+	       current->comm, current->pid, (void *) tsk,
+	       (void *) tsk->thread.ksp);
+
+	show_registers(regs);
+	/* Show stack backtrace if pt_regs is from kernel mode */
+	if (!(regs->psw.mask & PSW_MASK_PSTATE))
+		show_trace(0,(unsigned long *) regs->gprs[15]);
+}
+
+extern void kernel_thread_starter(void);
+
+__asm__(".align 4\n"
+	"kernel_thread_starter:\n"
+	"    la    2,0(10)\n"
+	"    basr  14,9\n"
+	"    la    2,0\n"
+	"    br    11\n");
+
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+	struct pt_regs regs;
+
+	memset(&regs, 0, sizeof(regs));
+	regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
+	regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
+	regs.gprs[9] = (unsigned long) fn;
+	regs.gprs[10] = (unsigned long) arg;
+	regs.gprs[11] = (unsigned long) do_exit;
+	regs.orig_gpr2 = -1;
+
+	/* Ok, create the new process.. */
+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
+		       0, &regs, 0, NULL, NULL);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+}
+
+void flush_thread(void)
+{
+	clear_used_math();
+	clear_tsk_thread_flag(current, TIF_USEDFPU);
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
+	unsigned long unused,
+        struct task_struct * p, struct pt_regs * regs)
+{
+        struct fake_frame
+          {
+	    struct stack_frame sf;
+            struct pt_regs childregs;
+          } *frame;
+
+        frame = ((struct fake_frame *)
+		 (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+        p->thread.ksp = (unsigned long) frame;
+	/* Store access registers to kernel stack of new process. */
+        frame->childregs = *regs;
+	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
+        frame->childregs.gprs[15] = new_stackp;
+        frame->sf.back_chain = 0;
+
+        /* new return point is ret_from_fork */
+        frame->sf.gprs[8] = (unsigned long) ret_from_fork;
+
+        /* fake return stack for resume(), don't go back to schedule */
+        frame->sf.gprs[9] = (unsigned long) frame;
+
+	/* Save access registers to new thread structure. */
+	save_access_regs(&p->thread.acrs[0]);
+
+#ifndef CONFIG_ARCH_S390X
+        /*
+	 * save fprs to current->thread.fp_regs to merge them with
+	 * the emulated registers and then copy the result to the child.
+	 */
+	save_fp_regs(&current->thread.fp_regs);
+	memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
+	       sizeof(s390_fp_regs));
+        p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE;
+	/* Set a new TLS ?  */
+	if (clone_flags & CLONE_SETTLS)
+		p->thread.acrs[0] = regs->gprs[6];
+#else /* CONFIG_ARCH_S390X */
+	/* Save the fpu registers to new thread structure. */
+	save_fp_regs(&p->thread.fp_regs);
+        p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
+	/* Set a new TLS ?  */
+	if (clone_flags & CLONE_SETTLS) {
+		if (test_thread_flag(TIF_31BIT)) {
+			p->thread.acrs[0] = (unsigned int) regs->gprs[6];
+		} else {
+			p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
+			p->thread.acrs[1] = (unsigned int) regs->gprs[6];
+		}
+	}
+#endif /* CONFIG_ARCH_S390X */
+	/* start new process with ar4 pointing to the correct address space */
+	p->thread.mm_segment = get_fs();
+        /* Don't copy debug registers */
+        memset(&p->thread.per_info,0,sizeof(p->thread.per_info));
+
+        return 0;
+}
+
+asmlinkage long sys_fork(struct pt_regs regs)
+{
+	return do_fork(SIGCHLD, regs.gprs[15], &regs, 0, NULL, NULL);
+}
+
+asmlinkage long sys_clone(struct pt_regs regs)
+{
+        unsigned long clone_flags;
+        unsigned long newsp;
+	int __user *parent_tidptr, *child_tidptr;
+
+        clone_flags = regs.gprs[3];
+        newsp = regs.orig_gpr2;
+	parent_tidptr = (int __user *) regs.gprs[4];
+	child_tidptr = (int __user *) regs.gprs[5];
+        if (!newsp)
+                newsp = regs.gprs[15];
+        return do_fork(clone_flags, newsp, &regs, 0,
+		       parent_tidptr, child_tidptr);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage long sys_vfork(struct pt_regs regs)
+{
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
+		       regs.gprs[15], &regs, 0, NULL, NULL);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage long sys_execve(struct pt_regs regs)
+{
+        int error;
+        char * filename;
+
+        filename = getname((char __user *) regs.orig_gpr2);
+        error = PTR_ERR(filename);
+        if (IS_ERR(filename))
+                goto out;
+        error = do_execve(filename, (char __user * __user *) regs.gprs[3],
+			  (char __user * __user *) regs.gprs[4], &regs);
+	if (error == 0) {
+		task_lock(current);
+		current->ptrace &= ~PT_DTRACE;
+		task_unlock(current);
+		current->thread.fp_regs.fpc = 0;
+		if (MACHINE_HAS_IEEE)
+			asm volatile("sfpc %0,%0" : : "d" (0));
+	}
+        putname(filename);
+out:
+        return error;
+}
+
+
+/*
+ * fill in the FPU structure for a core dump.
+ */
+int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
+{
+#ifndef CONFIG_ARCH_S390X
+        /*
+	 * save fprs to current->thread.fp_regs to merge them with
+	 * the emulated registers and then copy the result to the dump.
+	 */
+	save_fp_regs(&current->thread.fp_regs);
+	memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
+#else /* CONFIG_ARCH_S390X */
+	save_fp_regs(fpregs);
+#endif /* CONFIG_ARCH_S390X */
+	return 1;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+
+/* changed the size calculations - should hopefully work better. lbt */
+	dump->magic = CMAGIC;
+	dump->start_code = 0;
+	dump->start_stack = regs->gprs[15] & ~(PAGE_SIZE - 1);
+	dump->u_tsize = current->mm->end_code >> PAGE_SHIFT;
+	dump->u_dsize = (current->mm->brk + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	dump->u_dsize -= dump->u_tsize;
+	dump->u_ssize = 0;
+	if (dump->start_stack < TASK_SIZE)
+		dump->u_ssize = (TASK_SIZE - dump->start_stack) >> PAGE_SHIFT;
+	memcpy(&dump->regs, regs, sizeof(s390_regs));
+	dump_fpu (regs, &dump->regs.fp_regs);
+	dump->regs.per_info = current->thread.per_info;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	struct stack_frame *sf, *low, *high;
+	unsigned long return_address;
+	int count;
+
+	if (!p || p == current || p->state == TASK_RUNNING || !p->thread_info)
+		return 0;
+	low = (struct stack_frame *) p->thread_info;
+	high = (struct stack_frame *)
+		((unsigned long) p->thread_info + THREAD_SIZE) - 1;
+	sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
+	if (sf <= low || sf > high)
+		return 0;
+	for (count = 0; count < 16; count++) {
+		sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
+		if (sf <= low || sf > high)
+			return 0;
+		return_address = sf->gprs[8] & PSW_ADDR_INSN;
+		if (!in_sched_functions(return_address))
+			return return_address;
+	}
+	return 0;
+}
+
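thread_saved_pc() and get_wchan() above both depend on the s390 stack
back chain: each frame's first slot points to the caller's frame, and
slot gprs[8] of the register save area holds the saved %r14, i.e. the
return address. A sketch with the layout assumed from those uses
(hypothetical names, padding assumed):

	/* back chain plus the %r6-%r15 save area */
	struct stack_frame_sketch {
		unsigned long back_chain;
		unsigned long empty1[5];
		unsigned long gprs[10];	/* %r6..%r15; gprs[8] is %r14 */
	};

	static unsigned long caller_pc_sketch(unsigned long ksp)
	{
		struct stack_frame_sketch *sf;

		sf = (struct stack_frame_sketch *) ksp;
		sf = (struct stack_frame_sketch *) sf->back_chain; /* one frame up */
		return sf->gprs[8];	/* return address of the caller */
	}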
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c
new file mode 100644
index 000000000000..7ba777eec1a8
--- /dev/null
+++ b/arch/s390/kernel/profile.c
@@ -0,0 +1,20 @@
+/*
+ * arch/s390/kernel/profile.c
+ *
+ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Thomas Spatzier (tspat@de.ibm.com)
+ *
+ */
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+
+static struct proc_dir_entry * root_irq_dir;
+
+void init_irq_proc(void)
+{
+	/* create /proc/irq */
+	root_irq_dir = proc_mkdir("irq", 0);
+
+	/* create /proc/irq/prof_cpu_mask */
+	create_prof_cpu_mask(root_irq_dir);
+}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
new file mode 100644
index 000000000000..647233c02fc8
--- /dev/null
+++ b/arch/s390/kernel/ptrace.c
@@ -0,0 +1,738 @@
+/*
+ *  arch/s390/kernel/ptrace.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ *  Based on PowerPC version 
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Derived from "arch/m68k/kernel/ptrace.c"
+ *  Copyright (C) 1994 by Hamish Macdonald
+ *  Taken from linux/kernel/ptrace.c and modified for M680x0.
+ *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * Modified by Cort Dougan (cort@cs.nmt.edu) 
+ *
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file README.legal in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/security.h>
+#include <linux/audit.h>
+
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_S390_SUPPORT
+#include "compat_ptrace.h"
+#endif
+
+static void
+FixPerRegisters(struct task_struct *task)
+{
+	struct pt_regs *regs;
+	per_struct *per_info;
+
+	regs = __KSTK_PTREGS(task);
+	per_info = (per_struct *) &task->thread.per_info;
+	per_info->control_regs.bits.em_instruction_fetch =
+		per_info->single_step | per_info->instruction_fetch;
+	
+	if (per_info->single_step) {
+		per_info->control_regs.bits.starting_addr = 0;
+#ifdef CONFIG_S390_SUPPORT
+		if (test_thread_flag(TIF_31BIT))
+			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
+		else
+#endif
+			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
+	} else {
+		per_info->control_regs.bits.starting_addr =
+			per_info->starting_addr;
+		per_info->control_regs.bits.ending_addr =
+			per_info->ending_addr;
+	}
+	/*
+	 * if any of the control reg tracing bits are on 
+	 * we switch on per in the psw
+	 */
+	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
+		regs->psw.mask |= PSW_MASK_PER;
+	else
+		regs->psw.mask &= ~PSW_MASK_PER;
+
+	if (per_info->control_regs.bits.em_storage_alteration)
+		per_info->control_regs.bits.storage_alt_space_ctl = 1;
+	else
+		per_info->control_regs.bits.storage_alt_space_ctl = 0;
+}
+
+void
+set_single_step(struct task_struct *task)
+{
+	task->thread.per_info.single_step = 1;
+	FixPerRegisters(task);
+}
+
+void
+clear_single_step(struct task_struct *task)
+{
+	task->thread.per_info.single_step = 0;
+	FixPerRegisters(task);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void
+ptrace_disable(struct task_struct *child)
+{
+	/* make sure the single step bit is not set. */
+	clear_single_step(child);
+}
+
+#ifndef CONFIG_ARCH_S390X
+# define __ADDR_MASK 3
+#else
+# define __ADDR_MASK 7
+#endif
+
+/*
+ * Read the word at offset addr from the user area of a process. The
+ * trouble here is that the information is littered over different
+ * locations. The process registers are found on the kernel stack,
+ * the floating point stuff and the trace settings are stored in
+ * the task structure. In addition the different structures in
+ * struct user contain pad bytes that should be read as zeroes.
+ * Lovely...
+ */
+static int
+peek_user(struct task_struct *child, addr_t addr, addr_t data)
+{
+	struct user *dummy = NULL;
+	addr_t offset, tmp;
+
+	/*
+	 * Stupid gdb peeks/pokes the access registers in 64 bit with
+	 * an alignment of 4. Programmers from hell...
+	 */
+	if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
+		return -EIO;
+
+	if (addr < (addr_t) &dummy->regs.acrs) {
+		/*
+		 * psw and gprs are stored on the stack
+		 */
+		tmp = *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr);
+		if (addr == (addr_t) &dummy->regs.psw.mask)
+			/* Remove per bit from user psw. */
+			tmp &= ~PSW_MASK_PER;
+
+	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
+		/*
+		 * access registers are stored in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy->regs.acrs;
+		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
+
+	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
+		/*
+		 * orig_gpr2 is stored on the kernel stack
+		 */
+		tmp = (addr_t) __KSTK_PTREGS(child)->orig_gpr2;
+
+	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
+		/* 
+		 * floating point regs. are stored in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy->regs.fp_regs;
+		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
+
+	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
+		/*
+		 * per_info is found in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy->regs.per_info;
+		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
+
+	} else
+		tmp = 0;
+
+	return put_user(tmp, (addr_t __user *) data);
+}
+
+/*
+ * Write a word to the user area of a process at location addr. This
+ * operation does have an additional problem compared to peek_user.
+ * Stores to the program status word and to the floating point
+ * control register need to be checked for validity.
+ */
+static int
+poke_user(struct task_struct *child, addr_t addr, addr_t data)
+{
+	struct user *dummy = NULL;
+	addr_t offset;
+
+	/*
+	 * Stupid gdb peeks/pokes the access registers in 64 bit with
+	 * an alignment of 4. Programmers from hell indeed...
+	 */
+	if ((addr & 3) || addr > sizeof(struct user) - __ADDR_MASK)
+		return -EIO;
+
+	if (addr < (addr_t) &dummy->regs.acrs) {
+		/*
+		 * psw and gprs are stored on the stack
+		 */
+		if (addr == (addr_t) &dummy->regs.psw.mask &&
+#ifdef CONFIG_S390_SUPPORT
+		    data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
+#endif
+		    data != PSW_MASK_MERGE(PSW_USER_BITS, data))
+			/* Invalid psw mask. */
+			return -EINVAL;
+#ifndef CONFIG_ARCH_S390X
+		if (addr == (addr_t) &dummy->regs.psw.addr)
+			/* I'd like to reject addresses without the
+			   high order bit but older gdb's rely on it */
+			data |= PSW_ADDR_AMODE;
+#endif
+		*(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr) = data;
+
+	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
+		/*
+		 * access registers are stored in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy->regs.acrs;
+		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
+
+	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
+		/*
+		 * orig_gpr2 is stored on the kernel stack
+		 */
+		__KSTK_PTREGS(child)->orig_gpr2 = data;
+
+	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
+		/*
+		 * floating point regs. are stored in the thread structure
+		 */
+		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
+		    (data & ~FPC_VALID_MASK) != 0)
+			return -EINVAL;
+		offset = addr - (addr_t) &dummy->regs.fp_regs;
+		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
+
+	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
+		/*
+		 * per_info is found in the thread structure 
+		 */
+		offset = addr - (addr_t) &dummy->regs.per_info;
+		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
+
+	}
+
+	FixPerRegisters(child);
+	return 0;
+}
+
+static int
+do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
+{
+	unsigned long tmp;
+	ptrace_area parea; 
+	int copied, ret;
+
+	switch (request) {
+	case PTRACE_PEEKTEXT:
+	case PTRACE_PEEKDATA:
+		/* Remove high order bit from address (only for 31 bit). */
+		addr &= PSW_ADDR_INSN;
+		/* read word at location addr. */
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		if (copied != sizeof(tmp))
+			return -EIO;
+		return put_user(tmp, (unsigned long __user *) data);
+
+	case PTRACE_PEEKUSR:
+		/* read the word at location addr in the USER area. */
+		return peek_user(child, addr, data);
+
+	case PTRACE_POKETEXT:
+	case PTRACE_POKEDATA:
+		/* Remove high order bit from address (only for 31 bit). */
+		addr &= PSW_ADDR_INSN;
+		/* write the word at location addr. */
+		copied = access_process_vm(child, addr, &data, sizeof(data),1);
+		if (copied != sizeof(data))
+			return -EIO;
+		return 0;
+
+	case PTRACE_POKEUSR:
+		/* write the word at location addr in the USER area */
+		return poke_user(child, addr, data);
+
+	case PTRACE_PEEKUSR_AREA:
+	case PTRACE_POKEUSR_AREA:
+		if (copy_from_user(&parea, (void __user *) addr,
+							sizeof(parea)))
+			return -EFAULT;
+		addr = parea.kernel_addr;
+		data = parea.process_addr;
+		copied = 0;
+		while (copied < parea.len) {
+			if (request == PTRACE_PEEKUSR_AREA)
+				ret = peek_user(child, addr, data);
+			else {
+				addr_t tmp;
+				if (get_user (tmp, (addr_t __user *) data))
+					return -EFAULT;
+				ret = poke_user(child, addr, tmp);
+			}
+			if (ret)
+				return ret;
+			addr += sizeof(unsigned long);
+			data += sizeof(unsigned long);
+			copied += sizeof(unsigned long);
+		}
+		return 0;
+	}
+	return ptrace_request(child, request, addr, data);
+}
+
+#ifdef CONFIG_S390_SUPPORT
+/*
+ * Now the fun part starts... a 31 bit program running in the
+ * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
+ * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
+ * to handle, the difference to the 64 bit versions of the requests
+ * is that the access is done in multiples of 4 bytes instead of
+ * 8 bytes (sizeof(unsigned long) on 31/64 bit).
+ * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
+ * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
+ * is a 31 bit program too, the content of struct user can be
+ * emulated. A 31 bit program peeking into the struct user of
+ * a 64 bit program is a no-no.
+ */
+
+/*
+ * Same as peek_user but for a 31 bit program.
+ */
+static int
+peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
+{
+	struct user32 *dummy32 = NULL;
+	per_struct32 *dummy_per32 = NULL;
+	addr_t offset;
+	__u32 tmp;
+
+	if (!test_thread_flag(TIF_31BIT) ||
+	    (addr & 3) || addr > sizeof(struct user) - 3)
+		return -EIO;
+
+	if (addr < (addr_t) &dummy32->regs.acrs) {
+		/*
+		 * psw and gprs are stored on the stack
+		 */
+		if (addr == (addr_t) &dummy32->regs.psw.mask) {
+			/* Fake a 31 bit psw mask. */
+			tmp = (__u32)(__KSTK_PTREGS(child)->psw.mask >> 32);
+			tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
+		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
+			/* Fake a 31 bit psw address. */
+			tmp = (__u32) __KSTK_PTREGS(child)->psw.addr |
+				PSW32_ADDR_AMODE31;
+		} else {
+			/* gpr 0-15 */
+			tmp = *(__u32 *)((addr_t) &__KSTK_PTREGS(child)->psw +
+					 addr*2 + 4);
+		}
+	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
+		/*
+		 * access registers are stored in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy32->regs.acrs;
+		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
+
+	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
+		/*
+		 * orig_gpr2 is stored on the kernel stack
+		 */
+		tmp = *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4);
+
+	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
+		/*
+		 * floating point regs. are stored in the thread structure 
+		 */
+	        offset = addr - (addr_t) &dummy32->regs.fp_regs;
+		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
+
+	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
+		/*
+		 * per_info is found in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy32->regs.per_info;
+		/* This is magic. See per_struct and per_struct32. */
+		if ((offset >= (addr_t) &dummy_per32->control_regs &&
+		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
+		    (offset >= (addr_t) &dummy_per32->starting_addr &&
+		     offset <= (addr_t) &dummy_per32->ending_addr) ||
+		    offset == (addr_t) &dummy_per32->lowcore.words.address)
+			offset = offset*2 + 4;
+		else
+			offset = offset*2;
+		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
+
+	} else
+		tmp = 0;
+
+	return put_user(tmp, (__u32 __user *) data);
+}
+
+/*
+ * Same as poke_user but for a 31 bit program.
+ */
+static int
+poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
+{
+	struct user32 *dummy32 = NULL;
+	per_struct32 *dummy_per32 = NULL;
+	addr_t offset;
+	__u32 tmp;
+
+	if (!test_thread_flag(TIF_31BIT) ||
+	    (addr & 3) || addr > sizeof(struct user32) - 3)
+		return -EIO;
+
+	tmp = (__u32) data;
+
+	if (addr < (addr_t) &dummy32->regs.acrs) {
+		/*
+		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
+		 */
+		if (addr == (addr_t) &dummy32->regs.psw.mask) {
+			/* Build a 64 bit psw mask from 31 bit mask. */
+			if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
+				/* Invalid psw mask. */
+				return -EINVAL;
+			__KSTK_PTREGS(child)->psw.mask =
+				PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
+		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
+			/* Build a 64 bit psw address from 31 bit address. */
+			__KSTK_PTREGS(child)->psw.addr = 
+				(__u64) tmp & PSW32_ADDR_INSN;
+		} else {
+			/* gpr 0-15 */
+			*(__u32*)((addr_t) &__KSTK_PTREGS(child)->psw
+				  + addr*2 + 4) = tmp;
+		}
+	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
+		/*
+		 * access registers are stored in the thread structure
+		 */
+		offset = addr - (addr_t) &dummy32->regs.acrs;
+		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
+
+	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
+		/*
+		 * orig_gpr2 is stored on the kernel stack
+		 */
+		*(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4) = tmp;
+
+	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
+		/*
+		 * floating point regs. are stored in the thread structure 
+		 */
+		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
+		    (tmp & ~FPC_VALID_MASK) != 0)
+			/* Invalid floating point control. */
+			return -EINVAL;
+	        offset = addr - (addr_t) &dummy32->regs.fp_regs;
+		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
+
+	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
+		/*
+		 * per_info is found in the thread structure.
+		 */
+		offset = addr - (addr_t) &dummy32->regs.per_info;
+		/*
+		 * This is magic. See per_struct and per_struct32.
+		 * By coincidence the offsets in per_struct are exactly
+		 * twice the offsets in per_struct32 for all fields.
+		 * The 8 byte fields need special handling though,
+		 * because the second half (bytes 4-7) is needed and
+		 * not the first half.
+		 */
+		if ((offset >= (addr_t) &dummy_per32->control_regs &&
+		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
+		    (offset >= (addr_t) &dummy_per32->starting_addr &&
+		     offset <= (addr_t) &dummy_per32->ending_addr) ||
+		    offset == (addr_t) &dummy_per32->lowcore.words.address)
+			offset = offset*2 + 4;
+		else
+			offset = offset*2;
+		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
+
+	}
+
+	FixPerRegisters(child);
+	return 0;
+}
+
+static int
+do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
+{
+	unsigned int tmp;  /* 4 bytes !! */
+	ptrace_area_emu31 parea; 
+	int copied, ret;
+
+	switch (request) {
+	case PTRACE_PEEKTEXT:
+	case PTRACE_PEEKDATA:
+		/* read word at location addr. */
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		if (copied != sizeof(tmp))
+			return -EIO;
+		return put_user(tmp, (unsigned int __user *) data);
+
+	case PTRACE_PEEKUSR:
+		/* read the word at location addr in the USER area. */
+		return peek_user_emu31(child, addr, data);
+
+	case PTRACE_POKETEXT:
+	case PTRACE_POKEDATA:
+		/* write the word at location addr. */
+		tmp = data;
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
+		if (copied != sizeof(tmp))
+			return -EIO;
+		return 0;
+
+	case PTRACE_POKEUSR:
+		/* write the word at location addr in the USER area */
+		return poke_user_emu31(child, addr, data);
+
+	case PTRACE_PEEKUSR_AREA:
+	case PTRACE_POKEUSR_AREA:
+		if (copy_from_user(&parea, (void __user *) addr,
+							sizeof(parea)))
+			return -EFAULT;
+		addr = parea.kernel_addr;
+		data = parea.process_addr;
+		copied = 0;
+		while (copied < parea.len) {
+			if (request == PTRACE_PEEKUSR_AREA)
+				ret = peek_user_emu31(child, addr, data);
+			else {
+				__u32 tmp;
+				if (get_user (tmp, (__u32 __user *) data))
+					return -EFAULT;
+				ret = poke_user_emu31(child, addr, tmp);
+			}
+			if (ret)
+				return ret;
+			addr += sizeof(unsigned int);
+			data += sizeof(unsigned int);
+			copied += sizeof(unsigned int);
+		}
+		return 0;
+	case PTRACE_GETEVENTMSG:
+		return put_user((__u32) child->ptrace_message,
+				(unsigned int __user *) data);
+	case PTRACE_GETSIGINFO:
+		if (child->last_siginfo == NULL)
+			return -EINVAL;
+		return copy_siginfo_to_user32((compat_siginfo_t __user *) data,
+					      child->last_siginfo);
+	case PTRACE_SETSIGINFO:
+		if (child->last_siginfo == NULL)
+			return -EINVAL;
+		return copy_siginfo_from_user32(child->last_siginfo,
+						(compat_siginfo_t __user *) data);
+	}
+	return ptrace_request(child, request, addr, data);
+}
+#endif
+
+#define PT32_IEEE_IP 0x13c
+
+static int
+do_ptrace(struct task_struct *child, long request, long addr, long data)
+{
+	int ret;
+
+	if (request == PTRACE_ATTACH)
+		return ptrace_attach(child);
+
+	/*
+	 * Special cases to get/store the ieee instructions pointer.
+	 */
+	if (child == current) {
+		if (request == PTRACE_PEEKUSR && addr == PT_IEEE_IP)
+			return peek_user(child, addr, data);
+		if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
+			return poke_user(child, addr, data);
+#ifdef CONFIG_S390_SUPPORT
+		if (request == PTRACE_PEEKUSR &&
+		    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
+			return peek_user_emu31(child, addr, data);
+		if (request == PTRACE_POKEUSR &&
+		    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
+			return poke_user_emu31(child, addr, data);
+#endif
+	}
+
+	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	if (ret < 0)
+		return ret;
+
+	switch (request) {
+	case PTRACE_SYSCALL:
+		/* continue and stop at next (return from) syscall */
+	case PTRACE_CONT:
+		/* restart after signal. */
+		if ((unsigned long) data >= _NSIG)
+			return -EIO;
+		if (request == PTRACE_SYSCALL)
+			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		else
+			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		child->exit_code = data;
+		/* make sure the single step bit is not set. */
+		clear_single_step(child);
+		wake_up_process(child);
+		return 0;
+
+	case PTRACE_KILL:
+		/*
+		 * make the child exit.  Best I can do is send it a sigkill. 
+		 * perhaps it should be put in the status that it wants to 
+		 * exit.
+		 */
+		if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+			return 0;
+		child->exit_code = SIGKILL;
+		/* make sure the single step bit is not set. */
+		clear_single_step(child);
+		wake_up_process(child);
+		return 0;
+
+	case PTRACE_SINGLESTEP:
+		/* set the trap flag. */
+		if ((unsigned long) data >= _NSIG)
+			return -EIO;
+		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		child->exit_code = data;
+		if (data)
+			set_tsk_thread_flag(child, TIF_SINGLE_STEP);
+		else
+			set_single_step(child);
+		/* give it a chance to run. */
+		wake_up_process(child);
+		return 0;
+
+	case PTRACE_DETACH:
+		/* detach a process that was attached. */
+		return ptrace_detach(child, data);
+
+
+	/* Do requests that differ for 31/64 bit */
+	default:
+#ifdef CONFIG_S390_SUPPORT
+		if (test_thread_flag(TIF_31BIT))
+			return do_ptrace_emu31(child, request, addr, data);
+#endif
+		return do_ptrace_normal(child, request, addr, data);
+	}
+	/* Not reached.  */
+	return -EIO;
+}
+
+asmlinkage long
+sys_ptrace(long request, long pid, long addr, long data)
+{
+	struct task_struct *child;
+	int ret;
+
+	lock_kernel();
+
+	if (request == PTRACE_TRACEME) {
+		/* are we already being traced? */
+		ret = -EPERM;
+		if (current->ptrace & PT_PTRACED)
+			goto out;
+		ret = security_ptrace(current->parent, current);
+		if (ret)
+			goto out;
+		/* set the ptrace bit in the process flags. */
+		current->ptrace |= PT_PTRACED;
+		goto out;
+	}
+
+	ret = -EPERM;
+	if (pid == 1)		/* you may not mess with init */
+		goto out;
+
+	ret = -ESRCH;
+	read_lock(&tasklist_lock);
+	child = find_task_by_pid(pid);
+	if (child)
+		get_task_struct(child);
+	read_unlock(&tasklist_lock);
+	if (!child)
+		goto out;
+
+	ret = do_ptrace(child, request, addr, data);
+
+	put_task_struct(child);
+out:
+	unlock_kernel();
+	return ret;
+}
+
+asmlinkage void
+syscall_trace(struct pt_regs *regs, int entryexit)
+{
+	if (unlikely(current->audit_context)) {
+		if (!entryexit)
+			audit_syscall_entry(current, regs->gprs[2],
+					    regs->orig_gpr2, regs->gprs[3],
+					    regs->gprs[4], regs->gprs[5]);
+		else
+			audit_syscall_exit(current, regs->gprs[2]);
+	}
+	if (!test_thread_flag(TIF_SYSCALL_TRACE))
+		return;
+	if (!(current->ptrace & PT_PTRACED))
+		return;
+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+				 ? 0x80 : 0));
+
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+	if (current->exit_code) {
+		send_sig(current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+}
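The offset "magic" in peek_user_emu31()/poke_user_emu31() reduces to one
rule, spelled out in the comment inside poke_user_emu31(): every
per_struct field sits at twice its per_struct32 offset, and for the
8-byte fields only the low half (bytes 4-7) is wanted, hence the extra
4. A one-line sketch (hypothetical helper name):

	static unsigned long emu31_to_per_offset(unsigned long off32,
						 int is_8byte_field)
	{
		return off32 * 2 + (is_8byte_field ? 4 : 0);
	}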
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
new file mode 100644
index 000000000000..658e5ac484f9
--- /dev/null
+++ b/arch/s390/kernel/reipl.S
@@ -0,0 +1,78 @@
+/*
+ *  arch/s390/kernel/reipl.S
+ *
+ *  S390 version
+ *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
+ */
+
+#include <asm/lowcore.h>
+
+		.globl	do_reipl
+do_reipl:	basr	%r13,0
+.Lpg0:		lpsw	.Lnewpsw-.Lpg0(%r13)
+.Lpg1:		lctl	%c6,%c6,.Lall-.Lpg0(%r13)
+                stctl   %c0,%c0,.Lctlsave-.Lpg0(%r13)
+                ni      .Lctlsave-.Lpg0(%r13),0xef
+                lctl    %c0,%c0,.Lctlsave-.Lpg0(%r13)
+                lr      %r1,%r2
+        	mvc     __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
+                stsch   .Lschib-.Lpg0(%r13)                                    
+	        oi      .Lschib+5-.Lpg0(%r13),0x84 
+.Lecs:  	xi      .Lschib+27-.Lpg0(%r13),0x01 
+        	msch    .Lschib-.Lpg0(%r13) 
+                lhi     %r0,5
+.Lssch:		ssch	.Liplorb-.Lpg0(%r13)           
+		jz	.L001
+                brct    %r0,.Lssch  
+		bas	%r14,.Ldisab-.Lpg0(%r13)
+.L001:		mvc	__LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)	
+.Ltpi:		lpsw	.Lwaitpsw-.Lpg0(%r13)          
+.Lcont:		c	%r1,__LC_SUBCHANNEL_ID
+		jnz	.Ltpi
+		clc	__LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
+		jnz	.Ltpi
+		tsch	.Liplirb-.Lpg0(%r13)           
+		tm	.Liplirb+9-.Lpg0(%r13),0xbf
+                jz      .L002
+                bas     %r14,.Ldisab-.Lpg0(%r13)    
+.L002:		tm	.Liplirb+8-.Lpg0(%r13),0xf3    
+                jz      .L003
+                bas     %r14,.Ldisab-.Lpg0(%r13)	
+.L003:		spx	.Lnull-.Lpg0(%r13)
+		st 	%r1,__LC_SUBCHANNEL_ID
+                lpsw 	0
+		sigp    0,0,0(6)               
+.Ldisab:	st      %r14,.Ldispsw+4-.Lpg0(%r13)
+		lpsw	.Ldispsw-.Lpg0(%r13)
+                .align 	8
+.Lall:		.long	0xff000000
+.Lnull:		.long   0x00000000
+.Lctlsave:      .long   0x00000000
+                .align 	8
+.Lnewpsw:	.long   0x00080000,0x80000000+.Lpg1
+.Lpcnew:  	.long   0x00080000,0x80000000+.Lecs
+.Lionew:	.long   0x00080000,0x80000000+.Lcont
+.Lwaitpsw:	.long	0x020a0000,0x00000000+.Ltpi
+.Ldispsw:	.long   0x000a0000,0x00000000
+.Liplccws:	.long   0x02000000,0x60000018
+		.long   0x08000008,0x20000001
+.Liplorb:	.long	0x0049504c,0x0040ff80
+		.long	0x00000000+.Liplccws
+.Lschib:        .long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+.Liplirb:	.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+	
+
+	
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
new file mode 100644
index 000000000000..4d090d60f3ef
--- /dev/null
+++ b/arch/s390/kernel/reipl64.S
@@ -0,0 +1,96 @@
+/*
+ *  arch/s390/kernel/reipl.S
+ *
+ *  S390 version
+ *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
+	         Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#include <asm/lowcore.h>
+		.globl	do_reipl
+do_reipl:	basr	%r13,0
+.Lpg0:		lpswe   .Lnewpsw-.Lpg0(%r13)
+.Lpg1:		lctlg	%c6,%c6,.Lall-.Lpg0(%r13)
+                stctg   %c0,%c0,.Lctlsave-.Lpg0(%r13)
+                ni      .Lctlsave+4-.Lpg0(%r13),0xef
+                lctlg   %c0,%c0,.Lctlsave-.Lpg0(%r13)
+                lgr     %r1,%r2
+        	mvc     __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
+                stsch   .Lschib-.Lpg0(%r13)                                    
+	        oi      .Lschib+5-.Lpg0(%r13),0x84 
+.Lecs:  	xi      .Lschib+27-.Lpg0(%r13),0x01 
+        	msch    .Lschib-.Lpg0(%r13) 
+	        lghi    %r0,5
+.Lssch:		ssch	.Liplorb-.Lpg0(%r13)           
+		jz	.L001
+		brct    %r0,.Lssch   
+		bas	%r14,.Ldisab-.Lpg0(%r13)
+.L001:		mvc	__LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)	
+.Ltpi:		lpswe	.Lwaitpsw-.Lpg0(%r13)          
+.Lcont:		c	%r1,__LC_SUBCHANNEL_ID
+		jnz	.Ltpi
+		clc	__LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
+		jnz	.Ltpi
+		tsch	.Liplirb-.Lpg0(%r13)           
+		tm	.Liplirb+9-.Lpg0(%r13),0xbf
+                jz      .L002
+                bas     %r14,.Ldisab-.Lpg0(%r13)    
+.L002:		tm	.Liplirb+8-.Lpg0(%r13),0xf3    
+                jz      .L003
+                bas     %r14,.Ldisab-.Lpg0(%r13)	
+.L003:		spx	.Lnull-.Lpg0(%r13)
+		st 	%r1,__LC_SUBCHANNEL_ID
+                lhi     %r1,0            # mode 0 = esa
+                slr     %r0,%r0          # set cpuid to zero
+                sigp    %r1,%r0,0x12     # switch to esa mode
+                lpsw 	0
+.Ldisab:	sll    %r14,1
+		srl    %r14,1            # need to kill hi bit to avoid specification exceptions.
+		st     %r14,.Ldispsw+12-.Lpg0(%r13)
+		lpswe	.Ldispsw-.Lpg0(%r13)
+                .align 	8
+.Lall:		.quad	0x00000000ff000000
+.Lctlsave:      .quad   0x0000000000000000
+.Lnull:		.long   0x0000000000000000
+                .align 	16
+/*
+ * These addresses have to be 31 bit, otherwise
+ * the sigp will throw a specification exception
+ * when switching to ESA mode, as bit 31 would be set
+ * in the ESA psw.
+ * Bit 31 of the addresses has to be 0 for the
+ * 31 bit lpswe instruction, a fact that appears to have
+ * been omitted from the PoP.
+ */
+.Lnewpsw:	.quad   0x0000000080000000
+		.quad   .Lpg1
+.Lpcnew:	.quad   0x0000000080000000
+	  	.quad   .Lecs
+.Lionew:	.quad   0x0000000080000000
+		.quad   .Lcont
+.Lwaitpsw:	.quad	0x0202000080000000
+		.quad   .Ltpi
+.Ldispsw:	.quad   0x0002000080000000
+		.quad   0x0000000000000000
+.Liplccws:	.long   0x02000000,0x60000018
+		.long   0x08000008,0x20000001
+.Liplorb:	.long	0x0049504c,0x0040ff80
+		.long	0x00000000+.Liplccws
+.Lschib:        .long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+		.long   0x00000000,0x00000000
+.Liplirb:	.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+	
+
+	
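The sll/srl pair at .Ldisab above forces bit 31 of the 32-bit return
address to zero before it is stored into the disabled-wait PSW, for the
reason given in the comment block. In C terms (sketch only, assuming a
32-bit address):

	static unsigned int strip_amode_bit(unsigned int addr)
	{
		/* same effect as sll 1 followed by srl 1 */
		return addr & 0x7fffffffU;
	}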
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
new file mode 100644
index 000000000000..3bdd38ec71da
--- /dev/null
+++ b/arch/s390/kernel/s390_ext.c
@@ -0,0 +1,135 @@
+/*
+ *  arch/s390/kernel/s390_ext.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+
+#include <asm/lowcore.h>
+#include <asm/s390_ext.h>
+#include <asm/irq.h>
+
+/*
+ * Simple hash strategy: index = code & 0xff;
+ * ext_int_hash[index] is the start of the list for all external interrupts
+ * that hash to this index. With the current set of external interrupts 
+ * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
+ * iucv and 0x2603 pfault) this is always the first element. 
+ */
+ext_int_info_t *ext_int_hash[256] = { 0, };
+
+int register_external_interrupt(__u16 code, ext_int_handler_t handler)
+{
+        ext_int_info_t *p;
+        int index;
+
+	p = (ext_int_info_t *) kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
+        if (p == NULL)
+                return -ENOMEM;
+        p->code = code;
+        p->handler = handler;
+        index = code & 0xff;
+        p->next = ext_int_hash[index];
+        ext_int_hash[index] = p;
+        return 0;
+}
+
+int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
+				      ext_int_info_t *p)
+{
+        int index;
+
+        if (p == NULL)
+                return -EINVAL;
+        p->code = code;
+        p->handler = handler;
+        index = code & 0xff;
+        p->next = ext_int_hash[index];
+        ext_int_hash[index] = p;
+        return 0;
+}
+
+int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
+{
+        ext_int_info_t *p, *q;
+        int index;
+
+        index = code & 0xff;
+        q = NULL;
+        p = ext_int_hash[index];
+        while (p != NULL) {
+                if (p->code == code && p->handler == handler)
+                        break;
+                q = p;
+                p = p->next;
+        }
+        if (p == NULL)
+                return -ENOENT;
+        if (q != NULL)
+                q->next = p->next;
+        else
+                ext_int_hash[index] = p->next;
+	kfree(p);
+        return 0;
+}
+
+int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
+					ext_int_info_t *p)
+{
+	ext_int_info_t *q;
+	int index;
+
+	if (p == NULL || p->code != code || p->handler != handler)
+		return -EINVAL;
+	index = code & 0xff;
+	q = ext_int_hash[index];
+	if (p != q) {
+		while (q != NULL) {
+			if (q->next == p)
+				break;
+			q = q->next;
+		}
+		if (q == NULL)
+			return -ENOENT;
+		q->next = p->next;
+	} else
+		ext_int_hash[index] = p->next;
+	return 0;
+}
+
+void do_extint(struct pt_regs *regs, unsigned short code)
+{
+        ext_int_info_t *p;
+        int index;
+
+	irq_enter();
+	asm volatile ("mc 0,0");
+	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
+		/*
+		 * Make sure that the external interrupt did not "overtake"
+		 * the last HZ timer interrupt.
+		 */
+		account_ticks(regs);
+	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
+        index = code & 0xff;
+	for (p = ext_int_hash[index]; p; p = p->next) {
+		if (likely(p->code == code)) {
+			if (likely(p->handler))
+				p->handler(regs, code);
+		}
+	}
+	irq_exit();
+}
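+
+/*
+ * Note on ordering: register_external_interrupt() pushes new entries at
+ * the head of the hash chain, so when several handlers share one code
+ * the most recently registered handler is called first by the loop above.
+ */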
+
+EXPORT_SYMBOL(register_external_interrupt);
+EXPORT_SYMBOL(unregister_external_interrupt);
+
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
new file mode 100644
index 000000000000..11fd6d556d8f
--- /dev/null
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -0,0 +1,65 @@
+/*
+ *  arch/s390/kernel/s390_ksyms.c
+ *
+ *  S390 version
+ */
+#include <linux/config.h>
+#include <linux/highuid.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl32.h>
+#include <asm/checksum.h>
+#include <asm/cpcmd.h>
+#include <asm/delay.h>
+#include <asm/pgalloc.h>
+#include <asm/setup.h>
+#ifdef CONFIG_IP_MULTICAST
+#include <net/arp.h>
+#endif
+
+/*
+ * memory management
+ */
+EXPORT_SYMBOL(_oi_bitmap);
+EXPORT_SYMBOL(_ni_bitmap);
+EXPORT_SYMBOL(_zb_findmap);
+EXPORT_SYMBOL(_sb_findmap);
+EXPORT_SYMBOL(__copy_from_user_asm);
+EXPORT_SYMBOL(__copy_to_user_asm);
+EXPORT_SYMBOL(__copy_in_user_asm);
+EXPORT_SYMBOL(__clear_user_asm);
+EXPORT_SYMBOL(__strncpy_from_user_asm);
+EXPORT_SYMBOL(__strnlen_user_asm);
+EXPORT_SYMBOL(diag10);
+EXPORT_SYMBOL(default_storage_key);
+
+/*
+ * semaphore ops
+ */
+EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_interruptible);
+
+/*
+ * binfmt_elf loader 
+ */
+extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(overflowuid);
+EXPORT_SYMBOL(overflowgid);
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * misc.
+ */
+EXPORT_SYMBOL(machine_flags);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(csum_fold);
+EXPORT_SYMBOL(console_mode);
+EXPORT_SYMBOL(console_devno);
+EXPORT_SYMBOL(console_irq);
+EXPORT_SYMBOL(sys_wait4);
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c
new file mode 100644
index 000000000000..8dfb690c159f
--- /dev/null
+++ b/arch/s390/kernel/semaphore.c
@@ -0,0 +1,108 @@
+/*
+ *  linux/arch/s390/kernel/semaphore.c
+ *
+ *  S390 version
+ *    Copyright (C) 1998-2000 IBM Corporation
+ *    Author(s): Martin Schwidefsky
+ *
+ *  Derived from "linux/arch/i386/kernel/semaphore.c"
+ *    Copyright (C) 1999, Linus Torvalds
+ *
+ */
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <asm/semaphore.h>
+
+/*
+ * Atomically update sem->count. Equivalent to:
+ *   old_val = sem->count.counter;
+ *   new_val = ((old_val >= 0) ? old_val : 0) + incr;
+ *   sem->count.counter = new_val;
+ *   return old_val;
+ */
+static inline int __sem_update_count(struct semaphore *sem, int incr)
+{
+	int old_val, new_val;
+
+        __asm__ __volatile__("   l     %0,0(%3)\n"
+                             "0: ltr   %1,%0\n"
+			     "   jhe   1f\n"
+			     "   lhi   %1,0\n"
+			     "1: ar    %1,%4\n"
+                             "   cs    %0,%1,0(%3)\n"
+                             "   jl    0b\n"
+                             : "=&d" (old_val), "=&d" (new_val),
+			       "=m" (sem->count)
+			     : "a" (&sem->count), "d" (incr), "m" (sem->count)
+			     : "cc" );
+	return old_val;
+}
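+
+/*
+ * For readers not fluent in s390 assembly: the cs loop above is
+ * equivalent to the following plain C sketch (the __sync builtin is
+ * used purely for illustration, the kernel relies on cs directly):
+ *
+ *	do {
+ *		old_val = sem->count.counter;
+ *		new_val = ((old_val >= 0) ? old_val : 0) + incr;
+ *	} while (__sync_val_compare_and_swap(&sem->count.counter,
+ *					     old_val, new_val) != old_val);
+ */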
+
+/*
+ * The inline function up() incremented count but the result
+ * was <= 0. This indicates that some process is waiting on
+ * the semaphore. The semaphore is free and we'll wake the
+ * first sleeping process, so we set count to 1 unless some
+ * other cpu has called up in the meantime in which case
+ * we just increment count by 1.
+ */
+void __up(struct semaphore *sem)
+{
+	__sem_update_count(sem, 1);
+	wake_up(&sem->wait);
+}
+
+/*
+ * The inline function down() decremented count and the result
+ * was < 0. The wait loop will atomically test and update the
+ * semaphore counter following the rules:
+ *   count > 0: decrement count, wake up queue and exit.
+ *   count <= 0: set count to -1, go to sleep.
+ */
+void __sched __down(struct semaphore * sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	add_wait_queue_exclusive(&sem->wait, &wait);
+	while (__sem_update_count(sem, -1) <= 0) {
+		schedule();
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	__set_task_state(tsk, TASK_RUNNING);
+	wake_up(&sem->wait);
+}
+
+/*
+ * Same as __down() with an additional test for signals.
+ * If a signal is pending the count is updated as follows:
+ *   count > 0: wake up queue and exit.
+ *   count <= 0: set count to 0, wake up queue and exit.
+ */
+int __sched __down_interruptible(struct semaphore * sem)
+{
+	int retval = 0;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	__set_task_state(tsk, TASK_INTERRUPTIBLE);
+	add_wait_queue_exclusive(&sem->wait, &wait);
+	while (__sem_update_count(sem, -1) <= 0) {
+		if (signal_pending(current)) {
+			__sem_update_count(sem, 0);
+			retval = -EINTR;
+			break;
+		}
+		schedule();
+		set_task_state(tsk, TASK_INTERRUPTIBLE);
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	__set_task_state(tsk, TASK_RUNNING);
+	wake_up(&sem->wait);
+	return retval;
+}
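+
+/*
+ * Typical use of the slow paths above through the inline wrappers from
+ * <asm/semaphore.h> (sketch with a hypothetical semaphore):
+ *
+ *	static DECLARE_MUTEX(my_sem);		// count starts at 1
+ *
+ *	if (down_interruptible(&my_sem))
+ *		return -ERESTARTSYS;		// interrupted by a signal
+ *	... critical section ...
+ *	up(&my_sem);
+ *
+ * down() and up() only drop into __down()/__up() when the fast path
+ * fails, i.e. when the count indicates contention.
+ */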
+
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
new file mode 100644
index 000000000000..c879c40aa7a5
--- /dev/null
+++ b/arch/s390/kernel/setup.c
@@ -0,0 +1,632 @@
+/*
+ *  arch/s390/kernel/setup.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ *  Derived from "arch/i386/kernel/setup.c"
+ *    Copyright (C) 1995, Linus Torvalds
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/bootmem.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/seq_file.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/smp.h>
+#include <asm/mmu_context.h>
+#include <asm/cpcmd.h>
+#include <asm/lowcore.h>
+#include <asm/irq.h>
+
+/*
+ * Machine setup..
+ */
+unsigned int console_mode = 0;
+unsigned int console_devno = -1;
+unsigned int console_irq = -1;
+unsigned long memory_size = 0;
+unsigned long machine_flags = 0;
+unsigned int default_storage_key = 0;
+struct {
+	unsigned long addr, size, type;
+} memory_chunk[MEMORY_CHUNKS] = { { 0 } };
+#define CHUNK_READ_WRITE 0
+#define CHUNK_READ_ONLY 1
+volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
+
+/*
+ * Setup options
+ */
+extern int _text,_etext, _edata, _end;
+
+/*
+ * This is set up by the setup-routine at boot-time.
+ * For S390 we need to find out what we have to set up,
+ * using address 0x10400 ...
+ */
+
+#include <asm/setup.h>
+
+static char command_line[COMMAND_LINE_SIZE] = { 0, };
+
+static struct resource code_resource = {
+	.name  = "Kernel code",
+	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource data_resource = {
+	.name = "Kernel data",
+	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+/*
+ * cpu_init() initializes state that is per-CPU.
+ */
+void __devinit cpu_init (void)
+{
+        int addr = hard_smp_processor_id();
+
+        /*
+         * Store processor id in lowcore (used e.g. in timer_interrupt)
+         */
+        asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
+        S390_lowcore.cpu_data.cpu_addr = addr;
+
+        /*
+         * Force FPU initialization:
+         */
+        clear_thread_flag(TIF_USEDFPU);
+        clear_used_math();
+
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+        if (current->mm)
+                BUG();
+        enter_lazy_tlb(&init_mm, current);
+}
+
+/*
+ * VM halt and poweroff setup routines
+ */
+char vmhalt_cmd[128] = "";
+char vmpoff_cmd[128] = "";
+
+static inline void strncpy_skip_quote(char *dst, char *src, int n)
+{
+        int sx, dx;
+
+        dx = 0;
+        for (sx = 0; src[sx] != 0; sx++) {
+                if (src[sx] == '"') continue;
+                dst[dx++] = src[sx];
+                if (dx >= n) break;
+        }
+}
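+
+/*
+ * Example: strncpy_skip_quote(buf, "\"CP LOGOFF\"", 127) leaves
+ * CP LOGOFF in buf - double quotes are dropped, everything else is
+ * copied verbatim. The function does not null-terminate; the callers
+ * below do that themselves.
+ */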
+
+static int __init vmhalt_setup(char *str)
+{
+        strncpy_skip_quote(vmhalt_cmd, str, 127);
+        vmhalt_cmd[127] = 0;
+        return 1;
+}
+
+__setup("vmhalt=", vmhalt_setup);
+
+static int __init vmpoff_setup(char *str)
+{
+        strncpy_skip_quote(vmpoff_cmd, str, 127);
+        vmpoff_cmd[127] = 0;
+        return 1;
+}
+
+__setup("vmpoff=", vmpoff_setup);
+
+/*
+ * condev= and conmode= setup parameter.
+ */
+
+static int __init condev_setup(char *str)
+{
+	int vdev;
+
+	vdev = simple_strtoul(str, &str, 0);
+	if (vdev >= 0 && vdev < 65536) {
+		console_devno = vdev;
+		console_irq = -1;
+	}
+	return 1;
+}
+
+__setup("condev=", condev_setup);
+
+static int __init conmode_setup(char *str)
+{
+#if defined(CONFIG_SCLP_CONSOLE)
+	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
+                SET_CONSOLE_SCLP;
+#endif
+#if defined(CONFIG_TN3215_CONSOLE)
+	if (strncmp(str, "3215", 5) == 0)
+		SET_CONSOLE_3215;
+#endif
+#if defined(CONFIG_TN3270_CONSOLE)
+	if (strncmp(str, "3270", 5) == 0)
+		SET_CONSOLE_3270;
+#endif
+        return 1;
+}
+
+__setup("conmode=", conmode_setup);
+
+static void __init conmode_default(void)
+{
+	char query_buffer[1024];
+	char *ptr;
+
+        if (MACHINE_IS_VM) {
+		__cpcmd("QUERY CONSOLE", query_buffer, 1024);
+		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
+		ptr = strstr(query_buffer, "SUBCHANNEL =");
+		console_irq = simple_strtoul(ptr + 13, NULL, 16);
+		__cpcmd("QUERY TERM", query_buffer, 1024);
+		ptr = strstr(query_buffer, "CONMODE");
+		/*
+		 * Set the conmode to 3215 so that the device recognition 
+		 * will set the cu_type of the console to 3215. If the
+		 * conmode is 3270 and we don't set it back then both
+		 * 3215 and the 3270 driver will try to access the console
+		 * device (3215 as console and 3270 as normal tty).
+		 */
+		__cpcmd("TERM CONMODE 3215", NULL, 0);
+		if (ptr == NULL) {
+#if defined(CONFIG_SCLP_CONSOLE)
+			SET_CONSOLE_SCLP;
+#endif
+			return;
+		}
+		if (strncmp(ptr + 8, "3270", 4) == 0) {
+#if defined(CONFIG_TN3270_CONSOLE)
+			SET_CONSOLE_3270;
+#elif defined(CONFIG_TN3215_CONSOLE)
+			SET_CONSOLE_3215;
+#elif defined(CONFIG_SCLP_CONSOLE)
+			SET_CONSOLE_SCLP;
+#endif
+		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
+#if defined(CONFIG_TN3215_CONSOLE)
+			SET_CONSOLE_3215;
+#elif defined(CONFIG_TN3270_CONSOLE)
+			SET_CONSOLE_3270;
+#elif defined(CONFIG_SCLP_CONSOLE)
+			SET_CONSOLE_SCLP;
+#endif
+		}
+        } else if (MACHINE_IS_P390) {
+#if defined(CONFIG_TN3215_CONSOLE)
+		SET_CONSOLE_3215;
+#elif defined(CONFIG_TN3270_CONSOLE)
+		SET_CONSOLE_3270;
+#endif
+	} else {
+#if defined(CONFIG_SCLP_CONSOLE)
+		SET_CONSOLE_SCLP;
+#endif
+	}
+}
+
+#ifdef CONFIG_SMP
+extern void machine_restart_smp(char *);
+extern void machine_halt_smp(void);
+extern void machine_power_off_smp(void);
+
+void (*_machine_restart)(char *command) = machine_restart_smp;
+void (*_machine_halt)(void) = machine_halt_smp;
+void (*_machine_power_off)(void) = machine_power_off_smp;
+#else
+/*
+ * Reboot, halt and power_off routines for non SMP.
+ */
+extern void reipl(unsigned long devno);
+static void do_machine_restart_nonsmp(char * __unused)
+{
+	if (MACHINE_IS_VM)
+		cpcmd ("IPL", NULL, 0);
+	else
+		reipl (0x10000 | S390_lowcore.ipl_device);
+}
+
+static void do_machine_halt_nonsmp(void)
+{
+        if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+                cpcmd(vmhalt_cmd, NULL, 0);
+        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+}
+
+static void do_machine_power_off_nonsmp(void)
+{
+        if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+                cpcmd(vmpoff_cmd, NULL, 0);
+        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+}
+
+void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
+void (*_machine_halt)(void) = do_machine_halt_nonsmp;
+void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
+#endif
+
+/*
+ * Reboot, halt and power_off stubs. They just call _machine_restart,
+ * _machine_halt or _machine_power_off.
+ */
+
+void machine_restart(char *command)
+{
+	console_unblank();
+	_machine_restart(command);
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void machine_halt(void)
+{
+	console_unblank();
+	_machine_halt();
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void machine_power_off(void)
+{
+	console_unblank();
+	_machine_power_off();
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+/*
+ * Setup function called from init/main.c just after the banner
+ * was printed.
+ */
+extern char _pstart, _pend, _stext;
+
+void __init setup_arch(char **cmdline_p)
+{
+        unsigned long bootmap_size;
+        unsigned long memory_start, memory_end;
+        char c = ' ', cn, *to = command_line, *from = COMMAND_LINE;
+	unsigned long start_pfn, end_pfn;
+        static unsigned int smptrap=0;
+        unsigned long delay = 0;
+	struct _lowcore *lc;
+	int i;
+
+        if (smptrap)
+                return;
+        smptrap=1;
+
+        /*
+         * print what head.S has found out about the machine 
+         */
+#ifndef CONFIG_ARCH_S390X
+	printk((MACHINE_IS_VM) ?
+	       "We are running under VM (31 bit mode)\n" :
+	       "We are running native (31 bit mode)\n");
+	printk((MACHINE_HAS_IEEE) ?
+	       "This machine has an IEEE fpu\n" :
+	       "This machine has no IEEE fpu\n");
+#else /* CONFIG_ARCH_S390X */
+	printk((MACHINE_IS_VM) ?
+	       "We are running under VM (64 bit mode)\n" :
+	       "We are running native (64 bit mode)\n");
+#endif /* CONFIG_ARCH_S390X */
+
+        ROOT_DEV = Root_RAM0;
+        memory_start = (unsigned long) &_end;   /* fix this if we use $CODELO etc. */
+#ifndef CONFIG_ARCH_S390X
+	memory_end = memory_size & ~0x400000UL;  /* align memory end to 4MB */
+        /*
+         * We need some free virtual space to be able to do vmalloc.
+         * On a machine with 2GB memory we make sure that we have at
+         * least 128 MB free space for vmalloc.
+         */
+        if (memory_end > 1920*1024*1024)
+                memory_end = 1920*1024*1024;
+#else /* CONFIG_ARCH_S390X */
+	memory_end = memory_size & ~0x200000UL;  /* detected in head.s */
+#endif /* CONFIG_ARCH_S390X */
+        init_mm.start_code = PAGE_OFFSET;
+        init_mm.end_code = (unsigned long) &_etext;
+        init_mm.end_data = (unsigned long) &_edata;
+        init_mm.brk = (unsigned long) &_end;
+
+	code_resource.start = (unsigned long) &_text;
+	code_resource.end = (unsigned long) &_etext - 1;
+	data_resource.start = (unsigned long) &_etext;
+	data_resource.end = (unsigned long) &_edata - 1;
+
+        /* Save unparsed command line copy for /proc/cmdline */
+        memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
+        saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+        for (;;) {
+                /*
+                 * "mem=XXX[kKmM]" sets memsize 
+                 */
+                if (c == ' ' && strncmp(from, "mem=", 4) == 0) {
+                        memory_end = simple_strtoul(from+4, &from, 0);
+                        if ( *from == 'K' || *from == 'k' ) {
+                                memory_end = memory_end << 10;
+                                from++;
+                        } else if ( *from == 'M' || *from == 'm' ) {
+                                memory_end = memory_end << 20;
+                                from++;
+                        }
+                }
+                /*
+                 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
+                 */
+                if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) {
+                        delay = simple_strtoul(from+9, &from, 0);
+			if (*from == 's' || *from == 'S') {
+				delay = delay*1000000;
+				from++;
+			} else if (*from == 'm' || *from == 'M') {
+				delay = delay*60*1000000;
+				from++;
+			}
+			/* now wait for the requested amount of time */
+			udelay(delay);
+                }
+                cn = *(from++);
+                if (!cn)
+                        break;
+                if (cn == '\n')
+                        cn = ' ';  /* replace newlines with space */
+		if (cn == 0x0d)
+			cn = ' ';  /* replace 0x0d with space */
+                if (cn == ' ' && c == ' ')
+                        continue;  /* remove additional spaces */
+                c = cn;
+                if (to - command_line >= COMMAND_LINE_SIZE - 1)
+                        break;	/* leave room for the trailing '\0' */
+                *(to++) = c;
+        }
+        if (c == ' ' && to > command_line) to--;
+        *to = '\0';
+        *cmdline_p = command_line;
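+
+	/*
+	 * Example of the parsing above (hypothetical command line):
+	 * "mem=128M ipldelay=30s root=/dev/ram0" sets
+	 * memory_end = 128 << 20, busy-waits 30 seconds right here,
+	 * and leaves a whitespace-normalized copy in command_line.
+	 */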
+
+	/*
+	 * partially used pages are not usable - thus
+	 * we are rounding upwards:
+	 */
+	start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	end_pfn = max_pfn = memory_end >> PAGE_SHIFT;
+
+	/*
+	 * Initialize the boot-time allocator (with low memory only):
+	 */
+	bootmap_size = init_bootmem(start_pfn, end_pfn);
+
+	/*
+	 * Register RAM areas with the bootmem allocator.
+	 */
+	for (i = 0; i < 16 && memory_chunk[i].size > 0; i++) {
+		unsigned long start_chunk, end_chunk;
+
+		if (memory_chunk[i].type != CHUNK_READ_WRITE)
+			continue;
+		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
+		start_chunk >>= PAGE_SHIFT;
+		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
+		end_chunk >>= PAGE_SHIFT;
+		if (start_chunk < start_pfn)
+			start_chunk = start_pfn;
+		if (end_chunk > end_pfn)
+			end_chunk = end_pfn;
+		if (start_chunk < end_chunk)
+			free_bootmem(start_chunk << PAGE_SHIFT,
+				     (end_chunk - start_chunk) << PAGE_SHIFT);
+	}
+
+        /*
+         * Reserve the bootmem bitmap itself as well. We do this in two
+         * steps (first step was init_bootmem()) because this catches
+         * the (very unlikely) case of us accidentally initializing the
+         * bootmem allocator with an invalid RAM area.
+         */
+        reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+        if (INITRD_START) {
+		if (INITRD_START + INITRD_SIZE <= memory_end) {
+			reserve_bootmem(INITRD_START, INITRD_SIZE);
+			initrd_start = INITRD_START;
+			initrd_end = initrd_start + INITRD_SIZE;
+		} else {
+                        printk("initrd extends beyond end of memory "
+                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+                               INITRD_START + INITRD_SIZE, memory_end);
+                        initrd_start = initrd_end = 0;
+		}
+        }
+#endif
+
+	for (i = 0; i < 16 && memory_chunk[i].size > 0; i++) {
+		struct resource *res;
+
+		res = alloc_bootmem_low(sizeof(struct resource));
+		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+
+		switch (memory_chunk[i].type) {
+		case CHUNK_READ_WRITE:
+			res->name = "System RAM";
+			break;
+		case CHUNK_READ_ONLY:
+			res->name = "System ROM";
+			res->flags |= IORESOURCE_READONLY;
+			break;
+		default:
+			res->name = "reserved";
+		}
+		res->start = memory_chunk[i].addr;
+		res->end = memory_chunk[i].addr +  memory_chunk[i].size - 1;
+		request_resource(&iomem_resource, res);
+		request_resource(res, &code_resource);
+		request_resource(res, &data_resource);
+	}
+
+        /*
+         * Setup lowcore for boot cpu
+         */
+#ifndef CONFIG_ARCH_S390X
+	lc = (struct _lowcore *) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
+	memset(lc, 0, PAGE_SIZE);
+#else /* CONFIG_ARCH_S390X */
+	lc = (struct _lowcore *) __alloc_bootmem(2*PAGE_SIZE, 2*PAGE_SIZE, 0);
+	memset(lc, 0, 2*PAGE_SIZE);
+#endif /* CONFIG_ARCH_S390X */
+	lc->restart_psw.mask = PSW_BASE_BITS;
+	lc->restart_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+	lc->external_new_psw.mask = PSW_KERNEL_BITS;
+	lc->external_new_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
+	lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
+	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
+	lc->program_new_psw.mask = PSW_KERNEL_BITS;
+	lc->program_new_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
+	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
+	lc->mcck_new_psw.addr =
+		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
+	lc->io_new_psw.mask = PSW_KERNEL_BITS;
+	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
+	lc->ipl_device = S390_lowcore.ipl_device;
+	lc->jiffy_timer = -1LL;
+	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
+	lc->async_stack = (unsigned long)
+		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
+#ifdef CONFIG_CHECK_STACK
+	lc->panic_stack = (unsigned long)
+		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
+#endif
+	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
+	lc->thread_info = (unsigned long) &init_thread_union;
+#ifdef CONFIG_ARCH_S390X
+	if (MACHINE_HAS_DIAG44)
+		lc->diag44_opcode = 0x83000044;
+	else
+		lc->diag44_opcode = 0x07000700;
+#endif /* CONFIG_ARCH_S390X */
+	set_prefix((u32)(unsigned long) lc);
+        cpu_init();
+        __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
+
+	/*
+	 * Create kernel page tables and switch to virtual addressing.
+	 */
+        paging_init();
+
+        /* Setup default console */
+	conmode_default();
+}
+
+void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
+{
+   printk("cpu %d "
+#ifdef CONFIG_SMP
+           "phys_idx=%d "
+#endif
+           "vers=%02X ident=%06X machine=%04X unused=%04X\n",
+           cpuinfo->cpu_nr,
+#ifdef CONFIG_SMP
+           cpuinfo->cpu_addr,
+#endif
+           cpuinfo->cpu_id.version,
+           cpuinfo->cpu_id.ident,
+           cpuinfo->cpu_id.machine,
+           cpuinfo->cpu_id.unused);
+}
+
+/*
+ * show_cpuinfo - Get information on one CPU for use by procfs.
+ */
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+        struct cpuinfo_S390 *cpuinfo;
+	unsigned long n = (unsigned long) v - 1;
+
+	if (!n) {
+		seq_printf(m, "vendor_id       : IBM/S390\n"
+			       "# processors    : %i\n"
+			       "bogomips per cpu: %lu.%02lu\n",
+			       num_online_cpus(), loops_per_jiffy/(500000/HZ),
+			       (loops_per_jiffy/(5000/HZ))%100);
+	}
+	if (cpu_online(n)) {
+#ifdef CONFIG_SMP
+		if (smp_processor_id() == n)
+			cpuinfo = &S390_lowcore.cpu_data;
+		else
+			cpuinfo = &lowcore_ptr[n]->cpu_data;
+#else
+		cpuinfo = &S390_lowcore.cpu_data;
+#endif
+		seq_printf(m, "processor %li: "
+			       "version = %02X,  "
+			       "identification = %06X,  "
+			       "machine = %04X\n",
+			       n, cpuinfo->cpu_id.version,
+			       cpuinfo->cpu_id.ident,
+			       cpuinfo->cpu_id.machine);
+	}
+        return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return c_start(m, pos);
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+struct seq_operations cpuinfo_op = {
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= show_cpuinfo,
+};
+
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
new file mode 100644
index 000000000000..610c1d03e975
--- /dev/null
+++ b/arch/s390/kernel/signal.c
@@ -0,0 +1,527 @@
+/*
+ *  arch/s390/kernel/signal.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ *    Based on Intel version
+ * 
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tty.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/lowcore.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+
+typedef struct 
+{
+	__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
+	struct sigcontext sc;
+	_sigregs sregs;
+	int signo;
+	__u8 retcode[S390_SYSCALL_SIZE];
+} sigframe;
+
+typedef struct 
+{
+	__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
+	__u8 retcode[S390_SYSCALL_SIZE];
+	struct siginfo info;
+	struct ucontext uc;
+} rt_sigframe;
+
+int do_signal(struct pt_regs *regs, sigset_t *oldset);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(struct pt_regs * regs, int history0, int history1,
+	       old_sigset_t mask)
+{
+	sigset_t saveset;
+
+	mask &= _BLOCKABLE;
+	spin_lock_irq(&current->sighand->siglock);
+	saveset = current->blocked;
+	siginitset(&current->blocked, mask);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	regs->gprs[2] = -EINTR;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		if (do_signal(regs, &saveset))
+			return -EINTR;
+	}
+}
+
+asmlinkage long
+sys_rt_sigsuspend(struct pt_regs *regs, sigset_t __user *unewset,
+						size_t sigsetsize)
+{
+	sigset_t saveset, newset;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+
+	if (copy_from_user(&newset, unewset, sizeof(newset)))
+		return -EFAULT;
+	sigdelsetmask(&newset, ~_BLOCKABLE);
+
+	spin_lock_irq(&current->sighand->siglock);
+	saveset = current->blocked;
+	current->blocked = newset;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	regs->gprs[2] = -EINTR;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		if (do_signal(regs, &saveset))
+			return -EINTR;
+	}
+}
+
+asmlinkage long
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+	      struct old_sigaction __user *oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+
+	if (act) {
+		old_sigset_t mask;
+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+			return -EFAULT;
+		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		__get_user(mask, &act->sa_mask);
+		siginitset(&new_ka.sa.sa_mask, mask);
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+			return -EFAULT;
+		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+	}
+
+	return ret;
+}
+
+asmlinkage long
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+					struct pt_regs *regs)
+{
+	return do_sigaltstack(uss, uoss, regs->gprs[15]);
+}
+
+
+
+/* Returns non-zero on fault. */
+static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
+{
+	unsigned long old_mask = regs->psw.mask;
+	int err;
+  
+	save_access_regs(current->thread.acrs);
+
+	/* Copy a 'clean' PSW mask to the user to avoid leaking
+	   information about whether PER is currently on.  */
+	regs->psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask);
+	err = __copy_to_user(&sregs->regs.psw, &regs->psw,
+			     sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs));
+	regs->psw.mask = old_mask;
+	if (err != 0)
+		return err;
+	err = __copy_to_user(&sregs->regs.acrs, current->thread.acrs,
+			     sizeof(sregs->regs.acrs));
+	if (err != 0)
+		return err;
+	/* 
+	 * We have to store the fp registers to current->thread.fp_regs
+	 * to merge them with the emulated registers.
+	 */
+	save_fp_regs(&current->thread.fp_regs);
+	return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
+			      sizeof(s390_fp_regs));
+}
+
+/* Returns positive number on error */
+static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
+{
+	unsigned long old_mask = regs->psw.mask;
+	int err;
+
+	/* Always make any pending restarted system call return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	err = __copy_from_user(&regs->psw, &sregs->regs.psw,
+			       sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs));
+	regs->psw.mask = PSW_MASK_MERGE(old_mask, regs->psw.mask);
+	regs->psw.addr |= PSW_ADDR_AMODE;
+	if (err)
+		return err;
+	err = __copy_from_user(&current->thread.acrs, &sregs->regs.acrs,
+			       sizeof(sregs->regs.acrs));
+	if (err)
+		return err;
+	restore_access_regs(current->thread.acrs);
+
+	err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
+			       sizeof(s390_fp_regs));
+	current->thread.fp_regs.fpc &= FPC_VALID_MASK;
+	if (err)
+		return err;
+
+	restore_fp_regs(&current->thread.fp_regs);
+	regs->trap = -1;	/* disable syscall checks */
+	return 0;
+}
+
+asmlinkage long sys_sigreturn(struct pt_regs *regs)
+{
+	sigframe __user *frame = (sigframe __user *)regs->gprs[15];
+	sigset_t set;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigregs(regs, &frame->sregs))
+		goto badframe;
+
+	return regs->gprs[2];
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+{
+	rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
+	sigset_t set;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
+		goto badframe;
+
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  */
+	do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]);
+	return regs->gprs[2];
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+{
+	unsigned long sp;
+
+	/* Default to using normal stack */
+	sp = regs->gprs[15];
+
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		if (! sas_ss_flags(sp))
+			sp = current->sas_ss_sp + current->sas_ss_size;
+	}
+
+	/* This is the legacy signal stack switching. */
+	else if (!user_mode(regs) &&
+		 !(ka->sa.sa_flags & SA_RESTORER) &&
+		 ka->sa.sa_restorer) {
+		sp = (unsigned long) ka->sa.sa_restorer;
+	}
+
+	return (void __user *)((sp - frame_size) & -8ul);
+}
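+
+/*
+ * The final mask rounds the frame down to an 8 byte boundary, e.g.
+ * with sp = 0x7fffffa5 and frame_size = 96 (hypothetical values)
+ * the frame lands at (0x7fffffa5 - 96) & -8ul = 0x7fffff40.
+ */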
+
+static inline int map_signal(int sig)
+{
+	if (current_thread_info()->exec_domain
+	    && current_thread_info()->exec_domain->signal_invmap
+	    && sig < 32)
+		return current_thread_info()->exec_domain->signal_invmap[sig];
+	else
+		return sig;
+}
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+			sigset_t *set, struct pt_regs * regs)
+{
+	sigframe __user *frame;
+
+	frame = get_sigframe(ka, regs, sizeof(sigframe));
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe)))
+		goto give_sigsegv;
+
+	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
+		goto give_sigsegv;
+
+	if (save_sigregs(regs, &frame->sregs))
+		goto give_sigsegv;
+	if (__put_user(&frame->sregs, &frame->sc.sregs))
+		goto give_sigsegv;
+
+	/* Set up to return from userspace.  If provided, use a stub
+	   already in userspace.  */
+	if (ka->sa.sa_flags & SA_RESTORER) {
+                regs->gprs[14] = (unsigned long)
+			ka->sa.sa_restorer | PSW_ADDR_AMODE;
+	} else {
+                regs->gprs[14] = (unsigned long)
+			frame->retcode | PSW_ADDR_AMODE;
+		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
+	                       (u16 __user *)(frame->retcode)))
+			goto give_sigsegv;
+	}
+
+	/* Set up backchain. */
+	if (__put_user(regs->gprs[15], (addr_t __user *) frame))
+		goto give_sigsegv;
+
+	/* Set up registers for signal handler */
+	regs->gprs[15] = (unsigned long) frame;
+	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+
+	regs->gprs[2] = map_signal(sig);
+	regs->gprs[3] = (unsigned long) &frame->sc;
+
+	/* We forgot to include these in the sigcontext.
+	   To avoid breaking binary compatibility, they are passed as args. */
+	regs->gprs[4] = current->thread.trap_no;
+	regs->gprs[5] = current->thread.prot_addr;
+
+	/* Place signal number on stack to allow backtrace from handler.  */
+	if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
+		goto give_sigsegv;
+	return;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+			   sigset_t *set, struct pt_regs * regs)
+{
+	int err = 0;
+	rt_sigframe __user *frame;
+
+	frame = get_sigframe(ka, regs, sizeof(rt_sigframe));
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe)))
+		goto give_sigsegv;
+
+	if (copy_siginfo_to_user(&frame->info, info))
+		goto give_sigsegv;
+
+	/* Create the ucontext.  */
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(0, &frame->uc.uc_link);
+	err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+	err |= __put_user(sas_ss_flags(regs->gprs[15]),
+			  &frame->uc.uc_stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+	err |= save_sigregs(regs, &frame->uc.uc_mcontext);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+	if (err)
+		goto give_sigsegv;
+
+	/* Set up to return from userspace.  If provided, use a stub
+	   already in userspace.  */
+	if (ka->sa.sa_flags & SA_RESTORER) {
+                regs->gprs[14] = (unsigned long)
+			ka->sa.sa_restorer | PSW_ADDR_AMODE;
+	} else {
+                regs->gprs[14] = (unsigned long)
+			frame->retcode | PSW_ADDR_AMODE;
+		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
+	                          (u16 __user *)(frame->retcode));
+	}
+
+	/* Set up backchain. */
+	if (__put_user(regs->gprs[15], (addr_t __user *) frame))
+		goto give_sigsegv;
+
+	/* Set up registers for signal handler */
+	regs->gprs[15] = (unsigned long) frame;
+	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+
+	regs->gprs[2] = map_signal(sig);
+	regs->gprs[3] = (unsigned long) &frame->info;
+	regs->gprs[4] = (unsigned long) &frame->uc;
+	return;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */	
+
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+	      siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+{
+	/* Set up the stack frame */
+	if (ka->sa.sa_flags & SA_SIGINFO)
+		setup_rt_frame(sig, ka, info, oldset, regs);
+	else
+		setup_frame(sig, ka, oldset, regs);
+
+	if (!(ka->sa.sa_flags & SA_NODEFER)) {
+		spin_lock_irq(&current->sighand->siglock);
+		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+		sigaddset(&current->blocked,sig);
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+	unsigned long retval = 0, continue_addr = 0, restart_addr = 0;
+	siginfo_t info;
+	int signr;
+	struct k_sigaction ka;
+
+	/*
+	 * We want the common case to go fast, which
+	 * is why we may in certain cases get here from
+	 * kernel mode. Just return without doing anything
+	 * if so.
+	 */
+	if (!user_mode(regs))
+		return 1;
+
+	if (!oldset)
+		oldset = &current->blocked;
+
+	/* Are we from a system call? */
+	if (regs->trap == __LC_SVC_OLD_PSW) {
+		continue_addr = regs->psw.addr;
+		restart_addr = continue_addr - regs->ilc;
+		retval = regs->gprs[2];
+
+		/* Prepare for system call restart.  We do this here so that a
+		   debugger will see the already changed PSW. */
+		if (retval == -ERESTARTNOHAND ||
+		    retval == -ERESTARTSYS ||
+		    retval == -ERESTARTNOINTR) {
+			regs->gprs[2] = regs->orig_gpr2;
+			regs->psw.addr = restart_addr;
+		} else if (retval == -ERESTART_RESTARTBLOCK) {
+			regs->gprs[2] = -EINTR;
+		}
+	}
+
+	/* Get signal to deliver.  When running under ptrace, at this point
+	   the debugger may change all our registers ... */
+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+	/* Depending on the signal settings we may need to revert the
+	   decision to restart the system call. */
+	if (signr > 0 && regs->psw.addr == restart_addr) {
+		if (retval == -ERESTARTNOHAND
+		    || (retval == -ERESTARTSYS
+			 && !(current->sighand->action[signr-1].sa.sa_flags
+			      & SA_RESTART))) {
+			regs->gprs[2] = -EINTR;
+			regs->psw.addr = continue_addr;
+		}
+	}
+
+	if (signr > 0) {
+		/* Whee!  Actually deliver the signal.  */
+#ifdef CONFIG_S390_SUPPORT
+		if (test_thread_flag(TIF_31BIT)) {
+			extern void handle_signal32(unsigned long sig,
+						    struct k_sigaction *ka,
+						    siginfo_t *info,
+						    sigset_t *oldset,
+						    struct pt_regs *regs);
+			handle_signal32(signr, &ka, &info, oldset, regs);
+			return 1;
+	        }
+#endif
+		handle_signal(signr, &ka, &info, oldset, regs);
+		return 1;
+	}
+
+	/* Restart a different system call. */
+	if (retval == -ERESTART_RESTARTBLOCK
+	    && regs->psw.addr == continue_addr) {
+		regs->gprs[2] = __NR_restart_syscall;
+		set_thread_flag(TIF_RESTART_SVC);
+	}
+	return 0;
+}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
new file mode 100644
index 000000000000..fdfcf0488b49
--- /dev/null
+++ b/arch/s390/kernel/smp.c
@@ -0,0 +1,840 @@
+/*
+ *  arch/s390/kernel/smp.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *               Heiko Carstens (heiko.carstens@de.ibm.com)
+ *
+ *  based on other smp stuff by 
+ *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
+ *    (c) 1998 Ingo Molnar
+ *
+ * We work with logical cpu numbering everywhere we can. The only
+ * functions using the real cpu address (got from STAP) are the sigp
+ * functions. For all other functions we use the identity mapping.
+ * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
+ * used e.g. to find the idle task belonging to a logical cpu. Every array
+ * in the kernel is sorted by the logical cpu number and not by the physical
+ * one which is causing all the confusion with __cpu_logical_map and
+ * cpu_number_map in other architectures.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/kernel_stat.h>
+#include <linux/smp_lock.h>
+
+#include <linux/delay.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+
+#include <asm/sigp.h>
+#include <asm/pgalloc.h>
+#include <asm/irq.h>
+#include <asm/s390_ext.h>
+#include <asm/cpcmd.h>
+#include <asm/tlbflush.h>
+
+/* prototypes */
+
+extern volatile int __cpu_logical_map[];
+
+/*
+ * An array with a pointer to the lowcore of every CPU.
+ */
+
+struct _lowcore *lowcore_ptr[NR_CPUS];
+
+cpumask_t cpu_online_map;
+cpumask_t cpu_possible_map;
+
+static struct task_struct *current_set[NR_CPUS];
+
+EXPORT_SYMBOL(cpu_online_map);
+
+/*
+ * Reboot, halt and power_off routines for SMP.
+ */
+extern char vmhalt_cmd[];
+extern char vmpoff_cmd[];
+
+extern void reipl(unsigned long devno);
+
+static void smp_ext_bitcall(int, ec_bit_sig);
+static void smp_ext_bitcall_others(ec_bit_sig);
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+static DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+	void (*func) (void *info);
+	void *info;
+	atomic_t started;
+	atomic_t finished;
+	int wait;
+};
+
+static struct call_data_struct * call_data;
+
+/*
+ * 'Call function' interrupt callback
+ */
+static void do_call_function(void)
+{
+	void (*func) (void *info) = call_data->func;
+	void *info = call_data->info;
+	int wait = call_data->wait;
+
+	atomic_inc(&call_data->started);
+	(*func)(info);
+	if (wait)
+		atomic_inc(&call_data->finished);
+}
+
+/*
+ * this function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+ */
+
+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+			int wait)
+/*
+ * [SUMMARY] Run a function on all other CPUs.
+ * <func> The function to run. This must be fast and non-blocking.
+ * <info> An arbitrary pointer to pass to the function.
+ * <nonatomic> currently unused.
+ * <wait> If true, wait (atomically) until function has completed on other CPUs.
+ * [RETURNS] 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+{
+	struct call_data_struct data;
+	int cpus = num_online_cpus()-1;
+
+	if (cpus <= 0)
+		return 0;
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	spin_lock(&call_lock);
+	call_data = &data;
+	/* Send a message to all other CPUs and wait for them to respond */
+        smp_ext_bitcall_others(ec_call_function);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
+	spin_unlock(&call_lock);
+
+	return 0;
+}
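+
+/*
+ * Usage sketch (hypothetical helper, for illustration only):
+ *
+ *	static void drain_local_caches(void *unused) { ... }
+ *
+ *	preempt_disable();
+ *	smp_call_function(drain_local_caches, NULL, 0, 1);
+ *	drain_local_caches(NULL);	// other cpus only, so run locally too
+ *	preempt_enable();
+ *
+ * smp_call_function() reaches only the *other* cpus; a caller that
+ * wants all-cpu coverage must also invoke the function locally, as
+ * on_each_cpu() does.
+ */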
+
+/*
+ * Call a function on one CPU
+ * cpu : the CPU the function should be executed on
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler. You may call it from a bottom half.
+ *
+ * It is guaranteed that the called function runs on the specified CPU,
+ * preemption is disabled.
+ */
+int smp_call_function_on(void (*func) (void *info), void *info,
+			 int nonatomic, int wait, int cpu)
+{
+	struct call_data_struct data;
+	int curr_cpu;
+
+	if (!cpu_online(cpu))
+		return -EINVAL;
+
+	/* disable preemption for local function call */
+	curr_cpu = get_cpu();
+
+	if (curr_cpu == cpu) {
+		/* direct call to function */
+		func(info);
+		put_cpu();
+		return 0;
+	}
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	spin_lock_bh(&call_lock);
+	call_data = &data;
+	smp_ext_bitcall(cpu, ec_call_function);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != 1)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != 1)
+			cpu_relax();
+
+	spin_unlock_bh(&call_lock);
+	put_cpu();
+	return 0;
+}
+EXPORT_SYMBOL(smp_call_function_on);
+
+static inline void do_send_stop(void)
+{
+        int cpu, rc;
+
+        /* stop all processors */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		do {
+			rc = signal_processor(cpu, sigp_stop);
+		} while (rc == sigp_busy);
+	}
+}
+
+static inline void do_store_status(void)
+{
+        int cpu, rc;
+
+        /* store status of all processors in their lowcores (real 0) */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		do {
+			rc = signal_processor_p(
+				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
+				sigp_store_status_at_address);
+		} while(rc == sigp_busy);
+        }
+}
+
+/*
+ * this function sends a 'stop' sigp to all other CPUs in the system.
+ * it goes straight through.
+ */
+void smp_send_stop(void)
+{
+        /* write magic number to zero page (absolute 0) */
+	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
+
+	/* stop other processors. */
+	do_send_stop();
+
+	/* store status of other processors. */
+	do_store_status();
+}
+
+/*
+ * Reboot, halt and power_off routines for SMP.
+ */
+
+static void do_machine_restart(void * __unused)
+{
+	int cpu;
+	static atomic_t cpuid = ATOMIC_INIT(-1);
+
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+		signal_processor(smp_processor_id(), sigp_stop);
+
+	/* Wait for all other cpus to enter stopped state */
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		while(!smp_cpu_not_running(cpu))
+			cpu_relax();
+	}
+
+	/* Store status of other cpus. */
+	do_store_status();
+
+	/*
+	 * Finally call reipl. Because we waited for all other
+	 * cpus to enter this function we know that they do
+	 * not hold any s390irq-locks (the cpus have been
+	 * interrupted by an external interrupt and s390irq
+	 * locks are always held disabled).
+	 */
+	if (MACHINE_IS_VM)
+		cpcmd ("IPL", NULL, 0);
+	else
+		reipl (0x10000 | S390_lowcore.ipl_device);
+}
+
+void machine_restart_smp(char * __unused) 
+{
+        on_each_cpu(do_machine_restart, NULL, 0, 0);
+}
+
+static void do_wait_for_stop(void)
+{
+	unsigned long cr[16];
+
+	__ctl_store(cr, 0, 15);
+	cr[0] &= ~0xffff;
+	cr[6] = 0;
+	__ctl_load(cr, 0, 15);
+	for (;;)
+		enabled_wait();
+}
+
+static void do_machine_halt(void * __unused)
+{
+	static atomic_t cpuid = ATOMIC_INIT(-1);
+
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+		smp_send_stop();
+		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+			cpcmd(vmhalt_cmd, NULL, 0);
+		signal_processor(smp_processor_id(),
+				 sigp_stop_and_store_status);
+	}
+	do_wait_for_stop();
+}
+
+void machine_halt_smp(void)
+{
+        on_each_cpu(do_machine_halt, NULL, 0, 0);
+}
+
+static void do_machine_power_off(void * __unused)
+{
+	static atomic_t cpuid = ATOMIC_INIT(-1);
+
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+		smp_send_stop();
+		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+			cpcmd(vmpoff_cmd, NULL, 0);
+		signal_processor(smp_processor_id(),
+				 sigp_stop_and_store_status);
+	}
+	do_wait_for_stop();
+}
+
+void machine_power_off_smp(void)
+{
+        on_each_cpu(do_machine_power_off, NULL, 0, 0);
+}
+
+/*
+ * This is the main routine where commands issued by other
+ * cpus are handled.
+ */
+
+void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
+{
+        unsigned long bits;
+
+        /*
+         * handle bit signal external calls
+         *
+         * For the ec_schedule signal we have to do nothing. All the work
+         * is done automatically when we return from the interrupt.
+         */
+	bits = xchg(&S390_lowcore.ext_call_fast, 0);
+
+	if (test_bit(ec_call_function, &bits)) 
+		do_call_function();
+}
+
+/*
+ * Send an external call sigp to another cpu and return without waiting
+ * for its completion.
+ */
+static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
+{
+        /*
+         * Set signaling bit in lowcore of target cpu and kick it
+         */
+	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
+	while(signal_processor(cpu, sigp_external_call) == sigp_busy)
+		udelay(10);
+}
+
+/*
+ * Send an external call sigp to every other cpu in the system and
+ * return without waiting for its completion.
+ */
+static void smp_ext_bitcall_others(ec_bit_sig sig)
+{
+        int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+                        continue;
+                /*
+                 * Set signaling bit in lowcore of target cpu and kick it
+                 */
+		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
+		while (signal_processor(cpu, sigp_external_call) == sigp_busy)
+			udelay(10);
+        }
+}
+
+#ifndef CONFIG_ARCH_S390X
+/*
+ * this function sends a 'purge tlb' signal to another CPU.
+ */
+void smp_ptlb_callback(void *info)
+{
+	local_flush_tlb();
+}
+
+void smp_ptlb_all(void)
+{
+        on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
+}
+EXPORT_SYMBOL(smp_ptlb_all);
+#endif /* ! CONFIG_ARCH_S390X */
+
+/*
+ * this function sends a 'reschedule' IPI to another CPU.
+ * it goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+void smp_send_reschedule(int cpu)
+{
+        smp_ext_bitcall(cpu, ec_schedule);
+}
+
+/*
+ * parameter area for the set/clear control bit callbacks
+ */
+typedef struct
+{
+	__u16 start_ctl;
+	__u16 end_ctl;
+	unsigned long orvals[16];
+	unsigned long andvals[16];
+} ec_creg_mask_parms;
+
+/*
+ * callback for setting/clearing control bits
+ */
+void smp_ctl_bit_callback(void *info)
+{
+	ec_creg_mask_parms *pp;
+	unsigned long cregs[16];
+	int i;
+	
+	pp = (ec_creg_mask_parms *) info;
+	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
+	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
+		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
+	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
+}
+
+/*
+ * Set a bit in a control register of all cpus
+ */
+void smp_ctl_set_bit(int cr, int bit)
+{
+        ec_creg_mask_parms parms;
+
+	parms.start_ctl = cr;
+	parms.end_ctl = cr;
+	parms.orvals[cr] = 1 << bit;
+	parms.andvals[cr] = -1L;
+	preempt_disable();
+	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
+        __ctl_set_bit(cr, bit);
+	preempt_enable();
+}
+
+/*
+ * Clear a bit in a control register of all cpus
+ */
+void smp_ctl_clear_bit(int cr, int bit)
+{
+        ec_creg_mask_parms parms;
+
+	parms.start_ctl = cr;
+	parms.end_ctl = cr;
+	parms.orvals[cr] = 0;
+	parms.andvals[cr] = ~(1L << bit);
+	preempt_disable();
+	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
+        __ctl_clear_bit(cr, bit);
+	preempt_enable();
+}
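+
+/*
+ * Example (hypothetical bit number): smp_ctl_set_bit(0, 17) ORs
+ * 1 << 17 into control register 0 on the calling cpu and, via
+ * smp_ctl_bit_callback(), on every other online cpu as well.
+ */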
+
+/*
+ * Let's check how many CPUs we have.
+ */
+
+void
+__init smp_check_cpus(unsigned int max_cpus)
+{
+	int cpu, num_cpus;
+	__u16 boot_cpu_addr;
+
+	/*
+	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
+	 */
+
+	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
+	current_thread_info()->cpu = 0;
+	num_cpus = 1;
+	for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
+		if ((__u16) cpu == boot_cpu_addr)
+			continue;
+		__cpu_logical_map[num_cpus] = (__u16) cpu;
+		if (signal_processor(num_cpus, sigp_sense) ==
+		    sigp_not_operational)
+			continue;
+		cpu_set(num_cpus, cpu_present_map);
+		num_cpus++;
+	}
+
+	for (cpu = 1; cpu < max_cpus; cpu++)
+		cpu_set(cpu, cpu_possible_map);
+
+	printk("Detected %d CPUs\n", (int) num_cpus);
+	printk("Boot cpu address %2X\n", boot_cpu_addr);
+}
+
+/*
+ *      Activate a secondary processor.
+ */
+extern void init_cpu_timer(void);
+extern void init_cpu_vtimer(void);
+extern int pfault_init(void);
+extern void pfault_fini(void);
+
+int __devinit start_secondary(void *cpuvoid)
+{
+        /* Setup the cpu */
+        cpu_init();
+        /* init per CPU timer */
+        init_cpu_timer();
+#ifdef CONFIG_VIRT_TIMER
+        init_cpu_vtimer();
+#endif
+#ifdef CONFIG_PFAULT
+	/* Enable pfault pseudo page faults on this cpu. */
+	pfault_init();
+#endif
+	/* Mark this cpu as online */
+	cpu_set(smp_processor_id(), cpu_online_map);
+	/* Switch on interrupts */
+	local_irq_enable();
+        /* Print info about this processor */
+        print_cpu_info(&S390_lowcore.cpu_data);
+        /* cpu_idle will call schedule for us */
+        cpu_idle();
+        return 0;
+}
+
+static void __init smp_create_idle(unsigned int cpu)
+{
+	struct task_struct *p;
+
+	/*
+	 *  don't care about the psw and regs settings since we'll never
+	 *  reschedule the forked task.
+	 */
+	p = fork_idle(cpu);
+	if (IS_ERR(p))
+		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+	current_set[cpu] = p;
+}
+
+/* Reserving and releasing of CPUs */
+
+static DEFINE_SPINLOCK(smp_reserve_lock);
+static int smp_cpu_reserved[NR_CPUS];
+
+int
+smp_get_cpu(cpumask_t cpu_mask)
+{
+	unsigned long flags;
+	int cpu;
+
+	spin_lock_irqsave(&smp_reserve_lock, flags);
+	/* Try to find an already reserved cpu. */
+	for_each_cpu_mask(cpu, cpu_mask) {
+		if (smp_cpu_reserved[cpu] != 0) {
+			smp_cpu_reserved[cpu]++;
+			/* Found one. */
+			goto out;
+		}
+	}
+	/* Reserve a new cpu from cpu_mask. */
+	for_each_cpu_mask(cpu, cpu_mask) {
+		if (cpu_online(cpu)) {
+			smp_cpu_reserved[cpu]++;
+			goto out;
+		}
+	}
+	cpu = -ENODEV;
+out:
+	spin_unlock_irqrestore(&smp_reserve_lock, flags);
+	return cpu;
+}
+
+void
+smp_put_cpu(int cpu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&smp_reserve_lock, flags);
+	smp_cpu_reserved[cpu]--;
+	spin_unlock_irqrestore(&smp_reserve_lock, flags);
+}
+
+static inline int
+cpu_stopped(int cpu)
+{
+	__u32 status;
+
+	/* Check for stopped state */
+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
+		if (status & 0x40)
+			return 1;
+	}
+	return 0;
+}
+
+/* Upping and downing of CPUs */
+
+int
+__cpu_up(unsigned int cpu)
+{
+	struct task_struct *idle;
+        struct _lowcore    *cpu_lowcore;
+	struct stack_frame *sf;
+        sigp_ccode          ccode;
+	int                 curr_cpu;
+
+	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
+		__cpu_logical_map[cpu] = (__u16) curr_cpu;
+		if (cpu_stopped(cpu))
+			break;
+	}
+
+	if (!cpu_stopped(cpu))
+		return -ENODEV;
+
+	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
+				   cpu, sigp_set_prefix);
+	if (ccode){
+		printk("sigp_set_prefix failed for cpu %d "
+		       "with condition code %d\n",
+		       (int) cpu, (int) ccode);
+		return -EIO;
+	}
+
+	idle = current_set[cpu];
+        cpu_lowcore = lowcore_ptr[cpu];
+	cpu_lowcore->kernel_stack = (unsigned long)
+		idle->thread_info + (THREAD_SIZE);
+	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
+				     - sizeof(struct pt_regs)
+				     - sizeof(struct stack_frame));
+	memset(sf, 0, sizeof(struct stack_frame));
+	sf->gprs[9] = (unsigned long) sf;
+	cpu_lowcore->save_area[15] = (unsigned long) sf;
+	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
+	__asm__ __volatile__("stam  0,15,0(%0)"
+			     : : "a" (&cpu_lowcore->access_regs_save_area)
+			     : "memory");
+	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
+        cpu_lowcore->current_task = (unsigned long) idle;
+        cpu_lowcore->cpu_data.cpu_nr = cpu;
+	eieio();
+	signal_processor(cpu, sigp_restart);
+
+	while (!cpu_online(cpu))
+		cpu_relax();
+	return 0;
+}
+
+int
+__cpu_disable(void)
+{
+	unsigned long flags;
+	ec_creg_mask_parms cr_parms;
+
+	spin_lock_irqsave(&smp_reserve_lock, flags);
+	if (smp_cpu_reserved[smp_processor_id()] != 0) {
+		spin_unlock_irqrestore(&smp_reserve_lock, flags);
+		return -EBUSY;
+	}
+
+#ifdef CONFIG_PFAULT
+	/* Disable pfault pseudo page faults on this cpu. */
+	pfault_fini();
+#endif
+
+	/* disable all external interrupts */
+
+	cr_parms.start_ctl = 0;
+	cr_parms.end_ctl = 0;
+	cr_parms.orvals[0] = 0;
+	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
+				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+	smp_ctl_bit_callback(&cr_parms);
+
+	/* disable all I/O interrupts */
+
+	cr_parms.start_ctl = 6;
+	cr_parms.end_ctl = 6;
+	cr_parms.orvals[6] = 0;
+	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
+				1<<27 | 1<<26 | 1<<25 | 1<<24);
+	smp_ctl_bit_callback(&cr_parms);
+
+	/* disable most machine checks */
+
+	cr_parms.start_ctl = 14;
+	cr_parms.end_ctl = 14;
+	cr_parms.orvals[14] = 0;
+	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+	smp_ctl_bit_callback(&cr_parms);
+
+	spin_unlock_irqrestore(&smp_reserve_lock, flags);
+	return 0;
+}
+
+void
+__cpu_die(unsigned int cpu)
+{
+	/* Wait until target cpu is down */
+	while (!smp_cpu_not_running(cpu))
+		cpu_relax();
+	printk("Processor %d spun down\n", cpu);
+}
+
+void
+cpu_die(void)
+{
+	idle_task_exit();
+	signal_processor(smp_processor_id(), sigp_stop);
+	BUG();
+	for(;;);
+}
+
+/*
+ *	Cycle through the processors and setup structures.
+ */
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned long stack;
+	unsigned int cpu;
+        int i;
+
+        /* request the 0x1202 external interrupt */
+        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
+                panic("Couldn't request external interrupt 0x1202");
+        smp_check_cpus(max_cpus);
+        memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
+        /*
+         *  Initialize prefix pages and stacks for all possible cpus
+         */
+	print_cpu_info(&S390_lowcore.cpu_data);
+
+        for(i = 0; i < NR_CPUS; i++) {
+		if (!cpu_possible(i))
+			continue;
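+		/*
+		 * The 64 bit lowcore does not fit into a single 4KB
+		 * page, hence the order-1 (8KB) allocation below when
+		 * pointers are 8 bytes wide; the 31 bit lowcore fits
+		 * in one page.
+		 */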
+		lowcore_ptr[i] = (struct _lowcore *)
+			__get_free_pages(GFP_KERNEL|GFP_DMA, 
+					sizeof(void*) == 8 ? 1 : 0);
+		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+		if (lowcore_ptr[i] == NULL || stack == 0ULL)
+			panic("smp_boot_cpus failed to allocate memory\n");
+
+		*(lowcore_ptr[i]) = S390_lowcore;
+		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
+#ifdef CONFIG_CHECK_STACK
+		stack = __get_free_pages(GFP_KERNEL, 0);
+		if (stack == 0ULL)
+			panic("smp_boot_cpus failed to allocate memory\n");
+		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
+#endif
+	}
+	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
+
+	for_each_cpu(cpu)
+		if (cpu != smp_processor_id())
+			smp_create_idle(cpu);
+}
+
+void __devinit smp_prepare_boot_cpu(void)
+{
+	BUG_ON(smp_processor_id() != 0);
+
+	cpu_set(0, cpu_online_map);
+	cpu_set(0, cpu_present_map);
+	cpu_set(0, cpu_possible_map);
+	S390_lowcore.percpu_offset = __per_cpu_offset[0];
+	current_set[0] = current;
+}
+
+void smp_cpus_done(unsigned int max_cpus)
+{
+	cpu_present_map = cpu_possible_map;
+}
+
+/*
+ * the frequency of the profiling timer can be changed
+ * by writing a multiplier value into /proc/profile.
+ *
+ * usually you want to run this on all CPUs ;)
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+        return 0;
+}
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+	int cpu;
+	int ret;
+
+	for_each_cpu(cpu) {
+		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+		if (ret)
+			printk(KERN_WARNING "topology_init: register_cpu %d "
+			       "failed (%d)\n", cpu, ret);
+	}
+	return 0;
+}
+
+subsys_initcall(topology_init);
+
+EXPORT_SYMBOL(cpu_possible_map);
+EXPORT_SYMBOL(lowcore_ptr);
+EXPORT_SYMBOL(smp_ctl_set_bit);
+EXPORT_SYMBOL(smp_ctl_clear_bit);
+EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_get_cpu);
+EXPORT_SYMBOL(smp_put_cpu);
+
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
new file mode 100644
index 000000000000..efe6b83b53f7
--- /dev/null
+++ b/arch/s390/kernel/sys_s390.c
@@ -0,0 +1,270 @@
+/*
+ *  arch/s390/kernel/sys_s390.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Thomas Spatzier (tspat@de.ibm.com)
+ *
+ *  Derived from "arch/i386/kernel/sys_i386.c"
+ *
+ *  This file contains various random system calls that
+ *  have a non-standard calling sequence on the Linux/s390
+ *  platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+#ifdef CONFIG_ARCH_S390X
+#include <linux/personality.h>
+#endif /* CONFIG_ARCH_S390X */
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way Unix traditionally does this, though.
+ */
+asmlinkage long sys_pipe(unsigned long __user *fildes)
+{
+	int fd[2];
+	int error;
+
+	error = do_pipe(fd);
+	if (!error) {
+		if (copy_to_user(fildes, fd, 2*sizeof(int)))
+			error = -EFAULT;
+	}
+	return error;
+}
+
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+	unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags,
+	unsigned long fd, unsigned long pgoff)
+{
+	long error = -EBADF;
+	struct file * file = NULL;
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	if (!(flags & MAP_ANONYMOUS)) {
+		file = fget(fd);
+		if (!file)
+			goto out;
+	}
+
+	down_write(&current->mm->mmap_sem);
+	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	up_write(&current->mm->mmap_sem);
+
+	if (file)
+		fput(file);
+out:
+	return error;
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux for S/390 isn't able to handle more than 5
+ * system call parameters, so these system calls used a memory
+ * block for parameter passing..
+ */
+
+struct mmap_arg_struct {
+	unsigned long addr;
+	unsigned long len;
+	unsigned long prot;
+	unsigned long flags;
+	unsigned long fd;
+	unsigned long offset;
+};
+
+asmlinkage long sys_mmap2(struct mmap_arg_struct __user  *arg)
+{
+	struct mmap_arg_struct a;
+	int error = -EFAULT;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		goto out;
+	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+out:
+	return error;
+}
+
+asmlinkage long old_mmap(struct mmap_arg_struct __user *arg)
+{
+	struct mmap_arg_struct a;
+	long error = -EFAULT;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		goto out;
+
+	error = -EINVAL;
+	if (a.offset & ~PAGE_MASK)
+		goto out;
+
+	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+out:
+	return error;
+}
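+
+/*
+ * Illustrative sketch of the convention handled above: a user program
+ * issuing the old mmap syscall passes a single pointer to a parameter
+ * block instead of six separate arguments, roughly:
+ *
+ *	struct mmap_arg_struct args = {
+ *		.addr = 0, .len = 8192,
+ *		.prot = PROT_READ | PROT_WRITE,
+ *		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ *		.fd = -1UL, .offset = 0,
+ *	};
+ *	ptr = (void *) syscall(__NR_mmap, &args);
+ */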
+
+#ifndef CONFIG_ARCH_S390X
+struct sel_arg_struct {
+	unsigned long n;
+	fd_set *inp, *outp, *exp;
+	struct timeval *tvp;
+};
+
+asmlinkage long old_select(struct sel_arg_struct __user *arg)
+{
+	struct sel_arg_struct a;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		return -EFAULT;
+	/* sys_select() does the appropriate kernel locking */
+	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+
+}
+#endif /* CONFIG_ARCH_S390X */
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls.
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage long sys_ipc(uint call, int first, unsigned long second,
+				  unsigned long third, void __user *ptr)
+{
+        struct ipc_kludge tmp;
+	int ret;
+
+        switch (call) {
+        case SEMOP:
+		return sys_semtimedop(first, (struct sembuf __user *)ptr,
+				       (unsigned)second, NULL);
+	case SEMTIMEDOP:
+		return sys_semtimedop(first, (struct sembuf __user *)ptr,
+				       (unsigned)second,
+				       (const struct timespec __user *) third);
+        case SEMGET:
+                return sys_semget(first, (int)second, third);
+        case SEMCTL: {
+                union semun fourth;
+                if (!ptr)
+                        return -EINVAL;
+                if (get_user(fourth.__pad, (void __user * __user *) ptr))
+                        return -EFAULT;
+                return sys_semctl(first, (int)second, third, fourth);
+        }
+        case MSGSND:
+		return sys_msgsnd (first, (struct msgbuf __user *) ptr,
+                                   (size_t)second, third);
+        case MSGRCV:
+                if (!ptr)
+                        return -EINVAL;
+                if (copy_from_user (&tmp, (struct ipc_kludge __user *) ptr,
+                                    sizeof (struct ipc_kludge)))
+                        return -EFAULT;
+                return sys_msgrcv (first, tmp.msgp,
+                                   (size_t)second, tmp.msgtyp, third);
+        case MSGGET:
+                return sys_msgget((key_t)first, (int)second);
+        case MSGCTL:
+                return sys_msgctl(first, (int)second,
+				   (struct msqid_ds __user *)ptr);
+
+	case SHMAT: {
+		ulong raddr;
+		ret = do_shmat(first, (char __user *)ptr,
+				(int)second, &raddr);
+		if (ret)
+			return ret;
+		return put_user (raddr, (ulong __user *) third);
+        }
+	case SHMDT:
+		return sys_shmdt ((char __user *)ptr);
+	case SHMGET:
+		return sys_shmget(first, (size_t)second, third);
+	case SHMCTL:
+		return sys_shmctl(first, (int)second,
+                                   (struct shmid_ds __user *) ptr);
+	default:
+		return -ENOSYS;
+
+	}
+
+	return -EINVAL;
+}
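+
+/*
+ * Illustrative sketch: the C library funnels all SysV IPC through this
+ * one entry point, so a semop(semid, sops, 1) call from user space
+ * arrives here roughly as
+ *
+ *	sys_ipc(SEMOP, semid, 1, 0, sops);
+ *
+ * and the switch above dispatches it to sys_semtimedop().
+ */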
+
+#ifdef CONFIG_ARCH_S390X
+asmlinkage long s390x_newuname(struct new_utsname __user *name)
+{
+	int ret = sys_newuname(name);
+
+	if (current->personality == PER_LINUX32 && !ret) {
+		ret = copy_to_user(name->machine, "s390\0\0\0\0", 8);
+		if (ret) ret = -EFAULT;
+	}
+	return ret;
+}
+
+asmlinkage long s390x_personality(unsigned long personality)
+{
+	int ret;
+
+	if (current->personality == PER_LINUX32 && personality == PER_LINUX)
+		personality = PER_LINUX32;
+	ret = sys_personality(personality);
+	if (ret == PER_LINUX32)
+		ret = PER_LINUX;
+
+	return ret;
+}
+#endif /* CONFIG_ARCH_S390X */
+
+/*
+ * Wrapper function for sys_fadvise64/fadvise64_64
+ */
+#ifndef CONFIG_ARCH_S390X
+
+asmlinkage long
+s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
+{
+	return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
+			len, advice);
+}
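+
+/*
+ * Illustrative note: a 31 bit caller cannot pass a 64 bit file offset
+ * in a single register, so it arrives split in two halves; e.g. an
+ * offset of 0x100000000ULL comes in as offset_high = 1, offset_low = 0
+ * and is reassembled by the shift-or above.
+ */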
+
+#endif
+
+struct fadvise64_64_args {
+	int fd;
+	long long offset;
+	long long len;
+	int advice;
+};
+
+asmlinkage long
+s390_fadvise64_64(struct fadvise64_64_args __user *args)
+{
+	struct fadvise64_64_args a;
+
+	if (copy_from_user(&a, args, sizeof(a)))
+		return -EFAULT;
+	return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
+}
+
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
new file mode 100644
index 000000000000..515938628f82
--- /dev/null
+++ b/arch/s390/kernel/syscalls.S
@@ -0,0 +1,292 @@
+/*
+ * Definitions for sys_call_table; each line represents an
+ * entry in the table in the form
+ * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall)
+ *
+ * This file is meant to be included from entry.S and entry64.S.
+ */
+
+#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall)
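+
+/*
+ * Sketch of the assumed include mechanism: the including file defines
+ * SYSCALL to select one column, e.g. the 31 bit entry.S could use
+ *
+ *	#define SYSCALL(esa,esame,emu)	.long esa
+ *
+ * so that every line below emits one sys_call_table entry.
+ */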
+
+NI_SYSCALL							/* 0 */
+SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
+SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue)
+SYSCALL(sys_read,sys_read,sys32_read_wrapper)
+SYSCALL(sys_write,sys_write,sys32_write_wrapper)
+SYSCALL(sys_open,sys_open,sys32_open_wrapper)			/* 5 */
+SYSCALL(sys_close,sys_close,sys32_close_wrapper)
+SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
+SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
+SYSCALL(sys_link,sys_link,sys32_link_wrapper)
+SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper)		/* 10 */
+SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue)
+SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
+SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper)		/* old time syscall */
+SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
+SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper)		/* 15 */
+SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper)	/* old lchown16 syscall*/
+NI_SYSCALL							/* old break syscall holder */
+NI_SYSCALL							/* old stat syscall holder */
+SYSCALL(sys_lseek,sys_lseek,sys32_lseek_wrapper)
+SYSCALL(sys_getpid,sys_getpid,sys_getpid)			/* 20 */
+SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper)
+SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper)
+SYSCALL(sys_setuid16,sys_ni_syscall,sys32_setuid16_wrapper)	/* old setuid16 syscall*/
+SYSCALL(sys_getuid16,sys_ni_syscall,sys32_getuid16)		/* old getuid16 syscall*/
+SYSCALL(sys_stime,sys_ni_syscall,sys32_stime_wrapper)		/* 25 old stime syscall */
+SYSCALL(sys_ptrace,sys_ptrace,sys32_ptrace_wrapper)
+SYSCALL(sys_alarm,sys_alarm,sys32_alarm_wrapper)
+NI_SYSCALL							/* old fstat syscall */
+SYSCALL(sys_pause,sys_pause,sys32_pause)
+SYSCALL(sys_utime,sys_utime,compat_sys_utime_wrapper)		/* 30 */
+NI_SYSCALL							/* old stty syscall */
+NI_SYSCALL							/* old gtty syscall */
+SYSCALL(sys_access,sys_access,sys32_access_wrapper)
+SYSCALL(sys_nice,sys_nice,sys32_nice_wrapper)
+NI_SYSCALL							/* 35 old ftime syscall */
+SYSCALL(sys_sync,sys_sync,sys_sync)
+SYSCALL(sys_kill,sys_kill,sys32_kill_wrapper)
+SYSCALL(sys_rename,sys_rename,sys32_rename_wrapper)
+SYSCALL(sys_mkdir,sys_mkdir,sys32_mkdir_wrapper)
+SYSCALL(sys_rmdir,sys_rmdir,sys32_rmdir_wrapper)		/* 40 */
+SYSCALL(sys_dup,sys_dup,sys32_dup_wrapper)
+SYSCALL(sys_pipe,sys_pipe,sys32_pipe_wrapper)
+SYSCALL(sys_times,sys_times,compat_sys_times_wrapper)
+NI_SYSCALL							/* old prof syscall */
+SYSCALL(sys_brk,sys_brk,sys32_brk_wrapper)			/* 45 */
+SYSCALL(sys_setgid16,sys_ni_syscall,sys32_setgid16_wrapper)	/* old setgid16 syscall*/
+SYSCALL(sys_getgid16,sys_ni_syscall,sys32_getgid16)		/* old getgid16 syscall*/
+SYSCALL(sys_signal,sys_signal,sys32_signal_wrapper)
+SYSCALL(sys_geteuid16,sys_ni_syscall,sys32_geteuid16)		/* old geteuid16 syscall */
+SYSCALL(sys_getegid16,sys_ni_syscall,sys32_getegid16)		/* 50 old getegid16 syscall */
+SYSCALL(sys_acct,sys_acct,sys32_acct_wrapper)
+SYSCALL(sys_umount,sys_umount,sys32_umount_wrapper)
+NI_SYSCALL							/* old lock syscall */
+SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl_wrapper)
+SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl_wrapper)		/* 55 */
+NI_SYSCALL							/* intel mpx syscall */
+SYSCALL(sys_setpgid,sys_setpgid,sys32_setpgid_wrapper)
+NI_SYSCALL							/* old ulimit syscall */
+NI_SYSCALL							/* old uname syscall */
+SYSCALL(sys_umask,sys_umask,sys32_umask_wrapper)		/* 60 */
+SYSCALL(sys_chroot,sys_chroot,sys32_chroot_wrapper)
+SYSCALL(sys_ustat,sys_ustat,sys32_ustat_wrapper)
+SYSCALL(sys_dup2,sys_dup2,sys32_dup2_wrapper)
+SYSCALL(sys_getppid,sys_getppid,sys_getppid)
+SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp)			/* 65 */
+SYSCALL(sys_setsid,sys_setsid,sys_setsid)
+SYSCALL(sys_sigaction,sys_sigaction,sys32_sigaction_wrapper)
+NI_SYSCALL							/* old sgetmask syscall*/
+NI_SYSCALL							/* old ssetmask syscall*/
+SYSCALL(sys_setreuid16,sys_ni_syscall,sys32_setreuid16_wrapper)	/* old setreuid16 syscall */
+SYSCALL(sys_setregid16,sys_ni_syscall,sys32_setregid16_wrapper)	/* old setregid16 syscall */
+SYSCALL(sys_sigsuspend_glue,sys_sigsuspend_glue,sys32_sigsuspend_glue)
+SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending_wrapper)
+SYSCALL(sys_sethostname,sys_sethostname,sys32_sethostname_wrapper)
+SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit_wrapper)	/* 75 */
+SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit_wrapper)
+SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage_wrapper)
+SYSCALL(sys_gettimeofday,sys_gettimeofday,sys32_gettimeofday_wrapper)
+SYSCALL(sys_settimeofday,sys_settimeofday,sys32_settimeofday_wrapper)
+SYSCALL(sys_getgroups16,sys_ni_syscall,sys32_getgroups16_wrapper)	/* 80 old getgroups16 syscall */
+SYSCALL(sys_setgroups16,sys_ni_syscall,sys32_setgroups16_wrapper)	/* old setgroups16 syscall */
+NI_SYSCALL							/* old select syscall */
+SYSCALL(sys_symlink,sys_symlink,sys32_symlink_wrapper)
+NI_SYSCALL							/* old lstat syscall */
+SYSCALL(sys_readlink,sys_readlink,sys32_readlink_wrapper)	/* 85 */
+SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper)
+SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
+SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
+SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper)	/* old readdir syscall */
+SYSCALL(old_mmap,old_mmap,old32_mmap_wrapper)			/* 90 */
+SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
+SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
+SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
+SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper)
+SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper)	/* 95 old fchown16 syscall*/
+SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper)
+SYSCALL(sys_setpriority,sys_setpriority,sys32_setpriority_wrapper)
+NI_SYSCALL							/* old profil syscall */
+SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs_wrapper)
+SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs_wrapper)	/* 100 */
+NI_SYSCALL							/* ioperm for i386 */
+SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall_wrapper)
+SYSCALL(sys_syslog,sys_syslog,sys32_syslog_wrapper)
+SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer_wrapper)
+SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer_wrapper)	/* 105 */
+SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper)
+SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper)
+SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper)
+NI_SYSCALL							/* old uname syscall */
+SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,sys32_lookup_dcookie_wrapper)	/* 110 */
+SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
+NI_SYSCALL							/* old "idle" system call */
+NI_SYSCALL							/* vm86old for i386 */
+SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper)
+SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper)		/* 115 */
+SYSCALL(sys_sysinfo,sys_sysinfo,sys32_sysinfo_wrapper)
+SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper)
+SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
+SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue)
+SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue)		/* 120 */
+SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
+SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
+NI_SYSCALL							/* modify_ldt for i386 */
+SYSCALL(sys_adjtimex,sys_adjtimex,sys32_adjtimex_wrapper)
+SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper)	/* 125 */
+SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper)
+NI_SYSCALL							/* old "create module" */
+SYSCALL(sys_init_module,sys_init_module,sys32_init_module_wrapper)
+SYSCALL(sys_delete_module,sys_delete_module,sys32_delete_module_wrapper)
+NI_SYSCALL							/* 130: old get_kernel_syms */
+SYSCALL(sys_quotactl,sys_quotactl,sys32_quotactl_wrapper)
+SYSCALL(sys_getpgid,sys_getpgid,sys32_getpgid_wrapper)
+SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper)
+SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper)
+SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper)		/* 135 */
+SYSCALL(sys_personality,s390x_personality,sys32_personality_wrapper)
+NI_SYSCALL							/* for afs_syscall */
+SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper)	/* old setfsuid16 syscall */
+SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper)	/* old setfsgid16 syscall */
+SYSCALL(sys_llseek,sys_llseek,sys32_llseek_wrapper)		/* 140 */
+SYSCALL(sys_getdents,sys_getdents,sys32_getdents_wrapper)
+SYSCALL(sys_select,sys_select,compat_sys_select_wrapper)
+SYSCALL(sys_flock,sys_flock,sys32_flock_wrapper)
+SYSCALL(sys_msync,sys_msync,sys32_msync_wrapper)
+SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper)		/* 145 */
+SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
+SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
+SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
+SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper)
+SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper)		/* 150 */
+SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
+SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
+SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall)
+SYSCALL(sys_sched_setparam,sys_sched_setparam,sys32_sched_setparam_wrapper)
+SYSCALL(sys_sched_getparam,sys_sched_getparam,sys32_sched_getparam_wrapper)	/* 155 */
+SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,sys32_sched_setscheduler_wrapper)
+SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,sys32_sched_getscheduler_wrapper)
+SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
+SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper)
+SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper)	/* 160 */
+SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,sys32_sched_rr_get_interval_wrapper)
+SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep_wrapper)
+SYSCALL(sys_mremap,sys_mremap,sys32_mremap_wrapper)
+SYSCALL(sys_setresuid16,sys_ni_syscall,sys32_setresuid16_wrapper)	/* old setresuid16 syscall */
+SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper)	/* 165 old getresuid16 syscall */
+NI_SYSCALL							/* for vm86 */
+NI_SYSCALL							/* old sys_query_module */
+SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper)
+SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper)
+SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper)	/* 170 old setresgid16 syscall */
+SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper)	/* old getresgid16 syscall */
+SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
+SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue)
+SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper)
+SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper)	/* 175 */
+SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper)
+SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait_wrapper)
+SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,sys32_rt_sigqueueinfo_wrapper)
+SYSCALL(sys_rt_sigsuspend_glue,sys_rt_sigsuspend_glue,sys32_rt_sigsuspend_glue)
+SYSCALL(sys_pread64,sys_pread64,sys32_pread64_wrapper)		/* 180 */
+SYSCALL(sys_pwrite64,sys_pwrite64,sys32_pwrite64_wrapper)
+SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper)	/* old chown16 syscall */
+SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
+SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
+SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper)		/* 185 */
+SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue)
+SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
+NI_SYSCALL							/* streams1 */
+NI_SYSCALL							/* streams2 */
+SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue)		/* 190 */
+SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
+SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
+SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
+SYSCALL(sys_ftruncate64,sys_ni_syscall,sys32_ftruncate64_wrapper)
+SYSCALL(sys_stat64,sys_ni_syscall,sys32_stat64_wrapper)		/* 195 */
+SYSCALL(sys_lstat64,sys_ni_syscall,sys32_lstat64_wrapper)
+SYSCALL(sys_fstat64,sys_ni_syscall,sys32_fstat64_wrapper)
+SYSCALL(sys_lchown,sys_lchown,sys32_lchown_wrapper)
+SYSCALL(sys_getuid,sys_getuid,sys_getuid)
+SYSCALL(sys_getgid,sys_getgid,sys_getgid)			/* 200 */
+SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid)
+SYSCALL(sys_getegid,sys_getegid,sys_getegid)
+SYSCALL(sys_setreuid,sys_setreuid,sys32_setreuid_wrapper)
+SYSCALL(sys_setregid,sys_setregid,sys32_setregid_wrapper)
+SYSCALL(sys_getgroups,sys_getgroups,sys32_getgroups_wrapper)	/* 205 */
+SYSCALL(sys_setgroups,sys_setgroups,sys32_setgroups_wrapper)
+SYSCALL(sys_fchown,sys_fchown,sys32_fchown_wrapper)
+SYSCALL(sys_setresuid,sys_setresuid,sys32_setresuid_wrapper)
+SYSCALL(sys_getresuid,sys_getresuid,sys32_getresuid_wrapper)
+SYSCALL(sys_setresgid,sys_setresgid,sys32_setresgid_wrapper)	/* 210 */
+SYSCALL(sys_getresgid,sys_getresgid,sys32_getresgid_wrapper)
+SYSCALL(sys_chown,sys_chown,sys32_chown_wrapper)
+SYSCALL(sys_setuid,sys_setuid,sys32_setuid_wrapper)
+SYSCALL(sys_setgid,sys_setgid,sys32_setgid_wrapper)
+SYSCALL(sys_setfsuid,sys_setfsuid,sys32_setfsuid_wrapper)	/* 215 */
+SYSCALL(sys_setfsgid,sys_setfsgid,sys32_setfsgid_wrapper)
+SYSCALL(sys_pivot_root,sys_pivot_root,sys32_pivot_root_wrapper)
+SYSCALL(sys_mincore,sys_mincore,sys32_mincore_wrapper)
+SYSCALL(sys_madvise,sys_madvise,sys32_madvise_wrapper)
+SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper)	/* 220 */
+SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper)
+SYSCALL(sys_readahead,sys_readahead,sys32_readahead)
+SYSCALL(sys_sendfile64,sys_ni_syscall,sys32_sendfile64)
+SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper)
+SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper)	/* 225 */
+SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper)
+SYSCALL(sys_getxattr,sys_getxattr,sys32_getxattr_wrapper)
+SYSCALL(sys_lgetxattr,sys_lgetxattr,sys32_lgetxattr_wrapper)
+SYSCALL(sys_fgetxattr,sys_fgetxattr,sys32_fgetxattr_wrapper)
+SYSCALL(sys_listxattr,sys_listxattr,sys32_listxattr_wrapper)	/* 230 */
+SYSCALL(sys_llistxattr,sys_llistxattr,sys32_llistxattr_wrapper)
+SYSCALL(sys_flistxattr,sys_flistxattr,sys32_flistxattr_wrapper)
+SYSCALL(sys_removexattr,sys_removexattr,sys32_removexattr_wrapper)
+SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper)
+SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper)	/* 235 */
+SYSCALL(sys_gettid,sys_gettid,sys_gettid)
+SYSCALL(sys_tkill,sys_tkill,sys_tkill)
+SYSCALL(sys_futex,sys_futex,compat_sys_futex_wrapper)
+SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper)
+SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper)	/* 240 */
+SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill)
+NI_SYSCALL							/* reserved for TUX */
+SYSCALL(sys_io_setup,sys_io_setup,sys32_io_setup_wrapper)
+SYSCALL(sys_io_destroy,sys_io_destroy,sys32_io_destroy_wrapper)
+SYSCALL(sys_io_getevents,sys_io_getevents,sys32_io_getevents_wrapper)	/* 245 */
+SYSCALL(sys_io_submit,sys_io_submit,sys32_io_submit_wrapper)
+SYSCALL(sys_io_cancel,sys_io_cancel,sys32_io_cancel_wrapper)
+SYSCALL(sys_exit_group,sys_exit_group,sys32_exit_group_wrapper)
+SYSCALL(sys_epoll_create,sys_epoll_create,sys_epoll_create_wrapper)
+SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper)	/* 250 */
+SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
+SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
+SYSCALL(s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
+SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper)
+SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper)	/* 255 */
+SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper)
+SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,sys32_timer_getoverrun_wrapper)
+SYSCALL(sys_timer_delete,sys_timer_delete,sys32_timer_delete_wrapper)
+SYSCALL(sys_clock_settime,sys_clock_settime,sys32_clock_settime_wrapper)
+SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper)	/* 260 */
+SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
+SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
+NI_SYSCALL							/* reserved for vserver */
+SYSCALL(s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
+SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
+SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
+SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper)
+NI_SYSCALL							/* 268 sys_mbind */
+NI_SYSCALL							/* 269 sys_get_mempolicy */
+NI_SYSCALL							/* 270 sys_set_mempolicy */
+SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open_wrapper)
+SYSCALL(sys_mq_unlink,sys_mq_unlink,sys32_mq_unlink_wrapper)
+SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend_wrapper)
+SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive_wrapper)
+SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify_wrapper) /* 275 */
+SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr_wrapper)
+NI_SYSCALL							/* reserved for kexec */
+SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper)
+SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper)
+SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl)		/* 280 */
+SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
new file mode 100644
index 000000000000..061e81138dc2
--- /dev/null
+++ b/arch/s390/kernel/time.c
@@ -0,0 +1,382 @@
+/*
+ *  arch/s390/kernel/time.c
+ *    Time of day based timer functions.
+ *
+ *  S390 version
+ *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Hartmut Penner (hp@de.ibm.com),
+ *               Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ *  Derived from "arch/i386/kernel/time.c"
+ *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/profile.h>
+#include <linux/timex.h>
+#include <linux/notifier.h>
+
+#include <asm/uaccess.h>
+#include <asm/delay.h>
+#include <asm/s390_ext.h>
+#include <asm/div64.h>
+#include <asm/irq.h>
+#include <asm/timer.h>
+
+/* change this if you have some constant time drift */
+#define USECS_PER_JIFFY     ((unsigned long) 1000000/HZ)
+#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
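+
+/*
+ * The TOD clock steps bit 51 once per microsecond, so one microsecond
+ * equals 4096 (1 << 12) clock units; shifting the microseconds per
+ * jiffy left by 12 yields the clock units per jiffy, e.g. for HZ=100:
+ * 10000 << 12 = 40960000.
+ */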
+
+/*
+ * Create a small time difference between the timer interrupts
+ * on the different cpus to avoid lock contention.
+ */
+#define CPU_DEVIATION       (smp_processor_id() << 12)
+
+#define TICK_SIZE tick
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+static ext_int_info_t ext_int_info_cc;
+static u64 init_timer_cc;
+static u64 jiffies_timer_cc;
+static u64 xtime_cc;
+
+extern unsigned long wall_jiffies;
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+unsigned long long sched_clock(void)
+{
+	return ((get_clock() - jiffies_timer_cc) * 1000) >> 12;
+}
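+
+/*
+ * The conversion above follows from the TOD format: 4096 clock units
+ * per microsecond, i.e. 4.096 units per nanosecond, so
+ * nanoseconds = (tod_delta * 1000) >> 12.
+ */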
+
+void tod_to_timeval(__u64 todval, struct timespec *xtime)
+{
+	unsigned long long sec;
+
+	sec = todval >> 12;
+	do_div(sec, 1000000);
+	xtime->tv_sec = sec;
+	todval -= (sec * 1000000) << 12;
+	xtime->tv_nsec = ((todval * 1000) >> 12);
+}
+
+static inline unsigned long do_gettimeoffset(void) 
+{
+	__u64 now;
+
+        now = (get_clock() - jiffies_timer_cc) >> 12;
+	/* We require the offset from the latest update of xtime */
+	now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
+	return (unsigned long) now;
+}
+
+/*
+ * This version of gettimeofday has microsecond resolution.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+	unsigned long flags;
+	unsigned long seq;
+	unsigned long usec, sec;
+
+	do {
+		seq = read_seqbegin_irqsave(&xtime_lock, flags);
+
+		sec = xtime.tv_sec;
+		usec = xtime.tv_nsec / 1000 + do_gettimeoffset();
+	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+	while (usec >= 1000000) {
+		usec -= 1000000;
+		sec++;
+	}
+
+	tv->tv_sec = sec;
+	tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+int do_settimeofday(struct timespec *tv)
+{
+	time_t wtm_sec, sec = tv->tv_sec;
+	long wtm_nsec, nsec = tv->tv_nsec;
+
+	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+		return -EINVAL;
+
+	write_seqlock_irq(&xtime_lock);
+	/* This is revolting. We need to set the xtime.tv_nsec
+	 * correctly. However, the value in this location is
+	 * the value at the last tick.
+	 * Discover what correction gettimeofday
+	 * would have done, and then undo it!
+	 */
+	nsec -= do_gettimeoffset() * 1000;
+
+	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+	set_normalized_timespec(&xtime, sec, nsec);
+	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+	time_adjust = 0;		/* stop active adjtime() */
+	time_status |= STA_UNSYNC;
+	time_maxerror = NTP_PHASE_LIMIT;
+	time_esterror = NTP_PHASE_LIMIT;
+	write_sequnlock_irq(&xtime_lock);
+	clock_was_set();
+	return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+
+#ifdef CONFIG_PROFILING
+#define s390_do_profile(regs)	profile_tick(CPU_PROFILING, regs)
+#else
+#define s390_do_profile(regs)  do { ; } while(0)
+#endif /* CONFIG_PROFILING */
+
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+void account_ticks(struct pt_regs *regs)
+{
+	__u64 tmp;
+	__u32 ticks, xticks;
+
+	/* Calculate how many ticks have passed. */
+	if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) {
+		/*
+		 * We have to program the clock comparator even if
+		 * no tick has passed. That happens if e.g. an i/o
+		 * interrupt wakes up an idle processor that has
+		 * switched off its hz timer.
+		 */
+		tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
+		asm volatile ("SCKC %0" : : "m" (tmp));
+		return;
+	}
+	tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
+	if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
+		ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
+		S390_lowcore.jiffy_timer +=
+			CLK_TICKS_PER_JIFFY * (__u64) ticks;
+	} else if (tmp >= CLK_TICKS_PER_JIFFY) {
+		ticks = 2;
+		S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
+	} else {
+		ticks = 1;
+		S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
+	}
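+	/*
+	 * Worked example for the catch-up logic above: jiffy_timer
+	 * holds the TOD value of the tick that was due, so an
+	 * interrupt arriving 2.5 tick lengths after it gives
+	 * tmp = 2.5 * CLK_TICKS_PER_JIFFY and thus
+	 * ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1 = 3.
+	 */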
+
+	/* set clock comparator for next tick */
+	tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
+        asm volatile ("SCKC %0" : : "m" (tmp));
+
+#ifdef CONFIG_SMP
+	/*
+	 * Do not rely on the boot cpu to do the calls to do_timer.
+	 * Spread it over all cpus instead.
+	 */
+	write_seqlock(&xtime_lock);
+	if (S390_lowcore.jiffy_timer > xtime_cc) {
+		tmp = S390_lowcore.jiffy_timer - xtime_cc;
+		if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
+			xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
+			xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
+		} else {
+			xticks = 1;
+			xtime_cc += CLK_TICKS_PER_JIFFY;
+		}
+		while (xticks--)
+			do_timer(regs);
+	}
+	write_sequnlock(&xtime_lock);
+#else
+	for (xticks = ticks; xticks > 0; xticks--)
+		do_timer(regs);
+#endif
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	account_user_vtime(current);
+#else
+	while (ticks--)
+		update_process_times(user_mode(regs));
+#endif
+
+	s390_do_profile(regs);
+}
+
+#ifdef CONFIG_NO_IDLE_HZ
+
+#ifdef CONFIG_NO_IDLE_HZ_INIT
+int sysctl_hz_timer = 0;
+#else
+int sysctl_hz_timer = 1;
+#endif
+
+/*
+ * Stop the HZ tick on the current CPU.
+ * Only cpu_idle may call this function.
+ */
+static inline void stop_hz_timer(void)
+{
+	__u64 timer;
+
+	if (sysctl_hz_timer != 0)
+		return;
+
+	cpu_set(smp_processor_id(), nohz_cpu_mask);
+
+	/*
+	 * Leave the clock comparator set up for the next timer
+	 * tick if either rcu or a softirq is pending.
+	 */
+	if (rcu_pending(smp_processor_id()) || local_softirq_pending()) {
+		cpu_clear(smp_processor_id(), nohz_cpu_mask);
+		return;
+	}
+
+	/*
+	 * This cpu is going really idle. Set up the clock comparator
+	 * for the next event.
+	 */
+	timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64;
+	timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
+	asm volatile ("SCKC %0" : : "m" (timer));
+}
+
+/*
+ * Start the HZ tick on the current CPU.
+ * Only cpu_idle may call this function.
+ */
+static inline void start_hz_timer(void)
+{
+	if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
+		return;
+	account_ticks(__KSTK_PTREGS(current));
+	cpu_clear(smp_processor_id(), nohz_cpu_mask);
+}
+
+static int nohz_idle_notify(struct notifier_block *self,
+			    unsigned long action, void *hcpu)
+{
+	switch (action) {
+	case CPU_IDLE:
+		stop_hz_timer();
+		break;
+	case CPU_NOT_IDLE:
+		start_hz_timer();
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block nohz_idle_nb = {
+	.notifier_call = nohz_idle_notify,
+};
+
+void __init nohz_init(void)
+{
+	if (register_idle_notifier(&nohz_idle_nb))
+		panic("Couldn't register idle notifier");
+}
+
+#endif
+
+/*
+ * Start the clock comparator on the current CPU.
+ */
+void init_cpu_timer(void)
+{
+	unsigned long cr0;
+	__u64 timer;
+
+	timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY;
+	S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
+	timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
+	asm volatile ("SCKC %0" : : "m" (timer));
+        /* allow clock comparator timer interrupt */
+	__ctl_store(cr0, 0, 0);
+        cr0 |= 0x800;
+	__ctl_load(cr0, 0, 0);
+}
+
+extern void vtime_init(void);
+
+/*
+ * Initialize the TOD clock and the CPU timer of
+ * the boot cpu.
+ */
+void __init time_init(void)
+{
+	__u64 set_time_cc;
+	int cc;
+
+        /* kick the TOD clock */
+        asm volatile ("STCK 0(%1)\n\t"
+                      "IPM  %0\n\t"
+                      "SRL  %0,28" : "=r" (cc) : "a" (&init_timer_cc) 
+				   : "memory", "cc");
+        switch (cc) {
+        case 0: /* clock in set state: all is fine */
+                break;
+        case 1: /* clock in non-set state: FIXME */
+                printk("time_init: TOD clock in non-set state\n");
+                break;
+        case 2: /* clock in error state: FIXME */
+                printk("time_init: TOD clock in error state\n");
+                break;
+        case 3: /* clock in stopped or not-operational state: FIXME */
+                printk("time_init: TOD clock stopped/non-operational\n");
+                break;
+        }
+	jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
+
+	/* set xtime */
+	xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
+	set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
+		(0x3c26700LL*1000000*4096);
+        tod_to_timeval(set_time_cc, &xtime);
+        set_normalized_timespec(&wall_to_monotonic,
+                                -xtime.tv_sec, -xtime.tv_nsec);
+
+	/* request the clock comparator external interrupt */
+        if (register_early_external_interrupt(0x1004, 0,
+					      &ext_int_info_cc) != 0)
+                panic("Couldn't request external interrupt 0x1004");
+
+        init_cpu_timer();
+
+#ifdef CONFIG_NO_IDLE_HZ
+	nohz_init();
+#endif
+
+#ifdef CONFIG_VIRT_TIMER
+	vtime_init();
+#endif
+}
+
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
new file mode 100644
index 000000000000..8b90e9528b91
--- /dev/null
+++ b/arch/s390/kernel/traps.c
@@ -0,0 +1,738 @@
+/*
+ *  arch/s390/kernel/traps.c
+ *
+ *  S390 version
+ *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *
+ *  Derived from "arch/i386/kernel/traps.c"
+ *    Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'asm.s'.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/mathemu.h>
+#include <asm/cpcmd.h>
+#include <asm/s390_ext.h>
+#include <asm/lowcore.h>
+#include <asm/debug.h>
+
+/* Called from entry.S only */
+extern void handle_per_exception(struct pt_regs *regs);
+
+typedef void pgm_check_handler_t(struct pt_regs *, long);
+pgm_check_handler_t *pgm_check_table[128];
+
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_PROCESS_DEBUG
+int sysctl_userprocess_debug = 1;
+#else
+int sysctl_userprocess_debug = 0;
+#endif
+#endif
+
+extern pgm_check_handler_t do_protection_exception;
+extern pgm_check_handler_t do_dat_exception;
+extern pgm_check_handler_t do_pseudo_page_fault;
+#ifdef CONFIG_PFAULT
+extern int pfault_init(void);
+extern void pfault_fini(void);
+extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
+static ext_int_info_t ext_int_pfault;
+#endif
+extern pgm_check_handler_t do_monitor_call;
+
+#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
+
+#ifndef CONFIG_ARCH_S390X
+#define FOURLONG "%08lx %08lx %08lx %08lx\n"
+static int kstack_depth_to_print = 12;
+#else /* CONFIG_ARCH_S390X */
+#define FOURLONG "%016lx %016lx %016lx %016lx\n"
+static int kstack_depth_to_print = 20;
+#endif /* CONFIG_ARCH_S390X */
+
+/*
+ * For show_trace we have three different stacks to consider:
+ *   - the panic stack, which is used if the kernel stack has overflowed
+ *   - the asynchronous interrupt stack (cpu related)
+ *   - the synchronous kernel stack (process related)
+ * The stack trace can start at any of the three stacks and can potentially
+ * touch all of them. The order is: panic stack, async stack, sync stack.
+ */
+static unsigned long
+__show_trace(unsigned long sp, unsigned long low, unsigned long high)
+{
+	struct stack_frame *sf;
+	struct pt_regs *regs;
+
+	while (1) {
+		sp = sp & PSW_ADDR_INSN;
+		if (sp < low || sp > high - sizeof(*sf))
+			return sp;
+		sf = (struct stack_frame *) sp;
+		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
+		/* Follow the backchain. */
+		while (1) {
+			low = sp;
+			sp = sf->back_chain & PSW_ADDR_INSN;
+			if (!sp)
+				break;
+			if (sp <= low || sp > high - sizeof(*sf))
+				return sp;
+			sf = (struct stack_frame *) sp;
+			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
+		}
+		/* Zero backchain detected, check for interrupt frame. */
+		sp = (unsigned long) (sf + 1);
+		if (sp <= low || sp > high - sizeof(*regs))
+			return sp;
+		regs = (struct pt_regs *) sp;
+		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
+		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
+		low = sp;
+		sp = regs->gprs[15];
+	}
+}
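+
+/*
+ * Note on the walk above: each stack frame saves gprs 6-15, so
+ * sf->gprs[8] is the saved r14 (the return address), and back_chain
+ * points at the caller's frame. A zero back chain marks a stack
+ * switch; the pt_regs saved there supplies the next stack pointer.
+ */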
+
+void show_trace(struct task_struct *task, unsigned long * stack)
+{
+	register unsigned long __r15 asm ("15");
+	unsigned long sp;
+
+	sp = (unsigned long) stack;
+	if (!sp)
+		sp = task ? task->thread.ksp : __r15;
+	printk("Call Trace:\n");
+#ifdef CONFIG_CHECK_STACK
+	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
+			  S390_lowcore.panic_stack);
+#endif
+	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
+			  S390_lowcore.async_stack);
+	if (task)
+		__show_trace(sp, (unsigned long) task->thread_info,
+			     (unsigned long) task->thread_info + THREAD_SIZE);
+	else
+		__show_trace(sp, S390_lowcore.thread_info,
+			     S390_lowcore.thread_info + THREAD_SIZE);
+	printk("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	register unsigned long * __r15 asm ("15");
+	unsigned long *stack;
+	int i;
+
+	/*
+	 * Debugging aid: "show_stack(NULL);" prints the
+	 * back trace for this cpu.
+	 */
+
+	if (!sp)
+		sp = task ? (unsigned long *) task->thread.ksp : __r15;
+
+	stack = sp;
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
+			break;
+		if (i && ((i * sizeof (long) % 32) == 0))
+			printk("\n       ");
+		printk("%p ", (void *)*stack++);
+	}
+	printk("\n");
+	show_trace(task, sp);
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+	show_stack(NULL, NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void show_registers(struct pt_regs *regs)
+{
+	mm_segment_t old_fs;
+	char *mode;
+	int i;
+
+	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
+	printk("%s PSW : %p %p",
+	       mode, (void *) regs->psw.mask,
+	       (void *) regs->psw.addr);
+	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
+	printk("%s GPRS: " FOURLONG, mode,
+	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
+	printk("           " FOURLONG,
+	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
+	printk("           " FOURLONG,
+	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
+	printk("           " FOURLONG,
+	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
+
+#if 0
+	/* FIXME: this isn't needed any more but it changes the ksymoops
+	 * input. To remove or not to remove ... */
+	save_access_regs(regs->acrs);
+	printk("%s ACRS: %08x %08x %08x %08x\n", mode,
+	       regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
+	printk("           %08x %08x %08x %08x\n",
+	       regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
+	printk("           %08x %08x %08x %08x\n",
+	       regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
+	printk("           %08x %08x %08x %08x\n",
+	       regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
+#endif
+
+	/*
+	 * Print the first 20 bytes of the instruction stream at the
+	 * time of the fault.
+	 */
+	old_fs = get_fs();
+	if (regs->psw.mask & PSW_MASK_PSTATE)
+		set_fs(USER_DS);
+	else
+		set_fs(KERNEL_DS);
+	printk("%s Code: ", mode);
+	for (i = 0; i < 20; i++) {
+		unsigned char c;
+		if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
+			printk(" Bad PSW.");
+			break;
+		}
+		printk("%02x ", c);
+	}
+	set_fs(old_fs);
+
+	printk("\n");
+}	
+
+/* This is called from fs/proc/array.c */
+char *task_show_regs(struct task_struct *task, char *buffer)
+{
+	struct pt_regs *regs;
+
+	regs = __KSTK_PTREGS(task);
+	buffer += sprintf(buffer, "task: %p, ksp: %p\n",
+		       task, (void *)task->thread.ksp);
+	buffer += sprintf(buffer, "User PSW : %p %p\n",
+		       (void *) regs->psw.mask, (void *)regs->psw.addr);
+
+	buffer += sprintf(buffer, "User GPRS: " FOURLONG,
+			  regs->gprs[0], regs->gprs[1],
+			  regs->gprs[2], regs->gprs[3]);
+	buffer += sprintf(buffer, "           " FOURLONG,
+			  regs->gprs[4], regs->gprs[5],
+			  regs->gprs[6], regs->gprs[7]);
+	buffer += sprintf(buffer, "           " FOURLONG,
+			  regs->gprs[8], regs->gprs[9],
+			  regs->gprs[10], regs->gprs[11]);
+	buffer += sprintf(buffer, "           " FOURLONG,
+			  regs->gprs[12], regs->gprs[13],
+			  regs->gprs[14], regs->gprs[15]);
+	buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
+			  task->thread.acrs[0], task->thread.acrs[1],
+			  task->thread.acrs[2], task->thread.acrs[3]);
+	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
+			  task->thread.acrs[4], task->thread.acrs[5],
+			  task->thread.acrs[6], task->thread.acrs[7]);
+	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
+			  task->thread.acrs[8], task->thread.acrs[9],
+			  task->thread.acrs[10], task->thread.acrs[11]);
+	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
+			  task->thread.acrs[12], task->thread.acrs[13],
+			  task->thread.acrs[14], task->thread.acrs[15]);
+	return buffer;
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+	static int die_counter;
+
+	debug_stop_all();
+	console_verbose();
+	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+        show_regs(regs);
+	bust_spinlocks(0);
+        spin_unlock_irq(&die_lock);
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+	if (panic_on_oops)
+		panic("Fatal exception: panic_on_oops");
+        do_exit(SIGSEGV);
+}
+
+static inline void
+report_user_fault(long interruption_code, struct pt_regs *regs)
+{
+#if defined(CONFIG_SYSCTL)
+	if (!sysctl_userprocess_debug)
+		return;
+#endif
+#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
+	printk("User process fault: interruption code 0x%lX\n",
+	       interruption_code);
+	show_regs(regs);
+#endif
+}
+
+static inline void do_trap(long interruption_code, int signr, char *str,
+                           struct pt_regs *regs, siginfo_t *info)
+{
+	/*
+	 * We got all needed information from the lowcore and can
+	 * now safely switch on interrupts.
+	 */
+        if (regs->psw.mask & PSW_MASK_PSTATE)
+		local_irq_enable();
+
+        if (regs->psw.mask & PSW_MASK_PSTATE) {
+                struct task_struct *tsk = current;
+
+                tsk->thread.trap_no = interruption_code & 0xffff;
+		force_sig_info(signr, info, tsk);
+		report_user_fault(interruption_code, regs);
+        } else {
+                const struct exception_table_entry *fixup;
+                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+                if (fixup)
+                        regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
+                else
+                        die(str, regs, interruption_code);
+        }
+}
+
+static inline void *get_check_address(struct pt_regs *regs)
+{
+	return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
+}
+
+void do_single_step(struct pt_regs *regs)
+{
+	if ((current->ptrace & PT_PTRACED) != 0)
+		force_sig(SIGTRAP, current);
+}
+
+asmlinkage void
+default_trap_handler(struct pt_regs * regs, long interruption_code)
+{
+        if (regs->psw.mask & PSW_MASK_PSTATE) {
+		local_irq_enable();
+		report_user_fault(interruption_code, regs);
+		do_exit(SIGSEGV);
+	} else
+		die("Unknown program exception", regs, interruption_code);
+}
+
+#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
+asmlinkage void name(struct pt_regs * regs, long interruption_code) \
+{ \
+        siginfo_t info; \
+        info.si_signo = signr; \
+        info.si_errno = 0; \
+        info.si_code = sicode; \
+        info.si_addr = (void *)siaddr; \
+        do_trap(interruption_code, signr, str, regs, &info); \
+}
+
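+/*
+ * Each invocation below expands to a complete handler; e.g. the first
+ * one defines addressing_exception(), which fills a SIGILL/ILL_ILLADR
+ * siginfo and passes it to do_trap().
+ */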
+DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
+	      ILL_ILLADR, get_check_address(regs))
+DO_ERROR_INFO(SIGILL,  "execute exception", execute_exception,
+	      ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "fixpoint divide exception", divide_exception,
+	      FPE_INTDIV, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "fixpoint overflow exception", overflow_exception,
+	      FPE_INTOVF, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "HFP overflow exception", hfp_overflow_exception,
+	      FPE_FLTOVF, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "HFP underflow exception", hfp_underflow_exception,
+	      FPE_FLTUND, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "HFP significance exception", hfp_significance_exception,
+	      FPE_FLTRES, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "HFP divide exception", hfp_divide_exception,
+	      FPE_FLTDIV, get_check_address(regs))
+DO_ERROR_INFO(SIGFPE,  "HFP square root exception", hfp_sqrt_exception,
+	      FPE_FLTINV, get_check_address(regs))
+DO_ERROR_INFO(SIGILL,  "operand exception", operand_exception,
+	      ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGILL,  "privileged operation", privileged_op,
+	      ILL_PRVOPC, get_check_address(regs))
+DO_ERROR_INFO(SIGILL,  "special operation exception", special_op_exception,
+	      ILL_ILLOPN, get_check_address(regs))
+DO_ERROR_INFO(SIGILL,  "translation exception", translation_exception,
+	      ILL_ILLOPN, get_check_address(regs))
+
+static inline void
+do_fp_trap(struct pt_regs *regs, void *location,
+           int fpc, long interruption_code)
+{
+	siginfo_t si;
+
+	si.si_signo = SIGFPE;
+	si.si_errno = 0;
+	si.si_addr = location;
+	si.si_code = 0;
+	/* FPC[2] is Data Exception Code */
+	if ((fpc & 0x00000300) == 0) {
+		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
+		if (fpc & 0x8000) /* invalid fp operation */
+			si.si_code = FPE_FLTINV;
+		else if (fpc & 0x4000) /* div by 0 */
+			si.si_code = FPE_FLTDIV;
+		else if (fpc & 0x2000) /* overflow */
+			si.si_code = FPE_FLTOVF;
+		else if (fpc & 0x1000) /* underflow */
+			si.si_code = FPE_FLTUND;
+		else if (fpc & 0x0800) /* inexact */
+			si.si_code = FPE_FLTRES;
+	}
+	current->thread.ieee_instruction_pointer = (addr_t) location;
+	do_trap(interruption_code, SIGFPE,
+		"floating point exception", regs, &si);
+}
+
+asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
+{
+	siginfo_t info;
+        __u8 opcode[6];
+	__u16 *location;
+	int signal = 0;
+
+	location = (__u16 *) get_check_address(regs);
+
+	/*
+	 * We got all needed information from the lowcore and can
+	 * now safely switch on interrupts.
+	 */
+	if (regs->psw.mask & PSW_MASK_PSTATE)
+		local_irq_enable();
+
+	if (regs->psw.mask & PSW_MASK_PSTATE) {
+		get_user(*((__u16 *) opcode), (__u16 __user *) location);
+		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
+			if (current->ptrace & PT_PTRACED)
+				force_sig(SIGTRAP, current);
+			else
+				signal = SIGILL;
+#ifdef CONFIG_MATHEMU
+		} else if (opcode[0] == 0xb3) {
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_b3(opcode, regs);
+                } else if (opcode[0] == 0xed) {
+			get_user(*((__u32 *) (opcode+2)),
+				 (__u32 *)(location+1));
+			signal = math_emu_ed(opcode, regs);
+		} else if (*((__u16 *) opcode) == 0xb299) {
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_srnm(opcode, regs);
+		} else if (*((__u16 *) opcode) == 0xb29c) {
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_stfpc(opcode, regs);
+		} else if (*((__u16 *) opcode) == 0xb29d) {
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_lfpc(opcode, regs);
+#endif
+		} else
+			signal = SIGILL;
+	} else
+		signal = SIGILL;
+
+#ifdef CONFIG_MATHEMU
+        if (signal == SIGFPE)
+		do_fp_trap(regs, location,
+                           current->thread.fp_regs.fpc, interruption_code);
+        else if (signal == SIGSEGV) {
+		info.si_signo = signal;
+		info.si_errno = 0;
+		info.si_code = SEGV_MAPERR;
+		info.si_addr = (void *) location;
+		do_trap(interruption_code, signal,
+			"user address fault", regs, &info);
+	} else
+#endif
+        if (signal) {
+		info.si_signo = signal;
+		info.si_errno = 0;
+		info.si_code = ILL_ILLOPC;
+		info.si_addr = (void *) location;
+		do_trap(interruption_code, signal,
+			"illegal operation", regs, &info);
+	}
+}
+
+
+#ifdef CONFIG_MATHEMU
+asmlinkage void 
+specification_exception(struct pt_regs * regs, long interruption_code)
+{
+        __u8 opcode[6];
+	__u16 *location = NULL;
+	int signal = 0;
+
+	location = (__u16 *) get_check_address(regs);
+
+	/*
+	 * We got all needed information from the lowcore and can
+	 * now safely switch on interrupts.
+	 */
+        if (regs->psw.mask & PSW_MASK_PSTATE)
+		local_irq_enable();
+
+        if (regs->psw.mask & PSW_MASK_PSTATE) {
+		get_user(*((__u16 *) opcode), location);
+		switch (opcode[0]) {
+		case 0x28: /* LDR Rx,Ry   */
+			signal = math_emu_ldr(opcode);
+			break;
+		case 0x38: /* LER Rx,Ry   */
+			signal = math_emu_ler(opcode);
+			break;
+		case 0x60: /* STD R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_std(opcode, regs);
+			break;
+		case 0x68: /* LD R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_ld(opcode, regs);
+			break;
+		case 0x70: /* STE R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_ste(opcode, regs);
+			break;
+		case 0x78: /* LE R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_le(opcode, regs);
+			break;
+		default:
+			signal = SIGILL;
+			break;
+		}
+	} else
+		signal = SIGILL;
+
+	if (signal == SIGFPE)
+		do_fp_trap(regs, location,
+			   current->thread.fp_regs.fpc, interruption_code);
+	else if (signal) {
+		siginfo_t info;
+		info.si_signo = signal;
+		info.si_errno = 0;
+		info.si_code = ILL_ILLOPN;
+		info.si_addr = location;
+		do_trap(interruption_code, signal, 
+			"specification exception", regs, &info);
+	}
+}
+#else
+DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
+	      ILL_ILLOPN, get_check_address(regs));
+#endif
+
+asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
+{
+	__u16 *location;
+	int signal = 0;
+
+	location = (__u16 *) get_check_address(regs);
+
+	/*
+	 * We got all needed information from the lowcore and can
+	 * now safely switch on interrupts.
+	 */
+	if (regs->psw.mask & PSW_MASK_PSTATE)
+		local_irq_enable();
+
+	if (MACHINE_HAS_IEEE)
+		__asm__ volatile ("stfpc %0\n\t" 
+				  : "=m" (current->thread.fp_regs.fpc));
+
+#ifdef CONFIG_MATHEMU
+	else if (regs->psw.mask & PSW_MASK_PSTATE) {
+		__u8 opcode[6];
+		get_user(*((__u16 *) opcode), location);
+		switch (opcode[0]) {
+		case 0x28: /* LDR Rx,Ry   */
+			signal = math_emu_ldr(opcode);
+			break;
+		case 0x38: /* LER Rx,Ry   */
+			signal = math_emu_ler(opcode);
+			break;
+		case 0x60: /* STD R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_std(opcode, regs);
+			break;
+		case 0x68: /* LD R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_ld(opcode, regs);
+			break;
+		case 0x70: /* STE R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_ste(opcode, regs);
+			break;
+		case 0x78: /* LE R,D(X,B) */
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_le(opcode, regs);
+			break;
+		case 0xb3:
+			get_user(*((__u16 *) (opcode+2)), location+1);
+			signal = math_emu_b3(opcode, regs);
+			break;
+		case 0xed:
+			get_user(*((__u32 *) (opcode+2)),
+				 (__u32 *)(location+1));
+			signal = math_emu_ed(opcode, regs);
+			break;
+		case 0xb2:
+			if (opcode[1] == 0x99) {
+				get_user(*((__u16 *) (opcode+2)), location+1);
+				signal = math_emu_srnm(opcode, regs);
+			} else if (opcode[1] == 0x9c) {
+				get_user(*((__u16 *) (opcode+2)), location+1);
+				signal = math_emu_stfpc(opcode, regs);
+			} else if (opcode[1] == 0x9d) {
+				get_user(*((__u16 *) (opcode+2)), location+1);
+				signal = math_emu_lfpc(opcode, regs);
+			} else
+				signal = SIGILL;
+			break;
+		default:
+			signal = SIGILL;
+			break;
+		}
+	}
+#endif
+	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
+		signal = SIGFPE;
+	else
+		signal = SIGILL;
+	if (signal == SIGFPE)
+		do_fp_trap(regs, location,
+			   current->thread.fp_regs.fpc, interruption_code);
+	else if (signal) {
+		siginfo_t info;
+		info.si_signo = signal;
+		info.si_errno = 0;
+		info.si_code = ILL_ILLOPN;
+		info.si_addr = location;
+		do_trap(interruption_code, signal, 
+			"data exception", regs, &info);
+	}
+}
+
+asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code)
+{
+	siginfo_t info;
+
+	/* Set user psw back to home space mode. */
+	if (regs->psw.mask & PSW_MASK_PSTATE)
+		regs->psw.mask |= PSW_ASC_HOME;
+	/* Send SIGILL. */
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code = ILL_PRVOPC;
+	info.si_addr = get_check_address(regs);
+	do_trap(int_code, SIGILL, "space switch event", regs, &info);
+}
+
+asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
+{
+	die("Kernel stack overflow", regs, 0);
+	panic("Corrupt kernel stack, can't continue.");
+}
+
+/* init is done in lowcore.S and head.S */
+
+void __init trap_init(void)
+{
+	int i;
+
+	for (i = 0; i < 128; i++)
+		pgm_check_table[i] = &default_trap_handler;
+	pgm_check_table[1] = &illegal_op;
+	pgm_check_table[2] = &privileged_op;
+	pgm_check_table[3] = &execute_exception;
+	pgm_check_table[4] = &do_protection_exception;
+	pgm_check_table[5] = &addressing_exception;
+	pgm_check_table[6] = &specification_exception;
+	pgm_check_table[7] = &data_exception;
+	pgm_check_table[8] = &overflow_exception;
+	pgm_check_table[9] = &divide_exception;
+	pgm_check_table[0x0A] = &overflow_exception;
+	pgm_check_table[0x0B] = &divide_exception;
+	pgm_check_table[0x0C] = &hfp_overflow_exception;
+	pgm_check_table[0x0D] = &hfp_underflow_exception;
+	pgm_check_table[0x0E] = &hfp_significance_exception;
+	pgm_check_table[0x0F] = &hfp_divide_exception;
+	pgm_check_table[0x10] = &do_dat_exception;
+	pgm_check_table[0x11] = &do_dat_exception;
+	pgm_check_table[0x12] = &translation_exception;
+	pgm_check_table[0x13] = &special_op_exception;
+#ifndef CONFIG_ARCH_S390X
+	pgm_check_table[0x14] = &do_pseudo_page_fault;
+#else /* CONFIG_ARCH_S390X */
+	pgm_check_table[0x38] = &do_dat_exception;
+	pgm_check_table[0x39] = &do_dat_exception;
+	pgm_check_table[0x3A] = &do_dat_exception;
+	pgm_check_table[0x3B] = &do_dat_exception;
+#endif /* CONFIG_ARCH_S390X */
+	pgm_check_table[0x15] = &operand_exception;
+	pgm_check_table[0x1C] = &space_switch_exception;
+	pgm_check_table[0x1D] = &hfp_sqrt_exception;
+	pgm_check_table[0x40] = &do_monitor_call;
+
+	if (MACHINE_IS_VM) {
+		/*
+		 * First try to get pfault pseudo page faults going.
+		 * If this isn't available, turn on pagex page faults.
+		 */
+#ifdef CONFIG_PFAULT
+		/* request the 0x2603 external interrupt */
+		if (register_early_external_interrupt(0x2603, pfault_interrupt,
+						      &ext_int_pfault) != 0)
+			panic("Couldn't request external interrupt 0x2603");
+
+		if (pfault_init() == 0)
+			return;
+
+		/* Tough luck, no pfault. */
+		unregister_early_external_interrupt(0x2603, pfault_interrupt,
+						    &ext_int_pfault);
+#endif
+#ifndef CONFIG_ARCH_S390X
+		cpcmd("SET PAGEX ON", NULL, 0);
+#endif
+	}
+}
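+
+/*
+ * The first-level program-check handler in entry.S/entry64.S dispatches
+ * through pgm_check_table; expressed in C the dispatch is roughly
+ * (a sketch, the real code is assembly):
+ *
+ *	void do_program_check(struct pt_regs *regs, long interruption_code)
+ *	{
+ *		pgm_check_table[interruption_code & 0x7f](regs,
+ *							  interruption_code);
+ *	}
+ */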
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..89fdb3808bc0
--- /dev/null
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -0,0 +1,130 @@
+/* ld script to make s390 Linux kernel
+ * Written by Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <linux/config.h>
+
+#ifndef CONFIG_ARCH_S390X
+OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
+OUTPUT_ARCH(s390)
+ENTRY(_start)
+jiffies = jiffies_64 + 4;
+#else
+OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
+OUTPUT_ARCH(s390:64-bit)
+ENTRY(_start)
+jiffies = jiffies_64;
+#endif
+
+SECTIONS
+{
+  . = 0x00000000;
+  _text = .;			/* Text and read-only data */
+  .text : {
+	*(.text)
+	SCHED_TEXT
+	LOCK_TEXT
+	*(.fixup)
+	*(.gnu.warning)
+	} = 0x0700
+
+  _etext = .;			/* End of text section */
+
+  . = ALIGN(16);		/* Exception table */
+  __start___ex_table = .;
+  __ex_table : { *(__ex_table) }
+  __stop___ex_table = .;
+
+  RODATA
+
+#ifdef CONFIG_SHARED_KERNEL
+  . = ALIGN(1048576);		/* VM shared segments are 1MB aligned */
+
+  _eshared = .;			/* End of shareable data */
+#endif
+
+  .data : {			/* Data */
+	*(.data)
+	CONSTRUCTORS
+	}
+
+  . = ALIGN(4096);
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(4096);
+  __nosave_end = .;
+
+  . = ALIGN(4096);
+  .data.page_aligned : { *(.data.idt) }
+
+  . = ALIGN(32);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+  _edata = .;			/* End of data section */
+
+  . = ALIGN(8192);		/* init_task */
+  .data.init_task : { *(.data.init_task) }
+
+  /* will be freed after init */
+  . = ALIGN(4096);		/* Init code and data */
+  __init_begin = .;
+  .init.text : { 
+	_sinittext = .;
+	*(.init.text)
+	_einittext = .;
+  }
+  .init.data : { *(.init.data) }
+  . = ALIGN(256);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  __initcall_start = .;
+  .initcall.init : {
+	*(.initcall1.init) 
+	*(.initcall2.init) 
+	*(.initcall3.init) 
+	*(.initcall4.init) 
+	*(.initcall5.init) 
+	*(.initcall6.init) 
+	*(.initcall7.init)
+  }
+  __initcall_end = .;
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+  SECURITY_INIT
+  . = ALIGN(256);
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.initramfs) }
+  . = ALIGN(2);
+  __initramfs_end = .;
+  . = ALIGN(256);
+  __per_cpu_start = .;
+  .data.percpu  : { *(.data.percpu) }
+  __per_cpu_end = .;
+  . = ALIGN(4096);
+  __init_end = .;
+  /* freed after init ends here */
+
+  __bss_start = .;		/* BSS */
+  .bss : { *(.bss) }
+  . = ALIGN(2);
+  __bss_stop = .;
+
+  _end = . ;
+
+  /* Sections to be discarded */
+  /DISCARD/ : {
+	*(.exitcall.exit)
+	}
+
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+}
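+
+/*
+ * The symbols defined above are visible to C code as external markers.
+ * A minimal sketch of the usual consumer, free_initmem(), which returns
+ * the [__init_begin, __init_end) range to the page allocator once
+ * booting is done:
+ *
+ *	extern char __init_begin, __init_end;
+ *
+ *	unsigned long addr = (unsigned long) &__init_begin;
+ *	for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
+ *		ClearPageReserved(virt_to_page(addr));
+ *		set_page_count(virt_to_page(addr), 1);
+ *		free_page(addr);
+ *	}
+ */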
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
new file mode 100644
index 000000000000..bb6cf02418a2
--- /dev/null
+++ b/arch/s390/kernel/vtime.c
@@ -0,0 +1,565 @@
+/*
+ *  arch/s390/kernel/vtime.c
+ *    Virtual cpu timer based timer functions.
+ *
+ *  S390 version
+ *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/timex.h>
+#include <linux/notifier.h>
+#include <linux/kernel_stat.h>
+#include <linux/rcupdate.h>
+#include <linux/posix-timers.h>
+
+#include <asm/s390_ext.h>
+#include <asm/timer.h>
+
+#define VTIMER_MAGIC (TIMER_MAGIC + 1)
+static ext_int_info_t ext_int_info_timer;
+DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Update process times based on virtual cpu times stored by entry.S
+ * to the lowcore fields user_timer, system_timer & steal_clock.
+ */
+void account_user_vtime(struct task_struct *tsk)
+{
+	cputime_t cputime;
+	__u64 timer, clock;
+	int rcu_user_flag;
+
+	timer = S390_lowcore.last_update_timer;
+	clock = S390_lowcore.last_update_clock;
+	asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
+		      "  STCK %1"      /* Store current tod clock value */
+		      : "=m" (S390_lowcore.last_update_timer),
+		        "=m" (S390_lowcore.last_update_clock) );
+	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+	S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;
+
+	cputime = S390_lowcore.user_timer >> 12;
+	rcu_user_flag = cputime != 0;
+	S390_lowcore.user_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_user_time(tsk, cputime);
+
+	cputime = S390_lowcore.system_timer >> 12;
+	S390_lowcore.system_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_system_time(tsk, HARDIRQ_OFFSET, cputime);
+
+	cputime = S390_lowcore.steal_clock;
+	if ((__s64) cputime > 0) {
+		cputime >>= 12;
+		S390_lowcore.steal_clock -= cputime << 12;
+		account_steal_time(tsk, cputime);
+	}
+
+	run_local_timers();
+	if (rcu_pending(smp_processor_id()))
+		rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
+	scheduler_tick();
+	run_posix_cpu_timers(tsk);
+}
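+
+/*
+ * The >> 12 shifts above convert CPU-timer/TOD-clock units to
+ * microseconds: bit 51 of the TOD format corresponds to one
+ * microsecond, so 4096 (= 1 << 12) timer units make a microsecond.
+ * A worked example, assuming cputime_t counts microseconds here:
+ *
+ *	user_timer = 40960000 timer units
+ *	cputime    = 40960000 >> 12 = 10000 usec
+ *	remainder  = 40960000 - (10000 << 12) = 0, kept for the next update
+ */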
+
+/*
+ * Update the current task's system time based on the virtual cpu timer
+ * value stored by entry.S; called e.g. when entering and leaving
+ * interrupt context.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+	cputime_t cputime;
+	__u64 timer;
+
+	timer = S390_lowcore.last_update_timer;
+	asm volatile ("  STPT %0"    /* Store current cpu timer value */
+		      : "=m" (S390_lowcore.last_update_timer) );
+	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+
+	cputime = S390_lowcore.system_timer >> 12;
+	S390_lowcore.system_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_system_time(tsk, 0, cputime);
+}
+
+static inline void set_vtimer(__u64 expires)
+{
+	__u64 timer;
+
+	asm volatile ("  STPT %0\n"  /* Store current cpu timer value */
+		      "  SPT %1"     /* Set new value immediatly afterwards */
+		      : "=m" (timer) : "m" (expires) );
+	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
+	S390_lowcore.last_update_timer = expires;
+
+	/* store expire time for this CPU timer */
+	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+}
+#else
+static inline void set_vtimer(__u64 expires)
+{
+	S390_lowcore.last_update_timer = expires;
+	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
+
+	/* store expire time for this CPU timer */
+	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+}
+#endif
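+
+/*
+ * Background: the CPU timer is a doubleword that is decremented at the
+ * same rate as the TOD clock while the (virtual) cpu is running and
+ * raises external interrupt 0x1005 once it turns negative.  SPT loads
+ * a new value, STPT stores the current one, so the remaining time can
+ * be read with (sketch):
+ *
+ *	__u64 left;
+ *	asm volatile ("STPT %0" : "=m" (left));
+ */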
+
+static void start_cpu_timer(void)
+{
+	struct vtimer_queue *vt_list;
+
+	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+	set_vtimer(vt_list->idle);
+}
+
+static void stop_cpu_timer(void)
+{
+	__u64 done;
+	struct vtimer_queue *vt_list;
+
+	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+
+	/* nothing to do */
+	if (list_empty(&vt_list->list)) {
+		vt_list->idle = VTIMER_MAX_SLICE;
+		goto fire;
+	}
+
+	/* store progress */
+	asm volatile ("STPT %0" : "=m" (done));
+
+	/*
+	 * If the timer value is already negative we do not stop the
+	 * CPU timer: the expiry interrupt is imminent and will start
+	 * the CPU timer again anyway.
+	 */
+	if (done & (1LL << 63))
+		return;
+
+	vt_list->offset += vt_list->to_expire - done;
+
+	/* save the actual expire value */
+	vt_list->idle = done;
+
+	/*
+	 * The CPU timer cannot be halted, so we load a value that
+	 * practically never expires (only after roughly 71 years) and
+	 * restore the saved expiry value when the timer is resumed.
+	 */
+ fire:
+	set_vtimer(VTIMER_MAX_SLICE);
+}
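+
+/*
+ * Where the 71 years come from: VTIMER_MAX_SLICE is on the order of
+ * 2^63 timer units, and 4096 units make one microsecond, so
+ *
+ *	2^63 / 4096 usec ~= 2.25 * 10^15 usec ~= 71 years
+ */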
+
+/*
+ * Sorted insertion into a timer list: the list is searched linearly
+ * until the first element with a larger expiry value is found.
+ */
+static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
+{
+	struct vtimer_list *event;
+
+	list_for_each_entry(event, head, entry) {
+		if (event->expires > timer->expires) {
+			list_add_tail(&timer->entry, &event->entry);
+			return;
+		}
+	}
+	list_add_tail(&timer->entry, head);
+}
+
+/*
+ * Do the callback functions of expired vtimer events.
+ * Called from within the interrupt handler.
+ */
+static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
+{
+	struct vtimer_queue *vt_list;
+	struct vtimer_list *event, *tmp;
+	void (*fn)(unsigned long, struct pt_regs*);
+	unsigned long data;
+
+	if (list_empty(cb_list))
+		return;
+
+	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+
+	list_for_each_entry_safe(event, tmp, cb_list, entry) {
+		fn = event->function;
+		data = event->data;
+		fn(data, regs);
+
+		if (!event->interval)
+			/* delete one-shot timer */
+			list_del_init(&event->entry);
+		else {
+			/* move interval timer back to list */
+			spin_lock(&vt_list->lock);
+			list_del_init(&event->entry);
+			list_add_sorted(event, &vt_list->list);
+			spin_unlock(&vt_list->lock);
+		}
+	}
+}
+
+/*
+ * Handler for the virtual CPU timer.
+ */
+static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
+{
+	int cpu;
+	__u64 next, delta;
+	struct vtimer_queue *vt_list;
+	struct vtimer_list *event, *tmp;
+	struct list_head *ptr;
+	/* the callback queue */
+	struct list_head cb_list;
+
+	INIT_LIST_HEAD(&cb_list);
+	cpu = smp_processor_id();
+	vt_list = &per_cpu(virt_cpu_timer, cpu);
+
+	/* walk timer list, fire all expired events */
+	spin_lock(&vt_list->lock);
+
+	if (vt_list->to_expire < VTIMER_MAX_SLICE)
+		vt_list->offset += vt_list->to_expire;
+
+	list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
+		if (event->expires > vt_list->offset)
+			/* found first unexpired event, leave */
+			break;
+
+		/* re-charge interval timer, we have to add the offset */
+		if (event->interval)
+			event->expires = event->interval + vt_list->offset;
+
+		/* move expired timer to the callback queue */
+		list_move_tail(&event->entry, &cb_list);
+	}
+	spin_unlock(&vt_list->lock);
+	do_callbacks(&cb_list, regs);
+
+	/* next event is first in list */
+	spin_lock(&vt_list->lock);
+	if (!list_empty(&vt_list->list)) {
+		ptr = vt_list->list.next;
+		event = list_entry(ptr, struct vtimer_list, entry);
+		next = event->expires - vt_list->offset;
+
+		/*
+		 * Account the time that elapsed in this interrupt
+		 * handler and in the callback functions that just ran.
+		 */
+		asm volatile ("STPT %0" : "=m" (delta));
+		delta = 0xffffffffffffffffLL - delta + 1;
+		vt_list->offset += delta;
+		next -= delta;
+	} else {
+		vt_list->offset = 0;
+		next = VTIMER_MAX_SLICE;
+	}
+	spin_unlock(&vt_list->lock);
+	set_vtimer(next);
+}
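+
+/*
+ * The offset bookkeeping above in a nutshell: expiry values in the
+ * list are relative to a common base, and vt_list->offset accumulates
+ * the time that has already elapsed since that base.  A small worked
+ * example (values in timer units):
+ *
+ *	slice programmed with to_expire = 100, offset = 0
+ *	interrupt fires:          offset += 100, so offset = 100
+ *	event with expires = 80:  80 <= 100, its callback runs
+ *	event with expires = 250: next = 250 - 100 = 150 is programmed
+ */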
+
+void init_virt_timer(struct vtimer_list *timer)
+{
+	timer->magic = VTIMER_MAGIC;
+	timer->function = NULL;
+	INIT_LIST_HEAD(&timer->entry);
+	spin_lock_init(&timer->lock);
+}
+EXPORT_SYMBOL(init_virt_timer);
+
+static inline int check_vtimer(struct vtimer_list *timer)
+{
+	if (timer->magic != VTIMER_MAGIC)
+		return -EINVAL;
+	return 0;
+}
+
+static inline int vtimer_pending(struct vtimer_list *timer)
+{
+	return (!list_empty(&timer->entry));
+}
+
+/*
+ * This function must run on the CPU the timer belongs to (timer->cpu).
+ */
+static void internal_add_vtimer(struct vtimer_list *timer)
+{
+	unsigned long flags;
+	__u64 done;
+	struct vtimer_list *event;
+	struct vtimer_queue *vt_list;
+
+	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
+	spin_lock_irqsave(&vt_list->lock, flags);
+
+	if (timer->cpu != smp_processor_id())
+		printk("internal_add_vtimer: BUG, running on wrong CPU");
+
+	/* if list is empty we only have to set the timer */
+	if (list_empty(&vt_list->list)) {
+		/*
+		 * Reset the offset; the list can only be empty here if
+		 * the last timer was just deleted by mod_virt_timer and
+		 * the expiry interrupt has not fired yet.
+		 */
+		vt_list->offset = 0;
+		goto fire;
+	}
+
+	/* save progress */
+	asm volatile ("STPT %0" : "=m" (done));
+
+	/* calculate completed work */
+	done = vt_list->to_expire - done + vt_list->offset;
+	vt_list->offset = 0;
+
+	list_for_each_entry(event, &vt_list->list, entry)
+		event->expires -= done;
+
+ fire:
+	list_add_sorted(timer, &vt_list->list);
+
+	/* get first element, which is the next vtimer slice */
+	event = list_entry(vt_list->list.next, struct vtimer_list, entry);
+
+	set_vtimer(event->expires);
+	spin_unlock_irqrestore(&vt_list->lock, flags);
+	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
+	put_cpu();
+}
+
+static inline int prepare_vtimer(struct vtimer_list *timer)
+{
+	if (check_vtimer(timer) || !timer->function) {
+		printk("add_virt_timer: uninitialized timer\n");
+		return -EINVAL;
+	}
+
+	if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
+		printk("add_virt_timer: invalid timer expire value!\n");
+		return -EINVAL;
+	}
+
+	if (vtimer_pending(timer)) {
+		printk("add_virt_timer: timer pending\n");
+		return -EBUSY;
+	}
+
+	timer->cpu = get_cpu();
+	return 0;
+}
+
+/*
+ * add_virt_timer - add a one-shot virtual CPU timer
+ */
+void add_virt_timer(void *new)
+{
+	struct vtimer_list *timer;
+
+	timer = (struct vtimer_list *)new;
+
+	if (prepare_vtimer(timer) < 0)
+		return;
+
+	timer->interval = 0;
+	internal_add_vtimer(timer);
+}
+EXPORT_SYMBOL(add_virt_timer);
+
+/*
+ * add_virt_timer_periodic - add an interval virtual CPU timer
+ */
+void add_virt_timer_periodic(void *new)
+{
+	struct vtimer_list *timer;
+
+	timer = (struct vtimer_list *)new;
+
+	if (prepare_vtimer(timer) < 0)
+		return;
+
+	timer->interval = timer->expires;
+	internal_add_vtimer(timer);
+}
+EXPORT_SYMBOL(add_virt_timer_periodic);
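+
+/*
+ * Typical usage from kernel code (a sketch; my_vtimer and my_vtimer_fn
+ * are illustrative names, and expiry values are in timer units, so
+ * 1ULL << 32 is about one second):
+ *
+ *	static void my_vtimer_fn(unsigned long data, struct pt_regs *regs)
+ *	{
+ *		... runs from the 0x1005 interrupt handler ...
+ *	}
+ *
+ *	static struct vtimer_list my_vtimer;
+ *
+ *	init_virt_timer(&my_vtimer);
+ *	my_vtimer.function = my_vtimer_fn;
+ *	my_vtimer.data = 0;
+ *	my_vtimer.expires = 1ULL << 32;
+ *	add_virt_timer_periodic(&my_vtimer);
+ */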
+
+/*
+ * To modify a pending timer, this function must be called on the CPU
+ * the timer is pending on, e.g. via smp_call_function_on().
+ *
+ * Like the original mod_timer, it adds the timer if it is not pending;
+ * in that case the timer is added on the current CPU as a one-shot timer.
+ *
+ * Returns whether it modified a pending timer (1) or not (0).
+ */
+int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
+{
+	struct vtimer_queue *vt_list;
+	unsigned long flags;
+	int cpu;
+
+	if (check_vtimer(timer) || !timer->function) {
+		printk("mod_virt_timer: uninitialized timer\n");
+		return -EINVAL;
+	}
+
+	if (!expires || expires > VTIMER_MAX_SLICE) {
+		printk("mod_virt_timer: invalid expire range\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * This is a common optimization triggered by the
+	 * networking code - if the timer is re-modified
+	 * to be the same thing then just return:
+	 */
+	if (timer->expires == expires && vtimer_pending(timer))
+		return 1;
+
+	cpu = get_cpu();
+	vt_list = &per_cpu(virt_cpu_timer, cpu);
+
+	/* disable interrupts before test if timer is pending */
+	spin_lock_irqsave(&vt_list->lock, flags);
+
+	/* if timer isn't pending add it on the current CPU */
+	if (!vtimer_pending(timer)) {
+		spin_unlock_irqrestore(&vt_list->lock, flags);
+		/* we do not activate an interval timer with mod_virt_timer */
+		timer->interval = 0;
+		timer->expires = expires;
+		timer->cpu = cpu;
+		internal_add_vtimer(timer);
+		return 0;
+	}
+
+	/* check if we run on the right CPU */
+	if (timer->cpu != cpu) {
+		printk("mod_virt_timer: running on wrong CPU, check your code\n");
+		spin_unlock_irqrestore(&vt_list->lock, flags);
+		put_cpu();
+		return -EINVAL;
+	}
+
+	list_del_init(&timer->entry);
+	timer->expires = expires;
+
+	/* also change the interval if we have an interval timer */
+	if (timer->interval)
+		timer->interval = expires;
+
+	/* the timer can't expire anymore so we can release the lock */
+	spin_unlock_irqrestore(&vt_list->lock, flags);
+	internal_add_vtimer(timer);
+	return 1;
+}
+EXPORT_SYMBOL(mod_virt_timer);
+
+/*
+ * delete a virtual timer
+ *
+ * returns whether the deleted timer was pending (1) or not (0)
+ */
+int del_virt_timer(struct vtimer_list *timer)
+{
+	unsigned long flags;
+	struct vtimer_queue *vt_list;
+
+	if (check_vtimer(timer)) {
+		printk("del_virt_timer: timer not initialized\n");
+		return -EINVAL;
+	}
+
+	/* check if timer is pending */
+	if (!vtimer_pending(timer))
+		return 0;
+
+	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
+	spin_lock_irqsave(&vt_list->lock, flags);
+
+	/* we don't interrupt a running timer, just let it expire! */
+	list_del_init(&timer->entry);
+
+	/* last timer removed */
+	if (list_empty(&vt_list->list)) {
+		vt_list->to_expire = 0;
+		vt_list->offset = 0;
+	}
+
+	spin_unlock_irqrestore(&vt_list->lock, flags);
+	return 1;
+}
+EXPORT_SYMBOL(del_virt_timer);
+
+/*
+ * Start the virtual CPU timer on the current CPU.
+ */
+void init_cpu_vtimer(void)
+{
+	struct vtimer_queue *vt_list;
+	unsigned long cr0;
+
+	/* kick the virtual timer */
+	S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
+	S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
+	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
+	asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
+	__ctl_store(cr0, 0, 0);
+	cr0 |= 0x400;	/* set the cpu timer external subclass mask in CR0 */
+	__ctl_load(cr0, 0, 0);
+
+	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+	INIT_LIST_HEAD(&vt_list->list);
+	spin_lock_init(&vt_list->lock);
+	vt_list->to_expire = 0;
+	vt_list->offset = 0;
+	vt_list->idle = 0;
+}
+
+static int vtimer_idle_notify(struct notifier_block *self,
+			      unsigned long action, void *hcpu)
+{
+	switch (action) {
+	case CPU_IDLE:
+		stop_cpu_timer();
+		break;
+	case CPU_NOT_IDLE:
+		start_cpu_timer();
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block vtimer_idle_nb = {
+	.notifier_call = vtimer_idle_notify,
+};
+
+void __init vtime_init(void)
+{
+	/* request the cpu timer external interrupt */
+	if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
+					      &ext_int_info_timer) != 0)
+		panic("Couldn't request external interrupt 0x1005");
+
+	if (register_idle_notifier(&vtimer_idle_nb))
+		panic("Couldn't register idle notifier");
+
+	init_cpu_vtimer();
+}
+