author		Linus Torvalds <torvalds@linux-foundation.org>	2009-06-12 18:18:05 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-12 18:18:05 -0700
commit		d645727bdc2aed8e2e0e9496248f735481b5049a (patch)
tree		079fa3cf369dbf0dc0663fe1b0a62460c522a8e9 /arch
parent		cd166bd0dde265a97dd9aa8e3451a2646d96d04b (diff)
parent		310d6b671588dd7695cbc0d09d02e41d94a42bed (diff)
download	linux-d645727bdc2aed8e2e0e9496248f735481b5049a.tar.gz
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (30 commits)
  [S390] wire up sys_perf_counter_open
  [S390] wire up sys_rt_tgsigqueueinfo
  [S390] ftrace: add system call tracer support
  [S390] ftrace: add function graph tracer support
  [S390] ftrace: add function trace mcount test support
  [S390] ftrace: add dynamic ftrace support
  [S390] kprobes: use probe_kernel_write
  [S390] maccess: arch specific probe_kernel_write() implementation
  [S390] maccess: add weak attribute to probe_kernel_write
  [S390] profile_tick called twice
  [S390] dasd: forward internal errors to dasd_sleep_on caller
  [S390] dasd: sync after async probe
  [S390] dasd: check_characteristics cleanup
  [S390] dasd: no High Performance FICON in 31-bit mode
  [S390] dcssblk: revert devt conversion
  [S390] qdio: fix access beyond ARRAY_SIZE of irq_ptr->{in,out}put_qs
  [S390] vmalloc: add vmalloc kernel parameter support
  [S390] uaccess: use might_fault() instead of might_sleep()
  [S390] 3270: lock dependency fixes
  [S390] 3270: do not register with tty_register_device
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig                    |  23
-rw-r--r--  arch/s390/include/asm/compat.h       |  19
-rw-r--r--  arch/s390/include/asm/cpu.h          |  32
-rw-r--r--  arch/s390/include/asm/cputime.h      |  19
-rw-r--r--  arch/s390/include/asm/ftrace.h       |  21
-rw-r--r--  arch/s390/include/asm/lowcore.h      |   9
-rw-r--r--  arch/s390/include/asm/pgtable.h      |   7
-rw-r--r--  arch/s390/include/asm/seccomp.h      |  16
-rw-r--r--  arch/s390/include/asm/spinlock.h     |  19
-rw-r--r--  arch/s390/include/asm/syscall.h      |   1
-rw-r--r--  arch/s390/include/asm/thread_info.h  |  12
-rw-r--r--  arch/s390/include/asm/uaccess.h      |  16
-rw-r--r--  arch/s390/include/asm/unistd.h       |   4
-rw-r--r--  arch/s390/kernel/Makefile            |   7
-rw-r--r--  arch/s390/kernel/compat_wrapper.S    |  17
-rw-r--r--  arch/s390/kernel/early.c             |   4
-rw-r--r--  arch/s390/kernel/entry.S             |   7
-rw-r--r--  arch/s390/kernel/entry64.S           |   7
-rw-r--r--  arch/s390/kernel/ftrace.c            | 260
-rw-r--r--  arch/s390/kernel/head.S              |  65
-rw-r--r--  arch/s390/kernel/kprobes.c           |  31
-rw-r--r--  arch/s390/kernel/mcount.S            | 212
-rw-r--r--  arch/s390/kernel/nmi.c               |   2
-rw-r--r--  arch/s390/kernel/process.c           |   3
-rw-r--r--  arch/s390/kernel/ptrace.c            |  23
-rw-r--r--  arch/s390/kernel/s390_ext.c          |   5
-rw-r--r--  arch/s390/kernel/sclp.S              | 327
-rw-r--r--  arch/s390/kernel/setup.c             |   2
-rw-r--r--  arch/s390/kernel/signal.c            |   3
-rw-r--r--  arch/s390/kernel/smp.c               |   3
-rw-r--r--  arch/s390/kernel/syscalls.S          |   2
-rw-r--r--  arch/s390/kernel/time.c              |   9
-rw-r--r--  arch/s390/kernel/vdso.c              |  19
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S       |   1
-rw-r--r--  arch/s390/kernel/vtime.c             |   2
-rw-r--r--  arch/s390/kvm/kvm-s390.c             |   2
-rw-r--r--  arch/s390/lib/spinlock.c             |  40
-rw-r--r--  arch/s390/mm/Makefile                |   2
-rw-r--r--  arch/s390/mm/fault.c                 |   3
-rw-r--r--  arch/s390/mm/maccess.c               |  61
-rw-r--r--  arch/s390/mm/mmap.c                  |  11
-rw-r--r--  arch/s390/mm/pgtable.c               |  16
42 files changed, 1173 insertions(+), 171 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2eca5fe0e75b..99dc3ded6b49 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -82,6 +82,11 @@ config S390
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_SYSCALL_WRAPPERS
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FTRACE_SYSCALLS
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_DEFAULT_NO_SPIN_MUTEXES
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
@@ -567,6 +572,24 @@ bool "s390 guest support for KVM (EXPERIMENTAL)"
 	  the KVM hypervisor. This will add detection for KVM as well as a
 	  virtio transport. If KVM is detected, the virtio console will be
 	  the default console.
+
+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	depends on PROC_FS
+	default y
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via /proc/<pid>/seccomp, it cannot be disabled
+	  and the task is only allowed to execute a few safe syscalls
+	  defined by each seccomp mode.
+
+	  If unsure, say Y.
+
 endmenu
 
 source "net/Kconfig"
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index de065b32381a..01a08020bc0e 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -5,6 +5,7 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/thread_info.h>
 
 #define PSW32_MASK_PER		0x40000000UL
 #define PSW32_MASK_DAT		0x04000000UL
@@ -163,12 +164,28 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
+#ifdef CONFIG_COMPAT
+
+static inline int is_compat_task(void)
+{
+	return test_thread_flag(TIF_31BIT);
+}
+
+#else
+
+static inline int is_compat_task(void)
+{
+	return 0;
+}
+
+#endif
+
 static inline void __user *compat_alloc_user_space(long len)
 {
 	unsigned long stack;
 
 	stack = KSTK_ESP(current);
-	if (test_thread_flag(TIF_31BIT))
+	if (is_compat_task())
 		stack &= 0x7fffffffUL;
 	return (void __user *) (stack - len);
 }
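
The new is_compat_task() wrapper replaces open-coded TIF_31BIT tests throughout this merge (process.c, ptrace.c, signal.c and vdso.c below). A sketch of the payoff at a hypothetical call site: with !CONFIG_COMPAT the helper is a constant 0, so the compiler drops the compat branch entirely.

    /* Hypothetical caller; the names here are illustrative, not from this merge. */
    static unsigned long pick_stack_limit(void)
    {
            if (is_compat_task())           /* constant 0 if !CONFIG_COMPAT */
                    return 0x7fffffffUL;    /* 31-bit address space */
            return TASK_SIZE;               /* full task size otherwise */
    }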
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
deleted file mode 100644
index d60a2eefb17b..000000000000
--- a/arch/s390/include/asm/cpu.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- *  include/asm-s390/cpu.h
- *
- *    Copyright IBM Corp. 2007
- *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#ifndef _ASM_S390_CPU_H_
-#define _ASM_S390_CPU_H_
-
-#include <linux/types.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
-
-struct s390_idle_data {
-	spinlock_t lock;
-	unsigned long long idle_count;
-	unsigned long long idle_enter;
-	unsigned long long idle_time;
-};
-
-DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
-
-void vtime_start_cpu(void);
-
-static inline void s390_idle_check(void)
-{
-	if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
-		vtime_start_cpu();
-}
-
-#endif /* _ASM_S390_CPU_H_ */
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 941384fbd39c..ec917d42ee6d 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -9,6 +9,9 @@
 #ifndef _S390_CPUTIME_H
 #define _S390_CPUTIME_H
 
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
 #include <asm/div64.h>
 
 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
@@ -174,8 +177,24 @@ cputime64_to_clock_t(cputime64_t cputime)
        return __div(cputime, 4096000000ULL / USER_HZ);
 }
 
+struct s390_idle_data {
+	spinlock_t lock;
+	unsigned long long idle_count;
+	unsigned long long idle_enter;
+	unsigned long long idle_time;
+};
+
+DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
+
+void vtime_start_cpu(void);
 cputime64_t s390_get_idle_time(int cpu);
 
 #define arch_idle_time(cpu) s390_get_idle_time(cpu)
 
+static inline void s390_idle_check(void)
+{
+	if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
+		vtime_start_cpu();
+}
+
 #endif /* _S390_CPUTIME_H */
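
The cputime unit used by this header is 2**-12 microseconds (see the comment near the top of the file), so one second is 4096 * 10**6 = 4096000000 units, which is where the divisor in cputime64_to_clock_t() comes from. A quick self-contained check, assuming USER_HZ = 100:

    #include <stdio.h>

    #define USER_HZ 100ULL  /* assumed; the kernel takes this from asm/param.h */

    int main(void)
    {
            unsigned long long one_second = 4096ULL * 1000000ULL;
            /* cputime64_to_clock_t(): cputime / (4096000000ULL / USER_HZ) */
            printf("%llu\n", one_second / (4096000000ULL / USER_HZ)); /* 100 */
            return 0;
    }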
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 5a5bc75e19d4..96c14a9102b8 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -2,7 +2,28 @@
 #define _ASM_S390_FTRACE_H
 
 #ifndef __ASSEMBLY__
+
 extern void _mcount(void);
+extern unsigned long ftrace_dyn_func;
+
+struct dyn_arch_ftrace { };
+
+#define MCOUNT_ADDR ((long)_mcount)
+
+#ifdef CONFIG_64BIT
+#define MCOUNT_OFFSET_RET 18
+#define MCOUNT_INSN_SIZE  24
+#define MCOUNT_OFFSET	  14
+#else
+#define MCOUNT_OFFSET_RET 26
+#define MCOUNT_INSN_SIZE  30
+#define MCOUNT_OFFSET	   8
 #endif
 
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	return addr - MCOUNT_OFFSET;
+}
+
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_FTRACE_H */
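
The compiler records the address of the mcount call itself, which sits MCOUNT_OFFSET bytes into the MCOUNT_INSN_SIZE block that dynamic ftrace patches; ftrace_call_adjust() rebases the record to the start of that block. Illustration with made-up addresses for CONFIG_64BIT:

    /* Hypothetical numbers (MCOUNT_OFFSET = 14, MCOUNT_INSN_SIZE = 24). */
    unsigned long mcount_record = 0x10000eUL;       /* call site as recorded */
    unsigned long block_start   = 0x10000eUL - 14;  /* ftrace_call_adjust() -> 0x100000 */
    /* bytes [0x100000, 0x100018) form the patchable mcount block */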
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 3aeca492b147..5046ad6b7a63 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -30,6 +30,7 @@
 #define __LC_SUBCHANNEL_NR		0x00ba
 #define __LC_IO_INT_PARM		0x00bc
 #define __LC_IO_INT_WORD		0x00c0
+#define __LC_STFL_FAC_LIST		0x00c8
 #define __LC_MCCK_CODE			0x00e8
 
 #define __LC_DUMP_REIPL			0x0e00
@@ -67,6 +68,7 @@
 #define __LC_CPUID			0x02b0
 #define __LC_INT_CLOCK			0x02c8
 #define __LC_MACHINE_FLAGS		0x02d8
+#define __LC_FTRACE_FUNC		0x02dc
 #define __LC_IRB			0x0300
 #define __LC_PFAULT_INTPARM		0x0080
 #define __LC_CPU_TIMER_SAVE_AREA	0x00d8
@@ -112,6 +114,7 @@
 #define __LC_INT_CLOCK			0x0340
 #define __LC_VDSO_PER_CPU		0x0350
 #define __LC_MACHINE_FLAGS		0x0358
+#define __LC_FTRACE_FUNC		0x0360
 #define __LC_IRB			0x0380
 #define __LC_PASTE			0x03c0
 #define __LC_PFAULT_INTPARM		0x11b8
@@ -280,7 +283,8 @@ struct _lowcore
 	__u64	int_clock;			/* 0x02c8 */
 	__u64	clock_comparator;		/* 0x02d0 */
 	__u32	machine_flags;			/* 0x02d8 */
-	__u8	pad_0x02dc[0x0300-0x02dc];	/* 0x02dc */
+	__u32	ftrace_func;			/* 0x02dc */
+	__u8	pad_0x02f0[0x0300-0x02f0];	/* 0x02f0 */
 
 	/* Interrupt response block */
 	__u8	irb[64];			/* 0x0300 */
@@ -385,7 +389,8 @@ struct _lowcore
 	__u64	clock_comparator;		/* 0x0348 */
 	__u64	vdso_per_cpu_data;		/* 0x0350 */
 	__u64	machine_flags;			/* 0x0358 */
-	__u8	pad_0x0360[0x0380-0x0360];	/* 0x0360 */
+	__u64	ftrace_func;			/* 0x0360 */
+	__u8	pad_0x0368[0x0380-0x0368];	/* 0x0368 */
 
 	/* Interrupt response block. */
 	__u8	irb[64];			/* 0x0380 */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5caddd4f7bed..60a7b1a1702f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -112,12 +112,15 @@ extern char empty_zero_page[PAGE_SIZE];
  * effect, this also makes sure that 64 bit module code cannot be used
  * as system call address.
  */
+
+extern unsigned long VMALLOC_START;
+
 #ifndef __s390x__
-#define VMALLOC_START	0x78000000UL
+#define VMALLOC_SIZE	(96UL << 20)
 #define VMALLOC_END	0x7e000000UL
 #define VMEM_MAP_END	0x80000000UL
 #else /* __s390x__ */
-#define VMALLOC_START	0x3e000000000UL
+#define VMALLOC_SIZE	(1UL << 30)
 #define VMALLOC_END	0x3e040000000UL
 #define VMEM_MAP_END	0x40000000000UL
 #endif /* __s390x__ */
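
VMALLOC_START changes from a compile-time constant to a variable so the "[S390] vmalloc: add vmalloc kernel parameter support" commit in this merge can size the area at boot. A hedged sketch of the implied computation (the actual parameter parsing is in a part of the merge not shown here; names and placement are assumptions):

    unsigned long VMALLOC_START;

    static void __init setup_vmalloc_start(unsigned long requested_size)
    {
            /* 31-bit values from the hunk above: end 0x7e000000, default 96 MiB */
            unsigned long size = requested_size ? requested_size : (96UL << 20);

            VMALLOC_START = 0x7e000000UL - size;    /* VMALLOC_END - size */
    }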
diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h
new file mode 100644
index 000000000000..781a9cf9b002
--- /dev/null
+++ b/arch/s390/include/asm/seccomp.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_S390_SECCOMP_H
+#define _ASM_S390_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read	__NR_read
+#define __NR_seccomp_write	__NR_write
+#define __NR_seccomp_exit	__NR_exit
+#define __NR_seccomp_sigreturn	__NR_sigreturn
+
+#define __NR_seccomp_read_32	__NR_read
+#define __NR_seccomp_write_32	__NR_write
+#define __NR_seccomp_exit_32	__NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+
+#endif	/* _ASM_S390_SECCOMP_H */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f3861b09ebb0..c9af0d19c7ab 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -122,8 +122,10 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 #define __raw_write_can_lock(x) ((x)->lock == 0)
 
 extern void _raw_read_lock_wait(raw_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
 extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
 extern void _raw_write_lock_wait(raw_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
 extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
@@ -134,6 +136,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		_raw_read_lock_wait(rw);
 }
 
+static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+{
+	unsigned int old;
+	old = rw->lock & 0x7fffffffU;
+	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+		_raw_read_lock_wait_flags(rw, flags);
+}
+
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned int old, cmp;
@@ -151,6 +161,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		_raw_write_lock_wait(rw);
 }
 
+static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+{
+	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+		_raw_write_lock_wait_flags(rw, flags);
+}
+
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
@@ -172,9 +188,6 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return _raw_write_trylock_retry(rw);
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 2429b87eb28d..e0a73d3eb837 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -12,6 +12,7 @@
 #ifndef _ASM_SYSCALL_H
 #define _ASM_SYSCALL_H	1
 
+#include <linux/sched.h>
 #include <asm/ptrace.h>
 
 static inline long syscall_get_nr(struct task_struct *task,
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 461f2abd2e6f..925bcc649035 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -83,14 +83,16 @@ static inline struct thread_info *current_thread_info(void)
 /*
  * thread information flags bit numbers
  */
-#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_RESTART_SVC		4	/* restart svc with new svc number */
-#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
 #define TIF_SINGLE_STEP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
+#define TIF_SYSCALL_TRACE	8	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
+#define TIF_SECCOMP		10	/* secure computing */
+#define TIF_SYSCALL_FTRACE	11	/* ftrace syscall instrumentation */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling 
 					   TIF_NEED_RESCHED */
@@ -99,15 +101,17 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK	20	/* restore signal mask in do_signal() */
 #define TIF_FREEZE		21	/* thread is freezing for suspend */
 
-#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_RESTART_SVC	(1<<TIF_RESTART_SVC)
-#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
+#define _TIF_SYSCALL_FTRACE	(1<<TIF_SYSCALL_FTRACE)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
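
The renumbering is not cosmetic: TIF_SYSCALL_TRACE, TIF_SYSCALL_AUDIT, TIF_SECCOMP and TIF_SYSCALL_FTRACE now all live in bits 8-11, i.e. in a single byte of the flags word, so the system-call entry code can test all four with one TM instruction (see the _TIF_SYSCALL mask and "tm __TI_flags+2" in the entry.S hunks below). Equivalent C sketch:

    /* Bits 8-11 shifted down into one byte, matching _TIF_SYSCALL in entry.S. */
    #define _TIF_SYSCALL_BYTE ((_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
                                _TIF_SECCOMP | _TIF_SYSCALL_FTRACE) >> 8)

    static int syscall_work_pending(unsigned long ti_flags)
    {
            /* what "tm __TI_flags+2(%r9),_TIF_SYSCALL" tests on 31-bit */
            return ((ti_flags >> 8) & 0xff) & _TIF_SYSCALL_BYTE;
    }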
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 0235970278f0..8377e91533d2 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -131,7 +131,7 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 
 #define put_user(x, ptr)					\
 ({								\
-	might_sleep();						\
+	might_fault();						\
 	__put_user(x, ptr);					\
 })
 
@@ -180,7 +180,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 
 #define get_user(x, ptr)					\
 ({								\
-	might_sleep();						\
+	might_fault();						\
 	__get_user(x, ptr);					\
 })
 
@@ -231,7 +231,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_WRITE, to, n))
 		n = __copy_to_user(to, from, n);
 	return n;
@@ -282,7 +282,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_READ, from, n))
 		n = __copy_from_user(to, from, n);
 	else
@@ -299,7 +299,7 @@ __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 static inline unsigned long __must_check
 copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (__access_ok(from,n) && __access_ok(to,n))
 		n = __copy_in_user(to, from, n);
 	return n;
@@ -312,7 +312,7 @@ static inline long __must_check
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
         long res = -EFAULT;
-        might_sleep();
+	might_fault();
         if (access_ok(VERIFY_READ, src, 1))
 		res = uaccess.strncpy_from_user(count, src, dst);
         return res;
@@ -321,7 +321,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
 static inline unsigned long
 strnlen_user(const char __user * src, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	return uaccess.strnlen_user(n, src);
 }
 
@@ -354,7 +354,7 @@ __clear_user(void __user *to, unsigned long n)
 static inline unsigned long __must_check
 clear_user(void __user *to, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_WRITE, to, n))
 		n = uaccess.clear_user(n, to);
 	return n;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index f0f19e6ace6c..c80602d7c880 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -267,7 +267,9 @@
 #define __NR_epoll_create1	327
 #define	__NR_preadv		328
 #define	__NR_pwritev		329
-#define NR_syscalls 330
+#define __NR_rt_tgsigqueueinfo	330
+#define __NR_perf_counter_open	331
+#define NR_syscalls 332
 
 /* 
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 228e3105ded7..c75ed43b1a18 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -3,8 +3,9 @@
 #
 
 ifdef CONFIG_FUNCTION_TRACER
-# Do not trace early boot code
+# Don't trace early setup code and tracing code
 CFLAGS_REMOVE_early.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
 #
@@ -22,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 obj-y	:=  bitmap.o traps.o time.o process.o base.o early.o setup.o \
 	    processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
 	    s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
-	    vdso.o vtime.o sysinfo.o nmi.o
+	    vdso.o vtime.o sysinfo.o nmi.o sclp.o
 
 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -41,6 +42,8 @@ obj-$(CONFIG_COMPAT)		+= compat_linux.o compat_signal.o \
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index fb38af6316bb..88a83366819f 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1823,3 +1823,20 @@ compat_sys_pwritev_wrapper:
 	llgfr	%r5,%r5			# u32
 	llgfr	%r6,%r6			# u32
 	jg	compat_sys_pwritev	# branch to system call
+
+	.globl	compat_sys_rt_tgsigqueueinfo_wrapper
+compat_sys_rt_tgsigqueueinfo_wrapper:
+	lgfr	%r2,%r2			# compat_pid_t
+	lgfr	%r3,%r3			# compat_pid_t
+	lgfr	%r4,%r4			# int
+	llgtr	%r5,%r5			# struct compat_siginfo *
+	jg	compat_sys_rt_tgsigqueueinfo	# branch to system call
+
+	.globl	sys_perf_counter_open_wrapper
+sys_perf_counter_open_wrapper:
+	llgtr	%r2,%r2			# const struct perf_counter_attr *
+	lgfr	%r3,%r3			# pid_t
+	lgfr	%r4,%r4			# int
+	lgfr	%r5,%r5			# int
+	llgfr	%r6,%r6			# unsigned long
+	jg	sys_perf_counter_open	# branch to system call
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cf09948faad6..fb263736826c 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -11,6 +11,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
+#include <linux/ftrace.h>
 #include <linux/lockdep.h>
 #include <linux/module.h>
 #include <linux/pfn.h>
@@ -410,5 +411,8 @@ void __init startup_init(void)
 	sclp_facilities_detect();
 	detect_memory_layout(memory_chunk);
 	S390_lowcore.machine_flags = machine_flags;
+#ifdef CONFIG_DYNAMIC_FTRACE
+	S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
+#endif
 	lockdep_on();
 }
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f3e275934213..c4c80a22bc1f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -53,6 +53,8 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
+_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
+		_TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8)
 
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
@@ -265,7 +267,7 @@ sysc_do_restart:
 	sth	%r7,SP_SVCNR(%r15)
 	sll	%r7,2		  # svc number *4
 	l	%r8,BASED(.Lsysc_table)
-	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+	tm	__TI_flags+2(%r9),_TIF_SYSCALL
 	l	%r8,0(%r7,%r8)	  # get system call addr.
 	bnz	BASED(sysc_tracesys)
 	basr	%r14,%r8	  # call sys_xxxx
@@ -405,7 +407,7 @@ sysc_tracego:
 	basr	%r14,%r8		# call sys_xxx
 	st	%r2,SP_R2(%r15)		# store return value
 sysc_tracenogo:
-	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+	tm	__TI_flags+2(%r9),_TIF_SYSCALL
 	bz	BASED(sysc_return)
 	l	%r1,BASED(.Ltrace_exit)
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
@@ -1107,6 +1109,7 @@ cleanup_io_leave_insn:
 
 		.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esa
+	.globl	sys_call_table
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 84a105838e03..f6618e9e15ef 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -56,6 +56,8 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
+_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
+		_TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8)
 
 #define BASED(name) name-system_call(%r13)
 
@@ -260,7 +262,7 @@ sysc_do_restart:
 	larl	%r10,sys_call_table_emu  # use 31 bit emulation system calls
 sysc_noemu:
 #endif
-	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+	tm	__TI_flags+6(%r9),_TIF_SYSCALL
 	lgf	%r8,0(%r7,%r10) # load address of system call routine
 	jnz	sysc_tracesys
 	basr	%r14,%r8	# call sys_xxxx
@@ -391,7 +393,7 @@ sysc_tracego:
 	basr	%r14,%r8		# call sys_xxx
 	stg	%r2,SP_R2(%r15)		# store return value
 sysc_tracenogo:
-	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+	tm	__TI_flags+6(%r9),_TIF_SYSCALL
 	jz	sysc_return
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	larl	%r14,sysc_return	# return point is sysc_return
@@ -1058,6 +1060,7 @@ cleanup_io_leave_insn:
 
 		.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esame
+	.globl	sys_call_table
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
new file mode 100644
index 000000000000..82ddfd3a75af
--- /dev/null
+++ b/arch/s390/kernel/ftrace.c
@@ -0,0 +1,260 @@
+/*
+ * Dynamic function tracer architecture backend.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <linux/hardirq.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <trace/syscall.h>
+#include <asm/lowcore.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+void ftrace_disable_code(void);
+void ftrace_disable_return(void);
+void ftrace_call_code(void);
+void ftrace_nop_code(void);
+
+#define FTRACE_INSN_SIZE 4
+
+#ifdef CONFIG_64BIT
+
+asm(
+	"	.align	4\n"
+	"ftrace_disable_code:\n"
+	"	j	0f\n"
+	"	.word	0x0024\n"
+	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
+	"	basr	%r14,%r1\n"
+	"ftrace_disable_return:\n"
+	"	lg	%r14,8(15)\n"
+	"	lgr	%r0,%r0\n"
+	"0:\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_nop_code:\n"
+	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_call_code:\n"
+	"	stg	%r14,8(%r15)\n");
+
+#else /* CONFIG_64BIT */
+
+asm(
+	"	.align	4\n"
+	"ftrace_disable_code:\n"
+	"	j	0f\n"
+	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
+	"	basr	%r14,%r1\n"
+	"ftrace_disable_return:\n"
+	"	l	%r14,4(%r15)\n"
+	"	j	0f\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"	bcr	0,%r7\n"
+	"0:\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_nop_code:\n"
+	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+
+asm(
+	"	.align	4\n"
+	"ftrace_call_code:\n"
+	"	st	%r14,4(%r15)\n");
+
+#endif /* CONFIG_64BIT */
+
+static int ftrace_modify_code(unsigned long ip,
+			      void *old_code, int old_size,
+			      void *new_code, int new_size)
+{
+	unsigned char replaced[MCOUNT_INSN_SIZE];
+
+	/*
+	 * Note: Due to modules, code can disappear and change.
+	 *  We need to protect against faulting as well as code
+	 *  changing. We do this by using the probe_kernel_*
+	 *  functions.
+	 *  This however is just a simple sanity check.
+	 */
+	if (probe_kernel_read(replaced, (void *)ip, old_size))
+		return -EFAULT;
+	if (memcmp(replaced, old_code, old_size) != 0)
+		return -EINVAL;
+	if (probe_kernel_write((void *)ip, new_code, new_size))
+		return -EPERM;
+	return 0;
+}
+
+static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
+				   unsigned long addr)
+{
+	return ftrace_modify_code(rec->ip,
+				  ftrace_call_code, FTRACE_INSN_SIZE,
+				  ftrace_disable_code, MCOUNT_INSN_SIZE);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+		    unsigned long addr)
+{
+	if (addr == MCOUNT_ADDR)
+		return ftrace_make_initial_nop(mod, rec, addr);
+	return ftrace_modify_code(rec->ip,
+				  ftrace_call_code, FTRACE_INSN_SIZE,
+				  ftrace_nop_code, FTRACE_INSN_SIZE);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	return ftrace_modify_code(rec->ip,
+				  ftrace_nop_code, FTRACE_INSN_SIZE,
+				  ftrace_call_code, FTRACE_INSN_SIZE);
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	ftrace_dyn_func = (unsigned long)func;
+	return 0;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	*(unsigned long *)data = 0;
+	return 0;
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Patch the kernel code at ftrace_graph_caller location:
+ * The instruction there is branch relative on condition. The condition mask
+ * is either all ones (always branch aka disable ftrace_graph_caller) or all
+ * zeroes (nop aka enable ftrace_graph_caller).
+ * Instruction format for brc is a7m4xxxx where m is the condition mask.
+ */
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned short opcode = 0xa704;
+
+	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned short opcode = 0xa7f4;
+
+	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
+}
+
+static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
+{
+	return addr - (ftrace_disable_return - ftrace_disable_code);
+}
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
+{
+	return addr - MCOUNT_OFFSET_RET;
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addresses
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
+{
+	struct ftrace_graph_ent trace;
+
+	/* NMIs are currently unsupported. */
+	if (unlikely(in_nmi()))
+		goto out;
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		goto out;
+	if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
+		goto out;
+	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
+	/* Only trace if the calling function expects to. */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		goto out;
+	}
+	parent = (unsigned long)return_to_handler;
+out:
+	return parent;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned long __start_syscalls_metadata[];
+extern unsigned long __stop_syscalls_metadata[];
+extern unsigned int sys_call_table[];
+
+static struct syscall_metadata **syscalls_metadata;
+
+struct syscall_metadata *syscall_nr_to_meta(int nr)
+{
+	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
+		return NULL;
+
+	return syscalls_metadata[nr];
+}
+
+static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
+{
+	struct syscall_metadata *start;
+	struct syscall_metadata *stop;
+	char str[KSYM_SYMBOL_LEN];
+
+	start = (struct syscall_metadata *)__start_syscalls_metadata;
+	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
+
+	for ( ; start < stop; start++) {
+		if (start->name && !strcmp(start->name + 3, str + 3))
+			return start;
+	}
+	return NULL;
+}
+
+void arch_init_ftrace_syscalls(void)
+{
+	struct syscall_metadata *meta;
+	int i;
+	static atomic_t refs;
+
+	if (atomic_inc_return(&refs) != 1)
+		goto out;
+	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
+				    GFP_KERNEL);
+	if (!syscalls_metadata)
+		goto out;
+	for (i = 0; i < NR_syscalls; i++) {
+		meta = find_syscall_meta((unsigned long)sys_call_table[i]);
+		syscalls_metadata[i] = meta;
+	}
+	return;
+out:
+	atomic_dec(&refs);
+}
+#endif
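
The two magic halfwords in ftrace_enable_ftrace_graph_caller() and ftrace_disable_ftrace_graph_caller() follow directly from the instruction format quoted in the comment (0xa7m4xxxx, m = condition mask). A sketch that makes the encoding explicit:

    /* Build the first halfword of "brc mask,target". */
    static unsigned short brc_halfword(unsigned int cond_mask)
    {
            return 0xa704 | ((cond_mask & 0xf) << 4);
    }
    /* brc_halfword(0x0) == 0xa704: branch never taken (nop) -> graph caller enabled
     * brc_halfword(0xf) == 0xa7f4: branch always taken      -> graph caller disabled */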
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 22596d70fc2e..6d227413cbe7 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -1,7 +1,5 @@
 /*
- *  arch/s390/kernel/head.S
- *
- * Copyright (C) IBM Corp. 1999,2006
+ * Copyright IBM Corp. 1999,2009
  *
  *    Author(s): Hartmut Penner <hp@de.ibm.com>
  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -479,27 +477,58 @@ startup:basr	%r13,0			# get base
 	mvc	__LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
 	mvc	__LC_EXIT_TIMER(8),5f-.LPG0(%r13)
 #ifndef CONFIG_MARCH_G5
-	# check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10}
-	stidp	__LC_CPUID		# store cpuid
-	lhi	%r0,(3f-2f) / 2
-	la	%r1,2f-.LPG0(%r13)
-0:	clc	__LC_CPUID+4(2),0(%r1)
-	jne	3f
-	lpsw	1f-.LPG0(13)		# machine type not good enough, crash
+	# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
+	xc	__LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
+	stfl	__LC_STFL_FAC_LIST	# store facility list
+	tm	__LC_STFL_FAC_LIST,0x01	# stfle available ?
+	jz	0f
+	la	%r0,0
+	.insn	s,0xb2b00000,__LC_STFL_FAC_LIST	# store facility list extended
+0:	l	%r0,__LC_STFL_FAC_LIST
+	n	%r0,2f+8-.LPG0(%r13)
+	cl	%r0,2f+8-.LPG0(%r13)
+	jne	1f
+	l	%r0,__LC_STFL_FAC_LIST+4
+	n	%r0,2f+12-.LPG0(%r13)
+	cl	%r0,2f+12-.LPG0(%r13)
+	je	3f
+1:	l	%r15,.Lstack-.LPG0(%r13)
+	ahi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
+	ahi	%r15,-96
+	la	%r2,.Lals_string-.LPG0(%r13)
+	l	%r3,.Lsclp_print-.LPG0(%r13)
+	basr	%r14,%r3
+	lpsw	2f-.LPG0(%r13)		# machine type not good enough, crash
+.Lals_string:
+	.asciz	"The Linux kernel requires more recent processor hardware"
+.Lsclp_print:
+	.long	_sclp_print_early
+.Lstack:
+	.long	init_thread_union
 	.align 16
-1:	.long	0x000a0000,0x00000000
-2:
+2:	.long	0x000a0000,0x8badcccc
+#if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_Z10)
-	.short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096
+	.long 0xc100efe3, 0xf0680000
 #elif defined(CONFIG_MARCH_Z9_109)
-	.short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086
+	.long 0xc100efc3, 0x00000000
 #elif defined(CONFIG_MARCH_Z990)
-	.short 0x9672, 0x2064, 0x2066
+	.long 0xc0002000, 0x00000000
 #elif defined(CONFIG_MARCH_Z900)
-	.short 0x9672
+	.long 0xc0000000, 0x00000000
+#endif
+#else
+#if defined(CONFIG_MARCH_Z10)
+	.long 0x8100c880, 0x00000000
+#elif defined(CONFIG_MARCH_Z9_109)
+	.long 0x8100c880, 0x00000000
+#elif defined(CONFIG_MARCH_Z990)
+	.long 0x80002000, 0x00000000
+#elif defined(CONFIG_MARCH_Z900)
+	.long 0x80000000, 0x00000000
+#endif
 #endif
-3:	la	%r1,2(%r1)
-	brct	%r0,0b
+3:
 #endif
 
 	l	%r13,4f-.LPG0(%r13)
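
The new startup check stores the facility list (stfl, plus stfle where available) and compares it against a per-CONFIG_MARCH_* mask. The n/cl instruction pairs implement "every required facility bit is present"; in C:

    /* (facilities & required) == required, done per 32-bit word in head.S. */
    static int facilities_sufficient(unsigned int fac, unsigned int required)
    {
            return (fac & required) == required;
    }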
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index a01cf0284db2..9bb2f6241d9f 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -25,9 +25,9 @@
 #include <linux/preempt.h>
 #include <linux/stop_machine.h>
 #include <linux/kdebug.h>
+#include <linux/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
-#include <asm/uaccess.h>
 #include <linux/module.h>
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -155,35 +155,8 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
 static int __kprobes swap_instruction(void *aref)
 {
 	struct ins_replace_args *args = aref;
-	u32 *addr;
-	u32 instr;
-	int err = -EFAULT;
 
-	/*
-	 * Text segment is read-only, hence we use stura to bypass dynamic
-	 * address translation to exchange the instruction. Since stura
-	 * always operates on four bytes, but we only want to exchange two
-	 * bytes do some calculations to get things right. In addition we
-	 * shall not cross any page boundaries (vmalloc area!) when writing
-	 * the new instruction.
-	 */
-	addr = (u32 *)((unsigned long)args->ptr & -4UL);
-	if ((unsigned long)args->ptr & 2)
-		instr = ((*addr) & 0xffff0000) | args->new;
-	else
-		instr = ((*addr) & 0x0000ffff) | args->new << 16;
-
-	asm volatile(
-		"	lra	%1,0(%1)\n"
-		"0:	stura	%2,%1\n"
-		"1:	la	%0,0\n"
-		"2:\n"
-		EX_TABLE(0b,2b)
-		: "+d" (err)
-		: "a" (addr), "d" (instr)
-		: "memory", "cc");
-
-	return err;
+	return probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
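
swap_instruction() shrinks to a single probe_kernel_write() call because the new arch-specific maccess code (arch/s390/mm/maccess.c in the diffstat) handles the read-only text segment. A hedged sketch of how the unchanged caller drives it, under stop_machine() so no other CPU executes the bytes being swapped (the call site is not part of this hunk; BREAKPOINT_INSTRUCTION is the s390 kprobe opcode):

    static void example_arm_kprobe(struct kprobe *p)
    {
            struct ins_replace_args args = {
                    .ptr = p->addr,
                    .new = BREAKPOINT_INSTRUCTION,
            };

            /* serialize against all CPUs while the opcode is exchanged */
            stop_machine(swap_instruction, &args, NULL);
    }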
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 80641224a095..2a0a5e97ba8c 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -1,5 +1,5 @@
 /*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
  *
  *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
  *
@@ -7,36 +7,64 @@
 
 #include <asm/asm-offsets.h>
 
-#ifndef CONFIG_64BIT
-.globl _mcount
+	.globl ftrace_stub
+ftrace_stub:
+	br	%r14
+
+#ifdef CONFIG_64BIT
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+	.globl _mcount
 _mcount:
-	stm	%r0,%r5,8(%r15)
-	st	%r14,56(%r15)
-	lr	%r1,%r15
-	ahi	%r15,-96
-	l	%r3,100(%r15)
-	la	%r2,0(%r14)
-	st	%r1,__SF_BACKCHAIN(%r15)
-	la	%r3,0(%r3)
-	bras	%r14,0f
-	.long	ftrace_trace_function
-0:	l	%r14,0(%r14)
-	l	%r14,0(%r14)
-	basr	%r14,%r14
-	ahi	%r15,96
-	lm	%r0,%r5,8(%r15)
-	l	%r14,56(%r15)
 	br	%r14
 
-.globl ftrace_stub
-ftrace_stub:
+	.globl ftrace_caller
+ftrace_caller:
+	larl	%r1,function_trace_stop
+	icm	%r1,0xf,0(%r1)
+	bnzr	%r14
+	stmg	%r2,%r5,32(%r15)
+	stg	%r14,112(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-160
+	stg	%r1,__SF_BACKCHAIN(%r15)
+	lgr	%r2,%r14
+	lg	%r3,168(%r15)
+	larl	%r14,ftrace_dyn_func
+	lg	%r14,0(%r14)
+	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl	ftrace_graph_caller
+ftrace_graph_caller:
+	# This unconditional branch gets runtime patched. Change only if
+	# you know what you are doing. See ftrace_enable_ftrace_graph_caller().
+	j	0f
+	lg	%r2,272(%r15)
+	lg	%r3,168(%r15)
+	brasl	%r14,prepare_ftrace_return
+	stg	%r2,168(%r15)
+0:
+#endif
+	aghi	%r15,160
+	lmg	%r2,%r5,32(%r15)
+	lg	%r14,112(%r15)
 	br	%r14
 
-#else /* CONFIG_64BIT */
+	.data
+	.globl	ftrace_dyn_func
+ftrace_dyn_func:
+	.quad	ftrace_stub
+	.previous
+
+#else /* CONFIG_DYNAMIC_FTRACE */
 
-.globl _mcount
+	.globl _mcount
 _mcount:
-	stmg	%r0,%r5,16(%r15)
+	larl	%r1,function_trace_stop
+	icm	%r1,0xf,0(%r1)
+	bnzr	%r14
+	stmg	%r2,%r5,32(%r15)
 	stg	%r14,112(%r15)
 	lgr	%r1,%r15
 	aghi	%r15,-160
@@ -46,13 +74,143 @@ _mcount:
 	larl	%r14,ftrace_trace_function
 	lg	%r14,0(%r14)
 	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	lg	%r2,272(%r15)
+	lg	%r3,168(%r15)
+	brasl	%r14,prepare_ftrace_return
+	stg	%r2,168(%r15)
+#endif
 	aghi	%r15,160
-	lmg	%r0,%r5,16(%r15)
+	lmg	%r2,%r5,32(%r15)
 	lg	%r14,112(%r15)
 	br	%r14
 
-.globl ftrace_stub
-ftrace_stub:
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+	.globl	return_to_handler
+return_to_handler:
+	stmg	%r2,%r5,32(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-160
+	stg	%r1,__SF_BACKCHAIN(%r15)
+	brasl	%r14,ftrace_return_to_handler
+	aghi	%r15,160
+	lgr	%r14,%r2
+	lmg	%r2,%r5,32(%r15)
+	br	%r14
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#else /* CONFIG_64BIT */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+	.globl _mcount
+_mcount:
+	br	%r14
+
+	.globl ftrace_caller
+ftrace_caller:
+	stm	%r2,%r5,16(%r15)
+	bras	%r1,2f
+0:	.long	ftrace_trace_function
+1:	.long	function_trace_stop
+2:	l	%r2,1b-0b(%r1)
+	icm	%r2,0xf,0(%r2)
+	jnz	3f
+	st	%r14,56(%r15)
+	lr	%r0,%r15
+	ahi	%r15,-96
+	l	%r3,100(%r15)
+	la	%r2,0(%r14)
+	st	%r0,__SF_BACKCHAIN(%r15)
+	la	%r3,0(%r3)
+	l	%r14,0b-0b(%r1)
+	l	%r14,0(%r14)
+	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl	ftrace_graph_caller
+ftrace_graph_caller:
+	# This unconditional branch gets runtime patched. Change only if
+	# you know what you are doing. See ftrace_enable_ftrace_graph_caller().
+	j	1f
+	bras	%r1,0f
+	.long	prepare_ftrace_return
+0:	l	%r2,152(%r15)
+	l	%r4,0(%r1)
+	l	%r3,100(%r15)
+	basr	%r14,%r4
+	st	%r2,100(%r15)
+1:
+#endif
+	ahi	%r15,96
+	l	%r14,56(%r15)
+3:	lm	%r2,%r5,16(%r15)
 	br	%r14
 
+	.data
+	.globl	ftrace_dyn_func
+ftrace_dyn_func:
+	.long	ftrace_stub
+	.previous
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+	.globl _mcount
+_mcount:
+	stm	%r2,%r5,16(%r15)
+	bras	%r1,2f
+0:	.long	ftrace_trace_function
+1:	.long	function_trace_stop
+2:	l	%r2,1b-0b(%r1)
+	icm	%r2,0xf,0(%r2)
+	jnz	3f
+	st	%r14,56(%r15)
+	lr	%r0,%r15
+	ahi	%r15,-96
+	l	%r3,100(%r15)
+	la	%r2,0(%r14)
+	st	%r0,__SF_BACKCHAIN(%r15)
+	la	%r3,0(%r3)
+	l	%r14,0b-0b(%r1)
+	l	%r14,0(%r14)
+	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	bras	%r1,0f
+	.long	prepare_ftrace_return
+0:	l	%r2,152(%r15)
+	l	%r4,0(%r1)
+	l	%r3,100(%r15)
+	basr	%r14,%r4
+	st	%r2,100(%r15)
+#endif
+	ahi	%r15,96
+	l	%r14,56(%r15)
+3:	lm	%r2,%r5,16(%r15)
+	br	%r14
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+	.globl	return_to_handler
+return_to_handler:
+	stm	%r2,%r5,16(%r15)
+	st	%r14,56(%r15)
+	lr	%r0,%r15
+	ahi	%r15,-96
+	st	%r0,__SF_BACKCHAIN(%r15)
+	bras	%r1,0f
+	.long	ftrace_return_to_handler
+0:	l	%r2,0b-0b(%r1)
+	basr	%r14,%r2
+	lr	%r14,%r2
+	ahi	%r15,96
+	lm	%r2,%r5,16(%r15)
+	br	%r14
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 #endif /* CONFIG_64BIT */
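
Control flow implemented by the new mcount.S code, in hedged pseudo-C: on function entry prepare_ftrace_return() may swap the saved return address for return_to_handler, which on function exit asks ftrace_return_to_handler() for the real address it pushed aside.

    /* Pseudo-C of the hook; ra_slot stands in for the stack save slot
     * (168(%r15) in the 64-bit assembler above). */
    static void on_traced_function_entry(unsigned long ip, unsigned long *ra_slot)
    {
            *ra_slot = prepare_ftrace_return(ip, *ra_slot);
            /* now == (unsigned long)return_to_handler if tracing engaged;
             * return_to_handler later branches to ftrace_return_to_handler()'s
             * result, restoring %r2-%r5 around the call */
    }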
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 28cf196ba775..015e27da40eb 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -16,7 +16,7 @@
 #include <asm/lowcore.h>
 #include <asm/smp.h>
 #include <asm/etr.h>
-#include <asm/cpu.h>
+#include <asm/cputime.h>
 #include <asm/nmi.h>
 #include <asm/crw.h>
 
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index a3acd8e60aff..355f7a30c3f1 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -32,6 +32,7 @@
 #include <linux/elfcore.h>
 #include <linux/kernel_stat.h>
 #include <linux/syscalls.h>
+#include <asm/compat.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -204,7 +205,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	save_fp_regs(&p->thread.fp_regs);
 	/* Set a new TLS ?  */
 	if (clone_flags & CLONE_SETTLS) {
-		if (test_thread_flag(TIF_31BIT)) {
+		if (is_compat_task()) {
 			p->thread.acrs[0] = (unsigned int) regs->gprs[6];
 		} else {
 			p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 75c496f4f16d..490b39934d65 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -36,7 +36,9 @@
 #include <linux/elf.h>
 #include <linux/regset.h>
 #include <linux/tracehook.h>
-
+#include <linux/seccomp.h>
+#include <trace/syscall.h>
+#include <asm/compat.h>
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -69,7 +71,7 @@ FixPerRegisters(struct task_struct *task)
 	if (per_info->single_step) {
 		per_info->control_regs.bits.starting_addr = 0;
 #ifdef CONFIG_COMPAT
-		if (test_thread_flag(TIF_31BIT))
+		if (is_compat_task())
 			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
 		else
 #endif
@@ -482,8 +484,7 @@ static int peek_user_compat(struct task_struct *child,
 {
 	__u32 tmp;
 
-	if (!test_thread_flag(TIF_31BIT) ||
-	    (addr & 3) || addr > sizeof(struct user) - 3)
+	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
 		return -EIO;
 
 	tmp = __peek_user_compat(child, addr);
@@ -584,8 +585,7 @@ static int __poke_user_compat(struct task_struct *child,
 static int poke_user_compat(struct task_struct *child,
 			    addr_t addr, addr_t data)
 {
-	if (!test_thread_flag(TIF_31BIT) ||
-	    (addr & 3) || addr > sizeof(struct user32) - 3)
+	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3)
 		return -EIO;
 
 	return __poke_user_compat(child, addr, data);
@@ -642,6 +642,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 {
 	long ret;
 
+	/* Do the secure computing check first. */
+	secure_computing(regs->gprs[2]);
+
 	/*
 	 * The sysc_tracesys code in entry.S stored the system
 	 * call number to gprs[2].
@@ -659,8 +662,11 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 		ret = -1;
 	}
 
+	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
+		ftrace_syscall_enter(regs);
+
 	if (unlikely(current->audit_context))
-		audit_syscall_entry(test_thread_flag(TIF_31BIT) ?
+		audit_syscall_entry(is_compat_task() ?
 					AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
 				    regs->gprs[2], regs->orig_gpr2,
 				    regs->gprs[3], regs->gprs[4],
@@ -674,6 +680,9 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
 		audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
 				   regs->gprs[2]);
 
+	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
+		ftrace_syscall_exit(regs);
+
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, 0);
 }
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index a0d2d55d7fb3..0de305b598ce 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -10,10 +10,11 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/ftrace.h>
 #include <linux/errno.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-#include <asm/cpu.h>
+#include <asm/cputime.h>
 #include <asm/lowcore.h>
 #include <asm/s390_ext.h>
 #include <asm/irq_regs.h>
@@ -112,7 +113,7 @@ int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
 	return 0;
 }
 
-void do_extint(struct pt_regs *regs, unsigned short code)
+void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
 {
         ext_int_info_t *p;
         int index;
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
new file mode 100644
index 000000000000..20639dfe0c42
--- /dev/null
+++ b/arch/s390/kernel/sclp.S
@@ -0,0 +1,327 @@
+/*
+ * Mini SCLP driver.
+ *
+ * Copyright IBM Corp. 2004,2009
+ *
+ *   Author(s):	Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>,
+ *		Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+LC_EXT_NEW_PSW		= 0x58			# addr of ext int handler
+LC_EXT_INT_PARAM	= 0x80			# addr of ext int parameter
+LC_EXT_INT_CODE		= 0x86			# addr of ext int code
+
+#
+# Subroutine which waits synchronously until either an external interruption
+# or a timeout occurs.
+#
+# Parameters:
+#   R2	= 0 for no timeout, non-zero for timeout in (approximated) seconds
+#
+# Returns:
+#   R2	= 0 on interrupt, 2 on timeout
+#   R3	= external interruption parameter if R2=0
+#
+
+.section ".init.text","ax"
+
+_sclp_wait_int:
+	stm	%r6,%r15,24(%r15)		# save registers
+	basr	%r13,0				# get base register
+.LbaseS1:
+	ahi	%r15,-96			# create stack frame
+	la	%r8,LC_EXT_NEW_PSW		# register int handler
+	mvc	.LoldpswS1-.LbaseS1(8,%r13),0(%r8)
+	mvc	0(8,%r8),.LextpswS1-.LbaseS1(%r13)
+	lhi	%r6,0x0200			# cr mask for ext int (cr0.54)
+	ltr	%r2,%r2
+	jz	.LsetctS1
+	ahi	%r6,0x0800			# cr mask for clock int (cr0.52)
+	stck	.LtimeS1-.LbaseS1(%r13)		# initiate timeout
+	al	%r2,.LtimeS1-.LbaseS1(%r13)
+	st	%r2,.LtimeS1-.LbaseS1(%r13)
+	sckc	.LtimeS1-.LbaseS1(%r13)
+
+.LsetctS1:
+	stctl	%c0,%c0,.LctlS1-.LbaseS1(%r13)	# enable required interrupts
+	l	%r0,.LctlS1-.LbaseS1(%r13)
+	lhi	%r1,~(0x200 | 0x800)		# clear old values
+	nr	%r1,%r0
+	or	%r1,%r6				# set new value
+	st	%r1,.LctlS1-.LbaseS1(%r13)
+	lctl	%c0,%c0,.LctlS1-.LbaseS1(%r13)
+	st	%r0,.LctlS1-.LbaseS1(%r13)
+	lhi	%r2,2				# return code for timeout
+.LloopS1:
+	lpsw	.LwaitpswS1-.LbaseS1(%r13)	# wait until interrupt
+.LwaitS1:
+	lh	%r7,LC_EXT_INT_CODE
+	chi	%r7,0x1004			# timeout?
+	je	.LtimeoutS1
+	chi	%r7,0x2401			# service int?
+	jne	.LloopS1
+	sr	%r2,%r2
+	l	%r3,LC_EXT_INT_PARAM
+.LtimeoutS1:
+	lctl	%c0,%c0,.LctlS1-.LbaseS1(%r13)	# restore interrupt setting
+	# restore old handler
+	mvc	0(8,%r8),.LoldpswS1-.LbaseS1(%r13)
+	lm	%r6,%r15,120(%r15)		# restore registers
+	br	%r14				# return to caller
+
+	.align	8
+.LoldpswS1:
+	.long	0, 0				# old ext int PSW
+.LextpswS1:
+	.long	0x00080000, 0x80000000+.LwaitS1	# PSW to handle ext int
+.LwaitpswS1:
+	.long	0x010a0000, 0x00000000+.LloopS1	# PSW to wait for ext int
+.LtimeS1:
+	.quad	0				# current time
+.LctlS1:
+	.long	0				# CR0 contents
+
+#
+# Subroutine to synchronously issue a service call.
+#
+# Parameters:
+#   R2	= command word
+#   R3	= sccb address
+#
+# Returns:
+#   R2	= 0 on success, 1 on failure
+#   R3	= sccb response code if R2 = 0
+#
+
+_sclp_servc:
+	stm	%r6,%r15,24(%r15)		# save registers
+	ahi	%r15,-96			# create stack frame
+	lr	%r6,%r2				# save command word
+	lr	%r7,%r3				# save sccb address
+.LretryS2:
+	lhi	%r2,1				# error return code
+	.insn	rre,0xb2200000,%r6,%r7		# servc
+	brc	1,.LendS2			# exit if not operational
+	brc	8,.LnotbusyS2			# go on if not busy
+	sr	%r2,%r2				# wait until no longer busy
+	bras	%r14,_sclp_wait_int
+	j	.LretryS2			# retry
+.LnotbusyS2:
+	sr	%r2,%r2				# wait until result
+	bras	%r14,_sclp_wait_int
+	sr	%r2,%r2
+	lh	%r3,6(%r7)
+.LendS2:
+	lm	%r6,%r15,120(%r15)		# restore registers
+	br	%r14
+
+#
+# Subroutine to set up the SCLP interface.
+#
+# Parameters:
+#   R2	= 0 to activate, non-zero to deactivate
+#
+# Returns:
+#   R2	= 0 on success, non-zero on failure
+#
+
+_sclp_setup:
+	stm	%r6,%r15,24(%r15)		# save registers
+	ahi	%r15,-96			# create stack frame
+	basr	%r13,0				# get base register
+.LbaseS3:
+	l	%r6,.LsccbS0-.LbaseS3(%r13)	# prepare init mask sccb
+	mvc	0(.LinitendS3-.LinitsccbS3,%r6),.LinitsccbS3-.LbaseS3(%r13)
+	ltr	%r2,%r2				# initialization?
+	jz	.LdoinitS3			# go ahead
+	# clear masks
+	xc	.LinitmaskS3-.LinitsccbS3(8,%r6),.LinitmaskS3-.LinitsccbS3(%r6)
+.LdoinitS3:
+	l	%r2,.LwritemaskS3-.LbaseS3(%r13)# get command word
+	lr	%r3,%r6				# get sccb address
+	bras	%r14,_sclp_servc		# issue service call
+	ltr	%r2,%r2				# servc successful?
+	jnz	.LerrorS3
+	chi	%r3,0x20			# write mask successful?
+	jne	.LerrorS3
+	# check masks
+	la	%r2,.LinitmaskS3-.LinitsccbS3(%r6)
+	l	%r1,0(%r2)			# receive mask ok?
+	n	%r1,12(%r2)
+	cl	%r1,0(%r2)
+	jne	.LerrorS3
+	l	%r1,4(%r2)			# send mask ok?
+	n	%r1,8(%r2)
+	cl	%r1,4(%r2)
+	sr	%r2,%r2
+	je	.LendS3
+.LerrorS3:
+	lhi	%r2,1				# error return code
+.LendS3:
+	lm	%r6,%r15,120(%r15)		# restore registers
+	br	%r14
+.LwritemaskS3:
+	.long	0x00780005			# SCLP command for write mask
+.LinitsccbS3:
+	.word	.LinitendS3-.LinitsccbS3
+	.byte	0,0,0,0
+	.word	0
+	.word	0
+	.word	4
+.LinitmaskS3:
+	.long	0x80000000
+	.long	0x40000000
+	.long	0
+	.long	0
+.LinitendS3:
+
+#
+# Subroutine which prints a given text to the SCLP console.
+#
+# Parameters:
+#   R2	= address of null-terminated ASCII text
+#
+# Returns:
+#   R2	= 0 on success, 1 on failure
+#
+
+_sclp_print:
+	stm	%r6,%r15,24(%r15)		# save registers
+	ahi	%r15,-96			# create stack frame
+	basr	%r13,0				# get base register
+.LbaseS4:
+	l	%r8,.LsccbS0-.LbaseS4(%r13)	# prepare write data sccb
+	mvc	0(.LmtoS4-.LwritesccbS4,%r8),.LwritesccbS4-.LbaseS4(%r13)
+	la	%r7,.LmtoS4-.LwritesccbS4(%r8)	# current mto addr
+	sr	%r0,%r0
+	l	%r10,.Lascebc-.LbaseS4(%r13)	# address of translation table
+.LinitmtoS4:
+	# initialize mto
+	mvc	0(.LmtoendS4-.LmtoS4,%r7),.LmtoS4-.LbaseS4(%r13)
+	lhi	%r6,.LmtoendS4-.LmtoS4		# current mto length
+.LloopS4:
+	ic	%r0,0(%r2)			# get character
+	ahi	%r2,1
+	ltr	%r0,%r0				# end of string?
+	jz	.LfinalizemtoS4
+	chi	%r0,0x15			# end of line (NL)?
+	jz	.LfinalizemtoS4
+	stc	%r0,0(%r6,%r7)			# copy to mto
+	la	%r11,0(%r6,%r7)
+	tr	0(1,%r11),0(%r10)		# translate to EBCDIC
+	ahi	%r6,1
+	j	.LloopS4
+.LfinalizemtoS4:
+	sth	%r6,0(%r7)			# update mto length
+	lh	%r9,.LmdbS4-.LwritesccbS4(%r8)	# update mdb length
+	ar	%r9,%r6
+	sth	%r9,.LmdbS4-.LwritesccbS4(%r8)
+	lh	%r9,.LevbufS4-.LwritesccbS4(%r8)# update evbuf length
+	ar	%r9,%r6
+	sth	%r9,.LevbufS4-.LwritesccbS4(%r8)
+	lh	%r9,0(%r8)			# update sccb length
+	ar	%r9,%r6
+	sth	%r9,0(%r8)
+	ar	%r7,%r6				# update current mto address
+	ltr	%r0,%r0				# more characters?
+	jnz	.LinitmtoS4
+	l	%r2,.LwritedataS4-.LbaseS4(%r13)# write data
+	lr	%r3,%r8
+	bras	%r14,_sclp_servc
+	ltr	%r2,%r2				# servc successful?
+	jnz	.LendS4
+	chi	%r3,0x20			# write data successful?
+	je	.LendS4
+	lhi	%r2,1				# error return code
+.LendS4:
+	lm	%r6,%r15,120(%r15)		# restore registers
+	br	%r14
+
+#
+# Function which prints a given text to the SCLP console.
+#
+# Parameters:
+#   R2	= address of null-terminated ASCII text
+#
+# Returns:
+#   R2	= 0 on success, 1 on failure
+#
+
+	.globl _sclp_print_early
+_sclp_print_early:
+	stm	%r6,%r15,24(%r15)		# save registers
+	ahi	%r15,-96			# create stack frame
+	lr	%r10,%r2			# save string pointer
+	lhi	%r2,0
+	bras	%r14,_sclp_setup		# enable console
+	ltr	%r2,%r2
+	jnz	.LendS5
+	lr	%r2,%r10
+	bras	%r14,_sclp_print		# print string
+	ltr	%r2,%r2
+	jnz	.LendS5
+	lhi	%r2,1
+	bras	%r14,_sclp_setup		# disable console
+.LendS5:
+	lm	%r6,%r15,120(%r15)		# restore registers
+	br	%r14
+
+.LwritedataS4:
+	.long	0x00760005			# SCLP command for write data
+.LwritesccbS4:
+	# sccb
+	.word	.LmtoS4-.LwritesccbS4
+	.byte	0
+	.byte	0,0,0
+	.word	0
+
+	# evbuf
+.LevbufS4:
+	.word	.LmtoS4-.LevbufS4
+	.byte	0x02
+	.byte	0
+	.word	0
+
+.LmdbS4:
+	# mdb
+	.word	.LmtoS4-.LmdbS4
+	.word	1
+	.long	0xd4c4c240
+	.long	1
+
+	# go
+.LgoS4:
+	.word	.LmtoS4-.LgoS4
+	.word	1
+	.long	0
+	.byte	0,0,0,0,0,0,0,0
+	.byte	0,0,0
+	.byte	0
+	.byte	0,0,0,0,0,0,0
+	.byte	0
+	.word	0
+	.byte	0,0,0,0,0,0,0,0,0,0
+	.byte	0,0,0,0,0,0,0,0
+	.byte	0,0,0,0,0,0,0,0
+
+.LmtoS4:
+	.word	.LmtoendS4-.LmtoS4
+	.word	4
+	.word	0x1000
+	.byte	0
+	.byte	0,0,0
+.LmtoendS4:
+
+	# Global constants
+.LsccbS0:
+	.long	_sclp_work_area
+.Lascebc:
+	.long	_ascebc
+.previous
+
+.section ".init.data","a"
+	.balign 4096
+_sclp_work_area:
+	.fill	4096
+.previous
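
head.S above calls _sclp_print_early from assembler before the kernel proper is up; at the C level the mini driver amounts to one entry point. A hedged prototype and use (this merge itself adds no C declaration):

    /* R2 in = address of null-terminated ASCII text, R2 out = 0 on success. */
    extern int _sclp_print_early(const char *ascii);

    static void __init complain_and_stop(void)
    {
            _sclp_print_early("The Linux kernel requires more recent processor hardware");
            /* head.S then loads a disabled-wait PSW (code 0x8badcccc above) */
    }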
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7402b6a39ead..9717717c6fea 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -42,6 +42,7 @@
 #include <linux/ctype.h>
 #include <linux/reboot.h>
 #include <linux/topology.h>
+#include <linux/ftrace.h>
 
 #include <asm/ipl.h>
 #include <asm/uaccess.h>
@@ -442,6 +443,7 @@ setup_lowcore(void)
 	lc->steal_timer = S390_lowcore.steal_timer;
 	lc->last_update_timer = S390_lowcore.last_update_timer;
 	lc->last_update_clock = S390_lowcore.last_update_clock;
+	lc->ftrace_func = S390_lowcore.ftrace_func;
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
 }
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 3cf74c3ccb69..062bd64e65fa 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -26,6 +26,7 @@
 #include <linux/binfmts.h>
 #include <linux/tracehook.h>
 #include <linux/syscalls.h>
+#include <linux/compat.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/lowcore.h>
@@ -482,7 +483,7 @@ void do_signal(struct pt_regs *regs)
 		/* Whee!  Actually deliver the signal.  */
 		int ret;
 #ifdef CONFIG_COMPAT
-		if (test_thread_flag(TIF_31BIT)) {
+		if (is_compat_task()) {
 			ret = handle_signal32(signr, &ka, &info, oldset, regs);
 	        }
 		else
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a985a3ba4401..cc8c484984e3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -47,7 +47,7 @@
 #include <asm/timer.h>
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
-#include <asm/cpu.h>
+#include <asm/cputime.h>
 #include <asm/vdso.h>
 #include "entry.h"
 
@@ -572,6 +572,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	cpu_lowcore->cpu_nr = cpu;
 	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
 	cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
+	cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
 	eieio();
 
 	while (signal_processor(cpu, sigp_restart) == sigp_busy)
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 2c7739fe70b1..ad1acd200385 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -338,3 +338,5 @@ SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper)
 SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
 SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
 SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
+SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
+SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index ef596d020573..215330a2c128 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -70,7 +70,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-unsigned long long sched_clock(void)
+unsigned long long notrace sched_clock(void)
 {
 	return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
 }
@@ -95,12 +95,6 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
 	xtime->tv_nsec = ((todval * 1000) >> 12);
 }
 
-#ifdef CONFIG_PROFILING
-#define s390_do_profile()	profile_tick(CPU_PROFILING)
-#else
-#define s390_do_profile()	do { ; } while(0)
-#endif /* CONFIG_PROFILING */
-
 void clock_comparator_work(void)
 {
 	struct clock_event_device *cd;
@@ -109,7 +103,6 @@ void clock_comparator_work(void)
 	set_clock_comparator(S390_lowcore.clock_comparator);
 	cd = &__get_cpu_var(comparators);
 	cd->event_handler(cd);
-	s390_do_profile();
 }
 
 /*
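
The notrace sched_clock() conversion relies on the TOD clock format: one unit is 2**-12 microseconds, so nanoseconds = units * 1000 / 4096 = units * 125 / 512, which is the "* 125) >> 9" in the code. Quick check:

    unsigned long long one_second_tod = 4096ULL * 1000000ULL;   /* TOD units per second */
    unsigned long long ns = (one_second_tod * 125) >> 9;        /* == 1000000000 */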
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 89b2e7f1b7a9..45e1708b70fd 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -22,7 +22,7 @@
 #include <linux/elf.h>
 #include <linux/security.h>
 #include <linux/bootmem.h>
-
+#include <linux/compat.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
 #include <asm/processor.h>
@@ -53,8 +53,19 @@ unsigned int __read_mostly vdso_enabled = 1;
 
 static int __init vdso_setup(char *s)
 {
-	vdso_enabled = simple_strtoul(s, NULL, 0);
-	return 1;
+	unsigned long val;
+	int rc;
+
+	rc = 0;
+	if (strncmp(s, "on", 3) == 0)
+		vdso_enabled = 1;
+	else if (strncmp(s, "off", 4) == 0)
+		vdso_enabled = 0;
+	else {
+		rc = strict_strtoul(s, 0, &val);
+		vdso_enabled = rc ? 0 : !!val;
+	}
+	return !rc;
 }
 __setup("vdso=", vdso_setup);
 
@@ -203,7 +214,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	vdso_pagelist = vdso64_pagelist;
 	vdso_pages = vdso64_pages;
 #ifdef CONFIG_COMPAT
-	if (test_thread_flag(TIF_31BIT)) {
+	if (is_compat_task()) {
 		vdso_pagelist = vdso32_pagelist;
 		vdso_pages = vdso32_pages;
 	}
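Note: the rewritten vdso_setup() makes the parameter both self-documenting and strict. simple_strtoul() silently turned any garbage into 0 and thus disabled the vdso, whereas strict_strtoul() rejects non-numeric input, and the strncmp lengths include the terminating NUL, so only the exact words match. Accepted forms on the kernel command line:

	vdso=on		enable the vdso
	vdso=off	disable the vdso
	vdso=1		numeric form; any value other than 0 enables it

Returning !rc tells the __setup machinery whether the argument was recognized.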
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 89399b8756c2..a53db23ee092 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -34,6 +34,7 @@ SECTIONS
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
+		IRQENTRY_TEXT
 		*(.fixup)
 		*(.gnu.warning)
 	} :text = 0x0700
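Note: IRQENTRY_TEXT groups functions tagged __irq_entry into one delimited chunk of .text; the function graph tracer checks return addresses against the boundary symbols to tell interrupt entries apart from ordinary calls. The generic definition in include/asm-generic/vmlinux.lds.h looked roughly like this at the time:

	#define IRQENTRY_TEXT					\
		ALIGN_FUNCTION();				\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;	\
		*(.irqentry.text)				\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;

Without this line the section would land wherever the linker's default rules put it, outside the tracer's bounds check.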
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c87f59bd8246..c8eb7255332b 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -23,7 +23,7 @@
 #include <asm/s390_ext.h>
 #include <asm/timer.h>
 #include <asm/irq_regs.h>
-#include <asm/cpu.h>
+#include <asm/cputime.h>
 
 static ext_int_info_t ext_int_info_timer;
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 10bccd1f8aee..c18b21d6991c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -512,7 +512,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		BUG();
 	}
 
-	might_sleep();
+	might_fault();
 
 	do {
 		__vcpu_run(vcpu);
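Note: the vcpu run loop touches user-mapped memory, so the annotation is upgraded from might_sleep() to the stronger might_fault(), matching the uaccess change elsewhere in this merge. Under lockdep, might_fault() also asserts that taking mmap_sem for reading would be safe here; roughly, from mm/memory.c of that era:

	void might_fault(void)
	{
		might_sleep();
		if (current->mm)
			might_lock_read(&current->mm->mmap_sem);
	}

so a user access performed under a lock that can deadlock against the fault path is flagged even when no fault actually occurs.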
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e41f4008afc5..f7e0d30250b7 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -124,6 +124,27 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
+void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+{
+	unsigned int old;
+	int count = spin_retry;
+
+	local_irq_restore(flags);
+	while (1) {
+		if (count-- <= 0) {
+			_raw_yield();
+			count = spin_retry;
+		}
+		if (!__raw_read_can_lock(rw))
+			continue;
+		old = rw->lock & 0x7fffffffU;
+		local_irq_disable();
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+			return;
+	}
+}
+EXPORT_SYMBOL(_raw_read_lock_wait_flags);
+
 int _raw_read_trylock_retry(raw_rwlock_t *rw)
 {
 	unsigned int old;
@@ -157,6 +178,25 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
+void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+{
+	int count = spin_retry;
+
+	local_irq_restore(flags);
+	while (1) {
+		if (count-- <= 0) {
+			_raw_yield();
+			count = spin_retry;
+		}
+		if (!__raw_write_can_lock(rw))
+			continue;
+		local_irq_disable();
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+			return;
+	}
+}
+EXPORT_SYMBOL(_raw_write_lock_wait_flags);
+
 int _raw_write_trylock_retry(raw_rwlock_t *rw)
 {
 	int count = spin_retry;
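Note: the new *_lock_wait_flags() variants implement the irqsave slow path. The caller has already disabled interrupts and stashed the prior state in flags, so the retry loop first restores that state (normally re-enabling interrupts so they are not blocked for the whole wait) and disables them again immediately before each acquisition attempt, guaranteeing the lock is never taken with interrupts enabled. A sketch of the expected fast-path caller, assuming asm/spinlock.h wires it up along these lines:

	static inline void __raw_read_lock_flags(raw_rwlock_t *rw,
						 unsigned long flags)
	{
		unsigned int old = rw->lock & 0x7fffffffU;

		if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
			_raw_read_lock_wait_flags(rw, flags);
	}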
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 2a7458134544..db05661ac895 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-obj-y	 := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
+obj-y	 := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
 obj-$(CONFIG_CMM) += cmm.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_PAGE_STATES) += page-states.o
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 833e8366c351..220a152c836c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -19,6 +19,7 @@
 #include <linux/ptrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/compat.h>
 #include <linux/smp.h>
 #include <linux/kdebug.h>
 #include <linux/smp_lock.h>
@@ -239,7 +240,7 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
 	up_read(&mm->mmap_sem);
 	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
 #ifdef CONFIG_COMPAT
-	compat = test_tsk_thread_flag(current, TIF_31BIT);
+	compat = is_compat_task();
 	if (compat && instruction == 0x0a77)
 		sys32_sigreturn();
 	else if (compat && instruction == 0x0aad)
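Note: 0x0a is the s390 svc (supervisor call) opcode and its second byte is the system call number, so 0x0a77 is svc 119 (sys_sigreturn) and 0x0aad is svc 173 (sys_rt_sigreturn). The fault handler special-cases these two signal-trampoline instructions and runs the corresponding sigreturn itself rather than delivering SIGSEGV. A decode sketch:

	/* an svc instruction is 0x0a followed by the syscall number */
	static inline int svc_number(u16 instruction)
	{
		return (instruction >> 8) == 0x0a ? (instruction & 0xff) : -1;
	}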
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
new file mode 100644
index 000000000000..81756271dc44
--- /dev/null
+++ b/arch/s390/mm/maccess.c
@@ -0,0 +1,61 @@
+/*
+ * Access kernel memory without faulting -- s390-specific implementation.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+
+/*
+ * This function writes to kernel memory bypassing DAT and possible
+ * write protection. It copies one to four bytes from src to dst
+ * using the stura instruction.
+ * Returns the number of bytes copied or -EFAULT.
+ */
+static long probe_kernel_write_odd(void *dst, void *src, size_t size)
+{
+	unsigned long count, aligned;
+	int offset, mask;
+	int rc = -EFAULT;
+
+	aligned = (unsigned long) dst & ~3UL;
+	offset = (unsigned long) dst & 3;
+	count = min_t(unsigned long, 4 - offset, size);
+	mask = (0xf << (4 - count)) & 0xf;
+	mask >>= offset;
+	asm volatile(
+		"	bras	1,0f\n"
+		"	icm	0,0,0(%3)\n"
+		"0:	l	0,0(%1)\n"
+		"	lra	%1,0(%1)\n"
+		"1:	ex	%2,0(1)\n"
+		"2:	stura	0,%1\n"
+		"	la	%0,0\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
+		: "+d" (rc), "+a" (aligned)
+		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
+	return rc ? rc : count;
+}
+
+long probe_kernel_write(void *dst, void *src, size_t size)
+{
+	long copied = 0;
+
+	while (size) {
+		copied = probe_kernel_write_odd(dst, src, size);
+		if (copied < 0)
+			break;
+		dst += copied;
+		src += copied;
+		size -= copied;
+	}
+	return copied < 0 ? -EFAULT : 0;
+}
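Note: stura stores into real storage, bypassing dynamic address translation and page protection, but only one aligned 4-byte word at a time, hence the read-modify-write: the aligned destination word is loaded, the wanted bytes are merged in from src by executing the icm template under a computed mask (the ex instruction), lra turns the virtual address into a real one, and the EX_TABLE entries convert any exception into -EFAULT. probe_kernel_write() simply iterates that primitive across the buffer. Its consumers in this merge are kprobes and dynamic ftrace patching write-protected kernel text; a hypothetical caller:

	/* sketch: patch a 2-byte opcode in (possibly read-only) kernel text */
	static int patch_opcode(void *addr, u16 opcode)
	{
		/* returns 0 on success or -EFAULT on an invalid address */
		return probe_kernel_write(addr, &opcode, sizeof(opcode));
	}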
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index e008d236cc15..f4558ccf02b9 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -28,6 +28,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <asm/pgalloc.h>
+#include <asm/compat.h>
 
 /*
  * Top of mmap area (just below the process stack).
@@ -55,7 +56,7 @@ static inline int mmap_is_legacy(void)
 	/*
 	 * Force standard allocation for 64 bit programs.
 	 */
-	if (!test_thread_flag(TIF_31BIT))
+	if (!is_compat_task())
 		return 1;
 #endif
 	return sysctl_legacy_va_layout ||
@@ -91,7 +92,7 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
 
 int s390_mmap_check(unsigned long addr, unsigned long len)
 {
-	if (!test_thread_flag(TIF_31BIT) &&
+	if (!is_compat_task() &&
 	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
 		return crst_table_upgrade(current->mm, 1UL << 53);
 	return 0;
@@ -108,8 +109,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
 	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 	if (!(area & ~PAGE_MASK))
 		return area;
-	if (area == -ENOMEM &&
-	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
+	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
 		/* Upgrade the page table to 4 levels and retry. */
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
@@ -131,8 +131,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
 	if (!(area & ~PAGE_MASK))
 		return area;
-	if (area == -ENOMEM &&
-	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
+	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
 		/* Upgrade the page table to 4 levels and retry. */
 		rc = crst_table_upgrade(mm, 1UL << 53);
 		if (rc)
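Note: 64-bit processes start on a three-level page table, which addresses 4 TB (1UL << 42); a fourth level extends the reach to 8 PB (1UL << 53). When arch_get_unmapped_area() fails with -ENOMEM but the address space could still grow, crst_table_upgrade() adds the extra level and the search is retried. Compat tasks never qualify, since their 31-bit address space tops out at 2 GB. The two limits, with hypothetical names for illustration only:

	#define TASK_SIZE_3LEVEL	(1UL << 42)	/* 4 TB */
	#define TASK_SIZE_4LEVEL	(1UL << 53)	/* 8 PB */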
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index be6c1cf4ad5a..4ca8e826bf30 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1,7 +1,5 @@
 /*
- *  arch/s390/mm/pgtable.c
- *
- *    Copyright IBM Corp. 2007
+ *    Copyright IBM Corp. 2007,2009
  *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
@@ -53,6 +51,18 @@ void clear_table_pgstes(unsigned long *table)
 
 #endif
 
+unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
+EXPORT_SYMBOL(VMALLOC_START);
+
+static int __init parse_vmalloc(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
+	return 0;
+}
+early_param("vmalloc", parse_vmalloc);
+
 unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
 {
 	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
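Note: making VMALLOC_START a variable computed down from VMALLOC_END lets the size of the vmalloc area be chosen at boot. memparse() understands the usual K/M/G suffixes, so for example:

	vmalloc=1G	reserve 1 GB of address space for vmalloc

An empty value is rejected with -EINVAL, and the resulting start address is page-aligned before use.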