author     Heiko Carstens <heiko.carstens@de.ibm.com>      2006-09-28 16:56:37 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>     2006-09-28 16:56:37 +0200
commit     25d83cbfaa44e1b9170c0941c3ef52ca39f54ccc (patch)
tree       30d9bbc0a1051b837313edfafc40ffa6c5fbfedc
parent     52149ba6b0ddf3e9d965257cc0513193650b3ea8 (diff)
download   linux-25d83cbfaa44e1b9170c0941c3ef52ca39f54ccc.tar.gz
[S390] Whitespace cleanup.
Huge s390 assembly files whitespace cleanup.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--  arch/s390/kernel/compat_wrapper.S       442
-rw-r--r--  arch/s390/kernel/entry.S                469
-rw-r--r--  arch/s390/kernel/entry64.S              443
-rw-r--r--  arch/s390/kernel/head.S                 624
-rw-r--r--  arch/s390/kernel/head64.S               432
-rw-r--r--  arch/s390/kernel/reipl.S                 75
-rw-r--r--  arch/s390/kernel/reipl64.S               93
-rw-r--r--  arch/s390/kernel/relocate_kernel.S       74
-rw-r--r--  arch/s390/kernel/relocate_kernel64.S     82
9 files changed, 1364 insertions, 1370 deletions
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 4d53b2739357..4aabeeaa7cf7 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -4,97 +4,97 @@
 *
 *    Copyright (C) IBM Corp. 2000,2006
 *    Author(s): Gerhard Tonn (ton@de.ibm.com),
-*               Thomas Spatzier (tspat@de.ibm.com)
-*/ 
+*		Thomas Spatzier (tspat@de.ibm.com)
+*/
 
-	.globl  sys32_exit_wrapper 
+	.globl	sys32_exit_wrapper
 sys32_exit_wrapper:
 	lgfr	%r2,%r2			# int
 	jg	sys_exit		# branch to sys_exit
-    
-	.globl  sys32_read_wrapper 
+
+	.globl	sys32_read_wrapper
 sys32_read_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# char *
 	llgfr	%r4,%r4			# size_t
 	jg	sys32_read		# branch to sys_read
 
-	.globl  sys32_write_wrapper 
+	.globl	sys32_write_wrapper
 sys32_write_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# const char *
 	llgfr	%r4,%r4			# size_t
 	jg	sys32_write		# branch to system call
 
-	.globl  sys32_open_wrapper 
+	.globl	sys32_open_wrapper
 sys32_open_wrapper:
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	lgfr	%r4,%r4			# int
 	jg	sys_open		# branch to system call
 
-	.globl  sys32_close_wrapper 
+	.globl	sys32_close_wrapper
 sys32_close_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_close		# branch to system call
 
-	.globl  sys32_creat_wrapper 
+	.globl	sys32_creat_wrapper
 sys32_creat_wrapper:
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_creat		# branch to system call
 
-	.globl  sys32_link_wrapper 
+	.globl	sys32_link_wrapper
 sys32_link_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# const char *
 	jg	sys_link		# branch to system call
 
-	.globl  sys32_unlink_wrapper 
+	.globl	sys32_unlink_wrapper
 sys32_unlink_wrapper:
 	llgtr	%r2,%r2			# const char *
 	jg	sys_unlink		# branch to system call
 
-	.globl  sys32_chdir_wrapper 
+	.globl	sys32_chdir_wrapper
 sys32_chdir_wrapper:
 	llgtr	%r2,%r2			# const char *
 	jg	sys_chdir		# branch to system call
 
-	.globl  sys32_time_wrapper 
+	.globl	sys32_time_wrapper
 sys32_time_wrapper:
 	llgtr	%r2,%r2			# int *
 	jg	compat_sys_time		# branch to system call
 
-	.globl  sys32_mknod_wrapper 
+	.globl	sys32_mknod_wrapper
 sys32_mknod_wrapper:
 	llgtr	%r2,%r2			# const char *
-	lgfr	%r3,%r3			# int 
+	lgfr	%r3,%r3			# int
 	llgfr	%r4,%r4			# dev
 	jg	sys_mknod		# branch to system call
 
-	.globl  sys32_chmod_wrapper 
+	.globl	sys32_chmod_wrapper
 sys32_chmod_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# mode_t
 	jg	sys_chmod		# branch to system call
 
-	.globl  sys32_lchown16_wrapper 
+	.globl	sys32_lchown16_wrapper
 sys32_lchown16_wrapper:
 	llgtr	%r2,%r2			# const char *
-	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
-	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t 
+	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t
+	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t
 	jg	sys32_lchown16		# branch to system call
 
-	.globl  sys32_lseek_wrapper 
+	.globl	sys32_lseek_wrapper
 sys32_lseek_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	lgfr	%r3,%r3			# off_t
 	llgfr	%r4,%r4			# unsigned int
 	jg	sys_lseek		# branch to system call
 
-#sys32_getpid_wrapper				# void 
+#sys32_getpid_wrapper				# void
 
-	.globl  sys32_mount_wrapper 
+	.globl	sys32_mount_wrapper
 sys32_mount_wrapper:
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# char *
@@ -103,19 +103,19 @@ sys32_mount_wrapper:
 	llgtr	%r6,%r6			# void *
 	jg	compat_sys_mount	# branch to system call
 
-	.globl  sys32_oldumount_wrapper 
+	.globl	sys32_oldumount_wrapper
 sys32_oldumount_wrapper:
 	llgtr	%r2,%r2			# char *
 	jg	sys_oldumount		# branch to system call
 
-	.globl  sys32_setuid16_wrapper 
+	.globl	sys32_setuid16_wrapper
 sys32_setuid16_wrapper:
-	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
+	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
 	jg	sys32_setuid16		# branch to system call
 
-#sys32_getuid16_wrapper			# void 
+#sys32_getuid16_wrapper			# void
 
-	.globl  sys32_ptrace_wrapper 
+	.globl	sys32_ptrace_wrapper
 sys32_ptrace_wrapper:
 	lgfr	%r2,%r2			# long
 	lgfr	%r3,%r3			# long
@@ -123,168 +123,168 @@ sys32_ptrace_wrapper:
 	llgfr	%r5,%r5			# long
 	jg	sys_ptrace		# branch to system call
 
-	.globl  sys32_alarm_wrapper 
+	.globl	sys32_alarm_wrapper
 sys32_alarm_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_alarm		# branch to system call
 
-#sys32_pause_wrapper			# void 
+#sys32_pause_wrapper			# void
 
-	.globl  compat_sys_utime_wrapper 
+	.globl	compat_sys_utime_wrapper
 compat_sys_utime_wrapper:
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct compat_utimbuf *
 	jg	compat_sys_utime	# branch to system call
 
-	.globl  sys32_access_wrapper 
+	.globl	sys32_access_wrapper
 sys32_access_wrapper:
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_access		# branch to system call
 
-	.globl  sys32_nice_wrapper 
+	.globl	sys32_nice_wrapper
 sys32_nice_wrapper:
 	lgfr	%r2,%r2			# int
 	jg	sys_nice		# branch to system call
 
-#sys32_sync_wrapper			# void 
+#sys32_sync_wrapper			# void
 
-	.globl  sys32_kill_wrapper 
+	.globl	sys32_kill_wrapper
 sys32_kill_wrapper:
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	jg	sys_kill		# branch to system call
 
-	.globl  sys32_rename_wrapper 
+	.globl	sys32_rename_wrapper
 sys32_rename_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# const char *
 	jg	sys_rename		# branch to system call
 
-	.globl  sys32_mkdir_wrapper 
+	.globl	sys32_mkdir_wrapper
 sys32_mkdir_wrapper:
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_mkdir		# branch to system call
 
-	.globl  sys32_rmdir_wrapper 
+	.globl	sys32_rmdir_wrapper
 sys32_rmdir_wrapper:
 	llgtr	%r2,%r2			# const char *
 	jg	sys_rmdir		# branch to system call
 
-	.globl  sys32_dup_wrapper 
+	.globl	sys32_dup_wrapper
 sys32_dup_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_dup			# branch to system call
 
-	.globl  sys32_pipe_wrapper 
+	.globl	sys32_pipe_wrapper
 sys32_pipe_wrapper:
 	llgtr	%r2,%r2			# u32 *
 	jg	sys_pipe		# branch to system call
 
-	.globl  compat_sys_times_wrapper 
+	.globl	compat_sys_times_wrapper
 compat_sys_times_wrapper:
 	llgtr	%r2,%r2			# struct compat_tms *
 	jg	compat_sys_times	# branch to system call
 
-	.globl  sys32_brk_wrapper 
+	.globl	sys32_brk_wrapper
 sys32_brk_wrapper:
 	llgtr	%r2,%r2			# unsigned long
 	jg	sys_brk			# branch to system call
 
-	.globl  sys32_setgid16_wrapper 
+	.globl	sys32_setgid16_wrapper
 sys32_setgid16_wrapper:
-	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
+	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
 	jg	sys32_setgid16		# branch to system call
 
-#sys32_getgid16_wrapper			# void 
+#sys32_getgid16_wrapper			# void
 
 	.globl sys32_signal_wrapper
 sys32_signal_wrapper:
-	lgfr	%r2,%r2			# int 
+	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# __sighandler_t
 	jg	sys_signal
 
-#sys32_geteuid16_wrapper		# void 
+#sys32_geteuid16_wrapper		# void
 
-#sys32_getegid16_wrapper		# void 
+#sys32_getegid16_wrapper		# void
 
-	.globl  sys32_acct_wrapper 
+	.globl	sys32_acct_wrapper
 sys32_acct_wrapper:
 	llgtr	%r2,%r2			# char *
 	jg	sys_acct		# branch to system call
 
-	.globl  sys32_umount_wrapper 
+	.globl	sys32_umount_wrapper
 sys32_umount_wrapper:
 	llgtr	%r2,%r2			# char *
 	lgfr	%r3,%r3			# int
 	jg	sys_umount		# branch to system call
 
-	.globl  compat_sys_ioctl_wrapper
+	.globl	compat_sys_ioctl_wrapper
 compat_sys_ioctl_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	llgfr	%r4,%r4			# unsigned int
 	jg	compat_sys_ioctl	# branch to system call
 
-	.globl  compat_sys_fcntl_wrapper 
+	.globl	compat_sys_fcntl_wrapper
 compat_sys_fcntl_wrapper:
 	llgfr	%r2,%r2			# unsigned int
-	llgfr	%r3,%r3			# unsigned int 
+	llgfr	%r3,%r3			# unsigned int
 	llgfr	%r4,%r4			# unsigned long
 	jg	compat_sys_fcntl	# branch to system call
 
-	.globl  sys32_setpgid_wrapper 
+	.globl	sys32_setpgid_wrapper
 sys32_setpgid_wrapper:
 	lgfr	%r2,%r2			# pid_t
 	lgfr	%r3,%r3			# pid_t
 	jg	sys_setpgid		# branch to system call
 
-	.globl  sys32_umask_wrapper 
+	.globl	sys32_umask_wrapper
 sys32_umask_wrapper:
 	lgfr	%r2,%r2			# int
 	jg	sys_umask		# branch to system call
 
-	.globl  sys32_chroot_wrapper 
+	.globl	sys32_chroot_wrapper
 sys32_chroot_wrapper:
 	llgtr	%r2,%r2			# char *
 	jg	sys_chroot		# branch to system call
 
 	.globl sys32_ustat_wrapper
 sys32_ustat_wrapper:
-	llgfr	%r2,%r2			# dev_t 
+	llgfr	%r2,%r2			# dev_t
 	llgtr	%r3,%r3			# struct ustat *
 	jg	sys_ustat
 
-	.globl  sys32_dup2_wrapper 
+	.globl	sys32_dup2_wrapper
 sys32_dup2_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	jg	sys_dup2		# branch to system call
 
-#sys32_getppid_wrapper			# void 
+#sys32_getppid_wrapper			# void
 
-#sys32_getpgrp_wrapper			# void 
+#sys32_getpgrp_wrapper			# void
 
-#sys32_setsid_wrapper			# void 
+#sys32_setsid_wrapper			# void
 
-	.globl  sys32_sigaction_wrapper
+	.globl	sys32_sigaction_wrapper
 sys32_sigaction_wrapper:
-	lgfr	%r2,%r2			# int 
+	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct old_sigaction *
 	llgtr	%r4,%r4			# struct old_sigaction32 *
 	jg	sys32_sigaction		# branch to system call
 
-	.globl  sys32_setreuid16_wrapper 
+	.globl	sys32_setreuid16_wrapper
 sys32_setreuid16_wrapper:
-	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
-	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
+	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
+	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t
 	jg	sys32_setreuid16	# branch to system call
 
-	.globl  sys32_setregid16_wrapper 
+	.globl	sys32_setregid16_wrapper
 sys32_setregid16_wrapper:
-	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
-	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t 
+	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
+	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t
 	jg	sys32_setregid16	# branch to system call
 
 	.globl sys_sigsuspend_wrapper
@@ -294,95 +294,95 @@ sys_sigsuspend_wrapper:
 	llgfr	%r4,%r4			# old_sigset_t
 	jg	sys_sigsuspend
 
-	.globl  compat_sys_sigpending_wrapper 
+	.globl	compat_sys_sigpending_wrapper
 compat_sys_sigpending_wrapper:
 	llgtr	%r2,%r2			# compat_old_sigset_t *
 	jg	compat_sys_sigpending	# branch to system call
 
-	.globl  sys32_sethostname_wrapper 
+	.globl	sys32_sethostname_wrapper
 sys32_sethostname_wrapper:
 	llgtr	%r2,%r2			# char *
 	lgfr	%r3,%r3			# int
 	jg	sys_sethostname		# branch to system call
 
-	.globl  compat_sys_setrlimit_wrapper 
+	.globl	compat_sys_setrlimit_wrapper
 compat_sys_setrlimit_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct rlimit_emu31 *
 	jg	compat_sys_setrlimit	# branch to system call
 
-	.globl  compat_sys_old_getrlimit_wrapper 
+	.globl	compat_sys_old_getrlimit_wrapper
 compat_sys_old_getrlimit_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct rlimit_emu31 *
 	jg	compat_sys_old_getrlimit # branch to system call
 
-	.globl  compat_sys_getrlimit_wrapper 
+	.globl	compat_sys_getrlimit_wrapper
 compat_sys_getrlimit_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct rlimit_emu31 *
 	jg	compat_sys_getrlimit	# branch to system call
 
-	.globl  sys32_mmap2_wrapper 
+	.globl	sys32_mmap2_wrapper
 sys32_mmap2_wrapper:
 	llgtr	%r2,%r2			# struct mmap_arg_struct_emu31 *
 	jg	sys32_mmap2			# branch to system call
 
-	.globl  compat_sys_getrusage_wrapper 
+	.globl	compat_sys_getrusage_wrapper
 compat_sys_getrusage_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct rusage_emu31 *
 	jg	compat_sys_getrusage	# branch to system call
 
-	.globl  sys32_gettimeofday_wrapper 
+	.globl	sys32_gettimeofday_wrapper
 sys32_gettimeofday_wrapper:
 	llgtr	%r2,%r2			# struct timeval_emu31 *
 	llgtr	%r3,%r3			# struct timezone *
 	jg	sys32_gettimeofday	# branch to system call
 
-	.globl  sys32_settimeofday_wrapper 
+	.globl	sys32_settimeofday_wrapper
 sys32_settimeofday_wrapper:
 	llgtr	%r2,%r2			# struct timeval_emu31 *
 	llgtr	%r3,%r3			# struct timezone *
 	jg	sys32_settimeofday	# branch to system call
 
-	.globl  sys32_getgroups16_wrapper 
+	.globl	sys32_getgroups16_wrapper
 sys32_getgroups16_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
 	jg	sys32_getgroups16	# branch to system call
 
-	.globl  sys32_setgroups16_wrapper 
+	.globl	sys32_setgroups16_wrapper
 sys32_setgroups16_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
 	jg	sys32_setgroups16	# branch to system call
 
-	.globl  sys32_symlink_wrapper 
+	.globl	sys32_symlink_wrapper
 sys32_symlink_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# const char *
 	jg	sys_symlink		# branch to system call
 
-	.globl  sys32_readlink_wrapper 
+	.globl	sys32_readlink_wrapper
 sys32_readlink_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# char *
 	lgfr	%r4,%r4			# int
 	jg	sys_readlink		# branch to system call
 
-	.globl  sys32_uselib_wrapper 
+	.globl	sys32_uselib_wrapper
 sys32_uselib_wrapper:
 	llgtr	%r2,%r2			# const char *
 	jg	sys_uselib		# branch to system call
 
-	.globl  sys32_swapon_wrapper 
+	.globl	sys32_swapon_wrapper
 sys32_swapon_wrapper:
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_swapon		# branch to system call
 
-	.globl  sys32_reboot_wrapper 
+	.globl	sys32_reboot_wrapper
 sys32_reboot_wrapper:
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
@@ -390,121 +390,121 @@ sys32_reboot_wrapper:
 	llgtr	%r5,%r5			# void *
 	jg	sys_reboot		# branch to system call
 
-	.globl  old32_readdir_wrapper 
+	.globl	old32_readdir_wrapper
 old32_readdir_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# void *
 	llgfr	%r4,%r4			# unsigned int
 	jg	compat_sys_old_readdir	# branch to system call
 
-	.globl  old32_mmap_wrapper 
+	.globl	old32_mmap_wrapper
 old32_mmap_wrapper:
 	llgtr	%r2,%r2			# struct mmap_arg_struct_emu31 *
 	jg	old32_mmap		# branch to system call
 
-	.globl  sys32_munmap_wrapper 
+	.globl	sys32_munmap_wrapper
 sys32_munmap_wrapper:
 	llgfr	%r2,%r2			# unsigned long
-	llgfr	%r3,%r3			# size_t 
+	llgfr	%r3,%r3			# size_t
 	jg	sys_munmap		# branch to system call
 
-	.globl  sys32_truncate_wrapper 
+	.globl	sys32_truncate_wrapper
 sys32_truncate_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# unsigned long
 	jg	sys_truncate		# branch to system call
 
-	.globl  sys32_ftruncate_wrapper 
+	.globl	sys32_ftruncate_wrapper
 sys32_ftruncate_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned long
 	jg	sys_ftruncate		# branch to system call
 
-	.globl  sys32_fchmod_wrapper 
+	.globl	sys32_fchmod_wrapper
 sys32_fchmod_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# mode_t
 	jg	sys_fchmod		# branch to system call
 
-	.globl  sys32_fchown16_wrapper 
+	.globl	sys32_fchown16_wrapper
 sys32_fchown16_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# compat_uid_t
 	llgfr	%r4,%r4			# compat_uid_t
 	jg	sys32_fchown16		# branch to system call
 
-	.globl  sys32_getpriority_wrapper 
+	.globl	sys32_getpriority_wrapper
 sys32_getpriority_wrapper:
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	jg	sys_getpriority		# branch to system call
 
-	.globl  sys32_setpriority_wrapper 
+	.globl	sys32_setpriority_wrapper
 sys32_setpriority_wrapper:
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	lgfr	%r4,%r4			# int
 	jg	sys_setpriority		# branch to system call
 
-	.globl  compat_sys_statfs_wrapper 
+	.globl	compat_sys_statfs_wrapper
 compat_sys_statfs_wrapper:
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct compat_statfs *
 	jg	compat_sys_statfs	# branch to system call
 
-	.globl  compat_sys_fstatfs_wrapper 
+	.globl	compat_sys_fstatfs_wrapper
 compat_sys_fstatfs_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct compat_statfs *
 	jg	compat_sys_fstatfs	# branch to system call
 
-	.globl  compat_sys_socketcall_wrapper 
+	.globl	compat_sys_socketcall_wrapper
 compat_sys_socketcall_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# u32 *
 	jg	compat_sys_socketcall	# branch to system call
 
-	.globl  sys32_syslog_wrapper 
+	.globl	sys32_syslog_wrapper
 sys32_syslog_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# char *
 	lgfr	%r4,%r4			# int
 	jg	sys_syslog		# branch to system call
 
-	.globl  compat_sys_setitimer_wrapper 
+	.globl	compat_sys_setitimer_wrapper
 compat_sys_setitimer_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct itimerval_emu31 *
 	llgtr	%r4,%r4			# struct itimerval_emu31 *
 	jg	compat_sys_setitimer	# branch to system call
 
-	.globl  compat_sys_getitimer_wrapper 
+	.globl	compat_sys_getitimer_wrapper
 compat_sys_getitimer_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct itimerval_emu31 *
 	jg	compat_sys_getitimer	# branch to system call
 
-	.globl  compat_sys_newstat_wrapper 
+	.globl	compat_sys_newstat_wrapper
 compat_sys_newstat_wrapper:
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct stat_emu31 *
 	jg	compat_sys_newstat	# branch to system call
 
-	.globl  compat_sys_newlstat_wrapper 
+	.globl	compat_sys_newlstat_wrapper
 compat_sys_newlstat_wrapper:
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct stat_emu31 *
 	jg	compat_sys_newlstat	# branch to system call
 
-	.globl  compat_sys_newfstat_wrapper 
+	.globl	compat_sys_newfstat_wrapper
 compat_sys_newfstat_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct stat_emu31 *
 	jg	compat_sys_newfstat	# branch to system call
 
-#sys32_vhangup_wrapper			# void 
+#sys32_vhangup_wrapper			# void
 
-	.globl  compat_sys_wait4_wrapper 
+	.globl	compat_sys_wait4_wrapper
 compat_sys_wait4_wrapper:
 	lgfr	%r2,%r2			# pid_t
 	llgtr	%r3,%r3			# unsigned int *
@@ -512,17 +512,17 @@ compat_sys_wait4_wrapper:
 	llgtr	%r5,%r5			# struct rusage *
 	jg	compat_sys_wait4	# branch to system call
 
-	.globl  sys32_swapoff_wrapper 
+	.globl	sys32_swapoff_wrapper
 sys32_swapoff_wrapper:
 	llgtr	%r2,%r2			# const char *
 	jg	sys_swapoff		# branch to system call
 
-	.globl  sys32_sysinfo_wrapper 
+	.globl	sys32_sysinfo_wrapper
 sys32_sysinfo_wrapper:
 	llgtr	%r2,%r2			# struct sysinfo_emu31 *
 	jg	sys32_sysinfo		# branch to system call
 
-	.globl  sys32_ipc_wrapper 
+	.globl	sys32_ipc_wrapper
 sys32_ipc_wrapper:
 	llgfr	%r2,%r2			# uint
 	lgfr	%r3,%r3			# int
@@ -531,59 +531,59 @@ sys32_ipc_wrapper:
 	llgfr	%r6,%r6			# u32
 	jg	sys32_ipc		# branch to system call
 
-	.globl  sys32_fsync_wrapper 
+	.globl	sys32_fsync_wrapper
 sys32_fsync_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_fsync		# branch to system call
 
-#sys32_sigreturn_wrapper		# done in sigreturn_glue 
+#sys32_sigreturn_wrapper		# done in sigreturn_glue
 
-#sys32_clone_wrapper			# done in clone_glue 
+#sys32_clone_wrapper			# done in clone_glue
 
-	.globl  sys32_setdomainname_wrapper 
+	.globl	sys32_setdomainname_wrapper
 sys32_setdomainname_wrapper:
 	llgtr	%r2,%r2			# char *
 	lgfr	%r3,%r3			# int
 	jg	sys_setdomainname	# branch to system call
 
-	.globl  sys32_newuname_wrapper 
+	.globl	sys32_newuname_wrapper
 sys32_newuname_wrapper:
 	llgtr	%r2,%r2			# struct new_utsname *
 	jg	s390x_newuname		# branch to system call
 
-	.globl  compat_sys_adjtimex_wrapper
+	.globl	compat_sys_adjtimex_wrapper
 compat_sys_adjtimex_wrapper:
 	llgtr	%r2,%r2			# struct compat_timex *
 	jg	compat_sys_adjtimex	# branch to system call
 
-	.globl  sys32_mprotect_wrapper 
+	.globl	sys32_mprotect_wrapper
 sys32_mprotect_wrapper:
 	llgtr	%r2,%r2			# unsigned long (actually pointer
 	llgfr	%r3,%r3			# size_t
 	llgfr	%r4,%r4			# unsigned long
 	jg	sys_mprotect		# branch to system call
 
-	.globl  compat_sys_sigprocmask_wrapper 
+	.globl	compat_sys_sigprocmask_wrapper
 compat_sys_sigprocmask_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# compat_old_sigset_t *
 	llgtr	%r4,%r4			# compat_old_sigset_t *
 	jg	compat_sys_sigprocmask		# branch to system call
 
-	.globl  sys32_init_module_wrapper 
+	.globl	sys32_init_module_wrapper
 sys32_init_module_wrapper:
 	llgtr	%r2,%r2			# void *
 	llgfr	%r3,%r3			# unsigned long
 	llgtr	%r4,%r4			# char *
 	jg	sys32_init_module	# branch to system call
 
-	.globl  sys32_delete_module_wrapper 
+	.globl	sys32_delete_module_wrapper
 sys32_delete_module_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# unsigned int
 	jg	sys32_delete_module	# branch to system call
 
-	.globl  sys32_quotactl_wrapper 
+	.globl	sys32_quotactl_wrapper
 sys32_quotactl_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# const char *
@@ -591,45 +591,45 @@ sys32_quotactl_wrapper:
 	llgtr	%r5,%r5			# caddr_t
 	jg	sys_quotactl		# branch to system call
 
-	.globl  sys32_getpgid_wrapper 
+	.globl	sys32_getpgid_wrapper
 sys32_getpgid_wrapper:
 	lgfr	%r2,%r2			# pid_t
 	jg	sys_getpgid		# branch to system call
 
-	.globl  sys32_fchdir_wrapper 
+	.globl	sys32_fchdir_wrapper
 sys32_fchdir_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_fchdir		# branch to system call
 
-	.globl  sys32_bdflush_wrapper 
+	.globl	sys32_bdflush_wrapper
 sys32_bdflush_wrapper:
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# long
 	jg	sys_bdflush		# branch to system call
 
-	.globl  sys32_sysfs_wrapper 
+	.globl	sys32_sysfs_wrapper
 sys32_sysfs_wrapper:
 	lgfr	%r2,%r2			# int
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
 	jg	sys_sysfs		# branch to system call
 
-	.globl  sys32_personality_wrapper 
+	.globl	sys32_personality_wrapper
 sys32_personality_wrapper:
 	llgfr	%r2,%r2			# unsigned long
 	jg	s390x_personality	# branch to system call
 
-	.globl  sys32_setfsuid16_wrapper 
+	.globl	sys32_setfsuid16_wrapper
 sys32_setfsuid16_wrapper:
-	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
+	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
 	jg	sys32_setfsuid16	# branch to system call
 
-	.globl  sys32_setfsgid16_wrapper 
+	.globl	sys32_setfsgid16_wrapper
 sys32_setfsgid16_wrapper:
-	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
+	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
 	jg	sys32_setfsgid16	# branch to system call
 
-	.globl  sys32_llseek_wrapper 
+	.globl	sys32_llseek_wrapper
 sys32_llseek_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned long
@@ -638,14 +638,14 @@ sys32_llseek_wrapper:
 	llgfr	%r6,%r6			# unsigned int
 	jg	sys_llseek		# branch to system call
 
-	.globl  sys32_getdents_wrapper 
+	.globl	sys32_getdents_wrapper
 sys32_getdents_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# void *
 	llgfr	%r4,%r4			# unsigned int
 	jg	compat_sys_getdents	# branch to system call
 
-	.globl  compat_sys_select_wrapper
+	.globl	compat_sys_select_wrapper
 compat_sys_select_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# compat_fd_set *
@@ -654,113 +654,113 @@ compat_sys_select_wrapper:
 	llgtr	%r6,%r6			# struct compat_timeval *
 	jg	compat_sys_select	# branch to system call
 
-	.globl  sys32_flock_wrapper 
+	.globl	sys32_flock_wrapper
 sys32_flock_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	jg	sys_flock		# branch to system call
 
-	.globl  sys32_msync_wrapper 
+	.globl	sys32_msync_wrapper
 sys32_msync_wrapper:
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	lgfr	%r4,%r4			# int
 	jg	sys_msync		# branch to system call
 
-	.globl  compat_sys_readv_wrapper
+	.globl	compat_sys_readv_wrapper
 compat_sys_readv_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct compat_iovec *
 	llgfr	%r4,%r4			# unsigned long
 	jg	compat_sys_readv	# branch to system call
 
-	.globl  compat_sys_writev_wrapper
+	.globl	compat_sys_writev_wrapper
 compat_sys_writev_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct compat_iovec *
 	llgfr	%r4,%r4			# unsigned long
 	jg	compat_sys_writev	# branch to system call
 
-	.globl  sys32_getsid_wrapper 
+	.globl	sys32_getsid_wrapper
 sys32_getsid_wrapper:
 	lgfr	%r2,%r2			# pid_t
 	jg	sys_getsid		# branch to system call
 
-	.globl  sys32_fdatasync_wrapper 
+	.globl	sys32_fdatasync_wrapper
 sys32_fdatasync_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_fdatasync		# branch to system call
 
-#sys32_sysctl_wrapper			# tbd 
+#sys32_sysctl_wrapper			# tbd
 
-	.globl  sys32_mlock_wrapper 
+	.globl	sys32_mlock_wrapper
 sys32_mlock_wrapper:
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	jg	sys_mlock		# branch to system call
 
-	.globl  sys32_munlock_wrapper 
+	.globl	sys32_munlock_wrapper
 sys32_munlock_wrapper:
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	jg	sys_munlock		# branch to system call
 
-	.globl  sys32_mlockall_wrapper 
+	.globl	sys32_mlockall_wrapper
 sys32_mlockall_wrapper:
 	lgfr	%r2,%r2			# int
 	jg	sys_mlockall		# branch to system call
 
-#sys32_munlockall_wrapper		# void 
+#sys32_munlockall_wrapper		# void
 
-	.globl  sys32_sched_setparam_wrapper 
+	.globl	sys32_sched_setparam_wrapper
 sys32_sched_setparam_wrapper:
 	lgfr	%r2,%r2			# pid_t
 	llgtr	%r3,%r3			# struct sched_param *
 	jg	sys_sched_setparam	# branch to system call
 
-	.globl  sys32_sched_getparam_wrapper 
+	.globl	sys32_sched_getparam_wrapper
 sys32_sched_getparam_wrapper:
 	lgfr	%r2,%r2			# pid_t
 	llgtr	%r3,%r3			# struct sched_param *
 	jg	sys_sched_getparam	# branch to system call
 
-	.globl  sys32_sched_setscheduler_wrapper 
+	.globl	sys32_sched_setscheduler_wrapper
 sys32_sched_setscheduler_wrapper:
 	lgfr	%r2,%r2			# pid_t
 	lgfr	%r3,%r3			# int
 	llgtr	%r4,%r4			# struct sched_param *
 	jg	sys_sched_setscheduler	# branch to system call
 
-	.globl  sys32_sched_getscheduler_wrapper 
+	.globl	sys32_sched_getscheduler_wrapper
 sys32_sched_getscheduler_wrapper:
 	lgfr	%r2,%r2			# pid_t
 	jg	sys_sched_getscheduler	# branch to system call
 
-#sys32_sched_yield_wrapper		# void 
+#sys32_sched_yield_wrapper		# void
 
-	.globl  sys32_sched_get_priority_max_wrapper 
+	.globl	sys32_sched_get_priority_max_wrapper
 sys32_sched_get_priority_max_wrapper:
 	lgfr	%r2,%r2			# int
 	jg	sys_sched_get_priority_max	# branch to system call
 
-	.globl  sys32_sched_get_priority_min_wrapper 
+	.globl	sys32_sched_get_priority_min_wrapper
 sys32_sched_get_priority_min_wrapper:
 	lgfr	%r2,%r2			# int
 	jg	sys_sched_get_priority_min	# branch to system call
 
-	.globl  sys32_sched_rr_get_interval_wrapper 
+	.globl	sys32_sched_rr_get_interval_wrapper
 sys32_sched_rr_get_interval_wrapper:
 	lgfr	%r2,%r2			# pid_t
 	llgtr	%r3,%r3			# struct compat_timespec *
 	jg	sys32_sched_rr_get_interval	# branch to system call
 
-	.globl  compat_sys_nanosleep_wrapper 
+	.globl	compat_sys_nanosleep_wrapper
 compat_sys_nanosleep_wrapper:
 	llgtr	%r2,%r2			# struct compat_timespec *
 	llgtr	%r3,%r3			# struct compat_timespec *
 	jg	compat_sys_nanosleep		# branch to system call
 
-	.globl  sys32_mremap_wrapper 
+	.globl	sys32_mremap_wrapper
 sys32_mremap_wrapper:
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# unsigned long
@@ -769,49 +769,49 @@ sys32_mremap_wrapper:
 	llgfr	%r6,%r6			# unsigned long
 	jg	sys_mremap		# branch to system call
 
-	.globl  sys32_setresuid16_wrapper 
+	.globl	sys32_setresuid16_wrapper
 sys32_setresuid16_wrapper:
-	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t 
-	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
-	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t 
+	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
+	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t
+	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t
 	jg	sys32_setresuid16	# branch to system call
 
-	.globl  sys32_getresuid16_wrapper 
+	.globl	sys32_getresuid16_wrapper
 sys32_getresuid16_wrapper:
 	llgtr	%r2,%r2			# __kernel_old_uid_emu31_t *
 	llgtr	%r3,%r3			# __kernel_old_uid_emu31_t *
 	llgtr	%r4,%r4			# __kernel_old_uid_emu31_t *
 	jg	sys32_getresuid16	# branch to system call
 
-	.globl  sys32_poll_wrapper 
+	.globl	sys32_poll_wrapper
 sys32_poll_wrapper:
-	llgtr	%r2,%r2			# struct pollfd * 
-	llgfr	%r3,%r3			# unsigned int 
-	lgfr	%r4,%r4			# long 
+	llgtr	%r2,%r2			# struct pollfd *
+	llgfr	%r3,%r3			# unsigned int
+	lgfr	%r4,%r4			# long
 	jg	sys_poll		# branch to system call
 
-	.globl  compat_sys_nfsservctl_wrapper
+	.globl	compat_sys_nfsservctl_wrapper
 compat_sys_nfsservctl_wrapper:
-	lgfr	%r2,%r2			# int 
+	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct compat_nfsctl_arg*
 	llgtr	%r4,%r4			# union compat_nfsctl_res*
 	jg	compat_sys_nfsservctl	# branch to system call
 
-	.globl  sys32_setresgid16_wrapper 
+	.globl	sys32_setresgid16_wrapper
 sys32_setresgid16_wrapper:
-	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t 
-	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t 
-	llgfr	%r4,%r4			# __kernel_old_gid_emu31_t 
+	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
+	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t
+	llgfr	%r4,%r4			# __kernel_old_gid_emu31_t
 	jg	sys32_setresgid16	# branch to system call
 
-	.globl  sys32_getresgid16_wrapper 
+	.globl	sys32_getresgid16_wrapper
 sys32_getresgid16_wrapper:
 	llgtr	%r2,%r2			# __kernel_old_gid_emu31_t *
 	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
 	llgtr	%r4,%r4			# __kernel_old_gid_emu31_t *
 	jg	sys32_getresgid16	# branch to system call
 
-	.globl  sys32_prctl_wrapper 
+	.globl	sys32_prctl_wrapper
 sys32_prctl_wrapper:
 	lgfr	%r2,%r2			# int
 	llgfr	%r3,%r3			# unsigned long
@@ -820,9 +820,9 @@ sys32_prctl_wrapper:
 	llgfr	%r6,%r6			# unsigned long
 	jg	sys_prctl		# branch to system call
 
-#sys32_rt_sigreturn_wrapper		# done in rt_sigreturn_glue 
+#sys32_rt_sigreturn_wrapper		# done in rt_sigreturn_glue
 
-	.globl  sys32_rt_sigaction_wrapper 
+	.globl	sys32_rt_sigaction_wrapper
 sys32_rt_sigaction_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct sigaction_emu31 *
@@ -830,7 +830,7 @@ sys32_rt_sigaction_wrapper:
 	llgfr	%r5,%r5			# size_t
 	jg	sys32_rt_sigaction	# branch to system call
 
-	.globl  sys32_rt_sigprocmask_wrapper 
+	.globl	sys32_rt_sigprocmask_wrapper
 sys32_rt_sigprocmask_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# old_sigset_emu31 *
@@ -838,13 +838,13 @@ sys32_rt_sigprocmask_wrapper:
 	llgfr	%r5,%r5			# size_t
 	jg	sys32_rt_sigprocmask	# branch to system call
 
-	.globl  sys32_rt_sigpending_wrapper 
+	.globl	sys32_rt_sigpending_wrapper
 sys32_rt_sigpending_wrapper:
 	llgtr	%r2,%r2			# sigset_emu31 *
 	llgfr	%r3,%r3			# size_t
 	jg	sys32_rt_sigpending	# branch to system call
 
-	.globl  compat_sys_rt_sigtimedwait_wrapper
+	.globl	compat_sys_rt_sigtimedwait_wrapper
 compat_sys_rt_sigtimedwait_wrapper:
 	llgtr	%r2,%r2			# const sigset_emu31_t *
 	llgtr	%r3,%r3			# siginfo_emu31_t *
@@ -852,7 +852,7 @@ compat_sys_rt_sigtimedwait_wrapper:
 	llgfr	%r5,%r5			# size_t
 	jg	compat_sys_rt_sigtimedwait	# branch to system call
 
-	.globl  sys32_rt_sigqueueinfo_wrapper 
+	.globl	sys32_rt_sigqueueinfo_wrapper
 sys32_rt_sigqueueinfo_wrapper:
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
@@ -865,7 +865,7 @@ compat_sys_rt_sigsuspend_wrapper:
 	llgfr	%r3,%r3			# compat_size_t
 	jg	compat_sys_rt_sigsuspend
 
-	.globl  sys32_pread64_wrapper 
+	.globl	sys32_pread64_wrapper
 sys32_pread64_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# char *
@@ -874,7 +874,7 @@ sys32_pread64_wrapper:
 	llgfr	%r6,%r6			# u32
 	jg	sys32_pread64		# branch to system call
 
-	.globl  sys32_pwrite64_wrapper 
+	.globl	sys32_pwrite64_wrapper
 sys32_pwrite64_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# const char *
@@ -883,26 +883,26 @@ sys32_pwrite64_wrapper:
 	llgfr	%r6,%r6			# u32
 	jg	sys32_pwrite64		# branch to system call
 
-	.globl  sys32_chown16_wrapper 
+	.globl	sys32_chown16_wrapper
 sys32_chown16_wrapper:
 	llgtr	%r2,%r2			# const char *
-	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t 
-	llgfr	%r4,%r4			# __kernel_old_gid_emu31_t 
+	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t
+	llgfr	%r4,%r4			# __kernel_old_gid_emu31_t
 	jg	sys32_chown16		# branch to system call
 
-	.globl  sys32_getcwd_wrapper 
+	.globl	sys32_getcwd_wrapper
 sys32_getcwd_wrapper:
 	llgtr	%r2,%r2			# char *
 	llgfr	%r3,%r3			# unsigned long
 	jg	sys_getcwd		# branch to system call
 
-	.globl  sys32_capget_wrapper 
+	.globl	sys32_capget_wrapper
 sys32_capget_wrapper:
 	llgtr	%r2,%r2			# cap_user_header_t
 	llgtr	%r3,%r3			# cap_user_data_t
 	jg	sys_capget		# branch to system call
 
-	.globl  sys32_capset_wrapper 
+	.globl	sys32_capset_wrapper
 sys32_capset_wrapper:
 	llgtr	%r2,%r2			# cap_user_header_t
 	llgtr	%r3,%r3			# const cap_user_data_t
@@ -910,11 +910,11 @@ sys32_capset_wrapper:
 
 	.globl sys32_sigaltstack_wrapper
 sys32_sigaltstack_wrapper:
-	llgtr	%r2,%r2			# const stack_emu31_t * 
-	llgtr	%r3,%r3			# stack_emu31_t * 
+	llgtr	%r2,%r2			# const stack_emu31_t *
+	llgtr	%r3,%r3			# stack_emu31_t *
 	jg	sys32_sigaltstack
 
-	.globl  sys32_sendfile_wrapper 
+	.globl	sys32_sendfile_wrapper
 sys32_sendfile_wrapper:
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
@@ -922,33 +922,33 @@ sys32_sendfile_wrapper:
 	llgfr	%r5,%r5			# size_t
 	jg	sys32_sendfile		# branch to system call
 
-#sys32_vfork_wrapper			# done in vfork_glue 
+#sys32_vfork_wrapper			# done in vfork_glue
 
-	.globl  sys32_truncate64_wrapper 
+	.globl	sys32_truncate64_wrapper
 sys32_truncate64_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
 	jg	sys32_truncate64	# branch to system call
 
-	.globl  sys32_ftruncate64_wrapper 
+	.globl	sys32_ftruncate64_wrapper
 sys32_ftruncate64_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
 	jg	sys32_ftruncate64	# branch to system call
 
-	.globl sys32_lchown_wrapper	
+	.globl sys32_lchown_wrapper
 sys32_lchown_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# uid_t
 	llgfr	%r4,%r4			# gid_t
 	jg	sys_lchown		# branch to system call
 
-#sys32_getuid_wrapper			# void			 
-#sys32_getgid_wrapper			# void 
-#sys32_geteuid_wrapper			# void 
-#sys32_getegid_wrapper			# void 
+#sys32_getuid_wrapper			# void
+#sys32_getgid_wrapper			# void
+#sys32_geteuid_wrapper			# void
+#sys32_getegid_wrapper			# void
 
 	.globl sys32_setreuid_wrapper
 sys32_setreuid_wrapper:
@@ -962,111 +962,111 @@ sys32_setregid_wrapper:
 	llgfr	%r3,%r3			# gid_t
 	jg	sys_setregid		# branch to system call
 
-	.globl  sys32_getgroups_wrapper 
+	.globl	sys32_getgroups_wrapper
 sys32_getgroups_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# gid_t *
 	jg	sys_getgroups		# branch to system call
 
-	.globl  sys32_setgroups_wrapper 
+	.globl	sys32_setgroups_wrapper
 sys32_setgroups_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# gid_t *
 	jg	sys_setgroups		# branch to system call
 
-	.globl sys32_fchown_wrapper	
+	.globl sys32_fchown_wrapper
 sys32_fchown_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# uid_t
 	llgfr	%r4,%r4			# gid_t
 	jg	sys_fchown		# branch to system call
 
-	.globl sys32_setresuid_wrapper	
+	.globl sys32_setresuid_wrapper
 sys32_setresuid_wrapper:
 	llgfr	%r2,%r2			# uid_t
 	llgfr	%r3,%r3			# uid_t
 	llgfr	%r4,%r4			# uid_t
 	jg	sys_setresuid		# branch to system call
 
-	.globl sys32_getresuid_wrapper	
+	.globl sys32_getresuid_wrapper
 sys32_getresuid_wrapper:
 	llgtr	%r2,%r2			# uid_t *
 	llgtr	%r3,%r3			# uid_t *
 	llgtr	%r4,%r4			# uid_t *
 	jg	sys_getresuid		# branch to system call
 
-	.globl sys32_setresgid_wrapper	
+	.globl sys32_setresgid_wrapper
 sys32_setresgid_wrapper:
 	llgfr	%r2,%r2			# gid_t
 	llgfr	%r3,%r3			# gid_t
 	llgfr	%r4,%r4			# gid_t
 	jg	sys_setresgid		# branch to system call
 
-	.globl sys32_getresgid_wrapper	
+	.globl sys32_getresgid_wrapper
 sys32_getresgid_wrapper:
 	llgtr	%r2,%r2			# gid_t *
 	llgtr	%r3,%r3			# gid_t *
 	llgtr	%r4,%r4			# gid_t *
 	jg	sys_getresgid		# branch to system call
 
-	.globl sys32_chown_wrapper	
+	.globl sys32_chown_wrapper
 sys32_chown_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# uid_t
 	llgfr	%r4,%r4			# gid_t
 	jg	sys_chown		# branch to system call
 
-	.globl sys32_setuid_wrapper	
+	.globl sys32_setuid_wrapper
 sys32_setuid_wrapper:
 	llgfr	%r2,%r2			# uid_t
 	jg	sys_setuid		# branch to system call
 
-	.globl sys32_setgid_wrapper	
+	.globl sys32_setgid_wrapper
 sys32_setgid_wrapper:
 	llgfr	%r2,%r2			# gid_t
 	jg	sys_setgid		# branch to system call
 
-	.globl sys32_setfsuid_wrapper	
+	.globl sys32_setfsuid_wrapper
 sys32_setfsuid_wrapper:
 	llgfr	%r2,%r2			# uid_t
 	jg	sys_setfsuid		# branch to system call
 
-	.globl sys32_setfsgid_wrapper	
+	.globl sys32_setfsgid_wrapper
 sys32_setfsgid_wrapper:
 	llgfr	%r2,%r2			# gid_t
 	jg	sys_setfsgid		# branch to system call
 
-	.globl  sys32_pivot_root_wrapper 
+	.globl	sys32_pivot_root_wrapper
 sys32_pivot_root_wrapper:
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# const char *
 	jg	sys_pivot_root		# branch to system call
 
-	.globl  sys32_mincore_wrapper 
+	.globl	sys32_mincore_wrapper
 sys32_mincore_wrapper:
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	llgtr	%r4,%r4			# unsigned char *
 	jg	sys_mincore		# branch to system call
 
-	.globl  sys32_madvise_wrapper 
+	.globl	sys32_madvise_wrapper
 sys32_madvise_wrapper:
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	lgfr	%r4,%r4			# int
 	jg	sys_madvise		# branch to system call
 
-	.globl  sys32_getdents64_wrapper 
+	.globl	sys32_getdents64_wrapper
 sys32_getdents64_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# void *
 	llgfr	%r4,%r4			# unsigned int
 	jg	sys_getdents64		# branch to system call
 
-	.globl  compat_sys_fcntl64_wrapper 
+	.globl	compat_sys_fcntl64_wrapper
 compat_sys_fcntl64_wrapper:
 	llgfr	%r2,%r2			# unsigned int
-	llgfr	%r3,%r3			# unsigned int 
+	llgfr	%r3,%r3			# unsigned int
 	llgfr	%r4,%r4			# unsigned long
 	jg	compat_sys_fcntl64	# branch to system call
 
@@ -1087,10 +1087,10 @@ sys32_stime_wrapper:
 	llgtr	%r2,%r2			# long *
 	jg	compat_sys_stime	# branch to system call
 
-	.globl  sys32_sysctl_wrapper
+	.globl	sys32_sysctl_wrapper
 sys32_sysctl_wrapper:
-	llgtr   %r2,%r2                 # struct __sysctl_args32 *
-	jg      sys32_sysctl
+	llgtr	%r2,%r2 		# struct __sysctl_args32 *
+	jg	sys32_sysctl
 
 	.globl	sys32_fstat64_wrapper
 sys32_fstat64_wrapper:
@@ -1098,7 +1098,7 @@ sys32_fstat64_wrapper:
 	llgtr	%r3,%r3			# struct stat64 *
 	jg	sys32_fstat64		# branch to system call
 
-	.globl  compat_sys_futex_wrapper 
+	.globl	compat_sys_futex_wrapper
 compat_sys_futex_wrapper:
 	llgtr	%r2,%r2			# u32 *
 	lgfr	%r3,%r3			# int
@@ -1213,22 +1213,22 @@ sys32_sched_getaffinity_wrapper:
 	llgtr	%r4,%r4			# unsigned long *
 	jg	compat_sys_sched_getaffinity
 
-	.globl  sys32_exit_group_wrapper
+	.globl	sys32_exit_group_wrapper
 sys32_exit_group_wrapper:
 	lgfr	%r2,%r2			# int
 	jg	sys_exit_group		# branch to system call
 
-	.globl  sys32_set_tid_address_wrapper
+	.globl	sys32_set_tid_address_wrapper
 sys32_set_tid_address_wrapper:
 	llgtr	%r2,%r2			# int *
 	jg	sys_set_tid_address	# branch to system call
 
-	.globl  sys_epoll_create_wrapper
+	.globl	sys_epoll_create_wrapper
 sys_epoll_create_wrapper:
 	lgfr	%r2,%r2			# int
 	jg	sys_epoll_create	# branch to system call
 
-	.globl  sys_epoll_ctl_wrapper
+	.globl	sys_epoll_ctl_wrapper
 sys_epoll_ctl_wrapper:
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
@@ -1236,7 +1236,7 @@ sys_epoll_ctl_wrapper:
 	llgtr	%r5,%r5			# struct epoll_event *
 	jg	sys_epoll_ctl		# branch to system call
 
-	.globl  sys_epoll_wait_wrapper
+	.globl	sys_epoll_wait_wrapper
 sys_epoll_wait_wrapper:
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct epoll_event *
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 0c712b78a7e8..dddc3de30401 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -4,8 +4,8 @@
  *
  *    Copyright (C) IBM Corp. 1999,2006
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *               Hartmut Penner (hp@de.ibm.com),
- *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *		 Hartmut Penner (hp@de.ibm.com),
+ *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
@@ -24,29 +24,29 @@
  * Stack layout for the system_call stack entry.
  * The first few entries are identical to the user_regs_struct.
  */
-SP_PTREGS    =  STACK_FRAME_OVERHEAD
-SP_ARGS      =  STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW       =  STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0        =  STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 4
-SP_R2        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R3        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 12
-SP_R4        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R5        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 20
-SP_R6        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R7        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 28
-SP_R8        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R9        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 36
-SP_R10       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R11       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 44
-SP_R12       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R13       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 52
-SP_R14       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R15       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 60
-SP_ORIG_R2   =  STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_ILC       =  STACK_FRAME_OVERHEAD + __PT_ILC
-SP_TRAP      =  STACK_FRAME_OVERHEAD + __PT_TRAP
-SP_SIZE      =  STACK_FRAME_OVERHEAD + __PT_SIZE
+SP_PTREGS    =	STACK_FRAME_OVERHEAD
+SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
+SP_PSW	     =	STACK_FRAME_OVERHEAD + __PT_PSW
+SP_R0	     =	STACK_FRAME_OVERHEAD + __PT_GPRS
+SP_R1	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 4
+SP_R2	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
+SP_R3	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 12
+SP_R4	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
+SP_R5	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 20
+SP_R6	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
+SP_R7	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 28
+SP_R8	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
+SP_R9	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 36
+SP_R10	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
+SP_R11	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 44
+SP_R12	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
+SP_R13	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 52
+SP_R14	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
+SP_R15	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 60
+SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
+SP_ILC	     =	STACK_FRAME_OVERHEAD + __PT_ILC
+SP_TRAP      =	STACK_FRAME_OVERHEAD + __PT_TRAP
+SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
@@ -81,14 +81,14 @@ STACK_SIZE  = 1 << STACK_SHIFT
  *    R15 - kernel stack pointer
  */
 
-	.macro  STORE_TIMER lc_offset
+	.macro	STORE_TIMER lc_offset
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	stpt	\lc_offset
 #endif
 	.endm
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	.macro  UPDATE_VTIME lc_from,lc_to,lc_sum
+	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
 	lm	%r10,%r11,\lc_from
 	sl	%r10,\lc_to
 	sl	%r11,\lc_to+4
@@ -147,7 +147,7 @@ STACK_SIZE  = 1 << STACK_SHIFT
 2:
 	.endm
 
-	.macro  CREATE_STACK_FRAME psworg,savearea
+	.macro	CREATE_STACK_FRAME psworg,savearea
 	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
 	la	%r12,\psworg
@@ -160,7 +160,7 @@ STACK_SIZE  = 1 << STACK_SHIFT
 	st	%r12,__SF_BACKCHAIN(%r15)	# clear back chain
 	.endm
 
-	.macro  RESTORE_ALL psworg,sync
+	.macro	RESTORE_ALL psworg,sync
 	mvc	\psworg(8),SP_PSW(%r15) # move user PSW to lowcore
 	.if !\sync
 	ni	\psworg+1,0xfd		# clear wait state bit
@@ -177,16 +177,16 @@ STACK_SIZE  = 1 << STACK_SHIFT
  * Returns:
  *  gpr2 = prev
  */
-        .globl  __switch_to
+	.globl	__switch_to
 __switch_to:
-        basr    %r1,0
+	basr	%r1,0
 __switch_to_base:
 	tm	__THREAD_per(%r3),0xe8		# new process is using per ?
 	bz	__switch_to_noper-__switch_to_base(%r1)	# if not we're fine
-        stctl   %c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
-        clc     __THREAD_per(12,%r3),__SF_EMPTY(%r15)
-        be      __switch_to_noper-__switch_to_base(%r1)	# we got away w/o bashing TLB's
-        lctl    %c9,%c11,__THREAD_per(%r3)	# Nope we didn't
+	stctl	%c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
+	clc	__THREAD_per(12,%r3),__SF_EMPTY(%r15)
+	be	__switch_to_noper-__switch_to_base(%r1)	# we got away w/o bashing TLB's
+	lctl	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
 __switch_to_noper:
 	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
 	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
@@ -195,13 +195,13 @@ __switch_to_noper:
 	l	%r4,__THREAD_info(%r3)		# get thread_info of next
 	oi	__TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next
 __switch_to_no_mcck:
-        stm     %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
+	stm	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
 	st	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
 	l	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
 	lm	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
 	st	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
 	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
-	l	%r3,__THREAD_info(%r3)  # load thread_info from task struct
+	l	%r3,__THREAD_info(%r3)	# load thread_info from task struct
 	st	%r3,__LC_THREAD_INFO
 	ahi	%r3,STACK_SIZE
 	st	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
@@ -213,7 +213,7 @@ __critical_start:
  * are executed with interrupts enabled.
  */
 
-	.globl  system_call
+	.globl	system_call
 system_call:
 	STORE_TIMER __LC_SYNC_ENTER_TIMER
 sysc_saveall:
@@ -233,24 +233,24 @@ sysc_update:
 #endif
 sysc_do_svc:
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	sla	%r7,2             # *4 and test for svc 0
-	bnz	BASED(sysc_nr_ok) # svc number > 0
+	sla	%r7,2			# *4 and test for svc 0
+	bnz	BASED(sysc_nr_ok)	# svc number > 0
 	# svc 0: system call number in %r1
 	cl	%r1,BASED(.Lnr_syscalls)
 	bnl	BASED(sysc_nr_ok)
-	lr	%r7,%r1           # copy svc number to %r7
-	sla	%r7,2             # *4
+	lr	%r7,%r1 	  # copy svc number to %r7
+	sla	%r7,2		  # *4
 sysc_nr_ok:
 	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
 sysc_do_restart:
 	l	%r8,BASED(.Lsysc_table)
 	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
 	l	%r8,0(%r7,%r8)	  # get system call addr.
-        bnz     BASED(sysc_tracesys)
-        basr    %r14,%r8          # call sys_xxxx
-        st      %r2,SP_R2(%r15)   # store return value (change R2 on stack)
-                                  # ATTENTION: check sys_execve_glue before
-                                  # changing anything here !!
+	bnz	BASED(sysc_tracesys)
+	basr	%r14,%r8	  # call sys_xxxx
+	st	%r2,SP_R2(%r15)   # store return value (change R2 on stack)
+				  # ATTENTION: check sys_execve_glue before
+				  # changing anything here !!
 
 sysc_return:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
@@ -258,14 +258,14 @@ sysc_return:
 	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
 	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
 sysc_leave:
-        RESTORE_ALL __LC_RETURN_PSW,1
+	RESTORE_ALL __LC_RETURN_PSW,1
 
 #
 # recheck if there is more work to do
 #
 sysc_work_loop:
 	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
-	bz	BASED(sysc_leave)      # there is no work to do
+	bz	BASED(sysc_leave)	# there is no work to do
 #
 # One of the work bits is on. Find out which one.
 #
@@ -284,11 +284,11 @@ sysc_work:
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
-#	
-sysc_reschedule:        
-        l       %r1,BASED(.Lschedule)
-	la      %r14,BASED(sysc_work_loop)
-	br      %r1		       # call scheduler
+#
+sysc_reschedule:
+	l	%r1,BASED(.Lschedule)
+	la	%r14,BASED(sysc_work_loop)
+	br	%r1			# call scheduler
 
 #
 # _TIF_MCCK_PENDING is set, call handler
@@ -301,11 +301,11 @@ sysc_mcck_pending:
 #
 # _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
 #
-sysc_sigpending:     
+sysc_sigpending:
 	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
-        la      %r2,SP_PTREGS(%r15)    # load pt_regs
-        l       %r1,BASED(.Ldo_signal)
-	basr	%r14,%r1               # call do_signal
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	l	%r1,BASED(.Ldo_signal)
+	basr	%r14,%r1		# call do_signal
 	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
 	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
@@ -317,11 +317,11 @@ sysc_sigpending:
 #
 sysc_restart:
 	ni	__TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
-	l	%r7,SP_R2(%r15)        # load new svc number
+	l	%r7,SP_R2(%r15) 	# load new svc number
 	sla	%r7,2
 	mvc	SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
-	lm	%r2,%r6,SP_R2(%r15)    # load svc arguments
-	b	BASED(sysc_do_restart) # restart svc
+	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
+	b	BASED(sysc_do_restart)	# restart svc
 
 #
 # _TIF_SINGLE_STEP is set, call do_single_step
@@ -338,8 +338,8 @@ sysc_singlestep:
 # call trace before and after sys_call
 #
 sysc_tracesys:
-        l       %r1,BASED(.Ltrace)
-	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	l	%r1,BASED(.Ltrace)
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	la	%r3,0
 	srl	%r7,2
 	st	%r7,SP_R2(%r15)
@@ -347,19 +347,19 @@ sysc_tracesys:
 	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
 	bnl	BASED(sysc_tracenogo)
 	l	%r8,BASED(.Lsysc_table)
-	l	%r7,SP_R2(%r15)        # strace might have changed the 
-	sll	%r7,2                  #  system call
+	l	%r7,SP_R2(%r15) 	# strace might have changed the
+	sll	%r7,2			#  system call
 	l	%r8,0(%r7,%r8)
 sysc_tracego:
 	lm	%r3,%r6,SP_R3(%r15)
 	l	%r2,SP_ORIG_R2(%r15)
-	basr	%r14,%r8          # call sys_xxx
-	st	%r2,SP_R2(%r15)   # store return value
+	basr	%r14,%r8		# call sys_xxx
+	st	%r2,SP_R2(%r15)		# store return value
 sysc_tracenogo:
 	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-        bz      BASED(sysc_return)
+	bz	BASED(sysc_return)
 	l	%r1,BASED(.Ltrace)
-	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	la	%r3,1
 	la	%r14,BASED(sysc_return)
 	br	%r1
@@ -367,17 +367,17 @@ sysc_tracenogo:
 #
 # a new process exits the kernel with ret_from_fork
 #
-        .globl  ret_from_fork
+	.globl	ret_from_fork
 ret_from_fork:
 	l	%r13,__LC_SVC_NEW_PSW+4
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
 	bo	BASED(0f)
 	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
-0:	l       %r1,BASED(.Lschedtail)
-	basr    %r14,%r1
+0:	l	%r1,BASED(.Lschedtail)
+	basr	%r14,%r1
 	TRACE_IRQS_ON
-        stosm   __SF_EMPTY(%r15),0x03     # reenable interrupts
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	b	BASED(sysc_return)
 
 #
@@ -386,52 +386,51 @@ ret_from_fork:
 # but are called with different parameter.
 # return-address is set up above
 #
-sys_clone_glue: 
-        la      %r2,SP_PTREGS(%r15)    # load pt_regs
-        l       %r1,BASED(.Lclone)
-        br      %r1                   # branch to sys_clone
-
-sys_fork_glue:  
-        la      %r2,SP_PTREGS(%r15)    # load pt_regs
-        l       %r1,BASED(.Lfork)
-        br      %r1                   # branch to sys_fork
-
-sys_vfork_glue: 
-        la      %r2,SP_PTREGS(%r15)    # load pt_regs
-        l       %r1,BASED(.Lvfork)
-        br      %r1                   # branch to sys_vfork
-
-sys_execve_glue:        
-        la      %r2,SP_PTREGS(%r15)   # load pt_regs
-        l       %r1,BASED(.Lexecve)
-	lr      %r12,%r14             # save return address
-        basr    %r14,%r1              # call sys_execve
-        ltr     %r2,%r2               # check if execve failed
-        bnz     0(%r12)               # it did fail -> store result in gpr2
-        b       4(%r12)               # SKIP ST 2,SP_R2(15) after BASR 14,8
-                                      # in system_call/sysc_tracesys
-
-sys_sigreturn_glue:     
-        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
-        l       %r1,BASED(.Lsigreturn)
-        br      %r1                   # branch to sys_sigreturn
-
-sys_rt_sigreturn_glue:     
-        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
-        l       %r1,BASED(.Lrt_sigreturn)
-        br      %r1                   # branch to sys_sigreturn
+sys_clone_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	l	%r1,BASED(.Lclone)
+	br	%r1			# branch to sys_clone
 
-sys_sigaltstack_glue:
-        la      %r4,SP_PTREGS(%r15)   # load pt_regs as parameter
-        l       %r1,BASED(.Lsigaltstack)
-        br      %r1                   # branch to sys_sigreturn
+sys_fork_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	l	%r1,BASED(.Lfork)
+	br	%r1			# branch to sys_fork
 
+sys_vfork_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	l	%r1,BASED(.Lvfork)
+	br	%r1			# branch to sys_vfork
+
+sys_execve_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	l	%r1,BASED(.Lexecve)
+	lr	%r12,%r14		# save return address
+	basr	%r14,%r1		# call sys_execve
+	ltr	%r2,%r2			# check if execve failed
+	bnz	0(%r12)			# it did fail -> store result in gpr2
+	b	4(%r12)			# SKIP ST 2,SP_R2(15) after BASR 14,8
+					# in system_call/sysc_tracesys
+
+sys_sigreturn_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
+	l	%r1,BASED(.Lsigreturn)
+	br	%r1			# branch to sys_sigreturn
+
+sys_rt_sigreturn_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
+	l	%r1,BASED(.Lrt_sigreturn)
+	br	%r1			# branch to sys_sigreturn
+
+sys_sigaltstack_glue:
+	la	%r4,SP_PTREGS(%r15)	# load pt_regs as parameter
+	l	%r1,BASED(.Lsigaltstack)
+	br	%r1			# branch to sys_sigreturn
 
 /*
  * Program check handler routine
  */
 
-        .globl  pgm_check_handler
+	.globl	pgm_check_handler
 pgm_check_handler:
 /*
  * First we need to check for a special case:
@@ -448,8 +447,8 @@ pgm_check_handler:
  */
 	STORE_TIMER __LC_SYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA
-        tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
-        bnz     BASED(pgm_per)           # got per exception -> special case
+	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
+	bnz	BASED(pgm_per)		# got per exception -> special case
 	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -461,29 +460,29 @@ pgm_check_handler:
 pgm_no_vtime:
 #endif
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-        l       %r3,__LC_PGM_ILC         # load program interruption code
+	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3
 pgm_do_call:
-        l       %r7,BASED(.Ljump_table)
-        sll     %r8,2
-        l       %r7,0(%r8,%r7)		 # load address of handler routine
-        la      %r2,SP_PTREGS(%r15)	 # address of register-save area
-	la      %r14,BASED(sysc_return)
-	br      %r7			 # branch to interrupt-handler
+	l	%r7,BASED(.Ljump_table)
+	sll	%r8,2
+	l	%r7,0(%r8,%r7)		# load address of handler routine
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	la	%r14,BASED(sysc_return)
+	br	%r7			# branch to interrupt-handler
 
 #
 # handle per exception
 #
 pgm_per:
-        tm      __LC_PGM_OLD_PSW,0x40    # test if per event recording is on
-        bnz     BASED(pgm_per_std)       # ok, normal per event from user space
+	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
+	bnz	BASED(pgm_per_std)	# ok, normal per event from user space
 # ok its one of the special cases, now we need to find out which one
-        clc     __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
-        be      BASED(pgm_svcper)
+	clc	__LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
+	be	BASED(pgm_svcper)
 # no interesting special case, ignore PER event
-        lm      %r12,%r15,__LC_SAVE_AREA
-	lpsw    0x28
+	lm	%r12,%r15,__LC_SAVE_AREA
+	lpsw	0x28
 
 #
 # Normal per exception
@@ -507,10 +506,10 @@ pgm_no_vtime2:
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
 	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
 	bz	BASED(kernel_per)
-	l	%r3,__LC_PGM_ILC	 # load program interruption code
+	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
-	nr	%r8,%r3                  # clear per-event-bit and ilc
-	be	BASED(sysc_return)       # only per or per+check ?
+	nr	%r8,%r3 		# clear per-event-bit and ilc
+	be	BASED(sysc_return)	# only per or per+check ?
 	b	BASED(pgm_do_call)
 
 #
@@ -552,7 +551,7 @@ kernel_per:
  * IO interrupt handler routine
  */
 
-        .globl io_int_handler
+	.globl io_int_handler
 io_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
@@ -569,42 +568,42 @@ io_no_vtime:
 #endif
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
-        l       %r1,BASED(.Ldo_IRQ)        # load address of do_IRQ
-        la      %r2,SP_PTREGS(%r15) # address of register-save area
-        basr    %r14,%r1          # branch to standard irq handler
+	l	%r1,BASED(.Ldo_IRQ)	# load address of do_IRQ
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	basr	%r14,%r1		# branch to standard irq handler
 	TRACE_IRQS_ON
 
 io_return:
-        tm      SP_PSW+1(%r15),0x01    # returning to user ?
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 #ifdef CONFIG_PREEMPT
-	bno     BASED(io_preempt)      # no -> check for preemptive scheduling
+	bno	BASED(io_preempt)	# no -> check for preemptive scheduling
 #else
-        bno     BASED(io_leave)        # no-> skip resched & signal
+	bno	BASED(io_leave) 	# no-> skip resched & signal
 #endif
 	tm	__TI_flags+3(%r9),_TIF_WORK_INT
-	bnz	BASED(io_work)         # there is work to do (signals etc.)
+	bnz	BASED(io_work)		# there is work to do (signals etc.)
 io_leave:
-        RESTORE_ALL __LC_RETURN_PSW,0
+	RESTORE_ALL __LC_RETURN_PSW,0
 io_done:
 
 #ifdef CONFIG_PREEMPT
 io_preempt:
 	icm	%r0,15,__TI_precount(%r9)
-	bnz     BASED(io_leave)
+	bnz	BASED(io_leave)
 	l	%r1,SP_R15(%r15)
 	s	%r1,BASED(.Lc_spsize)
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lr	%r15,%r1
 io_resume_loop:
 	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
 	bno	BASED(io_leave)
-	mvc     __TI_precount(4,%r9),BASED(.Lc_pactive)
-        stosm   __SF_EMPTY(%r15),0x03  # reenable interrupts
-        l       %r1,BASED(.Lschedule)
+	mvc	__TI_precount(4,%r9),BASED(.Lc_pactive)
+	stosm	__SF_EMPTY(%r15),0x03  # reenable interrupts
+	l	%r1,BASED(.Lschedule)
 	basr	%r14,%r1	       # call schedule
-        stnsm   __SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
-	xc      __TI_precount(4,%r9),__TI_precount(%r9)
+	stnsm	__SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
+	xc	__TI_precount(4,%r9),__TI_precount(%r9)
 	b	BASED(io_resume_loop)
 #endif
 
@@ -615,16 +614,16 @@ io_work:
 	l	%r1,__LC_KERNEL_STACK
 	s	%r1,BASED(.Lc_spsize)
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lr	%r15,%r1
 #
 # One of the work bits is on. Find out which one.
 # Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGMASK, _TIF_NEED_RESCHED
-#	        and _TIF_MCCK_PENDING
+#		and _TIF_MCCK_PENDING
 #
 io_work_loop:
 	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
-	bo      BASED(io_mcck_pending)
+	bo	BASED(io_mcck_pending)
 	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
 	bo	BASED(io_reschedule)
 	tm	__TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
@@ -637,36 +636,36 @@ io_work_loop:
 io_mcck_pending:
 	l	%r1,BASED(.Ls390_handle_mcck)
 	la	%r14,BASED(io_work_loop)
-	br	%r1		       # TIF bit will be cleared by handler
+	br	%r1			# TIF bit will be cleared by handler
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
-#	
-io_reschedule:        
-        l       %r1,BASED(.Lschedule)
-        stosm   __SF_EMPTY(%r15),0x03  # reenable interrupts
-	basr    %r14,%r1	       # call scheduler
-        stnsm   __SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
+#
+io_reschedule:
+	l	%r1,BASED(.Lschedule)
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	basr	%r14,%r1		# call scheduler
+	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 	tm	__TI_flags+3(%r9),_TIF_WORK_INT
-	bz	BASED(io_leave)        # there is no work to do
+	bz	BASED(io_leave) 	# there is no work to do
 	b	BASED(io_work_loop)
 
 #
 # _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
 #
-io_sigpending:     
-        stosm   __SF_EMPTY(%r15),0x03  # reenable interrupts
-        la      %r2,SP_PTREGS(%r15)    # load pt_regs
-        l       %r1,BASED(.Ldo_signal)
-	basr    %r14,%r1	       # call do_signal
-        stnsm   __SF_EMPTY(%r15),0xfc  # disable I/O and ext. interrupts
+io_sigpending:
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	l	%r1,BASED(.Ldo_signal)
+	basr	%r14,%r1		# call do_signal
+	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 	b	BASED(io_work_loop)
 
 /*
  * External interrupt handler routine
  */
 
-        .globl  ext_int_handler
+	.globl	ext_int_handler
 ext_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
@@ -683,8 +682,8 @@ ext_no_vtime:
 #endif
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
-	la	%r2,SP_PTREGS(%r15)    # address of register-save area
-	lh	%r3,__LC_EXT_INT_CODE  # get interruption code
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	lh	%r3,__LC_EXT_INT_CODE	# get interruption code
 	l	%r1,BASED(.Ldo_extint)
 	basr	%r14,%r1
 	TRACE_IRQS_ON
@@ -696,13 +695,13 @@ __critical_end:
  * Machine check handler routines
  */
 
-        .globl mcck_int_handler
+	.globl mcck_int_handler
 mcck_int_handler:
 	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
 	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
 	la	%r12,__LC_MCK_OLD_PSW
-	tm	__LC_MCCK_CODE,0x80     # system damage?
+	tm	__LC_MCCK_CODE,0x80	# system damage?
 	bo	BASED(mcck_int_main)	# yes -> rest of mcck code invalid
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	mvc	__LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
@@ -741,7 +740,7 @@ mcck_int_main:
 	l	%r15,__LC_PANIC_STACK	# load panic stack
 0:	CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	tm	__LC_MCCK_CODE+2,0x08   # mwp of old psw valid?
+	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
 	bno	BASED(mcck_no_vtime)	# no -> skip cleanup critical
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	bz	BASED(mcck_no_vtime)
@@ -752,14 +751,14 @@ mcck_no_vtime:
 #endif
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
-	l       %r1,BASED(.Ls390_mcck)
-	basr    %r14,%r1		# call machine check handler
-	tm      SP_PSW+1(%r15),0x01	# returning to user ?
+	l	%r1,BASED(.Ls390_mcck)
+	basr	%r14,%r1		# call machine check handler
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 	bno	BASED(mcck_return)
-	l	%r1,__LC_KERNEL_STACK   # switch to kernel stack
+	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
 	s	%r1,BASED(.Lc_spsize)
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-	xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lr	%r15,%r1
 	stosm	__SF_EMPTY(%r15),0x04	# turn dat on
 	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
@@ -783,36 +782,36 @@ mcck_return:
 	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
 	lpsw	__LC_RETURN_MCCK_PSW	# back to caller
 
-        RESTORE_ALL __LC_RETURN_MCCK_PSW,0
+	RESTORE_ALL __LC_RETURN_MCCK_PSW,0
 
 #ifdef CONFIG_SMP
 /*
  * Restart interruption handler, kick starter for additional CPUs
  */
-        .globl restart_int_handler
+	.globl restart_int_handler
 restart_int_handler:
-        l       %r15,__LC_SAVE_AREA+60 # load ksp
-        lctl    %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
-        lam     %a0,%a15,__LC_AREGS_SAVE_AREA
-        lm      %r6,%r15,__SF_GPRS(%r15) # load registers from clone
-        stosm   __SF_EMPTY(%r15),0x04    # now we can turn dat on
-        basr    %r14,0
-        l       %r14,restart_addr-.(%r14)
-        br      %r14                   # branch to start_secondary
+	l	%r15,__LC_SAVE_AREA+60	# load ksp
+	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
+	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
+	lm	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
+	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
+	basr	%r14,0
+	l	%r14,restart_addr-.(%r14)
+	br	%r14			# branch to start_secondary
 restart_addr:
-        .long   start_secondary
+	.long	start_secondary
 #else
 /*
  * If we do not run with SMP enabled, let the new CPU crash ...
  */
-        .globl restart_int_handler
+	.globl restart_int_handler
 restart_int_handler:
-        basr    %r1,0
+	basr	%r1,0
 restart_base:
-        lpsw    restart_crash-restart_base(%r1)
-        .align 8
+	lpsw	restart_crash-restart_base(%r1)
+	.align	8
 restart_crash:
-        .long  0x000a0000,0x00000000
+	.long	0x000a0000,0x00000000
 restart_go:
 #endif
 
@@ -834,11 +833,11 @@ stack_overflow:
 	be	BASED(0f)
 	la	%r1,__LC_SAVE_AREA+16
 0:	mvc	SP_R12(16,%r15),0(%r1)	# move %r12-%r15 to stack
-        xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
 	l	%r1,BASED(1f)		# branch to kernel_stack_overflow
-        la      %r2,SP_PTREGS(%r15)	# load pt_regs
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	br	%r1
-1:	.long  kernel_stack_overflow
+1:	.long	kernel_stack_overflow
 #endif
 
 cleanup_table_system_call:
@@ -940,10 +939,10 @@ cleanup_novtime:
 cleanup_system_call_insn:
 	.long	sysc_saveall + 0x80000000
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	.long   system_call + 0x80000000
-	.long   sysc_vtime + 0x80000000
-	.long   sysc_stime + 0x80000000
-	.long   sysc_update + 0x80000000
+	.long	system_call + 0x80000000
+	.long	sysc_vtime + 0x80000000
+	.long	sysc_stime + 0x80000000
+	.long	sysc_update + 0x80000000
 #endif
 
 cleanup_sysc_return:
@@ -1009,57 +1008,57 @@ cleanup_io_leave_insn:
 /*
  * Integer constants
  */
-               .align 4
-.Lc_spsize:    .long  SP_SIZE
-.Lc_overhead:  .long  STACK_FRAME_OVERHEAD
-.Lc_pactive:   .long  PREEMPT_ACTIVE
-.Lnr_syscalls: .long  NR_syscalls
-.L0x018:       .short 0x018
-.L0x020:       .short 0x020
-.L0x028:       .short 0x028
-.L0x030:       .short 0x030
-.L0x038:       .short 0x038
-.Lc_1:         .long  1
+		.align	4
+.Lc_spsize:	.long	SP_SIZE
+.Lc_overhead:	.long	STACK_FRAME_OVERHEAD
+.Lc_pactive:	.long	PREEMPT_ACTIVE
+.Lnr_syscalls:	.long	NR_syscalls
+.L0x018:	.short	0x018
+.L0x020:	.short	0x020
+.L0x028:	.short	0x028
+.L0x030:	.short	0x030
+.L0x038:	.short	0x038
+.Lc_1:		.long	1
 
 /*
  * Symbol constants
  */
-.Ls390_mcck:   .long  s390_do_machine_check
+.Ls390_mcck:	.long	s390_do_machine_check
 .Ls390_handle_mcck:
-	       .long  s390_handle_mcck
-.Lmck_old_psw: .long  __LC_MCK_OLD_PSW
-.Ldo_IRQ:      .long  do_IRQ
-.Ldo_extint:   .long  do_extint
-.Ldo_signal:   .long  do_signal
-.Lhandle_per:  .long  do_single_step
-.Ljump_table:  .long  pgm_check_table
-.Lschedule:    .long  schedule
-.Lclone:       .long  sys_clone
-.Lexecve:      .long  sys_execve
-.Lfork:        .long  sys_fork
-.Lrt_sigreturn:.long  sys_rt_sigreturn
+		.long	s390_handle_mcck
+.Lmck_old_psw:	.long	__LC_MCK_OLD_PSW
+.Ldo_IRQ:	.long	do_IRQ
+.Ldo_extint:	.long	do_extint
+.Ldo_signal:	.long	do_signal
+.Lhandle_per:	.long	do_single_step
+.Ljump_table:	.long	pgm_check_table
+.Lschedule:	.long	schedule
+.Lclone:	.long	sys_clone
+.Lexecve:	.long	sys_execve
+.Lfork: 	.long	sys_fork
+.Lrt_sigreturn: .long	sys_rt_sigreturn
 .Lrt_sigsuspend:
-               .long  sys_rt_sigsuspend
-.Lsigreturn:   .long  sys_sigreturn
-.Lsigsuspend:  .long  sys_sigsuspend
-.Lsigaltstack: .long  sys_sigaltstack
-.Ltrace:       .long  syscall_trace
-.Lvfork:       .long  sys_vfork
-.Lschedtail:   .long  schedule_tail
-.Lsysc_table:  .long  sys_call_table
+		.long	sys_rt_sigsuspend
+.Lsigreturn:	.long	sys_sigreturn
+.Lsigsuspend:	.long	sys_sigsuspend
+.Lsigaltstack:	.long	sys_sigaltstack
+.Ltrace:	.long	syscall_trace
+.Lvfork:	.long	sys_vfork
+.Lschedtail:	.long	schedule_tail
+.Lsysc_table:	.long	sys_call_table
 #ifdef CONFIG_TRACE_IRQFLAGS
-.Ltrace_irq_on:.long  trace_hardirqs_on
+.Ltrace_irq_on: .long	trace_hardirqs_on
 .Ltrace_irq_off:
-	       .long  trace_hardirqs_off
+		.long	trace_hardirqs_off
 #endif
 .Lcritical_start:
-               .long  __critical_start + 0x80000000
+		.long	__critical_start + 0x80000000
 .Lcritical_end:
-               .long  __critical_end + 0x80000000
+		.long	__critical_end + 0x80000000
 .Lcleanup_critical:
-               .long  cleanup_critical
+		.long	cleanup_critical
 
-	       .section .rodata, "a"
+		.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esa
 sys_call_table:
 #include "syscalls.S"
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 29bbfbab7332..0f758c329a5d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -4,8 +4,8 @@
  *
  *    Copyright (C) IBM Corp. 1999,2006
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *               Hartmut Penner (hp@de.ibm.com),
- *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *		 Hartmut Penner (hp@de.ibm.com),
+ *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
@@ -24,29 +24,29 @@
  * Stack layout for the system_call stack entry.
  * The first few entries are identical to the user_regs_struct.
  */
-SP_PTREGS    =  STACK_FRAME_OVERHEAD
-SP_ARGS      =  STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW       =  STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0        =  STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R2        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R3        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R4        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R5        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R6        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R7        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R8        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 64
-SP_R9        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 72
-SP_R10       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 80
-SP_R11       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 88
-SP_R12       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 96
-SP_R13       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 104
-SP_R14       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 112
-SP_R15       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 120
-SP_ORIG_R2   =  STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_ILC       =  STACK_FRAME_OVERHEAD + __PT_ILC
-SP_TRAP      =  STACK_FRAME_OVERHEAD + __PT_TRAP
-SP_SIZE      =  STACK_FRAME_OVERHEAD + __PT_SIZE
+SP_PTREGS    =	STACK_FRAME_OVERHEAD
+SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
+SP_PSW	     =	STACK_FRAME_OVERHEAD + __PT_PSW
+SP_R0	     =	STACK_FRAME_OVERHEAD + __PT_GPRS
+SP_R1	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
+SP_R2	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
+SP_R3	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
+SP_R4	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
+SP_R5	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
+SP_R6	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
+SP_R7	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
+SP_R8	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 64
+SP_R9	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 72
+SP_R10	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 80
+SP_R11	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 88
+SP_R12	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 96
+SP_R13	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 104
+SP_R14	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 112
+SP_R15	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 120
+SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
+SP_ILC	     =	STACK_FRAME_OVERHEAD + __PT_ILC
+SP_TRAP      =	STACK_FRAME_OVERHEAD + __PT_TRAP
+SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE
 
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
@@ -71,14 +71,14 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 #define TRACE_IRQS_OFF
 #endif
 
-	.macro  STORE_TIMER lc_offset
+	.macro	STORE_TIMER lc_offset
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	stpt	\lc_offset
 #endif
 	.endm
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	.macro  UPDATE_VTIME lc_from,lc_to,lc_sum
+	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
 	lg	%r10,\lc_from
 	slg	%r10,\lc_to
 	alg	%r10,\lc_sum
@@ -94,7 +94,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
  *    R15 - kernel stack pointer
  */
 
-        .macro  SAVE_ALL_BASE savearea
+	.macro	SAVE_ALL_BASE savearea
 	stmg	%r12,%r15,\savearea
 	larl	%r13,system_call
 	.endm
@@ -139,8 +139,8 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 	.endm
 
 	.macro	CREATE_STACK_FRAME psworg,savearea
-	aghi    %r15,-SP_SIZE		# make room for registers & psw
-	mvc     SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
+	aghi	%r15,-SP_SIZE		# make room for registers & psw
+	mvc	SP_PSW(16,%r15),0(%r12)	# move user PSW to stack
 	la	%r12,\psworg
 	stg	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
 	icm	%r12,12,__LC_SVC_ILC
@@ -149,7 +149,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 	mvc	SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
 	la	%r12,0
 	stg	%r12,__SF_BACKCHAIN(%r15)
-        .endm
+	.endm
 
 	.macro	RESTORE_ALL psworg,sync
 	mvc	\psworg(16),SP_PSW(%r15) # move user PSW to lowcore
@@ -168,29 +168,29 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
  * Returns:
  *  gpr2 = prev
  */
-        .globl  __switch_to
+	.globl	__switch_to
 __switch_to:
 	tm	__THREAD_per+4(%r3),0xe8 # is the new process using per ?
 	jz	__switch_to_noper		# if not we're fine
-        stctg   %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
-        clc     __THREAD_per(24,%r3),__SF_EMPTY(%r15)
-        je      __switch_to_noper            # we got away without bashing TLB's
-        lctlg   %c9,%c11,__THREAD_per(%r3)	# Nope we didn't
+	stctg	%c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
+	clc	__THREAD_per(24,%r3),__SF_EMPTY(%r15)
+	je	__switch_to_noper	     # we got away without bashing TLB's
+	lctlg	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
 __switch_to_noper:
-	lg	%r4,__THREAD_info(%r2)              # get thread_info of prev
+	lg	%r4,__THREAD_info(%r2)		    # get thread_info of prev
 	tm	__TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
 	jz	__switch_to_no_mcck
 	ni	__TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
 	lg	%r4,__THREAD_info(%r3)		    # get thread_info of next
 	oi	__TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next
 __switch_to_no_mcck:
-        stmg    %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
+	stmg	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
 	stg	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
 	lg	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
-        lmg     %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
+	lmg	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
 	stg	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
 	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
-	lg	%r3,__THREAD_info(%r3)  # load thread_info from task struct
+	lg	%r3,__THREAD_info(%r3)	# load thread_info from task struct
 	stg	%r3,__LC_THREAD_INFO
 	aghi	%r3,STACK_SIZE
 	stg	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
@@ -202,14 +202,14 @@ __critical_start:
  * are executed with interrupts enabled.
  */
 
-	.globl  system_call
+	.globl	system_call
 system_call:
 	STORE_TIMER __LC_SYNC_ENTER_TIMER
 sysc_saveall:
 	SAVE_ALL_BASE __LC_SAVE_AREA
 	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-        CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	llgh    %r7,__LC_SVC_INT_CODE # get svc number from lowcore
+	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
+	llgh	%r7,__LC_SVC_INT_CODE	# get svc number from lowcore
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 sysc_vtime:
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -222,45 +222,45 @@ sysc_update:
 #endif
 sysc_do_svc:
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-        slag    %r7,%r7,2         # *4 and test for svc 0
+	slag	%r7,%r7,2	# *4 and test for svc 0
 	jnz	sysc_nr_ok
 	# svc 0: system call number in %r1
 	cl	%r1,BASED(.Lnr_syscalls)
 	jnl	sysc_nr_ok
-	lgfr	%r7,%r1           # clear high word in r1
-	slag    %r7,%r7,2         # svc 0: system call number in %r1
+	lgfr	%r7,%r1 	# clear high word in r1
+	slag	%r7,%r7,2	# svc 0: system call number in %r1
 sysc_nr_ok:
 	mvc	SP_ARGS(8,%r15),SP_R7(%r15)
 sysc_do_restart:
-	larl    %r10,sys_call_table
+	larl	%r10,sys_call_table
 #ifdef CONFIG_COMPAT
 	tm	__TI_flags+5(%r9),(_TIF_31BIT>>16)  # running in 31 bit mode ?
 	jno	sysc_noemu
-	larl    %r10,sys_call_table_emu  # use 31 bit emulation system calls
+	larl	%r10,sys_call_table_emu  # use 31 bit emulation system calls
 sysc_noemu:
 #endif
 	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-        lgf     %r8,0(%r7,%r10)   # load address of system call routine
-        jnz     sysc_tracesys
-        basr    %r14,%r8          # call sys_xxxx
-        stg     %r2,SP_R2(%r15)   # store return value (change R2 on stack)
-                                  # ATTENTION: check sys_execve_glue before
-                                  # changing anything here !!
+	lgf	%r8,0(%r7,%r10) # load address of system call routine
+	jnz	sysc_tracesys
+	basr	%r14,%r8	# call sys_xxxx
+	stg	%r2,SP_R2(%r15) # store return value (change R2 on stack)
+				# ATTENTION: check sys_execve_glue before
+				# changing anything here !!
 
 sysc_return:
-        tm      SP_PSW+1(%r15),0x01    # returning to user ?
-        jno     sysc_leave
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
+	jno	sysc_leave
 	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
-	jnz	sysc_work         # there is work to do (signals etc.)
+	jnz	sysc_work	# there is work to do (signals etc.)
 sysc_leave:
-        RESTORE_ALL __LC_RETURN_PSW,1
+	RESTORE_ALL __LC_RETURN_PSW,1
 
 #
 # recheck if there is more work to do
 #
 sysc_work_loop:
 	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
-	jz	sysc_leave        # there is no work to do
+	jz	sysc_leave	  # there is no work to do
 #
 # One of the work bits is on. Find out which one.
 #
@@ -279,25 +279,25 @@ sysc_work:
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
-#	
-sysc_reschedule:        
-	larl    %r14,sysc_work_loop
-        jg      schedule            # return point is sysc_return
+#
+sysc_reschedule:
+	larl	%r14,sysc_work_loop
+	jg	schedule	# return point is sysc_return
 
 #
 # _TIF_MCCK_PENDING is set, call handler
 #
 sysc_mcck_pending:
 	larl	%r14,sysc_work_loop
-	jg	s390_handle_mcck    # TIF bit will be cleared by handler
+	jg	s390_handle_mcck	# TIF bit will be cleared by handler
 
 #
 # _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
 #
-sysc_sigpending:     
+sysc_sigpending:
 	ni	__TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
-        la      %r2,SP_PTREGS(%r15) # load pt_regs
-	brasl	%r14,do_signal    # call do_signal
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	brasl	%r14,do_signal		# call do_signal
 	tm	__TI_flags+7(%r9),_TIF_RESTART_SVC
 	jo	sysc_restart
 	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
@@ -309,11 +309,11 @@ sysc_sigpending:
 #
 sysc_restart:
 	ni	__TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
-	lg	%r7,SP_R2(%r15)        # load new svc number
-        slag    %r7,%r7,2              # *4
+	lg	%r7,SP_R2(%r15)		# load new svc number
+	slag	%r7,%r7,2		# *4
 	mvc	SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
-	lmg	%r2,%r6,SP_R2(%r15)    # load svc arguments
-	j	sysc_do_restart        # restart svc
+	lmg	%r2,%r6,SP_R2(%r15)	# load svc arguments
+	j	sysc_do_restart 	# restart svc
 
 #
 # _TIF_SINGLE_STEP is set, call do_single_step
@@ -326,49 +326,48 @@ sysc_singlestep:
 	larl	%r14,sysc_return	# load adr. of system return
 	jg	do_single_step		# branch to do_sigtrap
 
-
 #
 # call syscall_trace before and after system call
 # special linkage: %r12 contains the return address for trace_svc
 #
 sysc_tracesys:
-	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	la	%r3,0
 	srl	%r7,2
-	stg     %r7,SP_R2(%r15)
-        brasl   %r14,syscall_trace
+	stg	%r7,SP_R2(%r15)
+	brasl	%r14,syscall_trace
 	lghi	%r0,NR_syscalls
 	clg	%r0,SP_R2(%r15)
 	jnh	sysc_tracenogo
-	lg	%r7,SP_R2(%r15)   # strace might have changed the
-	sll     %r7,2             #  system call
+	lg	%r7,SP_R2(%r15)		# strace might have changed the
+	sll	%r7,2			# system call
 	lgf	%r8,0(%r7,%r10)
 sysc_tracego:
-	lmg     %r3,%r6,SP_R3(%r15)
-	lg      %r2,SP_ORIG_R2(%r15)
-        basr    %r14,%r8            # call sys_xxx
-        stg     %r2,SP_R2(%r15)     # store return value
+	lmg	%r3,%r6,SP_R3(%r15)
+	lg	%r2,SP_ORIG_R2(%r15)
+	basr	%r14,%r8		# call sys_xxx
+	stg	%r2,SP_R2(%r15)		# store return value
 sysc_tracenogo:
 	tm	__TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-        jz      sysc_return
-	la	%r2,SP_PTREGS(%r15)    # load pt_regs
+	jz	sysc_return
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	la	%r3,1
-	larl	%r14,sysc_return    # return point is sysc_return
+	larl	%r14,sysc_return	# return point is sysc_return
 	jg	syscall_trace
 
 #
 # a new process exits the kernel with ret_from_fork
 #
-        .globl  ret_from_fork
+	.globl	ret_from_fork
 ret_from_fork:
 	lg	%r13,__LC_SVC_NEW_PSW+8
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
 	jo	0f
 	stg	%r15,SP_R15(%r15)	# store stack pointer for new kthread
-0:	brasl   %r14,schedule_tail
+0:	brasl	%r14,schedule_tail
 	TRACE_IRQS_ON
-        stosm   24(%r15),0x03     # reenable interrupts
+	stosm	24(%r15),0x03		# reenable interrupts
 	j	sysc_return
 
 #
@@ -377,78 +376,78 @@ ret_from_fork:
 # but are called with different parameter.
 # return-address is set up above
 #
-sys_clone_glue: 
-        la      %r2,SP_PTREGS(%r15)    # load pt_regs
-        jg      sys_clone              # branch to sys_clone
+sys_clone_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	jg	sys_clone		# branch to sys_clone
 
 #ifdef CONFIG_COMPAT
-sys32_clone_glue: 
-        la      %r2,SP_PTREGS(%r15)    # load pt_regs
-        jg      sys32_clone            # branch to sys32_clone
+sys32_clone_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	jg	sys32_clone		# branch to sys32_clone
 #endif
 
-sys_fork_glue:  
-        la      %r2,SP_PTREGS(%r15)    # load pt_regs
-        jg      sys_fork               # branch to sys_fork
-
-sys_vfork_glue: 
-        la      %r2,SP_PTREGS(%r15)    # load pt_regs
-        jg      sys_vfork              # branch to sys_vfork
-
-sys_execve_glue:        
-        la      %r2,SP_PTREGS(%r15)   # load pt_regs
-	lgr     %r12,%r14             # save return address
-        brasl   %r14,sys_execve       # call sys_execve
-        ltgr    %r2,%r2               # check if execve failed
-        bnz     0(%r12)               # it did fail -> store result in gpr2
-        b       6(%r12)               # SKIP STG 2,SP_R2(15) in
-                                      # system_call/sysc_tracesys
+sys_fork_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	jg	sys_fork		# branch to sys_fork
+
+sys_vfork_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	jg	sys_vfork		# branch to sys_vfork
+
+sys_execve_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lgr	%r12,%r14		# save return address
+	brasl	%r14,sys_execve 	# call sys_execve
+	ltgr	%r2,%r2 		# check if execve failed
+	bnz	0(%r12) 		# it did fail -> store result in gpr2
+	b	6(%r12) 		# SKIP STG 2,SP_R2(15) in
+					# system_call/sysc_tracesys
 #ifdef CONFIG_COMPAT
-sys32_execve_glue:        
-        la      %r2,SP_PTREGS(%r15)   # load pt_regs
-	lgr     %r12,%r14             # save return address
-        brasl   %r14,sys32_execve     # call sys32_execve
-        ltgr    %r2,%r2               # check if execve failed
-        bnz     0(%r12)               # it did fail -> store result in gpr2
-        b       6(%r12)               # SKIP STG 2,SP_R2(15) in
-                                      # system_call/sysc_tracesys
+sys32_execve_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lgr	%r12,%r14		# save return address
+	brasl	%r14,sys32_execve	# call sys32_execve
+	ltgr	%r2,%r2 		# check if execve failed
+	bnz	0(%r12) 		# it did fail -> store result in gpr2
+	b	6(%r12) 		# SKIP STG 2,SP_R2(15) in
+					# system_call/sysc_tracesys
 #endif
 
-sys_sigreturn_glue:     
-        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
-        jg      sys_sigreturn         # branch to sys_sigreturn
+sys_sigreturn_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
+	jg	sys_sigreturn		# branch to sys_sigreturn
 
 #ifdef CONFIG_COMPAT
-sys32_sigreturn_glue:     
-        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
-        jg      sys32_sigreturn       # branch to sys32_sigreturn
+sys32_sigreturn_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
+	jg	sys32_sigreturn 	# branch to sys32_sigreturn
 #endif
 
-sys_rt_sigreturn_glue:     
-        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
-        jg      sys_rt_sigreturn      # branch to sys_sigreturn
+sys_rt_sigreturn_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
+	jg	sys_rt_sigreturn	# branch to sys_rt_sigreturn
 
 #ifdef CONFIG_COMPAT
-sys32_rt_sigreturn_glue:     
-        la      %r2,SP_PTREGS(%r15)   # load pt_regs as parameter
-        jg      sys32_rt_sigreturn    # branch to sys32_sigreturn
+sys32_rt_sigreturn_glue:
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
+	jg	sys32_rt_sigreturn	# branch to sys32_rt_sigreturn
 #endif
 
 sys_sigaltstack_glue:
-        la      %r4,SP_PTREGS(%r15)   # load pt_regs as parameter
-        jg      sys_sigaltstack       # branch to sys_sigreturn
+	la	%r4,SP_PTREGS(%r15)	# load pt_regs as parameter
+	jg	sys_sigaltstack 	# branch to sys_sigaltstack
 
 #ifdef CONFIG_COMPAT
 sys32_sigaltstack_glue:
-        la      %r4,SP_PTREGS(%r15)   # load pt_regs as parameter
-        jg      sys32_sigaltstack_wrapper # branch to sys_sigreturn
+	la	%r4,SP_PTREGS(%r15)	# load pt_regs as parameter
+	jg	sys32_sigaltstack_wrapper # branch to sys32_sigaltstack_wrapper
 #endif
 
 /*
  * Program check handler routine
  */
 
-        .globl  pgm_check_handler
+	.globl	pgm_check_handler
 pgm_check_handler:
 /*
  * First we need to check for a special case:
@@ -465,8 +464,8 @@ pgm_check_handler:
  */
 	STORE_TIMER __LC_SYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA
-        tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
-        jnz     pgm_per                  # got per exception -> special case
+	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
+	jnz	pgm_per 		 # got per exception -> special case
 	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -478,29 +477,29 @@ pgm_check_handler:
 pgm_no_vtime:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	lgf     %r3,__LC_PGM_ILC	 # load program interruption code
+	lgf	%r3,__LC_PGM_ILC	# load program interruption code
 	lghi	%r8,0x7f
 	ngr	%r8,%r3
 pgm_do_call:
-        sll     %r8,3
-        larl    %r1,pgm_check_table
-        lg      %r1,0(%r8,%r1)		 # load address of handler routine
-        la      %r2,SP_PTREGS(%r15)	 # address of register-save area
+	sll	%r8,3
+	larl	%r1,pgm_check_table
+	lg	%r1,0(%r8,%r1)		# load address of handler routine
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	larl	%r14,sysc_return
-        br      %r1			 # branch to interrupt-handler
+	br	%r1			# branch to interrupt-handler
 
 #
 # handle per exception
 #
 pgm_per:
-        tm      __LC_PGM_OLD_PSW,0x40    # test if per event recording is on
-        jnz     pgm_per_std              # ok, normal per event from user space
+	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
+	jnz	pgm_per_std		# ok, normal per event from user space
 # ok it's one of the special cases, now we need to find out which one
-        clc     __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
-        je      pgm_svcper
+	clc	__LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
+	je	pgm_svcper
 # no interesting special case, ignore PER event
 	lmg	%r12,%r15,__LC_SAVE_AREA
-	lpswe   __LC_PGM_OLD_PSW
+	lpswe	__LC_PGM_OLD_PSW
 
 #
 # Normal per exception
@@ -524,9 +523,9 @@ pgm_no_vtime2:
 	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 	oi	__TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
-	lgf     %r3,__LC_PGM_ILC	 # load program interruption code
+	lgf	%r3,__LC_PGM_ILC	# load program interruption code
 	lghi	%r8,0x7f
-	ngr	%r8,%r3			 # clear per-event-bit and ilc
+	ngr	%r8,%r3			# clear per-event-bit and ilc
 	je	sysc_return
 	j	pgm_do_call
 
@@ -544,7 +543,7 @@ pgm_svcper:
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime3:
 #endif
-	llgh    %r7,__LC_SVC_INT_CODE	# get svc number from lowcore
+	llgh	%r7,__LC_SVC_INT_CODE	# get svc number from lowcore
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	lg	%r1,__TI_task(%r9)
 	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
@@ -568,7 +567,7 @@ kernel_per:
 /*
  * IO interrupt handler routine
  */
-        .globl io_int_handler
+	.globl io_int_handler
 io_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
@@ -585,42 +584,42 @@ io_no_vtime:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
-        la      %r2,SP_PTREGS(%r15)    # address of register-save area
-	brasl   %r14,do_IRQ            # call standard irq handler
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	brasl	%r14,do_IRQ		# call standard irq handler
 	TRACE_IRQS_ON
 
 io_return:
-        tm      SP_PSW+1(%r15),0x01    # returning to user ?
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 #ifdef CONFIG_PREEMPT
-	jno     io_preempt             # no -> check for preemptive scheduling
+	jno	io_preempt		# no -> check for preemptive scheduling
 #else
-        jno     io_leave               # no-> skip resched & signal
+	jno	io_leave		# no-> skip resched & signal
 #endif
 	tm	__TI_flags+7(%r9),_TIF_WORK_INT
-	jnz	io_work                # there is work to do (signals etc.)
+	jnz	io_work 		# there is work to do (signals etc.)
 io_leave:
-        RESTORE_ALL __LC_RETURN_PSW,0
+	RESTORE_ALL __LC_RETURN_PSW,0
 io_done:
 
 #ifdef CONFIG_PREEMPT
 io_preempt:
-	icm	%r0,15,__TI_precount(%r9)	
-	jnz     io_leave
+	icm	%r0,15,__TI_precount(%r9)
+	jnz	io_leave
 	# switch to kernel stack
 	lg	%r1,SP_R15(%r15)
 	aghi	%r1,-SP_SIZE
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lgr	%r15,%r1
 io_resume_loop:
 	tm	__TI_flags+7(%r9),_TIF_NEED_RESCHED
 	jno	io_leave
-	larl    %r1,.Lc_pactive
-	mvc     __TI_precount(4,%r9),0(%r1)
-        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
-	brasl   %r14,schedule          # call schedule
-        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
-	xc      __TI_precount(4,%r9),__TI_precount(%r9)
+	larl	%r1,.Lc_pactive
+	mvc	__TI_precount(4,%r9),0(%r1)
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	brasl	%r14,schedule		# call schedule
+	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	xc	__TI_precount(4,%r9),__TI_precount(%r9)
 	j	io_resume_loop
 #endif
 
@@ -631,7 +630,7 @@ io_work:
 	lg	%r1,__LC_KERNEL_STACK
 	aghi	%r1,-SP_SIZE
 	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lgr	%r15,%r1
 #
 # One of the work bits is on. Find out which one.
@@ -656,11 +655,11 @@ io_mcck_pending:
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
-#	
-io_reschedule:        
-	stosm   __SF_EMPTY(%r15),0x03	# reenable interrupts
-	brasl   %r14,schedule		# call scheduler
-	stnsm   __SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+#
+io_reschedule:
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	brasl	%r14,schedule		# call scheduler
+	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 	tm	__TI_flags+7(%r9),_TIF_WORK_INT
 	jz	io_leave		# there is no work to do
 	j	io_work_loop
@@ -668,17 +667,17 @@ io_reschedule:
 #
 # _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
 #
-io_sigpending:     
-	stosm   __SF_EMPTY(%r15),0x03	# reenable interrupts
-	la      %r2,SP_PTREGS(%r15)	# load pt_regs
+io_sigpending:
+	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	brasl	%r14,do_signal		# call do_signal
-	stnsm   __SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
 	j	io_work_loop
 
 /*
  * External interrupt handler routine
  */
-        .globl  ext_int_handler
+	.globl	ext_int_handler
 ext_int_handler:
 	STORE_TIMER __LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
@@ -695,9 +694,9 @@ ext_no_vtime:
 #endif
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
-	la	%r2,SP_PTREGS(%r15)    # address of register-save area
-	llgh	%r3,__LC_EXT_INT_CODE  # get interruption code
-	brasl   %r14,do_extint
+	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	llgh	%r3,__LC_EXT_INT_CODE	# get interruption code
+	brasl	%r14,do_extint
 	TRACE_IRQS_ON
 	j	io_return
 
@@ -706,14 +705,14 @@ __critical_end:
 /*
  * Machine check handler routines
  */
-        .globl mcck_int_handler
+	.globl mcck_int_handler
 mcck_int_handler:
 	la	%r1,4095		# revalidate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
-  	lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 	SAVE_ALL_BASE __LC_SAVE_AREA+64
 	la	%r12,__LC_MCK_OLD_PSW
-	tm	__LC_MCCK_CODE,0x80     # system damage?
+	tm	__LC_MCCK_CODE,0x80	# system damage?
 	jo	mcck_int_main		# yes -> rest of mcck code invalid
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	la	%r14,4095
@@ -737,19 +736,19 @@ mcck_int_handler:
 #endif
 	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
 	jno	mcck_int_main		# no -> skip cleanup critical
-	tm      __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
+	tm	__LC_MCK_OLD_PSW+1,0x01 # test problem state bit
 	jnz	mcck_int_main		# from user -> load kernel stack
 	clc	__LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
 	jhe	mcck_int_main
-	clc     __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
+	clc	__LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
 	jl	mcck_int_main
-	brasl   %r14,cleanup_critical
+	brasl	%r14,cleanup_critical
 mcck_int_main:
-	lg      %r14,__LC_PANIC_STACK   # are we already on the panic stack?
+	lg	%r14,__LC_PANIC_STACK	# are we already on the panic stack?
 	slgr	%r14,%r15
 	srag	%r14,%r14,PAGE_SHIFT
 	jz	0f
-	lg      %r15,__LC_PANIC_STACK   # load panic stack
+	lg	%r15,__LC_PANIC_STACK	# load panic stack
 0:	CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
@@ -764,7 +763,7 @@ mcck_no_vtime:
 	lg	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	brasl	%r14,s390_do_machine_check
-	tm	SP_PSW+1(%r15),0x01     # returning to user ?
+	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 	jno	mcck_return
 	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
 	aghi	%r1,-SP_SIZE
@@ -794,28 +793,28 @@ mcck_return:
 /*
  * Restart interruption handler, kick starter for additional CPUs
  */
-        .globl restart_int_handler
+	.globl restart_int_handler
 restart_int_handler:
-        lg      %r15,__LC_SAVE_AREA+120 # load ksp
-        lghi    %r10,__LC_CREGS_SAVE_AREA
-        lctlg   %c0,%c15,0(%r10) # get new ctl regs
-        lghi    %r10,__LC_AREGS_SAVE_AREA
-        lam     %a0,%a15,0(%r10)
-        lmg     %r6,%r15,__SF_GPRS(%r15) # load registers from clone
-        stosm   __SF_EMPTY(%r15),0x04    # now we can turn dat on
-	jg      start_secondary
+	lg	%r15,__LC_SAVE_AREA+120 # load ksp
+	lghi	%r10,__LC_CREGS_SAVE_AREA
+	lctlg	%c0,%c15,0(%r10) # get new ctl regs
+	lghi	%r10,__LC_AREGS_SAVE_AREA
+	lam	%a0,%a15,0(%r10)
+	lmg	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
+	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
+	jg	start_secondary
 #else
 /*
  * If we do not run with SMP enabled, let the new CPU crash ...
  */
-        .globl restart_int_handler
+	.globl restart_int_handler
 restart_int_handler:
-        basr    %r1,0
+	basr	%r1,0
 restart_base:
-        lpswe   restart_crash-restart_base(%r1)
-        .align 8
+	lpswe	restart_crash-restart_base(%r1)
+	.align 8
 restart_crash:
-        .long  0x000a0000,0x00000000,0x00000000,0x00000000
+	.long  0x000a0000,0x00000000,0x00000000,0x00000000
 restart_go:
 #endif
 
@@ -836,9 +835,9 @@ stack_overflow:
 	chi	%r12,__LC_PGM_OLD_PSW
 	je	0f
 	la	%r1,__LC_SAVE_AREA+32
-0:	mvc	SP_R12(32,%r15),0(%r1)  # move %r12-%r15 to stack
-        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
-        la      %r2,SP_PTREGS(%r15)	# load pt_regs
+0:	mvc	SP_R12(32,%r15),0(%r1)	# move %r12-%r15 to stack
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
+	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	jg	kernel_stack_overflow
 #endif
 
@@ -941,10 +940,10 @@ cleanup_novtime:
 cleanup_system_call_insn:
 	.quad	sysc_saveall
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	.quad   system_call
-	.quad   sysc_vtime
-	.quad   sysc_stime
-	.quad   sysc_update
+	.quad	system_call
+	.quad	sysc_vtime
+	.quad	sysc_stime
+	.quad	sysc_update
 #endif
 
 cleanup_sysc_return:
@@ -1010,21 +1009,21 @@ cleanup_io_leave_insn:
 /*
  * Integer constants
  */
-               .align 4
+		.align	4
 .Lconst:
-.Lc_pactive:   .long  PREEMPT_ACTIVE
-.Lnr_syscalls: .long  NR_syscalls
-.L0x0130:      .short 0x130
-.L0x0140:      .short 0x140
-.L0x0150:      .short 0x150
-.L0x0160:      .short 0x160
-.L0x0170:      .short 0x170
+.Lc_pactive:	.long	PREEMPT_ACTIVE
+.Lnr_syscalls:	.long	NR_syscalls
+.L0x0130:	.short	0x130
+.L0x0140:	.short	0x140
+.L0x0150:	.short	0x150
+.L0x0160:	.short	0x160
+.L0x0170:	.short	0x170
 .Lcritical_start:
-               .quad  __critical_start
+		.quad	__critical_start
 .Lcritical_end:
-               .quad  __critical_end
+		.quad	__critical_end
 
-	       .section .rodata, "a"
+		.section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esame
 sys_call_table:
 #include "syscalls.S"
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 0f1db268a8a9..0cf59bb7a857 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -36,449 +36,449 @@
 #endif
 
 #ifndef CONFIG_IPL
-        .org   0
-        .long  0x00080000,0x80000000+startup   # Just a restart PSW
+	.org   0
+	.long  0x00080000,0x80000000+startup	# Just a restart PSW
 #else
 #ifdef CONFIG_IPL_TAPE
 #define IPL_BS 1024
-        .org   0
-        .long  0x00080000,0x80000000+iplstart  # The first 24 bytes are loaded
-        .long  0x27000000,0x60000001           # by ipl to addresses 0-23.
-        .long  0x02000000,0x20000000+IPL_BS    # (a PSW and two CCWs).
-        .long  0x00000000,0x00000000           # external old psw
-        .long  0x00000000,0x00000000           # svc old psw
-        .long  0x00000000,0x00000000           # program check old psw
-        .long  0x00000000,0x00000000           # machine check old psw
-        .long  0x00000000,0x00000000           # io old psw
-        .long  0x00000000,0x00000000
-        .long  0x00000000,0x00000000
-        .long  0x00000000,0x00000000
-        .long  0x000a0000,0x00000058           # external new psw
-        .long  0x000a0000,0x00000060           # svc new psw
-        .long  0x000a0000,0x00000068           # program check new psw
-        .long  0x000a0000,0x00000070           # machine check new psw
-        .long  0x00080000,0x80000000+.Lioint   # io new psw
+	.org   0
+	.long  0x00080000,0x80000000+iplstart	# The first 24 bytes are loaded
+	.long  0x27000000,0x60000001		# by ipl to addresses 0-23.
+	.long  0x02000000,0x20000000+IPL_BS	# (a PSW and two CCWs).
+	.long  0x00000000,0x00000000		# external old psw
+	.long  0x00000000,0x00000000		# svc old psw
+	.long  0x00000000,0x00000000		# program check old psw
+	.long  0x00000000,0x00000000		# machine check old psw
+	.long  0x00000000,0x00000000		# io old psw
+	.long  0x00000000,0x00000000
+	.long  0x00000000,0x00000000
+	.long  0x00000000,0x00000000
+	.long  0x000a0000,0x00000058		# external new psw
+	.long  0x000a0000,0x00000060		# svc new psw
+	.long  0x000a0000,0x00000068		# program check new psw
+	.long  0x000a0000,0x00000070		# machine check new psw
+	.long  0x00080000,0x80000000+.Lioint	# io new psw
 
-        .org   0x100
+	.org   0x100
 #
 # subroutine for loading from tape
-# Paramters:	
+# Parameters:
 #  R1 = device number
 #  R2 = load address
-.Lloader:	
-        st    %r14,.Lldret
-        la    %r3,.Lorbread                    # r3 = address of orb 
-	la    %r5,.Lirb                        # r5 = address of irb
-        st    %r2,.Lccwread+4                  # initialize CCW data addresses
-        lctl  %c6,%c6,.Lcr6               
-        slr   %r2,%r2
+.Lloader:
+	st	%r14,.Lldret
+	la	%r3,.Lorbread		# r3 = address of orb
+	la	%r5,.Lirb		# r5 = address of irb
+	st	%r2,.Lccwread+4 	# initialize CCW data addresses
+	lctl	%c6,%c6,.Lcr6
+	slr	%r2,%r2
 .Lldlp:
-        la    %r6,3                            # 3 retries
+	la	%r6,3			# 3 retries
 .Lssch:
-        ssch  0(%r3)                           # load chunk of IPL_BS bytes
-        bnz   .Llderr
+	ssch	0(%r3)			# load chunk of IPL_BS bytes
+	bnz	.Llderr
 .Lw4end:
-        bas   %r14,.Lwait4io
-        tm    8(%r5),0x82                      # do we have a problem ?
-        bnz   .Lrecov
-        slr   %r7,%r7
-        icm   %r7,3,10(%r5)                    # get residual count
-        lcr   %r7,%r7
-        la    %r7,IPL_BS(%r7)                  # IPL_BS-residual=#bytes read
-        ar    %r2,%r7                          # add to total size
-        tm    8(%r5),0x01                      # found a tape mark ?
-        bnz   .Ldone
-        l     %r0,.Lccwread+4                  # update CCW data addresses
-        ar    %r0,%r7
-        st    %r0,.Lccwread+4                
-        b     .Lldlp
+	bas	%r14,.Lwait4io
+	tm	8(%r5),0x82		# do we have a problem ?
+	bnz	.Lrecov
+	slr	%r7,%r7
+	icm	%r7,3,10(%r5)		# get residual count
+	lcr	%r7,%r7
+	la	%r7,IPL_BS(%r7) 	# IPL_BS-residual=#bytes read
+	ar	%r2,%r7 		# add to total size
+	tm	8(%r5),0x01		# found a tape mark ?
+	bnz	.Ldone
+	l	%r0,.Lccwread+4 	# update CCW data addresses
+	ar	%r0,%r7
+	st	%r0,.Lccwread+4
+	b	.Lldlp
 .Ldone:
-        l     %r14,.Lldret
-        br    %r14                             # r2 contains the total size
+	l	%r14,.Lldret
+	br	%r14			# r2 contains the total size
 .Lrecov:
-        bas   %r14,.Lsense                     # do the sensing
-        bct   %r6,.Lssch                       # dec. retry count & branch
-        b     .Llderr
+	bas	%r14,.Lsense		# do the sensing
+	bct	%r6,.Lssch		# dec. retry count & branch
+	b	.Llderr
 #
 # Sense subroutine
 #
 .Lsense:
-        st    %r14,.Lsnsret
-        la    %r7,.Lorbsense              
-        ssch  0(%r7)                           # start sense command
-        bnz   .Llderr
-        bas   %r14,.Lwait4io
-        l     %r14,.Lsnsret
-        tm    8(%r5),0x82                      # do we have a problem ?
-        bnz   .Llderr
-        br    %r14
+	st	%r14,.Lsnsret
+	la	%r7,.Lorbsense
+	ssch	0(%r7)			# start sense command
+	bnz	.Llderr
+	bas	%r14,.Lwait4io
+	l	%r14,.Lsnsret
+	tm	8(%r5),0x82		# do we have a problem ?
+	bnz	.Llderr
+	br	%r14
 #
 # Wait for interrupt subroutine
 #
 .Lwait4io:
-        lpsw  .Lwaitpsw                 
+	lpsw	.Lwaitpsw
 .Lioint:
-        c     %r1,0xb8                         # compare subchannel number
-        bne   .Lwait4io
-        tsch  0(%r5)
-        slr   %r0,%r0
-        tm    8(%r5),0x82                      # do we have a problem ?
-        bnz   .Lwtexit
-        tm    8(%r5),0x04                      # got device end ?
-        bz    .Lwait4io
+	c	%r1,0xb8		# compare subchannel number
+	bne	.Lwait4io
+	tsch	0(%r5)
+	slr	%r0,%r0
+	tm	8(%r5),0x82		# do we have a problem ?
+	bnz	.Lwtexit
+	tm	8(%r5),0x04		# got device end ?
+	bz	.Lwait4io
 .Lwtexit:
-        br    %r14
+	br	%r14
 .Llderr:
-        lpsw  .Lcrash              
+	lpsw	.Lcrash
 
-        .align 8
+	.align	8
 .Lorbread:
-	.long  0x00000000,0x0080ff00,.Lccwread
-        .align 8
+	.long	0x00000000,0x0080ff00,.Lccwread
+	.align	8
 .Lorbsense:
-        .long  0x00000000,0x0080ff00,.Lccwsense
-        .align 8
+	.long	0x00000000,0x0080ff00,.Lccwsense
+	.align	8
 .Lccwread:
-        .long  0x02200000+IPL_BS,0x00000000
+	.long	0x02200000+IPL_BS,0x00000000
 .Lccwsense:
-        .long  0x04200001,0x00000000
+	.long	0x04200001,0x00000000
 .Lwaitpsw:
-	.long  0x020a0000,0x80000000+.Lioint
+	.long	0x020a0000,0x80000000+.Lioint
 
-.Lirb:	.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-.Lcr6:  .long  0xff000000
-        .align 8
-.Lcrash:.long  0x000a0000,0x00000000
-.Lldret:.long  0
+.Lirb:	.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:	.long	0xff000000
+	.align	8
+.Lcrash:.long	0x000a0000,0x00000000
+.Lldret:.long	0
 .Lsnsret: .long 0
-#endif  /* CONFIG_IPL_TAPE */
+#endif	/* CONFIG_IPL_TAPE */
 
 #ifdef CONFIG_IPL_VM
-#define IPL_BS 0x730
-        .org   0
-        .long  0x00080000,0x80000000+iplstart  # The first 24 bytes are loaded
-        .long  0x02000018,0x60000050           # by ipl to addresses 0-23.
-        .long  0x02000068,0x60000050           # (a PSW and two CCWs).
-        .fill  80-24,1,0x40                    # bytes 24-79 are discarded !!
-        .long  0x020000f0,0x60000050           # The next 160 byte are loaded
-        .long  0x02000140,0x60000050           # to addresses 0x18-0xb7
-        .long  0x02000190,0x60000050           # They form the continuation
-        .long  0x020001e0,0x60000050           # of the CCW program started
-        .long  0x02000230,0x60000050           # by ipl and load the range
-        .long  0x02000280,0x60000050           # 0x0f0-0x730 from the image
-        .long  0x020002d0,0x60000050           # to the range 0x0f0-0x730
-        .long  0x02000320,0x60000050           # in memory. At the end of
-        .long  0x02000370,0x60000050           # the channel program the PSW
-        .long  0x020003c0,0x60000050           # at location 0 is loaded.
-        .long  0x02000410,0x60000050           # Initial processing starts
-        .long  0x02000460,0x60000050           # at 0xf0 = iplstart.
-        .long  0x020004b0,0x60000050
-        .long  0x02000500,0x60000050
-        .long  0x02000550,0x60000050
-        .long  0x020005a0,0x60000050
-        .long  0x020005f0,0x60000050
-        .long  0x02000640,0x60000050
-        .long  0x02000690,0x60000050
-        .long  0x020006e0,0x20000050
+#define IPL_BS	0x730
+	.org	0
+	.long	0x00080000,0x80000000+iplstart	# The first 24 bytes are loaded
+	.long	0x02000018,0x60000050		# by ipl to addresses 0-23.
+	.long	0x02000068,0x60000050		# (a PSW and two CCWs).
+	.fill	80-24,1,0x40			# bytes 24-79 are discarded !!
+	.long	0x020000f0,0x60000050		# The next 160 byte are loaded
+	.long	0x02000140,0x60000050		# to addresses 0x18-0xb7
+	.long	0x02000190,0x60000050		# They form the continuation
+	.long	0x020001e0,0x60000050		# of the CCW program started
+	.long	0x02000230,0x60000050		# by ipl and load the range
+	.long	0x02000280,0x60000050		# 0x0f0-0x730 from the image
+	.long	0x020002d0,0x60000050		# to the range 0x0f0-0x730
+	.long	0x02000320,0x60000050		# in memory. At the end of
+	.long	0x02000370,0x60000050		# the channel program the PSW
+	.long	0x020003c0,0x60000050		# at location 0 is loaded.
+	.long	0x02000410,0x60000050		# Initial processing starts
+	.long	0x02000460,0x60000050		# at 0xf0 = iplstart.
+	.long	0x020004b0,0x60000050
+	.long	0x02000500,0x60000050
+	.long	0x02000550,0x60000050
+	.long	0x020005a0,0x60000050
+	.long	0x020005f0,0x60000050
+	.long	0x02000640,0x60000050
+	.long	0x02000690,0x60000050
+	.long	0x020006e0,0x20000050
 
-        .org   0xf0
+	.org	0xf0
 #
 # subroutine for loading cards from the reader
 #
-.Lloader:	
-	la    %r3,.Lorb                        # r2 = address of orb into r2
-	la    %r5,.Lirb                        # r4 = address of irb
-        la    %r6,.Lccws              
-        la    %r7,20
+.Lloader:
+	la	%r3,.Lorb		# r3 = address of orb
+	la	%r5,.Lirb		# r5 = address of irb
+	la	%r6,.Lccws
+	la	%r7,20
 .Linit:
-        st    %r2,4(%r6)                       # initialize CCW data addresses
-        la    %r2,0x50(%r2)
-        la    %r6,8(%r6)
-        bct   7,.Linit
+	st	%r2,4(%r6)		# initialize CCW data addresses
+	la	%r2,0x50(%r2)
+	la	%r6,8(%r6)
+	bct	7,.Linit
 
-        lctl  %c6,%c6,.Lcr6                    # set IO subclass mask
-	slr   %r2,%r2
+	lctl	%c6,%c6,.Lcr6		# set IO subclass mask
+	slr	%r2,%r2
 .Lldlp:
-        ssch  0(%r3)                           # load chunk of 1600 bytes
-        bnz   .Llderr
+	ssch	0(%r3)			# load chunk of 1600 bytes
+	bnz	.Llderr
 .Lwait4irq:
-        mvc   0x78(8),.Lnewpsw                 # set up IO interrupt psw
-        lpsw  .Lwaitpsw              
+	mvc	0x78(8),.Lnewpsw	# set up IO interrupt psw
+	lpsw	.Lwaitpsw
 .Lioint:
-        c     %r1,0xb8                         # compare subchannel number
-	bne   .Lwait4irq
-	tsch  0(%r5)
+	c	%r1,0xb8		# compare subchannel number
+	bne	.Lwait4irq
+	tsch	0(%r5)
 
-	slr   %r0,%r0
-	ic    %r0,8(%r5)                       # get device status
-	chi   %r0,8                            # channel end ?
-	be    .Lcont
-	chi   %r0,12                           # channel end + device end ?
-	be    .Lcont
+	slr	%r0,%r0
+	ic	%r0,8(%r5)		# get device status
+	chi	%r0,8			# channel end ?
+	be	.Lcont
+	chi	%r0,12			# channel end + device end ?
+	be	.Lcont
 
-        l     %r0,4(%r5)
-        s     %r0,8(%r3)                       # r0/8 = number of ccws executed
-        mhi   %r0,10                           # *10 = number of bytes in ccws
-        lh    %r3,10(%r5)                      # get residual count
-        sr    %r0,%r3                          # #ccws*80-residual=#bytes read
-	ar    %r2,%r0
-	
-        br    %r14                             # r2 contains the total size
+	l	%r0,4(%r5)
+	s	%r0,8(%r3)		# r0/8 = number of ccws executed
+	mhi	%r0,10			# *10 = number of bytes in ccws
+	lh	%r3,10(%r5)		# get residual count
+	sr	%r0,%r3 		# #ccws*80-residual=#bytes read
+	ar	%r2,%r0
+
+	br	%r14			# r2 contains the total size
 
 .Lcont:
-	ahi   %r2,0x640                        # add 0x640 to total size
-        la    %r6,.Lccws             
-        la    %r7,20
+	ahi	%r2,0x640		# add 0x640 to total size
+	la	%r6,.Lccws
+	la	%r7,20
 .Lincr:
-        l     %r0,4(%r6)                       # update CCW data addresses
-        ahi   %r0,0x640
-        st    %r0,4(%r6)
-        ahi   %r6,8
-        bct   7,.Lincr
+	l	%r0,4(%r6)		# update CCW data addresses
+	ahi	%r0,0x640
+	st	%r0,4(%r6)
+	ahi	%r6,8
+	bct	7,.Lincr
 
-        b     .Lldlp
+	b	.Lldlp
 .Llderr:
-        lpsw  .Lcrash              
+	lpsw	.Lcrash
 
-        .align 8
-.Lorb:	.long  0x00000000,0x0080ff00,.Lccws
-.Lirb:	.long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-.Lcr6:  .long  0xff000000
-.Lloadp:.long  0,0
-        .align 8
-.Lcrash:.long  0x000a0000,0x00000000
+	.align	8
+.Lorb:	.long	0x00000000,0x0080ff00,.Lccws
+.Lirb:	.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6:	.long	0xff000000
+.Lloadp:.long	0,0
+	.align	8
+.Lcrash:.long	0x000a0000,0x00000000
 .Lnewpsw:
-        .long  0x00080000,0x80000000+.Lioint
+	.long	0x00080000,0x80000000+.Lioint
 .Lwaitpsw:
-        .long  0x020a0000,0x80000000+.Lioint
+	.long	0x020a0000,0x80000000+.Lioint
 
-        .align 8
-.Lccws: .rept  19
-        .long  0x02600050,0x00000000
-        .endr
-        .long  0x02200050,0x00000000
-#endif  /* CONFIG_IPL_VM */
+	.align	8
+.Lccws: .rept	19
+	.long	0x02600050,0x00000000
+	.endr
+	.long	0x02200050,0x00000000
+#endif	/* CONFIG_IPL_VM */
 
 iplstart:
-        lh    %r1,0xb8                         # test if subchannel number
-        bct   %r1,.Lnoload                     #  is valid
-	l     %r1,0xb8                         # load ipl subchannel number
-        la    %r2,IPL_BS                       # load start address
-        bas   %r14,.Lloader                    # load rest of ipl image
-        l     %r12,.Lparm                      # pointer to parameter area
-        st    %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number
+	lh	%r1,0xb8		# test if subchannel number
+	bct	%r1,.Lnoload		#  is valid
+	l	%r1,0xb8		# load ipl subchannel number
+	la	%r2,IPL_BS		# load start address
+	bas	%r14,.Lloader		# load rest of ipl image
+	l	%r12,.Lparm		# pointer to parameter area
+	st	%r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number
 
 #
 # load parameter file from ipl device
 #
 .Lagain1:
-	l     %r2,.Linitrd		       # ramdisk loc. is temp
-        bas   %r14,.Lloader                    # load parameter file
-        ltr   %r2,%r2                          # got anything ?
-        bz    .Lnopf
-	chi   %r2,895
-	bnh   .Lnotrunc
-	la    %r2,895
+	l	%r2,.Linitrd		# ramdisk loc. is temp
+	bas	%r14,.Lloader		# load parameter file
+	ltr	%r2,%r2 		# got anything ?
+	bz	.Lnopf
+	chi	%r2,895
+	bnh	.Lnotrunc
+	la	%r2,895
 .Lnotrunc:
-	l     %r4,.Linitrd
-	clc   0(3,%r4),.L_hdr		       # if it is HDRx
-	bz    .Lagain1			       # skip dataset header
-	clc   0(3,%r4),.L_eof		       # if it is EOFx
-	bz    .Lagain1			       # skip dataset trailer
-        la    %r5,0(%r4,%r2)
-        lr    %r3,%r2
+	l	%r4,.Linitrd
+	clc	0(3,%r4),.L_hdr		# if it is HDRx
+	bz	.Lagain1		# skip dataset header
+	clc	0(3,%r4),.L_eof		# if it is EOFx
+	bz	.Lagain1		# skip dataset trailer
+	la	%r5,0(%r4,%r2)
+	lr	%r3,%r2
 .Lidebc:
-        tm    0(%r5),0x80                      # high order bit set ?
-        bo    .Ldocv                           #  yes -> convert from EBCDIC
-        ahi   %r5,-1
-        bct   %r3,.Lidebc
-        b     .Lnocv
+	tm	0(%r5),0x80		# high order bit set ?
+	bo	.Ldocv			#  yes -> convert from EBCDIC
+	ahi	%r5,-1
+	bct	%r3,.Lidebc
+	b	.Lnocv
 .Ldocv:
-        l     %r3,.Lcvtab
-        tr    0(256,%r4),0(%r3)                # convert parameters to ascii
-        tr    256(256,%r4),0(%r3)
-        tr    512(256,%r4),0(%r3)
-        tr    768(122,%r4),0(%r3)
-.Lnocv: la    %r3,COMMAND_LINE-PARMAREA(%r12)  # load adr. of command line
-	mvc   0(256,%r3),0(%r4)
-	mvc   256(256,%r3),256(%r4)
-	mvc   512(256,%r3),512(%r4)
-	mvc   768(122,%r3),768(%r4)
-        slr   %r0,%r0
-        b     .Lcntlp
+	l	%r3,.Lcvtab
+	tr	0(256,%r4),0(%r3)	# convert parameters to ascii
+	tr	256(256,%r4),0(%r3)
+	tr	512(256,%r4),0(%r3)
+	tr	768(122,%r4),0(%r3)
+.Lnocv: la	%r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
+	mvc	0(256,%r3),0(%r4)
+	mvc	256(256,%r3),256(%r4)
+	mvc	512(256,%r3),512(%r4)
+	mvc	768(122,%r3),768(%r4)
+	slr	%r0,%r0
+	b	.Lcntlp
 .Ldelspc:
-        ic    %r0,0(%r2,%r3)
-        chi   %r0,0x20                         # is it a space ?
-        be    .Lcntlp
-        ahi   %r2,1
-        b     .Leolp
+	ic	%r0,0(%r2,%r3)
+	chi	%r0,0x20		# is it a space ?
+	be	.Lcntlp
+	ahi	%r2,1
+	b	.Leolp
 .Lcntlp:
-        brct  %r2,.Ldelspc
+	brct	%r2,.Ldelspc
 .Leolp:
-        slr   %r0,%r0
-        stc   %r0,0(%r2,%r3)                   # terminate buffer
+	slr	%r0,%r0
+	stc	%r0,0(%r2,%r3)		# terminate buffer
 .Lnopf:
 
 #
 # load ramdisk from ipl device
-#	
+#
 .Lagain2:
-	l     %r2,.Linitrd		       # addr of ramdisk
-	st    %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
-        bas   %r14,.Lloader                    # load ramdisk
- 	st    %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of ramdisk
-        ltr   %r2,%r2
-        bnz   .Lrdcont
-        st    %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
+	l	%r2,.Linitrd		# addr of ramdisk
+	st	%r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
+	bas	%r14,.Lloader		# load ramdisk
+	st	%r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of rd
+	ltr	%r2,%r2
+	bnz	.Lrdcont
+	st	%r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
 .Lrdcont:
-	l     %r2,.Linitrd
+	l	%r2,.Linitrd
 
-	clc   0(3,%r2),.L_hdr		       # skip HDRx and EOFx 
-	bz    .Lagain2
-	clc   0(3,%r2),.L_eof
-	bz    .Lagain2
+	clc	0(3,%r2),.L_hdr		# skip HDRx and EOFx
+	bz	.Lagain2
+	clc	0(3,%r2),.L_eof
+	bz	.Lagain2
 
 #ifdef CONFIG_IPL_VM
 #
 # reset files in VM reader
 #
-        stidp __LC_CPUID                       # store cpuid
-	tm    __LC_CPUID,0xff                  # running VM ?
-	bno   .Lnoreset
-        la    %r2,.Lreset              
-        lhi   %r3,26
-	diag  %r2,%r3,8
-	la    %r5,.Lirb
-	stsch 0(%r5)			       # check if irq is pending
-	tm    30(%r5),0x0f		       # by verifying if any of the
-	bnz   .Lwaitforirq		       # activity or status control
-	tm    31(%r5),0xff		       # bits is set in the schib
-	bz    .Lnoreset
+	stidp	__LC_CPUID		# store cpuid
+	tm	__LC_CPUID,0xff 	# running VM ?
+	bno	.Lnoreset
+	la	%r2,.Lreset
+	lhi	%r3,26
+	diag	%r2,%r3,8
+	la	%r5,.Lirb
+	stsch	0(%r5)			# check if irq is pending
+	tm	30(%r5),0x0f		# by verifying if any of the
+	bnz	.Lwaitforirq		# activity or status control
+	tm	31(%r5),0xff		# bits is set in the schib
+	bz	.Lnoreset
 .Lwaitforirq:
-	mvc   0x78(8),.Lrdrnewpsw              # set up IO interrupt psw
+	mvc	0x78(8),.Lrdrnewpsw	# set up IO interrupt psw
 .Lwaitrdrirq:
-	lpsw  .Lrdrwaitpsw
+	lpsw	.Lrdrwaitpsw
 .Lrdrint:
-	c     %r1,0xb8                         # compare subchannel number
-	bne   .Lwaitrdrirq
-	la    %r5,.Lirb
-	tsch  0(%r5)
+	c	%r1,0xb8		# compare subchannel number
+	bne	.Lwaitrdrirq
+	la	%r5,.Lirb
+	tsch	0(%r5)
 .Lnoreset:
-        b     .Lnoload
+	b	.Lnoload
 
-	.align 8
+	.align	8
 .Lrdrnewpsw:
-	.long  0x00080000,0x80000000+.Lrdrint
+	.long	0x00080000,0x80000000+.Lrdrint
 .Lrdrwaitpsw:
-	.long  0x020a0000,0x80000000+.Lrdrint
+	.long	0x020a0000,0x80000000+.Lrdrint
 #endif
 
 #
 # everything loaded, go for it
 #
 .Lnoload:
-        l     %r1,.Lstartup
-        br    %r1
+	l	%r1,.Lstartup
+	br	%r1
 
-.Linitrd:.long _end + 0x400000		       # default address of initrd
+.Linitrd:.long _end + 0x400000		# default address of initrd
 .Lparm:	.long  PARMAREA
 .Lstartup: .long startup
-.Lcvtab:.long  _ebcasc                         # ebcdic to ascii table
-.Lreset:.byte  0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
-        .byte  0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
-        .byte  0xc8,0xd6,0xd3,0xc4             # "change rdr all keep nohold"
-.L_eof: .long  0xc5d6c600       /* C'EOF' */
-.L_hdr: .long  0xc8c4d900       /* C'HDR' */
+.Lcvtab:.long	_ebcasc 		# ebcdic to ascii table
+.Lreset:.byte	0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
+	.byte	0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
+	.byte	0xc8,0xd6,0xd3,0xc4	# "change rdr all keep nohold"
+.L_eof: .long	0xc5d6c600	 /* C'EOF' */
+.L_hdr: .long	0xc8c4d900	 /* C'HDR' */
 
-#endif  /* CONFIG_IPL */
+#endif	/* CONFIG_IPL */
 
 #
 # SALIPL loader support. Based on a patch by Rob van der Heij.
 # This entry point is called directly from the SALIPL loader and
 # doesn't need a builtin ipl record.
 #
-        .org  0x800
-	.globl start
+	.org	0x800
+	.globl	start
 start:
-	stm   %r0,%r15,0x07b0		# store registers
-	basr  %r12,%r0
+	stm	%r0,%r15,0x07b0		# store registers
+	basr	%r12,%r0
 .base:
-	l     %r11,.parm
-	l     %r8,.cmd			# pointer to command buffer
+	l	%r11,.parm
+	l	%r8,.cmd		# pointer to command buffer
 
-	ltr   %r9,%r9			# do we have SALIPL parameters?
-	bp    .sk8x8
+	ltr	%r9,%r9			# do we have SALIPL parameters?
+	bp	.sk8x8
 
-	mvc   0(64,%r8),0x00b0		# copy saved registers
-	xc    64(240-64,%r8),0(%r8)	# remainder of buffer
-	tr    0(64,%r8),.lowcase	
-	b     .gotr
+	mvc	0(64,%r8),0x00b0	# copy saved registers
+	xc	64(240-64,%r8),0(%r8)	# remainder of buffer
+	tr	0(64,%r8),.lowcase
+	b	.gotr
 .sk8x8:
-	mvc   0(240,%r8),0(%r9)		# copy iplparms into buffer
+	mvc	0(240,%r8),0(%r9)	# copy iplparms into buffer
 .gotr:
-	l     %r10,.tbl			# EBCDIC to ASCII table
-	tr    0(240,%r8),0(%r10)
-	stidp __LC_CPUID		# Are we running on VM maybe
-	cli   __LC_CPUID,0xff
-	bnz   .test
-	.long 0x83300060		# diag 3,0,x'0060' - storage size
-	b     .done
+	l	%r10,.tbl		# EBCDIC to ASCII table
+	tr	0(240,%r8),0(%r10)
+	stidp	__LC_CPUID		# Are we running on VM maybe
+	cli	__LC_CPUID,0xff
+	bnz	.test
+	.long	0x83300060		# diag 3,0,x'0060' - storage size
+	b	.done
 .test:
-	mvc   0x68(8),.pgmnw		# set up pgm check handler
-	l     %r2,.fourmeg
-	lr    %r3,%r2
-	bctr  %r3,%r0			# 4M-1
-.loop:  iske  %r0,%r3
-	ar    %r3,%r2
+	mvc	0x68(8),.pgmnw		# set up pgm check handler
+	l	%r2,.fourmeg
+	lr	%r3,%r2
+	bctr	%r3,%r0			# 4M-1
+.loop:	iske	%r0,%r3
+	ar	%r3,%r2
 .pgmx:
-	sr    %r3,%r2
-	la    %r3,1(%r3)
+	sr	%r3,%r2
+	la	%r3,1(%r3)
 .done:
-        l     %r1,.memsize
-	st    %r3,ARCH_OFFSET(%r1)
-	slr   %r0,%r0
-	st    %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
-	st    %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
-	j     startup                   # continue with startup
-.tbl:	.long _ebcasc			# translate table
-.cmd:	.long COMMAND_LINE		# address of command line buffer
-.parm:	.long PARMAREA
+	l	%r1,.memsize
+	st	%r3,ARCH_OFFSET(%r1)
+	slr	%r0,%r0
+	st	%r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
+	st	%r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
+	j	startup 		# continue with startup
+.tbl:	.long	_ebcasc			# translate table
+.cmd:	.long	COMMAND_LINE		# address of command line buffer
+.parm:	.long	PARMAREA
 .memsize: .long memory_size
 .fourmeg: .long 0x00400000      	# 4M
-.pgmnw:	.long 0x00080000,.pgmx
+.pgmnw:	.long	0x00080000,.pgmx
 .lowcase:
-	.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 
+	.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
 	.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
-	.byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 
+	.byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
 	.byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
-	.byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 
+	.byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
 	.byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
-	.byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 
+	.byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
 	.byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
-	.byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 
+	.byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
 	.byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
-	.byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 
+	.byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
 	.byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
-	.byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 
+	.byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
 	.byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
-	.byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 
+	.byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
 	.byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
 
-	.byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 
+	.byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
 	.byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
-	.byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 
+	.byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
 	.byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
-	.byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 
+	.byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
 	.byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
-	.byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 
+	.byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
 	.byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
-	.byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87	# .abcdefg 
+	.byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87	# .abcdefg
 	.byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf	# hi
-	.byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 	# .jklmnop
+	.byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97	# .jklmnop
 	.byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf	# qr
 	.byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7	# ..stuvwx
 	.byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef	# yz
-	.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 
+	.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
 	.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
 
 #ifdef CONFIG_64BIT
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index a8bdd96494c7..48998d50b00a 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -15,232 +15,232 @@
 # this is called either by the ipl loader or directly by PSW restart
 # or linload or SALIPL
 #
-	.org  0x10000
-startup:basr  %r13,0			 # get base
-.LPG0:	l     %r13,0f-.LPG0(%r13)
-	b     0(%r13)
-0:	.long startup_continue
+	.org	0x10000
+startup:basr	%r13,0			# get base
+.LPG0:	l	%r13,0f-.LPG0(%r13)
+	b	0(%r13)
+0:	.long	startup_continue
 
 #
 # params at 10400 (setup.h)
 #
-	.org   PARMAREA
-	.quad  0			# IPL_DEVICE
-	.quad  0			# INITRD_START
-	.quad  0			# INITRD_SIZE
+	.org	PARMAREA
+	.quad	0			# IPL_DEVICE
+	.quad	0			# INITRD_START
+	.quad	0			# INITRD_SIZE
 
-	.org   COMMAND_LINE
-	.byte  "root=/dev/ram0 ro"
-	.byte  0
+	.org	COMMAND_LINE
+	.byte	"root=/dev/ram0 ro"
+	.byte	0
 
-	.org   0x11000
+	.org	0x11000
 
 startup_continue:
-	basr  %r13,0			 # get base
-.LPG1:  sll   %r13,1                     # remove high order bit
-        srl   %r13,1
-        lhi   %r1,1                      # mode 1 = esame
-	mvi   __LC_AR_MODE_ID,1		 # set esame flag
-        slr   %r0,%r0                    # set cpuid to zero
-        sigp  %r1,%r0,0x12               # switch to esame mode
-	sam64				 # switch to 64 bit mode
-	lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
-	lg    %r12,.Lparmaddr-.LPG1(%r13)# pointer to parameter area
-					 # move IPL device to lowcore
-        mvc   __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
+	basr	%r13,0			# get base
+.LPG1:	sll	%r13,1			# remove high order bit
+	srl	%r13,1
+	lhi	%r1,1			# mode 1 = esame
+	mvi	__LC_AR_MODE_ID,1	# set esame flag
+	slr	%r0,%r0 		# set cpuid to zero
+	sigp	%r1,%r0,0x12		# switch to esame mode
+	sam64				# switch to 64 bit mode
+	lctlg	%c0,%c15,.Lctl-.LPG1(%r13)	# load control registers
+	lg	%r12,.Lparmaddr-.LPG1(%r13)	# pointer to parameter area
+					# move IPL device to lowcore
+	mvc	__LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
 #
 # Setup stack
 #
-	larl  %r15,init_thread_union
-	lg    %r14,__TI_task(%r15)	# cache current in lowcore
-	stg   %r14,__LC_CURRENT
-	aghi  %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
-	stg   %r15,__LC_KERNEL_STACK	# set end of kernel stack
-	aghi  %r15,-160
-	xc    __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
+	larl	%r15,init_thread_union
+	lg	%r14,__TI_task(%r15)	# cache current in lowcore
+	stg	%r14,__LC_CURRENT
+	aghi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+	stg	%r15,__LC_KERNEL_STACK	# set end of kernel stack
+	aghi	%r15,-160
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
 
-	brasl %r14,ipl_save_parameters
+	brasl	%r14,ipl_save_parameters
 #
 # clear bss memory
 #
-	larl  %r2,__bss_start           # start of bss segment
-        larl  %r3,_end                  # end of bss segment
-        sgr   %r3,%r2                   # length of bss
-        sgr   %r4,%r4                   #
-        sgr   %r5,%r5                   # set src,length and pad to zero
-        mvcle %r2,%r4,0                 # clear mem
-        jo    .-4                       # branch back, if not finished
+	larl	%r2,__bss_start 	# start of bss segment
+	larl	%r3,_end		# end of bss segment
+	sgr	%r3,%r2 		# length of bss
+	sgr	%r4,%r4 		#
+	sgr	%r5,%r5 		# set src,length and pad to zero
+	mvcle	%r2,%r4,0		# clear mem
+	jo	.-4			# branch back, if not finished
 
-	l     %r2,.Lrcp-.LPG1(%r13)	# Read SCP forced command word
+	l	%r2,.Lrcp-.LPG1(%r13)	# Read SCP forced command word
 .Lservicecall:
-	stosm .Lpmask-.LPG1(%r13),0x01	# authorize ext interrupts
+	stosm	.Lpmask-.LPG1(%r13),0x01	# authorize ext interrupts
 
-	stctg %r0,%r0,.Lcr-.LPG1(%r13)	# get cr0
-	la    %r1,0x200			# set bit 22
-	og    %r1,.Lcr-.LPG1(%r13)	# or old cr0 with r1
-	stg   %r1,.Lcr-.LPG1(%r13)
-	lctlg %r0,%r0,.Lcr-.LPG1(%r13)	# load modified cr0
+	stctg	%r0,%r0,.Lcr-.LPG1(%r13)	# get cr0
+	la	%r1,0x200		# set bit 22
+	og	%r1,.Lcr-.LPG1(%r13)	# or old cr0 with r1
+	stg	%r1,.Lcr-.LPG1(%r13)
+	lctlg	%r0,%r0,.Lcr-.LPG1(%r13)	# load modified cr0
 
-	mvc   __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
-	larl  %r1,.Lsclph
-	stg   %r1,__LC_EXT_NEW_PSW+8	# set handler
+	mvc	__LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
+	larl	%r1,.Lsclph
+	stg	%r1,__LC_EXT_NEW_PSW+8	# set handler
 
-	larl  %r4,.Lsccb		# %r4 is our index for sccb stuff
-	lgr   %r1,%r4			# our sccb
-	.insn rre,0xb2200000,%r2,%r1	# service call
-	ipm   %r1
-	srl   %r1,28			# get cc code
-	xr    %r3,%r3
-	chi   %r1,3
-	be    .Lfchunk-.LPG1(%r13)	# leave
-	chi   %r1,2
-	be    .Lservicecall-.LPG1(%r13)
-	lpswe .Lwaitsclp-.LPG1(%r13)
+	larl	%r4,.Lsccb		# %r4 is our index for sccb stuff
+	lgr	%r1,%r4			# our sccb
+	.insn	rre,0xb2200000,%r2,%r1	# service call
+	ipm	%r1
+	srl	%r1,28			# get cc code
+	xr	%r3,%r3
+	chi	%r1,3
+	be	.Lfchunk-.LPG1(%r13)	# leave
+	chi	%r1,2
+	be	.Lservicecall-.LPG1(%r13)
+	lpswe	.Lwaitsclp-.LPG1(%r13)
 .Lsclph:
-	lh    %r1,.Lsccbr-.Lsccb(%r4)
-	chi   %r1,0x10			# 0x0010 is the success code
-	je    .Lprocsccb		# let's process the sccb
-	chi   %r1,0x1f0
-	bne   .Lfchunk-.LPG1(%r13)	# unhandled error code
-	c     %r2,.Lrcp-.LPG1(%r13)	# Did we try Read SCP forced
-	bne   .Lfchunk-.LPG1(%r13)	# if no, give up
-	l     %r2,.Lrcp2-.LPG1(%r13)	# try with Read SCP
-	b     .Lservicecall-.LPG1(%r13)
+	lh	%r1,.Lsccbr-.Lsccb(%r4)
+	chi	%r1,0x10		# 0x0010 is the success code
+	je	.Lprocsccb		# let's process the sccb
+	chi	%r1,0x1f0
+	bne	.Lfchunk-.LPG1(%r13)	# unhandled error code
+	c	%r2,.Lrcp-.LPG1(%r13)	# Did we try Read SCP forced
+	bne	.Lfchunk-.LPG1(%r13)	# if no, give up
+	l	%r2,.Lrcp2-.LPG1(%r13)	# try with Read SCP
+	b	.Lservicecall-.LPG1(%r13)
 .Lprocsccb:
-	lghi  %r1,0
-	icm   %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
-	jnz   .Lscnd
-	lg    %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one
+	lghi	%r1,0
+	icm	%r1,3,.Lscpincr1-.Lsccb(%r4)	# use this one if != 0
+	jnz	.Lscnd
+	lg	%r1,.Lscpincr2-.Lsccb(%r4)	# otherwise use this one
 .Lscnd:
-	xr    %r3,%r3			# same logic
-	ic    %r3,.Lscpa1-.Lsccb(%r4)
-	chi   %r3,0x00
-	jne   .Lcompmem
-	l     %r3,.Lscpa2-.Lsccb(%r4)
+	xr	%r3,%r3			# same logic
+	ic	%r3,.Lscpa1-.Lsccb(%r4)
+	chi	%r3,0x00
+	jne	.Lcompmem
+	l	%r3,.Lscpa2-.Lsccb(%r4)
 .Lcompmem:
-	mlgr  %r2,%r1			# mem in MB on 128-bit
-	l     %r1,.Lonemb-.LPG1(%r13)
-	mlgr  %r2,%r1			# mem size in bytes in %r3
-	b     .Lfchunk-.LPG1(%r13)
+	mlgr	%r2,%r1			# mem in MB on 128-bit
+	l	%r1,.Lonemb-.LPG1(%r13)
+	mlgr	%r2,%r1			# mem size in bytes in %r3
+	b	.Lfchunk-.LPG1(%r13)
 
-	.align 4
+	.align	4
 .Lpmask:
-	.byte 0
-	.align 8
+	.byte	0
+	.align	8
 .Lcr:
-	.quad 0x00  # place holder for cr0
+	.quad	0x00  # place holder for cr0
 .Lwaitsclp:
-	.quad  0x0102000180000000,.Lsclph
+	.quad	0x0102000180000000,.Lsclph
 .Lrcp:
-	.int 0x00120001 # Read SCP forced code
+	.int	0x00120001 # Read SCP forced code
 .Lrcp2:
-	.int 0x00020001 # Read SCP code
+	.int	0x00020001 # Read SCP code
 .Lonemb:
-	.int 0x100000
+	.int	0x100000
 
 .Lfchunk:
-					 # set program check new psw mask
-	mvc   __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
+					# set program check new psw mask
+	mvc	__LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
 
 #
 # find memory chunks.
 #
-	lgr   %r9,%r3			 # end of mem
-	larl  %r1,.Lchkmem               # set program check address
-	stg   %r1,__LC_PGM_NEW_PSW+8
-	la    %r1,1                      # test in increments of 128KB
-	sllg  %r1,%r1,17
-	larl  %r3,memory_chunk
-	slgr  %r4,%r4                    # set start of chunk to zero
-	slgr  %r5,%r5                    # set end of chunk to zero
-	slr  %r6,%r6			 # set access code to zero
-	la    %r10,MEMORY_CHUNKS	 # number of chunks
+	lgr	%r9,%r3			# end of mem
+	larl	%r1,.Lchkmem		# set program check address
+	stg	%r1,__LC_PGM_NEW_PSW+8
+	la	%r1,1			# test in increments of 128KB
+	sllg	%r1,%r1,17
+	larl	%r3,memory_chunk
+	slgr	%r4,%r4 		# set start of chunk to zero
+	slgr	%r5,%r5 		# set end of chunk to zero
+	slr	%r6,%r6			# set access code to zero
+	la	%r10,MEMORY_CHUNKS	# number of chunks
 .Lloop:
-	tprot 0(%r5),0			 # test protection of first byte
-	ipm   %r7
-	srl   %r7,28
-	clr   %r6,%r7			 # compare cc with last access code
-	je    .Lsame
-	j     .Lchkmem
+	tprot	0(%r5),0		# test protection of first byte
+	ipm	%r7
+	srl	%r7,28
+	clr	%r6,%r7			# compare cc with last access code
+	je	.Lsame
+	j	.Lchkmem
 .Lsame:
-	algr  %r5,%r1			 # add 128KB to end of chunk
-					 # no need to check here,
-	brc   12,.Lloop			 # this is the same chunk
-.Lchkmem:				 # > 16EB or tprot got a program check
-	clgr  %r4,%r5			 # chunk size > 0?
-	je    .Lchkloop
-	stg   %r4,0(%r3)		 # store start address of chunk
-	lgr   %r0,%r5
-	slgr  %r0,%r4
-	stg   %r0,8(%r3)		 # store size of chunk
-	st    %r6,20(%r3)		 # store type of chunk
-	la    %r3,24(%r3)
-	larl  %r8,memory_size
-	stg   %r5,0(%r8)                 # store memory size
-	ahi   %r10,-1			 # update chunk number
+	algr	%r5,%r1			# add 128KB to end of chunk
+					# no need to check here,
+	brc	12,.Lloop		# this is the same chunk
+.Lchkmem:				# > 16EB or tprot got a program check
+	clgr	%r4,%r5			# chunk size > 0?
+	je	.Lchkloop
+	stg	%r4,0(%r3)		# store start address of chunk
+	lgr	%r0,%r5
+	slgr	%r0,%r4
+	stg	%r0,8(%r3)		# store size of chunk
+	st	%r6,20(%r3)		# store type of chunk
+	la	%r3,24(%r3)
+	larl	%r8,memory_size
+	stg	%r5,0(%r8)		# store memory size
+	ahi	%r10,-1			# update chunk number
 .Lchkloop:
-	lr    %r6,%r7			 # set access code to last cc
+	lr	%r6,%r7			# set access code to last cc
 	# we got an exception or we're starting a new
 	# chunk , we must check if we should
 	# still try to find valid memory (if we detected
 	# the amount of available storage), and if we
 	# have chunks left
-	lghi  %r4,1
-	sllg  %r4,%r4,31
-	clgr  %r5,%r4
-	je    .Lhsaskip
-	xr    %r0, %r0
-	clgr  %r0, %r9			 # did we detect memory?
-	je    .Ldonemem			 # if not, leave
-	chi   %r10, 0			 # do we have chunks left?
-	je    .Ldonemem
+	lghi	%r4,1
+	sllg	%r4,%r4,31
+	clgr	%r5,%r4
+	je	.Lhsaskip
+	xr	%r0, %r0
+	clgr	%r0, %r9		# did we detect memory?
+	je	.Ldonemem		# if not, leave
+	chi	%r10, 0			# do we have chunks left?
+	je	.Ldonemem
 .Lhsaskip:
-	algr  %r5,%r1			 # add 128KB to end of chunk
-	lgr   %r4,%r5			 # potential new chunk
-	clgr  %r5,%r9			 # should we go on?
-	jl    .Lloop
-.Ldonemem:		
+	algr	%r5,%r1			# add 128KB to end of chunk
+	lgr	%r4,%r5			# potential new chunk
+	clgr	%r5,%r9			# should we go on?
+	jl	.Lloop
+.Ldonemem:
 
-	larl  %r12,machine_flags
+	larl	%r12,machine_flags
 #
 # find out if we are running under VM
 #
-        stidp  __LC_CPUID               # store cpuid
-	tm     __LC_CPUID,0xff          # running under VM ?
-	bno    0f-.LPG1(%r13)
-        oi     7(%r12),1                # set VM flag
-0:      lh     %r0,__LC_CPUID+4         # get cpu version
-        chi    %r0,0x7490               # running on a P/390 ?
-        bne    1f-.LPG1(%r13)
-        oi     7(%r12),4                # set P/390 flag
+	stidp	__LC_CPUID		# store cpuid
+	tm	__LC_CPUID,0xff 	# running under VM ?
+	bno	0f-.LPG1(%r13)
+	oi	7(%r12),1		# set VM flag
+0:	lh	%r0,__LC_CPUID+4	# get cpu version
+	chi	%r0,0x7490		# running on a P/390 ?
+	bne	1f-.LPG1(%r13)
+	oi	7(%r12),4		# set P/390 flag
 1:
 
 #
 # find out if we have the MVPG instruction
 #
-	la     %r1,0f-.LPG1(%r13)       # set program check address
-	stg    %r1,__LC_PGM_NEW_PSW+8
-	sgr    %r0,%r0
-	lghi   %r1,0
-	lghi   %r2,0
-	mvpg   %r1,%r2                  # test MVPG instruction
-	oi     7(%r12),16               # set MVPG flag
+	la	%r1,0f-.LPG1(%r13)	# set program check address
+	stg	%r1,__LC_PGM_NEW_PSW+8
+	sgr	%r0,%r0
+	lghi	%r1,0
+	lghi	%r2,0
+	mvpg	%r1,%r2 		# test MVPG instruction
+	oi	7(%r12),16		# set MVPG flag
 0:
 
 #
 # find out if the diag 0x44 works in 64 bit mode
 #
-	la     %r1,0f-.LPG1(%r13)	# set program check address
-	stg    %r1,__LC_PGM_NEW_PSW+8
-	diag   0,0,0x44			# test diag 0x44
-	oi     7(%r12),32		# set diag44 flag
-0:	
+	la	%r1,0f-.LPG1(%r13)	# set program check address
+	stg	%r1,__LC_PGM_NEW_PSW+8
+	diag	0,0,0x44		# test diag 0x44
+	oi	7(%r12),32		# set diag44 flag
+0:
 
 #
 # find out if we have the IDTE instruction
 #
-	la     %r1,0f-.LPG1(%r13)	# set program check address
-	stg    %r1,__LC_PGM_NEW_PSW+8
+	la	%r1,0f-.LPG1(%r13)	# set program check address
+	stg	%r1,__LC_PGM_NEW_PSW+8
 	.long	0xb2b10000		# store facility list
 	tm	0xc8,0x08		# check bit for clearing-by-ASCE
 	bno	0f-.LPG1(%r13)
@@ -263,45 +263,45 @@ startup_continue:
 	oi	6(%r12),2		# set MVCOS flag
 1:
 
-        lpswe .Lentry-.LPG1(13)         # jump to _stext in primary-space,
-                                        # virtual and never return ...
-        .align 16
-.Lentry:.quad  0x0000000180000000,_stext
-.Lctl:  .quad  0x04b50002               # cr0: various things
-        .quad  0                        # cr1: primary space segment table
-        .quad  .Lduct                   # cr2: dispatchable unit control table
-        .quad  0                        # cr3: instruction authorization
-        .quad  0                        # cr4: instruction authorization
-        .quad  0xffffffffffffffff       # cr5: primary-aste origin
-        .quad  0                        # cr6:  I/O interrupts
-        .quad  0                        # cr7:  secondary space segment table
-        .quad  0                        # cr8:  access registers translation
-        .quad  0                        # cr9:  tracing off
-        .quad  0                        # cr10: tracing off
-        .quad  0                        # cr11: tracing off
-        .quad  0                        # cr12: tracing off
-        .quad  0                        # cr13: home space segment table
-        .quad  0xc0000000               # cr14: machine check handling off
-        .quad  0                        # cr15: linkage stack operations
-.Lduct: .long 0,0,0,0,0,0,0,0
-	.long 0,0,0,0,0,0,0,0
-.Lpcmsk:.quad  0x0000000180000000
+	lpswe	.Lentry-.LPG1(13)	# jump to _stext in primary-space,
+					# virtual and never return ...
+	.align	16
+.Lentry:.quad	0x0000000180000000,_stext
+.Lctl:	.quad	0x04b50002		# cr0: various things
+	.quad	0			# cr1: primary space segment table
+	.quad	.Lduct			# cr2: dispatchable unit control table
+	.quad	0			# cr3: instruction authorization
+	.quad	0			# cr4: instruction authorization
+	.quad	0xffffffffffffffff	# cr5: primary-aste origin
+	.quad	0			# cr6:	I/O interrupts
+	.quad	0			# cr7:	secondary space segment table
+	.quad	0			# cr8:	access registers translation
+	.quad	0			# cr9:	tracing off
+	.quad	0			# cr10: tracing off
+	.quad	0			# cr11: tracing off
+	.quad	0			# cr12: tracing off
+	.quad	0			# cr13: home space segment table
+	.quad	0xc0000000		# cr14: machine check handling off
+	.quad	0			# cr15: linkage stack operations
+.Lduct: .long	0,0,0,0,0,0,0,0
+	.long	0,0,0,0,0,0,0,0
+.Lpcmsk:.quad	0x0000000180000000
 .L4malign:.quad 0xffffffffffc00000
-.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
-.Lnop:	.long  0x07000700
+.Lscan2g:.quad	0x80000000 + 0x20000 - 8	# 2GB + 128K - 8
+.Lnop:	.long	0x07000700
 .Lparmaddr:
 	.quad	PARMAREA
 
-	.globl ipl_schib
+	.globl	ipl_schib
 ipl_schib:
 	.rept 13
 	.long 0
 	.endr
 
-	.globl ipl_flags
+	.globl	ipl_flags
 ipl_flags:
-	.long 0
-	.globl ipl_devno
+	.long	0
+	.globl	ipl_devno
 ipl_devno:
 	.word 0
 
@@ -309,47 +309,47 @@ ipl_devno:
 .globl s390_readinfo_sccb
 s390_readinfo_sccb:
 .Lsccb:
-	.hword 0x1000			# length, one page
-	.byte 0x00,0x00,0x00
-	.byte 0x80			# variable response bit set
+	.hword	0x1000			# length, one page
+	.byte	0x00,0x00,0x00
+	.byte	0x80			# variable response bit set
 .Lsccbr:
-	.hword 0x00			# response code
+	.hword	0x00			# response code
 .Lscpincr1:
-	.hword 0x00
+	.hword	0x00
 .Lscpa1:
-	.byte 0x00
-	.fill 89,1,0
+	.byte	0x00
+	.fill	89,1,0
 .Lscpa2:
-	.int 0x00
+	.int	0x00
 .Lscpincr2:
-	.quad 0x00
-	.fill 3984,1,0
+	.quad	0x00
+	.fill	3984,1,0
 	.org	0x13000
 
 #ifdef CONFIG_SHARED_KERNEL
-	.org   0x100000
+	.org	0x100000
 #endif
-	
+
 #
 # startup-code, running in absolute addressing mode
 #
-        .globl _stext
-_stext:	basr  %r13,0                    # get base
+	.globl	_stext
+_stext:	basr	%r13,0			# get base
 .LPG3:
 # check control registers
-        stctg  %c0,%c15,0(%r15)
-	oi     6(%r15),0x40             # enable sigp emergency signal
-	oi     4(%r15),0x10             # switch on low address protection
-        lctlg  %c0,%c15,0(%r15)
+	stctg	%c0,%c15,0(%r15)
+	oi	6(%r15),0x40		# enable sigp emergency signal
+	oi	4(%r15),0x10		# switch on low address protection
+	lctlg	%c0,%c15,0(%r15)
 
-        lam    0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
-        brasl  %r14,start_kernel        # go to C code
+	lam	0,15,.Laregs-.LPG3(%r13)	# load acrs needed by uaccess
+	brasl	%r14,start_kernel	# go to C code
 #
 # We returned from start_kernel ?!? PANIK
 #
-        basr  %r13,0
-	lpswe .Ldw-.(%r13)           # load disabled wait psw
+	basr	%r13,0
+	lpswe	.Ldw-.(%r13)		# load disabled wait psw
 
-            .align 8
-.Ldw:       .quad  0x0002000180000000,0x0000000000000000
-.Laregs:    .long  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+	.align	8
+.Ldw:	.quad	0x0002000180000000,0x0000000000000000
+.Laregs:.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 4562cdbce8eb..0340477f3b08 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -32,58 +32,58 @@ do_reipl_asm:	basr	%r13,0
 		st	%r13, __LC_PSW_SAVE_AREA+4
 
 		lctl	%c6,%c6,.Lall-.Lpg0(%r13)
-                lr      %r1,%r2
-        	mvc     __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
-                stsch   .Lschib-.Lpg0(%r13)                                    
-	        oi      .Lschib+5-.Lpg0(%r13),0x84 
-.Lecs:  	xi      .Lschib+27-.Lpg0(%r13),0x01 
-        	msch    .Lschib-.Lpg0(%r13) 
-                lhi     %r0,5
-.Lssch:		ssch	.Liplorb-.Lpg0(%r13)           
+		lr	%r1,%r2
+		mvc	__LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
+		stsch	.Lschib-.Lpg0(%r13)
+		oi	.Lschib+5-.Lpg0(%r13),0x84
+.Lecs:  	xi	.Lschib+27-.Lpg0(%r13),0x01
+		msch	.Lschib-.Lpg0(%r13)
+		lhi	%r0,5
+.Lssch:		ssch	.Liplorb-.Lpg0(%r13)
 		jz	.L001
-                brct    %r0,.Lssch  
+		brct	%r0,.Lssch
 		bas	%r14,.Ldisab-.Lpg0(%r13)
-.L001:		mvc	__LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)	
-.Ltpi:		lpsw	.Lwaitpsw-.Lpg0(%r13)          
+.L001:		mvc	__LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
+.Ltpi:		lpsw	.Lwaitpsw-.Lpg0(%r13)
 .Lcont:		c	%r1,__LC_SUBCHANNEL_ID
 		jnz	.Ltpi
 		clc	__LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
 		jnz	.Ltpi
-		tsch	.Liplirb-.Lpg0(%r13)           
+		tsch	.Liplirb-.Lpg0(%r13)
 		tm	.Liplirb+9-.Lpg0(%r13),0xbf
-                jz      .L002
-                bas     %r14,.Ldisab-.Lpg0(%r13)    
-.L002:		tm	.Liplirb+8-.Lpg0(%r13),0xf3    
-                jz      .L003
-                bas     %r14,.Ldisab-.Lpg0(%r13)	
+		jz	.L002
+		bas	%r14,.Ldisab-.Lpg0(%r13)
+.L002:		tm	.Liplirb+8-.Lpg0(%r13),0xf3
+		jz	.L003
+		bas	%r14,.Ldisab-.Lpg0(%r13)
 .L003:		spx	.Lnull-.Lpg0(%r13)
-		st 	%r1,__LC_SUBCHANNEL_ID
-                lpsw 	0
-		sigp    0,0,0(6)               
-.Ldisab:	st      %r14,.Ldispsw+4-.Lpg0(%r13)
+		st	%r1,__LC_SUBCHANNEL_ID
+		lpsw	0
+		sigp	0,0,0(6)
+.Ldisab:	st	%r14,.Ldispsw+4-.Lpg0(%r13)
 		lpsw	.Ldispsw-.Lpg0(%r13)
-                .align 	8
+		.align	8
 .Lclkcmp:	.quad	0x0000000000000000
 .Lall:		.long	0xff000000
-.Lnull:		.long   0x00000000
+.Lnull:		.long	0x00000000
 .Lctlsave1:	.long	0x00000000
 .Lctlsave2:	.long	0x00000000
-                .align 	8
-.Lnewpsw:	.long   0x00080000,0x80000000+.Lpg1
-.Lpcnew:  	.long   0x00080000,0x80000000+.Lecs
-.Lionew:	.long   0x00080000,0x80000000+.Lcont
+		.align	8
+.Lnewpsw:	.long	0x00080000,0x80000000+.Lpg1
+.Lpcnew:	.long	0x00080000,0x80000000+.Lecs
+.Lionew:	.long	0x00080000,0x80000000+.Lcont
 .Lwaitpsw:	.long	0x020a0000,0x00000000+.Ltpi
-.Ldispsw:	.long   0x000a0000,0x00000000
-.Liplccws:	.long   0x02000000,0x60000018
-		.long   0x08000008,0x20000001
+.Ldispsw:	.long	0x000a0000,0x00000000
+.Liplccws:	.long	0x02000000,0x60000018
+		.long	0x08000008,0x20000001
 .Liplorb:	.long	0x0049504c,0x0040ff80
 		.long	0x00000000+.Liplccws
-.Lschib:        .long   0x00000000,0x00000000
-		.long   0x00000000,0x00000000
-		.long   0x00000000,0x00000000
-		.long   0x00000000,0x00000000
-		.long   0x00000000,0x00000000
-		.long   0x00000000,0x00000000
+.Lschib:	.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
 .Liplirb:	.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
@@ -92,6 +92,3 @@ do_reipl_asm:	basr	%r13,0
 		.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
-	
-
-	
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 95bd1e234f63..de7435054f7c 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -4,7 +4,7 @@
  *  S390 version
  *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
-	         Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  */
 
 #include <asm/lowcore.h>
@@ -32,46 +32,46 @@ do_reipl_asm:	basr	%r13,0
 		stctg	%c0,%c0,.Lregsave-.Lpg0(%r13)
 		ni	.Lregsave+4-.Lpg0(%r13),0xef
 		lctlg	%c0,%c0,.Lregsave-.Lpg0(%r13)
-                lgr     %r1,%r2
-        	mvc     __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
-                stsch   .Lschib-.Lpg0(%r13)                                    
-	        oi      .Lschib+5-.Lpg0(%r13),0x84 
-.Lecs:  	xi      .Lschib+27-.Lpg0(%r13),0x01 
-        	msch    .Lschib-.Lpg0(%r13) 
-	        lghi    %r0,5
-.Lssch:		ssch	.Liplorb-.Lpg0(%r13)           
+		lgr	%r1,%r2
+		mvc	__LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
+		stsch	.Lschib-.Lpg0(%r13)
+		oi	.Lschib+5-.Lpg0(%r13),0x84
+.Lecs:  	xi	.Lschib+27-.Lpg0(%r13),0x01
+		msch	.Lschib-.Lpg0(%r13)
+		lghi	%r0,5
+.Lssch:		ssch	.Liplorb-.Lpg0(%r13)
 		jz	.L001
-		brct    %r0,.Lssch   
+		brct	%r0,.Lssch
 		bas	%r14,.Ldisab-.Lpg0(%r13)
-.L001:		mvc	__LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)	
-.Ltpi:		lpswe	.Lwaitpsw-.Lpg0(%r13)          
+.L001:		mvc	__LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
+.Ltpi:		lpswe	.Lwaitpsw-.Lpg0(%r13)
 .Lcont:		c	%r1,__LC_SUBCHANNEL_ID
 		jnz	.Ltpi
 		clc	__LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
 		jnz	.Ltpi
-		tsch	.Liplirb-.Lpg0(%r13)           
+		tsch	.Liplirb-.Lpg0(%r13)
 		tm	.Liplirb+9-.Lpg0(%r13),0xbf
-                jz      .L002
-                bas     %r14,.Ldisab-.Lpg0(%r13)    
-.L002:		tm	.Liplirb+8-.Lpg0(%r13),0xf3    
-                jz      .L003
-                bas     %r14,.Ldisab-.Lpg0(%r13)	
+		jz	.L002
+		bas	%r14,.Ldisab-.Lpg0(%r13)
+.L002:		tm	.Liplirb+8-.Lpg0(%r13),0xf3
+		jz	.L003
+		bas	%r14,.Ldisab-.Lpg0(%r13)
 .L003:		spx	.Lnull-.Lpg0(%r13)
-		st 	%r1,__LC_SUBCHANNEL_ID
-                lhi     %r1,0            # mode 0 = esa
-                slr     %r0,%r0          # set cpuid to zero
-                sigp    %r1,%r0,0x12     # switch to esa mode
-                lpsw 	0
-.Ldisab:	sll    %r14,1
-		srl    %r14,1            # need to kill hi bit to avoid specification exceptions.
-		st     %r14,.Ldispsw+12-.Lpg0(%r13)
+		st	%r1,__LC_SUBCHANNEL_ID
+		lhi	%r1,0		 # mode 0 = esa
+		slr	%r0,%r0 	 # set cpuid to zero
+		sigp	%r1,%r0,0x12	 # switch to esa mode
+		lpsw	0
+.Ldisab:	sll	%r14,1
+		srl	%r14,1		 # need to kill hi bit to avoid specification exceptions.
+		st	%r14,.Ldispsw+12-.Lpg0(%r13)
 		lpswe	.Ldispsw-.Lpg0(%r13)
-                .align 	8
+		.align	8
 .Lclkcmp:	.quad	0x0000000000000000
 .Lall:		.quad	0x00000000ff000000
 .Lregsave:	.quad	0x0000000000000000
-.Lnull:		.long   0x0000000000000000
-                .align 	16
+.Lnull:		.long	0x0000000000000000
+		.align	16
 /*
  * These addresses have to be 31 bit otherwise
  * the sigp will throw a specification exception
@@ -81,26 +81,26 @@ do_reipl_asm:	basr	%r13,0
  * 31bit lpswe instruction, a fact they appear to have
  * omitted from the pop.
  */
-.Lnewpsw:	.quad   0x0000000080000000
-		.quad   .Lpg1
-.Lpcnew:	.quad   0x0000000080000000
-	  	.quad   .Lecs
-.Lionew:	.quad   0x0000000080000000
-		.quad   .Lcont
+.Lnewpsw:	.quad	0x0000000080000000
+		.quad	.Lpg1
+.Lpcnew:	.quad	0x0000000080000000
+		.quad	.Lecs
+.Lionew:	.quad	0x0000000080000000
+		.quad	.Lcont
 .Lwaitpsw:	.quad	0x0202000080000000
-		.quad   .Ltpi
-.Ldispsw:	.quad   0x0002000080000000
-		.quad   0x0000000000000000
-.Liplccws:	.long   0x02000000,0x60000018
-		.long   0x08000008,0x20000001
+		.quad	.Ltpi
+.Ldispsw:	.quad	0x0002000080000000
+		.quad	0x0000000000000000
+.Liplccws:	.long	0x02000000,0x60000018
+		.long	0x08000008,0x20000001
 .Liplorb:	.long	0x0049504c,0x0040ff80
 		.long	0x00000000+.Liplccws
-.Lschib:        .long   0x00000000,0x00000000
-		.long   0x00000000,0x00000000
-		.long   0x00000000,0x00000000
-		.long   0x00000000,0x00000000
-		.long   0x00000000,0x00000000
-		.long   0x00000000,0x00000000
+.Lschib:	.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
+		.long	0x00000000,0x00000000
 .Liplirb:	.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
@@ -109,4 +109,3 @@ do_reipl_asm:	basr	%r13,0
 		.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
 		.long	0x00000000,0x00000000
-	
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index 2a25ec7147ff..f9899ff2e5b0 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -3,7 +3,7 @@
  *
  * (C) Copyright IBM Corp. 2005
  *
- * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com>
+ * Author(s): Rolf Adelsberger,
  *	      Heiko Carstens <heiko.carstens@de.ibm.com>
  *
  */
@@ -24,14 +24,14 @@
 	.text
 	.globl		relocate_kernel
 	relocate_kernel:
-		basr	%r13,0		#base address
+		basr	%r13,0		# base address
 	.base:
-		stnsm	sys_msk-.base(%r13),0xf8	#disable DAT and IRQ (external)
-		spx	zero64-.base(%r13)	#absolute addressing mode
+		stnsm	sys_msk-.base(%r13),0xf8	# disable DAT and IRQ (external)
+		spx	zero64-.base(%r13)	# absolute addressing mode
 		stctl	%c0,%c15,ctlregs-.base(%r13)
 		stm	%r0,%r15,gprregs-.base(%r13)
 		la	%r1,load_psw-.base(%r13)
-		mvc     0(8,%r0),0(%r1)
+		mvc	0(8,%r0),0(%r1)
 		la	%r0,.back-.base(%r13)
 		st	%r0,4(%r0)
 		oi	4(%r0),0x80
@@ -51,50 +51,50 @@
 	.back_pgm:
 		lm	%r0,%r15,gprregs-.base(%r13)
 	.start_reloc:
-		lhi	%r10,-1		#preparing the mask
-		sll	%r10,12		#shift it such that it becomes 0xf000
+		lhi	%r10,-1		# preparing the mask
+		sll	%r10,12		# shift it such that it becomes 0xf000
 	.top:
-		lhi	%r7,4096	#load PAGE_SIZE in r7
-		lhi	%r9,4096	#load PAGE_SIZE in r9
-		l	%r5,0(%r2)	#read another word for indirection page
-		ahi	%r2,4		#increment pointer
-		tml	%r5,0x1		#is it a destination page?
-		je	.indir_check	#NO, goto "indir_check"
-		lr	%r6,%r5		#r6 = r5
-		nr	%r6,%r10	#mask it out and...
-		j	.top		#...next iteration
+		lhi	%r7,4096	# load PAGE_SIZE in r7
+		lhi	%r9,4096	# load PAGE_SIZE in r9
+		l	%r5,0(%r2)	# read another word for indirection page
+		ahi	%r2,4		# increment pointer
+		tml	%r5,0x1		# is it a destination page?
+		je	.indir_check	# NO, goto "indir_check"
+		lr	%r6,%r5		# r6 = r5
+		nr	%r6,%r10	# mask it out and...
+		j	.top		# ...next iteration
 	.indir_check:
-		tml	%r5,0x2		#is it an indirection page?
-		je	.done_test	#NO, goto "done_test"
-		nr	%r5,%r10	#YES, mask out,
-		lr	%r2,%r5		#move it into the right register,
-		j	.top		#and read next...
+		tml	%r5,0x2		# is it an indirection page?
+		je	.done_test	# NO, goto "done_test"
+		nr	%r5,%r10	# YES, mask out,
+		lr	%r2,%r5		# move it into the right register,
+		j	.top		# and read next...
 	.done_test:
-		tml	%r5,0x4		#is it the done indicator?
-		je	.source_test	#NO! Well, then it should be the source indicator...
-		j	.done		#ok, lets finish it here...
+		tml	%r5,0x4		# is it the done indicator?
+		je	.source_test	# NO! Well, then it should be the source indicator...
+		j	.done		# ok, lets finish it here...
 	.source_test:
-		tml	%r5,0x8		#it should be a source indicator...
-		je	.top		#NO, ignore it...
-		lr	%r8,%r5		#r8 = r5
-		nr	%r8,%r10	#masking
-	0:	mvcle	%r6,%r8,0x0	#copy PAGE_SIZE bytes from r8 to r6 - pad with 0
+		tml	%r5,0x8		# it should be a source indicator...
+		je	.top		# NO, ignore it...
+		lr	%r8,%r5		# r8 = r5
+		nr	%r8,%r10	# masking
+	0:	mvcle	%r6,%r8,0x0	# copy PAGE_SIZE bytes from r8 to r6 - pad with 0
 		jo	0b
 		j	.top
 	.done:
-		sr	%r0,%r0		#clear register r0
-		la	%r4,load_psw-.base(%r13)	#load psw-address into the register
-		o	%r3,4(%r4)	#or load address into psw
+		sr	%r0,%r0		# clear register r0
+		la	%r4,load_psw-.base(%r13)	# load psw-address into the register
+		o	%r3,4(%r4)	# or load address into psw
 		st	%r3,4(%r4)
-		mvc	0(8,%r0),0(%r4)	#copy psw to absolute address 0
+		mvc	0(8,%r0),0(%r4)	# copy psw to absolute address 0
 		tm	have_diag308-.base(%r13),0x01
 		jno	.no_diag308
 		diag	%r0,%r0,0x308
 	.no_diag308:
-		sr	%r1,%r1		#clear %r1
-		sr	%r2,%r2		#clear %r2
-		sigp	%r1,%r2,0x12	#set cpuid to zero
-		lpsw	0		#hopefully start new kernel...
+		sr	%r1,%r1		# clear %r1
+		sr	%r2,%r2		# clear %r2
+		sigp	%r1,%r2,0x12	# set cpuid to zero
+		lpsw	0		# hopefully start new kernel...
 
 		.align	8
 	zero64:
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
index 8cdb86e8911f..4fb443042d9c 100644
--- a/arch/s390/kernel/relocate_kernel64.S
+++ b/arch/s390/kernel/relocate_kernel64.S
@@ -3,7 +3,7 @@
  *
  * (C) Copyright IBM Corp. 2005
  *
- * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com>
+ * Author(s): Rolf Adelsberger,
  *	      Heiko Carstens <heiko.carstens@de.ibm.com>
  *
  */
@@ -25,10 +25,10 @@
 	.text
 	.globl		relocate_kernel
 	relocate_kernel:
-		basr	%r13,0		#base address
+		basr	%r13,0		# base address
 	.base:
-		stnsm	sys_msk-.base(%r13),0xf8	#disable DAT and IRQs
-		spx	zero64-.base(%r13)	#absolute addressing mode
+		stnsm	sys_msk-.base(%r13),0xf8	# disable DAT and IRQs
+		spx	zero64-.base(%r13)	# absolute addressing mode
 		stctg	%c0,%c15,ctlregs-.base(%r13)
 		stmg	%r0,%r15,gprregs-.base(%r13)
 		lghi	%r0,3
@@ -37,16 +37,16 @@
 		la	%r0,.back_pgm-.base(%r13)
 		stg	%r0,0x1d8(%r0)
 		la	%r1,load_psw-.base(%r13)
-		mvc     0(8,%r0),0(%r1)
+		mvc	0(8,%r0),0(%r1)
 		la	%r0,.back-.base(%r13)
 		st	%r0,4(%r0)
 		oi	4(%r0),0x80
 		lghi	%r0,0
 		diag	%r0,%r0,0x308
 	.back:
-		lhi	%r1,1		#mode 1 = esame
-		sigp	%r1,%r0,0x12	#switch to esame mode
-		sam64			#switch to 64 bit addressing mode
+		lhi	%r1,1		# mode 1 = esame
+		sigp	%r1,%r0,0x12	# switch to esame mode
+		sam64			# switch to 64 bit addressing mode
 		basr	%r13,0
 	.back_base:
 		oi	have_diag308-.back_base(%r13),0x01
@@ -56,50 +56,50 @@
 	.back_pgm:
 		lmg	%r0,%r15,gprregs-.base(%r13)
 	.top:
-		lghi	%r7,4096	#load PAGE_SIZE in r7
-		lghi	%r9,4096	#load PAGE_SIZE in r9
-		lg	%r5,0(%r2)	#read another word for indirection page
-		aghi	%r2,8		#increment pointer
-		tml	%r5,0x1		#is it a destination page?
-		je	.indir_check	#NO, goto "indir_check"
-		lgr	%r6,%r5		#r6 = r5
-		nill	%r6,0xf000	#mask it out and...
-		j	.top		#...next iteration
+		lghi	%r7,4096	# load PAGE_SIZE in r7
+		lghi	%r9,4096	# load PAGE_SIZE in r9
+		lg	%r5,0(%r2)	# read another word for indirection page
+		aghi	%r2,8		# increment pointer
+		tml	%r5,0x1		# is it a destination page?
+		je	.indir_check	# NO, goto "indir_check"
+		lgr	%r6,%r5		# r6 = r5
+		nill	%r6,0xf000	# mask it out and...
+		j	.top		# ...next iteration
 	.indir_check:
-		tml     %r5,0x2		#is it an indirection page?
-		je      .done_test	#NO, goto "done_test"
-		nill    %r5,0xf000	#YES, mask out,
-		lgr     %r2,%r5		#move it into the right register,
-		j       .top		#and read next...
+		tml	%r5,0x2		# is it an indirection page?
+		je	.done_test	# NO, goto "done_test"
+		nill	%r5,0xf000	# YES, mask out,
+		lgr	%r2,%r5		# move it into the right register,
+		j	.top		# and read next...
 	.done_test:
-		tml     %r5,0x4		#is it the done indicator?
-		je      .source_test	#NO! Well, then it should be the source indicator...
-		j       .done		#ok, lets finish it here...
+		tml	%r5,0x4		# is it the done indicator?
+		je	.source_test	# NO! Well, then it should be the source indicator...
+		j	.done		# ok, lets finish it here...
 	.source_test:
-		tml     %r5,0x8		#it should be a source indicator...
-		je      .top		#NO, ignore it...
-		lgr     %r8,%r5		#r8 = r5
-		nill    %r8,0xf000	#masking
-	0:	mvcle   %r6,%r8,0x0	#copy PAGE_SIZE bytes from r8 to r6 - pad with 0
+		tml	%r5,0x8		# it should be a source indicator...
+		je	.top		# NO, ignore it...
+		lgr	%r8,%r5		# r8 = r5
+		nill	%r8,0xf000	# masking
+	0:	mvcle	%r6,%r8,0x0	# copy PAGE_SIZE bytes from r8 to r6 - pad with 0
 		jo	0b
-		j       .top
+		j	.top
 	.done:
-		sgr     %r0,%r0		#clear register r0
-		la      %r4,load_psw-.base(%r13)	#load psw-address into the register
-		o	%r3,4(%r4)	#or load address into psw
+		sgr	%r0,%r0		# clear register r0
+		la	%r4,load_psw-.base(%r13)	# load psw-address into the register
+		o	%r3,4(%r4)	# or load address into psw
 		st	%r3,4(%r4)
-		mvc     0(8,%r0),0(%r4)	#copy psw to absolute address 0
+		mvc	0(8,%r0),0(%r4)	# copy psw to absolute address 0
 		tm	have_diag308-.base(%r13),0x01
 		jno	.no_diag308
 		diag	%r0,%r0,0x308
 	.no_diag308:
-		sam31			#31 bit mode
-		sr      %r1,%r1		#erase register r1
-		sr      %r2,%r2		#erase register r2
-		sigp    %r1,%r2,0x12	#set cpuid to zero
-		lpsw    0		#hopefully start new kernel...
+		sam31			# 31 bit mode
+		sr	%r1,%r1		# erase register r1
+		sr	%r2,%r2		# erase register r2
+		sigp	%r1,%r2,0x12	# set cpuid to zero
+		lpsw	0		# hopefully start new kernel...
 
-	        .align	8
+		.align	8
 	zero64:
 		.quad	0
 	load_psw: