author		Sam Ravnborg <sam@ravnborg.org>		2008-12-03 03:10:25 -0800
committer	David S. Miller <davem@davemloft.net>	2008-12-04 09:17:19 -0800
commit		478b8fecda511942404ac232897a718cecd13e48 (patch)
tree		e022fce8ca92f93a000e1cd686255f0b126e9fd5 /arch/sparc/lib
parent		18269c0fd4db9bec2668f895f21d742486ccb90f (diff)
sparc,sparc64: unify lib/
o Renamed files in sparc64 to <name>_64.S when identical
  to sparc32 files.
o iomap.c was identical for sparc32 and sparc64
o adjusted sparc/Makefile now that we have only one lib/

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/lib')
-rw-r--r--	arch/sparc/lib/GENbzero.S	160
-rw-r--r--	arch/sparc/lib/GENcopy_from_user.S	34
-rw-r--r--	arch/sparc/lib/GENcopy_to_user.S	38
-rw-r--r--	arch/sparc/lib/GENmemcpy.S	121
-rw-r--r--	arch/sparc/lib/GENpage.S	77
-rw-r--r--	arch/sparc/lib/GENpatch.S	33
-rw-r--r--	arch/sparc/lib/Makefile	22
-rw-r--r--	arch/sparc/lib/NG2copy_from_user.S	40
-rw-r--r--	arch/sparc/lib/NG2copy_to_user.S	49
-rw-r--r--	arch/sparc/lib/NG2memcpy.S	520
-rw-r--r--	arch/sparc/lib/NG2page.S	61
-rw-r--r--	arch/sparc/lib/NG2patch.S	33
-rw-r--r--	arch/sparc/lib/NGbzero.S	164
-rw-r--r--	arch/sparc/lib/NGcopy_from_user.S	37
-rw-r--r--	arch/sparc/lib/NGcopy_to_user.S	40
-rw-r--r--	arch/sparc/lib/NGmemcpy.S	425
-rw-r--r--	arch/sparc/lib/NGpage.S	99
-rw-r--r--	arch/sparc/lib/NGpatch.S	33
-rw-r--r--	arch/sparc/lib/PeeCeeI.c	203
-rw-r--r--	arch/sparc/lib/U1copy_from_user.S	33
-rw-r--r--	arch/sparc/lib/U1copy_to_user.S	33
-rw-r--r--	arch/sparc/lib/U1memcpy.S	563
-rw-r--r--	arch/sparc/lib/U3copy_from_user.S	22
-rw-r--r--	arch/sparc/lib/U3copy_to_user.S	33
-rw-r--r--	arch/sparc/lib/U3memcpy.S	422
-rw-r--r--	arch/sparc/lib/U3patch.S	33
-rw-r--r--	arch/sparc/lib/VISsave.S	144
-rw-r--r--	arch/sparc/lib/atomic_64.S	138
-rw-r--r--	arch/sparc/lib/bitops.S	141
-rw-r--r--	arch/sparc/lib/bzero.S	158
-rw-r--r--	arch/sparc/lib/checksum_64.S	173
-rw-r--r--	arch/sparc/lib/clear_page.S	103
-rw-r--r--	arch/sparc/lib/copy_in_user.S	119
-rw-r--r--	arch/sparc/lib/copy_page.S	250
-rw-r--r--	arch/sparc/lib/csum_copy.S	309
-rw-r--r--	arch/sparc/lib/csum_copy_from_user.S	21
-rw-r--r--	arch/sparc/lib/csum_copy_to_user.S	21
-rw-r--r--	arch/sparc/lib/ipcsum.S	34
-rw-r--r--	arch/sparc/lib/mcount.S	143
-rw-r--r--	arch/sparc/lib/memcmp_64.S	28
-rw-r--r--	arch/sparc/lib/memmove.S	31
-rw-r--r--	arch/sparc/lib/memscan_64.S	129
-rw-r--r--	arch/sparc/lib/rwsem_64.S	163
-rw-r--r--	arch/sparc/lib/strlen_64.S	80
-rw-r--r--	arch/sparc/lib/strlen_user_64.S	95
-rw-r--r--	arch/sparc/lib/strncmp_64.S	32
-rw-r--r--	arch/sparc/lib/strncpy_from_user_64.S	135
-rw-r--r--	arch/sparc/lib/user_fixup.c	66
-rw-r--r--	arch/sparc/lib/xor.S	652
49 files changed, 6493 insertions, 0 deletions
diff --git a/arch/sparc/lib/GENbzero.S b/arch/sparc/lib/GENbzero.S
new file mode 100644
index 000000000000..6a4f956a2f7a
--- /dev/null
+++ b/arch/sparc/lib/GENbzero.S
@@ -0,0 +1,160 @@
+/* GENbzero.S: Generic sparc64 memset/clear_user.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+#include <asm/asi.h>
+
+#define EX_ST(x,y)		\
+98:	x,y;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	%o1, %o0;	\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+	.align	32
+	.text
+
+	.globl		GENmemset
+	.type		GENmemset, #function
+GENmemset:		/* %o0=buf, %o1=pat, %o2=len */
+	and		%o1, 0xff, %o3
+	mov		%o2, %o1
+	sllx		%o3, 8, %g1
+	or		%g1, %o3, %o2
+	sllx		%o2, 16, %g1
+	or		%g1, %o2, %o2
+	sllx		%o2, 32, %g1
+	ba,pt		%xcc, 1f
+	 or		%g1, %o2, %o2
+
+	.globl		GENbzero
+	.type		GENbzero, #function
+GENbzero:
+	clr		%o2
+1:	brz,pn		%o1, GENbzero_return
+	 mov		%o0, %o3
+
+	/* %o5: saved %asi, restored at GENbzero_done
+	 * %o4:	store %asi to use
+	 */
+	rd		%asi, %o5
+	mov		ASI_P, %o4
+	wr		%o4, 0x0, %asi
+
+GENbzero_from_clear_user:
+	cmp		%o1, 15
+	bl,pn		%icc, GENbzero_tiny
+	 andcc		%o0, 0x7, %g1
+	be,pt		%xcc, 2f
+	 mov		8, %g2
+	sub		%g2, %g1, %g1
+	sub		%o1, %g1, %o1
+1:	EX_ST(stba %o2, [%o0 + 0x00] %asi)
+	subcc		%g1, 1, %g1
+	bne,pt		%xcc, 1b
+	 add		%o0, 1, %o0
+2:	cmp		%o1, 128
+	bl,pn		%icc, GENbzero_medium
+	 andcc		%o0, (64 - 1), %g1
+	be,pt		%xcc, GENbzero_pre_loop
+	 mov		64, %g2
+	sub		%g2, %g1, %g1
+	sub		%o1, %g1, %o1
+1:	EX_ST(stxa %o2, [%o0 + 0x00] %asi)
+	subcc		%g1, 8, %g1
+	bne,pt		%xcc, 1b
+	 add		%o0, 8, %o0
+
+GENbzero_pre_loop:
+	andn		%o1, (64 - 1), %g1
+	sub		%o1, %g1, %o1
+GENbzero_loop:
+	EX_ST(stxa %o2, [%o0 + 0x00] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x08] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x10] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x18] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x20] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x28] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x30] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x38] %asi)
+	subcc		%g1, 64, %g1
+	bne,pt		%xcc, GENbzero_loop
+	 add		%o0, 64, %o0
+
+	membar		#Sync
+	wr		%o4, 0x0, %asi
+	brz,pn		%o1, GENbzero_done
+GENbzero_medium:
+	 andncc		%o1, 0x7, %g1
+	be,pn		%xcc, 2f
+	 sub		%o1, %g1, %o1
+1:	EX_ST(stxa %o2, [%o0 + 0x00] %asi)
+	subcc		%g1, 8, %g1
+	bne,pt		%xcc, 1b
+	 add		%o0, 8, %o0
+2:	brz,pt		%o1, GENbzero_done
+	 nop
+
+GENbzero_tiny:
+1:	EX_ST(stba %o2, [%o0 + 0x00] %asi)
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 1, %o0
+
+	/* fallthrough */
+
+GENbzero_done:
+	wr		%o5, 0x0, %asi
+
+GENbzero_return:
+	retl
+	 mov		%o3, %o0
+	.size		GENbzero, .-GENbzero
+	.size		GENmemset, .-GENmemset
+
+	.globl		GENclear_user
+	.type		GENclear_user, #function
+GENclear_user:		/* %o0=buf, %o1=len */
+	rd		%asi, %o5
+	brz,pn		%o1, GENbzero_done
+	 clr		%o3
+	cmp		%o5, ASI_AIUS
+	bne,pn		%icc, GENbzero
+	 clr		%o2
+	ba,pt		%xcc, GENbzero_from_clear_user
+	 mov		ASI_AIUS, %o4
+	.size		GENclear_user, .-GENclear_user
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define GEN_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	generic_patch_bzero
+	.type	generic_patch_bzero,#function
+generic_patch_bzero:
+	GEN_DO_PATCH(memset, GENmemset)
+	GEN_DO_PATCH(__bzero, GENbzero)
+	GEN_DO_PATCH(__clear_user, GENclear_user)
+	retl
+	 nop
+	.size	generic_patch_bzero,.-generic_patch_bzero
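Aside: the eight instructions at the top of GENmemset replicate the low byte of the fill pattern across a 64-bit word before any of the store loops run. The same sequence, as a minimal C sketch (illustrative only, not part of the patch):

#include <stdint.h>

/* Smear the low byte of pat across all eight byte lanes, mirroring
 * the and/sllx/or sequence at the top of GENmemset. */
static uint64_t replicate_fill_byte(uint64_t pat)
{
	uint64_t v = pat & 0xff;	/* and %o1, 0xff, %o3 */

	v |= v << 8;			/* two identical bytes   */
	v |= v << 16;			/* four identical bytes  */
	v |= v << 32;			/* eight identical bytes */
	return v;
}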
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S
new file mode 100644
index 000000000000..2b9df99e87f9
--- /dev/null
+++ b/arch/sparc/lib/GENcopy_from_user.S
@@ -0,0 +1,34 @@
+/* GENcopy_from_user.S: Generic sparc64 copy from userspace.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS	0x11
+#endif
+
+#define FUNC_NAME		GENcopy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] ASI_AIUS, dest
+#define EX_RETVAL(x)		0
+
+#ifdef __KERNEL__
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop
+#endif
+
+#include "GENmemcpy.S"
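Aside: the EX_LD wrapper is the kernel's exception-table idiom. Label 98 marks the possibly-faulting access, label 99 the recovery stub, and the ".word 98b, 99b" directive records the pair in the __ex_table section. The lookup the fault handler performs is roughly the following (a simplified sketch; the real sparc64 search operates on a sorted table):

/* One entry per EX_LD/EX_ST site. */
struct exception_table_entry {
	unsigned int insn;	/* address of the faulting access */
	unsigned int fixup;	/* address to resume at instead   */
};

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

static unsigned long search_exception_table(unsigned long fault_pc)
{
	struct exception_table_entry *e;

	for (e = __start___ex_table; e < __stop___ex_table; e++)
		if (e->insn == fault_pc)
			return e->fixup;
	return 0;		/* no entry: a genuine kernel fault */
}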
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S
new file mode 100644
index 000000000000..bb3f7084daf9
--- /dev/null
+++ b/arch/sparc/lib/GENcopy_to_user.S
@@ -0,0 +1,38 @@
+/* GENcopy_to_user.S: Generic sparc64 copy to userspace.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS	0x11
+#endif
+
+#define FUNC_NAME		GENcopy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define EX_RETVAL(x)		0
+
+#ifdef __KERNEL__
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop
+#endif
+
+#include "GENmemcpy.S"
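For reference, the PREAMBLE implements the dispatch described in the comment above it. In pseudo-C (illustrative; current_asi() and copy_with_user_asi() are hypothetical stand-ins for reading %asi and falling through into the ASI_AIUS copy loops):

long GENcopy_to_user(void __user *dst, const void *src, unsigned long len)
{
	if (current_asi() != ASI_AIUS)	/* a set_fs(KERNEL_DS) caller */
		return memcpy_user_stub(dst, src, len);
	return copy_with_user_asi(dst, src, len);
}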
diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S
new file mode 100644
index 000000000000..89358ee94851
--- /dev/null
+++ b/arch/sparc/lib/GENmemcpy.S
@@ -0,0 +1,121 @@
+/* GENmemcpy.S: Generic sparc64 memcpy.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#ifdef __KERNEL__
+#define GLOBAL_SPARE	%g7
+#else
+#define GLOBAL_SPARE	%g5
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	GENmemcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align		64
+
+	.globl	FUNC_NAME
+	.type	FUNC_NAME,#function
+FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+	srlx		%o2, 31, %g2
+	cmp		%g2, 0
+	tne		%XCC, 5
+	PREAMBLE
+	mov		%o0, GLOBAL_SPARE
+
+	cmp		%o2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	blu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3
+
+	xor		%o0, %o1, %o4
+	andcc		%o4, 0x7, %g0
+	bne,a,pn	%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+	and		%o0, 0x7, %o4
+	sub		%o4, 0x8, %o4
+	sub		%g0, %o4, %o4
+	sub		%o2, %o4, %o2
+1:	subcc		%o4, 1, %o4
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o0))
+	add		%o1, 1, %o1
+	bne,pt		%XCC, 1b
+	add		%o0, 1, %o0
+
+	andn		%o2, 0x7, %g1
+	sub		%o2, %g1, %o2
+1:	subcc		%g1, 0x8, %g1
+	EX_LD(LOAD(ldx, %o1, %g2))
+	EX_ST(STORE(stx, %g2, %o0))
+	add		%o1, 0x8, %o1
+	bne,pt		%XCC, 1b
+	 add		%o0, 0x8, %o0
+
+	brz,pt		%o2, 85f
+	 sub		%o0, %o1, %o3
+	ba,a,pt		%XCC, 90f
+
+	.align		64
+80: /* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+1:
+	subcc		%o2, 4, %o2
+	EX_LD(LOAD(lduw, %o1, %g1))
+	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 mov		EX_RETVAL(GLOBAL_SPARE), %o0
+
+	.align		32
+90:
+	subcc		%o2, 1, %o2
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 mov		EX_RETVAL(GLOBAL_SPARE), %o0
+
+	.size		FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/GENpage.S b/arch/sparc/lib/GENpage.S
new file mode 100644
index 000000000000..2ef9d05f21bc
--- /dev/null
+++ b/arch/sparc/lib/GENpage.S
@@ -0,0 +1,77 @@
+/* GENpage.S: Generic clear and copy page.
+ *
+ * Copyright (C) 2007 (davem@davemloft.net)
+ */
+#include <asm/page.h>
+
+	.text
+	.align	32
+
+GENcopy_user_page:
+	set	PAGE_SIZE, %g7
+1:	ldx	[%o1 + 0x00], %o2
+	ldx	[%o1 + 0x08], %o3
+	ldx	[%o1 + 0x10], %o4
+	ldx	[%o1 + 0x18], %o5
+	stx	%o2, [%o0 + 0x00]
+	stx	%o3, [%o0 + 0x08]
+	stx	%o4, [%o0 + 0x10]
+	stx	%o5, [%o0 + 0x18]
+	ldx	[%o1 + 0x20], %o2
+	ldx	[%o1 + 0x28], %o3
+	ldx	[%o1 + 0x30], %o4
+	ldx	[%o1 + 0x38], %o5
+	stx	%o2, [%o0 + 0x20]
+	stx	%o3, [%o0 + 0x28]
+	stx	%o4, [%o0 + 0x30]
+	stx	%o5, [%o0 + 0x38]
+	subcc	%g7, 64, %g7
+	add	%o1, 64, %o1
+	bne,pt	%xcc, 1b
+	 add	%o0, 64, %o0
+	retl
+	 nop
+
+GENclear_page:
+GENclear_user_page:
+	set	PAGE_SIZE, %g7
+1:	stx	%g0, [%o0 + 0x00]
+	stx	%g0, [%o0 + 0x08]
+	stx	%g0, [%o0 + 0x10]
+	stx	%g0, [%o0 + 0x18]
+	stx	%g0, [%o0 + 0x20]
+	stx	%g0, [%o0 + 0x28]
+	stx	%g0, [%o0 + 0x30]
+	stx	%g0, [%o0 + 0x38]
+	subcc	%g7, 64, %g7
+	bne,pt	%xcc, 1b
+	 add	%o0, 64, %o0
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define GEN_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	generic_patch_pageops
+	.type	generic_patch_pageops,#function
+generic_patch_pageops:
+	GEN_DO_PATCH(copy_user_page, GENcopy_user_page)
+	GEN_DO_PATCH(_clear_page, GENclear_page)
+	GEN_DO_PATCH(clear_user_page, GENclear_user_page)
+	retl
+	 nop
+	.size	generic_patch_pageops,.-generic_patch_pageops
diff --git a/arch/sparc/lib/GENpatch.S b/arch/sparc/lib/GENpatch.S
new file mode 100644
index 000000000000..fab9e89f16bd
--- /dev/null
+++ b/arch/sparc/lib/GENpatch.S
@@ -0,0 +1,33 @@
+/* GENpatch.S: Patch Ultra-I routines with generic variant.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define GEN_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	generic_patch_copyops
+	.type	generic_patch_copyops,#function
+generic_patch_copyops:
+	GEN_DO_PATCH(memcpy, GENmemcpy)
+	GEN_DO_PATCH(___copy_from_user, GENcopy_from_user)
+	GEN_DO_PATCH(___copy_to_user, GENcopy_to_user)
+	retl
+	 nop
+	.size	generic_patch_copyops,.-generic_patch_copyops
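Aside: GEN_DO_PATCH (and the identical NG/NG2/U3 variants elsewhere in this patch) rewrites the first two instructions of OLD into an unconditional branch to NEW plus a nop, so every later call to the generic symbol lands in the cpu-specific routine. A C sketch of the encoding it performs (illustrative; the constants come from the macro itself):

#include <stdint.h>

#define BRANCH_ALWAYS	0x10680000	/* "ba,pt %xcc, <disp19>" template */
#define NOP		0x01000000

static void do_patch(uint32_t *old, uint32_t *new_func)
{
	uint32_t disp = (uint32_t)((uintptr_t)new_func - (uintptr_t)old);

	/* The sll 11 / srl (11 + 2) pair keeps the low 21 bits of the
	 * byte displacement and turns them into the 19-bit word
	 * displacement the branch encoding expects. */
	old[0] = BRANCH_ALWAYS | ((disp << 11) >> (11 + 2));
	old[1] = NOP;
	/* the trailing "flush %g2" makes the I-cache see the new words */
}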
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index f2650545c774..05ae5c945e35 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -18,5 +18,27 @@ lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-y                 += rwsem_$(BITS).o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 
+lib-$(CONFIG_SPARC64) += PeeCeeI.o copy_page.o clear_page.o bzero.o
+lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
+lib-$(CONFIG_SPARC64) += VISsave.o
+lib-$(CONFIG_SPARC64) += bitops.o
+
+lib-$(CONFIG_SPARC64) += U1memcpy.o U1copy_from_user.o U1copy_to_user.o
+
+lib-$(CONFIG_SPARC64) += U3memcpy.o U3copy_from_user.o U3copy_to_user.o
+lib-$(CONFIG_SPARC64) += U3patch.o
+
+lib-$(CONFIG_SPARC64) += NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o
+lib-$(CONFIG_SPARC64) += NGpatch.o NGpage.o NGbzero.o
+
+lib-$(CONFIG_SPARC64) += NG2memcpy.o NG2copy_from_user.o NG2copy_to_user.o
+lib-$(CONFIG_SPARC64) +=  NG2patch.o NG2page.o
+
+lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
+lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
+
+lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
+lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o
+
 obj-y                 += iomap.o
 obj-$(CONFIG_SPARC32) += atomic32.o
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
new file mode 100644
index 000000000000..c77ef5f22102
--- /dev/null
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -0,0 +1,40 @@
+/* NG2copy_from_user.S: Niagara-2 optimized copy from userspace.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	wr	%g0, ASI_AIUS, %asi;\
+	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS	0x11
+#endif
+
+#ifndef ASI_BLK_AIUS_4V
+#define ASI_BLK_AIUS_4V	0x17
+#endif
+
+#define FUNC_NAME		NG2copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_AIUS_4V, dest
+#define EX_RETVAL(x)		0
+
+#ifdef __KERNEL__
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop
+#endif
+
+#include "NG2memcpy.S"
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
new file mode 100644
index 000000000000..4bd4093acbbd
--- /dev/null
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -0,0 +1,49 @@
+/* NG2copy_to_user.S: Niagara-2 optimized copy to userspace.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	wr	%g0, ASI_AIUS, %asi;\
+	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS	0x11
+#endif
+
+#ifndef ASI_BLK_AIUS_4V
+#define ASI_BLK_AIUS_4V	0x17
+#endif
+
+#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS
+#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23
+#endif
+
+#define FUNC_NAME		NG2copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define STORE_ASI		ASI_BLK_INIT_QUAD_LDD_AIUS
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS_4V
+#define EX_RETVAL(x)		0
+
+#ifdef __KERNEL__
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop
+#endif
+
+#include "NG2memcpy.S"
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
new file mode 100644
index 000000000000..0aed75653b50
--- /dev/null
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -0,0 +1,520 @@
+/* NG2memcpy.S: Niagara-2 optimized memcpy.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#ifdef __KERNEL__
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#define GLOBAL_SPARE	%g7
+#else
+#define ASI_PNF 0x82
+#define ASI_BLK_P 0xf0
+#define ASI_BLK_INIT_QUAD_LDD_P 0xe2
+#define FPRS_FEF  0x04
+#ifdef MEMCPY_DEBUG
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
+		     clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#else
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#endif
+#define GLOBAL_SPARE	%g5
+#endif
+
+#ifndef STORE_ASI
+#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
+#define STORE_ASI	ASI_BLK_INIT_QUAD_LDD_P
+#else
+#define STORE_ASI	0x80		/* ASI_P */
+#endif
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef LOAD_BLK
+#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
+#endif
+
+#ifndef STORE
+#ifndef MEMCPY_DEBUG
+#define STORE(type,src,addr)	type src, [addr]
+#else
+#define STORE(type,src,addr)	type##a src, [addr] 0x80
+#endif
+#endif
+
+#ifndef STORE_BLK
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
+#endif
+
+#ifndef STORE_INIT
+#define STORE_INIT(src,addr)	stxa src, [addr] STORE_ASI
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	NG2memcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+#define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \
+	faligndata	%x0, %x1, %f0; \
+	faligndata	%x1, %x2, %f2; \
+	faligndata	%x2, %x3, %f4; \
+	faligndata	%x3, %x4, %f6; \
+	faligndata	%x4, %x5, %f8; \
+	faligndata	%x5, %x6, %f10; \
+	faligndata	%x6, %x7, %f12; \
+	faligndata	%x7, %x8, %f14;
+
+#define FREG_MOVE_1(x0) \
+	fmovd		%x0, %f0;
+#define FREG_MOVE_2(x0, x1) \
+	fmovd		%x0, %f0; \
+	fmovd		%x1, %f2;
+#define FREG_MOVE_3(x0, x1, x2) \
+	fmovd		%x0, %f0; \
+	fmovd		%x1, %f2; \
+	fmovd		%x2, %f4;
+#define FREG_MOVE_4(x0, x1, x2, x3) \
+	fmovd		%x0, %f0; \
+	fmovd		%x1, %f2; \
+	fmovd		%x2, %f4; \
+	fmovd		%x3, %f6;
+#define FREG_MOVE_5(x0, x1, x2, x3, x4) \
+	fmovd		%x0, %f0; \
+	fmovd		%x1, %f2; \
+	fmovd		%x2, %f4; \
+	fmovd		%x3, %f6; \
+	fmovd		%x4, %f8;
+#define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \
+	fmovd		%x0, %f0; \
+	fmovd		%x1, %f2; \
+	fmovd		%x2, %f4; \
+	fmovd		%x3, %f6; \
+	fmovd		%x4, %f8; \
+	fmovd		%x5, %f10;
+#define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \
+	fmovd		%x0, %f0; \
+	fmovd		%x1, %f2; \
+	fmovd		%x2, %f4; \
+	fmovd		%x3, %f6; \
+	fmovd		%x4, %f8; \
+	fmovd		%x5, %f10; \
+	fmovd		%x6, %f12;
+#define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \
+	fmovd		%x0, %f0; \
+	fmovd		%x1, %f2; \
+	fmovd		%x2, %f4; \
+	fmovd		%x3, %f6; \
+	fmovd		%x4, %f8; \
+	fmovd		%x5, %f10; \
+	fmovd		%x6, %f12; \
+	fmovd		%x7, %f14;
+#define FREG_LOAD_1(base, x0) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0))
+#define FREG_LOAD_2(base, x0, x1) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1));
+#define FREG_LOAD_3(base, x0, x1, x2) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1)); \
+	EX_LD(LOAD(ldd, base + 0x10, %x2));
+#define FREG_LOAD_4(base, x0, x1, x2, x3) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1)); \
+	EX_LD(LOAD(ldd, base + 0x10, %x2)); \
+	EX_LD(LOAD(ldd, base + 0x18, %x3));
+#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1)); \
+	EX_LD(LOAD(ldd, base + 0x10, %x2)); \
+	EX_LD(LOAD(ldd, base + 0x18, %x3)); \
+	EX_LD(LOAD(ldd, base + 0x20, %x4));
+#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1)); \
+	EX_LD(LOAD(ldd, base + 0x10, %x2)); \
+	EX_LD(LOAD(ldd, base + 0x18, %x3)); \
+	EX_LD(LOAD(ldd, base + 0x20, %x4)); \
+	EX_LD(LOAD(ldd, base + 0x28, %x5));
+#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1)); \
+	EX_LD(LOAD(ldd, base + 0x10, %x2)); \
+	EX_LD(LOAD(ldd, base + 0x18, %x3)); \
+	EX_LD(LOAD(ldd, base + 0x20, %x4)); \
+	EX_LD(LOAD(ldd, base + 0x28, %x5)); \
+	EX_LD(LOAD(ldd, base + 0x30, %x6));
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align		64
+
+	.globl	FUNC_NAME
+	.type	FUNC_NAME,#function
+FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+	srlx		%o2, 31, %g2
+	cmp		%g2, 0
+	tne		%xcc, 5
+	PREAMBLE
+	mov		%o0, GLOBAL_SPARE
+	cmp		%o2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	blu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3
+
+	/* 2 blocks (128 bytes) is the minimum we can do the block
+	 * copy with.  We need to ensure that we'll iterate at least
+	 * once in the block copy loop.  At worst we'll need to align
+	 * the destination to a 64-byte boundary which can chew up
+	 * to (64 - 1) bytes from the length before we perform the
+	 * block copy loop.
+	 *
+	 * However, the cut-off point, performance wise, is around
+	 * 4 64-byte blocks.
+	 */
+	cmp		%o2, (4 * 64)
+	blu,pt		%XCC, 75f
+	 andcc		%o3, 0x7, %g0
+
+	/* %o0:	dst
+	 * %o1:	src
+	 * %o2:	len  (known to be >= 128)
+	 *
+	 * The block copy loops can use %o4, %g2, %g3 as
+	 * temporaries while copying the data.  %o5 must
+	 * be preserved between VISEntryHalf and VISExitHalf
+	 */
+
+	LOAD(prefetch, %o1 + 0x000, #one_read)
+	LOAD(prefetch, %o1 + 0x040, #one_read)
+	LOAD(prefetch, %o1 + 0x080, #one_read)
+
+	/* Align destination on 64-byte boundary.  */
+	andcc		%o0, (64 - 1), %o4
+	be,pt		%XCC, 2f
+	 sub		%o4, 64, %o4
+	sub		%g0, %o4, %o4	! bytes to align dst
+	sub		%o2, %o4, %o2
+1:	subcc		%o4, 1, %o4
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o0))
+	add		%o1, 1, %o1
+	bne,pt		%XCC, 1b
+	add		%o0, 1, %o0
+
+2:
+	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
+	 * o5 from here until we hit VISExitHalf.
+	 */
+	VISEntryHalf
+
+	alignaddr	%o1, %g0, %g0
+
+	add		%o1, (64 - 1), %o4
+	andn		%o4, (64 - 1), %o4
+	andn		%o2, (64 - 1), %g1
+	sub		%o2, %g1, %o2
+
+	and		%o1, (64 - 1), %g2
+	add		%o1, %g1, %o1
+	sub		%o0, %o4, %g3
+	brz,pt		%g2, 190f
+	 cmp		%g2, 32
+	blu,a		5f
+	 cmp		%g2, 16
+	cmp		%g2, 48
+	blu,a		4f
+	 cmp		%g2, 40
+	cmp		%g2, 56
+	blu		170f
+	 nop
+	ba,a,pt		%xcc, 180f
+
+4:	/* 32 <= low bits < 48 */
+	blu		150f
+	 nop
+	ba,a,pt		%xcc, 160f
+5:	/* 0 < low bits < 32 */
+	blu,a		6f
+	 cmp		%g2, 8
+	cmp		%g2, 24
+	blu		130f
+	 nop
+	ba,a,pt		%xcc, 140f
+6:	/* 0 < low bits < 16 */
+	bgeu		120f
+	 nop
+	/* fall through for 0 < low bits < 8 */
+110:	sub		%o4, 64, %g2
+	EX_LD(LOAD_BLK(%g2, %f0))
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
+	subcc		%g1, 64, %g1
+	add		%o4, 64, %o4
+	bne,pt		%xcc, 1b
+	 LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt		%xcc, 195f
+	 nop
+
+120:	sub		%o4, 56, %g2
+	FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
+	subcc		%g1, 64, %g1
+	add		%o4, 64, %o4
+	bne,pt		%xcc, 1b
+	 LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt		%xcc, 195f
+	 nop
+
+130:	sub		%o4, 48, %g2
+	FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
+	subcc		%g1, 64, %g1
+	add		%o4, 64, %o4
+	bne,pt		%xcc, 1b
+	 LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt		%xcc, 195f
+	 nop
+
+140:	sub		%o4, 40, %g2
+	FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_5(f22, f24, f26, f28, f30)
+	subcc		%g1, 64, %g1
+	add		%o4, 64, %o4
+	bne,pt		%xcc, 1b
+	 LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt		%xcc, 195f
+	 nop
+
+150:	sub		%o4, 32, %g2
+	FREG_LOAD_4(%g2, f0, f2, f4, f6)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_4(f24, f26, f28, f30)
+	subcc		%g1, 64, %g1
+	add		%o4, 64, %o4
+	bne,pt		%xcc, 1b
+	 LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt		%xcc, 195f
+	 nop
+
+160:	sub		%o4, 24, %g2
+	FREG_LOAD_3(%g2, f0, f2, f4)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_3(f26, f28, f30)
+	subcc		%g1, 64, %g1
+	add		%o4, 64, %o4
+	bne,pt		%xcc, 1b
+	 LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt		%xcc, 195f
+	 nop
+
+170:	sub		%o4, 16, %g2
+	FREG_LOAD_2(%g2, f0, f2)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_2(f28, f30)
+	subcc		%g1, 64, %g1
+	add		%o4, 64, %o4
+	bne,pt		%xcc, 1b
+	 LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt		%xcc, 195f
+	 nop
+
+180:	sub		%o4, 8, %g2
+	FREG_LOAD_1(%g2, f0)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_1(f30)
+	subcc		%g1, 64, %g1
+	add		%o4, 64, %o4
+	bne,pt		%xcc, 1b
+	 LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt		%xcc, 195f
+	 nop
+
+190:
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	subcc		%g1, 64, %g1
+	EX_LD(LOAD_BLK(%o4, %f0))
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	add		%o4, 64, %o4
+	bne,pt		%xcc, 1b
+	 LOAD(prefetch, %o4 + 64, #one_read)
+
+195:
+	add		%o4, %g3, %o0
+	membar		#Sync
+
+	VISExitHalf
+
+	/* %o2 contains any final bytes still needed to be copied
+	 * over. If anything is left, we copy it one byte at a time.
+	 */
+	brz,pt		%o2, 85f
+	 sub		%o0, %o1, %o3
+	ba,a,pt		%XCC, 90f
+
+	.align		64
+75: /* 16 < len <= 64 */
+	bne,pn		%XCC, 75f
+	 sub		%o0, %o1, %o3
+
+72:
+	andn		%o2, 0xf, %o4
+	and		%o2, 0xf, %o2
+1:	subcc		%o4, 0x10, %o4
+	EX_LD(LOAD(ldx, %o1, %o5))
+	add		%o1, 0x08, %o1
+	EX_LD(LOAD(ldx, %o1, %g1))
+	sub		%o1, 0x08, %o1
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+	EX_ST(STORE(stx, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+73:	andcc		%o2, 0x8, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x8, %o2
+	EX_LD(LOAD(ldx, %o1, %o5))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x4, %o2
+	EX_LD(LOAD(lduw, %o1, %o5))
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f
+	 nop
+
+75:
+	andcc		%o0, 0x7, %g1
+	sub		%g1, 0x8, %g1
+	be,pn		%icc, 2f
+	 sub		%g0, %g1, %g1
+	sub		%o2, %g1, %o2
+
+1:	subcc		%g1, 1, %g1
+	EX_LD(LOAD(ldub, %o1, %o5))
+	EX_ST(STORE(stb, %o5, %o1 + %o3))
+	bgu,pt		%icc, 1b
+	 add		%o1, 1, %o1
+
+2:	add		%o1, %o3, %o0
+	andcc		%o1, 0x7, %g1
+	bne,pt		%icc, 8f
+	 sll		%g1, 3, %g1
+
+	cmp		%o2, 16
+	bgeu,pt		%icc, 72b
+	 nop
+	ba,a,pt		%xcc, 73b
+
+8:	mov		64, %o3
+	andn		%o1, 0x7, %o1
+	EX_LD(LOAD(ldx, %o1, %g2))
+	sub		%o3, %g1, %o3
+	andn		%o2, 0x7, %o4
+	sllx		%g2, %g1, %g2
+1:	add		%o1, 0x8, %o1
+	EX_LD(LOAD(ldx, %o1, %g3))
+	subcc		%o4, 0x8, %o4
+	srlx		%g3, %o3, %o5
+	or		%o5, %g2, %o5
+	EX_ST(STORE(stx, %o5, %o0))
+	add		%o0, 0x8, %o0
+	bgu,pt		%icc, 1b
+	 sllx		%g3, %g1, %g2
+
+	srl		%g1, 3, %g1
+	andcc		%o2, 0x7, %o2
+	be,pn		%icc, 85f
+	 add		%o1, %g1, %o1
+	ba,pt		%xcc, 90f
+	 sub		%o0, %o1, %o3
+
+	.align		64
+80: /* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+1:
+	subcc		%o2, 4, %o2
+	EX_LD(LOAD(lduw, %o1, %g1))
+	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 mov		EX_RETVAL(GLOBAL_SPARE), %o0
+
+	.align		32
+90:
+	subcc		%o2, 1, %o2
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 mov		EX_RETVAL(GLOBAL_SPARE), %o0
+
+	.size		FUNC_NAME, .-FUNC_NAME
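A note on the FREG_FROB chains above: they lean on the VIS faligndata instruction. alignaddr latches the source misalignment (src & 7) into the GSR, and each faligndata then extracts eight destination-aligned bytes from two consecutive source doublewords. One step, as a C sketch (illustrative, matching sparc64's big-endian byte order):

#include <stdint.h>

/* Return the 8 bytes starting "off" bytes into the 16-byte
 * concatenation hi:lo, where off = src & 7. */
static uint64_t faligndata_step(uint64_t hi, uint64_t lo, unsigned int off)
{
	if (off == 0)
		return hi;
	return (hi << (off * 8)) | (lo >> ((8 - off) * 8));
}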
diff --git a/arch/sparc/lib/NG2page.S b/arch/sparc/lib/NG2page.S
new file mode 100644
index 000000000000..73b6b7c72cbf
--- /dev/null
+++ b/arch/sparc/lib/NG2page.S
@@ -0,0 +1,61 @@
+/* NG2page.S: Niagara-2 optimized clear and copy page.
+ *
+ * Copyright (C) 2007 (davem@davemloft.net)
+ */
+
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/visasm.h>
+
+	.text
+	.align	32
+
+	/* This is heavily simplified from the sun4u variants
+	 * because Niagara-2 does not have any D-cache aliasing issues.
+	 */
+NG2copy_user_page:	/* %o0=dest, %o1=src, %o2=vaddr */
+	prefetch	[%o1 + 0x00], #one_read
+	prefetch	[%o1 + 0x40], #one_read
+	VISEntryHalf
+	set		PAGE_SIZE, %g7
+	sub		%o0, %o1, %g3
+1:	stxa		%g0, [%o1 + %g3] ASI_BLK_INIT_QUAD_LDD_P
+	subcc		%g7, 64, %g7
+	ldda		[%o1] ASI_BLK_P, %f0
+	stda		%f0, [%o1 + %g3] ASI_BLK_P
+	add		%o1, 64, %o1
+	bne,pt		%xcc, 1b
+	 prefetch	[%o1 + 0x40], #one_read
+	membar		#Sync
+	VISExitHalf
+	retl
+	 nop
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define NG_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	niagara2_patch_pageops
+	.type	niagara2_patch_pageops,#function
+niagara2_patch_pageops:
+	NG_DO_PATCH(copy_user_page, NG2copy_user_page)
+	NG_DO_PATCH(_clear_page, NGclear_page)
+	NG_DO_PATCH(clear_user_page, NGclear_user_page)
+	retl
+	 nop
+	.size	niagara2_patch_pageops,.-niagara2_patch_pageops
diff --git a/arch/sparc/lib/NG2patch.S b/arch/sparc/lib/NG2patch.S
new file mode 100644
index 000000000000..28c36f06a6d1
--- /dev/null
+++ b/arch/sparc/lib/NG2patch.S
@@ -0,0 +1,33 @@
+/* NG2patch.S: Patch Ultra-I routines with Niagara-2 variant.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define NG_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	niagara2_patch_copyops
+	.type	niagara2_patch_copyops,#function
+niagara2_patch_copyops:
+	NG_DO_PATCH(memcpy, NG2memcpy)
+	NG_DO_PATCH(___copy_from_user, NG2copy_from_user)
+	NG_DO_PATCH(___copy_to_user, NG2copy_to_user)
+	retl
+	 nop
+	.size	niagara2_patch_copyops,.-niagara2_patch_copyops
diff --git a/arch/sparc/lib/NGbzero.S b/arch/sparc/lib/NGbzero.S
new file mode 100644
index 000000000000..814d5f7a45e1
--- /dev/null
+++ b/arch/sparc/lib/NGbzero.S
@@ -0,0 +1,164 @@
+/* NGbzero.S: Niagara optimized memset/clear_user.
+ *
+ * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ */
+#include <asm/asi.h>
+
+#define EX_ST(x,y)		\
+98:	x,y;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	%o1, %o0;	\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+	.text
+
+	.globl		NGmemset
+	.type		NGmemset, #function
+NGmemset:		/* %o0=buf, %o1=pat, %o2=len */
+	and		%o1, 0xff, %o3
+	mov		%o2, %o1
+	sllx		%o3, 8, %g1
+	or		%g1, %o3, %o2
+	sllx		%o2, 16, %g1
+	or		%g1, %o2, %o2
+	sllx		%o2, 32, %g1
+	ba,pt		%xcc, 1f
+	 or		%g1, %o2, %o2
+
+	.globl		NGbzero
+	.type		NGbzero, #function
+NGbzero:
+	clr		%o2
+1:	brz,pn		%o1, NGbzero_return
+	 mov		%o0, %o3
+
+	/* %o5: saved %asi, restored at NGbzero_done
+	 * %g7: store-init %asi to use
+	 * %o4:	non-store-init %asi to use
+	 */
+	rd		%asi, %o5
+	mov		ASI_BLK_INIT_QUAD_LDD_P, %g7
+	mov		ASI_P, %o4
+	wr		%o4, 0x0, %asi
+
+NGbzero_from_clear_user:
+	cmp		%o1, 15
+	bl,pn		%icc, NGbzero_tiny
+	 andcc		%o0, 0x7, %g1
+	be,pt		%xcc, 2f
+	 mov		8, %g2
+	sub		%g2, %g1, %g1
+	sub		%o1, %g1, %o1
+1:	EX_ST(stba %o2, [%o0 + 0x00] %asi)
+	subcc		%g1, 1, %g1
+	bne,pt		%xcc, 1b
+	 add		%o0, 1, %o0
+2:	cmp		%o1, 128
+	bl,pn		%icc, NGbzero_medium
+	 andcc		%o0, (64 - 1), %g1
+	be,pt		%xcc, NGbzero_pre_loop
+	 mov		64, %g2
+	sub		%g2, %g1, %g1
+	sub		%o1, %g1, %o1
+1:	EX_ST(stxa %o2, [%o0 + 0x00] %asi)
+	subcc		%g1, 8, %g1
+	bne,pt		%xcc, 1b
+	 add		%o0, 8, %o0
+
+NGbzero_pre_loop:
+	wr		%g7, 0x0, %asi
+	andn		%o1, (64 - 1), %g1
+	sub		%o1, %g1, %o1
+NGbzero_loop:
+	EX_ST(stxa %o2, [%o0 + 0x00] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x08] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x10] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x18] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x20] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x28] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x30] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x38] %asi)
+	subcc		%g1, 64, %g1
+	bne,pt		%xcc, NGbzero_loop
+	 add		%o0, 64, %o0
+
+	membar		#Sync
+	wr		%o4, 0x0, %asi
+	brz,pn		%o1, NGbzero_done
+NGbzero_medium:
+	 andncc		%o1, 0x7, %g1
+	be,pn		%xcc, 2f
+	 sub		%o1, %g1, %o1
+1:	EX_ST(stxa %o2, [%o0 + 0x00] %asi)
+	subcc		%g1, 8, %g1
+	bne,pt		%xcc, 1b
+	 add		%o0, 8, %o0
+2:	brz,pt		%o1, NGbzero_done
+	 nop
+
+NGbzero_tiny:
+1:	EX_ST(stba %o2, [%o0 + 0x00] %asi)
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 1, %o0
+
+	/* fallthrough */
+
+NGbzero_done:
+	wr		%o5, 0x0, %asi
+
+NGbzero_return:
+	retl
+	 mov		%o3, %o0
+	.size		NGbzero, .-NGbzero
+	.size		NGmemset, .-NGmemset
+
+	.globl		NGclear_user
+	.type		NGclear_user, #function
+NGclear_user:		/* %o0=buf, %o1=len */
+	rd		%asi, %o5
+	brz,pn		%o1, NGbzero_done
+	 clr		%o3
+	cmp		%o5, ASI_AIUS
+	bne,pn		%icc, NGbzero
+	 clr		%o2
+	mov		ASI_BLK_INIT_QUAD_LDD_AIUS, %g7
+	ba,pt		%xcc, NGbzero_from_clear_user
+	 mov		ASI_AIUS, %o4
+	.size		NGclear_user, .-NGclear_user
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define NG_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	niagara_patch_bzero
+	.type	niagara_patch_bzero,#function
+niagara_patch_bzero:
+	NG_DO_PATCH(memset, NGmemset)
+	NG_DO_PATCH(__bzero, NGbzero)
+	NG_DO_PATCH(__clear_user, NGclear_user)
+	NG_DO_PATCH(tsb_init, NGtsb_init)
+	retl
+	 nop
+	.size	niagara_patch_bzero,.-niagara_patch_bzero
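A note on NGbzero_loop above: it is shaped around the init-store semantics of the ASI installed at NGbzero_pre_loop. Init stores allocate an L2 line without fetching its old contents, so every line touched must be written in full. The loop count is rounded down to a multiple of 64 by "andn %o1, (64 - 1), %g1", each iteration performs exactly eight 8-byte stores (64 bytes, one full line), and a membar #Sync is issued before switching back to the normal %asi.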
diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S
new file mode 100644
index 000000000000..e7f433f71b42
--- /dev/null
+++ b/arch/sparc/lib/NGcopy_from_user.S
@@ -0,0 +1,37 @@
+/* NGcopy_from_user.S: Niagara optimized copy from userspace.
+ *
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	wr	%g0, ASI_AIUS, %asi;\
+	ret;			\
+	 restore %g0, 1, %o0;	\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS	0x11
+#endif
+
+#define FUNC_NAME		NGcopy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] ASI_AIUS, dest
+#define LOAD_TWIN(addr_reg,dest0,dest1)	\
+	ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0
+#define EX_RETVAL(x)		%g0
+
+#ifdef __KERNEL__
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop
+#endif
+
+#include "NGmemcpy.S"
diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S
new file mode 100644
index 000000000000..6ea01c5532a0
--- /dev/null
+++ b/arch/sparc/lib/NGcopy_to_user.S
@@ -0,0 +1,40 @@
+/* NGcopy_to_user.S: Niagara optimized copy to userspace.
+ *
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	wr	%g0, ASI_AIUS, %asi;\
+	ret;			\
+	 restore %g0, 1, %o0;	\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS	0x11
+#endif
+
+#define FUNC_NAME		NGcopy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define STORE_ASI		ASI_BLK_INIT_QUAD_LDD_AIUS
+#define EX_RETVAL(x)		%g0
+
+#ifdef __KERNEL__
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop
+#endif
+
+#include "NGmemcpy.S"
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
new file mode 100644
index 000000000000..96a14caf6966
--- /dev/null
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -0,0 +1,425 @@
+/* NGmemcpy.S: Niagara optimized memcpy.
+ *
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#ifdef __KERNEL__
+#include <asm/asi.h>
+#include <asm/thread_info.h>
+#define GLOBAL_SPARE	%g7
+#define RESTORE_ASI(TMP)	\
+	ldub	[%g6 + TI_CURRENT_DS], TMP;  \
+	wr	TMP, 0x0, %asi;
+#else
+#define GLOBAL_SPARE	%g5
+#define RESTORE_ASI(TMP)	\
+	wr	%g0, ASI_PNF, %asi
+#endif
+
+#ifdef __sparc_v9__
+#define SAVE_AMOUNT	128
+#else
+#define SAVE_AMOUNT	64
+#endif
+
+#ifndef STORE_ASI
+#define STORE_ASI	ASI_BLK_INIT_QUAD_LDD_P
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#ifndef MEMCPY_DEBUG
+#define LOAD(type,addr,dest)	type [addr], dest
+#else
+#define LOAD(type,addr,dest)	type##a [addr] 0x80, dest
+#endif
+#endif
+
+#ifndef LOAD_TWIN
+#define LOAD_TWIN(addr_reg,dest0,dest1)	\
+	ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_P, dest0
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef STORE_INIT
+#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
+#define STORE_INIT(src,addr)	stxa src, [addr] %asi
+#else
+#define STORE_INIT(src,addr)	stx src, [addr + 0x00]
+#endif
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	NGmemcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align		64
+
+	.globl	FUNC_NAME
+	.type	FUNC_NAME,#function
+FUNC_NAME:	/* %i0=dst, %i1=src, %i2=len */
+	PREAMBLE
+	save		%sp, -SAVE_AMOUNT, %sp
+	srlx		%i2, 31, %g2
+	cmp		%g2, 0
+	tne		%xcc, 5
+	mov		%i0, %o0
+	cmp		%i2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %i1, %i3
+	cmp		%i2, 16
+	blu,a,pn	%XCC, 80f
+	 or		%i3, %i2, %i3
+
+	/* 2 blocks (128 bytes) is the minimum we can do the block
+	 * copy with.  We need to ensure that we'll iterate at least
+	 * once in the block copy loop.  At worst we'll need to align
+	 * the destination to a 64-byte boundary which can chew up
+	 * to (64 - 1) bytes from the length before we perform the
+	 * block copy loop.
+	 */
+	cmp		%i2, (2 * 64)
+	blu,pt		%XCC, 70f
+	 andcc		%i3, 0x7, %g0
+
+	/* %o0:	dst
+	 * %i1:	src
+	 * %i2:	len  (known to be >= 128)
+	 *
+	 * The block copy loops will use %i4/%i5,%g2/%g3 as
+	 * temporaries while copying the data.
+	 */
+
+	LOAD(prefetch, %i1, #one_read)
+	wr		%g0, STORE_ASI, %asi
+
+	/* Align destination on 64-byte boundary.  */
+	andcc		%o0, (64 - 1), %i4
+	be,pt		%XCC, 2f
+	 sub		%i4, 64, %i4
+	sub		%g0, %i4, %i4	! bytes to align dst
+	sub		%i2, %i4, %i2
+1:	subcc		%i4, 1, %i4
+	EX_LD(LOAD(ldub, %i1, %g1))
+	EX_ST(STORE(stb, %g1, %o0))
+	add		%i1, 1, %i1
+	bne,pt		%XCC, 1b
+	add		%o0, 1, %o0
+
+	/* If the source is on a 16-byte boundary we can do
+	 * the direct block copy loop.  If it is 8-byte aligned
+	 * we can do the 16-byte loads offset by -8 bytes and the
+	 * init stores offset by one register.
+	 *
+	 * If the source is not even 8-byte aligned, we need to do
+	 * shifting and masking (basically integer faligndata).
+	 *
+	 * The careful bit with init stores is that if we store
+	 * to any part of the cache line we have to store the whole
+	 * cacheline else we can end up with corrupt L2 cache line
+	 * contents.  Since the loop works on 64-bytes of 64-byte
+	 * aligned store data at a time, this is easy to ensure.
+	 */
+2:
+	andcc		%i1, (16 - 1), %i4
+	andn		%i2, (64 - 1), %g1	! block copy loop iterator
+	be,pt		%XCC, 50f
+	 sub		%i2, %g1, %i2		! final sub-block copy bytes
+
+	cmp		%i4, 8
+	be,pt		%XCC, 10f
+	 sub		%i1, %i4, %i1
+
+	/* Neither 8-byte nor 16-byte aligned, shift and mask.  */
+	and		%i4, 0x7, GLOBAL_SPARE
+	sll		GLOBAL_SPARE, 3, GLOBAL_SPARE
+	mov		64, %i5
+	EX_LD(LOAD_TWIN(%i1, %g2, %g3))
+	sub		%i5, GLOBAL_SPARE, %i5
+	mov		16, %o4
+	mov		32, %o5
+	mov		48, %o7
+	mov		64, %i3
+
+	bg,pn	   	%XCC, 9f
+	 nop
+
+#define MIX_THREE_WORDS(WORD1, WORD2, WORD3, PRE_SHIFT, POST_SHIFT, TMP) \
+	sllx		WORD1, POST_SHIFT, WORD1; \
+	srlx		WORD2, PRE_SHIFT, TMP; \
+	sllx		WORD2, POST_SHIFT, WORD2; \
+	or		WORD1, TMP, WORD1; \
+	srlx		WORD3, PRE_SHIFT, TMP; \
+	or		WORD2, TMP, WORD2;
+
+8:	EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+	MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
+	LOAD(prefetch, %i1 + %i3, #one_read)
+
+	EX_ST(STORE_INIT(%g2, %o0 + 0x00))
+	EX_ST(STORE_INIT(%g3, %o0 + 0x08))
+
+	EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+	MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o2, %o0 + 0x10))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+
+	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%g2, %o0 + 0x20))
+	EX_ST(STORE_INIT(%g3, %o0 + 0x28))
+
+	EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+	add		%i1, 64, %i1
+	MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o2, %o0 + 0x30))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+
+	subcc		%g1, 64, %g1
+	bne,pt		%XCC, 8b
+	 add		%o0, 64, %o0
+
+	ba,pt		%XCC, 60f
+	 add		%i1, %i4, %i1
+
+9:	EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+	MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
+	LOAD(prefetch, %i1 + %i3, #one_read)
+
+	EX_ST(STORE_INIT(%g3, %o0 + 0x00))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+
+	EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+	MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o3, %o0 + 0x10))
+	EX_ST(STORE_INIT(%g2, %o0 + 0x18))
+
+	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%g3, %o0 + 0x20))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+
+	EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+	add		%i1, 64, %i1
+	MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o3, %o0 + 0x30))
+	EX_ST(STORE_INIT(%g2, %o0 + 0x38))
+
+	subcc		%g1, 64, %g1
+	bne,pt		%XCC, 9b
+	 add		%o0, 64, %o0
+
+	ba,pt		%XCC, 60f
+	 add		%i1, %i4, %i1
+
+10:	/* Destination is 64-byte aligned, source was only 8-byte
+	 * aligned but it has been subtracted by 8 and we perform
+	 * one twin load ahead, then add 8 back into source when
+	 * we finish the loop.
+	 */
+	EX_LD(LOAD_TWIN(%i1, %o4, %o5))
+	mov	16, %o7
+	mov	32, %g2
+	mov	48, %g3
+	mov	64, %o1
+1:	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	LOAD(prefetch, %i1 + %o1, #one_read)
+	EX_ST(STORE_INIT(%o5, %o0 + 0x00))	! initializes cache line
+	EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+	EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x10))
+	EX_ST(STORE_INIT(%o4, %o0 + 0x18))
+	EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
+	EX_ST(STORE_INIT(%o5, %o0 + 0x20))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+	EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5))
+	add		%i1, 64, %i1
+	EX_ST(STORE_INIT(%o3, %o0 + 0x30))
+	EX_ST(STORE_INIT(%o4, %o0 + 0x38))
+	subcc		%g1, 64, %g1
+	bne,pt		%XCC, 1b
+	 add		%o0, 64, %o0
+
+	ba,pt		%XCC, 60f
+	 add		%i1, 0x8, %i1
+
+50:	/* Destination is 64-byte aligned, and source is 16-byte
+	 * aligned.
+	 */
+	mov	16, %o7
+	mov	32, %g2
+	mov	48, %g3
+	mov	64, %o1
+1:	EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5))
+	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	LOAD(prefetch, %i1 + %o1, #one_read)
+	EX_ST(STORE_INIT(%o4, %o0 + 0x00))	! initializes cache line
+	EX_ST(STORE_INIT(%o5, %o0 + 0x08))
+	EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x10))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+	EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
+	add	%i1, 64, %i1
+	EX_ST(STORE_INIT(%o4, %o0 + 0x20))
+	EX_ST(STORE_INIT(%o5, %o0 + 0x28))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x30))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+	subcc	%g1, 64, %g1
+	bne,pt	%XCC, 1b
+	 add	%o0, 64, %o0
+	/* fall through */
+
+60:	
+	membar		#Sync
+
+	/* %i2 contains any final bytes still needed to be copied
+	 * over. If anything is left, we copy it one byte at a time.
+	 */
+	RESTORE_ASI(%i3)
+	brz,pt		%i2, 85f
+	 sub		%o0, %i1, %i3
+	ba,a,pt		%XCC, 90f
+
+	.align		64
+70: /* 16 < len <= 64 */
+	bne,pn		%XCC, 75f
+	 sub		%o0, %i1, %i3
+
+72:
+	andn		%i2, 0xf, %i4
+	and		%i2, 0xf, %i2
+1:	subcc		%i4, 0x10, %i4
+	EX_LD(LOAD(ldx, %i1, %o4))
+	add		%i1, 0x08, %i1
+	EX_LD(LOAD(ldx, %i1, %g1))
+	sub		%i1, 0x08, %i1
+	EX_ST(STORE(stx, %o4, %i1 + %i3))
+	add		%i1, 0x8, %i1
+	EX_ST(STORE(stx, %g1, %i1 + %i3))
+	bgu,pt		%XCC, 1b
+	 add		%i1, 0x8, %i1
+73:	andcc		%i2, 0x8, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%i2, 0x8, %i2
+	EX_LD(LOAD(ldx, %i1, %o4))
+	EX_ST(STORE(stx, %o4, %i1 + %i3))
+	add		%i1, 0x8, %i1
+1:	andcc		%i2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%i2, 0x4, %i2
+	EX_LD(LOAD(lduw, %i1, %i5))
+	EX_ST(STORE(stw, %i5, %i1 + %i3))
+	add		%i1, 0x4, %i1
+1:	cmp		%i2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f
+	 nop
+
+75:
+	andcc		%o0, 0x7, %g1
+	sub		%g1, 0x8, %g1
+	be,pn		%icc, 2f
+	 sub		%g0, %g1, %g1
+	sub		%i2, %g1, %i2
+
+1:	subcc		%g1, 1, %g1
+	EX_LD(LOAD(ldub, %i1, %i5))
+	EX_ST(STORE(stb, %i5, %i1 + %i3))
+	bgu,pt		%icc, 1b
+	 add		%i1, 1, %i1
+
+2:	add		%i1, %i3, %o0
+	andcc		%i1, 0x7, %g1
+	bne,pt		%icc, 8f
+	 sll		%g1, 3, %g1
+
+	cmp		%i2, 16
+	bgeu,pt		%icc, 72b
+	 nop
+	ba,a,pt		%xcc, 73b
+
+8:	mov		64, %i3
+	andn		%i1, 0x7, %i1
+	EX_LD(LOAD(ldx, %i1, %g2))
+	sub		%i3, %g1, %i3
+	andn		%i2, 0x7, %i4
+	sllx		%g2, %g1, %g2
+1:	add		%i1, 0x8, %i1
+	EX_LD(LOAD(ldx, %i1, %g3))
+	subcc		%i4, 0x8, %i4
+	srlx		%g3, %i3, %i5
+	or		%i5, %g2, %i5
+	EX_ST(STORE(stx, %i5, %o0))
+	add		%o0, 0x8, %o0
+	bgu,pt		%icc, 1b
+	 sllx		%g3, %g1, %g2
+
+	srl		%g1, 3, %g1
+	andcc		%i2, 0x7, %i2
+	be,pn		%icc, 85f
+	 add		%i1, %g1, %i1
+	ba,pt		%xcc, 90f
+	 sub		%o0, %i1, %i3
+
+	.align		64
+80: /* 0 < len <= 16 */
+	andcc		%i3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %i1, %i3
+
+1:
+	subcc		%i2, 4, %i2
+	EX_LD(LOAD(lduw, %i1, %g1))
+	EX_ST(STORE(stw, %g1, %i1 + %i3))
+	bgu,pt		%XCC, 1b
+	 add		%i1, 4, %i1
+
+85:	ret
+	 restore	EX_RETVAL(%i0), %g0, %o0
+
+	.align		32
+90:
+	subcc		%i2, 1, %i2
+	EX_LD(LOAD(ldub, %i1, %g1))
+	EX_ST(STORE(stb, %g1, %i1 + %i3))
+	bgu,pt		%XCC, 90b
+	 add		%i1, 1, %i1
+	ret
+	 restore	EX_RETVAL(%i0), %g0, %o0
+
+	.size		FUNC_NAME, .-FUNC_NAME
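Aside: MIX_THREE_WORDS is the "integer faligndata" the comment in the unaligned path mentions. With the source misaligned by off bytes within a doubleword, POST_SHIFT is off * 8 and PRE_SHIFT is 64 - POST_SHIFT, and each invocation produces two destination-aligned words from three input words. As a C sketch (illustrative):

#include <stdint.h>

/* w1 and w2 are updated in place to the two aligned output words;
 * w3 supplies the trailing bytes for w2.  pre + post == 64. */
static void mix_three_words(uint64_t *w1, uint64_t *w2, uint64_t w3,
			    unsigned int pre, unsigned int post)
{
	*w1 = (*w1 << post) | (*w2 >> pre);
	*w2 = (*w2 << post) | (w3 >> pre);
}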
diff --git a/arch/sparc/lib/NGpage.S b/arch/sparc/lib/NGpage.S
new file mode 100644
index 000000000000..428920de05ba
--- /dev/null
+++ b/arch/sparc/lib/NGpage.S
@@ -0,0 +1,99 @@
+/* NGpage.S: Niagara optimized clear and copy page.
+ *
+ * Copyright (C) 2006 (davem@davemloft.net)
+ */
+
+#include <asm/asi.h>
+#include <asm/page.h>
+
+	.text
+	.align	32
+
+	/* This is heavily simplified from the sun4u variants
+	 * because Niagara does not have any D-cache aliasing issues
+	 * and also we don't need to use the FPU in order to implement
+	 * an optimal page copy/clear.
+	 */
+
+NGcopy_user_page:	/* %o0=dest, %o1=src, %o2=vaddr */
+	prefetch	[%o1 + 0x00], #one_read
+	mov		8, %g1
+	mov		16, %g2
+	mov		24, %g3
+	set		PAGE_SIZE, %g7
+
+1:	ldda		[%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2
+	ldda		[%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4
+	prefetch	[%o1 + 0x40], #one_read
+	add		%o1, 32, %o1
+	stxa		%o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
+	ldda		[%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2
+	stxa		%o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
+	ldda		[%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4
+	add		%o1, 32, %o1
+	add		%o0, 32, %o0
+	stxa		%o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
+	subcc		%g7, 64, %g7
+	bne,pt		%xcc, 1b
+	 add		%o0, 32, %o0
+	membar		#Sync
+	retl
+	 nop
+
+	.globl		NGclear_page, NGclear_user_page
+NGclear_page:		/* %o0=dest */
+NGclear_user_page:	/* %o0=dest, %o1=vaddr */
+	mov		8, %g1
+	mov		16, %g2
+	mov		24, %g3
+	set		PAGE_SIZE, %g7
+
+1:	stxa		%g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
+	add		%o0, 32, %o0
+	stxa		%g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P
+	stxa		%g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P
+	subcc		%g7, 64, %g7
+	bne,pt		%xcc, 1b
+	 add		%o0, 32, %o0
+	membar		#Sync
+	retl
+	 nop
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define NG_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	niagara_patch_pageops
+	.type	niagara_patch_pageops,#function
+niagara_patch_pageops:
+	NG_DO_PATCH(copy_user_page, NGcopy_user_page)
+	NG_DO_PATCH(_clear_page, NGclear_page)
+	NG_DO_PATCH(clear_user_page, NGclear_user_page)
+	retl
+	 nop
+	.size	niagara_patch_pageops,.-niagara_patch_pageops
diff --git a/arch/sparc/lib/NGpatch.S b/arch/sparc/lib/NGpatch.S
new file mode 100644
index 000000000000..3b0674fc3366
--- /dev/null
+++ b/arch/sparc/lib/NGpatch.S
@@ -0,0 +1,33 @@
+/* NGpatch.S: Patch Ultra-I routines with Niagara variant.
+ *
+ * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
+ */
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define NG_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	niagara_patch_copyops
+	.type	niagara_patch_copyops,#function
+niagara_patch_copyops:
+	NG_DO_PATCH(memcpy, NGmemcpy)
+	NG_DO_PATCH(___copy_from_user, NGcopy_from_user)
+	NG_DO_PATCH(___copy_to_user, NGcopy_to_user)
+	retl
+	 nop
+	.size	niagara_patch_copyops,.-niagara_patch_copyops
diff --git a/arch/sparc/lib/PeeCeeI.c b/arch/sparc/lib/PeeCeeI.c
new file mode 100644
index 000000000000..46053e6ddd7b
--- /dev/null
+++ b/arch/sparc/lib/PeeCeeI.c
@@ -0,0 +1,203 @@
+/*
+ * PeeCeeI.c: The emerging standard...
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+void outsb(unsigned long __addr, const void *src, unsigned long count)
+{
+	void __iomem *addr = (void __iomem *) __addr;
+	const u8 *p = src;
+
+	while (count--)
+		outb(*p++, addr);
+}
+
+void outsw(unsigned long __addr, const void *src, unsigned long count)
+{
+	void __iomem *addr = (void __iomem *) __addr;
+
+	while (count--) {
+		__raw_writew(*(u16 *)src, addr);
+		src += sizeof(u16);
+	}
+}
+
+void outsl(unsigned long __addr, const void *src, unsigned long count)
+{
+	void __iomem *addr = (void __iomem *) __addr;
+	u32 l, l2;
+
+	if (!count)
+		return;
+
+	switch (((unsigned long)src) & 0x3) {
+	case 0x0:
+		/* src is naturally aligned */
+		while (count--) {
+			__raw_writel(*(u32 *)src, addr);
+			src += sizeof(u32);
+		}
+		break;
+	case 0x2:
+		/* 2-byte alignment */
+		while (count--) {
+			l = (*(u16 *)src) << 16;
+			l |= *(u16 *)(src + sizeof(u16));
+			__raw_writel(l, addr);
+			src += sizeof(u32);
+		}
+		break;
+	case 0x1:
+		/* Hold three bytes in l each time, grab a byte from l2 */
+		l = (*(u8 *)src) << 24;
+		l |= (*(u16 *)(src + sizeof(u8))) << 8;
+		src += sizeof(u8) + sizeof(u16);
+		while (count--) {
+			l2 = *(u32 *)src;
+			l |= (l2 >> 24);
+			__raw_writel(l, addr);
+			l = l2 << 8;
+			src += sizeof(u32);
+		}
+		break;
+	case 0x3:
+		/* Hold a byte in l each time, grab 3 bytes from l2 */
+		l = (*(u8 *)src) << 24;
+		src += sizeof(u8);
+		while (count--) {
+			l2 = *(u32 *)src;
+			l |= (l2 >> 8);
+			__raw_writel(l, addr);
+			l = l2 << 24;
+			src += sizeof(u32);
+		}
+		break;
+	}
+}
+
+void insb(unsigned long __addr, void *dst, unsigned long count)
+{
+	void __iomem *addr = (void __iomem *) __addr;
+
+	if (count) {
+		u32 *pi;
+		u8 *pb = dst;
+
+		while ((((unsigned long)pb) & 0x3) && count--)
+			*pb++ = inb(addr);
+		pi = (u32 *)pb;
+		while (count >= 4) {
+			u32 w;
+
+			w  = (inb(addr) << 24);
+			w |= (inb(addr) << 16);
+			w |= (inb(addr) << 8);
+			w |= (inb(addr) << 0);
+			*pi++ = w;
+			count -= 4;
+		}
+		pb = (u8 *)pi;
+		while (count--)
+			*pb++ = inb(addr);
+	}
+}
+
+void insw(unsigned long __addr, void *dst, unsigned long count)
+{
+	void __iomem *addr = (void __iomem *) __addr;
+
+	if (count) {
+		u16 *ps = dst;
+		u32 *pi;
+
+		if (((unsigned long)ps) & 0x2) {
+			*ps++ = le16_to_cpu(inw(addr));
+			count--;
+		}
+		pi = (u32 *)ps;
+		while (count >= 2) {
+			u32 w;
+
+			w  = (le16_to_cpu(inw(addr)) << 16);
+			w |= (le16_to_cpu(inw(addr)) << 0);
+			*pi++ = w;
+			count -= 2;
+		}
+		ps = (u16 *)pi;
+		if (count)
+			*ps = le16_to_cpu(inw(addr));
+	}
+}
+
+void insl(unsigned long __addr, void *dst, unsigned long count)
+{
+	void __iomem *addr = (void __iomem *) __addr;
+
+	if (count) {
+		if ((((unsigned long)dst) & 0x3) == 0) {
+			u32 *pi = dst;
+			while (count--)
+				*pi++ = le32_to_cpu(inl(addr));
+		} else {
+			u32 l = 0, l2, *pi;
+			u16 *ps;
+			u8 *pb;
+
+			switch (((unsigned long)dst) & 3) {
+			case 0x2:
+				ps = dst;
+				count -= 1;
+				l = le32_to_cpu(inl(addr));
+				*ps++ = l;
+				pi = (u32 *)ps;
+				while (count--) {
+					l2 = le32_to_cpu(inl(addr));
+					*pi++ = (l << 16) | (l2 >> 16);
+					l = l2;
+				}
+				ps = (u16 *)pi;
+				*ps = l;
+				break;
+
+			case 0x1:
+				pb = dst;
+				count -= 1;
+				l = le32_to_cpu(inl(addr));
+				*pb++ = l >> 24;
+				ps = (u16 *)pb;
+				*ps++ = ((l >> 8) & 0xffff);
+				pi = (u32 *)ps;
+				while (count--) {
+					l2 = le32_to_cpu(inl(addr));
+					*pi++ = (l << 24) | (l2 >> 8);
+					l = l2;
+				}
+				pb = (u8 *)pi;
+				*pb = l;
+				break;
+
+			case 0x3:
+				pb = (u8 *)dst;
+				count -= 1;
+				l = le32_to_cpu(inl(addr));
+				*pb++ = l >> 24;
+				pi = (u32 *)pb;
+				while (count--) {
+					l2 = le32_to_cpu(inl(addr));
+					*pi++ = (l << 8) | (l2 >> 24);
+					l = l2;
+				}
+				ps = (u16 *)pi;
+				*ps++ = ((l >> 8) & 0xffff);
+				pb = (u8 *)ps;
+				*pb = l;
+				break;
+			}
+		}
+	}
+}
+
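The unaligned cases of outsl() above all use one shift-and-carry scheme: keep the bytes carried over from the previous aligned load in l, OR in the missing bytes from the next aligned load l2, and emit one whole 32-bit word per iteration. Below is a rough, standalone C sketch of the one-byte-misaligned ("case 0x1") path; it assumes big-endian byte order as on sparc64, and the function name is illustrative, not kernel code.

#include <stdint.h>

/* Sketch: emit aligned 32-bit words from a source that starts one
 * byte past a 4-byte boundary.  'count' is the number of words to
 * emit; src + 1 is 2-byte aligned, so the u16 load below is legal.
 */
static void emit_off_by_one(const uint8_t *src, uint32_t *out,
			    unsigned long count)
{
	uint32_t l, l2;

	l  = (uint32_t)src[0] << 24;				/* leading byte */
	l |= (uint32_t)*(const uint16_t *)(src + 1) << 8;	/* next two */
	src += 3;				/* src is now 4-byte aligned */
	while (count--) {
		l2 = *(const uint32_t *)src;	/* aligned load */
		*out++ = l | (l2 >> 24);	/* 3 carried bytes + 1 new */
		l = l2 << 8;			/* carry the remaining 3 */
		src += 4;
	}
}
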
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
new file mode 100644
index 000000000000..3192b0bf4fab
--- /dev/null
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -0,0 +1,33 @@
+/* U1copy_from_user.S: UltraSparc-I/II/IIi/IIe optimized copy from userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		___copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_AIUS, dest
+#define EX_RETVAL(x)		0
+
+	/* Writing to %asi is _expensive_, so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop;						\
+
+#include "U1memcpy.S"
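
The EX_LD wrapper above follows the standard user-copy pattern: label the potentially faulting instruction (98:), emit recovery code into the .fixup section (99:), and record the pair of addresses in the __ex_table section. The sketch below shows the conceptual shape of that table and of the lookup the fault handler performs; the struct layout and the linear scan are illustrative simplifications, not the kernel's actual definitions.

#include <stddef.h>

struct exception_table_entry {
	unsigned int insn;	/* address of the faulting load/store */
	unsigned int fixup;	/* address to resume at on a fault    */
};

/* Conceptual lookup the trap handler performs before redirecting the
 * trapped PC to entry->fixup (the real kernel sorts its tables and
 * binary-searches them; a linear scan is shown for clarity).
 */
static const struct exception_table_entry *
lookup_fixup(const struct exception_table_entry *tbl, size_t n,
	     unsigned int fault_pc)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].insn == fault_pc)
			return &tbl[i];
	return NULL;	/* no fixup: a genuine kernel fault */
}
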
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
new file mode 100644
index 000000000000..d1210ffb0b82
--- /dev/null
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -0,0 +1,33 @@
+/* U1copy_to_user.S: UltraSparc-I/II/IIi/IIe optimized copy to userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		___copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS
+#define EX_RETVAL(x)		0
+
+	/* Writing to %asi is _expensive_, so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop;						\
+
+#include "U1memcpy.S"
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
new file mode 100644
index 000000000000..bafd2fc07acb
--- /dev/null
+++ b/arch/sparc/lib/U1memcpy.S
@@ -0,0 +1,563 @@
+/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
+ *
+ * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#ifdef __KERNEL__
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#define GLOBAL_SPARE	g7
+#else
+#define GLOBAL_SPARE	g5
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF  0x04
+#ifdef MEMCPY_DEBUG
+#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
+		 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
+#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#else
+#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#endif
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef LOAD_BLK
+#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef STORE_BLK
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	memcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)		\
+	faligndata		%f1, %f2, %f48;			\
+	faligndata		%f2, %f3, %f50;			\
+	faligndata		%f3, %f4, %f52;			\
+	faligndata		%f4, %f5, %f54;			\
+	faligndata		%f5, %f6, %f56;			\
+	faligndata		%f6, %f7, %f58;			\
+	faligndata		%f7, %f8, %f60;			\
+	faligndata		%f8, %f9, %f62;
+
+#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt)	\
+	EX_LD(LOAD_BLK(%src, %fdest));				\
+	EX_ST(STORE_BLK(%fsrc, %dest));				\
+	add			%src, 0x40, %src;		\
+	subcc			%len, 0x40, %len;		\
+	be,pn			%xcc, jmptgt;			\
+	 add			%dest, 0x40, %dest;		\
+
+#define LOOP_CHUNK1(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f0,  f48, len, branch_dest)
+#define LOOP_CHUNK2(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
+#define LOOP_CHUNK3(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
+
+#define DO_SYNC			membar	#Sync;
+#define STORE_SYNC(dest, fsrc)				\
+	EX_ST(STORE_BLK(%fsrc, %dest));			\
+	add			%dest, 0x40, %dest;	\
+	DO_SYNC
+
+#define STORE_JUMP(dest, fsrc, target)			\
+	EX_ST(STORE_BLK(%fsrc, %dest));			\
+	add			%dest, 0x40, %dest;	\
+	ba,pt			%xcc, target;		\
+	 nop;
+
+#define FINISH_VISCHUNK(dest, f0, f1, left)	\
+	subcc			%left, 8, %left;\
+	bl,pn			%xcc, 95f;	\
+	 faligndata		%f0, %f1, %f48;	\
+	EX_ST(STORE(std, %f48, %dest));		\
+	add			%dest, 8, %dest;
+
+#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
+	subcc			%left, 8, %left;	\
+	bl,pn			%xcc, 95f;		\
+	 fsrc1			%f0, %f1;
+
+#define UNEVEN_VISCHUNK(dest, f0, f1, left)		\
+	UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
+	ba,a,pt			%xcc, 93f;
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align		64
+
+	.globl		FUNC_NAME
+	.type		FUNC_NAME,#function
+FUNC_NAME:		/* %o0=dst, %o1=src, %o2=len */
+	srlx		%o2, 31, %g2
+	cmp		%g2, 0
+	tne		%xcc, 5
+	PREAMBLE
+	mov		%o0, %o4
+	cmp		%o2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	blu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3
+
+	cmp		%o2, (5 * 64)
+	blu,pt		%XCC, 70f
+	 andcc		%o3, 0x7, %g0
+
+	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  */
+	VISEntry
+
+	/* Is 'dst' already aligned on a 64-byte boundary? */
+	andcc		%o0, 0x3f, %g2
+	be,pt		%XCC, 2f
+
+	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
+	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
+	 * subtract this from 'len'.
+	 */
+	 sub		%o0, %o1, %GLOBAL_SPARE
+	sub		%g2, 0x40, %g2
+	sub		%g0, %g2, %g2
+	sub		%o2, %g2, %o2
+	andcc		%g2, 0x7, %g1
+	be,pt		%icc, 2f
+	 and		%g2, 0x38, %g2
+
+1:	subcc		%g1, 0x1, %g1
+	EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
+	EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x1, %o1
+
+	add		%o1, %GLOBAL_SPARE, %o0
+
+2:	cmp		%g2, 0x0
+	and		%o1, 0x7, %g1
+	be,pt		%icc, 3f
+	 alignaddr	%o1, %g0, %o1
+
+	EX_LD(LOAD(ldd, %o1, %f4))
+1:	EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f4, %f6, %f0
+	EX_ST(STORE(std, %f0, %o0))
+	be,pn		%icc, 3f
+	 add		%o0, 0x8, %o0
+
+	EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f6, %f4, %f0
+	EX_ST(STORE(std, %f0, %o0))
+	bne,pt		%icc, 1b
+	 add		%o0, 0x8, %o0
+
+	/* Destination is 64-byte aligned.  */
+3:	
+	membar		  #LoadStore | #StoreStore | #StoreLoad
+
+	subcc		%o2, 0x40, %GLOBAL_SPARE
+	add		%o1, %g1, %g1
+	andncc		%GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE
+	srl		%g1, 3, %g2
+	sub		%o2, %GLOBAL_SPARE, %g3
+	andn		%o1, (0x40 - 1), %o1
+	and		%g2, 7, %g2
+	andncc		%g3, 0x7, %g3
+	fmovd		%f0, %f2
+	sub		%g3, 0x8, %g3
+	sub		%o2, %GLOBAL_SPARE, %o2
+
+	add		%g1, %GLOBAL_SPARE, %g1
+	subcc		%o2, %g3, %o2
+
+	EX_LD(LOAD_BLK(%o1, %f0))
+	add		%o1, 0x40, %o1
+	add		%g1, %g3, %g1
+	EX_LD(LOAD_BLK(%o1, %f16))
+	add		%o1, 0x40, %o1
+	sub		%GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
+	EX_LD(LOAD_BLK(%o1, %f32))
+	add		%o1, 0x40, %o1
+
+	/* There are 8 instances of the unrolled loop,
+	 * one for each possible alignment of the
+	 * source buffer.  Each loop instance is 452
+	 * bytes.
+	 */
+	sll		%g2, 3, %o3
+	sub		%o3, %g2, %o3
+	sllx		%o3, 4, %o3
+	add		%o3, %g2, %o3
+	sllx		%o3, 2, %g2
+1:	rd		%pc, %o3
+	add		%o3, %lo(1f - 1b), %o3
+	jmpl		%o3 + %g2, %g0
+	 nop
+
+	.align		64
+1:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f0, %f2, %f48
+1:	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
+	STORE_JUMP(o0, f48, 40f)
+2:	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
+	STORE_JUMP(o0, f48, 48f)
+3:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
+	STORE_JUMP(o0, f48, 56f)
+
+1:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f2, %f4, %f48
+1:	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
+	STORE_JUMP(o0, f48, 41f)
+2:	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
+	STORE_JUMP(o0, f48, 49f)
+3:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
+	STORE_JUMP(o0, f48, 57f)
+
+1:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f4, %f6, %f48
+1:	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
+	STORE_JUMP(o0, f48, 42f)
+2:	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
+	STORE_JUMP(o0, f48, 50f)
+3:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
+	STORE_JUMP(o0, f48, 58f)
+
+1:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) 
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f6, %f8, %f48
+1:	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
+	STORE_JUMP(o0, f48, 43f)
+2:	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
+	STORE_JUMP(o0, f48, 51f)
+3:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
+	STORE_JUMP(o0, f48, 59f)
+
+1:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f8, %f10, %f48
+1:	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
+	STORE_JUMP(o0, f48, 44f)
+2:	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
+	STORE_JUMP(o0, f48, 52f)
+3:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
+	STORE_JUMP(o0, f48, 60f)
+
+1:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f10, %f12, %f48
+1:	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
+	STORE_JUMP(o0, f48, 45f)
+2:	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
+	STORE_JUMP(o0, f48, 53f)
+3:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
+	STORE_JUMP(o0, f48, 61f)
+
+1:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f12, %f14, %f48
+1:	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
+	STORE_JUMP(o0, f48, 46f)
+2:	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
+	STORE_JUMP(o0, f48, 54f)
+3:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
+	STORE_JUMP(o0, f48, 62f)
+
+1:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f14, %f16, %f48
+1:	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
+	STORE_JUMP(o0, f48, 47f)
+2:	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
+	STORE_JUMP(o0, f48, 55f)
+3:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
+	STORE_SYNC(o0, f48)
+	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
+	STORE_JUMP(o0, f48, 63f)
+
+40:	FINISH_VISCHUNK(o0, f0,  f2,  g3)
+41:	FINISH_VISCHUNK(o0, f2,  f4,  g3)
+42:	FINISH_VISCHUNK(o0, f4,  f6,  g3)
+43:	FINISH_VISCHUNK(o0, f6,  f8,  g3)
+44:	FINISH_VISCHUNK(o0, f8,  f10, g3)
+45:	FINISH_VISCHUNK(o0, f10, f12, g3)
+46:	FINISH_VISCHUNK(o0, f12, f14, g3)
+47:	UNEVEN_VISCHUNK(o0, f14, f0,  g3)
+48:	FINISH_VISCHUNK(o0, f16, f18, g3)
+49:	FINISH_VISCHUNK(o0, f18, f20, g3)
+50:	FINISH_VISCHUNK(o0, f20, f22, g3)
+51:	FINISH_VISCHUNK(o0, f22, f24, g3)
+52:	FINISH_VISCHUNK(o0, f24, f26, g3)
+53:	FINISH_VISCHUNK(o0, f26, f28, g3)
+54:	FINISH_VISCHUNK(o0, f28, f30, g3)
+55:	UNEVEN_VISCHUNK(o0, f30, f0,  g3)
+56:	FINISH_VISCHUNK(o0, f32, f34, g3)
+57:	FINISH_VISCHUNK(o0, f34, f36, g3)
+58:	FINISH_VISCHUNK(o0, f36, f38, g3)
+59:	FINISH_VISCHUNK(o0, f38, f40, g3)
+60:	FINISH_VISCHUNK(o0, f40, f42, g3)
+61:	FINISH_VISCHUNK(o0, f42, f44, g3)
+62:	FINISH_VISCHUNK(o0, f44, f46, g3)
+63:	UNEVEN_VISCHUNK_LAST(o0, f46, f0,  g3)
+
+93:	EX_LD(LOAD(ldd, %o1, %f2))
+	add		%o1, 8, %o1
+	subcc		%g3, 8, %g3
+	faligndata	%f0, %f2, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	bl,pn		%xcc, 95f
+	 add		%o0, 8, %o0
+	EX_LD(LOAD(ldd, %o1, %f0))
+	add		%o1, 8, %o1
+	subcc		%g3, 8, %g3
+	faligndata	%f2, %f0, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	bge,pt		%xcc, 93b
+	 add		%o0, 8, %o0
+
+95:	brz,pt		%o2, 2f
+	 mov		%g1, %o1
+
+1:	EX_LD(LOAD(ldub, %o1, %o3))
+	add		%o1, 1, %o1
+	subcc		%o2, 1, %o2
+	EX_ST(STORE(stb, %o3, %o0))
+	bne,pt		%xcc, 1b
+	 add		%o0, 1, %o0
+
+2:	membar		#StoreLoad | #StoreStore
+	VISExit
+	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.align		64
+70:	/* 16 < len <= (5 * 64) */
+	bne,pn		%XCC, 75f
+	 sub		%o0, %o1, %o3
+
+72:	andn		%o2, 0xf, %GLOBAL_SPARE
+	and		%o2, 0xf, %o2
+1:	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
+	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+	subcc		%GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+	EX_ST(STORE(stx, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+73:	andcc		%o2, 0x8, %g0
+	be,pt		%XCC, 1f
+	 nop
+	EX_LD(LOAD(ldx, %o1, %o5))
+	sub		%o2, 0x8, %o2
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	EX_LD(LOAD(lduw, %o1, %o5))
+	sub		%o2, 0x4, %o2
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f
+	 nop
+
+75:	andcc		%o0, 0x7, %g1
+	sub		%g1, 0x8, %g1
+	be,pn		%icc, 2f
+	 sub		%g0, %g1, %g1
+	sub		%o2, %g1, %o2
+
+1:	EX_LD(LOAD(ldub, %o1, %o5))
+	subcc		%g1, 1, %g1
+	EX_ST(STORE(stb, %o5, %o1 + %o3))
+	bgu,pt		%icc, 1b
+	 add		%o1, 1, %o1
+
+2:	add		%o1, %o3, %o0
+	andcc		%o1, 0x7, %g1
+	bne,pt		%icc, 8f
+	 sll		%g1, 3, %g1
+
+	cmp		%o2, 16
+	bgeu,pt		%icc, 72b
+	 nop
+	ba,a,pt		%xcc, 73b
+
+8:	mov		64, %o3
+	andn		%o1, 0x7, %o1
+	EX_LD(LOAD(ldx, %o1, %g2))
+	sub		%o3, %g1, %o3
+	andn		%o2, 0x7, %GLOBAL_SPARE
+	sllx		%g2, %g1, %g2
+1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+	subcc		%GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
+	add		%o1, 0x8, %o1
+	srlx		%g3, %o3, %o5
+	or		%o5, %g2, %o5
+	EX_ST(STORE(stx, %o5, %o0))
+	add		%o0, 0x8, %o0
+	bgu,pt		%icc, 1b
+	 sllx		%g3, %g1, %g2
+
+	srl		%g1, 3, %g1
+	andcc		%o2, 0x7, %o2
+	be,pn		%icc, 85f
+	 add		%o1, %g1, %o1
+	ba,pt		%xcc, 90f
+	 sub		%o0, %o1, %o3
+
+	.align		64
+80:	/* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+1:	EX_LD(LOAD(lduw, %o1, %g1))
+	subcc		%o2, 4, %o2
+	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.align		32
+90:	EX_LD(LOAD(ldub, %o1, %g1))
+	subcc		%o2, 1, %o2
+	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.size		FUNC_NAME, .-FUNC_NAME
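
The shift/add sequence feeding the jmpl above (sll %g2, 3 through sllx %o3, 2) computes 452 * g2 without a multiply instruction, where g2 is the source-alignment index (0..7) and 452 bytes is the stated size of one unrolled loop instance. A small C sketch with hypothetical names that checks the arithmetic:

#include <assert.h>

static unsigned long loop_offset(unsigned long g2)
{
	unsigned long o3;

	o3 = (g2 << 3) - g2;	/* 7 * g2    (sll, sub) */
	o3 <<= 4;		/* 112 * g2  (sllx 4)   */
	o3 += g2;		/* 113 * g2  (add)      */
	return o3 << 2;		/* 452 * g2  (sllx 2)   */
}

int main(void)
{
	for (unsigned long g2 = 0; g2 < 8; g2++)
		assert(loop_offset(g2) == 452 * g2);
	return 0;
}
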
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S
new file mode 100644
index 000000000000..f5bfc8d9d216
--- /dev/null
+++ b/arch/sparc/lib/U3copy_from_user.S
@@ -0,0 +1,22 @@
+/* U3copy_from_user.S: UltraSparc-III optimized copy from userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		U3copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+#define EX_RETVAL(x)		0
+
+#include "U3memcpy.S"
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
new file mode 100644
index 000000000000..2334f111bb0c
--- /dev/null
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -0,0 +1,33 @@
+/* U3copy_to_user.S: UltraSparc-III optimized copy to userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		U3copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS
+#define EX_RETVAL(x)		0
+
+	/* Writing to %asi is _expensive_, so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop;						\
+
+#include "U3memcpy.S"
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
new file mode 100644
index 000000000000..7cae9cc6a204
--- /dev/null
+++ b/arch/sparc/lib/U3memcpy.S
@@ -0,0 +1,422 @@
+/* U3memcpy.S: UltraSparc-III optimized memcpy.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#ifdef __KERNEL__
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#define GLOBAL_SPARE	%g7
+#else
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF  0x04
+#ifdef MEMCPY_DEBUG
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
+		     clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#else
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#endif
+#define GLOBAL_SPARE	%g5
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef STORE_BLK
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	U3memcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	/* Special/non-trivial issues of this code:
+	 *
+	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
+	 * 2) Only low 32 FPU registers are used so that only the
+	 *    lower half of the FPU register set is dirtied by this
+	 *    code.  This is especially important in the kernel.
+	 * 3) This code never prefetches cachelines past the end
+	 *    of the source buffer.
+	 */
+
+	.text
+	.align		64
+
+	/* The cheetah's flexible spine, oversized liver, enlarged heart,
+	 * slender muscular body, and claws make it the swiftest hunter
+	 * in Africa and the fastest animal on land.  Can reach speeds
+	 * of up to 2.4GB per second.
+	 */
+
+	.globl	FUNC_NAME
+	.type	FUNC_NAME,#function
+FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+	srlx		%o2, 31, %g2
+	cmp		%g2, 0
+	tne		%xcc, 5
+	PREAMBLE
+	mov		%o0, %o4
+	cmp		%o2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	blu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3
+
+	cmp		%o2, (3 * 64)
+	blu,pt		%XCC, 70f
+	 andcc		%o3, 0x7, %g0
+
+	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
+	 * o5 from here until we hit VISExitHalf.
+	 */
+	VISEntryHalf
+
+	/* Is 'dst' already aligned on a 64-byte boundary? */
+	andcc		%o0, 0x3f, %g2
+	be,pt		%XCC, 2f
+
+	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
+	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
+	 * subtract this from 'len'.
+	 */
+	 sub		%o0, %o1, GLOBAL_SPARE
+	sub		%g2, 0x40, %g2
+	sub		%g0, %g2, %g2
+	sub		%o2, %g2, %o2
+	andcc		%g2, 0x7, %g1
+	be,pt		%icc, 2f
+	 and		%g2, 0x38, %g2
+
+1:	subcc		%g1, 0x1, %g1
+	EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
+	EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x1, %o1
+
+	add		%o1, GLOBAL_SPARE, %o0
+
+2:	cmp		%g2, 0x0
+	and		%o1, 0x7, %g1
+	be,pt		%icc, 3f
+	 alignaddr	%o1, %g0, %o1
+
+	EX_LD(LOAD(ldd, %o1, %f4))
+1:	EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f4, %f6, %f0
+	EX_ST(STORE(std, %f0, %o0))
+	be,pn		%icc, 3f
+	 add		%o0, 0x8, %o0
+
+	EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f6, %f4, %f2
+	EX_ST(STORE(std, %f2, %o0))
+	bne,pt		%icc, 1b
+	 add		%o0, 0x8, %o0
+
+3:	LOAD(prefetch, %o1 + 0x000, #one_read)
+	LOAD(prefetch, %o1 + 0x040, #one_read)
+	andn		%o2, (0x40 - 1), GLOBAL_SPARE
+	LOAD(prefetch, %o1 + 0x080, #one_read)
+	LOAD(prefetch, %o1 + 0x0c0, #one_read)
+	LOAD(prefetch, %o1 + 0x100, #one_read)
+	EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
+	LOAD(prefetch, %o1 + 0x140, #one_read)
+	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+	LOAD(prefetch, %o1 + 0x180, #one_read)
+	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+	LOAD(prefetch, %o1 + 0x1c0, #one_read)
+	faligndata	%f0, %f2, %f16
+	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+	faligndata	%f2, %f4, %f18
+	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+	faligndata	%f4, %f6, %f20
+	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+	faligndata	%f6, %f8, %f22
+
+	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+	faligndata	%f8, %f10, %f24
+	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+	faligndata	%f10, %f12, %f26
+	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+
+	subcc		GLOBAL_SPARE, 0x80, GLOBAL_SPARE
+	add		%o1, 0x40, %o1
+	bgu,pt		%XCC, 1f
+	 srl		GLOBAL_SPARE, 6, %o3
+	ba,pt		%xcc, 2f
+	 nop
+
+	.align		64
+1:
+	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+	faligndata	%f12, %f14, %f28
+	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+	faligndata	%f14, %f0, %f30
+	EX_ST(STORE_BLK(%f16, %o0))
+	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+	faligndata	%f0, %f2, %f16
+	add		%o0, 0x40, %o0
+
+	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+	faligndata	%f2, %f4, %f18
+	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+	faligndata	%f4, %f6, %f20
+	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+	subcc		%o3, 0x01, %o3
+	faligndata	%f6, %f8, %f22
+	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+
+	faligndata	%f8, %f10, %f24
+	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+	LOAD(prefetch, %o1 + 0x1c0, #one_read)
+	faligndata	%f10, %f12, %f26
+	bg,pt		%XCC, 1b
+	 add		%o1, 0x40, %o1
+
+	/* Finally we copy the last full 64-byte block. */
+2:
+	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+	faligndata	%f12, %f14, %f28
+	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+	faligndata	%f14, %f0, %f30
+	EX_ST(STORE_BLK(%f16, %o0))
+	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+	faligndata	%f0, %f2, %f16
+	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+	faligndata	%f2, %f4, %f18
+	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+	faligndata	%f4, %f6, %f20
+	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+	faligndata	%f6, %f8, %f22
+	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+	faligndata	%f8, %f10, %f24
+	cmp		%g1, 0
+	be,pt		%XCC, 1f
+	 add		%o0, 0x40, %o0
+	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+1:	faligndata	%f10, %f12, %f26
+	faligndata	%f12, %f14, %f28
+	faligndata	%f14, %f0, %f30
+	EX_ST(STORE_BLK(%f16, %o0))
+	add		%o0, 0x40, %o0
+	add		%o1, 0x40, %o1
+	membar		#Sync
+
+	/* Now we copy the (len modulo 64) bytes at the end.
+	 * Note how we borrow the %f0 loaded above.
+	 *
+	 * Also notice how this code is careful not to perform a
+	 * load past the end of the src buffer.
+	 */
+	and		%o2, 0x3f, %o2
+	andcc		%o2, 0x38, %g2
+	be,pn		%XCC, 2f
+	 subcc		%g2, 0x8, %g2
+	be,pn		%XCC, 2f
+	 cmp		%g1, 0
+
+	sub		%o2, %g2, %o2
+	be,a,pt		%XCC, 1f
+	 EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
+
+1:	EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f0, %f2, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	be,pn		%XCC, 2f
+	 add		%o0, 0x8, %o0
+	EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f2, %f0, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	bne,pn		%XCC, 1b
+	 add		%o0, 0x8, %o0
+
+	/* If anything is left, we copy it one byte at a time.
+	 * Note that %g1 is (src & 0x7) saved above before the
+	 * alignaddr was performed.
+	 */
+2:
+	cmp		%o2, 0
+	add		%o1, %g1, %o1
+	VISExitHalf
+	be,pn		%XCC, 85f
+	 sub		%o0, %o1, %o3
+
+	andcc		%g1, 0x7, %g0
+	bne,pn		%icc, 90f
+	 andcc		%o2, 0x8, %g0
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(ldx, %o1, %o5))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(lduw, %o1, %o5))
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add		%o1, 0x4, %o1
+
+1:	andcc		%o2, 0x2, %g0
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(lduh, %o1, %o5))
+	EX_ST(STORE(sth, %o5, %o1 + %o3))
+	add		%o1, 0x2, %o1
+
+1:	andcc		%o2, 0x1, %g0
+	be,pt		%icc, 85f
+	 nop
+	EX_LD(LOAD(ldub, %o1, %o5))
+	ba,pt		%xcc, 85f
+	 EX_ST(STORE(stb, %o5, %o1 + %o3))
+
+	.align		64
+70: /* 16 < len <= (3 * 64) */
+	bne,pn		%XCC, 75f
+	 sub		%o0, %o1, %o3
+
+72:
+	andn		%o2, 0xf, GLOBAL_SPARE
+	and		%o2, 0xf, %o2
+1:	subcc		GLOBAL_SPARE, 0x10, GLOBAL_SPARE
+	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
+	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+	EX_ST(STORE(stx, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+73:	andcc		%o2, 0x8, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x8, %o2
+	EX_LD(LOAD(ldx, %o1, %o5))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x4, %o2
+	EX_LD(LOAD(lduw, %o1, %o5))
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f
+	 nop
+
+75:
+	andcc		%o0, 0x7, %g1
+	sub		%g1, 0x8, %g1
+	be,pn		%icc, 2f
+	 sub		%g0, %g1, %g1
+	sub		%o2, %g1, %o2
+
+1:	subcc		%g1, 1, %g1
+	EX_LD(LOAD(ldub, %o1, %o5))
+	EX_ST(STORE(stb, %o5, %o1 + %o3))
+	bgu,pt		%icc, 1b
+	 add		%o1, 1, %o1
+
+2:	add		%o1, %o3, %o0
+	andcc		%o1, 0x7, %g1
+	bne,pt		%icc, 8f
+	 sll		%g1, 3, %g1
+
+	cmp		%o2, 16
+	bgeu,pt		%icc, 72b
+	 nop
+	ba,a,pt		%xcc, 73b
+
+8:	mov		64, %o3
+	andn		%o1, 0x7, %o1
+	EX_LD(LOAD(ldx, %o1, %g2))
+	sub		%o3, %g1, %o3
+	andn		%o2, 0x7, GLOBAL_SPARE
+	sllx		%g2, %g1, %g2
+1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+	subcc		GLOBAL_SPARE, 0x8, GLOBAL_SPARE
+	add		%o1, 0x8, %o1
+	srlx		%g3, %o3, %o5
+	or		%o5, %g2, %o5
+	EX_ST(STORE(stx, %o5, %o0))
+	add		%o0, 0x8, %o0
+	bgu,pt		%icc, 1b
+	 sllx		%g3, %g1, %g2
+
+	srl		%g1, 3, %g1
+	andcc		%o2, 0x7, %o2
+	be,pn		%icc, 85f
+	 add		%o1, %g1, %o1
+	ba,pt		%xcc, 90f
+	 sub		%o0, %o1, %o3
+
+	.align		64
+80: /* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+1:
+	subcc		%o2, 4, %o2
+	EX_LD(LOAD(lduw, %o1, %g1))
+	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.align		32
+90:
+	subcc		%o2, 1, %o2
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.size		FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/U3patch.S b/arch/sparc/lib/U3patch.S
new file mode 100644
index 000000000000..ecc302619a6e
--- /dev/null
+++ b/arch/sparc/lib/U3patch.S
@@ -0,0 +1,33 @@
+/* U3patch.S: Patch Ultra-I routines with Ultra-III variant.
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ */
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define ULTRA3_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	cheetah_patch_copyops
+	.type	cheetah_patch_copyops,#function
+cheetah_patch_copyops:
+	ULTRA3_DO_PATCH(memcpy, U3memcpy)
+	ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user)
+	ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user)
+	retl
+	 nop
+	.size	cheetah_patch_copyops,.-cheetah_patch_copyops
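
ULTRA3_DO_PATCH rewrites the first two instructions of the old routine: a branch-always to the new routine, then a NOP for the delay slot. The sll-by-11 / srl-by-13 pair truncates the byte offset down to the 19-bit word displacement the branch encoding holds. A hedged C sketch of the same computation (the helper name is hypothetical):

#include <stdint.h>

#define BRANCH_ALWAYS	0x10680000u	/* as in the macro above */
#define NOP_INSN	0x01000000u

/* Compute the two instruction words the macro stores at OLD.  On
 * 32-bit values, << 11 then >> 13 keeps bits 2..20 of the byte
 * offset, i.e. a 19-bit displacement counted in words.
 */
static void encode_patch(uint32_t old_addr, uint32_t new_addr,
			 uint32_t insn[2])
{
	uint32_t disp = new_addr - old_addr;	/* byte displacement */

	disp <<= 11;			/* drop the high bits ...     */
	disp >>= 11 + 2;		/* ... keep 19 bits, in words */
	insn[0] = BRANCH_ALWAYS | disp;	/* branch stored at OLD       */
	insn[1] = NOP_INSN;		/* delay slot at OLD + 4      */
}
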
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
new file mode 100644
index 000000000000..b320ae9e2e2e
--- /dev/null
+++ b/arch/sparc/lib/VISsave.S
@@ -0,0 +1,144 @@
+/*
+ * VISsave.S: Code for saving FPU register state for
+ *            VIS routines. One should not call this directly,
+ *            but use macros provided in <asm/visasm.h>.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+
+	.text
+	.globl		VISenter, VISenterhalf
+
+	/* On entry: %o5=current FPRS value, %g7 is the caller's address */
+	/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
+
+	/* Nothing special needs to be done here to handle preemption;
+	 * this FPU save/restore mechanism is already preemption-safe.
+	 */
+
+	.align		32
+VISenter:
+	ldub		[%g6 + TI_FPDEPTH], %g1
+	brnz,a,pn	%g1, 1f
+	 cmp		%g1, 1
+	stb		%g0, [%g6 + TI_FPSAVED]
+	stx		%fsr, [%g6 + TI_XFSR]
+9:	jmpl		%g7 + %g0, %g0
+	 nop
+1:	bne,pn		%icc, 2f
+
+	 srl		%g1, 1, %g1
+vis1:	ldub		[%g6 + TI_FPSAVED], %g3
+	stx		%fsr, [%g6 + TI_XFSR]
+	or		%g3, %o5, %g3
+	stb		%g3, [%g6 + TI_FPSAVED]
+	rd		%gsr, %g3
+	clr		%g1
+	ba,pt		%xcc, 3f
+
+	 stx		%g3, [%g6 + TI_GSR]
+2:	add		%g6, %g1, %g3
+	cmp		%o5, FPRS_DU
+	be,pn		%icc, 6f
+	 sll		%g1, 3, %g1
+	stb		%o5, [%g3 + TI_FPSAVED]
+	rd		%gsr, %g2
+	add		%g6, %g1, %g3
+	stx		%g2, [%g3 + TI_GSR]
+
+	add		%g6, %g1, %g2
+	stx		%fsr, [%g2 + TI_XFSR]
+	sll		%g1, 5, %g1
+3:	andcc		%o5, FPRS_DL|FPRS_DU, %g0
+	be,pn		%icc, 9b
+	 add		%g6, TI_FPREGS, %g2
+	andcc		%o5, FPRS_DL, %g0
+
+	be,pn		%icc, 4f
+	 add		%g6, TI_FPREGS+0x40, %g3
+	membar		#Sync
+	stda		%f0, [%g2 + %g1] ASI_BLK_P
+	stda		%f16, [%g3 + %g1] ASI_BLK_P
+	membar		#Sync
+	andcc		%o5, FPRS_DU, %g0
+	be,pn		%icc, 5f
+4:	 add		%g1, 128, %g1
+	membar		#Sync
+	stda		%f32, [%g2 + %g1] ASI_BLK_P
+
+	stda		%f48, [%g3 + %g1] ASI_BLK_P
+5:	membar		#Sync
+	ba,pt		%xcc, 80f
+	 nop
+
+	.align		32
+80:	jmpl		%g7 + %g0, %g0
+	 nop
+
+6:	ldub		[%g3 + TI_FPSAVED], %o5
+	or		%o5, FPRS_DU, %o5
+	add		%g6, TI_FPREGS+0x80, %g2
+	stb		%o5, [%g3 + TI_FPSAVED]
+
+	sll		%g1, 5, %g1
+	add		%g6, TI_FPREGS+0xc0, %g3
+	wr		%g0, FPRS_FEF, %fprs
+	membar		#Sync
+	stda		%f32, [%g2 + %g1] ASI_BLK_P
+	stda		%f48, [%g3 + %g1] ASI_BLK_P
+	membar		#Sync
+	ba,pt		%xcc, 80f
+	 nop
+
+	.align		32
+80:	jmpl		%g7 + %g0, %g0
+	 nop
+
+	.align		32
+VISenterhalf:
+	ldub		[%g6 + TI_FPDEPTH], %g1
+	brnz,a,pn	%g1, 1f
+	 cmp		%g1, 1
+	stb		%g0, [%g6 + TI_FPSAVED]
+	stx		%fsr, [%g6 + TI_XFSR]
+	clr		%o5
+	jmpl		%g7 + %g0, %g0
+	 wr		%g0, FPRS_FEF, %fprs
+
+1:	bne,pn		%icc, 2f
+	 srl		%g1, 1, %g1
+	ba,pt		%xcc, vis1
+	 sub		%g7, 8, %g7
+2:	addcc		%g6, %g1, %g3
+	sll		%g1, 3, %g1
+	andn		%o5, FPRS_DU, %g2
+	stb		%g2, [%g3 + TI_FPSAVED]
+
+	rd		%gsr, %g2
+	add		%g6, %g1, %g3
+	stx		%g2, [%g3 + TI_GSR]
+	add		%g6, %g1, %g2
+	stx		%fsr, [%g2 + TI_XFSR]
+	sll		%g1, 5, %g1
+3:	andcc		%o5, FPRS_DL, %g0
+	be,pn		%icc, 4f
+	 add		%g6, TI_FPREGS, %g2
+
+	add		%g6, TI_FPREGS+0x40, %g3
+	membar		#Sync
+	stda		%f0, [%g2 + %g1] ASI_BLK_P
+	stda		%f16, [%g3 + %g1] ASI_BLK_P
+	membar		#Sync
+	ba,pt		%xcc, 4f
+	 nop
+
+	.align		32
+4:	and		%o5, FPRS_DU, %o5
+	jmpl		%g7 + %g0, %g0
+	 wr		%o5, FPRS_FEF, %fprs
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
new file mode 100644
index 000000000000..0268210ca168
--- /dev/null
+++ b/arch/sparc/lib/atomic_64.S
@@ -0,0 +1,138 @@
+/* atomic.S: These things are too big to do inline.
+ *
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#include <asm/asi.h>
+#include <asm/backoff.h>
+
+	.text
+
+	/* Two versions of the atomic routines, one that
+	 * does not return a value and does not perform
+	 * memory barriers, and a second which returns
+	 * a value and does the barriers.
+	 */
+	.globl	atomic_add
+	.type	atomic_add,#function
+atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	lduw	[%o1], %g1
+	add	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+	.size	atomic_add, .-atomic_add
+
+	.globl	atomic_sub
+	.type	atomic_sub,#function
+atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	lduw	[%o1], %g1
+	sub	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+	.size	atomic_sub, .-atomic_sub
+
+	.globl	atomic_add_ret
+	.type	atomic_add_ret,#function
+atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	lduw	[%o1], %g1
+	add	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 2f
+	 add	%g7, %o0, %g7
+	sra	%g7, 0, %o0
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+	.size	atomic_add_ret, .-atomic_add_ret
+
+	.globl	atomic_sub_ret
+	.type	atomic_sub_ret,#function
+atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	lduw	[%o1], %g1
+	sub	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 2f
+	 sub	%g7, %o0, %g7
+	sra	%g7, 0, %o0
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+	.size	atomic_sub_ret, .-atomic_sub_ret
+
+	.globl	atomic64_add
+	.type	atomic64_add,#function
+atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	ldx	[%o1], %g1
+	add	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+	.size	atomic64_add, .-atomic64_add
+
+	.globl	atomic64_sub
+	.type	atomic64_sub,#function
+atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	ldx	[%o1], %g1
+	sub	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+	.size	atomic64_sub, .-atomic64_sub
+
+	.globl	atomic64_add_ret
+	.type	atomic64_add_ret,#function
+atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	ldx	[%o1], %g1
+	add	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 2f
+	 add	%g7, %o0, %g7
+	mov	%g7, %o0
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+	.size	atomic64_add_ret, .-atomic64_add_ret
+
+	.globl	atomic64_sub_ret
+	.type	atomic64_sub_ret,#function
+atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	ldx	[%o1], %g1
+	sub	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 2f
+	 sub	%g7, %o0, %g7
+	mov	%g7, %o0
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+	.size	atomic64_sub_ret, .-atomic64_sub_ret
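
Every routine above is the same compare-and-swap retry loop: load the old value, compute the new one, cas/casx it in, and spin with backoff if another cpu raced us. A rough C equivalent of atomic_add_ret(), using the GCC __sync builtin in place of raw cas (sketch only; backoff omitted):

#include <stdint.h>

/* Hypothetical C rendering of atomic_add_ret(): loop until the cas
 * succeeds, then return the new value (the asm recomputes it in the
 * branch delay slot before the sra).
 */
static int32_t my_atomic_add_ret(int32_t inc, int32_t *p)
{
	int32_t old;

	for (;;) {
		old = *p;				/* lduw */
		if (__sync_val_compare_and_swap(p, old, old + inc) == old)
			return old + inc;		/* add, sra */
		/* lost the race: BACKOFF_SPIN, then retry */
	}
}
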
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S
new file mode 100644
index 000000000000..2b7228cb8c22
--- /dev/null
+++ b/arch/sparc/lib/bitops.S
@@ -0,0 +1,141 @@
+/* bitops.S: Sparc64 atomic bit operations.
+ *
+ * Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#include <asm/asi.h>
+#include <asm/backoff.h>
+
+	.text
+
+	.globl	test_and_set_bit
+	.type	test_and_set_bit,#function
+test_and_set_bit:	/* %o0=nr, %o1=addr */
+	BACKOFF_SETUP(%o3)
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	or	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 2f
+	 and	%g7, %o2, %g2
+	clr	%o0
+	movrne	%g2, 1, %o0
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o3, %o4, 1b)
+	.size	test_and_set_bit, .-test_and_set_bit
+
+	.globl	test_and_clear_bit
+	.type	test_and_clear_bit,#function
+test_and_clear_bit:	/* %o0=nr, %o1=addr */
+	BACKOFF_SETUP(%o3)
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	andn	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 2f
+	 and	%g7, %o2, %g2
+	clr	%o0
+	movrne	%g2, 1, %o0
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o3, %o4, 1b)
+	.size	test_and_clear_bit, .-test_and_clear_bit
+
+	.globl	test_and_change_bit
+	.type	test_and_change_bit,#function
+test_and_change_bit:	/* %o0=nr, %o1=addr */
+	BACKOFF_SETUP(%o3)
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	xor	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 2f
+	 and	%g7, %o2, %g2
+	clr	%o0
+	movrne	%g2, 1, %o0
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o3, %o4, 1b)
+	.size	test_and_change_bit, .-test_and_change_bit
+
+	.globl	set_bit
+	.type	set_bit,#function
+set_bit:		/* %o0=nr, %o1=addr */
+	BACKOFF_SETUP(%o3)
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	or	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o3, %o4, 1b)
+	.size	set_bit, .-set_bit
+
+	.globl	clear_bit
+	.type	clear_bit,#function
+clear_bit:		/* %o0=nr, %o1=addr */
+	BACKOFF_SETUP(%o3)
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	andn	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o3, %o4, 1b)
+	.size	clear_bit, .-clear_bit
+
+	.globl	change_bit
+	.type	change_bit,#function
+change_bit:		/* %o0=nr, %o1=addr */
+	BACKOFF_SETUP(%o3)
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	xor	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o3, %o4, 1b)
+	.size	change_bit, .-change_bit
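
All six bit operations share the same address math: the bit number splits into a 64-bit word index (nr >> 6, scaled to a byte offset) and an in-word mask (1 << (nr & 63)), and the whole word is then updated with casx. A hypothetical C rendering of test_and_set_bit() under those assumptions:

#include <stdint.h>

static int my_test_and_set_bit(unsigned long nr, uint64_t *addr)
{
	uint64_t *word = addr + (nr >> 6);	/* srlx 6 / sllx 3 */
	uint64_t mask = (uint64_t)1 << (nr & 63); /* and 63 / sllx */
	uint64_t old;

	do {					/* ldx / casx retry */
		old = *word;
	} while (__sync_val_compare_and_swap(word, old, old | mask) != old);

	return (old & mask) != 0;		/* the movrne result */
}
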
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
new file mode 100644
index 000000000000..c7bbae8c590f
--- /dev/null
+++ b/arch/sparc/lib/bzero.S
@@ -0,0 +1,158 @@
+/* bzero.S: Simple prefetching memset, bzero, and clear_user
+ *          implementations.
+ *
+ * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
+ */
+
+	.text
+
+	.globl	__memset
+	.type	__memset, #function
+__memset:		/* %o0=buf, %o1=pat, %o2=len */
+
+	.globl	memset
+	.type	memset, #function
+memset:			/* %o0=buf, %o1=pat, %o2=len */
+	and		%o1, 0xff, %o3
+	mov		%o2, %o1
+	sllx		%o3, 8, %g1
+	or		%g1, %o3, %o2
+	sllx		%o2, 16, %g1
+	or		%g1, %o2, %o2
+	sllx		%o2, 32, %g1
+	ba,pt		%xcc, 1f
+	 or		%g1, %o2, %o2
+
+	.globl	__bzero
+	.type	__bzero, #function
+__bzero:		/* %o0=buf, %o1=len */
+	clr		%o2
+1:	mov		%o0, %o3
+	brz,pn		%o1, __bzero_done
+	 cmp		%o1, 16
+	bl,pn		%icc, __bzero_tiny
+	 prefetch	[%o0 + 0x000], #n_writes
+	andcc		%o0, 0x3, %g0
+	be,pt		%icc, 2f
+1:	 stb		%o2, [%o0 + 0x00]
+	add		%o0, 1, %o0
+	andcc		%o0, 0x3, %g0
+	bne,pn		%icc, 1b
+	 sub		%o1, 1, %o1
+2:	andcc		%o0, 0x7, %g0
+	be,pt		%icc, 3f
+	 stw		%o2, [%o0 + 0x00]
+	sub		%o1, 4, %o1
+	add		%o0, 4, %o0
+3:	and		%o1, 0x38, %g1
+	cmp		%o1, 0x40
+	andn		%o1, 0x3f, %o4
+	bl,pn		%icc, 5f
+	 and		%o1, 0x7, %o1
+	prefetch	[%o0 + 0x040], #n_writes
+	prefetch	[%o0 + 0x080], #n_writes
+	prefetch	[%o0 + 0x0c0], #n_writes
+	prefetch	[%o0 + 0x100], #n_writes
+	prefetch	[%o0 + 0x140], #n_writes
+4:	prefetch	[%o0 + 0x180], #n_writes
+	stx		%o2, [%o0 + 0x00]
+	stx		%o2, [%o0 + 0x08]
+	stx		%o2, [%o0 + 0x10]
+	stx		%o2, [%o0 + 0x18]
+	stx		%o2, [%o0 + 0x20]
+	stx		%o2, [%o0 + 0x28]
+	stx		%o2, [%o0 + 0x30]
+	stx		%o2, [%o0 + 0x38]
+	subcc		%o4, 0x40, %o4
+	bne,pt		%icc, 4b
+	 add		%o0, 0x40, %o0
+	brz,pn		%g1, 6f
+	 nop
+5:	stx		%o2, [%o0 + 0x00]
+	subcc		%g1, 8, %g1
+	bne,pt		%icc, 5b
+	 add		%o0, 0x8, %o0
+6:	brz,pt		%o1, __bzero_done
+	 nop
+__bzero_tiny:
+1:	stb		%o2, [%o0 + 0x00]
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 1, %o0
+__bzero_done:
+	retl
+	 mov		%o3, %o0
+	.size		__bzero, .-__bzero
+	.size		__memset, .-__memset
+	.size		memset, .-memset
+
+#define EX_ST(x,y)		\
+98:	x,y;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	%o1, %o0;	\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+	.globl	__clear_user
+	.type	__clear_user, #function
+__clear_user:		/* %o0=buf, %o1=len */
+	brz,pn		%o1, __clear_user_done
+	 cmp		%o1, 16
+	bl,pn		%icc, __clear_user_tiny
+	 EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes)
+	andcc		%o0, 0x3, %g0
+	be,pt		%icc, 2f
+1:	 EX_ST(stba	%g0, [%o0 + 0x00] %asi)
+	add		%o0, 1, %o0
+	andcc		%o0, 0x3, %g0
+	bne,pn		%icc, 1b
+	 sub		%o1, 1, %o1
+2:	andcc		%o0, 0x7, %g0
+	be,pt		%icc, 3f
+	 EX_ST(stwa	%g0, [%o0 + 0x00] %asi)
+	sub		%o1, 4, %o1
+	add		%o0, 4, %o0
+3:	and		%o1, 0x38, %g1
+	cmp		%o1, 0x40
+	andn		%o1, 0x3f, %o4
+	bl,pn		%icc, 5f
+	 and		%o1, 0x7, %o1
+	EX_ST(prefetcha	[%o0 + 0x040] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x080] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x0c0] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x100] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x140] %asi, #n_writes)
+4:	EX_ST(prefetcha	[%o0 + 0x180] %asi, #n_writes)
+	EX_ST(stxa	%g0, [%o0 + 0x00] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x08] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x10] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x18] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x20] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x28] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x30] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x38] %asi)
+	subcc		%o4, 0x40, %o4
+	bne,pt		%icc, 4b
+	 add		%o0, 0x40, %o0
+	brz,pn		%g1, 6f
+	 nop
+5:	EX_ST(stxa	%g0, [%o0 + 0x00] %asi)
+	subcc		%g1, 8, %g1
+	bne,pt		%icc, 5b
+	 add		%o0, 0x8, %o0
+6:	brz,pt		%o1, __clear_user_done
+	 nop
+__clear_user_tiny:
+1:	EX_ST(stba	%g0, [%o0 + 0x00] %asi)
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 1, %o0
+__clear_user_done:
+	retl
+	 clr		%o0
+	.size		__clear_user, .-__clear_user
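
The memset prologue above widens the pattern byte into a full 64-bit store operand with three shift-and-or steps, so the main loop can fill eight bytes per stx. The same trick in C (sketch only; the asm interleaves it with argument shuffling):

#include <stdint.h>

static uint64_t spread_byte(uint8_t pat)
{
	uint64_t v = pat;

	v |= v << 8;	/* pattern in 2 bytes     (sllx 8, or)  */
	v |= v << 16;	/* pattern in 4 bytes     (sllx 16, or) */
	v |= v << 32;	/* pattern in all 8 bytes (sllx 32, or) */
	return v;
}
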
diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S
new file mode 100644
index 000000000000..1d230f693dc4
--- /dev/null
+++ b/arch/sparc/lib/checksum_64.S
@@ -0,0 +1,173 @@
+/* checksum.S: Sparc V9 optimized checksum code.
+ *
+ *  Copyright(C) 1995 Linus Torvalds
+ *  Copyright(C) 1995 Miguel de Icaza
+ *  Copyright(C) 1996, 2000 David S. Miller
+ *  Copyright(C) 1997 Jakub Jelinek
+ *
+ * derived from:
+ *	Linux/Alpha checksum c-code
+ *      Linux/ix86 inline checksum assembly
+ *      RFC1071 Computing the Internet Checksum (esp. Jacobson's m68k code)
+ *	David Mosberger-Tang for optimized reference c-code
+ *	BSD4.4 portable checksum routine
+ */
+
+	.text
+
+csum_partial_fix_alignment:
+	/* We checked for zero length already, so there must be
+	 * at least one byte.
+	 */
+	be,pt		%icc, 1f
+	 nop
+	ldub		[%o0 + 0x00], %o4
+	add		%o0, 1, %o0
+	sub		%o1, 1, %o1
+1:	andcc		%o0, 0x2, %g0
+	be,pn		%icc, csum_partial_post_align
+	 cmp		%o1, 2
+	blu,pn		%icc, csum_partial_end_cruft
+	 nop
+	lduh		[%o0 + 0x00], %o5
+	add		%o0, 2, %o0
+	sub		%o1, 2, %o1
+	ba,pt		%xcc, csum_partial_post_align
+	 add		%o5, %o4, %o4
+
+	.align		32
+	.globl		csum_partial
+csum_partial:		/* %o0=buff, %o1=len, %o2=sum */
+	prefetch	[%o0 + 0x000], #n_reads
+	clr		%o4
+	prefetch	[%o0 + 0x040], #n_reads
+	brz,pn		%o1, csum_partial_finish
+	 andcc		%o0, 0x3, %g0
+
+	/* We "remember" in %g7 whether the lowest bit of the
+	 * address was set, because if it was we must swap the
+	 * upper and lower 8-bit halves of the sum we calculate.
+	 */
+	bne,pn		%icc, csum_partial_fix_alignment
+	 andcc		%o0, 0x1, %g7
+
+csum_partial_post_align:
+	prefetch	[%o0 + 0x080], #n_reads
+	andncc		%o1, 0x3f, %o3
+
+	prefetch	[%o0 + 0x0c0], #n_reads
+	sub		%o1, %o3, %o1
+	brz,pn		%o3, 2f
+	 prefetch	[%o0 + 0x100], #n_reads
+
+	/* So that we don't need to use the non-pairing
+	 * add-with-carry instructions, we accumulate 32-bit
+	 * values into a 64-bit register.  At the end of the
+	 * loop we fold it down to 32 bits and then to 16.
+	 */
+	prefetch	[%o0 + 0x140], #n_reads
+1:	lduw		[%o0 + 0x00], %o5
+	lduw		[%o0 + 0x04], %g1
+	lduw		[%o0 + 0x08], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x0c], %g3
+	add		%o4, %g1, %o4
+	lduw		[%o0 + 0x10], %o5
+	add		%o4, %g2, %o4
+	lduw		[%o0 + 0x14], %g1
+	add		%o4, %g3, %o4
+	lduw		[%o0 + 0x18], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x1c], %g3
+	add		%o4, %g1, %o4
+	lduw		[%o0 + 0x20], %o5
+	add		%o4, %g2, %o4
+	lduw		[%o0 + 0x24], %g1
+	add		%o4, %g3, %o4
+	lduw		[%o0 + 0x28], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x2c], %g3
+	add		%o4, %g1, %o4
+	lduw		[%o0 + 0x30], %o5
+	add		%o4, %g2, %o4
+	lduw		[%o0 + 0x34], %g1
+	add		%o4, %g3, %o4
+	lduw		[%o0 + 0x38], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x3c], %g3
+	add		%o4, %g1, %o4
+	prefetch	[%o0 + 0x180], #n_reads
+	add		%o4, %g2, %o4
+	subcc		%o3, 0x40, %o3
+	add		%o0, 0x40, %o0
+	bne,pt		%icc, 1b
+	 add		%o4, %g3, %o4
+
+2:	and		%o1, 0x3c, %o3
+	brz,pn		%o3, 2f
+	 sub		%o1, %o3, %o1
+1:	lduw		[%o0 + 0x00], %o5
+	subcc		%o3, 0x4, %o3
+	add		%o0, 0x4, %o0
+	bne,pt		%icc, 1b
+	 add		%o4, %o5, %o4
+
+2:
+	/* fold 64-->32 */
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+csum_partial_end_cruft:
+	/* %o4 has the 16-bit sum we have calculated so far.  */
+	cmp		%o1, 2
+	blu,pt		%icc, 1f
+	 nop
+	lduh		[%o0 + 0x00], %o5
+	sub		%o1, 2, %o1
+	add		%o0, 2, %o0
+	add		%o4, %o5, %o4
+1:	brz,pt		%o1, 1f
+	 nop
+	ldub		[%o0 + 0x00], %o5
+	sub		%o1, 1, %o1
+	add		%o0, 1, %o0
+	sllx		%o5, 8, %o5
+	add		%o4, %o5, %o4
+1:
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+1:	brz,pt		%g7, 1f
+	 nop
+
+	/* We started with an odd byte; byte-swap the result.  */
+	srl		%o4, 8, %o5
+	and		%o4, 0xff, %g1
+	sll		%g1, 8, %g1
+	or		%o5, %g1, %o4
+
+1:	addcc		%o2, %o4, %o2
+	addc		%g0, %o2, %o2
+
+csum_partial_finish:
+	retl
+	 srl		%o2, 0, %o0
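
The two fold blocks in csum_partial reduce the wide accumulator without add-with-carry instructions: at each width, add the upper half onto the lower half twice so any carry settles. A C sketch of the folds and of the final odd-start byte swap (function names are illustrative, not kernel code):

#include <stdint.h>

static uint32_t csum_fold_sketch(uint64_t sum)
{
	sum = (sum >> 32) + (uint32_t)sum;	/* 64 -> 32, pass 1 */
	sum = (sum >> 32) + (uint32_t)sum;	/* 64 -> 32, pass 2 */
	sum = (sum >> 16) + (sum & 0xffff);	/* 32 -> 16, pass 1 */
	sum = (sum >> 16) + (sum & 0xffff);	/* 32 -> 16, pass 2 */
	return (uint32_t)sum;
}

/* If the buffer began on an odd address (%g7 set), the two byte
 * lanes of the 16-bit sum are swapped back at the end.
 */
static uint32_t swap_sum_bytes(uint32_t s)
{
	return ((s >> 8) & 0xff) | ((s & 0xff) << 8);
}
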
diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S
new file mode 100644
index 000000000000..77e531f6c2a7
--- /dev/null
+++ b/arch/sparc/lib/clear_page.S
@@ -0,0 +1,103 @@
+/* clear_page.S: UltraSparc optimized clear page.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/spitfire.h>
+#include <asm/head.h>
+
+	/* What we used to do was lock a TLB entry into a specific
+	 * TLB slot, clear the page with interrupts disabled, then
+	 * restore the original TLB entry.  This was great for
+	 * disturbing the TLB as little as possible, but it meant
+	 * we had to keep interrupts disabled for a long time.
+	 *
+	 * Now, we simply use the normal TLB loading mechanism,
+	 * and this makes the cpu choose a slot all by itself.
+	 * Then we do a normal TLB flush on exit.  We need only
+	 * disable preemption during the clear.
+	 */
+
+	.text
+
+	.globl		_clear_page
+_clear_page:		/* %o0=dest */
+	ba,pt		%xcc, clear_page_common
+	 clr		%o4
+
+	/* This thing is pretty important; it shows up
+	 * in profiles via do_anonymous_page().
+	 */
+	.align		32
+	.globl		clear_user_page
+clear_user_page:	/* %o0=dest, %o1=vaddr */
+	lduw		[%g6 + TI_PRE_COUNT], %o2
+	sethi		%uhi(PAGE_OFFSET), %g2
+	sethi		%hi(PAGE_SIZE), %o4
+
+	sllx		%g2, 32, %g2
+	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
+
+	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
+	sub		%o0, %g2, %g1		! paddr
+
+	and		%o1, %o4, %o0		! vaddr D-cache alias bit
+
+	or		%g1, %g3, %g1		! TTE data
+	sethi		%hi(TLBTEMP_BASE), %o3
+
+	add		%o2, 1, %o4
+	add		%o0, %o3, %o0		! TTE vaddr
+
+	/* Disable preemption.  */
+	mov		TLB_TAG_ACCESS, %g3
+	stw		%o4, [%g6 + TI_PRE_COUNT]
+
+	/* Load TLB entry.  */
+	rdpr		%pstate, %o4
+	wrpr		%o4, PSTATE_IE, %pstate
+	stxa		%o0, [%g3] ASI_DMMU
+	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
+	sethi		%hi(KERNBASE), %g1
+	flush		%g1
+	wrpr		%o4, 0x0, %pstate
+
+	mov		1, %o4
+
+clear_page_common:
+	VISEntryHalf
+	membar		#StoreLoad | #StoreStore | #LoadStore
+	fzero		%f0
+	sethi		%hi(PAGE_SIZE/64), %o1
+	mov		%o0, %g1		! remember vaddr for tlbflush
+	fzero		%f2
+	or		%o1, %lo(PAGE_SIZE/64), %o1
+	faddd		%f0, %f2, %f4
+	fmuld		%f0, %f2, %f6
+	faddd		%f0, %f2, %f8
+	fmuld		%f0, %f2, %f10
+
+	faddd		%f0, %f2, %f12
+	fmuld		%f0, %f2, %f14
+1:	stda		%f0, [%o0 + %g0] ASI_BLK_P
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 0x40, %o0
+	membar		#Sync
+	VISExitHalf
+
+	brz,pn		%o4, out
+	 nop
+
+	stxa		%g0, [%g1] ASI_DMMU_DEMAP
+	membar		#Sync
+	stw		%o2, [%g6 + TI_PRE_COUNT]
+
+out:	retl
+	 nop
+
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
new file mode 100644
index 000000000000..650af3f21f78
--- /dev/null
+++ b/arch/sparc/lib/copy_in_user.S
@@ -0,0 +1,119 @@
+/* copy_in_user.S: Copy from userspace to userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#include <asm/asi.h>
+
+#define XCC xcc
+
+#define EX(x,y)			\
+98:	x,y;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov 1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align	32
+
+	/* Don't try to get too fancy here; just keep it nice and
+	 * simple.  This is predominantly used for well-aligned
+	 * small copies in the compat layer.  It is also used
+	 * to copy register windows around during thread cloning.
+	 */
+
+	.globl		___copy_in_user
+	.type		___copy_in_user,#function
+___copy_in_user:	/* %o0=dst, %o1=src, %o2=len */
+	/* Writing to %asi is _expensive_, so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+	rd		%asi, %g1
+	cmp		%g1, ASI_AIUS
+	bne,pn		%icc, memcpy_user_stub
+	 nop
+
+	cmp		%o2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	bleu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3
+
+	/* 16 < len <= 64 */
+	andcc		%o3, 0x7, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+	andn		%o2, 0x7, %o4
+	and		%o2, 0x7, %o2
+1:	subcc		%o4, 0x8, %o4
+	EX(ldxa [%o1] %asi, %o5)
+	EX(stxa %o5, [%o1 + %o3] ASI_AIUS)
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x4, %o2
+	EX(lduwa [%o1] %asi, %o5)
+	EX(stwa %o5, [%o1 + %o3] ASI_AIUS)
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f
+	 nop
+
+80:	/* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+82:
+	subcc		%o2, 4, %o2
+	EX(lduwa [%o1] %asi, %g1)
+	EX(stwa %g1, [%o1 + %o3] ASI_AIUS)
+	bgu,pt		%XCC, 82b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 clr		%o0
+
+	.align	32
+90:
+	subcc		%o2, 1, %o2
+	EX(lduba [%o1] %asi, %g1)
+	EX(stba %g1, [%o1 + %o3] ASI_AIUS)
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 clr		%o0
+
+	.size		___copy_in_user, .-___copy_in_user
+
+	/* Act like copy_{to,in}_user(), i.e. return zero instead
+	 * of the original destination pointer.  This is invoked when
+	 * copy_{to,in}_user() finds that %asi is kernel space.
+	 */
+	.globl		memcpy_user_stub
+	.type		memcpy_user_stub,#function
+memcpy_user_stub:
+	save		%sp, -192, %sp
+	mov		%i0, %o0
+	mov		%i1, %o1
+	call		memcpy
+	 mov		%i2, %o2
+	ret
+	 restore	%g0, %g0, %o0
+	.size		memcpy_user_stub, .-memcpy_user_stub
diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
new file mode 100644
index 000000000000..b243d3b606ba
--- /dev/null
+++ b/arch/sparc/lib/copy_page.S
@@ -0,0 +1,250 @@
+/* copy_page.S: UltraSparc optimized copy page.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/spitfire.h>
+#include <asm/head.h>
+
+	/* What we used to do was lock a TLB entry into a specific
+	 * TLB slot, copy the page with interrupts disabled, then
+	 * restore the original TLB entry.  This was great for
+	 * disturbing the TLB as little as possible, but it meant
+	 * we had to keep interrupts disabled for a long time.
+	 *
+	 * Now, we simply use the normal TLB loading mechanism,
+	 * and this makes the cpu choose a slot all by itself.
+	 * Then we do a normal TLB flush on exit.  We need only
+	 * disable preemption during the copy.
+	 */
+
+#define	DCACHE_SIZE	(PAGE_SIZE * 2)
+
+#if (PAGE_SHIFT == 13)
+#define PAGE_SIZE_REM	0x80
+#elif (PAGE_SHIFT == 16)
+#define PAGE_SIZE_REM	0x100
+#else
+#error Wrong PAGE_SHIFT specified
+#endif
+
+#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7)	\
+	fmovd	%reg0, %f48; 	fmovd	%reg1, %f50;		\
+	fmovd	%reg2, %f52; 	fmovd	%reg3, %f54;		\
+	fmovd	%reg4, %f56; 	fmovd	%reg5, %f58;		\
+	fmovd	%reg6, %f60; 	fmovd	%reg7, %f62;
+
+	.text
+
+	.align		32
+	.globl		copy_user_page
+	.type		copy_user_page,#function
+copy_user_page:		/* %o0=dest, %o1=src, %o2=vaddr */
+	lduw		[%g6 + TI_PRE_COUNT], %o4
+	sethi		%uhi(PAGE_OFFSET), %g2
+	sethi		%hi(PAGE_SIZE), %o3
+
+	sllx		%g2, 32, %g2
+	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
+
+	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
+	sub		%o0, %g2, %g1		! dest paddr
+
+	sub		%o1, %g2, %g2		! src paddr
+
+	and		%o2, %o3, %o0		! vaddr D-cache alias bit
+	or		%g1, %g3, %g1		! dest TTE data
+
+	or		%g2, %g3, %g2		! src TTE data
+	sethi		%hi(TLBTEMP_BASE), %o3
+
+	sethi		%hi(DCACHE_SIZE), %o1
+	add		%o0, %o3, %o0		! dest TTE vaddr
+
+	add		%o4, 1, %o2
+	add		%o0, %o1, %o1		! src TTE vaddr
+
+	/* Disable preemption.  */
+	mov		TLB_TAG_ACCESS, %g3
+	stw		%o2, [%g6 + TI_PRE_COUNT]
+
+	/* Load TLB entries.  */
+	rdpr		%pstate, %o2
+	wrpr		%o2, PSTATE_IE, %pstate
+	stxa		%o0, [%g3] ASI_DMMU
+	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
+	membar		#Sync
+	stxa		%o1, [%g3] ASI_DMMU
+	stxa		%g2, [%g0] ASI_DTLB_DATA_IN
+	membar		#Sync
+	wrpr		%o2, 0x0, %pstate
+
+cheetah_copy_page_insn:
+	ba,pt		%xcc, 9f
+	 nop
+
+1:
+	VISEntryHalf
+	membar		#StoreLoad | #StoreStore | #LoadStore
+	sethi		%hi((PAGE_SIZE/64)-2), %o2
+	mov		%o0, %g1
+	prefetch	[%o1 + 0x000], #one_read
+	or		%o2, %lo((PAGE_SIZE/64)-2), %o2
+	prefetch	[%o1 + 0x040], #one_read
+	prefetch	[%o1 + 0x080], #one_read
+	prefetch	[%o1 + 0x0c0], #one_read
+	ldd		[%o1 + 0x000], %f0
+	prefetch	[%o1 + 0x100], #one_read
+	ldd		[%o1 + 0x008], %f2
+	prefetch	[%o1 + 0x140], #one_read
+	ldd		[%o1 + 0x010], %f4
+	prefetch	[%o1 + 0x180], #one_read
+	fmovd		%f0, %f16
+	ldd		[%o1 + 0x018], %f6
+	fmovd		%f2, %f18
+	ldd		[%o1 + 0x020], %f8
+	fmovd		%f4, %f20
+	ldd		[%o1 + 0x028], %f10
+	fmovd		%f6, %f22
+	ldd		[%o1 + 0x030], %f12
+	fmovd		%f8, %f24
+	ldd		[%o1 + 0x038], %f14
+	fmovd		%f10, %f26
+	ldd		[%o1 + 0x040], %f0
+1:	ldd		[%o1 + 0x048], %f2
+	fmovd		%f12, %f28
+	ldd		[%o1 + 0x050], %f4
+	fmovd		%f14, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	ldd		[%o1 + 0x058], %f6
+	fmovd		%f0, %f16
+	ldd		[%o1 + 0x060], %f8
+	fmovd		%f2, %f18
+	ldd		[%o1 + 0x068], %f10
+	fmovd		%f4, %f20
+	ldd		[%o1 + 0x070], %f12
+	fmovd		%f6, %f22
+	ldd		[%o1 + 0x078], %f14
+	fmovd		%f8, %f24
+	ldd		[%o1 + 0x080], %f0
+	prefetch	[%o1 + 0x180], #one_read
+	fmovd		%f10, %f26
+	subcc		%o2, 1, %o2
+	add		%o0, 0x40, %o0
+	bne,pt		%xcc, 1b
+	 add		%o1, 0x40, %o1
+
+	ldd		[%o1 + 0x048], %f2
+	fmovd		%f12, %f28
+	ldd		[%o1 + 0x050], %f4
+	fmovd		%f14, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	ldd		[%o1 + 0x058], %f6
+	fmovd		%f0, %f16
+	ldd		[%o1 + 0x060], %f8
+	fmovd		%f2, %f18
+	ldd		[%o1 + 0x068], %f10
+	fmovd		%f4, %f20
+	ldd		[%o1 + 0x070], %f12
+	fmovd		%f6, %f22
+	add		%o0, 0x40, %o0
+	ldd		[%o1 + 0x078], %f14
+	fmovd		%f8, %f24
+	fmovd		%f10, %f26
+	fmovd		%f12, %f28
+	fmovd		%f14, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	membar		#Sync
+	VISExitHalf
+	ba,pt		%xcc, 5f
+	 nop
+
+9:
+	VISEntry
+	ldub		[%g6 + TI_FAULT_CODE], %g3
+	mov		%o0, %g1
+	cmp		%g3, 0
+	rd		%asi, %g3
+	be,a,pt		%icc, 1f
+	 wr		%g0, ASI_BLK_P, %asi
+	wr		%g0, ASI_BLK_COMMIT_P, %asi
+1:	ldda		[%o1] ASI_BLK_P, %f0
+	add		%o1, 0x40, %o1
+	ldda		[%o1] ASI_BLK_P, %f16
+	add		%o1, 0x40, %o1
+	sethi		%hi(PAGE_SIZE), %o2
+1:	TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+	ldda		[%o1] ASI_BLK_P, %f32
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+	ldda		[%o1] ASI_BLK_P, %f0
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
+	ldda		[%o1] ASI_BLK_P, %f16
+	stda		%f48, [%o0] %asi
+	sub		%o2, 0x40, %o2
+	add		%o1, 0x40, %o1
+	cmp		%o2, PAGE_SIZE_REM
+	bne,pt		%xcc, 1b
+	 add		%o0, 0x40, %o0
+#if (PAGE_SHIFT == 16)
+	TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+	ldda		[%o1] ASI_BLK_P, %f32
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+	ldda		[%o1] ASI_BLK_P, %f0
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	membar		#Sync
+	stda		%f32, [%o0] %asi
+	add		%o0, 0x40, %o0
+	stda		%f0, [%o0] %asi
+#else
+	membar		#Sync
+	stda		%f0, [%o0] %asi
+	add		%o0, 0x40, %o0
+	stda		%f16, [%o0] %asi
+#endif
+	membar		#Sync
+	wr		%g3, 0x0, %asi
+	VISExit
+
+5:
+	stxa		%g0, [%g1] ASI_DMMU_DEMAP
+	membar		#Sync
+
+	sethi		%hi(DCACHE_SIZE), %g2
+	stxa		%g0, [%g1 + %g2] ASI_DMMU_DEMAP
+	membar		#Sync
+
+	retl
+	 stw		%o4, [%g6 + TI_PRE_COUNT]
+
+	.size		copy_user_page, .-copy_user_page
+
+	.globl		cheetah_patch_copy_page
+cheetah_patch_copy_page:
+	sethi		%hi(0x01000000), %o1	! NOP
+	sethi		%hi(cheetah_copy_page_insn), %o0
+	or		%o0, %lo(cheetah_copy_page_insn), %o0
+	stw		%o1, [%o0]
+	membar		#StoreStore
+	flush		%o0
+	retl
+	 nop
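+
+	/* cheetah_patch_copy_page() rewrites the branch at
+	 * cheetah_copy_page_insn with a NOP (0x01000000 is the SPARC
+	 * NOP encoding), so patched kernels fall through into the
+	 * VISEntryHalf block-copy loop at 1: instead of taking the
+	 * generic path at 9:.  The flush makes the instruction fetch
+	 * pipeline see the modified word.
+	 */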
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
new file mode 100644
index 000000000000..e566c770a0f6
--- /dev/null
+++ b/arch/sparc/lib/csum_copy.S
@@ -0,0 +1,309 @@
+/* csum_copy.S: Checksum+copy code for sparc64
+ *
+ * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
+ */
+
+#ifdef __KERNEL__
+#define GLOBAL_SPARE	%g7
+#else
+#define GLOBAL_SPARE	%g5
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	csum_partial_copy_nocheck
+#endif
+
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+
+	.text
+
+90:
+	/* We checked for zero length already, so there must be
+	 * at least one byte.
+	 */
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(ldub, %o0 + 0x00, %o4))
+	add		%o0, 1, %o0
+	sub		%o2, 1, %o2
+	EX_ST(STORE(stb, %o4, %o1 + 0x00))
+	add		%o1, 1, %o1
+1:	andcc		%o0, 0x2, %g0
+	be,pn		%icc, 80f
+	 cmp		%o2, 2
+	blu,pn		%icc, 60f
+	 nop
+	EX_LD(LOAD(lduh, %o0 + 0x00, %o5))
+	add		%o0, 2, %o0
+	sub		%o2, 2, %o2
+	EX_ST(STORE(sth, %o5, %o1 + 0x00))
+	add		%o1, 2, %o1
+	ba,pt		%xcc, 80f
+	 add		%o5, %o4, %o4
+
+	.globl		FUNC_NAME
+FUNC_NAME:		/* %o0=src, %o1=dst, %o2=len, %o3=sum */
+	LOAD(prefetch, %o0 + 0x000, #n_reads)
+	xor		%o0, %o1, %g1
+	clr		%o4
+	andcc		%g1, 0x3, %g0
+	bne,pn		%icc, 95f
+	 LOAD(prefetch, %o0 + 0x040, #n_reads)
+	
+	brz,pn		%o2, 70f
+	 andcc		%o0, 0x3, %g0
+
+	/* We "remember" whether the lowest bit in the address
+	 * was set in GLOBAL_SPARE.  Because if it is, we have to swap
+	 * upper and lower 8 bit fields of the sum we calculate.
+	*/
+	bne,pn		%icc, 90b
+	 andcc		%o0, 0x1, GLOBAL_SPARE
+
+80:
+	LOAD(prefetch, %o0 + 0x080, #n_reads)
+	andncc		%o2, 0x3f, %g3
+
+	LOAD(prefetch, %o0 + 0x0c0, #n_reads)
+	sub		%o2, %g3, %o2
+	brz,pn		%g3, 2f
+	 LOAD(prefetch, %o0 + 0x100, #n_reads)
+
+	/* So that we don't need to use the non-pairing
+	 * add-with-carry instructions, we accumulate the 32-bit
+	 * values into a 64-bit register and, after the loop, fold
+	 * the result down to 32 and then 16 bits.
+	 */
+	ba,pt		%xcc, 1f
+	LOAD(prefetch, %o0 + 0x140, #n_reads)
+
+	.align		32
+1:	EX_LD(LOAD(lduw, %o0 + 0x00, %o5))
+	EX_LD(LOAD(lduw, %o0 + 0x04, %g1))
+	EX_LD(LOAD(lduw, %o0 + 0x08, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x00))
+	EX_LD(LOAD(lduw, %o0 + 0x0c, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x04))
+	EX_LD(LOAD(lduw, %o0 + 0x10, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x08))
+	EX_LD(LOAD(lduw, %o0 + 0x14, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x0c))
+	EX_LD(LOAD(lduw, %o0 + 0x18, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x10))
+	EX_LD(LOAD(lduw, %o0 + 0x1c, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x14))
+	EX_LD(LOAD(lduw, %o0 + 0x20, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x18))
+	EX_LD(LOAD(lduw, %o0 + 0x24, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x1c))
+	EX_LD(LOAD(lduw, %o0 + 0x28, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x20))
+	EX_LD(LOAD(lduw, %o0 + 0x2c, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x24))
+	EX_LD(LOAD(lduw, %o0 + 0x30, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x28))
+	EX_LD(LOAD(lduw, %o0 + 0x34, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x2c))
+	EX_LD(LOAD(lduw, %o0 + 0x38, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x30))
+	EX_LD(LOAD(lduw, %o0 + 0x3c, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x34))
+	LOAD(prefetch, %o0 + 0x180, #n_reads)
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x38))
+	subcc		%g3, 0x40, %g3
+	add		%o0, 0x40, %o0
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x3c))
+	bne,pt		%icc, 1b
+	 add		%o1, 0x40, %o1
+
+2:	and		%o2, 0x3c, %g3
+	brz,pn		%g3, 2f
+	 sub		%o2, %g3, %o2
+1:	EX_LD(LOAD(lduw, %o0 + 0x00, %o5))
+	subcc		%g3, 0x4, %g3
+	add		%o0, 0x4, %o0
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x00))
+	bne,pt		%icc, 1b
+	 add		%o1, 0x4, %o1
+
+2:
+	/* fold 64-->32 */
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
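+
+	/* The folds above are equivalent to this C sketch (an
+	 * illustration only):
+	 *
+	 *	sum = (sum & 0xffffffff) + (sum >> 32);	/* 64 --> 32, twice */
+	 *	sum = (sum & 0xffff) + (sum >> 16);	/* 32 --> 16, twice */
+	 *
+	 * Each fold is done twice because the first pass can itself
+	 * generate a carry out of the low half.
+	 */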
+
+60:
+	/* %o4 has the 16-bit sum we have calculated so far.  */
+	cmp		%o2, 2
+	blu,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(lduh, %o0 + 0x00, %o5))
+	sub		%o2, 2, %o2
+	add		%o0, 2, %o0
+	add		%o4, %o5, %o4
+	EX_ST(STORE(sth, %o5, %o1 + 0x00))
+	add		%o1, 0x2, %o1
+1:	brz,pt		%o2, 1f
+	 nop
+	EX_LD(LOAD(ldub, %o0 + 0x00, %o5))
+	sub		%o2, 1, %o2
+	add		%o0, 1, %o0
+	EX_ST(STORE(stb, %o5, %o1 + 0x00))
+	sllx		%o5, 8, %o5
+	add		%o1, 1, %o1
+	add		%o4, %o5, %o4
+1:
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+1:	brz,pt		GLOBAL_SPARE, 1f
+	 nop
+
+	/* We started with an odd byte; byte-swap the result.  */
+	srl		%o4, 8, %o5
+	and		%o4, 0xff, %g1
+	sll		%g1, 8, %g1
+	or		%o5, %g1, %o4
+
+1:	addcc		%o3, %o4, %o3
+	addc		%g0, %o3, %o3
+
+70:
+	retl
+	 srl		%o3, 0, %o0
+
+95:	mov		0, GLOBAL_SPARE
+	brlez,pn	%o2, 4f
+	 andcc		%o0, 1, %o5		
+	be,a,pt		%icc, 1f
+	 srl		%o2, 1, %g1		
+	sub		%o2, 1, %o2	
+	EX_LD(LOAD(ldub, %o0, GLOBAL_SPARE))
+	add		%o0, 1, %o0	
+	EX_ST(STORE(stb, GLOBAL_SPARE, %o1))
+	srl		%o2, 1, %g1
+	add		%o1, 1, %o1
+1:	brz,a,pn	%g1, 3f
+	 andcc		%o2, 1, %g0
+	andcc		%o0, 2, %g0	
+	be,a,pt		%icc, 1f
+	 srl		%g1, 1, %g1
+	EX_LD(LOAD(lduh, %o0, %o4))
+	sub		%o2, 2, %o2	
+	srl		%o4, 8, %g2
+	sub		%g1, 1, %g1	
+	EX_ST(STORE(stb, %g2, %o1))
+	add		%o4, GLOBAL_SPARE, GLOBAL_SPARE
+	EX_ST(STORE(stb, %o4, %o1 + 1))
+	add		%o0, 2, %o0	
+	srl		%g1, 1, %g1
+	add		%o1, 2, %o1
+1:	brz,a,pn	%g1, 2f		
+	 andcc		%o2, 2, %g0
+	EX_LD(LOAD(lduw, %o0, %o4))
+5:	srl		%o4, 24, %g2
+	srl		%o4, 16, %g3
+	EX_ST(STORE(stb, %g2, %o1))
+	srl		%o4, 8, %g2
+	EX_ST(STORE(stb, %g3, %o1 + 1))
+	add		%o0, 4, %o0
+	EX_ST(STORE(stb, %g2, %o1 + 2))
+	addcc		%o4, GLOBAL_SPARE, GLOBAL_SPARE
+	EX_ST(STORE(stb, %o4, %o1 + 3))
+	addc		GLOBAL_SPARE, %g0, GLOBAL_SPARE
+	add		%o1, 4, %o1
+	subcc		%g1, 1, %g1
+	bne,a,pt	%icc, 5b
+	 EX_LD(LOAD(lduw, %o0, %o4))
+	sll		GLOBAL_SPARE, 16, %g2
+	srl		GLOBAL_SPARE, 16, GLOBAL_SPARE
+	srl		%g2, 16, %g2
+	andcc		%o2, 2, %g0
+	add		%g2, GLOBAL_SPARE, GLOBAL_SPARE 
+2:	be,a,pt		%icc, 3f		
+	 andcc		%o2, 1, %g0
+	EX_LD(LOAD(lduh, %o0, %o4))
+	andcc		%o2, 1, %g0
+	srl		%o4, 8, %g2
+	add		%o0, 2, %o0	
+	EX_ST(STORE(stb, %g2, %o1))
+	add		GLOBAL_SPARE, %o4, GLOBAL_SPARE
+	EX_ST(STORE(stb, %o4, %o1 + 1))
+	add		%o1, 2, %o1
+3:	be,a,pt		%icc, 1f		
+	 sll		GLOBAL_SPARE, 16, %o4
+	EX_LD(LOAD(ldub, %o0, %g2))
+	sll		%g2, 8, %o4	
+	EX_ST(STORE(stb, %g2, %o1))
+	add		GLOBAL_SPARE, %o4, GLOBAL_SPARE
+	sll		GLOBAL_SPARE, 16, %o4
+1:	addcc		%o4, GLOBAL_SPARE, GLOBAL_SPARE
+	srl		GLOBAL_SPARE, 16, %o4
+	addc		%g0, %o4, GLOBAL_SPARE
+	brz,pt		%o5, 4f
+	 srl		GLOBAL_SPARE, 8, %o4
+	and		GLOBAL_SPARE, 0xff, %g2
+	and		%o4, 0xff, %o4
+	sll		%g2, 8, %g2
+	or		%g2, %o4, GLOBAL_SPARE
+4:	addcc		%o3, GLOBAL_SPARE, %o3
+	addc		%g0, %o3, %o0
+	retl
+	 srl		%o0, 0, %o0
+	.size		FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/csum_copy_from_user.S b/arch/sparc/lib/csum_copy_from_user.S
new file mode 100644
index 000000000000..a22eddbe5dba
--- /dev/null
+++ b/arch/sparc/lib/csum_copy_from_user.S
@@ -0,0 +1,21 @@
+/* csum_copy_from_user.S: Checksum+copy from userspace.
+ *
+ * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	-1, %o0;	\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		__csum_partial_copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+
+#include "csum_copy.S"
diff --git a/arch/sparc/lib/csum_copy_to_user.S b/arch/sparc/lib/csum_copy_to_user.S
new file mode 100644
index 000000000000..d5b12f441f02
--- /dev/null
+++ b/arch/sparc/lib/csum_copy_to_user.S
@@ -0,0 +1,21 @@
+/* csum_copy_to_user.S: Checksum+copy to userspace.
+ *
+ * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	-1, %o0;	\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		__csum_partial_copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] %asi
+
+#include "csum_copy.S"
diff --git a/arch/sparc/lib/ipcsum.S b/arch/sparc/lib/ipcsum.S
new file mode 100644
index 000000000000..58ca5b9a8778
--- /dev/null
+++ b/arch/sparc/lib/ipcsum.S
@@ -0,0 +1,34 @@
+	.text
+	.align	32
+	.globl	ip_fast_csum
+	.type	ip_fast_csum,#function
+ip_fast_csum:	/* %o0 = iph, %o1 = ihl */
+	sub	%o1, 4, %g7
+	lduw	[%o0 + 0x00], %o2
+	lduw	[%o0 + 0x04], %g2
+	lduw	[%o0 + 0x08], %g3
+	addcc	%g2, %o2, %o2
+	lduw	[%o0 + 0x0c], %g2
+	addccc	%g3, %o2, %o2
+	lduw	[%o0 + 0x10], %g3
+
+	addccc	%g2, %o2, %o2
+	addc	%o2, %g0, %o2
+1:	addcc	%g3, %o2, %o2
+	add	%o0, 4, %o0
+	addccc	%o2, %g0, %o2
+	subcc	%g7, 1, %g7
+	be,a,pt	%icc, 2f
+	 sll	%o2, 16, %g2
+
+	lduw	[%o0 + 0x10], %g3
+	ba,pt	%xcc, 1b
+	 nop
+2:	addcc	%o2, %g2, %g2
+	srl	%g2, 16, %o2
+	addc	%o2, %g0, %o2
+	xnor	%g0, %o2, %o2
+	set	0xffff, %o1
+	retl
+	 and	%o2, %o1, %o0
+	.size	ip_fast_csum, .-ip_fast_csum
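+
+	/* In C terms this computes, roughly (a sketch only):
+	 *
+	 *	u64 sum = 0;
+	 *	for (i = 0; i < ihl; i++)
+	 *		sum += ((u32 *)iph)[i];
+	 *	while (sum >> 16)
+	 *		sum = (sum & 0xffff) + (sum >> 16);
+	 *	return ~sum & 0xffff;
+	 *
+	 * i.e. the ones'-complement sum of the header words; the first
+	 * four words are summed inline above and the remaining ihl - 4
+	 * by the loop at 1:.
+	 */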
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
new file mode 100644
index 000000000000..7ce9c65f3592
--- /dev/null
+++ b/arch/sparc/lib/mcount.S
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com)
+ *
+ * This file implements mcount(), which is used to collect profiling data.
+ * This can also be tweaked for kernel stack overflow detection.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+
+/*
+ * This is the main variant and is called by C code.  GCC's -pg option
+ * automatically instruments every C function with a call to this.
+ */
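+
+/* For example, building with "gcc -pg" makes each C function call
+ * mcount right after its register window save, so here %o7 holds the
+ * instrumented function's own address and %i7 its caller's, which is
+ * exactly the pair handed to the tracer below.
+ */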
+
+#ifdef CONFIG_STACK_DEBUG
+
+#define OVSTACKSIZE	4096		/* let's hope this is enough */
+
+	.data
+	.align		8
+panicstring:
+	.asciz		"Stack overflow\n"
+	.align		8
+ovstack:
+	.skip		OVSTACKSIZE
+#endif
+	.text
+	.align		32
+	.globl		_mcount
+	.type		_mcount,#function
+	.globl		mcount
+	.type		mcount,#function
+_mcount:
+mcount:
+#ifdef CONFIG_STACK_DEBUG
+	/*
+	 * Check whether %sp is dangerously low.
+	 */
+	ldub		[%g6 + TI_FPDEPTH], %g1
+	srl		%g1, 1, %g3
+	add		%g3, 1, %g3
+	sllx		%g3, 8, %g3			! each fpregs frame is 256b
+	add		%g3, 192, %g3
+	add		%g6, %g3, %g3			! where does task_struct+frame end?
+	sub		%g3, STACK_BIAS, %g3
+	cmp		%sp, %g3
+	bg,pt		%xcc, 1f
+	 nop
+	lduh		[%g6 + TI_CPU], %g1
+	sethi		%hi(hardirq_stack), %g3
+	or		%g3, %lo(hardirq_stack), %g3
+	sllx		%g1, 3, %g1
+	ldx		[%g3 + %g1], %g7
+	sub		%g7, STACK_BIAS, %g7
+	cmp		%sp, %g7
+	bleu,pt		%xcc, 2f
+	 sethi		%hi(THREAD_SIZE), %g3
+	add		%g7, %g3, %g7
+	cmp		%sp, %g7
+	blu,pn		%xcc, 1f
+2:	 sethi		%hi(softirq_stack), %g3
+	or		%g3, %lo(softirq_stack), %g3
+	ldx		[%g3 + %g1], %g7
+	cmp		%sp, %g7
+	bleu,pt		%xcc, 2f
+	 sethi		%hi(THREAD_SIZE), %g3
+	add		%g7, %g3, %g7
+	cmp		%sp, %g7
+	blu,pn		%xcc, 1f
+	 nop
+	/* If we are already on ovstack, don't hop onto it
+	 * again, we are already trying to output the stack overflow
+	 * message.
+	 */
+	sethi		%hi(ovstack), %g7		! can't move to the panic stack fast enough
+	 or		%g7, %lo(ovstack), %g7
+	add		%g7, OVSTACKSIZE, %g3
+	sub		%g3, STACK_BIAS + 192, %g3
+	sub		%g7, STACK_BIAS, %g7
+	cmp		%sp, %g7
+	blu,pn		%xcc, 2f
+	 cmp		%sp, %g3
+	bleu,pn		%xcc, 1f
+	 nop
+2:	mov		%g3, %sp
+	sethi		%hi(panicstring), %g3
+	call		prom_printf
+	 or		%g3, %lo(panicstring), %o0
+	call		prom_halt
+	 nop
+1:
+#endif
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+	mov		%o7, %o0
+	.globl		mcount_call
+mcount_call:
+	call		ftrace_stub
+	 mov		%o0, %o7
+#else
+	sethi		%hi(ftrace_trace_function), %g1
+	sethi		%hi(ftrace_stub), %g2
+	ldx		[%g1 + %lo(ftrace_trace_function)], %g1
+	or		%g2, %lo(ftrace_stub), %g2
+	cmp		%g1, %g2
+	be,pn		%icc, 1f
+	 mov		%i7, %o1
+	jmpl		%g1, %g0
+	 mov		%o7, %o0
+	/* not reached */
+1:
+#endif
+#endif
+	retl
+	 nop
+	.size		_mcount,.-_mcount
+	.size		mcount,.-mcount
+
+#ifdef CONFIG_FUNCTION_TRACER
+	.globl		ftrace_stub
+	.type		ftrace_stub,#function
+ftrace_stub:
+	retl
+	 nop
+	.size		ftrace_stub,.-ftrace_stub
+#ifdef CONFIG_DYNAMIC_FTRACE
+	.globl		ftrace_caller
+	.type		ftrace_caller,#function
+ftrace_caller:
+	mov		%i7, %o1
+	mov		%o7, %o0
+	.globl		ftrace_call
+ftrace_call:
+	call		ftrace_stub
+	 mov		%o0, %o7
+	retl
+	 nop
+	.size		ftrace_caller,.-ftrace_caller
+#endif
+#endif
diff --git a/arch/sparc/lib/memcmp_64.S b/arch/sparc/lib/memcmp_64.S
new file mode 100644
index 000000000000..d3fdaa898566
--- /dev/null
+++ b/arch/sparc/lib/memcmp_64.S
@@ -0,0 +1,28 @@
+/*
+ * Sparc64 optimized memcmp code.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ */
+
+	.text
+	.align	32
+	.globl	__memcmp, memcmp
+__memcmp:
+memcmp:
+	cmp	%o2, 0		! IEU1	Group
+loop:	be,pn	%icc, ret_0	! CTI
+	 nop			! IEU0
+	ldub	[%o0], %g7	! LSU	Group
+	ldub	[%o1], %g3	! LSU	Group
+	sub	%o2, 1, %o2	! IEU0
+	add	%o0, 1, %o0	! IEU1
+	add	%o1, 1, %o1	! IEU0	Group
+	subcc	%g7, %g3, %g3	! IEU1	Group
+	be,pt	%icc, loop	! CTI
+	 cmp	%o2, 0		! IEU1	Group
+
+ret_n0:	retl
+	 mov	%g3, %o0
+ret_0:	retl
+	 mov	0, %o0
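+
+	/* Equivalent C sketch of the byte loop above (illustration
+	 * only):
+	 *
+	 *	const unsigned char *p = s1, *q = s2;
+	 *	while (n--) {
+	 *		int d = *p++ - *q++;
+	 *		if (d)
+	 *			return d;
+	 *	}
+	 *	return 0;
+	 */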
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
new file mode 100644
index 000000000000..97395802c23c
--- /dev/null
+++ b/arch/sparc/lib/memmove.S
@@ -0,0 +1,31 @@
+/* memmove.S: Simple memmove implementation.
+ *
+ * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+	.text
+	.align		32
+	.globl		memmove
+	.type		memmove,#function
+memmove:		/* o0=dst o1=src o2=len */
+	mov		%o0, %g1
+	cmp		%o0, %o1
+	bleu,pt		%xcc, memcpy
+	 add		%o1, %o2, %g7
+	cmp		%g7, %o0
+	bleu,pt		%xcc, memcpy
+	 add		%o0, %o2, %o5
+	sub		%g7, 1, %o1
+
+	sub		%o5, 1, %o0
+1:	ldub		[%o1], %g7
+	subcc		%o2, 1, %o2
+	sub		%o1, 1, %o1
+	stb		%g7, [%o0]
+	bne,pt		%icc, 1b
+	 sub		%o0, 1, %o0
+
+	retl
+	 mov		%g1, %o0
+	.size		memmove, .-memmove
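+
+	/* memmove() punts to memcpy() whenever a forward copy is safe:
+	 * either dst <= src, or the regions do not overlap at all
+	 * (src + len <= dst).  Only a truly overlapping forward copy
+	 * takes the byte-wise backward loop above.  In C terms:
+	 *
+	 *	if (dst <= src || src + len <= dst)
+	 *		return memcpy(dst, src, len);
+	 *	... copy backwards, one byte at a time ...
+	 */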
diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S
new file mode 100644
index 000000000000..5686dfa5dc15
--- /dev/null
+++ b/arch/sparc/lib/memscan_64.S
@@ -0,0 +1,129 @@
+/*
+ * memscan.S: Optimized memscan for Sparc64.
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ */
+
+#define HI_MAGIC	0x8080808080808080
+#define LO_MAGIC	0x0101010101010101
+#define ASI_PL		0x88
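+
+/* The word loop relies on the classic magic-constant test: for a
+ * 64-bit word x, ((x - LO_MAGIC) ^ x) & HI_MAGIC is non-zero whenever
+ * x may contain a zero byte (with some false positives), so only
+ * candidate words fall into the exact per-byte scan at check_bytes.
+ * ASI_PL loads little-endian, putting the lowest-addressed byte in
+ * the low bits of the register.
+ */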
+
+	.text
+	.align	32
+	.globl		__memscan_zero, __memscan_generic
+	.globl		memscan
+
+__memscan_zero:
+	/* %o0 = bufp, %o1 = size */
+	brlez,pn	%o1, szzero
+	 andcc		%o0, 7, %g0
+	be,pt		%icc, we_are_aligned
+	 sethi		%hi(HI_MAGIC), %o4
+	ldub		[%o0], %o5
+1:	subcc		%o1, 1, %o1
+	brz,pn		%o5, 10f
+	 add		%o0, 1, %o0
+
+	be,pn		%xcc, szzero
+	 andcc		%o0, 7, %g0
+	bne,a,pn	%icc, 1b
+	 ldub		[%o0], %o5
+we_are_aligned:
+	ldxa		[%o0] ASI_PL, %o5
+	or		%o4, %lo(HI_MAGIC), %o3
+	sllx		%o3, 32, %o4
+	or		%o4, %o3, %o3
+
+	srlx		%o3, 7, %o2
+msloop:
+	sub		%o1, 8, %o1
+	add		%o0, 8, %o0
+	sub		%o5, %o2, %o4
+	xor		%o4, %o5, %o4
+	andcc		%o4, %o3, %g3
+	bne,pn		%xcc, check_bytes
+	 srlx		%o4, 32, %g3
+
+	brgz,a,pt	%o1, msloop
+	 ldxa		[%o0] ASI_PL, %o5
+check_bytes:
+	bne,a,pn	%icc, 2f
+	 andcc		%o5, 0xff, %g0
+	add		%o0, -5, %g2
+	ba,pt		%xcc, 3f
+	 srlx		%o5, 32, %g7
+
+2:	srlx		%o5, 8, %g7
+	be,pn		%icc, 1f
+	 add		%o0, -8, %g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g3, %o3, %g0
+
+	be,a,pn		%icc, 2f
+	 mov		%o0, %g2
+3:	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+
+	be,pn		%icc, 1f
+	 inc		%g2
+2:	brgz,a,pt	%o1, msloop
+	 ldxa		[%o0] ASI_PL, %o5
+	inc		%g2
+1:	add		%o0, %o1, %o0
+	cmp		%g2, %o0
+	retl
+
+	 movle		%xcc, %g2, %o0
+10:	retl
+	 sub		%o0, 1, %o0
+szzero:	retl
+	 nop
+
+memscan:
+__memscan_generic:
+	/* %o0 = addr, %o1 = c, %o2 = size */
+	brz,pn		%o2, 3f
+	 add		%o0, %o2, %o3
+	ldub		[%o0], %o5
+	sub		%g0, %o2, %o4
+1:
+	cmp		%o5, %o1
+	be,pn		%icc, 2f
+	 addcc		%o4, 1, %o4
+	bne,a,pt 	%xcc, 1b
+	 ldub		[%o3 + %o4], %o5
+	retl
+	/* The delay slot is the same as the next insn; this is just to make it look more awful */
+2:
+	 add		%o3, %o4, %o0
+	retl
+	 sub		%o0, 1, %o0
+3:
+	retl
+	 nop
diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
new file mode 100644
index 000000000000..91a7d29a79d5
--- /dev/null
+++ b/arch/sparc/lib/rwsem_64.S
@@ -0,0 +1,163 @@
+/* rwsem.S: RW semaphore assembler.
+ *
+ * Written by David S. Miller (davem@redhat.com), 2001.
+ * Derived from asm-i386/rwsem.h
+ */
+
+#include <asm/rwsem-const.h>
+
+	.section	.sched.text, "ax"
+
+	.globl		__down_read
+__down_read:
+1:	lduw		[%o0], %g1
+	add		%g1, 1, %g7
+	cas		[%o0], %g1, %g7
+	cmp		%g1, %g7
+	bne,pn		%icc, 1b
+	 add		%g7, 1, %g7
+	cmp		%g7, 0
+	bl,pn		%icc, 3f
+	 nop
+2:
+	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_down_read_failed
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__down_read, .-__down_read
+
+	.globl		__down_read_trylock
+__down_read_trylock:
+1:	lduw		[%o0], %g1
+	add		%g1, 1, %g7
+	cmp		%g7, 0
+	bl,pn		%icc, 2f
+	 mov		0, %o1
+	cas		[%o0], %g1, %g7
+	cmp		%g1, %g7
+	bne,pn		%icc, 1b
+	 mov		1, %o1
+2:	retl
+	 mov		%o1, %o0
+	.size		__down_read_trylock, .-__down_read_trylock
+
+	.globl		__down_write
+__down_write:
+	sethi		%hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
+	or		%g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	add		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b
+	 cmp		%g7, 0
+	bne,pn		%icc, 3f
+	 nop
+2:	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_down_write_failed
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__down_write, .-__down_write
+
+	.globl		__down_write_trylock
+__down_write_trylock:
+	sethi		%hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
+	or		%g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	cmp		%g3, 0
+	bne,pn		%icc, 2f
+	 mov		0, %o1
+	add		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b
+	 mov		1, %o1
+2:	retl
+	 mov		%o1, %o0
+	.size		__down_write_trylock, .-__down_write_trylock
+
+	.globl		__up_read
+__up_read:
+1:
+	lduw		[%o0], %g1
+	sub		%g1, 1, %g7
+	cas		[%o0], %g1, %g7
+	cmp		%g1, %g7
+	bne,pn		%icc, 1b
+	 cmp		%g7, 0
+	bl,pn		%icc, 3f
+	 nop
+2:	retl
+	 nop
+3:	sethi		%hi(RWSEM_ACTIVE_MASK), %g1
+	sub		%g7, 1, %g7
+	or		%g1, %lo(RWSEM_ACTIVE_MASK), %g1
+	andcc		%g7, %g1, %g0
+	bne,pn		%icc, 2b
+	 nop
+	save		%sp, -192, %sp
+	call		rwsem_wake
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__up_read, .-__up_read
+
+	.globl		__up_write
+__up_write:
+	sethi		%hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
+	or		%g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	sub		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b
+	 sub		%g7, %g1, %g7
+	cmp		%g7, 0
+	bl,pn		%icc, 3f
+	 nop
+2:
+	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_wake
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__up_write, .-__up_write
+
+	.globl		__downgrade_write
+__downgrade_write:
+	sethi		%hi(RWSEM_WAITING_BIAS), %g1
+	or		%g1, %lo(RWSEM_WAITING_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	sub		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b
+	 sub		%g7, %g1, %g7
+	cmp		%g7, 0
+	bl,pn		%icc, 3f
+	 nop
+2:
+	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_downgrade_wake
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__downgrade_write, .-__downgrade_write
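+
+/* Every primitive above follows the same lock-free pattern: load the
+ * 32-bit count, compute the new value, and retry the cas until it
+ * sticks.  For __down_read that is roughly (pseudocode only):
+ *
+ *	do {
+ *		old = sem->count;
+ *		new = old + 1;
+ *	} while (cas(&sem->count, old, new) != old);
+ *	if (new < 0)
+ *		rwsem_down_read_failed(sem);
+ *
+ * The slow paths open a full register window (save %sp, -192, %sp)
+ * because they call out to C.
+ */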
diff --git a/arch/sparc/lib/strlen_64.S b/arch/sparc/lib/strlen_64.S
new file mode 100644
index 000000000000..e9ba1920d818
--- /dev/null
+++ b/arch/sparc/lib/strlen_64.S
@@ -0,0 +1,80 @@
+/* strlen.S: Sparc64 optimized strlen code
+ * Hand optimized from GNU libc's strlen
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+	.align	32
+	.globl	strlen
+	.type	strlen,#function
+strlen:
+	mov	%o0, %o1
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 9f
+	 sethi	%hi(HI_MAGIC), %o4
+	ldub	[%o0], %o5
+	brz,pn	%o5, 11f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pn	%icc, 4f
+	 or	%o4, %lo(HI_MAGIC), %o3
+	ldub	[%o0], %o5
+	brz,pn	%o5, 12f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 5f
+	 sethi	%hi(LO_MAGIC), %o4
+	ldub	[%o0], %o5
+	brz,pn	%o5, 13f
+	 add	%o0, 1, %o0
+	ba,pt	%icc, 8f
+	 or	%o4, %lo(LO_MAGIC), %o2
+9:
+	or	%o4, %lo(HI_MAGIC), %o3
+4:
+	sethi	%hi(LO_MAGIC), %o4
+5:
+	or	%o4, %lo(LO_MAGIC), %o2
+8:
+	ld	[%o0], %o5
+2:
+	sub	%o5, %o2, %o4
+	andcc	%o4, %o3, %g0
+	be,pt	%icc, 8b
+	 add	%o0, 4, %o0
+
+	/* Check every byte. */
+	srl	%o5, 24, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o0, -4, %o4
+	srl	%o5, 16, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	srl	%o5, 8, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	andcc	%o5, 0xff, %g0
+	bne,a,pt %icc, 2b
+	 ld	[%o0], %o5
+	add	%o4, 1, %o4
+1:
+	retl
+	 sub	%o4, %o1, %o0
+11:
+	retl
+	 mov	0, %o0
+12:
+	retl
+	 mov	1, %o0
+13:
+	retl
+	 mov	2, %o0
+
+	.size	strlen, .-strlen
diff --git a/arch/sparc/lib/strlen_user_64.S b/arch/sparc/lib/strlen_user_64.S
new file mode 100644
index 000000000000..114ed111e251
--- /dev/null
+++ b/arch/sparc/lib/strlen_user_64.S
@@ -0,0 +1,95 @@
+/* strlen_user.S: Sparc64 optimized strlen_user code
+ *
+ * Returns the length of the string in userspace, including the
+ * terminating 0, or 0 on error.
+ *
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996,1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+	.align 4
+	.global __strlen_user, __strnlen_user
+__strlen_user:
+	sethi	%hi(32768), %o1
+__strnlen_user:	
+	mov	%o1, %g1
+	mov	%o0, %o1
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 9f
+	 sethi	%hi(HI_MAGIC), %o4
+10:	lduba	[%o0] %asi, %o5
+	brz,pn	%o5, 21f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pn	%icc, 4f
+	 or	%o4, %lo(HI_MAGIC), %o3
+11:	lduba	[%o0] %asi, %o5
+	brz,pn	%o5, 22f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 13f
+	 srl	%o3, 7, %o2
+12:	lduba	[%o0] %asi, %o5
+	brz,pn	%o5, 23f
+	 add	%o0, 1, %o0
+	ba,pt	%icc, 2f
+15:	 lda	[%o0] %asi, %o5
+9:	or	%o4, %lo(HI_MAGIC), %o3
+4:	srl	%o3, 7, %o2
+13:	lda	[%o0] %asi, %o5
+2:	sub	%o5, %o2, %o4
+	andcc	%o4, %o3, %g0
+	bne,pn	%icc, 82f
+	 add	%o0, 4, %o0
+	sub	%o0, %o1, %g2
+81:	cmp	%g2, %g1
+	blu,pt	%icc, 13b
+	 mov	%o0, %o4
+	ba,a,pt	%xcc, 1f
+
+	/* Check every byte. */
+82:	srl	%o5, 24, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o0, -3, %o4
+	srl	%o5, 16, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	srl	%o5, 8, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	andcc	%o5, 0xff, %g0
+	bne,pt	%icc, 81b
+	 sub	%o0, %o1, %g2
+	add	%o4, 1, %o4
+1:	retl
+	 sub	%o4, %o1, %o0
+21:	retl
+	 mov	1, %o0
+22:	retl
+	 mov	2, %o0
+23:	retl
+	 mov	3, %o0
+
+        .section .fixup,#alloc,#execinstr
+        .align  4
+30:
+        retl
+         clr    %o0
+
+	.section __ex_table,"a"
+	.align	4
+
+	.word	10b, 30b
+	.word	11b, 30b
+	.word	12b, 30b
+	.word	15b, 30b
+	.word	13b, 30b
diff --git a/arch/sparc/lib/strncmp_64.S b/arch/sparc/lib/strncmp_64.S
new file mode 100644
index 000000000000..980e83751556
--- /dev/null
+++ b/arch/sparc/lib/strncmp_64.S
@@ -0,0 +1,32 @@
+/*
+ * Sparc64 optimized strncmp code.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+
+	.text
+	.align	32
+	.globl	strncmp
+	.type	strncmp,#function
+strncmp:
+	brlez,pn %o2, 3f
+	 lduba	[%o0] (ASI_PNF), %o3
+1:
+	add	%o0, 1, %o0
+	ldub	[%o1], %o4
+	brz,pn	%o3, 2f
+	 add	%o1, 1, %o1
+	cmp	%o3, %o4
+	bne,pn	%icc, 2f
+	 subcc	%o2, 1, %o2
+	bne,a,pt %xcc, 1b
+	 ldub	[%o0], %o3
+2:
+	retl
+	 sub	%o3, %o4, %o0
+3:
+	retl
+	 clr	%o0
+	.size	strncmp, .-strncmp
diff --git a/arch/sparc/lib/strncpy_from_user_64.S b/arch/sparc/lib/strncpy_from_user_64.S
new file mode 100644
index 000000000000..511c8f136f95
--- /dev/null
+++ b/arch/sparc/lib/strncpy_from_user_64.S
@@ -0,0 +1,135 @@
+/*
+ * strncpy_from_user.S: Sparc64 strncpy from userspace.
+ *
+ *  Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/asi.h>
+#include <asm/errno.h>
+
+	.data
+	.align	8
+0:	.xword	0x0101010101010101
+
+	.text
+	.align	32
+
+	/* Must return:
+	 *
+	 * -EFAULT		for an exception
+	 * count		if we hit the buffer limit
+	 * bytes copied		if we hit a null byte
+	 * (without the null byte)
+	 *
+	 * This implementation assumes:
+	 * %o1 is 8 aligned => !(%o2 & 7)
+	 * %o0 is 8 aligned (if not, it will be slooooow, but will work)
+	 *
+	 * This is optimized for the common case:
+	 * in my stats, 90% of src are 8 aligned (even on sparc32)
+	 * and average length is 18 or so.
+	 */
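+
+	/* The fast path below copies 8 bytes per iteration and detects
+	 * a NUL with the magic constant M = 0x0101010101010101 stored
+	 * at 0b above: a doubleword g1 can hold a zero byte only if
+	 * ((g1 - M) & (M << 7)) != 0, in which case the code at 5:
+	 * pins down the exact byte position.
+	 */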
+
+	.globl	__strncpy_from_user
+	.type	__strncpy_from_user,#function
+__strncpy_from_user:
+	/* %o0=dest, %o1=src, %o2=count */
+	andcc	%o1, 7, %g0		! IEU1	Group
+	bne,pn	%icc, 30f		! CTI
+	 add	%o0, %o2, %g3		! IEU0
+60:	ldxa	[%o1] %asi, %g1		! Load	Group
+	brlez,pn %o2, 10f		! CTI
+	 mov	%o0, %o3		! IEU0
+50:	sethi	%hi(0b), %o4		! IEU0	Group
+	ldx	[%o4 + %lo(0b)], %o4	! Load
+	sllx	%o4, 7, %o5		! IEU1	Group
+1:	sub	%g1, %o4, %g2		! IEU0	Group
+	stx	%g1, [%o0]		! Store
+	add	%o0, 8, %o0		! IEU1
+	andcc	%g2, %o5, %g0		! IEU1	Group
+	bne,pn	%xcc, 5f		! CTI
+	 add	%o1, 8, %o1		! IEU0
+	cmp	%o0, %g3		! IEU1	Group
+	bl,a,pt %xcc, 1b		! CTI
+61:	 ldxa	[%o1] %asi, %g1		! Load
+10:	retl				! CTI	Group
+	 mov	%o2, %o0		! IEU0
+5:	srlx	%g2, 32, %g7		! IEU0	Group
+	sethi	%hi(0xff00), %o4	! IEU1
+	andcc	%g7, %o5, %g0		! IEU1	Group
+	be,pn	%icc, 2f		! CTI
+	 or	%o4, %lo(0xff00), %o4	! IEU0
+	srlx	%g1, 48, %g7		! IEU0	Group
+	andcc	%g7, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 50f		! CTI
+	 andcc	%g7, 0xff, %g0		! IEU1	Group
+	be,pn	%icc, 51f		! CTI
+	 srlx	%g1, 32, %g7		! IEU0
+	andcc	%g7, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 52f		! CTI
+	 andcc	%g7, 0xff, %g0		! IEU1	Group
+	be,pn	%icc, 53f		! CTI
+2:	 andcc	%g2, %o5, %g0		! IEU1	Group
+	be,pn	%icc, 2f		! CTI
+	 srl	%g1, 16, %g7		! IEU0
+	andcc	%g7, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 54f		! CTI
+	 andcc	%g7, 0xff, %g0		! IEU1	Group
+	be,pn	%icc, 55f		! CTI
+	 andcc	%g1, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 56f		! CTI
+	 andcc	%g1, 0xff, %g0		! IEU1	Group
+	be,a,pn	%icc, 57f		! CTI
+	 sub	%o0, %o3, %o0		! IEU0
+2:	cmp	%o0, %g3		! IEU1	Group
+	bl,a,pt	%xcc, 50b		! CTI
+62:	 ldxa	[%o1] %asi, %g1		! Load
+	retl				! CTI	Group
+	 mov	%o2, %o0		! IEU0
+50:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 8, %o0
+51:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 7, %o0
+52:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 6, %o0
+53:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 5, %o0
+54:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 4, %o0
+55:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 3, %o0
+56:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 2, %o0
+57:	retl
+	 sub	%o0, 1, %o0
+30:	brlez,pn %o2, 3f
+	 sub	%g0, %o2, %o3
+	add	%o0, %o2, %o0
+63:	lduba	[%o1] %asi, %o4
+1:	add	%o1, 1, %o1
+	brz,pn	%o4, 2f
+	 stb	%o4, [%o0 + %o3]
+	addcc	%o3, 1, %o3
+	bne,pt	%xcc, 1b
+64:	 lduba	[%o1] %asi, %o4
+3:	retl
+	 mov	%o2, %o0
+2:	retl
+	 add	%o2, %o3, %o0
+	.size	__strncpy_from_user, .-__strncpy_from_user
+
+	.section __ex_table,"a"
+	.align	4
+	.word	60b, __retl_efault
+	.word	61b, __retl_efault
+	.word	62b, __retl_efault
+	.word	63b, __retl_efault
+	.word	64b, __retl_efault
+	.previous
diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c
new file mode 100644
index 000000000000..05a361b0a1a4
--- /dev/null
+++ b/arch/sparc/lib/user_fixup.c
@@ -0,0 +1,66 @@
+/* user_fixup.c: Fix up user copy faults.
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+
+/* Calculating the exact fault address when using
+ * block loads and stores can be very complicated.
+ *
+ * Instead of trying to be clever and handling all
+ * of the cases, just fix things up simply here.
+ */
+
+static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
+{
+	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long end = start + size;
+
+	if (fault_addr < start || fault_addr >= end) {
+		*offset = 0;
+	} else {
+		*offset = fault_addr - start;
+		size = end - fault_addr;
+	}
+	return size;
+}
+
+unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
+{
+	unsigned long offset;
+
+	size = compute_size((unsigned long) from, size, &offset);
+	if (likely(size))
+		memset(to + offset, 0, size);
+
+	return size;
+}
+
+unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
+{
+	unsigned long offset;
+
+	return compute_size((unsigned long) to, size, &offset);
+}
+
+unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
+{
+	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long start = (unsigned long) to;
+	unsigned long end = start + size;
+
+	if (fault_addr >= start && fault_addr < end)
+		return end - fault_addr;
+
+	start = (unsigned long) from;
+	end = start + size;
+	if (fault_addr >= start && fault_addr < end)
+		return end - fault_addr;
+
+	return size;
+}
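+
+/* Example: if copy_from_user() faults 100 bytes into a 256-byte copy,
+ * compute_size() reports 156 bytes remaining at offset 100;
+ * copy_from_user_fixup() then zeroes those 156 destination bytes, so
+ * the caller never sees uninitialized data, and returns 156 as the
+ * not-copied count.
+ */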
diff --git a/arch/sparc/lib/xor.S b/arch/sparc/lib/xor.S
new file mode 100644
index 000000000000..f44f58f40234
--- /dev/null
+++ b/arch/sparc/lib/xor.S
@@ -0,0 +1,652 @@
+/*
+ * arch/sparc/lib/xor.S
+ *
+ * High speed xor_block operation for RAID4/5 utilizing the
+ * UltraSparc Visual Instruction Set and Niagara store-init/twin-load.
+ *
+ * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
+ */
+
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#include <asm/dcu.h>
+#include <asm/spitfire.h>
+
+/*
+ *	Requirements:
+ *	!(((long)dest | (long)sourceN) & (64 - 1)) &&
+ *	!(len & 127) && len >= 256
+ */
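+
+/*
+ *	In C terms the callers must guarantee, roughly:
+ *
+ *		assert((long)dest % 64 == 0);
+ *		assert((long)srcN % 64 == 0);	for every source
+ *		assert(bytes % 128 == 0 && bytes >= 256);
+ */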
+	.text
+	.align	32
+
+	/* VIS versions. */
+	.globl	xor_vis_2
+	.type	xor_vis_2,#function
+xor_vis_2:
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%o0, 128, %o0
+	ldda	[%o1] %asi, %f0
+	ldda	[%o2] %asi, %f16
+
+2:	ldda	[%o1 + 64] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	stda	%f16, [%o1] %asi
+	ldda	[%o2 + 64] %asi, %f48
+	ldda	[%o1 + 128] %asi, %f0
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	add	%o1, 128, %o1
+	fxor	%f36, %f52, %f52
+	add	%o2, 128, %o2
+	fxor	%f38, %f54, %f54
+	subcc	%o0, 128, %o0
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1 - 64] %asi
+	bne,pt	%xcc, 2b
+	 ldda	[%o2] %asi, %f16
+
+	ldda	[%o1 + 64] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	stda	%f16, [%o1] %asi
+	ldda	[%o2 + 64] %asi, %f48
+	membar	#Sync
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	fxor	%f36, %f52, %f52
+	fxor	%f38, %f54, %f54
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1 + 64] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi
+	retl
+	  wr	%g0, 0, %fprs
+	.size	xor_vis_2, .-xor_vis_2
+
+	.globl	xor_vis_3
+	.type	xor_vis_3,#function
+xor_vis_3:
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%o0, 64, %o0
+	ldda	[%o1] %asi, %f0
+	ldda	[%o2] %asi, %f16
+
+3:	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	add	%o1, 64, %o1
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	add	%o2, 64, %o2
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	ldda	[%o1] %asi, %f0
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	add	%o3, 64, %o3
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	subcc	%o0, 64, %o0
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%o1 - 64] %asi
+	bne,pt	%xcc, 3b
+	 ldda	[%o2] %asi, %f16
+
+	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	membar	#Sync
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%o1] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi
+	retl
+	 wr	%g0, 0, %fprs
+	.size	xor_vis_3, .-xor_vis_3
+
+	.globl	xor_vis_4
+	.type	xor_vis_4,#function
+xor_vis_4:
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%o0, 64, %o0
+	ldda	[%o1] %asi, %f0
+	ldda	[%o2] %asi, %f16
+
+4:	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	add	%o1, 64, %o1
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	add	%o2, 64, %o2
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	ldda	[%o4] %asi, %f48
+	fxor	%f16, %f32, %f32
+	fxor	%f18, %f34, %f34
+	fxor	%f20, %f36, %f36
+	fxor	%f22, %f38, %f38
+	add	%o3, 64, %o3
+	fxor	%f24, %f40, %f40
+	fxor	%f26, %f42, %f42
+	fxor	%f28, %f44, %f44
+	fxor	%f30, %f46, %f46
+	ldda	[%o1] %asi, %f0
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	fxor	%f36, %f52, %f52
+	add	%o4, 64, %o4
+	fxor	%f38, %f54, %f54
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	subcc	%o0, 64, %o0
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1 - 64] %asi
+	bne,pt	%xcc, 4b
+	 ldda	[%o2] %asi, %f16
+
+	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	ldda	[%o4] %asi, %f48
+	fxor	%f16, %f32, %f32
+	fxor	%f18, %f34, %f34
+	fxor	%f20, %f36, %f36
+	fxor	%f22, %f38, %f38
+	fxor	%f24, %f40, %f40
+	fxor	%f26, %f42, %f42
+	fxor	%f28, %f44, %f44
+	fxor	%f30, %f46, %f46
+	membar	#Sync
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	fxor	%f36, %f52, %f52
+	fxor	%f38, %f54, %f54
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi
+	retl
+	 wr	%g0, 0, %fprs
+	.size	xor_vis_4, .-xor_vis_4
+
+	.globl	xor_vis_5
+	.type	xor_vis_5,#function
+xor_vis_5:
+	save	%sp, -192, %sp
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%i0, 64, %i0
+	ldda	[%i1] %asi, %f0
+	ldda	[%i2] %asi, %f16
+
+5:	ldda	[%i3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	add	%i1, 64, %i1
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	add	%i2, 64, %i2
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	ldda	[%i4] %asi, %f16
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	add	%i3, 64, %i3
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	ldda	[%i5] %asi, %f32
+	fxor	%f48, %f16, %f48
+	fxor	%f50, %f18, %f50
+	add	%i4, 64, %i4
+	fxor	%f52, %f20, %f52
+	fxor	%f54, %f22, %f54
+	add	%i5, 64, %i5
+	fxor	%f56, %f24, %f56
+	fxor	%f58, %f26, %f58
+	fxor	%f60, %f28, %f60
+	fxor	%f62, %f30, %f62
+	ldda	[%i1] %asi, %f0
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	subcc	%i0, 64, %i0
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%i1 - 64] %asi
+	bne,pt	%xcc, 5b
+	 ldda	[%i2] %asi, %f16
+
+	ldda	[%i3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	ldda	[%i4] %asi, %f16
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	ldda	[%i5] %asi, %f32
+	fxor	%f48, %f16, %f48
+	fxor	%f50, %f18, %f50
+	fxor	%f52, %f20, %f52
+	fxor	%f54, %f22, %f54
+	fxor	%f56, %f24, %f56
+	fxor	%f58, %f26, %f58
+	fxor	%f60, %f28, %f60
+	fxor	%f62, %f30, %f62
+	membar	#Sync
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%i1] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi
+	wr	%g0, 0, %fprs
+	ret
+	 restore
+	.size	xor_vis_5, .-xor_vis_5
+
+	/* Niagara versions. */
+	.globl		xor_niagara_2
+	.type		xor_niagara_2,#function
+xor_niagara_2:		/* %o0=bytes, %o1=dest, %o2=src */
+	save		%sp, -192, %sp
+	prefetch	[%i1], #n_writes
+	prefetch	[%i2], #one_read
+	rd		%asi, %g7
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
+	srlx		%i0, 6, %g1
+	mov		%i1, %i0
+	mov		%i2, %i1
+1:	ldda		[%i1 + 0x00] %asi, %i2	/* %i2/%i3 = src  + 0x00 */
+	ldda		[%i1 + 0x10] %asi, %i4	/* %i4/%i5 = src  + 0x10 */
+	ldda		[%i1 + 0x20] %asi, %g2	/* %g2/%g3 = src  + 0x20 */
+	ldda		[%i1 + 0x30] %asi, %l0	/* %l0/%l1 = src  + 0x30 */
+	prefetch	[%i1 + 0x40], #one_read
+	ldda		[%i0 + 0x00] %asi, %o0  /* %o0/%o1 = dest + 0x00 */
+	ldda		[%i0 + 0x10] %asi, %o2  /* %o2/%o3 = dest + 0x10 */
+	ldda		[%i0 + 0x20] %asi, %o4  /* %o4/%o5 = dest + 0x20 */
+	ldda		[%i0 + 0x30] %asi, %l2  /* %l2/%l3 = dest + 0x30 */
+	prefetch	[%i0 + 0x40], #n_writes
+	xor		%o0, %i2, %o0
+	xor		%o1, %i3, %o1
+	stxa		%o0, [%i0 + 0x00] %asi
+	stxa		%o1, [%i0 + 0x08] %asi
+	xor		%o2, %i4, %o2
+	xor		%o3, %i5, %o3
+	stxa		%o2, [%i0 + 0x10] %asi
+	stxa		%o3, [%i0 + 0x18] %asi
+	xor		%o4, %g2, %o4
+	xor		%o5, %g3, %o5
+	stxa		%o4, [%i0 + 0x20] %asi
+	stxa		%o5, [%i0 + 0x28] %asi
+	xor		%l2, %l0, %l2
+	xor		%l3, %l1, %l3
+	stxa		%l2, [%i0 + 0x30] %asi
+	stxa		%l3, [%i0 + 0x38] %asi
+	add		%i0, 0x40, %i0
+	subcc		%g1, 1, %g1
+	bne,pt		%xcc, 1b
+	 add		%i1, 0x40, %i1
+	membar		#Sync
+	wr		%g7, 0x0, %asi
+	ret
+	 restore
+	.size		xor_niagara_2, .-xor_niagara_2
+
+	.globl		xor_niagara_3
+	.type		xor_niagara_3,#function
+xor_niagara_3:		/* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
+	save		%sp, -192, %sp
+	prefetch	[%i1], #n_writes
+	prefetch	[%i2], #one_read
+	prefetch	[%i3], #one_read
+	rd		%asi, %g7
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
+	srlx		%i0, 6, %g1
+	mov		%i1, %i0
+	mov		%i2, %i1
+	mov		%i3, %l7
+1:	ldda		[%i1 + 0x00] %asi, %i2	/* %i2/%i3 = src1 + 0x00 */
+	ldda		[%i1 + 0x10] %asi, %i4	/* %i4/%i5 = src1 + 0x10 */
+	ldda		[%l7 + 0x00] %asi, %g2	/* %g2/%g3 = src2 + 0x00 */
+	ldda		[%l7 + 0x10] %asi, %l0	/* %l0/%l1 = src2 + 0x10 */
+	ldda		[%i0 + 0x00] %asi, %o0  /* %o0/%o1 = dest + 0x00 */
+	ldda		[%i0 + 0x10] %asi, %o2  /* %o2/%o3 = dest + 0x10 */
+	xor		%g2, %i2, %g2
+	xor		%g3, %i3, %g3
+	xor		%o0, %g2, %o0
+	xor		%o1, %g3, %o1
+	stxa		%o0, [%i0 + 0x00] %asi
+	stxa		%o1, [%i0 + 0x08] %asi
+	ldda		[%i1 + 0x20] %asi, %i2	/* %i2/%i3 = src1 + 0x20 */
+	ldda		[%l7 + 0x20] %asi, %g2	/* %g2/%g3 = src2 + 0x20 */
+	ldda		[%i0 + 0x20] %asi, %o0	/* %o0/%o1 = dest + 0x20 */
+	xor		%l0, %i4, %l0
+	xor		%l1, %i5, %l1
+	xor		%o2, %l0, %o2
+	xor		%o3, %l1, %o3
+	stxa		%o2, [%i0 + 0x10] %asi
+	stxa		%o3, [%i0 + 0x18] %asi
+	ldda		[%i1 + 0x30] %asi, %i4	/* %i4/%i5 = src1 + 0x30 */
+	ldda		[%l7 + 0x30] %asi, %l0	/* %l0/%l1 = src2 + 0x30 */
+	ldda		[%i0 + 0x30] %asi, %o2	/* %o2/%o3 = dest + 0x30 */
+	prefetch	[%i1 + 0x40], #one_read
+	prefetch	[%l7 + 0x40], #one_read
+	prefetch	[%i0 + 0x40], #n_writes
+	xor		%g2, %i2, %g2
+	xor		%g3, %i3, %g3
+	xor		%o0, %g2, %o0
+	xor		%o1, %g3, %o1
+	stxa		%o0, [%i0 + 0x20] %asi
+	stxa		%o1, [%i0 + 0x28] %asi
+	xor		%l0, %i4, %l0
+	xor		%l1, %i5, %l1
+	xor		%o2, %l0, %o2
+	xor		%o3, %l1, %o3
+	stxa		%o2, [%i0 + 0x30] %asi
+	stxa		%o3, [%i0 + 0x38] %asi
+	add		%i0, 0x40, %i0
+	add		%i1, 0x40, %i1
+	subcc		%g1, 1, %g1
+	bne,pt		%xcc, 1b
+	 add		%l7, 0x40, %l7
+	membar		#Sync
+	wr		%g7, 0x0, %asi
+	ret
+	 restore
+	.size		xor_niagara_3, .-xor_niagara_3
+
+	.globl		xor_niagara_4
+	.type		xor_niagara_4,#function
+xor_niagara_4:		/* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
+	save		%sp, -192, %sp
+	prefetch	[%i1], #n_writes
+	prefetch	[%i2], #one_read
+	prefetch	[%i3], #one_read
+	prefetch	[%i4], #one_read
+	rd		%asi, %g7
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
+	srlx		%i0, 6, %g1
+	mov		%i1, %i0
+	mov		%i2, %i1
+	mov		%i3, %l7
+	mov		%i4, %l6
+1:	ldda		[%i1 + 0x00] %asi, %i2	/* %i2/%i3 = src1 + 0x00 */
+	ldda		[%l7 + 0x00] %asi, %i4	/* %i4/%i5 = src2 + 0x00 */
+	ldda		[%l6 + 0x00] %asi, %g2	/* %g2/%g3 = src3 + 0x00 */
+	ldda		[%i0 + 0x00] %asi, %l0	/* %l0/%l1 = dest + 0x00 */
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x10] %asi, %i2	/* %i2/%i3 = src1 + 0x10 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x10] %asi, %i4	/* %i4/%i5 = src2 + 0x10 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	stxa		%l0, [%i0 + 0x00] %asi
+	stxa		%l1, [%i0 + 0x08] %asi
+	ldda		[%l6 + 0x10] %asi, %g2	/* %g2/%g3 = src3 + 0x10 */
+	ldda		[%i0 + 0x10] %asi, %l0	/* %l0/%l1 = dest + 0x10 */
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x20] %asi, %i2	/* %i2/%i3 = src1 + 0x20 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x20] %asi, %i4	/* %i4/%i5 = src2 + 0x20 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	stxa		%l0, [%i0 + 0x10] %asi
+	stxa		%l1, [%i0 + 0x18] %asi
+	ldda		[%l6 + 0x20] %asi, %g2	/* %g2/%g3 = src3 + 0x20 */
+	ldda		[%i0 + 0x20] %asi, %l0	/* %l0/%l1 = dest + 0x20 */
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x30] %asi, %i2	/* %i2/%i3 = src1 + 0x30 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x30] %asi, %i4	/* %i4/%i5 = src2 + 0x30 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	stxa		%l0, [%i0 + 0x20] %asi
+	stxa		%l1, [%i0 + 0x28] %asi
+	ldda		[%l6 + 0x30] %asi, %g2	/* %g2/%g3 = src3 + 0x30 */
+	ldda		[%i0 + 0x30] %asi, %l0	/* %l0/%l1 = dest + 0x30 */
+
+	prefetch	[%i1 + 0x40], #one_read
+	prefetch	[%l7 + 0x40], #one_read
+	prefetch	[%l6 + 0x40], #one_read
+	prefetch	[%i0 + 0x40], #n_writes
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	stxa		%l0, [%i0 + 0x30] %asi
+	stxa		%l1, [%i0 + 0x38] %asi
+
+	add		%i0, 0x40, %i0
+	add		%i1, 0x40, %i1
+	add		%l7, 0x40, %l7
+	subcc		%g1, 1, %g1
+	bne,pt		%xcc, 1b
+	 add		%l6, 0x40, %l6
+	membar		#Sync
+	wr		%g7, 0x0, %asi
+	ret
+	 restore
+	.size		xor_niagara_4, .-xor_niagara_4
+
+	.globl		xor_niagara_5
+	.type		xor_niagara_5,#function
+xor_niagara_5:		/* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
+	save		%sp, -192, %sp
+	prefetch	[%i1], #n_writes
+	prefetch	[%i2], #one_read
+	prefetch	[%i3], #one_read
+	prefetch	[%i4], #one_read
+	prefetch	[%i5], #one_read
+	rd		%asi, %g7
+	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
+	srlx		%i0, 6, %g1
+	mov		%i1, %i0
+	mov		%i2, %i1
+	mov		%i3, %l7
+	mov		%i4, %l6
+	mov		%i5, %l5
+1:	ldda		[%i1 + 0x00] %asi, %i2	/* %i2/%i3 = src1 + 0x00 */
+	ldda		[%l7 + 0x00] %asi, %i4	/* %i4/%i5 = src2 + 0x00 */
+	ldda		[%l6 + 0x00] %asi, %g2	/* %g2/%g3 = src3 + 0x00 */
+	ldda		[%l5 + 0x00] %asi, %l0	/* %l0/%l1 = src4 + 0x00 */
+	ldda		[%i0 + 0x00] %asi, %l2	/* %l2/%l3 = dest + 0x00 */
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x10] %asi, %i2	/* %i2/%i3 = src1 + 0x10 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x10] %asi, %i4	/* %i4/%i5 = src2 + 0x10 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	ldda		[%l6 + 0x10] %asi, %g2	/* %g2/%g3 = src3 + 0x10 */
+	xor		%l2, %l0, %l2
+	xor		%l3, %l1, %l3
+	stxa		%l2, [%i0 + 0x00] %asi
+	stxa		%l3, [%i0 + 0x08] %asi
+	ldda		[%l5 + 0x10] %asi, %l0	/* %l0/%l1 = src4 + 0x10 */
+	ldda		[%i0 + 0x10] %asi, %l2	/* %l2/%l3 = dest + 0x10 */
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x20] %asi, %i2	/* %i2/%i3 = src1 + 0x20 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x20] %asi, %i4	/* %i4/%i5 = src2 + 0x20 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	ldda		[%l6 + 0x20] %asi, %g2	/* %g2/%g3 = src3 + 0x20 */
+	xor		%l2, %l0, %l2
+	xor		%l3, %l1, %l3
+	stxa		%l2, [%i0 + 0x10] %asi
+	stxa		%l3, [%i0 + 0x18] %asi
+	ldda		[%l5 + 0x20] %asi, %l0	/* %l0/%l1 = src4 + 0x20 */
+	ldda		[%i0 + 0x20] %asi, %l2	/* %l2/%l3 = dest + 0x20 */
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	ldda		[%i1 + 0x30] %asi, %i2	/* %i2/%i3 = src1 + 0x30 */
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	ldda		[%l7 + 0x30] %asi, %i4	/* %i4/%i5 = src2 + 0x30 */
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	ldda		[%l6 + 0x30] %asi, %g2	/* %g2/%g3 = src3 + 0x30 */
+	xor		%l2, %l0, %l2
+	xor		%l3, %l1, %l3
+	stxa		%l2, [%i0 + 0x20] %asi
+	stxa		%l3, [%i0 + 0x28] %asi
+	ldda		[%l5 + 0x30] %asi, %l0	/* %l0/%l1 = src4 + 0x30 */
+	ldda		[%i0 + 0x30] %asi, %l2	/* %l2/%l3 = dest + 0x30 */
+
+	prefetch	[%i1 + 0x40], #one_read
+	prefetch	[%l7 + 0x40], #one_read
+	prefetch	[%l6 + 0x40], #one_read
+	prefetch	[%l5 + 0x40], #one_read
+	prefetch	[%i0 + 0x40], #n_writes
+
+	xor		%i4, %i2, %i4
+	xor		%i5, %i3, %i5
+	xor		%g2, %i4, %g2
+	xor		%g3, %i5, %g3
+	xor		%l0, %g2, %l0
+	xor		%l1, %g3, %l1
+	xor		%l2, %l0, %l2
+	xor		%l3, %l1, %l3
+	stxa		%l2, [%i0 + 0x30] %asi
+	stxa		%l3, [%i0 + 0x38] %asi
+
+	add		%i0, 0x40, %i0
+	add		%i1, 0x40, %i1
+	add		%l7, 0x40, %l7
+	add		%l6, 0x40, %l6
+	subcc		%g1, 1, %g1
+	bne,pt		%xcc, 1b
+	 add		%l5, 0x40, %l5
+	membar		#Sync
+	wr		%g7, 0x0, %asi
+	ret
+	 restore
+	.size		xor_niagara_5, .-xor_niagara_5