author     Richard Kuo <rkuo@codeaurora.org>               2011-10-31 18:48:07 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-11-01 07:34:20 -0700
commit     7567746e1c0d66ac0ef8a9d8816ca694462c7370
tree       5812837c9a8024091e253496cbdbbe888c0f7ab9 /arch/hexagon/mm
parent     dd472da380c3819740d740cfd70b7f8e700e834b
Hexagon: Add user access functions
Signed-off-by: Richard Kuo <rkuo@codeaurora.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/hexagon/mm')
-rw-r--r--  arch/hexagon/mm/copy_from_user.S      | 114
-rw-r--r--  arch/hexagon/mm/copy_to_user.S        |  92
-rw-r--r--  arch/hexagon/mm/copy_user_template.S  | 185
-rw-r--r--  arch/hexagon/mm/strnlen_user.S        | 139
-rw-r--r--  arch/hexagon/mm/uaccess.c             |  59
5 files changed, 589 insertions(+), 0 deletions(-)
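
A note on the calling convention before the patch body: like their counterparts on other architectures, the copy routines below return the number of bytes they failed to transfer, with zero meaning complete success. A hypothetical caller (the function and buffer names here are invented for illustration) would use them through the generic wrappers:

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/* Hypothetical illustration of the contract: the copy returns the
	 * number of bytes NOT transferred, so nonzero maps to -EFAULT. */
	static int fetch_request(void *kbuf, const void __user *ubuf, size_t len)
	{
		if (copy_from_user(kbuf, ubuf, len))
			return -EFAULT;
		return 0;
	}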
diff --git a/arch/hexagon/mm/copy_from_user.S b/arch/hexagon/mm/copy_from_user.S
new file mode 100644
index 000000000000..8eb1d4d61a3d
--- /dev/null
+++ b/arch/hexagon/mm/copy_from_user.S
@@ -0,0 +1,114 @@
+/*
+ * User memory copy functions for the kernel
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The right way to do this involves valignb.
+ * The easy way, taken here, is only to speed up the case where src and
+ * dest share the same alignment.
+ */
+
+/*
+ * Copy to/from user are the same, except that for packets with both a
+ * load and a store there is no way to tell which kind of exception we
+ * got.  Therefore, we duplicate the function and handle faulting
+ * addresses differently for each one.
+ */
+
+/*
+ * copy from user: loads can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME __copy_from_user_hexagon
+#include "copy_user_template.S"
+
+	/* LOAD FAULTS from COPY_FROM_USER */
+
+	/* Alignment loop.  r2 has been updated. Return it. */
+	.falign
+1009:
+2009:
+4009:
+	{
+		r0 = r2
+		jumpr r31
+	}
+	/* Normal copy loops. Do epilog. Use src-src_sav to compute distance */
+	/* X - (A - B) == X + B - A */
+	.falign
+8089:
+	{
+		memd(dst) = d_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+	.falign
+4089:
+	{
+		memw(dst) = w_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+	.falign
+2089:
+	{
+		memh(dst) = w_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+	.falign
+1089:
+	{
+		memb(dst) = w_dbuf
+		r2 += sub(src_sav,src)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+
+	/* COPY FROM USER: only loads can fail */
+
+	.section __ex_table,"a"
+	.long 1000b,1009b
+	.long 2000b,2009b
+	.long 4000b,4009b
+	.long 8080b,8089b
+	.long 4080b,4089b
+	.long 2080b,2089b
+	.long 1080b,1089b
+	.previous
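
The __ex_table entries above pair each potentially faulting instruction with its recovery label. A minimal sketch of the generic mechanism, assuming the classic absolute-address table layout that the .long pairs encode (the real kernel sorts the table and binary-searches it):

	/* One entry per .long pair emitted into the __ex_table section */
	struct exception_table_entry {
		unsigned long insn;	/* address of the faulting instruction */
		unsigned long fixup;	/* address at which to resume */
	};

	/* Sketch of the fault-time lookup: if the trapping PC is in the
	 * table, execution resumes at the fixup (the 1009/8089/... labels
	 * above); otherwise the fault is a genuine kernel bug. */
	static unsigned long find_fixup(const struct exception_table_entry *tbl,
					unsigned int n, unsigned long pc)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			if (tbl[i].insn == pc)
				return tbl[i].fixup;
		return 0;	/* not found */
	}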
diff --git a/arch/hexagon/mm/copy_to_user.S b/arch/hexagon/mm/copy_to_user.S
new file mode 100644
index 000000000000..cb9740ed9e7d
--- /dev/null
+++ b/arch/hexagon/mm/copy_to_user.S
@@ -0,0 +1,92 @@
+/*
+ * User memory copying routines for the Hexagon Kernel
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/* The right way to do this involves valignb.
+ * The easy way, taken here, is only to speed up the case where src and
+ * dest share the same alignment.
+ */
+
+/*
+ * Copy to/from user are the same, except that for packets with both a
+ * load and a store there is no way to tell which kind of exception we
+ * got.  Therefore, we duplicate the function and handle faulting
+ * addresses differently for each one.
+ */
+
+/*
+ * copy to user: stores can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME __copy_to_user_hexagon
+#include "copy_user_template.S"
+
+	/* STORE FAULTS from COPY_TO_USER */
+	.falign
+1109:
+2109:
+4109:
+	/* Alignment loop.  r2 has been updated.  Return it. */
+	{
+		r0 = r2
+		jumpr r31
+	}
+	/* Normal copy loops.  Use dst-dst_sav to compute distance */
+	/* dst holds best write, no need to unwind any loops */
+	/* X - (A - B) == X + B - A */
+	.falign
+8189:
+8199:
+4189:
+4199:
+2189:
+2199:
+1189:
+1199:
+	{
+		r2 += sub(dst_sav,dst)
+	}
+	{
+		r0 = r2
+		jumpr r31
+	}
+
+	/* COPY TO USER: only stores can fail */
+	.section __ex_table,"a"
+	.long 1100b,1109b
+	.long 2100b,2109b
+	.long 4100b,4109b
+	.long 8180b,8189b
+	.long 8190b,8199b
+	.long 4180b,4189b
+	.long 4190b,4199b
+	.long 2180b,2189b
+	.long 2190b,2199b
+	.long 1180b,1189b
+	.long 1190b,1199b
+	.previous
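
All the store-fault epilogs above lean on the identity called out in the comment, X - (A - B) == X + B - A: the bytes still owed equal the residue minus the distance already written, which folds into the single packet instruction r2 += sub(dst_sav,dst). An illustrative C restatement (the names mirror the register aliases; this is not a real kernel helper):

	/* bytes - (dst - dst_sav) == bytes + (dst_sav - dst) */
	static unsigned long bytes_uncopied(unsigned long bytes,
					    unsigned long dst,
					    unsigned long dst_sav)
	{
		return bytes + (dst_sav - dst);
	}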
diff --git a/arch/hexagon/mm/copy_user_template.S b/arch/hexagon/mm/copy_user_template.S
new file mode 100644
index 000000000000..08d7d7b23daa
--- /dev/null
+++ b/arch/hexagon/mm/copy_user_template.S
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/* Numerology:
+ * WXYZ
+ * W: width in bytes
+ * X: Load=0, Store=1
+ * Y: Section: 0=preamble, 8=loop, 9=epilog
+ * Z: 0=the (possibly faulting) instruction, 9=its fault handler
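+ * Example: 8080 tags the 8-byte load in the copy loop (8089 is its
+ * fault handler); 4190 tags the word store in the epilog (4199 is
+ * its handler).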
+ */
+	.text
+	.global FUNCNAME
+	.type FUNCNAME, @function
+	.p2align 5
+FUNCNAME:
+	{
+		p0 = cmp.gtu(bytes,#0)
+		if (!p0.new) jump:nt .Ldone
+		r3 = or(dst,src)
+		r4 = xor(dst,src)
+	}
+	{
+		p1 = cmp.gtu(bytes,#15)
+		p0 = bitsclr(r3,#7)
+		if (!p0.new) jump:nt .Loop_not_aligned_8
+		src_dst_sav = combine(src,dst)
+	}
+
+	{
+		loopcount = lsr(bytes,#3)
+		if (!p1) jump .Lsmall
+	}
+	p3=sp1loop0(.Loop8,loopcount)
+.Loop8:
+8080:
+8180:
+	{
+		if (p3) memd(dst++#8) = d_dbuf
+		d_dbuf = memd(src++#8)
+	}:endloop0
+8190:
+	{
+		memd(dst++#8) = d_dbuf
+		bytes -= asl(loopcount,#3)
+		jump .Lsmall
+	}
+
+.Loop_not_aligned_8:
+	{
+		p0 = bitsclr(r4,#7)
+		if (p0.new) jump:nt .Lalign
+	}
+	{
+		p0 = bitsclr(r3,#3)
+		if (!p0.new) jump:nt .Loop_not_aligned_4
+		p1 = cmp.gtu(bytes,#7)
+	}
+
+	{
+		if (!p1) jump .Lsmall
+		loopcount = lsr(bytes,#2)
+	}
+	p3=sp1loop0(.Loop4,loopcount)
+.Loop4:
+4080:
+4180:
+	{
+		if (p3) memw(dst++#4) = w_dbuf
+		w_dbuf = memw(src++#4)
+	}:endloop0
+4190:
+	{
+		memw(dst++#4) = w_dbuf
+		bytes -= asl(loopcount,#2)
+		jump .Lsmall
+	}
+
+.Loop_not_aligned_4:
+	{
+		p0 = bitsclr(r3,#1)
+		if (!p0.new) jump:nt .Loop_not_aligned
+		p1 = cmp.gtu(bytes,#3)
+	}
+
+	{
+		if (!p1) jump .Lsmall
+		loopcount = lsr(bytes,#1)
+	}
+	p3=sp1loop0(.Loop2,loopcount)
+.Loop2:
+2080:
+2180:
+	{
+		if (p3) memh(dst++#2) = w_dbuf
+		w_dbuf = memuh(src++#2)
+	}:endloop0
+2190:
+	{
+		memh(dst++#2) = w_dbuf
+		bytes -= asl(loopcount,#1)
+		jump .Lsmall
+	}
+
+.Loop_not_aligned: /* Handles sizes as small as one byte */
+	p3=sp1loop0(.Loop1,bytes)
+.Loop1:
+1080:
+1180:
+	{
+		if (p3) memb(dst++#1) = w_dbuf
+		w_dbuf = memub(src++#1)
+	}:endloop0
+	/* Done */
+1190:
+	{
+		memb(dst) = w_dbuf
+		jumpr r31
+		r0 = #0
+	}
+
+.Lsmall:
+	{
+		p0 = cmp.gtu(bytes,#0)
+		if (p0.new) jump:nt .Loop_not_aligned
+	}
+.Ldone:
+	{
+		r0 = #0
+		jumpr r31
+	}
+	.falign
+.Lalign:
+1000:
+	{
+		if (p0.new) w_dbuf = memub(src)
+		p0 = tstbit(src,#0)
+		if (!p1) jump .Lsmall
+	}
+1100:
+	{
+		if (p0) memb(dst++#1) = w_dbuf
+		if (p0) bytes = add(bytes,#-1)
+		if (p0) src = add(src,#1)
+	}
+2000:
+	{
+		if (p0.new) w_dbuf = memuh(src)
+		p0 = tstbit(src,#1)
+		if (!p1) jump .Lsmall
+	}
+2100:
+	{
+		if (p0) memh(dst++#2) = w_dbuf
+		if (p0) bytes = add(bytes,#-2)
+		if (p0) src = add(src,#2)
+	}
+4000:
+	{
+		if (p0.new) w_dbuf = memw(src)
+		p0 = tstbit(src,#2)
+		if (!p1) jump .Lsmall
+	}
+4100:
+	{
+		if (p0) memw(dst++#4) = w_dbuf
+		if (p0) bytes = add(bytes,#-4)
+		if (p0) src = add(src,#4)
+		jump FUNCNAME
+	}
+	.size FUNCNAME,.-FUNCNAME
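
Read at the C level, the template dispatches on the coarsest alignment both pointers share, as in the illustrative model below. This is a sketch only: the fault labels and the .Lalign path (which steps src/dst up to mutual alignment when both are misaligned by the same amount) are omitted.

	#include <stddef.h>
	#include <stdint.h>

	static size_t copy_model(unsigned char *dst, const unsigned char *src,
				 size_t bytes)
	{
		uintptr_t both = (uintptr_t)dst | (uintptr_t)src;
		size_t width;

		if ((both & 7) == 0)
			width = bytes > 15 ? 8 : 1;	/* .Loop8 or .Lsmall */
		else if ((both & 3) == 0)
			width = bytes > 7 ? 4 : 1;	/* .Loop4 or .Lsmall */
		else if ((both & 1) == 0)
			width = bytes > 3 ? 2 : 1;	/* .Loop2 or .Lsmall */
		else
			width = 1;			/* .Loop1 */

		while (bytes >= width) {
			size_t i;

			for (i = 0; i < width; i++)	/* one memd/memw/memh */
				dst[i] = src[i];
			dst += width;
			src += width;
			bytes -= width;
		}
		while (bytes--)				/* byte-copy tail */
			*dst++ = *src++;
		return 0;				/* bytes left uncopied */
	}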
diff --git a/arch/hexagon/mm/strnlen_user.S b/arch/hexagon/mm/strnlen_user.S
new file mode 100644
index 000000000000..5c6a16c7c72a
--- /dev/null
+++ b/arch/hexagon/mm/strnlen_user.S
@@ -0,0 +1,139 @@
+/*
+ * User string length functions for the kernel
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#define isrc	r0
+#define max	r1	/*  Do not change!  */
+
+#define end	r2
+#define tmp1	r3
+
+#define obo	r6	/*  off-by-one  */
+#define start	r7
+#define mod8	r8
+#define dbuf    r15:14
+#define dcmp	r13:12
+
+/*
+ * The vector mask version of this turned out *really* badly.
+ * The hardware loop version also turned out *really* badly.
+ * Seems straight pointer arithmetic basically wins here.
+ */
+
+#define fname __strnlen_user
+
+	.text
+	.global fname
+	.type fname, @function
+	.p2align 5  /*  why?  */
+fname:
+	{
+		mod8 = and(isrc,#7);
+		end = add(isrc,max);
+		start = isrc;
+	}
+	{
+		P0 = cmp.eq(mod8,#0);
+		mod8 = and(end,#7);
+		dcmp = #0;
+		if (P0.new) jump:t dw_loop;	/*  fire up the oven  */
+	}
+
+alignment_loop:
+fail_1:	{
+		tmp1 = memb(start++#1);
+	}
+	{
+		P0 = cmp.eq(tmp1,#0);
+		if (P0.new) jump:nt exit_found;
+		P1 = cmp.gtu(end,start);
+		mod8 = and(start,#7);
+	}
+	{
+		if (!P1) jump exit_error;  /*  hit the end  */
+		P0 = cmp.eq(mod8,#0);
+	}
+	{
+		if (!P0) jump alignment_loop;
+	}
+
+
+
+dw_loop:
+fail_2:	{
+		dbuf = memd(start);
+		obo = add(start,#1);
+	}
+	{
+		P0 = vcmpb.eq(dbuf,dcmp);
+	}
+	{
+		tmp1 = P0;
+		P0 = cmp.gtu(end,start);
+	}
+	{
+		tmp1 = ct0(tmp1);
+		mod8 = and(end,#7);
+		if (!P0) jump end_check;
+	}
+	{
+		P0 = cmp.eq(tmp1,#32);
+		if (!P0.new) jump:nt exit_found;
+		if (!P0.new) start = add(obo,tmp1);
+	}
+	{
+		start = add(start,#8);
+		jump dw_loop;
+	}	/*  might be nice to combine these jumps...   */
+
+
+end_check:
+	{
+		P0 = cmp.gt(tmp1,mod8);
+		if (P0.new) jump:nt exit_error;	/*  never found!  */
+		start = add(obo,tmp1);
+	}
+
+exit_found:
+	{
+		R0 = sub(start,isrc);
+		jumpr R31;
+	}
+
+exit_error:
+	{
+		R0 = add(max,#1);
+		jumpr R31;
+	}
+
+	/*  On a fault, the fixup returns 0, the conventional error value.  */
+	.falign
+fix_1:
+	{
+		R0 = #0;
+		jumpr R31;
+	}
+
+	.size fname,.-fname
+
+
+.section __ex_table,"a"
+.long fail_1,fix_1
+.long fail_2,fix_1
+.previous
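
The dw_loop above scans eight bytes per iteration: vcmpb.eq sets one predicate bit per zero byte, and ct0 on the transferred mask yields the index of the first NUL (32 when no bit is set, hence the cmp.eq(tmp1,#32) test). An illustrative C model of that scan, assuming little-endian byte order as on Hexagon:

	#include <stdint.h>

	/* Returns the index (0..7) of the first NUL byte in the doubleword,
	 * or 8 when there is none (where the asm's ct0 yields 32). */
	static int first_nul(uint64_t dbuf)
	{
		unsigned int mask = 0;
		int i;

		for (i = 0; i < 8; i++)		/* P0 = vcmpb.eq(dbuf, 0) */
			if (((dbuf >> (8 * i)) & 0xff) == 0)
				mask |= 1u << i;

		for (i = 0; i < 8; i++)		/* tmp1 = ct0(mask) */
			if (mask & (1u << i))
				return i;
		return 8;
	}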
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
new file mode 100644
index 000000000000..e748108b47a7
--- /dev/null
+++ b/arch/hexagon/mm/uaccess.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * Support for user memory access from the kernel.  This will
+ * probably be inlined for performance at some point, but for
+ * ease of debugging, and to a lesser degree for code size, we
+ * implement it here as subroutines.
+ */
+#include <linux/types.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+/*
+ * For clear_user(), exploit the previously defined copy_to_user function
+ * and the fact that we've got a handy zero page defined in kernel/head.S
+ *
+ * Using dczero here would be even faster.
+ */
+__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
+{
+	long uncleared;
+
+	while (count > PAGE_SIZE) {
+		uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
+						PAGE_SIZE);
+		if (uncleared)
+			return count - (PAGE_SIZE - uncleared);
+		count -= PAGE_SIZE;
+		dest += PAGE_SIZE;
+	}
+	if (count)
+		count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
+
+	return count;
+}
+
+unsigned long clear_user_hexagon(void __user *dest, unsigned long count)
+{
+	if (!access_ok(VERIFY_WRITE, dest, count))
+		return count;
+	else
+		return __clear_user_hexagon(dest, count);
+}