path: root/arch/xtensa
author    Linus Torvalds <torvalds@linux-foundation.org>    2017-05-01 14:41:04 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-05-01 14:41:04 -0700
commit    5db6db0d400edd8bec274e34960cfa22838e1df5 (patch)
tree      3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /arch/xtensa
parent    5fab10041b4389b61de7e7a49893190bae686241 (diff)
parent    2fefc97b2180518bac923fba3f79fdca1f41dc15 (diff)
download  linux-5db6db0d400edd8bec274e34960cfa22838e1df5.tar.gz
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
 "This is the uaccess unification pile. It's _not_ the end of uaccess
  work, but the next batch of that will go into the next cycle. This one
  mostly takes copy_from_user() and friends out of arch/* and gets the
  zero-padding behaviour in sync for all architectures.

  Dealing with the nocache/writethrough mess is for the next cycle;
  fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
  sold on access_ok() in there, BTW; just not in this pile), same for
  reducing __copy_... callsites, strn*... stuff, etc. - there will be a
  pile about as large as this one in the next merge window.

  This one sat in -next for weeks. -3KLoC"

* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
  HAVE_ARCH_HARDENED_USERCOPY is unconditional now
  CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
  m32r: switch to RAW_COPY_USER
  hexagon: switch to RAW_COPY_USER
  microblaze: switch to RAW_COPY_USER
  get rid of padding, switch to RAW_COPY_USER
  ia64: get rid of copy_in_user()
  ia64: sanitize __access_ok()
  ia64: get rid of 'segment' argument of __do_{get,put}_user()
  ia64: get rid of 'segment' argument of __{get,put}_user_check()
  ia64: add extable.h
  powerpc: get rid of zeroing, switch to RAW_COPY_USER
  esas2r: don't open-code memdup_user()
  alpha: fix stack smashing in old_adjtimex(2)
  don't open-code kernel_setsockopt()
  mips: switch to RAW_COPY_USER
  mips: get rid of tail-zeroing in primitives
  mips: make copy_from_user() zero tail explicitly
  mips: clean and reorder the forest of macros...
  mips: consolidate __invoke_... wrappers
  ...
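
For context, the zero-padding behaviour mentioned in the pull message is handled once in generic code after this series: an architecture only supplies raw_copy_from_user(), and the common wrapper zeroes whatever the raw copy could not fetch. A simplified sketch of that wrapper, with instrumentation hooks omitted (this is illustrative, not the literal lib/usercopy.c text):

	static inline unsigned long
	_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		might_fault();
		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);
		/* Zero the tail the raw copy could not read, so callers
		 * never see stale kernel memory after a partial fault. */
		if (unlikely(res))
			memset(to + (n - res), 0, res);
		return res;
	}

This is why the arch-specific zeroing paths (the l_fixup/memset dance in the xtensa diff below, for example) can be deleted outright.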
Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/include/asm/Kbuild          1
-rw-r--r--  arch/xtensa/include/asm/asm-uaccess.h   3
-rw-r--r--  arch/xtensa/include/asm/uaccess.h      67
-rw-r--r--  arch/xtensa/lib/usercopy.S            116
4 files changed, 58 insertions, 129 deletions
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index f41408c53fe1..cc23e9ecc6bb 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += fcntl.h
 generic-y += hardirq.h
 generic-y += ioctl.h
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
index a7a110039786..dfdf9fae1f84 100644
--- a/arch/xtensa/include/asm/asm-uaccess.h
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -19,9 +19,6 @@
 #include <linux/errno.h>
 #include <asm/types.h>
 
-#define VERIFY_READ    0
-#define VERIFY_WRITE   1
-
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 848a3d736bcb..2e7bac0d4b2c 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -16,14 +16,9 @@
 #ifndef _XTENSA_UACCESS_H
 #define _XTENSA_UACCESS_H
 
-#include <linux/errno.h>
 #include <linux/prefetch.h>
 #include <asm/types.h>
-
-#define VERIFY_READ    0
-#define VERIFY_WRITE   1
-
-#include <linux/sched.h>
+#include <asm/extable.h>
 
 /*
  * The fs value determines whether argument validity checking should
@@ -43,7 +38,7 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 #define __user_ok(addr, size) \
 		(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
@@ -239,60 +234,22 @@ __asm__ __volatile__(			\
  * Copy to/from user space
  */
 
-/*
- * We use a generic, arbitrary-sized copy subroutine.  The Xtensa
- * architecture would cause heavy code bloat if we tried to inline
- * these functions and provide __constant_copy_* equivalents like the
- * i386 versions.  __xtensa_copy_user is quite efficient.  See the
- * .fixup section of __xtensa_copy_user for a discussion on the
- * X_zeroing equivalents for Xtensa.
- */
-
 extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
-#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
-
 
 static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
-{
-	return __copy_user(to, from, n);
+	prefetchw(to);
+	return __xtensa_copy_user(to, (__force const void *)from, n);
 }
-
 static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	prefetch(from);
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_user(to, from, n);
-	return n;
-}
-
-static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
-{
-	prefetchw(to);
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
+	return __xtensa_copy_user((__force void *)to, from, n);
 }
-
-#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
-#define __copy_to_user(to, from, n) \
-	__generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user(to, from, n) \
-	__generic_copy_from_user_nocheck((to), (from), (n))
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 
 /*
  * We need to return the number of bytes not cleared.  Our memset()
@@ -348,10 +305,4 @@ static inline long strnlen_user(const char *str, long len)
 	return __strnlen_user(str, len);
 }
 
-
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
 #endif	/* _XTENSA_UACCESS_H */
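
With raw_copy_{from,to}_user() in place (and INLINE_COPY_FROM_USER / INLINE_COPY_TO_USER selecting the inline generic wrappers), xtensa callers keep using the ordinary interface unchanged. A hypothetical caller, purely to illustrate the "bytes not copied" return convention (struct my_dev and struct my_params are made up for this example):

	/* Hypothetical ioctl-style helper: a non-zero return from
	 * copy_from_user() means the copy faulted part-way through. */
	static long my_set_params(struct my_dev *dev, void __user *argp)
	{
		struct my_params p;

		if (copy_from_user(&p, argp, sizeof(p)))
			return -EFAULT;	/* uncopied tail of p was zeroed */
		dev->params = p;
		return 0;
	}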
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 7ea4dd68893e..d9cd766bde3e 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -102,9 +102,9 @@ __xtensa_copy_user:
 	bltui	a4, 7, .Lbytecopy	# do short copies byte by byte
 
 	# copy 1 byte
-	EX(l8ui, a6, a3, 0, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
 	addi	a3, a3,  1
-	EX(s8i, a6, a5,  0, s_fixup)
+	EX(s8i, a6, a5,  0, fixup)
 	addi	a5, a5,  1
 	addi	a4, a4, -1
 	bbci.l	a5, 1, .Ldstaligned	# if dst is now aligned, then
@@ -112,11 +112,11 @@ __xtensa_copy_user:
 .Ldst2mod4:	# dst 16-bit aligned
 	# copy 2 bytes
 	bltui	a4, 6, .Lbytecopy	# do short copies byte by byte
-	EX(l8ui, a6, a3, 0, l_fixup)
-	EX(l8ui, a7, a3, 1, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
+	EX(l8ui, a7, a3, 1, fixup)
 	addi	a3, a3,  2
-	EX(s8i, a6, a5,  0, s_fixup)
-	EX(s8i, a7, a5,  1, s_fixup)
+	EX(s8i, a6, a5,  0, fixup)
+	EX(s8i, a7, a5,  1, fixup)
 	addi	a5, a5,  2
 	addi	a4, a4, -2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm
@@ -135,9 +135,9 @@ __xtensa_copy_user:
 	add	a7, a3, a4	# a7 = end address for source
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lnextbyte:
-	EX(l8ui, a6, a3, 0, l_fixup)
+	EX(l8ui, a6, a3, 0, fixup)
 	addi	a3, a3, 1
-	EX(s8i, a6, a5, 0, s_fixup)
+	EX(s8i, a6, a5, 0, fixup)
 	addi	a5, a5, 1
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a7, .Lnextbyte
@@ -161,15 +161,15 @@ __xtensa_copy_user:
 	add	a8, a8, a3	# a8 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop1:
-	EX(l32i, a6, a3,  0, l_fixup)
-	EX(l32i, a7, a3,  4, l_fixup)
-	EX(s32i, a6, a5,  0, s_fixup)
-	EX(l32i, a6, a3,  8, l_fixup)
-	EX(s32i, a7, a5,  4, s_fixup)
-	EX(l32i, a7, a3, 12, l_fixup)
-	EX(s32i, a6, a5,  8, s_fixup)
+	EX(l32i, a6, a3,  0, fixup)
+	EX(l32i, a7, a3,  4, fixup)
+	EX(s32i, a6, a5,  0, fixup)
+	EX(l32i, a6, a3,  8, fixup)
+	EX(s32i, a7, a5,  4, fixup)
+	EX(l32i, a7, a3, 12, fixup)
+	EX(s32i, a6, a5,  8, fixup)
 	addi	a3, a3, 16
-	EX(s32i, a7, a5, 12, s_fixup)
+	EX(s32i, a7, a5, 12, fixup)
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a8, .Loop1
@@ -177,31 +177,31 @@ __xtensa_copy_user:
 .Loop1done:
 	bbci.l	a4, 3, .L2
 	# copy 8 bytes
-	EX(l32i, a6, a3,  0, l_fixup)
-	EX(l32i, a7, a3,  4, l_fixup)
+	EX(l32i, a6, a3,  0, fixup)
+	EX(l32i, a7, a3,  4, fixup)
 	addi	a3, a3,  8
-	EX(s32i, a6, a5,  0, s_fixup)
-	EX(s32i, a7, a5,  4, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
+	EX(s32i, a7, a5,  4, fixup)
 	addi	a5, a5,  8
 .L2:
 	bbci.l	a4, 2, .L3
 	# copy 4 bytes
-	EX(l32i, a6, a3,  0, l_fixup)
+	EX(l32i, a6, a3,  0, fixup)
 	addi	a3, a3,  4
-	EX(s32i, a6, a5,  0, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
 	addi	a5, a5,  4
 .L3:
 	bbci.l	a4, 1, .L4
 	# copy 2 bytes
-	EX(l16ui, a6, a3,  0, l_fixup)
+	EX(l16ui, a6, a3,  0, fixup)
 	addi	a3, a3,  2
-	EX(s16i,  a6, a5,  0, s_fixup)
+	EX(s16i,  a6, a5,  0, fixup)
 	addi	a5, a5,  2
 .L4:
 	bbci.l	a4, 0, .L5
 	# copy 1 byte
-	EX(l8ui, a6, a3,  0, l_fixup)
-	EX(s8i,  a6, a5,  0, s_fixup)
+	EX(l8ui, a6, a3,  0, fixup)
+	EX(s8i,  a6, a5,  0, fixup)
 .L5:
 	movi	a2, 0		# return success for len bytes copied
 	retw
@@ -217,7 +217,7 @@ __xtensa_copy_user:
 	# copy 16 bytes per iteration for word-aligned dst and unaligned src
 	and	a10, a3, a8	# save unalignment offset for below
 	sub	a3, a3, a10	# align a3 (to avoid sim warnings only; not needed for hardware)
-	EX(l32i, a6, a3, 0, l_fixup)	# load first word
+	EX(l32i, a6, a3, 0, fixup)	# load first word
 #if XCHAL_HAVE_LOOPS
 	loopnez	a7, .Loop2done
 #else /* !XCHAL_HAVE_LOOPS */
@@ -226,19 +226,19 @@ __xtensa_copy_user:
 	add	a12, a12, a3	# a12 = end of last 16B source chunk
 #endif /* !XCHAL_HAVE_LOOPS */
 .Loop2:
-	EX(l32i, a7, a3,  4, l_fixup)
-	EX(l32i, a8, a3,  8, l_fixup)
+	EX(l32i, a7, a3,  4, fixup)
+	EX(l32i, a8, a3,  8, fixup)
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5,  0, s_fixup)
-	EX(l32i, a9, a3, 12, l_fixup)
+	EX(s32i, a6, a5,  0, fixup)
+	EX(l32i, a9, a3, 12, fixup)
 	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5,  4, s_fixup)
-	EX(l32i, a6, a3, 16, l_fixup)
+	EX(s32i, a7, a5,  4, fixup)
+	EX(l32i, a6, a3, 16, fixup)
 	ALIGN(	a8, a8, a9)
-	EX(s32i, a8, a5,  8, s_fixup)
+	EX(s32i, a8, a5,  8, fixup)
 	addi	a3, a3, 16
 	ALIGN(	a9, a9, a6)
-	EX(s32i, a9, a5, 12, s_fixup)
+	EX(s32i, a9, a5, 12, fixup)
 	addi	a5, a5, 16
 #if !XCHAL_HAVE_LOOPS
 	blt	a3, a12, .Loop2
@@ -246,39 +246,39 @@ __xtensa_copy_user:
 .Loop2done:
 	bbci.l	a4, 3, .L12
 	# copy 8 bytes
-	EX(l32i, a7, a3,  4, l_fixup)
-	EX(l32i, a8, a3,  8, l_fixup)
+	EX(l32i, a7, a3,  4, fixup)
+	EX(l32i, a8, a3,  8, fixup)
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5,  0, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
 	addi	a3, a3,  8
 	ALIGN(	a7, a7, a8)
-	EX(s32i, a7, a5,  4, s_fixup)
+	EX(s32i, a7, a5,  4, fixup)
 	addi	a5, a5,  8
 	mov	a6, a8
 .L12:
 	bbci.l	a4, 2, .L13
 	# copy 4 bytes
-	EX(l32i, a7, a3,  4, l_fixup)
+	EX(l32i, a7, a3,  4, fixup)
 	addi	a3, a3,  4
 	ALIGN(	a6, a6, a7)
-	EX(s32i, a6, a5,  0, s_fixup)
+	EX(s32i, a6, a5,  0, fixup)
 	addi	a5, a5,  4
 	mov	a6, a7
 .L13:
 	add	a3, a3, a10	# readjust a3 with correct misalignment
 	bbci.l	a4, 1, .L14
 	# copy 2 bytes
-	EX(l8ui, a6, a3,  0, l_fixup)
-	EX(l8ui, a7, a3,  1, l_fixup)
+	EX(l8ui, a6, a3,  0, fixup)
+	EX(l8ui, a7, a3,  1, fixup)
 	addi	a3, a3,  2
-	EX(s8i, a6, a5,  0, s_fixup)
-	EX(s8i, a7, a5,  1, s_fixup)
+	EX(s8i, a6, a5,  0, fixup)
+	EX(s8i, a7, a5,  1, fixup)
 	addi	a5, a5,  2
 .L14:
 	bbci.l	a4, 0, .L15
 	# copy 1 byte
-	EX(l8ui, a6, a3,  0, l_fixup)
-	EX(s8i,  a6, a5,  0, s_fixup)
+	EX(l8ui, a6, a3,  0, fixup)
+	EX(s8i,  a6, a5,  0, fixup)
 .L15:
 	movi	a2, 0		# return success for len bytes copied
 	retw
@@ -291,30 +291,10 @@ __xtensa_copy_user:
  * bytes_copied = a5 - a2
  * retval = bytes_not_copied = original len - bytes_copied
  * retval = a11 - (a5 - a2)
- *
- * Clearing the remaining pieces of kernel memory plugs security
- * holes.  This functionality is the equivalent of the *_zeroing
- * functions that some architectures provide.
  */
 
-.Lmemset:
-	.word	memset
 
-s_fixup:
+fixup:
 	sub	a2, a5, a2	/* a2 <-- bytes copied */
 	sub	a2, a11, a2	/* a2 <-- bytes not copied */
 	retw
-
-l_fixup:
-	sub	a2, a5, a2	/* a2 <-- bytes copied */
-	sub	a2, a11, a2	/* a2 <-- bytes not copied == return value */
-
-	/* void *memset(void *s, int c, size_t n); */
-	mov	a6, a5		/* s */
-	movi	a7, 0		/* c */
-	mov	a8, a2		/* n */
-	l32r	a4, .Lmemset
-	callx4	a4
-	/* Ignore memset return value in a6. */
-	/* a2 still contains bytes not copied. */
-	retw
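
The unified fixup above only reports progress; the tail zeroing is gone because the generic wrapper now does it (see the sketch after the shortlog). In rough C terms, given that a2 holds the original destination on entry, a5 the current destination cursor, and a11 the original length, the fixup computes:

	/* Illustrative C equivalent of the 'fixup' label, not generated code. */
	unsigned long fixup(unsigned long dst_orig /* a2 */,
			    unsigned long dst_cur  /* a5 */,
			    unsigned long len      /* a11 */)
	{
		unsigned long copied = dst_cur - dst_orig;	/* a5 - a2 */
		return len - copied;				/* bytes not copied */
	}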