author		Heiko Carstens <heiko.carstens@de.ibm.com>	2009-01-14 14:13:59 +0100
committer	Heiko Carstens <heiko.carstens@de.ibm.com>	2009-01-14 14:15:16 +0100
commit		1a94bc34768e463a93cb3751819709ab0ea80a01 (patch)
tree		a27753a61c2aa02d31b2d1f8efe9f00e68770856
parent		f627a741d24f12955fa2d9f8831c3b12860635bd (diff)
[CVE-2009-0029] System call wrapper infrastructure
From: Martin Schwidefsky <schwidefsky@de.ibm.com>

By selecting HAVE_SYSCALL_WRAPPERS, architectures can activate
system call wrappers in order to sign-extend system call arguments.

All architectures where the ABI defines that the caller of a function
has to perform sign extension probably need this.

Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
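
For illustration, with CONFIG_HAVE_SYSCALL_WRAPPERS enabled a definition
such as

	SYSCALL_DEFINE3(write, unsigned int, fd,
			const char __user *, buf, size_t, count)

would expand roughly to the sketch below. write() is used here only as an
example; this patch adds just the infrastructure, and follow-up patches in
the series convert the actual system calls:

	asmlinkage long sys_write(unsigned int fd, const char __user *buf,
				  size_t count);
	static inline long SYSC_write(unsigned int fd, const char __user *buf,
				      size_t count);

	/* The entry path reaches this variant, which takes all args as long. */
	asmlinkage long SyS_write(long fd, long buf, long count)
	{
		/* BUILD_BUG_ON() size checks from __SC_TESTx omitted here. */
		return (long) SYSC_write((unsigned int) fd,
					 (const char __user *) buf,
					 (size_t) count);
	}
	SYSCALL_ALIAS(sys_write, SyS_write);

	static inline long SYSC_write(unsigned int fd, const char __user *buf,
				      size_t count)
	{
		/* ... original system call body ... */
	}

Because SyS_write() takes plain longs and casts each one back to the real
parameter type itself, the compiler performs the required sign/zero
extension inside the wrapper instead of trusting whatever upper register
bits user space left in place. The SYSCALL_ALIAS() keeps the exported
symbol name sys_write unchanged, so syscall tables need no modification.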
-rw-r--r--	arch/Kconfig	 3
-rw-r--r--	include/linux/syscalls.h	62
2 files changed, 65 insertions, 0 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 2e13aa261929..550dab22daa1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -62,6 +62,9 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
 	  See Documentation/unaligned-memory-access.txt for more
 	  information on the topic of unaligned memory accesses.
 
+config HAVE_SYSCALL_WRAPPERS
+	bool
+
 config KRETPROBES
 	def_bool y
 	depends on KPROBES && HAVE_KRETPROBES
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index ca079c3d09e3..0bb537d7ba2e 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -66,6 +66,68 @@ struct old_linux_dirent;
 #include <linux/quota.h>
 #include <linux/key.h>
 
+#define __SC_DECL1(t1, a1)	t1 a1
+#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__)
+#define __SC_DECL3(t3, a3, ...) t3 a3, __SC_DECL2(__VA_ARGS__)
+#define __SC_DECL4(t4, a4, ...) t4 a4, __SC_DECL3(__VA_ARGS__)
+#define __SC_DECL5(t5, a5, ...) t5 a5, __SC_DECL4(__VA_ARGS__)
+#define __SC_DECL6(t6, a6, ...) t6 a6, __SC_DECL5(__VA_ARGS__)
+
+#define __SC_LONG1(t1, a1) 	long a1
+#define __SC_LONG2(t2, a2, ...) long a2, __SC_LONG1(__VA_ARGS__)
+#define __SC_LONG3(t3, a3, ...) long a3, __SC_LONG2(__VA_ARGS__)
+#define __SC_LONG4(t4, a4, ...) long a4, __SC_LONG3(__VA_ARGS__)
+#define __SC_LONG5(t5, a5, ...) long a5, __SC_LONG4(__VA_ARGS__)
+#define __SC_LONG6(t6, a6, ...) long a6, __SC_LONG5(__VA_ARGS__)
+
+#define __SC_CAST1(t1, a1)	(t1) a1
+#define __SC_CAST2(t2, a2, ...) (t2) a2, __SC_CAST1(__VA_ARGS__)
+#define __SC_CAST3(t3, a3, ...) (t3) a3, __SC_CAST2(__VA_ARGS__)
+#define __SC_CAST4(t4, a4, ...) (t4) a4, __SC_CAST3(__VA_ARGS__)
+#define __SC_CAST5(t5, a5, ...) (t5) a5, __SC_CAST4(__VA_ARGS__)
+#define __SC_CAST6(t6, a6, ...) (t6) a6, __SC_CAST5(__VA_ARGS__)
+
+#define __SC_TEST(type)		BUILD_BUG_ON(sizeof(type) > sizeof(long))
+#define __SC_TEST1(t1, a1)	__SC_TEST(t1)
+#define __SC_TEST2(t2, a2, ...)	__SC_TEST(t2); __SC_TEST1(__VA_ARGS__)
+#define __SC_TEST3(t3, a3, ...)	__SC_TEST(t3); __SC_TEST2(__VA_ARGS__)
+#define __SC_TEST4(t4, a4, ...)	__SC_TEST(t4); __SC_TEST3(__VA_ARGS__)
+#define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
+#define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
+
+#define SYSCALL_DEFINE0(name)   asmlinkage long sys_##name(void)
+#define SYSCALL_DEFINE1(...)    SYSCALL_DEFINEx(1, __VA_ARGS__)
+#define SYSCALL_DEFINE2(...)    SYSCALL_DEFINEx(2, __VA_ARGS__)
+#define SYSCALL_DEFINE3(...)    SYSCALL_DEFINEx(3, __VA_ARGS__)
+#define SYSCALL_DEFINE4(...)    SYSCALL_DEFINEx(4, __VA_ARGS__)
+#define SYSCALL_DEFINE5(...)    SYSCALL_DEFINEx(5, __VA_ARGS__)
+#define SYSCALL_DEFINE6(...)    SYSCALL_DEFINEx(6, __VA_ARGS__)
+
+#define SYSCALL_ALIAS(alias, name)					\
+	asm ("\t.globl " #alias "\n\t.set " #alias ", " #name)
+
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+
+#define SYSCALL_DEFINE(name) static inline long SYSC_##name
+#define SYSCALL_DEFINEx(x, name, ...)					\
+	asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__));		\
+	static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__));	\
+	asmlinkage long SyS_##name(__SC_LONG##x(__VA_ARGS__))		\
+	{								\
+		__SC_TEST##x(__VA_ARGS__);				\
+		return (long) SYSC_##name(__SC_CAST##x(__VA_ARGS__));	\
+	}								\
+	SYSCALL_ALIAS(sys_##name, SyS_##name);				\
+	static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__))
+
+#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
+
+#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
+#define SYSCALL_DEFINEx(x, name, ...)					\
+	asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__))
+
+#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
+
 asmlinkage long sys_time(time_t __user *tloc);
 asmlinkage long sys_stime(time_t __user *tptr);
 asmlinkage long sys_gettimeofday(struct timeval __user *tv,
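
As a usage sketch (illustrative, following how the later patches in this
series apply the macro), a definition such as sys_stime() above would
change from

	asmlinkage long sys_stime(time_t __user *tptr)
	{
		/* ... body ... */
	}

to

	SYSCALL_DEFINE1(stime, time_t __user *, tptr)
	{
		/* ... body unchanged ... */
	}

while architectures that need the wrappers additionally select
HAVE_SYSCALL_WRAPPERS from their arch Kconfig.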