author	Kees Cook <keescook@chromium.org>	2016-06-23 15:24:05 -0700
committer	Kees Cook <keescook@chromium.org>	2016-07-26 14:43:54 -0700
commit	ed18adc1cdd00a5c55a20fbdaed4804660772281
tree	652ad77b2d5e9ed24eb4782c2b270c7e7b4fc0c1	/mm/slub.c
parent	04385fc5e8fffed84425d909a783c0f0c587d847
download	linux-ed18adc1cdd00a5c55a20fbdaed4804660772281.tar.gz
mm: SLUB hardened usercopy support
Under CONFIG_HARDENED_USERCOPY, this adds object size checking to the
SLUB allocator to catch any copies that may span objects. Includes a
redzone handling fix discovered by Michael Ellerman.

Based on code from PaX and grsecurity.

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Laura Abbott <labbott@redhat.com>
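
A stand-alone sketch of the arithmetic behind the object-size check added below may help illustrate the idea; the structure and values here are illustrative only (the field names merely mirror struct kmem_cache) and are not kernel code. The pointer's offset inside its slab object is found with a modulo over the object stride, the left red zone is skipped when present, and the copy is rejected unless it fits entirely inside the usable object size.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the few kmem_cache fields the check consults. */
struct fake_cache {
	const char *name;
	size_t size;		/* stride between objects within a slab page */
	size_t object_size;	/* usable bytes, as slab_ksize() would report */
	size_t red_left_pad;	/* left red zone size, when debugging is on */
	int red_zone;		/* stands in for SLAB_RED_ZONE being set */
};

/* Returns NULL if the copy stays inside one object, else the cache name. */
static const char *check_span(const struct fake_cache *s,
			      size_t ptr_offset_in_page, size_t n)
{
	size_t offset = ptr_offset_in_page % s->size;	/* offset within object */

	if (s->red_zone) {
		if (offset < s->red_left_pad)
			return s->name;			/* points into the red zone */
		offset -= s->red_left_pad;
	}

	if (offset <= s->object_size && n <= s->object_size - offset)
		return NULL;				/* falls entirely within one object */

	return s->name;					/* would span objects */
}

int main(void)
{
	struct fake_cache kmalloc_64 = { "kmalloc-64", 64, 64, 0, 0 };

	/* 32-byte copy starting 16 bytes into an object: allowed. */
	printf("%s\n", check_span(&kmalloc_64, 16, 32) ? "rejected" : "allowed");

	/* 80-byte copy starting at an object boundary: spans objects, rejected. */
	printf("%s\n", check_span(&kmalloc_64, 64, 80) ? "rejected" : "allowed");

	return 0;
}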
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	40
1 file changed, 40 insertions, 0 deletions
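
The hook's return convention (NULL when the copy passes, the offending cache name on failure) is intended for the allocator-independent usercopy checking introduced elsewhere in this series; the fragment below is only an assumed sketch of such a caller, not code from this patch.

/*
 * Assumed caller sketch, not part of this patch: a generic usercopy
 * check would locate the slab page behind the pointer and ask the
 * allocator whether the copy stays within a single object.
 */
static void sketch_check_heap_copy(const void *ptr, unsigned long n)
{
	struct page *page = virt_to_head_page(ptr);
	const char *bad_cache;

	if (!PageSlab(page))
		return;		/* not a slab object; other checks would apply */

	bad_cache = __check_heap_object(ptr, n, page);
	if (bad_cache)
		pr_err("usercopy: %lu-byte copy overlaps objects in cache '%s'\n",
		       n, bad_cache);
}
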
diff --git a/mm/slub.c b/mm/slub.c
index 825ff4505336..256a8efd165e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3614,6 +3614,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page)
+{
+	struct kmem_cache *s;
+	unsigned long offset;
+	size_t object_size;
+
+	/* Find object and usable object size. */
+	s = page->slab_cache;
+	object_size = slab_ksize(s);
+
+	/* Reject impossible pointers. */
+	if (ptr < page_address(page))
+		return s->name;
+
+	/* Find offset within object. */
+	offset = (ptr - page_address(page)) % s->size;
+
+	/* Adjust for redzone and reject if within the redzone. */
+	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+		if (offset < s->red_left_pad)
+			return s->name;
+		offset -= s->red_left_pad;
+	}
+
+	/* Allow address range falling entirely within object size. */
+	if (offset <= object_size && n <= object_size - offset)
+		return NULL;
+
+	return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 static size_t __ksize(const void *object)
 {
 	struct page *page;