author     Nicholas Piggin <npiggin@gmail.com>          2019-07-11 20:59:09 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-12 11:05:46 -0700
commit     ec11408a1630eed2cb03db55b8b372267f5f1032 (patch)
tree       00553e799f8002bffb51a1e10664f0f1f00082d5 /mm/page_alloc.c
parent     d9009d67f42e59760aae5471ba2f62b3d5d531d1 (diff)
mm/large system hash: use vmalloc for size > MAX_ORDER when !hashdist
The kernel currently clamps large system hashes to MAX_ORDER when hashdist
is not set, which is rather arbitrary.

vmalloc space is limited on 32-bit machines, but this shouldn't result in
much more of it being used, because small physical memory already limits
system hash sizes.

Include "vmalloc" or "linear" in the kernel log message.

Link: http://lkml.kernel.org/r/20190605144814.29319-1-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ae56e8feec0c..05143e0f821f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7981,6 +7981,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 	unsigned long log2qty, size;
 	void *table = NULL;
 	gfp_t gfp_flags;
+	bool virt;
 
 	/* allow the kernel cmdline to have a say */
 	if (!numentries) {
@@ -8037,6 +8038,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 
 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
 	do {
+		virt = false;
 		size = bucketsize << log2qty;
 		if (flags & HASH_EARLY) {
 			if (flags & HASH_ZERO)
@@ -8044,26 +8046,26 @@ void *__init alloc_large_system_hash(const char *tablename,
 			else
 				table = memblock_alloc_raw(size,
 							   SMP_CACHE_BYTES);
-		} else if (hashdist) {
+		} else if (get_order(size) >= MAX_ORDER || hashdist) {
 			table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+			virt = true;
 		} else {
 			/*
 			 * If bucketsize is not a power-of-two, we may free
 			 * some pages at the end of hash table which
 			 * alloc_pages_exact() automatically does
 			 */
-			if (get_order(size) < MAX_ORDER) {
-				table = alloc_pages_exact(size, gfp_flags);
-				kmemleak_alloc(table, size, 1, gfp_flags);
-			}
+			table = alloc_pages_exact(size, gfp_flags);
+			kmemleak_alloc(table, size, 1, gfp_flags);
 		}
 	} while (!table && size > PAGE_SIZE && --log2qty);
 
 	if (!table)
 		panic("Failed to allocate %s hash table\n", tablename);
 
-	pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
-		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
+	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
+		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
+		virt ? "vmalloc" : "linear");
 
 	if (_hash_shift)
 		*_hash_shift = log2qty;
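
With the patch applied, the pr_info() above tags each table with how it was
allocated. An illustrative boot line (values chosen to be self-consistent
for 8-byte buckets on a 4 KiB-page system, not taken from a real boot log):

	Dentry cache hash table entries: 8388608 (order: 14, 67108864 bytes, linear)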