author		Joerg Roedel <jroedel@suse.de>	2020-08-07 10:40:13 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-07 09:20:57 -0700
commit		995909a4e22bc7b3ea3a71388cbb62ffebd76e7b (patch)
tree		2b72f1c5f8a4759b66537a1536a0515fc527290c /arch/x86/mm/init_64.c
parent		9bceb80b3cc483e6763c39a4928402fa82815d3e (diff)
x86/mm/64: Do not dereference non-present PGD entries
The code for preallocate_vmalloc_pages() was written under the
assumption that the p4d_offset() and pud_offset() functions would
perform present checks before dereferencing the parent entries.

This assumption is wrong and leads to a bug which causes the physical
address found in the PGD entry to be used as a page-table page, even if
the PGD entry is not present.
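
For illustration, here is a simplified sketch of what p4d_offset() does
with 5-level paging enabled (closely modeled on the x86 helper, not the
exact source): the value stored in the PGD entry is used as the base of
the P4D table, with no present check on the way:

	static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
	{
		if (!pgtable_l5_enabled())
			return (p4d_t *)pgd;	/* folded, no dereference */
		/* *pgd is dereferenced unconditionally here */
		return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
	}

pud_offset() behaves the same way one level further down, which is how
a non-present PGD entry ends up being treated as a page-table page.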

So the code flow currently is:

	pgd = pgd_offset_k(addr);
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		p4d = p4d_alloc(&init_mm, pgd, addr);

This lacks at least a check for pgd_none(); the correct flow would be:

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		p4d = p4d_alloc(&init_mm, pgd, addr);
	else
		p4d = p4d_offset(pgd, addr);

But this is exactly the flow that p4d_alloc() and pud_alloc() implement
internally, so there is no need to duplicate it here.

Remove the p?d_none() checks from the function and just call into
p4d_alloc() and pud_alloc() to correctly pre-allocate the PGD entries.
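
For reference, a simplified sketch of the p4d_alloc() wrapper (closely
modeled on the generic definition in include/linux/mm.h); pud_alloc()
follows the same pattern one level down:

	static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
				       unsigned long address)
	{
		/* Allocate only if the PGD entry is none, else just walk it */
		return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
			NULL : p4d_offset(pgd, address);
	}

So calling p4d_alloc()/pud_alloc() directly gives exactly the
none-check-then-allocate-or-walk behaviour shown above.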

Reported-and-tested-by: Jason A. Donenfeld <Jason@zx2c4.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Fixes: 6eb82f994026 ("x86/mm: Pre-allocate P4D/PUD pages for vmalloc area")
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--	arch/x86/mm/init_64.c	31
1 file changed, 13 insertions, 18 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3f4e29a78f2b..449e071240e1 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1253,28 +1253,23 @@ static void __init preallocate_vmalloc_pages(void)
 		p4d_t *p4d;
 		pud_t *pud;
 
-		p4d = p4d_offset(pgd, addr);
-		if (p4d_none(*p4d)) {
-			/* Can only happen with 5-level paging */
-			p4d = p4d_alloc(&init_mm, pgd, addr);
-			if (!p4d) {
-				lvl = "p4d";
-				goto failed;
-			}
-		}
+		lvl = "p4d";
+		p4d = p4d_alloc(&init_mm, pgd, addr);
+		if (!p4d)
+			goto failed;
 
+		/*
+		 * With 5-level paging the P4D level is not folded. So the PGDs
+		 * are now populated and there is no need to walk down to the
+		 * PUD level.
+		 */
 		if (pgtable_l5_enabled())
 			continue;
 
-		pud = pud_offset(p4d, addr);
-		if (pud_none(*pud)) {
-			/* Ends up here only with 4-level paging */
-			pud = pud_alloc(&init_mm, p4d, addr);
-			if (!pud) {
-				lvl = "pud";
-				goto failed;
-			}
-		}
+		lvl = "pud";
+		pud = pud_alloc(&init_mm, p4d, addr);
+		if (!pud)
+			goto failed;
 	}
 
 	return;