summary refs log tree commit diff
path: root/arch/s390/mm
diff options
context:
space:
mode:
author: Alexander Gordeev <agordeev@linux.ibm.com> 2021-11-04 07:14:46 +0100
committer: Heiko Carstens <hca@linux.ibm.com> 2021-12-16 19:58:08 +0100
commit: 4c88bb96e40b757f4796f70a4a7507df554467c4 (patch)
tree: c67090799526e8f853c3790a69b4e54e2ed8906f /arch/s390/mm
parent: 1194372db6f3c917c9c6f6907e8378cf1076c557 (diff)
download: linux-4c88bb96e40b757f4796f70a4a7507df554467c4.tar.gz
s390/mm: check 2KB-fragment page on release
When CONFIG_DEBUG_VM is defined, check that the pending-remove
and tracking nibbles (bits 31-24 of the page refcount) are
cleared. Should the earlier stages of the page lifespan
have a race or logical error, such a check could help in
exposing the issue.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r-- arch/s390/mm/pgalloc.c | 41
1 file changed, 30 insertions(+), 11 deletions(-)
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 5d5549843c5c..fd35c1a0213b 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -311,10 +311,23 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	return table;
 }
 
+static void page_table_release_check(struct page *page, void *table,
+				     unsigned int half, unsigned int mask)
+{
+	char msg[128];
+
+	if (!IS_ENABLED(CONFIG_DEBUG_VM) || !mask)
+		return;
+	snprintf(msg, sizeof(msg),
+		 "Invalid pgtable %p release half 0x%02x mask 0x%02x",
+		 table, half, mask);
+	dump_page(page, msg);
+}
+
 void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
+	unsigned int mask, bit, half;
 	struct page *page;
-	unsigned int bit, mask;
 
 	page = virt_to_page(table);
 	if (!mm_alloc_pgste(mm)) {
@@ -337,10 +350,14 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 		mask >>= 24;
 		if (mask != 0x00U)
 			return;
+		half = 0x01U << bit;
 	} else {
-		atomic_xor_bits(&page->_refcount, 0x03U << 24);
+		half = 0x03U;
+		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
+		mask >>= 24;
 	}
 
+	page_table_release_check(page, table, half, mask);
 	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
@@ -380,28 +397,30 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 
 void __tlb_remove_table(void *_table)
 {
-	unsigned int mask = (unsigned long) _table & 0x03U;
+	unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
 	void *table = (void *)((unsigned long) _table ^ mask);
 	struct page *page = virt_to_page(table);
 
-	switch (mask) {
+	switch (half) {
 	case 0x00U:	/* pmd, pud, or p4d */
 		free_pages((unsigned long) table, 2);
-		break;
+		return;
 	case 0x01U:	/* lower 2K of a 4K page table */
 	case 0x02U:	/* higher 2K of a 4K page table */
 		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
 		mask >>= 24;
 		if (mask != 0x00U)
-			break;
-		fallthrough;
+			return;
+		break;
 	case 0x03U:	/* 4K page table with pgstes */
-		if (mask & 0x03U)
-			atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		pgtable_pte_page_dtor(page);
-		__free_page(page);
+		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
+		mask >>= 24;
 		break;
 	}
+
+	page_table_release_check(page, table, half, mask);
+	pgtable_pte_page_dtor(page);
+	__free_page(page);
 }
 
 /*