-rw-r--r--  arch/sparc64/Kconfig.debug         |   8
-rw-r--r--  arch/sparc64/kernel/head.S         |   6
-rw-r--r--  arch/sparc64/kernel/ktlb.S         |  33
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S  |   3
-rw-r--r--  arch/sparc64/mm/init.c             | 109
-rw-r--r--  include/asm-sparc64/cacheflush.h   |   5
-rw-r--r--  include/asm-sparc64/pgtable.h      |   7
7 files changed, 156 insertions, 15 deletions
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index af0e9411b83e..fa06ea04837b 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -33,6 +33,14 @@ config DEBUG_BOOTMEM
 	depends on DEBUG_KERNEL
 	bool "Debug BOOTMEM initialization"
 
+config DEBUG_PAGEALLOC
+	bool "Page alloc debugging"
+	depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
+	help
+	  Unmap pages from the kernel linear mapping after free_pages().
+	  This results in a large slowdown, but helps to find certain types
+	  of memory corruptions.
+
 config MCOUNT
 	bool
 	depends on STACK_DEBUG
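
The help text above describes the mechanism: once a page leaves the allocator, its linear-mapping translation is torn down, so a stale reference faults immediately instead of silently corrupting whoever gets the page next. A minimal sketch of the bug class this catches (illustration only, not part of the patch; alloc_page/page_address/__free_page are the standard kernel APIs):

/* Use-after-free through the linear mapping.  Without DEBUG_PAGEALLOC
 * the stale store lands in memory that may already belong to someone
 * else; with it, the page was unmapped at __free_page() time and the
 * store takes an immediate data access fault, pinpointing the bug.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static void debug_pagealloc_demo(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	char *p;

	if (!page)
		return;

	p = page_address(page);		/* linear-mapping address */
	__free_page(page);		/* page is unmapped here */

	p[0] = 0xaa;			/* faults under DEBUG_PAGEALLOC */
}
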
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 56af714f5f1b..ecc748fb9ad7 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -525,12 +525,6 @@ bootup_user_stack_end:
 
 #include "ttable.S"
 #include "systbls.S"
-
-	.align	1024
-	.globl	swapper_pg_dir
-swapper_pg_dir:
-	.word	0
-
 #include "ktlb.S"
 #include "etrap.S"
 #include "rtrap.S"
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index a591bc0ebc7b..7796b37f478c 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -132,9 +132,40 @@ kvmap_do_obp:
  */
 	.align		32
 kvmap:
-	brlz,pt		%g4, kvmap_load
+	brgez,pn	%g4, kvmap_nonlinear
+	 nop
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	.globl		kvmap_linear_patch
+kvmap_linear_patch:
+#endif
+	ba,pt		%xcc, kvmap_load
 	 xor		%g2, %g4, %g5
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	sethi		%hi(swapper_pg_dir), %g5
+	or		%g5, %lo(swapper_pg_dir), %g5
+	sllx		%g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
+	srlx		%g6, 64 - PAGE_SHIFT, %g6
+	andn		%g6, 0x3, %g6
+	lduw		[%g5 + %g6], %g5
+	brz,pn		%g5, longpath
+	 sllx		%g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
+	srlx		%g6, 64 - PAGE_SHIFT, %g6
+	sllx		%g5, 11, %g5
+	andn		%g6, 0x3, %g6
+	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 sllx		%g4, 64 - PMD_SHIFT, %g6
+	srlx		%g6, 64 - PAGE_SHIFT, %g6
+	sllx		%g5, 11, %g5
+	andn		%g6, 0x7, %g6
+	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 nop
+	ba,a,pt		%xcc, kvmap_load
+#endif
+
 kvmap_nonlinear:
 	sethi		%hi(MODULES_VADDR), %g5
 	cmp		%g4, %g5
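
With DEBUG_PAGEALLOC the hunk above patches out the usual "TTE = vaddr XOR offset" shortcut and instead walks swapper_pg_dir in software from the TLB-miss handler: pgd and pmd entries are 32-bit words whose value shifted left by 11 gives the physical address of the next-level table, the final 64-bit pte is the TTE handed to kvmap_load, and a zero at any level branches to longpath. A hedged C rendering of the same walk follows; phys_read32()/phys_read64() are hypothetical stand-ins for the lduwa/ldxa loads through ASI_PHYS_USE_EC.

/* C sketch of the DEBUG_PAGEALLOC miss path above (not generated code).
 * phys_read32()/phys_read64() are hypothetical stand-ins for the
 * lduwa/ldxa accesses through ASI_PHYS_USE_EC in the real handler.
 */
extern unsigned int  phys_read32(unsigned long paddr);
extern unsigned long phys_read64(unsigned long paddr);

static unsigned long kvmap_linear_walk(unsigned long vaddr)
{
	unsigned long pgd_off, pmd_off, pte_off, pmd_table, pte_table;
	unsigned int pgd_ent, pmd_ent;

	/* 4-byte pgd slot selected by the PGDIR_BITS above PGDIR_SHIFT */
	pgd_off = ((vaddr >> PGDIR_SHIFT) & ((1UL << PGDIR_BITS) - 1)) << 2;
	pgd_ent = *(unsigned int *)((unsigned long)swapper_pg_dir + pgd_off);
	if (!pgd_ent)
		return 0;				/* -> longpath */

	/* pmd table sits at (pgd entry << 11) in physical memory */
	pmd_table = (unsigned long)pgd_ent << 11;
	pmd_off = ((vaddr >> PMD_SHIFT) & ((1UL << PMD_BITS) - 1)) << 2;
	pmd_ent = phys_read32(pmd_table + pmd_off);
	if (!pmd_ent)
		return 0;				/* -> longpath */

	/* pte table at (pmd entry << 11); ptes are full 64-bit TTEs */
	pte_table = (unsigned long)pmd_ent << 11;
	pte_off = ((vaddr >> PAGE_SHIFT) & ((1UL << (PAGE_SHIFT - 3)) - 1)) << 3;

	return phys_read64(pte_table + pte_off);	/* 0 -> longpath */
}
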
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index f47d0be39378..2af0cf0a8640 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -9,8 +9,7 @@ ENTRY(_start)
 jiffies = jiffies_64;
 SECTIONS
 {
-  swapper_pmd_dir = 0x0000000000402000;
-  empty_pg_dir = 0x0000000000403000;
+  swapper_low_pmd_dir = 0x0000000000402000;
   . = 0x4000;
   .text 0x0000000000404000 :
   {
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index cf747372f0c9..8d72f8a1268e 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1332,15 +1332,114 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	return end_pfn;
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
+{
+	unsigned long vstart = PAGE_OFFSET + pstart;
+	unsigned long vend = PAGE_OFFSET + pend;
+	unsigned long alloc_bytes = 0UL;
+
+	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
+		prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
+			    vstart, vend);
+		prom_halt();
+	}
+
+	while (vstart < vend) {
+		unsigned long this_end, paddr = __pa(vstart);
+		pgd_t *pgd = pgd_offset_k(vstart);
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pud = pud_offset(pgd, vstart);
+		if (pud_none(*pud)) {
+			pmd_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			alloc_bytes += PAGE_SIZE;
+			pud_populate(&init_mm, pud, new);
+		}
+
+		pmd = pmd_offset(pud, vstart);
+		if (!pmd_present(*pmd)) {
+			pte_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			alloc_bytes += PAGE_SIZE;
+			pmd_populate_kernel(&init_mm, pmd, new);
+		}
+
+		pte = pte_offset_kernel(pmd, vstart);
+		this_end = (vstart + PMD_SIZE) & PMD_MASK;
+		if (this_end > vend)
+			this_end = vend;
+
+		while (vstart < this_end) {
+			pte_val(*pte) = (paddr | pgprot_val(prot));
+
+			vstart += PAGE_SIZE;
+			paddr += PAGE_SIZE;
+			pte++;
+		}
+	}
+
+	return alloc_bytes;
+}
+
+extern struct linux_mlist_p1275 *prom_ptot_ptr;
+extern unsigned int kvmap_linear_patch[1];
+
+static void __init kernel_physical_mapping_init(void)
+{
+	struct linux_mlist_p1275 *p = prom_ptot_ptr;
+	unsigned long mem_alloced = 0UL;
+
+	while (p) {
+		unsigned long phys_start, phys_end;
+
+		phys_start = p->start_adr;
+		phys_end = phys_start + p->num_bytes;
+		mem_alloced += kernel_map_range(phys_start, phys_end,
+						PAGE_KERNEL);
+
+		p = p->theres_more;
+	}
+
+	printk("Allocated %ld bytes for kernel page tables.\n",
+	       mem_alloced);
+
+	kvmap_linear_patch[0] = 0x01000000; /* nop */
+	flushi(&kvmap_linear_patch[0]);
+
+	__flush_tlb_all();
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
+	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+
+	kernel_map_range(phys_start, phys_end,
+			 (enable ? PAGE_KERNEL : __pgprot(0)));
+
+	/* we should perform an IPI and flush all tlbs,
+	 * but that can deadlock->flush only current cpu.
+	 */
+	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
+				 PAGE_OFFSET + phys_end);
+}
+#endif
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
 
 static unsigned long last_valid_pfn;
+pgd_t swapper_pg_dir[2048];
 
 void __init paging_init(void)
 {
-	extern pmd_t swapper_pmd_dir[1024];
 	unsigned long end_pfn, pages_avail, shift;
 	unsigned long real_end;
 
@@ -1361,11 +1460,11 @@ void __init paging_init(void)
 	 */
 	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
 	
-	memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
+	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
 
 	/* Now can init the kernel/bad page tables. */
 	pud_set(pud_offset(&swapper_pg_dir[0], 0),
-		swapper_pmd_dir + (shift / sizeof(pgd_t)));
+		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
 	
 	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
 	
@@ -1390,6 +1489,10 @@ void __init paging_init(void)
 	pages_avail = 0;
 	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	kernel_physical_mapping_init();
+#endif
+
 	{
 		unsigned long zones_size[MAX_NR_ZONES];
 		unsigned long zholes_size[MAX_NR_ZONES];
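
In the init.c changes above, kernel_map_range() builds or tears down linear-mapping page tables one PAGE_SIZE pte at a time, kernel_physical_mapping_init() runs it over every prom-reported physical range at boot and then nops out kvmap_linear_patch so the miss handler switches to the software walk, and kernel_map_pages() is the hook the generic page allocator drives as pages come and go. A sketch of the expected call pattern from the allocator side (illustrative only, not part of this patch):

/* Illustrative only: how generic mm code is expected to use the hook
 * when CONFIG_DEBUG_PAGEALLOC is set.  Pages are mapped back in as
 * they are handed out and unmapped as they are freed, so any late
 * reference through the linear mapping faults.
 */
static void debug_pagealloc_hooks_sketch(struct page *page, int order)
{
	/* allocation path: make the pages visible again */
	kernel_map_pages(page, 1 << order, 1);

	/* ... the owner uses the pages ... */

	/* free path: unmap before the pages return to the free lists */
	kernel_map_pages(page, 1 << order, 0);
}

Note the comment inside kernel_map_pages() above: cross-calling every cpu to flush its TLB from the free path could deadlock, so only the local TLB is flushed and other cpus may briefly keep a stale translation.
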
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index ededd2659eab..b3f61659ba81 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -66,6 +66,11 @@ extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/* internal debugging function */
+void kernel_map_pages(struct page *page, int numpages, int enable);
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _SPARC64_CACHEFLUSH_H */
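
The prototype is only visible when CONFIG_DEBUG_PAGEALLOC is set; generic callers conventionally compile against an empty inline when it is not, roughly like the following sketch (provided by generic headers, not by this patch):

/* Sketch of the usual fallback when the option is off: callers stay
 * unconditional and the hook compiles away to nothing.
 */
#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void kernel_map_pages(struct page *page, int numpages,
				    int enable)
{
}
#endif
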
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index a297f6144f0f..43cbb089cde2 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -60,13 +60,13 @@
  * table can map
  */
 #define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
-#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PMD_BITS	(PAGE_SHIFT - 2)
 
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
 #define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
-#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 #define PGDIR_BITS	(PAGE_SHIFT - 2)
 
@@ -336,7 +336,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
 #define pte_clear(mm,addr,ptep)		\
 	set_pte_at((mm), (addr), (ptep), __pte(0UL))
 
-extern pgd_t swapper_pg_dir[1];
+extern pgd_t swapper_pg_dir[2048];
+extern pmd_t swapper_low_pmd_dir[2048];
 
 /* These do nothing with the way I have things setup. */
 #define mmu_lockarea(vaddr, len)		(vaddr)
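
PMD_SIZE and PGDIR_SIZE switch from a bare 1UL literal to _AC(1,UL), presumably so the page-table geometry definitions can also be pulled into assembler sources such as the new ktlb.S walk, where a UL suffix would not assemble. The helper is roughly the following (conventionally supplied by the sparc64 const header, not by this patch):

/* Rough sketch of _AC(): in C the type suffix is pasted onto the
 * literal, in assembler it is dropped, so PMD_SIZE/PGDIR_SIZE expand
 * cleanly in both contexts.
 */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif
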