author     Linus Torvalds <torvalds@linux-foundation.org>   2014-08-31 17:08:42 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-08-31 17:08:42 -0700
commit     05bdb8c90e9973ca5f1d338a697f9582b11a27e2 (patch)
tree       3a4acaea1941d8b0e41e79a2d33b96ec31e628f9   /arch/xtensa/mm/mmu.c
parent     ca98565a6182a960cd857d7546267a0775154eb8 (diff)
parent     e792290be763932d1b8cdf8a36d7015482a49d07 (diff)
Merge tag 'xtensa-20140830' of git://github.com/czankel/xtensa-linux
Pull Xtensa updates from Chris Zankel:
 "Xtensa improvements for 3.17:
   - support highmem on cores with aliasing data cache.  Enable highmem
     on kc705 by default
   - simplify addition of new core variants (no need to modify Kconfig /
     Makefiles)
   - improve robustness of unaligned access handler and its interaction
     with window overflow/underflow exception handlers
   - deprecate atomic and spill registers syscalls
   - clean up Kconfig: remove orphan MATH_EMULATION, sort 'select'
     statements
   - wire up renameat2 syscall.

  Various fixes:
   - fix address checks in dma_{alloc,free}_coherent (runtime BUG)
   - fix access to THREAD_RA/THREAD_SP/THREAD_DS (debug build breakage)
   - fix TLBTEMP_BASE_2 region handling in fast_second_level_miss
     (runtime unrecoverable exception)
   - fix a6 and a7 handling in fast_syscall_xtensa (runtime userspace
     register clobbering)
   - fix kernel/user jump out of fast_unaligned (potential runtime
     unrecoverable exception)
   - replace termios IOCTL code definitions with constants (userspace
     build breakage)" [see the sketch after this message]
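
The termios breakage comes from the way ioctl request macros work: _IOR()-style macros encode sizeof() of their argument type, so defining TCGETS in terms of struct termios forces every userspace file that includes the uapi header to have that struct fully visible. The stand-alone sketch below mimics the asm-generic ioctl bit layout to make that dependency concrete; MY_IOR, struct fake_termios and the printed value are illustrative stand-ins, not the real xtensa definitions.

/* Why an _IOR()-defined TCGETS needs struct termios, and why a
 * precomputed constant does not.  The bit layout mirrors
 * asm-generic/ioctl.h (2-bit direction, 14-bit size, 8-bit type,
 * 8-bit number); everything named MY_* or fake_* is a stand-in.
 */
#include <stdio.h>

#define MY_IOC_NRBITS    8
#define MY_IOC_TYPEBITS  8
#define MY_IOC_SIZEBITS  14
#define MY_IOC_NRSHIFT   0
#define MY_IOC_TYPESHIFT (MY_IOC_NRSHIFT + MY_IOC_NRBITS)
#define MY_IOC_SIZESHIFT (MY_IOC_TYPESHIFT + MY_IOC_TYPEBITS)
#define MY_IOC_DIRSHIFT  (MY_IOC_SIZESHIFT + MY_IOC_SIZEBITS)
#define MY_IOC_READ      2U

/* The sizeof(argtype) term is what drags the struct definition into
 * every includer of the header. */
#define MY_IOR(type, nr, argtype)                              \
	((MY_IOC_READ << MY_IOC_DIRSHIFT) |                    \
	 ((unsigned int)sizeof(argtype) << MY_IOC_SIZESHIFT) | \
	 ((unsigned int)(type) << MY_IOC_TYPESHIFT) |          \
	 ((unsigned int)(nr) << MY_IOC_NRSHIFT))

struct fake_termios { unsigned int fields[19]; };  /* stand-in layout only */

int main(void)
{
	/* "Before": the macro form, usable only where the struct is known.
	 * "After": the same number written out, usable anywhere. */
	unsigned int tcgets_like = MY_IOR('t', 19, struct fake_termios);

	printf("TCGETS-style request value: %#x\n", tcgets_like);
	return 0;
}

Writing the already-computed number directly into the uapi header, as the xtensa fix does, keeps the ABI value identical while removing the struct dependency that broke userspace builds.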

* tag 'xtensa-20140830' of git://github.com/czankel/xtensa-linux: (25 commits)
  xtensa: deprecate fast_xtensa and fast_spill_registers syscalls
  xtensa: don't allow overflow/underflow on unaligned stack
  xtensa: fix a6 and a7 handling in fast_syscall_xtensa
  xtensa: allow single-stepping through unaligned load/store
  xtensa: move invalid unaligned instruction handler closer to its users
  xtensa: make fast_unaligned store restartable
  xtensa: add double exception fixup handler for fast_unaligned
  xtensa: fix kernel/user jump out of fast_unaligned
  xtensa: configure kc705 for highmem
  xtensa: support highmem in aliasing cache flushing code
  xtensa: support aliasing cache in kmap
  xtensa: support aliasing cache in k[un]map_atomic
  xtensa: implement clear_user_highpage and copy_user_highpage
  xtensa: fix TLBTEMP_BASE_2 region handling in fast_second_level_miss
  xtensa: allow fixmap and kmap span more than one page table
  xtensa: make fixmap region addressing grow with index
  xtensa: fix access to THREAD_RA/THREAD_SP/THREAD_DS
  xtensa: add renameat2 syscall
  xtensa: fix address checks in dma_{alloc,free}_coherent
  xtensa: replace IOCTL code definitions with constants
  ...
Diffstat (limited to 'arch/xtensa/mm/mmu.c')
-rw-r--r--  arch/xtensa/mm/mmu.c | 38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 3429b483d9f8..abe4513eb0dd 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -18,32 +18,38 @@
 #include <asm/io.h>
 
 #if defined(CONFIG_HIGHMEM)
-static void * __init init_pmd(unsigned long vaddr)
+static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
 {
 	pgd_t *pgd = pgd_offset_k(vaddr);
 	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	pte_t *pte;
+	unsigned long i;
 
-	if (pmd_none(*pmd)) {
-		unsigned i;
-		pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
+	n_pages = ALIGN(n_pages, PTRS_PER_PTE);
 
-		for (i = 0; i < 1024; i++)
-			pte_clear(NULL, 0, pte + i);
+	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
+		 __func__, vaddr, n_pages);
 
-		set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
-		BUG_ON(pte != pte_offset_kernel(pmd, 0));
-		pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
-			 __func__, vaddr, pmd, pte);
-		return pte;
-	} else {
-		return pte_offset_kernel(pmd, 0);
+	pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t));
+
+	for (i = 0; i < n_pages; ++i)
+		pte_clear(NULL, 0, pte + i);
+
+	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
+		pte_t *cur_pte = pte + i;
+
+		BUG_ON(!pmd_none(*pmd));
+		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
+		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
+		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
+			 __func__, pmd, cur_pte);
 	}
+	return pte;
 }
 
 static void __init fixedrange_init(void)
 {
-	BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
-	init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
+	init_pmd(__fix_to_virt(0), __end_of_fixed_addresses);
 }
 #endif
 
@@ -52,7 +58,7 @@ void __init paging_init(void)
 	memset(swapper_pg_dir, 0, PAGE_SIZE);
 #ifdef CONFIG_HIGHMEM
 	fixedrange_init();
-	pkmap_page_table = init_pmd(PKMAP_BASE);
+	pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
 	kmap_init();
 #endif
 }
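
As a quick illustration of the reworked init_pmd() merged above: the request is rounded up to a whole number of page tables, one contiguous, cleared pte array is allocated, and one pmd entry is installed per PTRS_PER_PTE-sized chunk, which is what lets the fixmap and pkmap regions span more than one page table. The sketch below mirrors that loop structure in plain C; the types, the allocator and the constants are simplified stand-ins for the kernel's pte_t, alloc_bootmem_low_pages() and xtensa's real PTRS_PER_PTE.

/* Stand-alone sketch of the init_pmd() chunking logic: not kernel code,
 * just the same arithmetic with placeholder types and constants. */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PTRS_PER_PTE  1024UL                  /* placeholder */
#define ALIGN_UP(x, a)     ((((x) + (a) - 1) / (a)) * (a))

typedef unsigned long fake_pte_t;                  /* placeholder pte */

int main(void)
{
	unsigned long n_pages = 1536;   /* e.g. a LAST_PKMAP-style request */
	unsigned long i;
	fake_pte_t *pte;

	/* Round up so the array covers a whole number of page tables. */
	n_pages = ALIGN_UP(n_pages, FAKE_PTRS_PER_PTE);   /* 1536 -> 2048 */

	/* One contiguous, cleared pte array (calloc stands in for
	 * alloc_bootmem_low_pages() plus the pte_clear() loop). */
	pte = calloc(n_pages, sizeof(*pte));
	if (!pte)
		return 1;

	/* Advance one "pmd slot" per PTRS_PER_PTE ptes, exactly as the
	 * kernel loop increments pmd once per chunk of the pte array. */
	for (i = 0; i < n_pages; i += FAKE_PTRS_PER_PTE)
		printf("pmd[%lu] -> pte[%lu..%lu]\n",
		       i / FAKE_PTRS_PER_PTE, i, i + FAKE_PTRS_PER_PTE - 1);

	free(pte);
	return 0;
}

Because a request larger than one page table now simply consumes additional consecutive pmd slots, the old BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE) restriction in fixedrange_init() is no longer needed.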