author	Nicholas Piggin <npiggin@gmail.com>	2018-11-07 10:35:34 +0800
committer	Ley Foon Tan <ley.foon.tan@intel.com>	2019-03-07 05:29:35 +0800
commit	3ac23944de570df7a6309425aeef063be38f37c4 (patch)
tree	7c5a3a2fd805045dfbd4eaa507d25dbc89d9cd1c /arch/nios2/mm
parent	b6a10463438d8775aa6aa09ece46e8af14345712 (diff)
download	linux-3ac23944de570df7a6309425aeef063be38f37c4.tar.gz
nios2: update_mmu_cache preload the TLB with the new PTE
Rather than flushing the TLB entry when installing a new PTE and
leaving the fast TLB reload handler to re-fill the TLB later, refill
the TLB entry in place while removing the old one.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Ley Foon Tan <ley.foon.tan@intel.com>
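
For context, update_mmu_cache() is the arch hook that the generic fault
path calls right after a new PTE has been written into the page table.
A simplified sketch of that calling sequence follows; install_pte_sketch()
is an illustrative name rather than real mm code, and locking, error
handling and accounting are omitted:

#include <linux/mm.h>

/*
 * Illustrative only: shows where the nios2 hook touched by this patch
 * runs, not the actual generic mm code.
 */
static void install_pte_sketch(struct vm_area_struct *vma,
			       unsigned long address,
			       pte_t *ptep, pte_t new_pte)
{
	/* The generic code installs the new PTE in the page table... */
	set_pte_at(vma->vm_mm, address, ptep, new_pte);

	/*
	 * ...and then calls the arch hook.  Before this patch, nios2
	 * flushed the matching TLB entry here, so the access retried
	 * after the fault returned still went through the fast TLB
	 * reload.  With the patch, reload_tlb_page() writes the new
	 * PTE into the way that held the stale entry, so the retried
	 * access hits the TLB directly.
	 */
	update_mmu_cache(vma, address, ptep);
}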
Diffstat (limited to 'arch/nios2/mm')
-rw-r--r--	arch/nios2/mm/cacheflush.c	7
-rw-r--r--	arch/nios2/mm/tlb.c	32
2 files changed, 30 insertions, 9 deletions
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index d58e7e80dc0d..65de1bd6a760 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -198,13 +198,14 @@ void flush_dcache_page(struct page *page)
 EXPORT_SYMBOL(flush_dcache_page);
 
 void update_mmu_cache(struct vm_area_struct *vma,
-		      unsigned long address, pte_t *pte)
+		      unsigned long address, pte_t *ptep)
 {
-	unsigned long pfn = pte_pfn(*pte);
+	pte_t pte = *ptep;
+	unsigned long pfn = pte_pfn(pte);
 	struct page *page;
 	struct address_space *mapping;
 
-	flush_tlb_page(vma, address);
+	reload_tlb_page(vma, address, pte);
 
 	if (!pfn_valid(pfn))
 		return;
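
The reload_tlb_page() call added above needs a prototype visible to
cacheflush.c.  The diffstat on this page is limited to arch/nios2/mm, so
the header side of the patch is not shown; presumably the declaration
matches the definition added in tlb.c below, along these lines:

/* Presumed declaration, e.g. in arch/nios2/include/asm/tlbflush.h
 * (outside the arch/nios2/mm diffstat shown on this page). */
extern void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte);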
diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c
index 2e49993d29ef..af8711885569 100644
--- a/arch/nios2/mm/tlb.c
+++ b/arch/nios2/mm/tlb.c
@@ -43,13 +43,11 @@ static unsigned long pteaddr_invalid(unsigned long addr)
  * This one is only used for pages with the global bit set so we don't care
  * much about the ASID.
  */
-void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
+static void replace_tlb_one_pid(unsigned long addr, unsigned long mmu_pid, unsigned long tlbacc)
 {
 	unsigned int way;
 	unsigned long org_misc, pid_misc;
 
-	pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
-
 	/* remember pid/way until we return. */
 	get_misc_and_pid(&org_misc, &pid_misc);
 
@@ -72,10 +70,11 @@ void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
 		if (pid != mmu_pid)
 			continue;
 
-		tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+		tlbmisc = mmu_pid | TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
 		WRCTL(CTL_TLBMISC, tlbmisc);
-		WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
-		WRCTL(CTL_TLBACC, 0);
+		if (tlbacc == 0)
+			WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
+		WRCTL(CTL_TLBACC, tlbacc);
 		/*
 		 * There should be only a single entry that maps a
 		 * particular {address,pid} so break after a match.
@@ -86,6 +85,20 @@ void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
 	WRCTL(CTL_TLBMISC, org_misc);
 }
 
+static void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
+{
+	pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
+
+	replace_tlb_one_pid(addr, mmu_pid, 0);
+}
+
+static void reload_tlb_one_pid(unsigned long addr, unsigned long mmu_pid, pte_t pte)
+{
+	pr_debug("Reload tlb-entry for vaddr=%#lx\n", addr);
+
+	replace_tlb_one_pid(addr, mmu_pid, pte_val(pte));
+}
+
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			unsigned long end)
 {
@@ -97,6 +110,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
+void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
+
+	reload_tlb_one_pid(addr, mmu_pid, pte);
+}
+
 /*
  * This one is only used for pages with the global bit set so we don't care
  * much about the ASID.
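
As a usage note, the two wrappers added in tlb.c differ only in the
TLBACC value they hand to the shared helper; a condensed restatement of
the call shapes visible in the hunks above:

/*
 * Condensed view of the new helpers (the real bodies are in the hunks
 * above; this only restates them):
 *
 *   flush_tlb_one_pid(addr, mmu_pid)
 *       -> replace_tlb_one_pid(addr, mmu_pid, 0);
 *          TLBACC == 0 invalidates the entry, and CTL_PTEADDR is pointed
 *          at pteaddr_invalid(addr) so the zeroed entry can never match.
 *
 *   reload_tlb_one_pid(addr, mmu_pid, pte)
 *       -> replace_tlb_one_pid(addr, mmu_pid, pte_val(pte));
 *          the way holding the stale entry is rewritten with the new PTE
 *          and the current mmu_pid, so the next access hits without a
 *          trip through the fast TLB reload.
 */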