/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include <asm/tlb_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	unsigned long		start, end;
};
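
/*
 * Reset the gather window to "empty" (start > end) so that the first
 * tlb_remove_tlb_entry() call snaps both bounds onto the removed page;
 * for a full-mm teardown the window simply covers all of user space.
 */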

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}
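
/*
 * !(start | (end + 1)) is non-zero only when start == 0 and
 * end == ~0UL, i.e. the "tear down the entire address space" case
 * used at process exit.
 */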

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));

	init_tlb_gather(tlb);
}
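
/*
 * Final teardown: a full-mm or forced flush invalidates the whole mm
 * in one go; partial ranges have already been flushed page-range-wise
 * by tlb_end_vma().
 */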

static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (tlb->fullmm || force)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
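
/*
 * Grow the gather window to cover a PTE that is being unmapped, so
 * that tlb_end_vma() can later flush the accumulated range with a
 * single flush_tlb_range() call.
 */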

static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)
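
/*
 * Illustrative flow only (not part of this header): a munmap-style
 * teardown would drive these hooks roughly like this; the locals
 * below are hypothetical.
 *
 *	struct mmu_gather tlb;
 *	unsigned long addr;
 *
 *	arch_tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	for (addr = start; addr < end; addr += PAGE_SIZE) {
 *		... clear the PTE at ptep ...
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *	}
 *	tlb_end_vma(&tlb, vma);			- flushes the gathered range
 *	arch_tlb_finish_mmu(&tlb, start, end, false);
 */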

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm && tlb->end) {
		flush_tlb_range(vma, tlb->start, tlb->end);
		init_tlb_gather(tlb);
	}
}
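
/*
 * The generic mmu_gather batching hooks are no-ops here: pages are
 * freed immediately in __tlb_remove_page() and the TLB is flushed
 * eagerly in tlb_end_vma(), so nothing is left to do at flush time.
 */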

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}
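
/*
 * The page_size-aware variants simply defer to the basic helpers;
 * since pages are freed one at a time, the size hint is irrelevant.
 */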

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	tlb_remove_page(tlb, page);
}
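
/*
 * Like tlb_remove_tlb_entry(), but grows the gather window by an
 * arbitrary (e.g. PMD-sized) span rather than a single page.
 */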

static inline void
tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
		    unsigned long size)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + size)
		tlb->end = address + size;
}
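
/*
 * With no page batching, a page-size change mid-gather needs no
 * special handling; the hook is an empty stub, and the self-#define
 * tells the generic code this arch provides its own version.
 */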

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)
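
/*
 * SH-4 and SH-5 can pin ("wire") a translation into the TLB so it is
 * never evicted; other parts have no such facility, so the stubs
 * below simply BUG() if anyone tries to use them.
 */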

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
				  unsigned long addr, pte_t pte)
{
	BUG();
}

static inline void tlb_unwire_entry(void)
{
	BUG();
}
#endif

#else /* CONFIG_MMU */

#define tlb_start_vma(tlb, vma)				do { } while (0)
#define tlb_end_vma(tlb, vma)				do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
#define tlb_flush(tlb)					do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */