/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macros below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)		asm ("tlbi " #op)
#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0" : : "r" (arg))
#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

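/*
 * A usage sketch: the trailing "1, 0" appended by __tlbi() makes "n" in
 * __TLBI_N() resolve to 1 when an address argument is supplied and to 0
 * when it is not, so that:
 *
 *	__tlbi(vmalle1is);	becomes	asm ("tlbi vmalle1is")
 *	__tlbi(vale1is, addr);	becomes	asm ("tlbi vale1is, %0" : : "r" (addr))
 */
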
#define __tlbi_user(op, arg) do {				\
	if (arm64_kernel_unmapped_at_el0())			\
		__tlbi(op, (arg) | USER_ASID_FLAG);		\
} while (0)

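/*
 * When the kernel is unmapped at EL0 (kpti), userspace runs under a
 * separate ASID derived by setting USER_ASID_FLAG (both are defined in
 * <asm/mmu.h>), so invalidations that must reach user mappings have to be
 * issued once per ASID: the routines below pair each __tlbi() with a
 * matching __tlbi_user().
 */
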
/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call. Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vm_area_struct describing address range
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vm_area_struct describing address range
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page. The address
 *		will be in the kernel's virtual memory space. Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */
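/*
 * Note on the "is" suffix: operations such as vmalle1 affect only the
 * issuing CPU, while the Inner Shareable variants (vmalle1is, vale1is,
 * ...) are broadcast to all CPUs in the Inner Shareable domain. This is
 * the difference between local_flush_tlb_all() and flush_tlb_all() below.
 */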
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);		/* complete prior page-table writes first */
	__tlbi(vmalle1);	/* all EL1 entries, this CPU only */
	dsb(nsh);		/* wait for the invalidation to complete */
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);		/* make page-table writes visible to all CPUs */
	__tlbi(vmalle1is);	/* all EL1 entries, broadcast */
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = ASID(mm) << 48;

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

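/*
 * Argument encoding used above and below: address-based TLBI operations
 * expect VA[55:12] in bits [43:0] of the register (hence the ">> 12",
 * independent of PAGE_SIZE) and the ASID in bits [63:48] (hence the
 * "<< 48").
 */
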
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}

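/*
 * vale1is above is the "last level" form: it need only invalidate entries
 * cached for the final level of the translation-table walk, which is
 * sufficient when a single pte has changed. The vae1is form (used below
 * when last_level is false) also removes cached intermediate-level (walk
 * cache) entries.
 */
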
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)

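/*
 * Concretely, MAX_TLB_RANGE is 1024 pages: 4MB with 4KB pages, 64MB with
 * 64KB pages. Ranges beyond this are handled by a single whole-ASID
 * (flush_tlb_mm) or whole-TLB (flush_tlb_all) operation rather than
 * page-by-page invalidation.
 */
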
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm) << 48;
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	/* step one page at a time in the VA>>12 encoding */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}

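/*
 * vaae1is ("by VA, all ASIDs") is used above because kernel mappings are
 * global rather than associated with a particular ASID; for the same
 * reason there is no __tlbi_user() pairing for kernel addresses.
 */
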
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	dsb(ish);
}

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_TLBFLUSH_H */