/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#include <linux/bitfield.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/mmu_notifier.h>
#include <asm/cputype.h>
#include <asm/mmu.h>
/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)	asm (ARM64_ASM_PREAMBLE			       \
				     "tlbi " #op "\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )
#define __TLBI_1(op, arg)	asm (ARM64_ASM_PREAMBLE			       \
				     "tlbi " #op ", %0\n"		       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
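/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * helper showing both forms of __tlbi(). 'va_op' is assumed to already be
 * a VA operand formatted as the TLBI instructions expect.
 */
static inline void example_tlbi_forms(unsigned long va_op)
{
	__tlbi(vmalle1is);	/* no-argument form: all stage-1 entries, Inner Shareable */
	__tlbi(vale1is, va_op);	/* register form: one last-level entry described by va_op */
}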
#define __tlbi_user(op, arg) do {					\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi(op, (arg) | USER_ASID_FLAG);			\
} while (0)
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
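/*
 * Worked example (illustrative): __TLBI_VADDR(0x1000, 1) evaluates to
 * ((0x1000 >> 12) & GENMASK_ULL(43, 0)) | (1UL << 48) = 0x0001000000000001,
 * i.e. VA[55:12] in bits [43:0] and the ASID in bits [63:48].
 */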
/*
 * Get translation granule of the system, which is decided by
 * PAGE_SIZE. Used by TTL.
 */
#define TLBI_TTL_TG_4K		1
#define TLBI_TTL_TG_16K		2
#define TLBI_TTL_TG_64K		3
static inline unsigned long get_trans_granule(void)
{
	switch (PAGE_SIZE) {
	case SZ_4K:
		return TLBI_TTL_TG_4K;
	case SZ_16K:
		return TLBI_TTL_TG_16K;
	case SZ_64K:
		return TLBI_TTL_TG_64K;
	default:
		return 0;
	}
}
/*
 * Level-based TLBI operations.
 *
 * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
 * the level at which the invalidation must take place. If the level is
 * wrong, no invalidation may take place. In the case where the level
 * cannot be easily determined, a 0 value for the level parameter will
 * perform a non-hinted invalidation.
 *
 * For Stage-2 invalidation, use the level values provided to that effect
 * in asm/stage2_pgtable.h.
 */
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)
#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) &&	\
	    level) {							\
		u64 ttl = level & 3;					\
		ttl |= get_trans_granule() << 2;			\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while (0)
#define __tlbi_user_level(op, arg, level) do {				\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
} while (0)
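/*
 * Illustrative sketch (hypothetical helper): a hinted invalidation of a
 * single last-level user entry, passing level 3 so that an ARMv8.4-TTL
 * CPU knows only leaf entries need to be considered.
 */
static inline void example_tlbi_leaf_hinted(unsigned long uaddr, unsigned long asid)
{
	unsigned long addr = __TLBI_VADDR(uaddr, asid);

	__tlbi_level(vale1is, addr, 3);		/* hinted: level-3 (PTE) entry */
	__tlbi_user_level(vale1is, addr, 3);	/* mirror into the user ASID under KPTI */
}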
/*
 * This macro creates a properly formatted VA operand for the TLB RANGE.
 * The value bit assignments are:
 *
 *		+----------+------+-------+-------+-------+----------------------+
 *		|   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
 *		+----------+------+-------+-------+-------+----------------------+
 *		|63      48|47  46|45   44|43   39|38   37|36                   0|
 *
 * The address range is determined by the formula:
 *	[BADDR, BADDR + (NUM + 1) * 2^(5 * SCALE + 1) * PAGESIZE)
 */
#define __TLBI_VADDR_RANGE(addr, asid, scale, num, ttl)		\
	({							\
		unsigned long __ta = (addr) >> PAGE_SHIFT;	\
		__ta &= GENMASK_ULL(36, 0);			\
		__ta |= (unsigned long)(ttl) << 37;		\
		__ta |= (unsigned long)(num) << 39;		\
		__ta |= (unsigned long)(scale) << 44;		\
		__ta |= get_trans_granule() << 46;		\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
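/*
 * Worked example (illustrative): with SCALE = 1 and NUM = 15 the operand
 * describes the range [BADDR, BADDR + (15 + 1) * 2^(5*1 + 1) * PAGESIZE),
 * i.e. 16 * 64 = 1024 pages starting at BADDR.
 */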
/* These macros are used by the TLBI RANGE feature. */
#define __TLBI_RANGE_PAGES(num, scale)	\
	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)

/*
 * Generate 'num' values from -1 to 30 with -1 rejected by the
 * __flush_tlb_range() loop below.
 */
#define TLBI_RANGE_MASK			GENMASK_ULL(4, 0)
#define __TLBI_RANGE_NUM(pages, scale)	\
	((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
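/*
 * Worked example (illustrative): __TLBI_RANGE_NUM(1024, 1) is
 * (((1024 >> 6) & 0x1f) - 1) = 15 and __TLBI_RANGE_PAGES(15, 1) is
 * 16 << 6 = 1024, so a 1024-page run is covered by a single range
 * operation at scale 1. MAX_TLBI_RANGE_PAGES is __TLBI_RANGE_PAGES(31, 3)
 * = 32 << 16 pages, i.e. 8GB worth of 4K pages.
 */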
/*
 *	TLB Invalidation
 *	================
 *
 *	This header file implements the low-level TLB invalidation routines
 *	(sometimes referred to as "flushing" in the kernel) for arm64.
 *
 *	Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 *	The following functions form part of the "core" TLB invalidation API,
 *	as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->vm_mm'. Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 *	Next, we have some undocumented invalidation routines that you probably
 *	don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect any walk-cache entries
 *		if 'last_level' is false.
 *
 *
 *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 *	on top of these routines, since that is our interface to the mmu_gather
 *	API as used by munmap() and friends.
 */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}
static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid;

	dsb(ishst);
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
					   unsigned long uaddr)
{
	unsigned long addr;

	dsb(ishst);
	addr = __TLBI_VADDR(uaddr, ASID(mm));
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
						    (uaddr & PAGE_MASK) + PAGE_SIZE);
}
static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}
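/*
 * Illustrative sketch (hypothetical helper, not part of the core API):
 * typical use of the routines documented above after a single user PTE
 * belonging to 'vma' has been cleared.
 */
static inline void example_flush_after_pte_clear(struct vm_area_struct *vma,
						 unsigned long uaddr)
{
	/*
	 * Only a last-level entry changed, so the single-page flush is
	 * enough; walk-cache entries are deliberately left alone.
	 */
	flush_tlb_page(vma, uaddr);
}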
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	/*
	 * TLB flush deferral is not required on systems which are affected by
	 * ARM64_WORKAROUND_REPEAT_TLBI, as the __tlbi()/__tlbi_user()
	 * implementation will issue two consecutive TLBI instructions with a
	 * dsb(ish) in between, defeating the purpose (i.e. saving the overall
	 * 'dsb ish' cost).
	 */
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
		return false;

	return true;
}
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
					     struct mm_struct *mm,
					     unsigned long uaddr)
{
	__flush_tlb_page_nosync(mm, uaddr);
}
/*
 * If mprotect/munmap/etc occurs during TLB batched flushing, we need to
 * synchronise all the TLBI issued with a DSB to avoid the race mentioned in
 * flush_tlb_batched_pending().
 */
static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	dsb(ish);
}
/*
 * To support TLB batched flush for multiple pages unmapping, we only send
 * the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
 * completion at the end in arch_tlbbatch_flush(). Since we've already issued
 * a TLBI for each page, only a DSB is needed to synchronise its effect on
 * the pages.
 *
 * This saves time otherwise spent waiting on a DSB, compared to issuing a
 * TLBI; DSB sequence for each page.
 */
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	dsb(ish);
}
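/*
 * Illustrative sketch (hypothetical helper): the batched-unmap pattern
 * described above: queue one non-synchronising TLBI per page, then pay
 * for a single DSB at the end.
 */
static inline void example_batched_unmap_flush(struct arch_tlbflush_unmap_batch *batch,
					       struct mm_struct *mm,
					       unsigned long uaddr, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		arch_tlbbatch_add_pending(batch, mm, uaddr + i * PAGE_SIZE);

	arch_tlbbatch_flush(batch);	/* one dsb(ish) synchronises all of the above */
}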
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_DVM_OPS	PTRS_PER_PTE
/*
 * __flush_tlb_range_op - Perform TLBI operation upon a range
 *
 * @op:	TLBI instruction that operates on a range (has 'r' prefix)
 * @start:	The start address of the range
 * @pages:	Range as the number of pages from 'start'
 * @stride:	Flush granularity
 * @asid:	The ASID of the task (0 for IPA instructions)
 * @tlb_level:	Translation Table level hint, if known
 * @tlbi_user:	If 'true', call an additional __tlbi_user()
 *		(typically for user ASIDs). 'false' for IPA instructions
 *
 * When the CPU does not support TLB range operations, flush the TLB
 * entries one by one at the granularity of 'stride'. If the TLB
 * range ops are supported, then:
 *
 * 1. If 'pages' is odd, flush the first page through non-range
 *    operations;
 *
 * 2. For remaining pages: the minimum range granularity is decided
 *    by 'scale', so multiple range TLBI operations may be required.
 *    Start from scale = 0, flush the corresponding number of pages
 *    ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
 *    until no pages are left.
 *
 * Note that certain ranges can be represented by either num = 31 and
 * scale or num = 0 and scale + 1. The loop below favours the latter
 * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
 */
#define __flush_tlb_range_op(op, start, pages, stride,			\
				asid, tlb_level, tlbi_user)		\
do {									\
	int num = 0;							\
	int scale = 0;							\
	unsigned long addr;						\
	while (pages > 0) {						\
		if (!system_supports_tlb_range() ||			\
		    pages % 2 == 1) {					\
			addr = __TLBI_VADDR(start, asid);		\
			__tlbi_level(op, addr, tlb_level);		\
			if (tlbi_user)					\
				__tlbi_user_level(op, addr, tlb_level);	\
			start += stride;				\
			pages -= stride >> PAGE_SHIFT;			\
			continue;					\
		}							\
		num = __TLBI_RANGE_NUM(pages, scale);			\
		if (num >= 0) {						\
			addr = __TLBI_VADDR_RANGE(start, asid, scale,	\
						  num, tlb_level);	\
			__tlbi(r##op, addr);				\
			if (tlbi_user)					\
				__tlbi_user(r##op, addr);		\
			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
			pages -= __TLBI_RANGE_PAGES(num, scale);	\
		}							\
		scale++;						\
	}								\
} while (0)
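/*
 * Worked example (illustrative): with range ops supported, stride == PAGE_SIZE
 * and pages == 1027, the loop above issues one non-range TLBI for the odd
 * leading page, then __TLBI_RANGE_NUM(1026, 0) == 0 covers 2 pages at
 * scale 0, and __TLBI_RANGE_NUM(1024, 1) == 15 covers the remaining 1024
 * pages at scale 1: three TLBI operations in total.
 */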
#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)	\
	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	unsigned long asid, pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/*
	 * When not using TLB range ops, we can handle up to
	 * (MAX_DVM_OPS - 1) pages;
	 * When using TLB range ops, we can handle up to
	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
	 */
	if ((!system_supports_tlb_range() &&
	     (end - start) >= (MAX_DVM_OPS * stride)) ||
	    pages >= MAX_TLBI_RANGE_PAGES) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	dsb(ishst);
	asid = ASID(vma->vm_mm);

	if (last_level)
		__flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
	else
		__flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);

	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 * Set the tlb_level to 0 because we cannot get enough information here.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}

#endif /* __ASM_TLBFLUSH_H */