/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2
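
/*
 * Operand quick-reference for the tlbie/tlbiel forms used below, per
 * the Power ISA v3.0: RIC selects what is invalidated (0 = TLB
 * entries, 1 = Page Walk Cache entries, 2 = both), while the IS field
 * in RB selects the scope (0 = a single VA, 1 = all entries for a
 * PID, 2 = all entries for an LPID, 3 = all entries).
 */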

static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
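
/*
 * fixup_tlbie_va() and fixup_tlbie_pid() work around tlbie errata on
 * some POWER9 revisions: when CPU_FTR_P9_TLBIE_ERAT_BUG or
 * CPU_FTR_P9_TLBIE_STQ_BUG is set, an extra ptesync plus a second
 * invalidation is issued after the primary tlbie so the flush is
 * reliably observed.
 */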
static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,
				  unsigned long ap)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, 0, ap, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
	}
}

static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

/*
 * We use 128 sets in radix mode and 256 sets in HPT mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
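
/*
 * tlbiel only invalidates the executing thread's local TLB, one
 * congruence class ("set") per instruction, which is why
 * _tlbiel_pid() above loops over POWER9_TLB_SETS_RADIX sets.
 */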

static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void fixup_tlbie_pid(unsigned long pid)
{
	/*
	 * We can use any address for the invalidation, pick one which is
	 * probably unused as an optimisation.
	 */
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_pid(0, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Work around the fact that the "ric" argument to __tlbie_pid
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_pid(pid, RIC_FLUSH_TLB);
		fixup_tlbie_pid(pid);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_pid(pid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_pid(pid, RIC_FLUSH_ALL);
		fixup_tlbie_pid(pid);
	}
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	fixup_tlbie_va(va, pid, ap);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
static void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
					  mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_TLB);
	else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
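
/*
 * mm_is_thread_local() reports whether the mm has only been active on
 * the current CPU. If so, the cheaper core-local tlbiel sequence is
 * sufficient; otherwise the invalidation must be broadcast with
 * tlbie.
 */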

static void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);
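
/*
 * The PWC flush is deferred here: setting need_flush_all makes
 * radix__tlb_flush() below do a RIC_FLUSH_ALL for the whole mm when
 * the mmu_gather completes, rather than flushing immediately.
 */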

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
				    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Kernel translations live under PID 0, so flush everything for it */
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * if page size is not something we understand, do a full mm flush
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else if (tlb->need_flush_all) {
		tlb->need_flush_all = 0;
		radix__flush_all_mm(mm);
	} else
		radix__flush_tlb_mm(mm);
}
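
/*
 * radix__tlb_flush() above picks the cheapest correct flush: a range
 * flush when the gather tracked a single known page size, a
 * RIC_FLUSH_ALL when a PWC flush was requested via need_flush_all,
 * and a full PID TLB flush otherwise (fullmm or unknown page size).
 */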

#define TLB_FLUSH_ALL -1UL
/*
 * Number of pages above which we will do a broadcast tlbie. Just a
 * number at this point, copied from x86.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
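
/*
 * Example: with a 64K base page size, 33 pages is roughly 2MB, so any
 * larger range is flushed with a single full-PID invalidation in
 * radix__flush_tlb_range_psize() below rather than page by page.
 */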

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto err_out;

	/* Above the ceiling, one full-PID flush beats per-page tlbie */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto err_out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
err_out:
	preempt_enable();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(mmu_virtual_psize);
	unsigned long pid, end;

	pid = mm ? mm->context.id : 0;
	preempt_disable();
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		preempt_enable();
		return;
	}

	/* Otherwise first do the PWC, the collapse removed a page table */
	if (local)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
	else
		_tlbie_pid(pid, RIC_FLUSH_PWC);

	/* Then iterate the pages */
	end = addr + HPAGE_PMD_SIZE;
	for (; addr < end; addr += PAGE_SIZE) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}

no_context:
	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
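
/*
 * radix__flush_tlb_lpid_va() and radix__flush_tlb_lpid() below are
 * partition scoped (prs = 0): RS carries an LPID rather than a PID,
 * so they target guest translations rather than host process
 * translations.
 */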

void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb,rs,prs,r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb,rs,prs,r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
	unsigned long rb,prs,r,rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * now flush guest entries by passing PRS = 1 and LPID != 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * now flush host entries by passing PRS = 0 and LPID == 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track page size in pte only for DD1, so we can
	 * call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought in obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */