// SPDX-License-Identifier: GPL-2.0
/*
 * PPC Huge TLB Page Support for Book3E MMU
 *
 * Copyright (C) 2009 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/mmu.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
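
/*
 * On 64-bit (e6500-style) cores the TLB1 entry to use is taken round-robin
 * from the per-core tlb_core_data: hand out esel_next and advance it,
 * wrapping from esel_max back around to esel_first.
 */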
static inline int tlb1_next(void)
{
	struct paca_struct *paca = get_paca();
	struct tlb_core_data *tcd;
	int this, next;

	tcd = paca->tcd_ptr;
	this = tcd->esel_next;

	next = this + 1;
	if (next >= tcd->esel_max)
		next = tcd->esel_first;

	tcd->esel_next = next;
	return this;
}
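
/*
 * The threads of an e6500-style core share TLB1, so the search/write
 * sequence and the esel allocation have to be serialized with the lock
 * byte in the shared tlb_core_data; the token written is cpu id + 1.
 */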
static inline void book3e_tlb_lock(void)
{
	struct paca_struct *paca = get_paca();
	unsigned long tmp;
	int token = smp_processor_id() + 1;

	/*
	 * Besides being unnecessary in the absence of SMT, this
	 * check prevents trying to do lbarx/stbcx. on e5500 which
	 * doesn't implement either feature.
	 */
	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	asm volatile(".machine push;"
		     ".machine e6500;"
		     "1: lbarx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2f;"
		     "stbcx. %2, 0, %1;"
		     "bne 1b;"
		     "b 3f;"
		     "2: lbzx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2b;"
		     "b 1b;"
		     "3:"
		     ".machine pop;"
		     : "=&r" (tmp)
		     : "r" (&paca->tcd_ptr->lock), "r" (token)
		     : "memory");
}
static inline void book3e_tlb_unlock(void)
{
	struct paca_struct *paca = get_paca();

	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	isync();
	paca->tcd_ptr->lock = 0;
}
#else
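/*
 * On 32-bit FSL parts there is no tlb_core_data; TLB1 slots above the
 * boot-time tlbcam_index are handed out round-robin through the per-CPU
 * next_tlbcam_idx, and no lock is needed.
 */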
static inline int tlb1_next(void)
{
	int index, ncams;

	ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	index = this_cpu_read(next_tlbcam_idx);

	/* Just round-robin the entries and wrap when we hit the end */
	if (unlikely(index == ncams - 1))
		__this_cpu_write(next_tlbcam_idx, tlbcam_index);
	else
		__this_cpu_inc(next_tlbcam_idx);

	return index;
}

static inline void book3e_tlb_lock(void)
{
}

static inline void book3e_tlb_unlock(void)
{
}
#endif
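
/*
 * Check whether a translation for @ea in the address space identified by
 * @pid is already present in the TLB: point MAS6 at the PID, tlbsx, and
 * return the valid bit out of MAS1 (SPR 0x271).
 */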
static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
	int found = 0;

	mtspr(SPRN_MAS6, pid << 16);
	asm volatile(
		"tlbsx	0,%1\n"
		"mfspr	%0,0x271\n"
		"srwi	%0,%0,31\n"
		: "=&r"(found) : "r"(ea));

	return found;
}
static void
book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
{
	unsigned long mas1, mas2;
	u64 mas7_3;
	unsigned long psize, tsize, shift;
	unsigned long flags;
	struct mm_struct *mm;
	int index;

	if (unlikely(is_kernel_addr(ea)))
		return;

	mm = vma->vm_mm;

	psize = vma_mmu_pagesize(vma);
	shift = __ilog2(psize);
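	/*
	 * The TSIZE value programmed into MAS1 below is log2 of the page
	 * size in KB (shift - 10); flush_hugetlb_page() uses the same
	 * encoding when it later invalidates the entry.
	 */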
	tsize = shift - 10;
	/*
	 * We can't be interrupted while we're setting up the MAS
	 * registers or after we've confirmed that no tlb exists.
	 */
	local_irq_save(flags);

	book3e_tlb_lock();

	if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
		book3e_tlb_unlock();
		local_irq_restore(flags);
		return;
	}

	/* We have to use the CAM(TLB1) on FSL parts for hugepages */
	index = tlb1_next();
	mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));

	mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
	mas2 = ea & ~((1UL << shift) - 1);
	mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
	mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
	mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
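	/*
	 * For a clean page, drop the supervisor/user write permission bits
	 * so the first store faults and goes through the normal dirty
	 * tracking path instead of silently dirtying the page.
	 */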
160 mas7_3 &= ~(MAS3_SW|MAS3_UW);

	mtspr(SPRN_MAS1, mas1);
	mtspr(SPRN_MAS2, mas2);

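	/*
	 * The physical address is split across MAS7 (upper 32 bits) and
	 * MAS3; MAS7 is only written on parts that support more than
	 * 32 bits of physical address (MMU_FTR_BIG_PHYS).
	 */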
	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
		mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
	mtspr(SPRN_MAS3, lower_32_bits(mas7_3));

	asm volatile ("tlbwe");

	book3e_tlb_unlock();
	local_irq_restore(flags);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 *
 * This must always be called with the pte lock held.
 */
void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
}
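
/*
 * Drop the TLB1 entry covering a huge page; the tsize passed to
 * __flush_tlb_page() is the same log2-KB encoding used by the preload
 * path above.
 */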
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct hstate *hstate = hstate_file(vma->vm_file);
	unsigned long tsize = huge_page_shift(hstate) - 10;

	__flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
}