/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>
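
/*
 * Number and size (expressed as a shift) of the PMD fragments carved out
 * of each page-table page.  These back the PMD_FRAG_NR/PMD_FRAG_SIZE
 * values used by the fragment allocator below and are exported so the
 * inline pgalloc helpers can use them; they are presumably initialised by
 * the early hash/radix MMU setup.
 */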
unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, ie, a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}
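
/*
 * Test and clear the "accessed" (young) bit in a huge PMD; this simply
 * wraps __pmdp_test_and_clear_young(), passing the mm taken from the vma.
 */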
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd.  We should not be called for updating
 * an existing pmd entry.  That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}

/*
 * Serialize against find_current_mm_pte which does lock-less
 * lookup in page tables with local interrupts disabled.  For huge pages
 * it casts pmd_t to pte_t.  Since the format of pte_t is different from
 * pmd_t we want to prevent transit from pmd pointing to page table
 * to pmd pointing to huge page (and back) while interrupts are disabled.
 * We clear pmd to possibly replace it with page table pointer in
 * different code paths.  So make sure we wait for the parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}
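
/*
 * The helpers below build huge-PMD values directly: pfn_pmd() places the
 * pfn in the RPN field and ORs in the protection bits, mk_pmd() does the
 * same for a struct page, and pmd_modify() swaps the protection bits while
 * keeping only the bits covered by _HPAGE_CHG_MASK.
 */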
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables.  We use it to preload an HPTE into the hash table corresponding
 * to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	if (radix_enabled())
		prefetch((void *)addr);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
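
/*
 * The partition table is a single system-wide table indexed by LPID, with
 * two doublewords per entry (patb0/patb1), used for both hash and radix
 * partitions.  It is 1UL << PATB_SIZE_SHIFT bytes and is allocated below
 * with alignment equal to its size, since the base address and the size
 * encoding share the PTCR register.
 */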
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register: the physical base
	 * of the table in the high bits, the encoded table size
	 * (PATB_SIZE_SHIFT - 12) in the low bits.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
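
/*
 * Install both doublewords of the partition-table entry for @lpid.  The
 * flush that follows is keyed off what the entry *used* to contain; the
 * trailing PPC_TLBIE_5 operands are RIC/PRS/R, so RIC=2 requests a full
 * flush and R picks radix (1) or hash (0) to match the old entry.
 */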
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	/* do we need fixup here ?*/
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
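
/*
 * PMD pages are handed out in fragments: a full page is split into
 * PMD_FRAG_NR pieces of PMD_FRAG_SIZE bytes each.  mm->context.pmd_frag
 * points at the next unused fragment of the current page (NULL once the
 * page is exhausted), and page->pt_frag_refcount counts the fragments
 * still in use, so the backing page is only freed when the last one goes.
 */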
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the cached
		 * page NULL.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If nobody installed a fragment page while we were allocating,
	 * publish this one; otherwise the page we allocated stays a
	 * single-fragment page and is returned with refcount 1.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}
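
/*
 * Drop one fragment reference on the page backing @pmd.  Only when the
 * last fragment of that page is freed do we run the pmd page destructor
 * and return the page to the allocator.
 */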
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}
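
/*
 * PTE fragments mirror the PMD scheme above, using PTE_FRAG_SIZE,
 * PTE_FRAG_NR and the mm->context.pte_frag cursor.  The @kernel flag
 * skips GFP accounting and the page constructor/destructor for kernel
 * page tables.
 */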
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the cached
		 * PTE page NULL.
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If nobody installed a fragment page while we were allocating,
	 * publish this one; otherwise the page we allocated stays a
	 * single-fragment page and is returned with refcount 1.
	 */
	if (likely(!mm->context.pte_frag)) {
		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		if (!kernel)
			pgtable_page_dtor(page);
		__free_page(page);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
	/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
	/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
	/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}
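
/*
 * Page tables freed under a TLB gather must survive until the flush.
 * Since the tables are aligned well beyond MAX_PGTABLE_INDEX_SIZE, the
 * low bits of the pointer are free to carry the index above: it is ORed
 * in before handing the pointer to tlb_remove_table(), and
 * __tlb_remove_table() unpacks it again before calling pgtable_free().
 */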
#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	return pgtable_free(table, index);
}
#endif

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
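
/*
 * direct_pages_count[] holds how many pages of each size back the kernel
 * linear ("direct") mapping.  The shifts below turn a page count into kB:
 * 4K pages are 4 kB each (<< 2), 64K pages 64 kB (<< 6), 2M pages
 * 2048 kB (<< 11) and 1G pages 1048576 kB (<< 20).
 */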
void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the memory with one size, mmu_linear_psize,
	 * so don't bother to print these on hash.
	 */
	if (!radix_enabled())
		return;
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information and they are stored at PTRS_PER_PMD offset from the related
 * pmd location.  Hence a pmd move requires deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page.  Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we use the deposited table always, irrespective of anon or not.
 * With radix we use the deposited table only for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}