#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pgtable_page_ctor(pte)) {

static int __init setup_userpte(char *arg)
	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

early_param("userpte", setup_userpte);
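
/*
 * Illustrative usage (not part of this file): on a 32-bit kernel built
 * with CONFIG_HIGHPTE, booting with
 *
 *	vmlinuz ... userpte=nohigh
 *
 * clears __GFP_HIGHMEM from __userpte_alloc_gfp, so user PTE pages come
 * from lowmem only, i.e. __userpte_alloc_gfp ends up equal to plain
 * PGALLOC_GFP.
 */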
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
	struct page *page = virt_to_page(pmd);

	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
	tlb->need_flush_all = 1;
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
static inline void pgd_list_add(pgd_t *pgd)
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);

static inline void pgd_list_del(pgd_t *pgd)
	struct page *page = virt_to_page(pgd);

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
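
/*
 * Worked example (values assumed for a 32-bit PAE kernel with the
 * default 3G/1G split, not spelled out in this file): PTRS_PER_PGD is 4
 * and KERNEL_PGD_BOUNDARY == pgd_index(PAGE_OFFSET) == 3, so a
 * SHARED_KERNEL_PMD build treats only the 3 user entries as unshared,
 * while a !SHARED_KERNEL_PMD build (e.g. Xen PV) treats all 4 as
 * unshared.
 */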
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;

struct mm_struct *pgd_page_get_mm(struct page *page)
	return (struct mm_struct *)page->index;

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {

static void pgd_dtor(pgd_t *pgd)
	if (SHARED_KERNEL_PMD)

	spin_lock(&pgd_lock);
	spin_unlock(&pgd_lock);
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */
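
/*
 * Sketch of a consumer of this list (modelled on the 32-bit
 * vmalloc_sync_all()/pageattr sync path, not code from this file): a
 * kernel-mapping update is pushed into every pgd on pgd_list while
 * pgd_lock is held:
 *
 *	spin_lock(&pgd_lock);
 *	list_for_each_entry(page, &pgd_list, lru) {
 *		spinlock_t *pgt_lock =
 *			&pgd_page_get_mm(page)->page_table_lock;
 *
 *		spin_lock(pgt_lock);
 *		... copy/validate the affected kernel entry ...
 *		spin_unlock(pgt_lock);
 *	}
 *	spin_unlock(&pgd_lock);
 */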
#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */

#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
	for (i = 0; i < PREALLOCATED_PMDS; i++)
		pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
		free_page((unsigned long)pmds[i]);

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
	gfp_t gfp = PGALLOC_GFP;

	gfp &= ~__GFP_ACCOUNT;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);

		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
	for (i = 0; i < PREALLOCATED_PMDS; i++) {

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
/*
 * Xen paravirt assumes that the pgd table occupies one whole page, and
 * the 64-bit kernel makes the same assumption.
 *
 * A PAE kernel that is not running as a Xen domain, however, only needs
 * 32 bytes for the pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
	/*
	 * When a PAE kernel is running as a Xen domain, it does not use a
	 * shared kernel pmd, and that requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)

	/*
	 * When a PAE kernel is not running as a Xen domain, it uses a
	 * shared kernel pmd, which does not require a whole page for the
	 * pgd: 32 bytes are enough. Create a 32-byte slab cache at boot
	 * time for those pgd allocations.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,

core_initcall(pgd_cache_init);
static inline pgd_t *_pgd_alloc(void)
	/*
	 * If SHARED_KERNEL_PMD is not set, the PAE kernel is running as a
	 * Xen domain and we allocate a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	/*
	 * Otherwise the PAE kernel is not running as a Xen domain and a
	 * 32-byte slab object is enough for the pgd, which saves memory.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);

static inline void _pgd_free(pgd_t *pgd)
	if (!SHARED_KERNEL_PMD)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);

#else /* !CONFIG_X86_PAE */

static inline pgd_t *_pgd_alloc(void)
	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);

static inline void _pgd_free(pgd_t *pgd)
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);

#endif /* CONFIG_X86_PAE */
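
/*
 * Size check (assumed PAE values, for illustration only): with 4
 * 8-byte entries, PGD_SIZE is 4 * 8 = 32 bytes, so the kmem_cache path
 * hands out 32-byte pgds, while the non-PAE/64-bit variant of
 * _pgd_alloc() allocates full pages (PGD_ALLOCATION_ORDER is typically
 * one page; kernels that keep a kernel/user pgd pair use order 1).
 */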
pgd_t *pgd_alloc(struct mm_struct *mm)
	pmd_t *pmds[PREALLOCATED_PMDS];

	if (preallocate_pmds(mm, pmds) != 0)

	if (paravirt_pgd_alloc(mm) != 0)

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
	pgd_mop_up_pmds(mm, pgd);
	paravirt_pgd_free(mm, pgd);
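
/*
 * Usage sketch (modelled on the kernel/fork.c callers, not code from
 * this file): a new mm gets its pgd from pgd_alloc() and returns it via
 * pgd_free() when the mm is torn down:
 *
 *	mm->pgd = pgd_alloc(mm);
 *	if (unlikely(!mm->pgd))
 *		return -ENOMEM;
 *	...
 *	pgd_free(mm, mm->pgd);
 */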
/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		set_pte(ptep, entry);
		pte_update(vma->vm_mm, address, ptep);
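
/*
 * Illustrative caller (a sketch of the do_wp_page()/wp_page_reuse()
 * pattern in mm/memory.c, not code from this file): the fault handler
 * builds a young, dirty, possibly-writable PTE and lets
 * ptep_set_access_flags() decide whether the entry must be rewritten:
 *
 *	entry = pte_mkyoung(orig_pte);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, 1))
 *		update_mmu_cache(vma, address, ptep);
 */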
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		set_pmd(pmdp, entry);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

		pte_update(vma->vm_mm, addr, ptep);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
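
/*
 * Example consumer (a sketch of the rmap page-aging path, assumptions
 * noted, not code from this file): reclaim reaches this function via
 * ptep_clear_flush_young_notify() and only counts the reference,
 * relying on the reasoning above for why no TLB flush happens here:
 *
 *	if (ptep_clear_flush_young_notify(vma, address, pte))
 *		referenced++;
 */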
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
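
/*
 * Illustrative call (hypothetical values, not code from this file): a
 * hypervisor guest that wants the top 16 MB of virtual address space
 * for itself could do, early in boot:
 *
 *	reserve_top_address(16UL * 1024 * 1024);
 *
 * after which __FIXADDR_TOP, and the fixmap built below it, sits just
 * under the reserved hole, rounded down to a PMD boundary.
 */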
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {

	set_pte_vaddr(address, pte);

void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
		       phys_addr_t phys, pgprot_t flags)
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
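
/*
 * Example user (an assumption about the local APIC setup code, not
 * verified against this tree): mapping a device page into a fixed slot
 * goes through the set_fixmap*() helpers, e.g. something like
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
 *
 * which on bare metal resolves to native_set_fixmap() above.
 */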
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))

	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));
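
/*
 * Fallback sketch (modelled on the lib/ioremap.c mapping loop, with the
 * alignment checks abbreviated; not code from this file): callers
 * attempt the large mapping and drop to the next smaller page size when
 * it is refused, as the comment above asks them to:
 *
 *	if (ioremap_pud_enabled() &&
 *	    ((addr | next | phys_addr) & ~PUD_MASK) == 0 &&
 *	    pud_set_huge(pud, phys_addr, prot))
 *		continue;
 *
 * On failure the loop falls through to pmd_set_huge() and, failing
 * that, to ordinary 4K ptes.
 */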
/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));
/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
	if (pud_large(*pud)) {

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
	if (pmd_large(*pmd)) {

/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
	pmd = (pmd_t *)pud_page_vaddr(*pud);
	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i]))

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd_sv[i])) {
			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
			free_page((unsigned long)pte);

	free_page((unsigned long)pmd_sv);

	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
	pte = (pte_t *)pmd_page_vaddr(*pmd);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	free_page((unsigned long)pte);

#else /* !CONFIG_X86_64 */

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
	return pud_none(*pud);

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
	return pmd_none(*pmd);

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */