// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>
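
/*
 * Upgrade a PTE's access/dirty bits from the generic fault path. Note
 * that the "dirty" argument is unused here: the new flag bits arrive
 * pre-merged in "entry".
 */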
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	if (!pte_same(ptep_get(ptep), entry))
		__set_pte_at(ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
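
/*
 * Atomically test and clear the Accessed bit in a PTE. Reclaim uses
 * this to age pages; the early exit avoids an atomic op when the bit
 * is already clear.
 */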
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep)
{
	if (!pte_young(ptep_get(ptep)))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
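
/*
 * RISC-V selects its page-table depth (Sv39/Sv48/Sv57) at boot, so
 * level folding is a runtime decision rather than a compile-time one:
 * when a level is absent, the walk helpers below simply hand back the
 * entry they were given, cast to the next level down.
 */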
#ifdef CONFIG_64BIT
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if (pgtable_l4_enabled)
		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

	return (pud_t *)p4d;
}

p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (pgtable_l5_enabled)
		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

	return (p4d_t *)pgd;
}
#endif
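
/*
 * Hooks used by the generic vmalloc/ioremap code to create and tear
 * down huge kernel mappings. Each *_set_huge() returns 1 on success
 * and 0 to make the caller fall back to the next smaller size, roughly:
 *
 *	if (can_use_pud && pud_set_huge(pud, phys, prot))
 *		return;		// mapped as a single gigapage
 *	// otherwise descend and try pmd_set_huge(), then base PTEs
 *
 * (Illustrative sketch only, not the exact mm/vmalloc.c logic.)
 */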
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	/* No huge mappings at the P4D level: tell the caller to fall back. */
	return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}
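
/* Install a leaf (gigapage) entry directly at the PUD level. */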
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

	set_pud(pud, new_pud);
	return 1;
}
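
/*
 * Clear a huge PUD. Returns 0 if the entry is a table rather than a
 * leaf, so the unmap walk descends into the PMD table instead.
 */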
int pud_clear_huge(pud_t *pud)
{
	if (!pud_leaf(pudp_get(pud)))
		return 0;
	pud_clear(pud);
	return 1;
}
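
/*
 * Free the PMD table (and any PTE tables hanging off it) sitting under
 * a PUD so the slot can be remapped as a huge page. The PUD is cleared
 * and the TLB flushed before the tables are freed, so no walker can
 * still reach them.
 */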
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd = pud_pgtable(pudp_get(pud));
	int i;

	pud_clear(pud);
	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd[i])) {
			pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

			pte_free_kernel(NULL, pte);
		}
	}

	pmd_free(NULL, pmd);
	return 1;
}
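
/* Install a leaf (megapage) entry directly at the PMD level. */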
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

	set_pmd(pmd, new_pmd);
	return 1;
}
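
/* As pud_clear_huge(), one level down: only leaf entries are cleared. */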
int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_leaf(pmdp_get(pmd)))
		return 0;
	pmd_clear(pmd);
	return 1;
}
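
/*
 * PMD-level counterpart of pud_free_pmd_page(): detach and free the
 * PTE table under a PMD, flushing the TLB in between.
 */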
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

	pmd_clear(pmd);
	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	pte_free_kernel(NULL, pte);
	return 1;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
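/*
 * Called by khugepaged when collapsing a range of base pages into a
 * THP: atomically withdraw the old table entry and make sure no CPU
 * still holds a translation through it.
 */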
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
	/*
	 * When leaf PTE entries (regular pages) are collapsed into a leaf
	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
	 * valid leaf PTE at the level 1 page table. Since the sfence.vma
	 * forms that specify an address only apply to leaf PTEs, we need a
	 * global flush here. collapse_huge_page() assumes these flushes are
	 * eager, so just do the fence here.
	 */
	flush_tlb_mm(vma->vm_mm);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */