// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>

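/* Bits to set and bits to clear in every page-table entry visited by a walk. */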
struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

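/* Apply the caller's masks (passed via walk->private) to a raw entry value. */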
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

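/*
 * Per-level walk callbacks: rewrite leaf entries in place and let the
 * walker descend through non-leaf entries.
 */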
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = READ_ONCE(*pgd);

	if (pgd_leaf(val)) {
		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
		set_pgd(pgd, val);
	}

	return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = READ_ONCE(*p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = READ_ONCE(*pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = READ_ONCE(*pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = READ_ONCE(*pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}

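/* Callback table handed to the page-table walker for every change below. */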
static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
};

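/*
 * Walk [addr, addr + numpages * PAGE_SIZE) in init_mm, applying the given
 * masks to every mapping, then flush the affected kernel TLB range.
 */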
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_write_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);
	mmap_write_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);

	return ret;
}

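/* Helpers for changing the protections of a range of kernel pages. */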
int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

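/*
 * set_direct_map_*_noflush() invalidate or restore a single page of the
 * kernel's direct mapping; callers handle any required TLB flushing.
 */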
int set_direct_map_invalid_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(_PAGE_PRESENT)
	};

	mmap_read_lock(&init_mm);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	mmap_read_unlock(&init_mm);

	return ret;
}

int set_direct_map_default_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = PAGE_KERNEL,
		.clear_mask = __pgprot(0)
	};

	mmap_read_lock(&init_mm);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	mmap_read_unlock(&init_mm);

	return ret;
}

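/*
 * For DEBUG_PAGEALLOC: map or unmap pages in the direct mapping as they are
 * allocated and freed, so stray accesses to free pages fault.
 */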
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	if (enable)
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(_PAGE_PRESENT), __pgprot(0));
	else
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(0), __pgprot(_PAGE_PRESENT));
}
#endif