// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/page-states.h>

static int cmma_flag = 1;

static int __init cmma(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		cmma_flag = enabled;
	return 1;
}
__setup("cmma=", cmma);
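
/*
 * Probe for the ESSA instruction: issue an ESSA_GET_STATE request and
 * rely on the exception table entry to leave rc at -EOPNOTSUPP if the
 * instruction is not available.
 */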
static inline int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = -EOPNOTSUPP;

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0:	xgr	%[rc],%[rc]\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [rc] "+&d" (rc), [tmp] "+&d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE));
	return rc;
}
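
/*
 * cmma_flag: 0 = CMMA disabled, 1 = ESSA available,
 * 2 = ESSA plus the no-dat page states (facility 147).
 */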
void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}
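
/*
 * Helpers issuing the ESSA instruction to query the state of a page or
 * to change the state of each 4K page of an order-sized block.
 */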
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}
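
/*
 * Walk the kernel page tables and mark every page that itself holds a
 * page table with PG_arch_1. The region and segment tables occupy four
 * pages each, hence the loops over four struct pages; pte tables are
 * marked as a single page.
 */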
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = phys_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = phys_to_page(pud_val(*pud));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = phys_to_page(p4d_val(*p4d));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = phys_to_page(pgd_val(*pgd));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}
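
/*
 * Convert the kernel's pages to the stable/no-dat state once at boot.
 * Pages that hold page tables keep the dat state, and pages that are
 * still on the free lists are skipped.
 */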
void __init cmma_init_nodat(void)
{
	struct page *page;
	unsigned long start, end, ix;
	int i;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}
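
/*
 * Page allocator hooks: pages given back to the allocator are flagged
 * unused so the hypervisor may discard their content; pages handed out
 * are made stable again before first use.
 */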
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}