// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>

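/*
 * Guest page hinting uses the ESSA instruction ("extract and set
 * storage attributes", opcode 0xb9ab) to tell the hypervisor how each
 * 4K page frame is used, so that host memory management can act on it
 * (CMMA, the Collaborative Memory Management Assist).
 *
 * cmma_flag: 0 = page hinting disabled, 1 = the basic page states are
 * used, 2 = facility 147 is installed and the stable/no-dat states
 * are used as well.
 */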
static int cmma_flag = 1;

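/* Parser for the "cmma=" kernel command line parameter */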
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

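/*
 * Probe for ESSA: issue ESSA_GET_STATE and let the exception table
 * catch the program check on machines without the instruction. rc
 * starts out as -EOPNOTSUPP and is only cleared by the "la %0,0" if
 * the instruction completed without a fault.
 */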
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1");

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%1,%1,%2,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=&d" (rc), "+&d" (tmp)
		: "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
	return rc;
}

void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

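/*
 * The helpers below issue one ESSA operation per 4K page frame. The
 * instruction takes the physical frame address plus an operation code
 * and returns the previous page state in the first operand register.
 */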
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}

static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = virt_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = virt_to_page(pud_val(*pud));
			for (i = 0; i < 3; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = virt_to_page(p4d_val(*p4d));
			for (i = 0; i < 3; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = virt_to_page(pgd_val(*pgd));
			for (i = 0; i < 3; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

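/*
 * Put all kernel pages that are neither page table pages nor on a
 * free list into the stable/no-dat state. Free pages get their state
 * later, when arch_alloc_page() hands them out.
 */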
void __init cmma_init_nodat(void)
{
	struct memblock_region *reg;
	struct page *page;
	unsigned long start, end, ix;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);
		end = memblock_region_memory_end_pfn(reg);
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

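/*
 * Buddy allocator hooks: report a page as unused when it is freed and
 * make it stable again when it is allocated. Code that maps a page
 * for address translation, e.g. the page table allocator, brings it
 * back to the dat state via arch_set_page_dat().
 */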
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}

void arch_set_page_nodat(struct page *page, int order)
{
	if (cmma_flag < 2)
		return;
	set_page_stable_nodat(page, order);
}

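/*
 * Test whether a page is in the stable/no-dat state; judging by its
 * use below, bit 0x20 of the ESSA state is the no-dat indication.
 */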
int arch_test_page_nodat(struct page *page)
{
	unsigned char state;

	if (cmma_flag < 2)
		return 0;
	state = get_page_state(page);
	return !!(state & 0x20);
}

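/*
 * Set the state of all free pages in one go, e.g. for the hibernation
 * code which needs every page in a well-defined state: make_stable != 0
 * moves the free lists to the stable state, otherwise they are marked
 * unused again. When making pages stable, the local per-cpu lists are
 * drained first so those pages show up on the zone free areas.
 */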
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable_dat(page, order);
				else
					set_page_unused(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}