// SPDX-License-Identifier: GPL-2.0
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)
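
/*
 * Each file below is a flat array of u64 entries indexed by PFN: the
 * entry for page frame N sits at byte offset N * KPMSIZE, and both the
 * read offset and length must be KPMSIZE-aligned (checked via KPMMASK).
 */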

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (!ppage || PageSlab(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;

	return ret;
}

static const struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};
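
/*
 * Illustrative userspace sketch (a documentation aid, not part of the
 * kernel build): the mapcount of page frame `pfn` is the u64 at byte
 * offset pfn * sizeof(u64), so a reader can pread() it directly:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		uint64_t pcount;
 *		unsigned long pfn;
 *		int fd;
 *
 *		if (argc < 2)
 *			return 1;
 *		pfn = strtoul(argv[1], NULL, 0);
 *		fd = open("/proc/kpagecount", O_RDONLY);
 *		if (fd < 0 || pread(fd, &pcount, sizeof(pcount),
 *				    pfn * sizeof(pcount)) != sizeof(pcount))
 *			return 1;
 *		printf("pfn %lu mapcount %llu\n", pfn,
 *		       (unsigned long long)pcount);
 *		return 0;
 *	}
 */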

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}
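
/*
 * For example, kpf_copy_bit(k, KPF_LOCKED, PG_locked) extracts bit
 * PG_locked from the kernel's page->flags value and re-emits it at the
 * stable bit position KPF_LOCKED, so the exported layout stays fixed
 * even if the kernel's internal PG_* numbering changes.
 */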

u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		if (PageLRU(head) || PageAnon(head))
			u |= 1 << KPF_THP;
		else if (is_huge_zero_page(head)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;

	/*
	 * Caveats on high order pages: page->_refcount will only be set
	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageBalloon(page))
		u |= 1 << KPF_BALLOON;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;

	if (page_is_idle(page))
		u |= 1 << KPF_IDLE;

	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
	if (PageTail(page) && PageSlab(compound_head(page)))
		u |= 1 << KPF_SLAB;

	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

	if (PageSwapCache(page))
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;

	return ret;
}

static const struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};
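
/*
 * Illustrative userspace sketch (a documentation aid, not part of the
 * kernel build): the KPF_* bit numbers are exported through the uapi
 * header <linux/kernel-page-flags.h>, so a reader can test a single
 * flag of an entry, e.g. whether a frame is a transparent huge page:
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <linux/kernel-page-flags.h>
 *
 *	static int pfn_is_thp(int fd, unsigned long pfn)
 *	{
 *		uint64_t flags;
 *
 *		if (pread(fd, &flags, sizeof(flags),
 *			  pfn * sizeof(flags)) != sizeof(flags))
 *			return -1;
 *		return !!(flags & (1ULL << KPF_THP));
 *	}
 *
 * where fd is an open file descriptor for /proc/kpageflags.
 */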

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;

	return ret;
}

static const struct file_operations proc_kpagecgroup_operations = {
	.llseek = mem_lseek,
	.read = kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */
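
/*
 * Illustrative userspace sketch (a documentation aid, not part of the
 * kernel build): each entry holds the inode number of the memory cgroup
 * the page is charged to (0 if none), which can be matched against the
 * inode of a cgroup directory. With fd and pfn as in the earlier
 * sketches:
 *
 *	uint64_t ino;
 *
 *	if (pread(fd, &ino, sizeof(ino),
 *		  pfn * sizeof(ino)) == sizeof(ino) && ino)
 *		printf("pfn %lu charged to memcg inode %llu\n", pfn,
 *		       (unsigned long long)ino);
 */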

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &proc_kpagecgroup_operations);
#endif
	return 0;
}
fs_initcall(proc_page_init);
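
/*
 * Usage note (an illustrative sketch, not part of the kernel build):
 * these files are indexed by physical PFN. To inspect a virtual address,
 * userspace first translates it via /proc/self/pagemap, whose u64
 * entries carry the PFN in bits 0-54 when bit 63 (page present) is set.
 * With addr the virtual address of interest and the usual <fcntl.h>,
 * <stdint.h> and <unistd.h> includes assumed:
 *
 *	uint64_t ent;
 *	unsigned long vpage = addr / sysconf(_SC_PAGESIZE);
 *	int pm = open("/proc/self/pagemap", O_RDONLY);
 *
 *	if (pm >= 0 && pread(pm, &ent, sizeof(ent),
 *			     vpage * sizeof(ent)) == sizeof(ent) &&
 *	    (ent >> 63) & 1) {
 *		unsigned long pfn = ent & ((1ULL << 55) - 1);
 *		// pfn now indexes kpagecount/kpageflags/kpagecgroup
 *	}
 */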