/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremap of pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when a read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
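
/*
 * Example (illustrative only, not part of this header): a simplified
 * write-out path exercises the PG_locked/PG_writeback lifecycle described
 * above, using lock_page()/unlock_page() and end_page_writeback() from
 * <linux/pagemap.h>; submit_io() stands in for the real I/O submission:
 *
 *	lock_page(page);		// PG_locked set; page is pinned
 *	set_page_writeback(page);	// PG_writeback set before I/O starts
 *	unlock_page(page);		// PG_locked cleared as writeback starts
 *	submit_io(page);		// hypothetical I/O submission
 *	// ...later, from the I/O completion handler:
 *	end_page_writeback(page);	// PG_writeback cleared when I/O ends
 */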

/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_workingset,

#ifdef CONFIG_MEMORY_FAILURE
	/*
	 * Compound pages. Stored in first tail page's flags.
	 * Indicates that at least one subpage is hwpoisoned in the
	 * THP.
	 */
	PG_has_hwpoisoned = PG_error,
#endif

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
			 hugetlb_optimize_vmemmap_key);

static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
{
	return static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
				   &hugetlb_optimize_vmemmap_key);
}

/*
 * If the feature of optimizing vmemmap pages associated with each HugeTLB
 * page is enabled, the head vmemmap page frame is reused and all of the tail
 * vmemmap addresses map to the head vmemmap page frame (further details can
 * be found in the figure at the head of mm/hugetlb_vmemmap.c). In other
 * words, there is more than one page struct with PG_head associated with each
 * HugeTLB page. We __know__ that there is only one head page struct; the tail
 * page structs with PG_head are fake head page structs. We need an approach
 * to distinguish between those two different types of page structs so that
 * compound_head() can return the real head page struct when the parameter is
 * the tail page struct but with PG_head.
 *
 * page_fixed_fake_head() returns the real head page struct if the @page is a
 * fake page head, otherwise it returns @page, which can either be a true page
 * head or a tail page.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!hugetlb_optimize_vmemmap_enabled())
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
	 * head struct page. The alignment check aims to avoid accessing the
	 * fields (e.g. compound_head) of @page[1], which can avoid touching
	 * a (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of @page[1] with PG_head
		 * because @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}

static inline bool hugetlb_optimize_vmemmap_enabled(void)
{
	return false;
}
#endif

static __always_inline int page_is_fake_head(struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
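
/*
 * Example (illustrative): the compound_head encoding. A tail page stores a
 * pointer to its head page in ->compound_head with bit 0 set as a tag (see
 * set_compound_head() below), which is why _compound_head() subtracts 1:
 *
 *	set_compound_head(tail, head);	// stores (unsigned long)head + 1
 *	head = compound_head(tail);	// tag seen, returns the head page
 *	head = compound_head(head);	// no tag, a head page maps to itself
 */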

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @page. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio. This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
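
/*
 * Usage example (illustrative): converting between pages and folios.
 *
 *	struct folio *folio = page_folio(page);		// page may be head or tail
 *	struct page *first = folio_page(folio, 0);	// same as &folio->page
 *	struct page *second = folio_page(folio, 1);	// caller must know n is valid
 */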

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_ONLY_HEAD	0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline bool folio_test_##lname(struct folio *folio)	\
{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline int Page##uname(struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_set_##lname(struct folio *folio)				\
{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_clear_##lname(struct folio *folio)				\
{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_set_##lname(struct folio *folio)				\
{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_clear_##lname(struct folio *folio)				\
{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_set_##lname(struct folio *folio)			\
{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_clear_##lname(struct folio *folio)			\
{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)
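
/*
 * For example, PAGEFLAG(Dirty, dirty, PF_HEAD) expands (roughly) to:
 *
 *	static __always_inline bool folio_test_dirty(struct folio *folio)
 *	{ return test_bit(PG_dirty, folio_flags(folio, FOLIO_PF_HEAD)); }
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *
 * plus the corresponding set/clear variants; the PF_HEAD policy redirects
 * the page-based forms to compound_head(page).
 */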

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
static inline void folio_set_##lname(struct folio *folio) { }		\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void folio_clear_##lname(struct folio *folio) { }	\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void __folio_clear_##lname(struct folio *folio) { }	\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
static inline bool folio_test_set_##lname(struct folio *folio)		\
{ return 0; }								\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
static inline bool folio_test_clear_##lname(struct folio *folio)	\
{ return 0; }								\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exists for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
 * page, and then page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
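
/*
 * Example (illustrative, along the lines of the anon_vma lookup in
 * mm/rmap.c): decoding the anon_vma pointer from an anonymous folio's
 * ->mapping field:
 *
 *	unsigned long mapping = (unsigned long)folio->mapping;
 *
 *	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 *		return NULL;		// file-backed, KSM or movable
 *	return (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
 */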

static __always_inline bool folio_mapping_flags(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage. Anonymous
 * and CoW folios are always uptodate. If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline int PageUptodate(struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
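
/*
 * Illustrative sketch of the barrier pairing above: a writer must fill the
 * folio before marking it uptodate, and a reader may only trust the data
 * after folio_test_uptodate() returns true. copy_from() is hypothetical:
 *
 *	// writer side (e.g. completing a read from disk):
 *	memcpy(folio_address(folio), src, len);	// data stores first
 *	folio_mark_uptodate(folio);		// smp_wmb(), then set_bit()
 *
 *	// reader side:
 *	if (folio_test_uptodate(folio))		// test_bit(), then smp_rmb()
 *		copy_from(folio_address(folio));	// data loads last
 */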

bool __folio_start_writeback(struct folio *folio, bool keep_write);
bool set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static inline void set_page_writeback_keepwrite(struct page *page)
{
	folio_start_writeback_keepwrite(page_folio(page));
}

static inline bool test_set_page_writeback(struct page *page)
{
	return set_page_writeback(page);
}

static __always_inline bool folio_test_head(struct folio *folio)
{
	return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
static inline bool folio_test_hugetlb(struct folio *folio)
{
	return PageHeadHuge(&folio->page);
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

static inline bool folio_test_transhuge(struct folio *folio)
{
	return folio_test_head(folio);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can postpone
 * per small page mapcount accounting (and its overhead from atomic operations)
 * until the first PMD split.
 *
 * For such a page, PageDoubleMap means that ->_mapcount in all sub-pages is
 * offset up by one. This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
PAGEFLAG_FALSE(DoubleMap, double_map)
	TESTSCFLAG_FALSE(DoubleMap, double_map)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by the hwpoison handler. Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failure handling.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used. Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
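
/*
 * Worked example (illustrative) of the inverted sense described above, for
 * PAGE_TYPE_OPS(Buddy, buddy):
 *
 *	// freshly allocated: page_type == 0xffffffff, no type matches
 *	__SetPageBuddy(page);	// page_type &= ~PG_buddy -> 0xffffff7f
 *	PageBuddy(page);	// (0xffffff7f & 0xf0000080) == 0xf0000000: true
 *	__ClearPageBuddy(page);	// page_type |= PG_buddy -> back to 0xffffffff
 */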

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined.
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require it to re-set the pages PageOffline() and not give them
 * to the buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline)
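
/*
 * Sketch (hypothetical driver code, not taken from any real driver) of the
 * reference count handling described above, inside a memory notifier;
 * for_each_driver_page() is an assumed driver-private iterator:
 *
 *	case MEM_GOING_OFFLINE:
 *		for_each_driver_page(page)
 *			page_ref_dec(page);	// now 0: treated as free
 *		break;
 *	case MEM_CANCEL_OFFLINE:
 *		for_each_driver_page(page)
 *			page_ref_inc(page);	// offlining aborted, re-pin
 *		break;
 */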

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guard pages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept across the page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(PAGEFLAGS_MASK & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)

/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(struct folio *folio)
{
	return page_has_private(&folio->page);
}
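
/*
 * Example (illustrative): reclaim-style callers use these helpers to decide
 * whether the owning filesystem must release its private data before the
 * page can be freed, roughly:
 *
 *	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
 *		return false;	// fs still holds on to the page
 */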

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */