
/*
 * Macros for manipulating and testing page->flags
 */
/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   more. Once (if ever) freed, PG_reserved is cleared and they will be
 *   given to the page allocator.
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 *
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for its own usage.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages, but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g.
 * as part of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Don't touch!
 */
/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards (see the page_zonenum() sketch
 * after the enum below for how the fields area is consumed).
 */
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must
				 * be bit #7 and in the same byte as "PG_locked" */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
	PG_mlocked,		/* Page is vma mlocked */
	PG_uncached,		/* Page has been mapped as uncached */
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */
	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,

	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,
	/*
	 * Flags only valid for compound pages. Stored in first tail page's
	 * flags word.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_error,
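/*
 * Illustration (reconstructed from linux/mm.h, not part of this header):
 * the "fields area" mentioned above is consumed by accessors such as
 * page_zonenum(), which reads the zone id from the high bits of
 * page->flags:
 */
static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}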
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!hugetlb_optimize_vmemmap_enabled())
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
	 * head struct page. The alignment check aims to avoid accessing the
	 * fields (e.g. compound_head) of @page[1], so that a (possibly) cold
	 * cacheline is not touched in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of @page[1] with PG_head
		 * because @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif
static __always_inline int page_is_fake_head(struct page *page)
{
	return page_fixed_fake_head(page) != page;
}
static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
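/*
 * Usage sketch (illustrative, not part of this header; example_mark_dirty()
 * is hypothetical): tail pages store the head page pointer in
 * page->compound_head with bit 0 set, which is why _compound_head() tests
 * (head & 1) and subtracts 1. Callers normalize before touching flags:
 */
static inline void example_mark_dirty(struct page *page)
{
	struct page *head = compound_head(page);	/* head == page unless page is a tail */

	SetPageDirty(head);	/* PF_HEAD-policy flags live in the head page's flags word */
}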
/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))
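/*
 * Usage sketch (illustrative; the example_*() helpers are hypothetical):
 * the _Generic() selection above preserves const-ness of the argument.
 */
static inline struct folio *example_folio_of(struct page *page)
{
	return page_folio(page);		/* yields struct folio * */
}

static inline const struct folio *example_folio_of_const(const struct page *page)
{
	return page_folio(page);		/* yields const struct folio * */
}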
/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio. This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}
#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif
static inline unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}
/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })
/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_ONLY_HEAD	0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1
/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTPAGEFLAG_FALSE(uname, lname)				\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
static inline int TestClearPage##uname(struct page *page) { return 0; }
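/*
 * Illustration (not among the matched lines): a declaration such as
 *
 *	PAGEFLAG(Dirty, dirty, PF_HEAD)
 *
 * combines the TEST/SET/CLEAR generators above, so it expands to roughly:
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *
 * i.e. every accessor normalizes to the head page (PF_HEAD) before touching
 * the bit.
 */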
/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
static __always_inline bool PageSwapCache(struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_MOVABLE without PAGE_MAPPING_ANON is used for a non-lru movable
 * page and then page->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, the page->mapping does not exist as such, nor do these
 * flags below. So in order to avoid testing non-existent bits, please
 * make sure that PageSlab(page) actually evaluates to false before calling
 * the following functions (e.g., PageAnon). See mm/slab.h.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Different from the flags above, this flag is used only for fsdax mode. It
 * indicates that this page->mapping is now under reflink case.
 */
#define PAGE_MAPPING_DAX_COW	0x1
static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}
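/*
 * Reconstructed for reference (not among the matched lines): folio_test_anon()
 * is simply the PAGE_MAPPING_ANON test described above.
 */
static __always_inline bool folio_test_anon(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}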
static __always_inline bool PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}
static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
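/*
 * Reconstructed for reference (not among the matched lines): the KSM test
 * checks for both mapping bits set at once, i.e. PAGE_MAPPING_KSM.
 */
static __always_inline bool folio_test_ksm(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}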
static __always_inline bool PageKsm(struct page *page)
{
	return folio_test_ksm(page_folio(page));
}

u64 stable_page_flags(struct page *page);
static inline int PageUptodate(struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}
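/*
 * Reconstructed sketch (not among the matched lines): the folio helper that
 * the wrappers below call orders the PG_uptodate bit after the data writes
 * that filled the folio, using a write barrier:
 */
static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();	/* make the folio contents visible before the flag */
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}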
static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

void set_page_writeback(struct page *page);
static __always_inline int PageHead(struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}
/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
int PageHuge(struct page *page);

/*
 * Context: Any context. Caller should have a reference on the page to
 * prevent it from being turned into a tail page.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler. Cleared by THP split or free page.
 */

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}
/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used. Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high
 * and low bits so that an underflow or overflow of page_mapcount() won't
 * be mistaken for a page type value.
 */

#define PAGE_TYPE_BASE		0xf0000000
/* Reserve 0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
#define folio_test_type(folio, flag)					\
	((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_type_has_type(unsigned int page_type)
{
	return (int)page_type < PAGE_MAPCOUNT_RESERVE;
}

static inline int page_has_type(struct page *page)
{
	return page_type_has_type(page->page_type);
}
#define PAGE_TYPE_OPS(uname, lname, fname)				\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	folio->page.page_type &= ~PG_##lname;				\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	folio->page.page_type |= PG_##lname;				\
}
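/*
 * Worked example (illustrative; the constants are the values this era of
 * the kernel uses: PAGE_TYPE_BASE 0xf0000000, PG_buddy 0x00000080):
 *
 *	freshly freed page_type:	0xffffffff  (-1, no type)
 *	after __SetPageBuddy():		0xffffff7f  (inverted sense: bit cleared)
 *	PageType(page, PG_buddy):	(0xffffff7f & 0xf0000080)
 *					== 0xf0000000 -> true
 *
 * A page_mapcount() overflow or underflow cannot flip the whole
 * PAGE_TYPE_BASE pattern, so a mapcount is never mistaken for a type.
 */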
/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)
extern bool is_free_buddy_page(struct page *page);
static __always_inline int PageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
/*
 * Flags stored in the second page of a compound page. They may overlap
 * the CHECK_AT_FREE flags above, so cannot be used in __free_pages_ok().
 */
#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(struct folio *folio)
{
	return page_has_private(&folio->page);
}
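/*
 * Usage sketch (illustrative, not part of this header): reclaim and
 * truncation paths use this test to decide whether the owning filesystem
 * must release its private data before the folio can be freed, e.g.:
 *
 *	if (folio_has_private(folio) &&
 *	    !filemap_release_folio(folio, GFP_KERNEL))
 *		goto keep_locked;
 */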