
/*
 * Macros for manipulating and testing page->flags
 */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   more. Once (if ever) freed, PG_reserved is cleared and they might
 *   be given to the page allocator.
 * - The zero page(s)
 *
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for its own usage.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages, but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g.
 * as part of a huge page).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check.  Accessing is
 * not safe since it may cause another machine check.  Don't touch!
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 */
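/*
 * Illustrative sketch (not part of this header): the lock/unlock pairing
 * implied by the PG_locked rules above.  folio_lock()/folio_unlock() are
 * declared in <linux/pagemap.h>; the helper name here is hypothetical.
 */
static inline void example_with_folio_locked(struct folio *folio)
{
	folio_lock(folio);	/* sets PG_locked, blocks truncation of the file */
	/* ... operate on the folio while it cannot be truncated ... */
	folio_unlock(folio);	/* clears PG_locked, wakes tasks on the waitqueue */
}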
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
	PG_mlocked,		/* Page is vma mlocked */
	PG_hwpoison,		/* hardware poisoned page. Don't touch */

	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */
	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,

	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,
	/*
	 * Flags only valid for compound pages.  Stored in first tail page's
	 * flags word.
	 */

	/* At least one page in this folio has the hwpoison flag set */

	/* ... */
};
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!hugetlb_optimize_vmemmap_enabled())
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
	 * head struct page.  The alignment check aims to avoid accessing the
	 * fields (e.g. compound_head) of the @page[1].  It can avoid touching
	 * a (possibly) cold cacheline in most cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of the @page[1] with PG_head
		 * because the @page is a compound page composed with at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
static __always_inline bool page_count_writable(const struct page *page, int u)
{
	/*
	 * The refcount is checked before the fake-head check, so that a
	 * speculative PFN walker cannot race with HVO making the vmemmap
	 * read-only:
	 *
	 *   XXX: struct page[] becomes r/o
	 *   ...
	 *   atomic_add_unless(&page->_refcount)
	 *   XXX: try to modify r/o struct page[]
	 */
	if (atomic_read_acquire(&page->_refcount) == u)
		return false;

	return page_fixed_fake_head(page) == page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}

static inline bool page_count_writable(const struct page *page, int u)
{
	return true;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static __always_inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
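/*
 * Illustrative sketch (not part of this header): resolving an arbitrary
 * page to the head page of its compound page.  The helper name is
 * hypothetical; compound_head() simply returns the page itself for
 * order-0 and head pages.
 */
static inline struct page *example_head_of(struct page *page)
{
	struct page *head = compound_head(page);

	VM_BUG_ON_PAGE(PageTail(head), head);	/* a head page is never a tail */
	return head;
}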
/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @page.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))
/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
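/*
 * Illustrative sketch (not part of this header): page_folio() and
 * folio_page() used together.  The helper name is hypothetical.
 */
static inline struct page *example_first_page(struct page *page)
{
	struct folio *folio = page_folio(page);	/* any page -> containing folio */

	return folio_page(folio, 0);		/* page 0 is the head page */
}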
static __always_inline int PageTail(const struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif
static const unsigned long *const_folio_flags(const struct folio *folio,
		unsigned n)
{
	const struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}
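/*
 * Illustrative sketch (not part of this header): folio_flags(folio, 0)
 * returns the head page's flags word, where almost all folio flags live;
 * folio_flags(folio, 1) is only meaningful for large folios and is used
 * for the "first tail page" flags described above.  The helper name is
 * hypothetical and simply re-implements what the generated
 * folio_test_dirty() accessor does.
 */
static inline bool example_folio_is_dirty(const struct folio *folio)
{
	return test_bit(PG_dirty, const_folio_flags(folio, 0));
}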
/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound page all operations related to the page flag applied to
 *     head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })
/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

#define FOLIO_HEAD_PAGE		0
#define FOLIO_SECOND_PAGE	1
/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)					\
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)					\
static __always_inline void folio_set_##name(struct folio *folio)	\
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void folio_clear_##name(struct folio *folio)	\
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)					\
static __always_inline void __folio_set_##name(struct folio *folio)	\
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void __folio_clear_##name(struct folio *folio)	\
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)					\
static __always_inline bool folio_test_set_##name(struct folio *folio)	\
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)				\
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)						\
FOLIO_TEST_FLAG(name, page)						\
FOLIO_SET_FLAG(name, page)						\
FOLIO_CLEAR_FLAG(name, page)
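/*
 * Illustrative sketch (not part of this header): what a FOLIO_FLAG()
 * invocation generates.  "swapbacked" is just an example flag name here;
 * the real per-flag declarations appear further down in this file.
 *
 *	FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
 *
 * roughly expands to:
 *
 *	static __always_inline bool folio_test_swapbacked(const struct folio *folio)
 *	{ return test_bit(PG_swapbacked, const_folio_flags(folio, FOLIO_HEAD_PAGE)); }
 *	static __always_inline void folio_set_swapbacked(struct folio *folio)
 *	{ set_bit(PG_swapbacked, folio_flags(folio, FOLIO_HEAD_PAGE)); }
 *	static __always_inline void folio_clear_swapbacked(struct folio *folio)
 *	{ clear_bit(PG_swapbacked, folio_flags(folio, FOLIO_HEAD_PAGE)); }
 */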
#define TESTPAGEFLAG(uname, lname, policy)				\
FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
static __always_inline int Page##uname(const struct page *page)	\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
__FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
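/*
 * Illustrative sketch (not part of this header): the generators above are
 * normally combined by a PAGEFLAG() wrapper, so a single declaration such
 * as
 *
 *	PAGEFLAG(Referenced, referenced, PF_HEAD)
 *
 * emits both the folio accessors (folio_test_referenced() and friends) and
 * the legacy struct-page accessors (PageReferenced(), SetPageReferenced(),
 * ClearPageReferenced()), with the PF_HEAD policy redirecting an operation
 * on a tail page to its head page.  The helper below is hypothetical and
 * only shows how the generated accessors are called.
 */
static inline void example_mark_page_referenced(struct page *page)
{
	/* Works on head or tail pages; PF_HEAD resolves to the head page. */
	if (!PageReferenced(page))
		SetPageReferenced(page);
}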
/*
 * Stub variants, generated for flags that are compiled out: the test
 * functions return 0 and the set/clear functions are no-ops.
 */
static inline int Page##uname(const struct page *page) { return 0; }

static inline void SetPage##uname(struct page *page) { }

static inline void ClearPage##uname(struct page *page) { }

static inline void __ClearPage##uname(struct page *page) { }

static inline int TestSetPage##uname(struct page *page) { return 0; }

static inline int TestClearPage##uname(struct page *page) { return 0; }
/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */

/* Does kmap_local_folio() only allow access to one page of the folio? */

/*
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
 * page and then folio->mapping points to a struct movable_operations.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal state, folio->mapping does not exist as such, nor do these
 * flags below.
 */
static __always_inline bool PageMappingFlags(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool PageAnonNotKsm(const struct page *page)
{
	unsigned long flags = (unsigned long)page_folio(page)->mapping;

	return (flags & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON;
}

static __always_inline bool PageAnon(const struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __PageMovable(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}
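/*
 * Illustrative sketch (not part of this header): how the mapping-flag
 * helpers above are typically combined.  The function name is hypothetical;
 * only PageAnon(), PageAnonNotKsm() and __PageMovable() are real.
 */
static inline const char *example_classify_mapping(const struct page *page)
{
	if (PageAnonNotKsm(page))
		return "anonymous";
	if (PageAnon(page))
		return "anonymous (KSM-merged)";
	if (__PageMovable(page))
		return "non-lru movable";
	return "file-backed or unmapped";
}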
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */

u64 stable_page_flags(const struct page *page);
static inline bool PageUptodate(const struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}
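/*
 * Illustrative sketch (not part of this header): the usual pattern at the
 * end of a filesystem read path.  The helper name is hypothetical;
 * folio_mark_uptodate() is only called once the contents are fully
 * initialised, and folio_unlock() is declared in <linux/pagemap.h>.
 */
static inline void example_read_done(struct folio *folio, bool success)
{
	if (success)
		folio_mark_uptodate(folio);	/* contents are now valid */
	folio_unlock(folio);			/* clears PG_locked, wakes waiters */
}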
void set_page_writeback(struct page *page);
static __always_inline int PageHead(const struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
static inline int PageTransHuge(const struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

static inline int PageTransCompound(const struct page *page)
{
	return PageCompound(page);
}

/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler.  Cleared by THP split or free page.
 */
/*
 * For pages that do not use mapcount, page_type may be used.  The low 24 bits
 * of pagetype may be used for your own purposes, as long as you are careful
 * to not affect the top 8 bits.  The low bits of pagetype will be overwritten
 * when you clear the page_type from the page.
 */

/* This takes a mapcount which is one more than page->_mapcount */

static inline bool page_has_type(const struct page *page)
{
	return page_mapcount_is_type(data_race(page->page_type));
}

#define FOLIO_TYPE_OPS(lname, fname)					\
static __always_inline bool folio_test_##fname(const struct folio *folio) \
{									\
	return data_race(folio->page.page_type >> 24) == PGTY_##lname;	\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	if (folio_test_##fname(folio))					\
		return;							\
	VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX,	\
			folio);						\
	folio->page.page_type = (unsigned int)PGTY_##lname << 24;	\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	if (folio->page.page_type == UINT_MAX)				\
		return;							\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type = UINT_MAX;				\
}
#define PAGE_TYPE_OPS(uname, lname, fname)				\
FOLIO_TYPE_OPS(lname, fname)						\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return data_race(page->page_type >> 24) == PGTY_##lname;	\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	if (Page##uname(page))						\
		return;							\
	VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page);	\
	page->page_type = (unsigned int)PGTY_##lname << 24;		\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	if (page->page_type == UINT_MAX)				\
		return;							\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type = UINT_MAX;					\
}
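/*
 * Illustrative sketch (not part of this header): how a page type is wired
 * up and used.  The buddy allocator's free pages, for example, are covered
 * further down by PAGE_TYPE_OPS(Buddy, buddy, buddy), which generates
 * PageBuddy(), __SetPageBuddy() and __ClearPageBuddy().  Only the
 * non-atomic set/clear variants exist, because a page type is only changed
 * while the page is exclusively owned (e.g. by the allocator).  The helper
 * below is hypothetical.
 */
static inline bool example_is_free_buddy(const struct page *page)
{
	/* PageBuddy() is one of the accessors PAGE_TYPE_OPS() generates. */
	return PageBuddy(page);
}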
/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or not
 * onlined when onlining the memory section).
 *
 * Memory offlining code will not adjust the managed page count for any
 * PageOffline() pages, treating them like they were never exposed to the
 * buddy.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content.  PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */

/*
 * Marks pages in use as page tables.
 */
/**
 * PageSlab - Determine if the page belongs to the slab allocator
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for slab pages, false for any other kind of page.
 */
static inline bool PageSlab(const struct page *page)
{
	return folio_test_slab(page_folio(page));
}
/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
	return folio_test_hugetlb(page_folio(page));
}
/*
 * Check if a page is currently marked HWPoisoned.  Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(const struct page *page)
{
	const struct folio *folio;

	if (PageHWPoison(page))
		return true;
	folio = page_folio(page);
	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}
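/*
 * Illustrative sketch (not part of this header): a PFN walker skipping
 * poisoned memory before touching page contents.  The function name is
 * hypothetical; pfn_valid() and pfn_to_page() are the usual memory-model
 * helpers, and the check is only best effort, as noted above.
 */
static inline bool example_pfn_is_safe_to_read(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return false;
	return !is_page_hwpoison(pfn_to_page(pfn));
}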
bool is_free_buddy_page(const struct page *page);
static __always_inline int PageAnonExclusive(const struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	/*
	 * HugeTLB stores this information on the head page; THP keeps it per
	 * page.
	 */
	if (PageHuge(page))
		page = compound_head(page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */

/*
 * Flags stored in the second page of a compound page.  They may overlap
 * ...
 */