Lines matching "page":
3 * Macros for manipulating and testing page->flags
18 * Various page->flags bits:
20 * PG_reserved is set for special pages. The "struct page" of such a page
25 * - Pages reserved or allocated early during boot (before the page allocator
27 * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
29 * be given to the page allocator.
32 * - The zero page(s)
44 * Consequently, PG_reserved for a page mapped into user space can indicate
45 * the zero page, the vDSO, MMIO pages or device memory.
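A hedged sketch of how this flag is typically consumed (not part of the header; pfn_is_ordinary_ram() is an illustrative name): a PFN walker might use PageReserved(), generated further down in this file, to skip memory the page allocator never managed.

	/* Illustrative only: skip reserved pages while walking PFNs. */
	static bool pfn_is_ordinary_ram(unsigned long pfn)
	{
		if (!pfn_valid(pfn))
			return false;
		return !PageReserved(pfn_to_page(pfn));
	}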
48 * specific data (which is normally at page->private). It can be used by
55 * PG_locked also pins a page in pagecache, and blocks truncation of the file
58 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
61 * PG_swapbacked is set when a page uses swap as backing storage. These are
66 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
69 * PG_arch_1 is an architecture specific page state bit. The generic code
70 * guarantees that this bit is cleared for a page when it is first entered into
71 * the page cache.
73 * PG_hwpoison indicates that a page got corrupted in hardware and contains
81 * The page flags field is split into two parts, the main flags area
94 PG_locked, /* Page is locked. Don't touch. */
95 PG_writeback, /* Page is under writeback */
101 PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
111 PG_swapbacked, /* Page is backed by RAM/swap */
112 PG_unevictable, /* Page is "unevictable" */
114 PG_mlocked, /* Page is vma mlocked */
117 PG_hwpoison, /* hardware poisoned page. Don't touch */
134 PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
139 * Depending on the way an anonymous folio can be mapped into a page
140 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
141 * THP), PG_anon_exclusive may be set only for the head page or for
153 /* Two page bits are conscripted by FS-Cache to maintain local caching
157 PG_fscache = PG_private_2, /* page backed by cache */
160 /* Pinned in Xen as a read-only pagetable page. */
164 /* Has a grant mapping of another (foreign) domain's page. */
169 /* non-lru isolated movable page */
181 * Flags only valid for compound pages. Stored in first tail page's
186 /* At least one page in this folio has the hwpoison flag set */
200 * Return the real head page struct iff the @page is a fake head page, otherwise
201 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
203 static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
206 return page;
209 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
210 * struct page. The alignment check aims to avoid accessing the fields (
211 * e.g. compound_head) of the @page[1]. It can avoid touching a (possibly)
214 if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
215 test_bit(PG_head, &page->flags)) {
217 * We can safely access the field of the @page[1] with PG_head
218 * because the @page is a compound page composed of at least
221 unsigned long head = READ_ONCE(page[1].compound_head);
224 return (const struct page *)(head - 1);
226 return page;
229 static inline const struct page *page_fixed_fake_head(const struct page *page)
231 return page;
235 static __always_inline int page_is_fake_head(const struct page *page)
237 return page_fixed_fake_head(page) != page;
240 static __always_inline unsigned long _compound_head(const struct page *page)
242 unsigned long head = READ_ONCE(page->compound_head);
246 return (unsigned long)page_fixed_fake_head(page);
249 #define compound_head(page) ((typeof(page))_compound_head(page))
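A minimal usage sketch, not from the header (example_head_is_locked() is a made-up name): the Page* policies further down funnel a possibly-tail page through compound_head() before touching flags, along these lines.

	static inline bool example_head_is_locked(struct page *page)
	{
		/* A tail page is redirected to its head before the flag is read. */
		return test_bit(PG_locked, &compound_head(page)->flags);
	}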
252 * page_folio - Converts from page to folio.
253 * @p: The page.
255 * Every page is part of a folio. This function cannot be called on a
258 * Context: No reference, nor lock is required on @p. If the caller
260 * it should re-check the folio still contains this page after gaining
262 * Return: The folio which contains this page.
265 const struct page *: (const struct folio *)_compound_head(p), \
266 struct page *: (struct folio *)_compound_head(p)))
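A hedged usage sketch (example_page_is_dirty() is an illustrative name): code that only holds a struct page usually converts to the owning folio first and then uses the folio_test_*() helpers generated later in this file.

	static inline bool example_page_is_dirty(struct page *page)
	{
		struct folio *folio = page_folio(page);

		return folio_test_dirty(folio);
	}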
269 * folio_page - Return a page from a folio.
271 * @n: The page number to return.
274 * check that the page number lies within @folio; the caller is presumed
275 * to have a reference to the page.
277 #define folio_page(folio, n) nth_page(&(folio)->page, n)
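A hedged sketch of folio_page() in use (illustrative helper; assumes the usual folio_nr_pages() accessor): iterating the constituent pages of a possibly-large folio.

	static inline void example_for_each_page(struct folio *folio)
	{
		long i;

		for (i = 0; i < folio_nr_pages(folio); i++) {
			struct page *page = folio_page(folio, i);

			/* ... operate on one constituent page ... */
			(void)page;
		}
	}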
279 static __always_inline int PageTail(const struct page *page)
281 return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
284 static __always_inline int PageCompound(const struct page *page)
286 return test_bit(PG_head, &page->flags) ||
287 READ_ONCE(page->compound_head) & 1;
291 static inline int PagePoisoned(const struct page *page)
293 return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
297 void page_init_poison(struct page *page, size_t size);
299 static inline void page_init_poison(struct page *page, size_t size)
307 const struct page *page = &folio->page;
309 VM_BUG_ON_PGFLAGS(PageTail(page), page);
310 VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
311 return &page[n].flags;
316 struct page *page = &folio->page;
318 VM_BUG_ON_PGFLAGS(PageTail(page), page);
319 VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
320 return &page[n].flags;
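A hedged sketch (illustrative helper; real callers go through the PF_SECOND policy below): passing n == 1 reaches the flags word of the first tail page, where per-folio state such as the "at least one page has hwpoison" bit is kept.

	static inline bool example_second_page_bit(struct folio *folio, unsigned int bit)
	{
		return test_bit(bit, folio_flags(folio, 1));
	}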
324 * Page flags policies wrt compound pages
327 * check if this struct page is poisoned/uninitialized
330 * the page flag is relevant for small, head and tail pages.
333 * for compound pages, all operations related to the page flag are applied to
334 * the head page.
337 * modifications of the page flag must be done on small or head pages,
341 * the page flag is not relevant for compound pages.
344 * the page flag is stored in the first tail page.
346 #define PF_POISONED_CHECK(page) ({ \
347 VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
348 page; })
349 #define PF_ANY(page, enforce) PF_POISONED_CHECK(page)
350 #define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
351 #define PF_NO_TAIL(page, enforce) ({ \
352 VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
353 PF_POISONED_CHECK(compound_head(page)); })
354 #define PF_NO_COMPOUND(page, enforce) ({ \
355 VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
356 PF_POISONED_CHECK(page); })
357 #define PF_SECOND(page, enforce) ({ \
358 VM_BUG_ON_PGFLAGS(!PageHead(page), page); \
359 PF_POISONED_CHECK(&page[1]); })
361 /* Which page is the flag stored in */
372 * Macros to create function definitions for page flags
374 #define FOLIO_TEST_FLAG(name, page) \
376 { return test_bit(PG_##name, const_folio_flags(folio, page)); }
378 #define FOLIO_SET_FLAG(name, page) \
380 { set_bit(PG_##name, folio_flags(folio, page)); }
382 #define FOLIO_CLEAR_FLAG(name, page) \
384 { clear_bit(PG_##name, folio_flags(folio, page)); }
386 #define __FOLIO_SET_FLAG(name, page) \
388 { __set_bit(PG_##name, folio_flags(folio, page)); }
390 #define __FOLIO_CLEAR_FLAG(name, page) \
392 { __clear_bit(PG_##name, folio_flags(folio, page)); }
394 #define FOLIO_TEST_SET_FLAG(name, page) \
396 { return test_and_set_bit(PG_##name, folio_flags(folio, page)); }
398 #define FOLIO_TEST_CLEAR_FLAG(name, page) \
400 { return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }
402 #define FOLIO_FLAG(name, page) \
403 FOLIO_TEST_FLAG(name, page) \
404 FOLIO_SET_FLAG(name, page) \
405 FOLIO_CLEAR_FLAG(name, page)
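As a hedged illustration of what one FOLIO_FLAG() invocation produces (the flag name "example" is made up, and the signature lines elided from this listing are inferred from the call sites), the expansion is roughly:

	/* FOLIO_FLAG(example, 0) would generate approximately: */
	static __always_inline bool folio_test_example(const struct folio *folio)
	{ return test_bit(PG_example, const_folio_flags(folio, 0)); }
	static __always_inline void folio_set_example(struct folio *folio)
	{ set_bit(PG_example, folio_flags(folio, 0)); }
	static __always_inline void folio_clear_example(struct folio *folio)
	{ clear_bit(PG_example, folio_flags(folio, 0)); }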
409 static __always_inline int Page##uname(const struct page *page) \
410 { return test_bit(PG_##lname, &policy(page, 0)->flags); }
414 static __always_inline void SetPage##uname(struct page *page) \
415 { set_bit(PG_##lname, &policy(page, 1)->flags); }
419 static __always_inline void ClearPage##uname(struct page *page) \
420 { clear_bit(PG_##lname, &policy(page, 1)->flags); }
424 static __always_inline void __SetPage##uname(struct page *page) \
425 { __set_bit(PG_##lname, &policy(page, 1)->flags); }
429 static __always_inline void __ClearPage##uname(struct page *page) \
430 { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
434 static __always_inline int TestSetPage##uname(struct page *page) \
435 { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
439 static __always_inline int TestClearPage##uname(struct page *page) \
440 { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
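As a hedged illustration of how a policy combines with these templates (PAGEFLAG(Dirty, dirty, PF_HEAD) is assumed as the example invocation; the combining PAGEFLAG() wrapper itself is elided from this listing), the generated helpers look roughly like:

	static __always_inline int PageDirty(const struct page *page)
	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
	static __always_inline void SetPageDirty(struct page *page)
	{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
	static __always_inline void ClearPageDirty(struct page *page)
	{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }

Because the policy is PF_HEAD, each helper first redirects a tail page to its compound head.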
481 static inline int Page##uname(const struct page *page) { return 0; }
485 static inline void SetPage##uname(struct page *page) { }
489 static inline void ClearPage##uname(struct page *page) { }
493 static inline void __ClearPage##uname(struct page *page) { }
497 static inline int TestSetPage##uname(struct page *page) { return 0; }
501 static inline int TestClearPage##uname(struct page *page) { return 0; }
541 * Private page markings that may be used by the filesystem that owns the page
553 * risky: they bypass page accounting.
647 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
650 * structure which KSM associates with that merged page. See ksm.h.
653 * page and then folio->mapping points to a struct movable_operations.
659 * For slab pages, since slab reuses the bits in struct page to store its
673 * indicates that this page->mapping is now under reflink case.
682 static __always_inline bool PageMappingFlags(const struct page *page)
684 return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
692 static __always_inline bool PageAnon(const struct page *page)
694 return folio_test_anon(page_folio(page));
703 static __always_inline bool __PageMovable(const struct page *page)
705 return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
711 * A KSM page is one of those write-protected "shared pages" or "merged pages"
712 * which KSM maps into multiple mms, wherever identical anonymous page content
713 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
714 * anon_vma, but to that page's node of the stable tree.
722 static __always_inline bool PageKsm(const struct page *page)
724 return folio_test_ksm(page_folio(page));
730 u64 stable_page_flags(const struct page *page);
778 static inline bool PageUptodate(const struct page *page)
780 return folio_test_uptodate(page_folio(page));
800 static __always_inline void __SetPageUptodate(struct page *page)
802 __folio_mark_uptodate((struct folio *)page);
805 static __always_inline void SetPageUptodate(struct page *page)
807 folio_mark_uptodate((struct folio *)page);
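A hedged usage sketch (illustrative function; assumes the kmap_local_folio()/kunmap_local() helpers from <linux/highmem.h>): a read path fills the folio first and only then marks it uptodate, so the flag never advertises stale contents.

	static void example_finish_read(struct folio *folio, const void *src, size_t len)
	{
		void *dst = kmap_local_folio(folio, 0);

		memcpy(dst, src, len);
		kunmap_local(dst);
		folio_mark_uptodate(folio);	/* readers may now trust the data */
	}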
813 void set_page_writeback(struct page *page);
825 static __always_inline int PageHead(const struct page *page)
827 PF_POISONED_CHECK(page);
828 return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
836 * folio_test_large() - Does this folio contain more than one page?
839 * Return: True if the folio is larger than one page.
846 static __always_inline void set_compound_head(struct page *page, struct page *head)
848 WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
851 static __always_inline void clear_compound_head(struct page *page)
853 WRITE_ONCE(page->compound_head, 0);
857 static inline void ClearPageCompound(struct page *page)
859 BUG_ON(!PageHead(page));
860 ClearPageHead(page);
888 static inline int PageTransHuge(const struct page *page)
890 VM_BUG_ON_PAGE(PageTail(page), page);
891 return PageHead(page);
899 static inline int PageTransCompound(const struct page *page)
901 return PageCompound(page);
909 static inline int PageTransTail(const struct page *page)
911 return PageTail(page);
923 * compound page.
925 * This flag is set by the hwpoison handler. Cleared by THP split or page free.
938 * pagetype will be overwritten when you clear the page_type from the page.
960 /* This takes a mapcount which is one more than page->_mapcount */
966 static inline bool page_has_type(const struct page *page)
968 return page_mapcount_is_type(data_race(page->page_type));
974 return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
980 VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
982 folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
986 if (folio->page.page_type == UINT_MAX) \
989 folio->page.page_type = UINT_MAX; \
994 static __always_inline int Page##uname(const struct page *page) \
996 return data_race(page->page_type >> 24) == PGTY_##lname; \
998 static __always_inline void __SetPage##uname(struct page *page) \
1000 if (Page##uname(page)) \
1002 VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
1003 page->page_type = (unsigned int)PGTY_##lname << 24; \
1005 static __always_inline void __ClearPage##uname(struct page *page) \
1007 if (page->page_type == UINT_MAX) \
1009 VM_BUG_ON_PAGE(!Page##uname(page), page); \
1010 page->page_type = UINT_MAX; \
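A hedged illustration of what one PAGE_TYPE_OPS() invocation yields (PAGE_TYPE_OPS(Buddy, buddy, buddy) is assumed here, matching the PageBuddy() description below): the generated test simply compares the top byte of page_type with the type constant.

	/* Approximate shape of the generated test helper: */
	static __always_inline int example_PageBuddy(const struct page *page)
	{
		return data_race(page->page_type >> 24) == PGTY_buddy;
	}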
1014 * PageBuddy() indicates that the page is free and in the buddy system
1020 * PageOffline() indicates that the page is logically offline although the
1039 * Memory offlining code will not adjust the managed page count for any
1043 * There are drivers that mark a page PageOffline() and expect there won't be
1044 * any further access to page content. PFN walkers that read content of random
1056 * Marks pages in use as page tables.
1068 * PageSlab - Determine if the page belongs to the slab allocator
1069 * @page: The page to test.
1072 * Return: True for slab pages, false for any other kind of page.
1074 static inline bool PageSlab(const struct page *page)
1076 return folio_test_slab(page_folio(page));
1095 * PageHuge - Determine if the page belongs to hugetlbfs
1096 * @page: The page to test.
1102 static inline bool PageHuge(const struct page *page)
1104 return folio_test_hugetlb(page_folio(page));
1108 * Check if a page is currently marked HWPoisoned. Note that this check is
1112 static inline bool is_page_hwpoison(const struct page *page)
1116 if (PageHWPoison(page))
1118 folio = page_folio(page);
1119 return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
1122 bool is_free_buddy_page(const struct page *page);
1126 static __always_inline int PageAnonExclusive(const struct page *page)
1128 VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
1130 * HugeTLB stores this information on the head page; THP keeps it per
1131 * page
1133 if (PageHuge(page))
1134 page = compound_head(page);
1135 return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
1138 static __always_inline void SetPageAnonExclusive(struct page *page)
1140 VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
1141 VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
1142 set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
1145 static __always_inline void ClearPageAnonExclusive(struct page *page)
1147 VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
1148 VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
1149 clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
1152 static __always_inline void __ClearPageAnonExclusive(struct page *page)
1154 VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
1155 VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
1156 __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
1166 * Flags checked when a page is freed. Pages being freed should not have
1177 * Flags checked when a page is prepped for return by the page allocator.
1179 * there has been a kernel bug or struct page corruption.
1181 * __PG_HWPOISON is exceptional because it needs to be kept beyond the page's
1182 * alloc-free cycle to prevent the page from being reused.
1188 * Flags stored in the second page of a compound page. They may overlap