1  /* SPDX-License-Identifier: GPL-2.0-or-later */
2  /* internal.h: mm/ internal definitions
3   *
4   * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
5   * Written by David Howells (dhowells@redhat.com)
6   */
7  #ifndef __MM_INTERNAL_H
8  #define __MM_INTERNAL_H
9  
10  #include <linux/fs.h>
11  #include <linux/khugepaged.h>
12  #include <linux/mm.h>
13  #include <linux/mm_inline.h>
14  #include <linux/pagemap.h>
15  #include <linux/rmap.h>
16  #include <linux/swap.h>
17  #include <linux/swapops.h>
18  #include <linux/swap_cgroup.h>
19  #include <linux/tracepoint-defs.h>
20  
21  /* Internal core VMA manipulation functions. */
22  #include "vma.h"
23  
24  struct folio_batch;
25  
26  /*
27   * The set of flags that only affect watermark checking and reclaim
28   * behaviour. This is used by the MM to obey the caller constraints
29   * about IO, FS and watermark checking while ignoring placement
30   * hints such as HIGHMEM usage.
31   */
32  #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
33  			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
34  			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
35  			__GFP_NOLOCKDEP)
36  
37  /* The GFP flags allowed during early boot */
38  #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
39  
40  /* Control allocation cpuset and node placement constraints */
41  #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
42  
43  /* Do not use these with a slab allocator */
44  #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
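
/*
 * Illustrative sketch of how the masks above are typically combined (variable
 * names here are hypothetical, not part of this header): a nested allocation
 * keeps only the caller's reclaim constraints, and a slab-style allocator can
 * detect flags it must never be passed:
 *
 *	gfp_t nested_gfp = caller_gfp & GFP_RECLAIM_MASK;
 *	bool bad_slab_gfp = caller_gfp & GFP_SLAB_BUG_MASK;
 */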
45  
46  /*
47   * Unlike WARN_ON_ONCE(), no warning will be issued when __GFP_NOWARN
48   * is specified.
49   */
50  #define WARN_ON_ONCE_GFP(cond, gfp)	({				\
51  	static bool __section(".data.once") __warned;			\
52  	int __ret_warn_once = !!(cond);					\
53  									\
54  	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
55  		__warned = true;					\
56  		WARN_ON(1);						\
57  	}								\
58  	unlikely(__ret_warn_once);					\
59  })
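
/*
 * Illustrative sketch only (example_alloc() is hypothetical): warn at most
 * once about an oversized request, but stay silent when the caller asked for
 * that with __GFP_NOWARN:
 *
 *	static struct page *example_alloc(gfp_t gfp, unsigned int order)
 *	{
 *		if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *			return NULL;
 *		return alloc_pages(gfp, order);
 *	}
 */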
60  
61  void page_writeback_init(void);
62  
63  /*
64   * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
65   * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
66   * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
67   * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
68   */
69  #define ENTIRELY_MAPPED		0x800000
70  #define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)
71  
72  /*
73   * Flags passed to __show_mem() and show_free_areas() to suppress output in
74   * various contexts.
75   */
76  #define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
77  
78  /*
79   * How many individual pages have an elevated _mapcount.  Excludes
80   * the folio's entire_mapcount.
81   *
82   * Don't use this function outside of debugging code.
83   */
84  static inline int folio_nr_pages_mapped(const struct folio *folio)
85  {
86  	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
87  }
88  
89  /*
90   * Retrieve the first entry of a folio based on a provided entry within the
91   * folio. We cannot rely on folio->swap as there is no guarantee that it has
92   * been initialized. Used for calling arch_swap_restore()
93   */
94  static inline swp_entry_t folio_swap(swp_entry_t entry,
95  		const struct folio *folio)
96  {
97  	swp_entry_t swap = {
98  		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
99  	};
100  
101  	return swap;
102  }
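
/*
 * Worked example (values purely illustrative): for an order-2 folio (4 pages),
 * an entry with val 0x1036 anywhere within the folio yields the folio's first
 * swap entry, ALIGN_DOWN(0x1036, 4) == 0x1034.
 */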
103  
104  static inline void *folio_raw_mapping(const struct folio *folio)
105  {
106  	unsigned long mapping = (unsigned long)folio->mapping;
107  
108  	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
109  }
110  
111  /*
112   * This is a file-backed mapping, and is about to be memory mapped - invoke its
113   * mmap hook and safely handle error conditions. On error, VMA hooks will be
114   * mutated.
115   *
116   * @file: File which backs the mapping.
117   * @vma:  VMA which we are mapping.
118   *
119   * Returns: 0 on success, or an error code otherwise.
120   */
121  static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
122  {
123  	int err = call_mmap(file, vma);
124  
125  	if (likely(!err))
126  		return 0;
127  
128  	/*
129  	 * OK, we tried to call the file hook for mmap(), but an error
130  	 * arose. The mapping is in an inconsistent state and we must not invoke
131  	 * any further hooks on it.
132  	 */
133  	vma->vm_ops = &vma_dummy_vm_ops;
134  
135  	return err;
136  }
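
/*
 * Illustrative sketch of the intended calling pattern (the label name is
 * hypothetical): the caller propagates the error and tears the mapping down,
 * and must not invoke any further hooks on the VMA:
 *
 *	error = mmap_file(file, vma);
 *	if (error)
 *		goto unmap_and_free_vma;
 */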
137  
138  /*
139   * If the VMA has a close hook then close it, and since closing it might leave
140   * it in an inconsistent state which makes the use of any hooks suspect, clear
141   * them down by installing dummy empty hooks.
142   */
143  static inline void vma_close(struct vm_area_struct *vma)
144  {
145  	if (vma->vm_ops && vma->vm_ops->close) {
146  		vma->vm_ops->close(vma);
147  
148  		/*
149  		 * The mapping is in an inconsistent state, and no further hooks
150  		 * may be invoked upon it.
151  		 */
152  		vma->vm_ops = &vma_dummy_vm_ops;
153  	}
154  }
155  
156  #ifdef CONFIG_MMU
157  
158  /* Flags for folio_pte_batch(). */
159  typedef int __bitwise fpb_t;
160  
161  /* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
162  #define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))
163  
164  /* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
165  #define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))
166  
167  static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
168  {
169  	if (flags & FPB_IGNORE_DIRTY)
170  		pte = pte_mkclean(pte);
171  	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
172  		pte = pte_clear_soft_dirty(pte);
173  	return pte_wrprotect(pte_mkold(pte));
174  }
175  
176  /**
177   * folio_pte_batch - detect a PTE batch for a large folio
178   * @folio: The large folio to detect a PTE batch for.
179   * @addr: The user virtual address the first page is mapped at.
180   * @start_ptep: Page table pointer for the first entry.
181   * @pte: Page table entry for the first page.
182   * @max_nr: The maximum number of table entries to consider.
183   * @flags: Flags to modify the PTE batch semantics.
184   * @any_writable: Optional pointer to indicate whether any entry except the
185   *		  first one is writable.
186   * @any_young: Optional pointer to indicate whether any entry except the
187   *		  first one is young.
188   * @any_dirty: Optional pointer to indicate whether any entry except the
189   *		  first one is dirty.
190   *
191   * Detect a PTE batch: consecutive (present) PTEs that map consecutive
192   * pages of the same large folio.
193   *
194   * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
195   * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
196   * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
197   *
198   * start_ptep must map any page of the folio. max_nr must be at least one and
199   * must be limited by the caller so scanning cannot exceed a single page table.
200   *
201   * Return: the number of table entries in the batch.
202   */
203  static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
204  		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
205  		bool *any_writable, bool *any_young, bool *any_dirty)
206  {
207  	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
208  	const pte_t *end_ptep = start_ptep + max_nr;
209  	pte_t expected_pte, *ptep;
210  	bool writable, young, dirty;
211  	int nr;
212  
213  	if (any_writable)
214  		*any_writable = false;
215  	if (any_young)
216  		*any_young = false;
217  	if (any_dirty)
218  		*any_dirty = false;
219  
220  	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
221  	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
222  	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
223  
224  	nr = pte_batch_hint(start_ptep, pte);
225  	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
226  	ptep = start_ptep + nr;
227  
228  	while (ptep < end_ptep) {
229  		pte = ptep_get(ptep);
230  		if (any_writable)
231  			writable = !!pte_write(pte);
232  		if (any_young)
233  			young = !!pte_young(pte);
234  		if (any_dirty)
235  			dirty = !!pte_dirty(pte);
236  		pte = __pte_batch_clear_ignored(pte, flags);
237  
238  		if (!pte_same(pte, expected_pte))
239  			break;
240  
241  		/*
242  		 * Stop immediately once we have reached the end of the folio. In
243  		 * corner cases the next PFN might fall into a different
244  		 * folio.
245  		 */
246  		if (pte_pfn(pte) >= folio_end_pfn)
247  			break;
248  
249  		if (any_writable)
250  			*any_writable |= writable;
251  		if (any_young)
252  			*any_young |= young;
253  		if (any_dirty)
254  			*any_dirty |= dirty;
255  
256  		nr = pte_batch_hint(ptep, pte);
257  		expected_pte = pte_advance_pfn(expected_pte, nr);
258  		ptep += nr;
259  	}
260  
261  	return min(ptep - start_ptep, max_nr);
262  }
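
/*
 * Illustrative sketch of a caller (local variable names are hypothetical,
 * and "end" is assumed to be already clamped to the end of the current page
 * table): batch over a large folio while holding the PTE lock:
 *
 *	int max_nr = (end - addr) >> PAGE_SHIFT;
 *	bool any_writable;
 *	int nr;
 *
 *	nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
 *			     FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY,
 *			     &any_writable, NULL, NULL);
 *	addr += nr * PAGE_SIZE;
 *	pte += nr;
 */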
263  
264  /**
265   * pte_move_swp_offset - Move the swap entry offset field of a swap pte
266   *	 forward or backward by delta
267   * @pte: The initial pte state; is_swap_pte(pte) must be true and
268   *	 non_swap_entry() must be false.
269   * @delta: The direction and the offset we are moving; forward if delta
270   *	 is positive; backward if delta is negative
271   *
272   * Moves the swap offset, while maintaining all other fields, including
273   * swap type, and any swp pte bits. The resulting pte is returned.
274   */
275  static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
276  {
277  	swp_entry_t entry = pte_to_swp_entry(pte);
278  	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
279  						   (swp_offset(entry) + delta)));
280  
281  	if (pte_swp_soft_dirty(pte))
282  		new = pte_swp_mksoft_dirty(new);
283  	if (pte_swp_exclusive(pte))
284  		new = pte_swp_mkexclusive(new);
285  	if (pte_swp_uffd_wp(pte))
286  		new = pte_swp_mkuffd_wp(new);
287  
288  	return new;
289  }
290  
291  
292  /**
293   * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
294   * @pte: The initial pte state; is_swap_pte(pte) must be true and
295   *	 non_swap_entry() must be false.
296   *
297   * Increments the swap offset, while maintaining all other fields, including
298   * swap type, and any swp pte bits. The resulting pte is returned.
299   */
300  static inline pte_t pte_next_swp_offset(pte_t pte)
301  {
302  	return pte_move_swp_offset(pte, 1);
303  }
304  
305  /**
306   * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
307   * @start_ptep: Page table pointer for the first entry.
308   * @max_nr: The maximum number of table entries to consider.
309   * @pte: Page table entry for the first entry.
310   *
311   * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
312   * containing swap entries all with consecutive offsets and targeting the same
313   * swap type, all with matching swp pte bits.
314   *
315   * max_nr must be at least one and must be limited by the caller so scanning
316   * cannot exceed a single page table.
317   *
318   * Return: the number of table entries in the batch.
319   */
320  static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
321  {
322  	pte_t expected_pte = pte_next_swp_offset(pte);
323  	const pte_t *end_ptep = start_ptep + max_nr;
324  	swp_entry_t entry = pte_to_swp_entry(pte);
325  	pte_t *ptep = start_ptep + 1;
326  	unsigned short cgroup_id;
327  
328  	VM_WARN_ON(max_nr < 1);
329  	VM_WARN_ON(!is_swap_pte(pte));
330  	VM_WARN_ON(non_swap_entry(entry));
331  
332  	cgroup_id = lookup_swap_cgroup_id(entry);
333  	while (ptep < end_ptep) {
334  		pte = ptep_get(ptep);
335  
336  		if (!pte_same(pte, expected_pte))
337  			break;
338  		if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
339  			break;
340  		expected_pte = pte_next_swp_offset(expected_pte);
341  		ptep++;
342  	}
343  
344  	return ptep - start_ptep;
345  }
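
/*
 * Illustrative sketch of a caller (variable names are hypothetical, "end"
 * again assumed clamped to the current page table): find how many consecutive
 * swap entries start at this PTE, then process and skip them in one go:
 *
 *	int max_nr = (end - addr) >> PAGE_SHIFT;
 *	int nr = swap_pte_batch(pte, max_nr, ptent);
 *
 *	addr += nr * PAGE_SIZE;
 *	pte += nr;
 */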
346  #endif /* CONFIG_MMU */
347  
348  void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
349  						int nr_throttled);
350  static inline void acct_reclaim_writeback(struct folio *folio)
351  {
352  	pg_data_t *pgdat = folio_pgdat(folio);
353  	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
354  
355  	if (nr_throttled)
356  		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
357  }
358  
359  static inline void wake_throttle_isolated(pg_data_t *pgdat)
360  {
361  	wait_queue_head_t *wqh;
362  
363  	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
364  	if (waitqueue_active(wqh))
365  		wake_up(wqh);
366  }
367  
368  vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
369  static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
370  {
371  	vm_fault_t ret = __vmf_anon_prepare(vmf);
372  
373  	if (unlikely(ret & VM_FAULT_RETRY))
374  		vma_end_read(vmf->vma);
375  	return ret;
376  }
377  
378  vm_fault_t do_swap_page(struct vm_fault *vmf);
379  void folio_rotate_reclaimable(struct folio *folio);
380  bool __folio_end_writeback(struct folio *folio);
381  void deactivate_file_folio(struct folio *folio);
382  void folio_activate(struct folio *folio);
383  
384  void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
385  		   struct vm_area_struct *start_vma, unsigned long floor,
386  		   unsigned long ceiling, bool mm_wr_locked);
387  void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
388  
389  struct zap_details;
390  void unmap_page_range(struct mmu_gather *tlb,
391  			     struct vm_area_struct *vma,
392  			     unsigned long addr, unsigned long end,
393  			     struct zap_details *details);
394  
395  void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
396  		unsigned int order);
397  void force_page_cache_ra(struct readahead_control *, unsigned long nr);
398  static inline void force_page_cache_readahead(struct address_space *mapping,
399  		struct file *file, pgoff_t index, unsigned long nr_to_read)
400  {
401  	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
402  	force_page_cache_ra(&ractl, nr_to_read);
403  }
404  
405  unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
406  		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
407  unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
408  		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
409  void filemap_free_folio(struct address_space *mapping, struct folio *folio);
410  int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
411  bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
412  		loff_t end);
413  long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
414  unsigned long mapping_try_invalidate(struct address_space *mapping,
415  		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
416  
417  /**
418   * folio_evictable - Test whether a folio is evictable.
419   * @folio: The folio to test.
420   *
421   * Test whether @folio is evictable -- i.e., should be placed on
422   * active/inactive lists vs unevictable list.
423   *
424   * Reasons folio might not be evictable:
425   * 1. folio's mapping marked unevictable
426   * 2. One of the pages in the folio is part of an mlocked VMA
427   */
428  static inline bool folio_evictable(struct folio *folio)
429  {
430  	bool ret;
431  
432  	/* Prevent address_space of inode and swap cache from being freed */
433  	rcu_read_lock();
434  	ret = !mapping_unevictable(folio_mapping(folio)) &&
435  			!folio_test_mlocked(folio);
436  	rcu_read_unlock();
437  	return ret;
438  }
439  
440  /*
441   * Turn a non-refcounted page (->_refcount == 0) into a refcounted page
442   * with a count of one.
443   */
444  static inline void set_page_refcounted(struct page *page)
445  {
446  	VM_BUG_ON_PAGE(PageTail(page), page);
447  	VM_BUG_ON_PAGE(page_ref_count(page), page);
448  	set_page_count(page, 1);
449  }
450  
451  /*
452   * Return true if a folio needs ->release_folio() calling upon it.
453   */
454  static inline bool folio_needs_release(struct folio *folio)
455  {
456  	struct address_space *mapping = folio_mapping(folio);
457  
458  	return folio_has_private(folio) ||
459  		(mapping && mapping_release_always(mapping));
460  }
461  
462  extern unsigned long highest_memmap_pfn;
463  
464  /*
465   * Maximum number of reclaim retries without progress before the OOM
466   * killer is considered the only way forward.
467   */
468  #define MAX_RECLAIM_RETRIES 16
469  
470  /*
471   * in mm/vmscan.c:
472   */
473  bool folio_isolate_lru(struct folio *folio);
474  void folio_putback_lru(struct folio *folio);
475  extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
476  
477  /*
478   * in mm/rmap.c:
479   */
480  pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
481  
482  /*
483   * in mm/page_alloc.c
484   */
485  #define K(x) ((x) << (PAGE_SHIFT-10))
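
/*
 * For example, with 4kB pages K(x) is x * 4, so K(zone_page_state(zone,
 * NR_FREE_PAGES)) gives a zone's free memory in kilobytes, which is the form
 * used in the show_free_areas() output.
 */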
486  
487  extern char * const zone_names[MAX_NR_ZONES];
488  
489  /* perform sanity checks on struct pages being allocated or freed */
490  DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
491  
492  extern int min_free_kbytes;
493  
494  void setup_per_zone_wmarks(void);
495  void calculate_min_free_kbytes(void);
496  int __meminit init_per_zone_wmark_min(void);
497  void page_alloc_sysctl_init(void);
498  
499  /*
500   * Structure for holding the mostly immutable allocation parameters passed
501   * between functions involved in allocations, including the alloc_pages*
502   * family of functions.
503   *
504   * nodemask, migratetype and highest_zoneidx are initialized only once in
505   * __alloc_pages() and then never change.
506   *
507   * zonelist, preferred_zone and highest_zoneidx are set first in
508   * __alloc_pages() for the fast path, and might be later changed
509   * in __alloc_pages_slowpath(). All other functions pass the whole structure
510   * by a const pointer.
511   */
512  struct alloc_context {
513  	struct zonelist *zonelist;
514  	nodemask_t *nodemask;
515  	struct zoneref *preferred_zoneref;
516  	int migratetype;
517  
518  	/*
519  	 * highest_zoneidx represents highest usable zone index of
520  	 * the allocation request. Due to the nature of the zone,
521  	 * memory on a zone lower than highest_zoneidx will be
522  	 * protected by lowmem_reserve[highest_zoneidx].
523  	 *
524  	 * highest_zoneidx is also used by reclaim/compaction to limit
525  	 * the target zone, since zones higher than this index cannot be
526  	 * used for this allocation request.
527  	 */
528  	enum zone_type highest_zoneidx;
529  	bool spread_dirty_pages;
530  };
531  
532  /*
533   * This function returns the order of a free page in the buddy system. In
534   * general, page_zone(page)->lock must be held by the caller to prevent the
535   * page from being allocated in parallel and returning garbage as the order.
536   * If a caller does not hold page_zone(page)->lock, it must guarantee that the
537   * page cannot be allocated or merged in parallel. Alternatively, it must
538   * handle invalid values gracefully, and use buddy_order_unsafe() below.
539   */
540  static inline unsigned int buddy_order(struct page *page)
541  {
542  	/* PageBuddy() must be checked by the caller */
543  	return page_private(page);
544  }
545  
546  /*
547   * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
548   * PageBuddy() should be checked first by the caller to minimize race window,
549   * and invalid values must be handled gracefully.
550   *
551   * READ_ONCE is used so that if the caller assigns the result into a local
552   * variable and e.g. tests it for valid range before using, the compiler cannot
553   * decide to remove the variable and inline the page_private(page) multiple
554   * times, potentially observing different values in the tests and the actual
555   * use of the result.
556   */
557  #define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
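
/*
 * Illustrative sketch of the pattern described above (hypothetical caller):
 * read the possibly-racy order once into a local, range-check it, and only
 * then use it:
 *
 *	unsigned int order = buddy_order_unsafe(buddy);
 *
 *	if (order >= NR_PAGE_ORDERS)
 *		return false;
 */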
558  
559  /*
560   * This function checks whether a page is free && is the buddy.
561   * We can coalesce a page and its buddy if
562   * (a) the buddy is not in a hole (check before calling!) &&
563   * (b) the buddy is in the buddy system &&
564   * (c) a page and its buddy have the same order &&
565   * (d) a page and its buddy are in the same zone.
566   *
567   * For recording whether a page is in the buddy system, we set PageBuddy.
568   * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
569   *
570   * For recording page's order, we use page_private(page).
571   */
572  static inline bool page_is_buddy(struct page *page, struct page *buddy,
573  				 unsigned int order)
574  {
575  	if (!page_is_guard(buddy) && !PageBuddy(buddy))
576  		return false;
577  
578  	if (buddy_order(buddy) != order)
579  		return false;
580  
581  	/*
582  	 * zone check is done late to avoid uselessly calculating
583  	 * zone/node ids for pages that could never merge.
584  	 */
585  	if (page_zone_id(page) != page_zone_id(buddy))
586  		return false;
587  
588  	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
589  
590  	return true;
591  }
592  
593  /*
594   * Locate the struct page for both the matching buddy in our
595   * pair (buddy1) and the combined O(n+1) page they form (page).
596   *
597   * 1) Any buddy B1 will have an order O twin B2 which satisfies
598   * the following equation:
599   *     B2 = B1 ^ (1 << O)
600   * For example, if the starting buddy (buddy2) is #8 its order
601   * 1 buddy is #10:
602   *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
603   *
604   * 2) Any buddy B will have an order O+1 parent P which
605   * satisfies the following equation:
606   *     P = B & ~(1 << O)
607   *
608   * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
609   */
610  static inline unsigned long
611  __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
612  {
613  	return page_pfn ^ (1 << order);
614  }
615  
616  /*
617   * Find the buddy of @page and validate it.
618   * @page: The input page
619   * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
620   *       function is used in the performance-critical __free_one_page().
621   * @order: The order of the page
622   * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
623   *             page_to_pfn().
624   *
625   * The found buddy can be a non PageBuddy, out of @page's zone, or its order
626   * may not be the same as @page's. Validation is necessary before using it.
627   *
628   * Return: the found buddy page or NULL if not found.
629   */
630  static inline struct page *find_buddy_page_pfn(struct page *page,
631  			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
632  {
633  	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
634  	struct page *buddy;
635  
636  	buddy = page + (__buddy_pfn - pfn);
637  	if (buddy_pfn)
638  		*buddy_pfn = __buddy_pfn;
639  
640  	if (page_is_buddy(page, buddy, order))
641  		return buddy;
642  	return NULL;
643  }
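
/*
 * Illustrative sketch of a merge loop in the style of __free_one_page()
 * (heavily simplified, variable names hypothetical):
 *
 *	while (order < MAX_PAGE_ORDER) {
 *		unsigned long buddy_pfn;
 *		struct page *buddy;
 *
 *		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *		if (!buddy)
 *			break;
 *
 *		(remove buddy from its free list, combine, and continue with
 *		 the merged page at order + 1)
 *	}
 */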
644  
645  extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
646  				unsigned long end_pfn, struct zone *zone);
647  
648  static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
649  				unsigned long end_pfn, struct zone *zone)
650  {
651  	if (zone->contiguous)
652  		return pfn_to_page(start_pfn);
653  
654  	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
655  }
656  
657  void set_zone_contiguous(struct zone *zone);
658  
659  static inline void clear_zone_contiguous(struct zone *zone)
660  {
661  	zone->contiguous = false;
662  }
663  
664  extern int __isolate_free_page(struct page *page, unsigned int order);
665  extern void __putback_isolated_page(struct page *page, unsigned int order,
666  				    int mt);
667  extern void memblock_free_pages(struct page *page, unsigned long pfn,
668  					unsigned int order);
669  extern void __free_pages_core(struct page *page, unsigned int order,
670  		enum meminit_context context);
671  
672  /*
673   * This will have no effect, other than possibly generating a warning, if the
674   * caller passes in a non-large folio.
675   */
676  static inline void folio_set_order(struct folio *folio, unsigned int order)
677  {
678  	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
679  		return;
680  
681  	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
682  #ifdef CONFIG_64BIT
683  	folio->_folio_nr_pages = 1U << order;
684  #endif
685  }
686  
687  bool __folio_unqueue_deferred_split(struct folio *folio);
688  static inline bool folio_unqueue_deferred_split(struct folio *folio)
689  {
690  	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
691  		return false;
692  
693  	/*
694  	 * At this point, there is no one trying to add the folio to
695  	 * deferred_list. If folio is not in deferred_list, it's safe
696  	 * to check without acquiring the split_queue_lock.
697  	 */
698  	if (data_race(list_empty(&folio->_deferred_list)))
699  		return false;
700  
701  	return __folio_unqueue_deferred_split(folio);
702  }
703  
704  static inline struct folio *page_rmappable_folio(struct page *page)
705  {
706  	struct folio *folio = (struct folio *)page;
707  
708  	if (folio && folio_test_large(folio))
709  		folio_set_large_rmappable(folio);
710  	return folio;
711  }
712  
713  static inline void prep_compound_head(struct page *page, unsigned int order)
714  {
715  	struct folio *folio = (struct folio *)page;
716  
717  	folio_set_order(folio, order);
718  	atomic_set(&folio->_large_mapcount, -1);
719  	atomic_set(&folio->_entire_mapcount, -1);
720  	atomic_set(&folio->_nr_pages_mapped, 0);
721  	atomic_set(&folio->_pincount, 0);
722  	if (order > 1)
723  		INIT_LIST_HEAD(&folio->_deferred_list);
724  }
725  
726  static inline void prep_compound_tail(struct page *head, int tail_idx)
727  {
728  	struct page *p = head + tail_idx;
729  
730  	p->mapping = TAIL_MAPPING;
731  	set_compound_head(p, head);
732  	set_page_private(p, 0);
733  }
734  
735  extern void prep_compound_page(struct page *page, unsigned int order);
736  
737  extern void post_alloc_hook(struct page *page, unsigned int order,
738  					gfp_t gfp_flags);
739  extern bool free_pages_prepare(struct page *page, unsigned int order);
740  
741  extern int user_min_free_kbytes;
742  
743  void free_unref_page(struct page *page, unsigned int order);
744  void free_unref_folios(struct folio_batch *fbatch);
745  
746  extern void zone_pcp_reset(struct zone *zone);
747  extern void zone_pcp_disable(struct zone *zone);
748  extern void zone_pcp_enable(struct zone *zone);
749  extern void zone_pcp_init(struct zone *zone);
750  
751  extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
752  			  phys_addr_t min_addr,
753  			  int nid, bool exact_nid);
754  
755  void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
756  		unsigned long, enum meminit_context, struct vmem_altmap *, int);
757  
758  #if defined CONFIG_COMPACTION || defined CONFIG_CMA
759  
760  /*
761   * in mm/compaction.c
762   */
763  /*
764   * compact_control is used to track pages being migrated and the free pages
765   * they are being migrated to during memory compaction. The free_pfn starts
766   * at the end of a zone and migrate_pfn begins at the start. Movable pages
767   * are moved to the end of a zone during a compaction run and the run
768   * completes when free_pfn <= migrate_pfn
769   */
770  struct compact_control {
771  	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
772  	struct list_head migratepages;	/* List of pages being migrated */
773  	unsigned int nr_freepages;	/* Number of isolated free pages */
774  	unsigned int nr_migratepages;	/* Number of pages to migrate */
775  	unsigned long free_pfn;		/* isolate_freepages search base */
776  	/*
777  	 * Acts as an in/out parameter to page isolation for migration.
778  	 * isolate_migratepages uses it as a search base.
779  	 * isolate_migratepages_block will update the value to the next pfn
780  	 * after the last isolated one.
781  	 */
782  	unsigned long migrate_pfn;
783  	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
784  	struct zone *zone;
785  	unsigned long total_migrate_scanned;
786  	unsigned long total_free_scanned;
787  	unsigned short fast_search_fail;/* failures to use free list searches */
788  	short search_order;		/* order to start a fast search at */
789  	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
790  	int order;			/* order a direct compactor needs */
791  	int migratetype;		/* migratetype of direct compactor */
792  	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
793  	const int highest_zoneidx;	/* zone index of a direct compactor */
794  	enum migrate_mode mode;		/* Async or sync migration mode */
795  	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
796  	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
797  	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
798  	bool direct_compaction;		/* False from kcompactd or /proc/... */
799  	bool proactive_compaction;	/* kcompactd proactive compaction */
800  	bool whole_zone;		/* Whole zone should/has been scanned */
801  	bool contended;			/* Signal lock contention */
802  	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
803  					 * when there are potentially transient
804  					 * isolation or migration failures to
805  					 * ensure forward progress.
806  					 */
807  	bool alloc_contig;		/* alloc_contig_range allocation */
808  };
809  
810  /*
811   * Used in direct compaction when a page should be taken from the freelists
812   * immediately when one is created during the free path.
813   */
814  struct capture_control {
815  	struct compact_control *cc;
816  	struct page *page;
817  };
818  
819  unsigned long
820  isolate_freepages_range(struct compact_control *cc,
821  			unsigned long start_pfn, unsigned long end_pfn);
822  int
823  isolate_migratepages_range(struct compact_control *cc,
824  			   unsigned long low_pfn, unsigned long end_pfn);
825  
826  int __alloc_contig_migrate_range(struct compact_control *cc,
827  					unsigned long start, unsigned long end,
828  					int migratetype);
829  
830  /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
831  void init_cma_reserved_pageblock(struct page *page);
832  
833  #endif /* CONFIG_COMPACTION || CONFIG_CMA */
834  
835  int find_suitable_fallback(struct free_area *area, unsigned int order,
836  			int migratetype, bool only_stealable, bool *can_steal);
837  
838  static inline bool free_area_empty(struct free_area *area, int migratetype)
839  {
840  	return list_empty(&area->free_list[migratetype]);
841  }
842  
843  /* mm/util.c */
844  struct anon_vma *folio_anon_vma(struct folio *folio);
845  
846  #ifdef CONFIG_MMU
847  void unmap_mapping_folio(struct folio *folio);
848  extern long populate_vma_page_range(struct vm_area_struct *vma,
849  		unsigned long start, unsigned long end, int *locked);
850  extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
851  		unsigned long end, bool write, int *locked);
852  extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
853  			       unsigned long bytes);
854  
855  /*
856   * NOTE: This function can't tell whether the folio is "fully mapped" in the
857   * range.
858   * "fully mapped" means all the pages of folio is associated with the page
859   * table of range while this function just check whether the folio range is
860   * within the range [start, end). Function caller needs to do page table
861   * check if it cares about the page table association.
862   *
863   * Typical usage (like mlock or madvise) is:
864   * The caller knows at least one page of the folio is associated with the page
865   * table of the VMA, and the range [start, end) intersects the VMA range. The
866   * caller wants to know whether the folio is fully associated with the range,
867   * so it calls this function first to check whether the folio is in the range,
868   * and then checks the page table to know whether the folio is fully mapped.
869   */
870  static inline bool
871  folio_within_range(struct folio *folio, struct vm_area_struct *vma,
872  		unsigned long start, unsigned long end)
873  {
874  	pgoff_t pgoff, addr;
875  	unsigned long vma_pglen = vma_pages(vma);
876  
877  	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
878  	if (start > end)
879  		return false;
880  
881  	if (start < vma->vm_start)
882  		start = vma->vm_start;
883  
884  	if (end > vma->vm_end)
885  		end = vma->vm_end;
886  
887  	pgoff = folio_pgoff(folio);
888  
889  	/* if folio start address is not in vma range */
890  	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
891  		return false;
892  
893  	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
894  
895  	return !(addr < start || end - addr < folio_size(folio));
896  }
897  
898  static inline bool
899  folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
900  {
901  	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
902  }
903  
904  /*
905   * mlock_vma_folio() and munlock_vma_folio():
906   * should be called with vma's mmap_lock held for read or write,
907   * under page table lock for the pte/pmd being added or removed.
908   *
909   * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
910   * the end of folio_remove_rmap_*(); but new anon folios are managed by
911   * folio_add_lru_vma() calling mlock_new_folio().
912   */
913  void mlock_folio(struct folio *folio);
914  static inline void mlock_vma_folio(struct folio *folio,
915  				struct vm_area_struct *vma)
916  {
917  	/*
918  	 * The VM_SPECIAL check here serves two purposes.
919  	 * 1) VM_IO check prevents migration from double-counting during mlock.
920  	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
921  	 *    is never left set on a VM_SPECIAL vma, there is an interval while
922  	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
923  	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
924  	 */
925  	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
926  		mlock_folio(folio);
927  }
928  
929  void munlock_folio(struct folio *folio);
930  static inline void munlock_vma_folio(struct folio *folio,
931  					struct vm_area_struct *vma)
932  {
933  	/*
934  	 * Munlock whenever this function is called. Ideally, we should only
935  	 * munlock if some page of the folio is being unmapped from the VMA,
936  	 * leaving the folio no longer fully mapped to it.
937  	 *
938  	 * But it's not easy to confirm that's the situation. So we
939  	 * always munlock the folio and page reclaim will correct it
940  	 * if it's wrong.
941  	 */
942  	if (unlikely(vma->vm_flags & VM_LOCKED))
943  		munlock_folio(folio);
944  }
945  
946  void mlock_new_folio(struct folio *folio);
947  bool need_mlock_drain(int cpu);
948  void mlock_drain_local(void);
949  void mlock_drain_remote(int cpu);
950  
951  extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
952  
953  /**
954   * vma_address - Find the virtual address a page range is mapped at
955   * @vma: The vma which maps this object.
956   * @pgoff: The page offset within its object.
957   * @nr_pages: The number of pages to consider.
958   *
959   * If any page in this range is mapped by this VMA, return the first address
960   * where any of these pages appear.  Otherwise, return -EFAULT.
961   */
962  static inline unsigned long vma_address(struct vm_area_struct *vma,
963  		pgoff_t pgoff, unsigned long nr_pages)
964  {
965  	unsigned long address;
966  
967  	if (pgoff >= vma->vm_pgoff) {
968  		address = vma->vm_start +
969  			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
970  		/* Check for address beyond vma (or wrapped through 0?) */
971  		if (address < vma->vm_start || address >= vma->vm_end)
972  			address = -EFAULT;
973  	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
974  		/* Test above avoids possibility of wrap to 0 on 32-bit */
975  		address = vma->vm_start;
976  	} else {
977  		address = -EFAULT;
978  	}
979  	return address;
980  }
981  
982  /*
983   * Then at what user virtual address will none of the range be found in vma?
984   * Assumes that vma_address() already returned a good starting address.
985   */
986  static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
987  {
988  	struct vm_area_struct *vma = pvmw->vma;
989  	pgoff_t pgoff;
990  	unsigned long address;
991  
992  	/* Common case, plus ->pgoff is invalid for KSM */
993  	if (pvmw->nr_pages == 1)
994  		return pvmw->address + PAGE_SIZE;
995  
996  	pgoff = pvmw->pgoff + pvmw->nr_pages;
997  	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
998  	/* Check for address beyond vma (or wrapped through 0?) */
999  	if (address < vma->vm_start || address > vma->vm_end)
1000  		address = vma->vm_end;
1001  	return address;
1002  }
1003  
1004  static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
1005  						    struct file *fpin)
1006  {
1007  	int flags = vmf->flags;
1008  
1009  	if (fpin)
1010  		return fpin;
1011  
1012  	/*
1013  	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
1014  	 * anything, so we only pin the file and drop the mmap_lock if only
1015  	 * FAULT_FLAG_ALLOW_RETRY is set and this is the first attempt.
1016  	 */
1017  	if (fault_flag_allow_retry_first(flags) &&
1018  	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
1019  		fpin = get_file(vmf->vma->vm_file);
1020  		release_fault_lock(vmf);
1021  	}
1022  	return fpin;
1023  }
1024  #else /* !CONFIG_MMU */
1025  static inline void unmap_mapping_folio(struct folio *folio) { }
1026  static inline void mlock_new_folio(struct folio *folio) { }
1027  static inline bool need_mlock_drain(int cpu) { return false; }
1028  static inline void mlock_drain_local(void) { }
1029  static inline void mlock_drain_remote(int cpu) { }
1030  static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
1031  {
1032  }
1033  #endif /* !CONFIG_MMU */
1034  
1035  /* Memory initialisation debug and verification */
1036  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1037  DECLARE_STATIC_KEY_TRUE(deferred_pages);
1038  
1039  bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
1040  #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1041  
1042  enum mminit_level {
1043  	MMINIT_WARNING,
1044  	MMINIT_VERIFY,
1045  	MMINIT_TRACE
1046  };
1047  
1048  #ifdef CONFIG_DEBUG_MEMORY_INIT
1049  
1050  extern int mminit_loglevel;
1051  
1052  #define mminit_dprintk(level, prefix, fmt, arg...) \
1053  do { \
1054  	if (level < mminit_loglevel) { \
1055  		if (level <= MMINIT_WARNING) \
1056  			pr_warn("mminit::" prefix " " fmt, ##arg);	\
1057  		else \
1058  			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
1059  	} \
1060  } while (0)
1061  
1062  extern void mminit_verify_pageflags_layout(void);
1063  extern void mminit_verify_zonelist(void);
1064  #else
1065  
1066  static inline void mminit_dprintk(enum mminit_level level,
1067  				const char *prefix, const char *fmt, ...)
1068  {
1069  }
1070  
1071  static inline void mminit_verify_pageflags_layout(void)
1072  {
1073  }
1074  
1075  static inline void mminit_verify_zonelist(void)
1076  {
1077  }
1078  #endif /* CONFIG_DEBUG_MEMORY_INIT */
1079  
1080  #define NODE_RECLAIM_NOSCAN	-2
1081  #define NODE_RECLAIM_FULL	-1
1082  #define NODE_RECLAIM_SOME	0
1083  #define NODE_RECLAIM_SUCCESS	1
1084  
1085  #ifdef CONFIG_NUMA
1086  extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
1087  extern int find_next_best_node(int node, nodemask_t *used_node_mask);
1088  #else
1089  static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
1090  				unsigned int order)
1091  {
1092  	return NODE_RECLAIM_NOSCAN;
1093  }
1094  static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
1095  {
1096  	return NUMA_NO_NODE;
1097  }
1098  #endif
1099  
1100  /*
1101   * mm/memory-failure.c
1102   */
1103  #ifdef CONFIG_MEMORY_FAILURE
1104  void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu);
1105  void shake_folio(struct folio *folio);
1106  extern int hwpoison_filter(struct page *p);
1107  
1108  extern u32 hwpoison_filter_dev_major;
1109  extern u32 hwpoison_filter_dev_minor;
1110  extern u64 hwpoison_filter_flags_mask;
1111  extern u64 hwpoison_filter_flags_value;
1112  extern u64 hwpoison_filter_memcg;
1113  extern u32 hwpoison_filter_enable;
1114  #define MAGIC_HWPOISON	0x48575053U	/* HWPS */
1115  void SetPageHWPoisonTakenOff(struct page *page);
1116  void ClearPageHWPoisonTakenOff(struct page *page);
1117  bool take_page_off_buddy(struct page *page);
1118  bool put_page_back_buddy(struct page *page);
1119  struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
1120  void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
1121  		     struct vm_area_struct *vma, struct list_head *to_kill,
1122  		     unsigned long ksm_addr);
1123  unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
1124  
1125  #else
1126  static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
1127  {
1128  }
1129  #endif
1130  
1131  extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
1132          unsigned long, unsigned long,
1133          unsigned long, unsigned long);
1134  
1135  extern void set_pageblock_order(void);
1136  struct folio *alloc_migrate_folio(struct folio *src, unsigned long private);
1137  unsigned long reclaim_pages(struct list_head *folio_list);
1138  unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1139  					    struct list_head *folio_list);
1140  /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1141  #define ALLOC_WMARK_MIN		WMARK_MIN
1142  #define ALLOC_WMARK_LOW		WMARK_LOW
1143  #define ALLOC_WMARK_HIGH	WMARK_HIGH
1144  #define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
1145  
1146  /* Mask to get the watermark bits */
1147  #define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
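
/*
 * Illustrative sketch (hypothetical local variable): the watermark index is
 * extracted from alloc_flags to pick which zone watermark to check against:
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 */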
1148  
1149  /*
1150   * Only MMU archs have async OOM victim reclaim - aka the oom_reaper - so
1151   * we cannot assume that reduced access to memory reserves is sufficient
1152   * for !MMU.
1153   */
1154  #ifdef CONFIG_MMU
1155  #define ALLOC_OOM		0x08
1156  #else
1157  #define ALLOC_OOM		ALLOC_NO_WATERMARKS
1158  #endif
1159  
1160  #define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
1161  				       * to 25% of the min watermark or
1162  				       * 62.5% if __GFP_HIGH is set.
1163  				       */
1164  #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
1165  				       * of the min watermark.
1166  				       */
1167  #define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
1168  #define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
1169  #ifdef CONFIG_ZONE_DMA32
1170  #define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
1171  #else
1172  #define ALLOC_NOFRAGMENT	  0x0
1173  #endif
1174  #define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
1175  #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
1176  
1177  /* Flags that allow allocations below the min watermark. */
1178  #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
1179  
1180  enum ttu_flags;
1181  struct tlbflush_unmap_batch;
1182  
1183  
1184  /*
1185   * only for MM internal work items which do not depend on
1186   * any allocations or locks which might depend on allocations
1187   */
1188  extern struct workqueue_struct *mm_percpu_wq;
1189  
1190  #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1191  void try_to_unmap_flush(void);
1192  void try_to_unmap_flush_dirty(void);
1193  void flush_tlb_batched_pending(struct mm_struct *mm);
1194  #else
1195  static inline void try_to_unmap_flush(void)
1196  {
1197  }
1198  static inline void try_to_unmap_flush_dirty(void)
1199  {
1200  }
1201  static inline void flush_tlb_batched_pending(struct mm_struct *mm)
1202  {
1203  }
1204  #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
1205  
1206  extern const struct trace_print_flags pageflag_names[];
1207  extern const struct trace_print_flags vmaflag_names[];
1208  extern const struct trace_print_flags gfpflag_names[];
1209  
1210  static inline bool is_migrate_highatomic(enum migratetype migratetype)
1211  {
1212  	return migratetype == MIGRATE_HIGHATOMIC;
1213  }
1214  
1215  void setup_zone_pageset(struct zone *zone);
1216  
1217  struct migration_target_control {
1218  	int nid;		/* preferred node id */
1219  	nodemask_t *nmask;
1220  	gfp_t gfp_mask;
1221  	enum migrate_reason reason;
1222  };
1223  
1224  /*
1225   * mm/filemap.c
1226   */
1227  size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
1228  			      struct folio *folio, loff_t fpos, size_t size);
1229  
1230  /*
1231   * mm/vmalloc.c
1232   */
1233  #ifdef CONFIG_MMU
1234  void __init vmalloc_init(void);
1235  int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1236                  pgprot_t prot, struct page **pages, unsigned int page_shift);
1237  #else
1238  static inline void vmalloc_init(void)
1239  {
1240  }
1241  
1242  static inline
1243  int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1244                  pgprot_t prot, struct page **pages, unsigned int page_shift)
1245  {
1246  	return -EINVAL;
1247  }
1248  #endif
1249  
1250  int __must_check __vmap_pages_range_noflush(unsigned long addr,
1251  			       unsigned long end, pgprot_t prot,
1252  			       struct page **pages, unsigned int page_shift);
1253  
1254  void vunmap_range_noflush(unsigned long start, unsigned long end);
1255  
1256  void __vunmap_range_noflush(unsigned long start, unsigned long end);
1257  
1258  int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
1259  		      unsigned long addr, int *flags, bool writable,
1260  		      int *last_cpupid);
1261  
1262  void free_zone_device_folio(struct folio *folio);
1263  int migrate_device_coherent_folio(struct folio *folio);
1264  
1265  /*
1266   * mm/gup.c
1267   */
1268  int __must_check try_grab_folio(struct folio *folio, int refs,
1269  				unsigned int flags);
1270  
1271  /*
1272   * mm/huge_memory.c
1273   */
1274  void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1275  	       pud_t *pud, bool write);
1276  void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1277  	       pmd_t *pmd, bool write);
1278  
1279  enum {
1280  	/* mark page accessed */
1281  	FOLL_TOUCH = 1 << 16,
1282  	/* a retry, previous pass started an IO */
1283  	FOLL_TRIED = 1 << 17,
1284  	/* we are working on non-current tsk/mm */
1285  	FOLL_REMOTE = 1 << 18,
1286  	/* pages must be released via unpin_user_page */
1287  	FOLL_PIN = 1 << 19,
1288  	/* gup_fast: prevent fall-back to slow gup */
1289  	FOLL_FAST_ONLY = 1 << 20,
1290  	/* allow unlocking the mmap lock */
1291  	FOLL_UNLOCKABLE = 1 << 21,
1292  	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
1293  	FOLL_MADV_POPULATE = 1 << 22,
1294  };
1295  
1296  #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1297  			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
1298  			    FOLL_MADV_POPULATE)
1299  
1300  /*
1301   * Indicates for which pages that are write-protected in the page table,
1302   * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
1303   * GUP pin will remain consistent with the pages mapped into the page tables
1304   * of the MM.
1305   *
1306   * Temporary unmapping of PageAnonExclusive() pages or clearing of
1307   * PageAnonExclusive() has to protect against concurrent GUP:
1308   * * Ordinary GUP: Using the PT lock
1309   * * GUP-fast and fork(): mm->write_protect_seq
1310   * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1311   *    folio_try_share_anon_rmap_*()
1312   *
1313   * Must be called with the (sub)page that's actually referenced via the
1314   * page table entry, which might not necessarily be the head page for a
1315   * PTE-mapped THP.
1316   *
1317   * If the vma is NULL, we're coming from the GUP-fast path and might have
1318   * to fallback to the slow path just to lookup the vma.
1319   */
1320  static inline bool gup_must_unshare(struct vm_area_struct *vma,
1321  				    unsigned int flags, struct page *page)
1322  {
1323  	/*
1324  	 * FOLL_WRITE is implicitly handled correctly as the page table entry
1325  	 * has to be writable -- and if it references (part of) an anonymous
1326  	 * folio, that part is required to be marked exclusive.
1327  	 */
1328  	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1329  		return false;
1330  	/*
1331  	 * Note: PageAnon(page) is stable until the page is actually getting
1332  	 * freed.
1333  	 */
1334  	if (!PageAnon(page)) {
1335  		/*
1336  		 * We only care about R/O long-term pinning: R/O short-term
1337  		 * pinning does not have the semantics to observe successive
1338  		 * changes through the process page tables.
1339  		 */
1340  		if (!(flags & FOLL_LONGTERM))
1341  			return false;
1342  
1343  		/* We really need the vma ... */
1344  		if (!vma)
1345  			return true;
1346  
1347  		/*
1348  		 * ... because we only care about writable private ("COW")
1349  		 * mappings where we have to break COW early.
1350  		 */
1351  		return is_cow_mapping(vma->vm_flags);
1352  	}
1353  
1354  	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1355  	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
1356  		smp_rmb();
1357  
1358  	/*
1359  	 * Note that PageKsm() pages cannot be exclusive, and consequently,
1360  	 * cannot get pinned.
1361  	 */
1362  	return !PageAnonExclusive(page);
1363  }
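
/*
 * Illustrative sketch of the GUP-side check, simplified from the pattern used
 * in mm/gup.c (error handling abridged): a write-protected PTE that must be
 * unshared makes GUP back off so the caller can fault with FAULT_FLAG_UNSHARE:
 *
 *	if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
 *		return ERR_PTR(-EMLINK);
 */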
1364  
1365  extern bool mirrored_kernelcore;
1366  extern bool memblock_has_mirror(void);
1367  
1368  static __always_inline void vma_set_range(struct vm_area_struct *vma,
1369  					  unsigned long start, unsigned long end,
1370  					  pgoff_t pgoff)
1371  {
1372  	vma->vm_start = start;
1373  	vma->vm_end = end;
1374  	vma->vm_pgoff = pgoff;
1375  }
1376  
1377  static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1378  {
1379  	/*
1380  	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
1381  	 * enablements, because when soft-dirty is not compiled in,
1382  	 * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
1383  	 * would always be true.
1384  	 */
1385  	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
1386  		return false;
1387  
1388  	/*
1389  	 * Soft-dirty is kind of special: its tracking is enabled when the
1390  	 * VM_SOFTDIRTY vma flag is not set.
1391  	 */
1392  	return !(vma->vm_flags & VM_SOFTDIRTY);
1393  }
1394  
1395  static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
1396  {
1397  	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
1398  }
1399  
1400  static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
1401  {
1402  	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
1403  }
1404  
1405  void __meminit __init_single_page(struct page *page, unsigned long pfn,
1406  				unsigned long zone, int nid);
1407  
1408  /* shrinker related functions */
1409  unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1410  			  int priority);
1411  
1412  #ifdef CONFIG_64BIT
1413  static inline int can_do_mseal(unsigned long flags)
1414  {
1415  	if (flags)
1416  		return -EINVAL;
1417  
1418  	return 0;
1419  }
1420  
1421  #else
1422  static inline int can_do_mseal(unsigned long flags)
1423  {
1424  	return -EPERM;
1425  }
1426  #endif
1427  
1428  #ifdef CONFIG_SHRINKER_DEBUG
1429  static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1430  			struct shrinker *shrinker, const char *fmt, va_list ap)
1431  {
1432  	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1433  
1434  	return shrinker->name ? 0 : -ENOMEM;
1435  }
1436  
1437  static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1438  {
1439  	kfree_const(shrinker->name);
1440  	shrinker->name = NULL;
1441  }
1442  
1443  extern int shrinker_debugfs_add(struct shrinker *shrinker);
1444  extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1445  					      int *debugfs_id);
1446  extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1447  				    int debugfs_id);
1448  #else /* CONFIG_SHRINKER_DEBUG */
1449  static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1450  {
1451  	return 0;
1452  }
1453  static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1454  					      const char *fmt, va_list ap)
1455  {
1456  	return 0;
1457  }
1458  static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1459  {
1460  }
1461  static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1462  						     int *debugfs_id)
1463  {
1464  	*debugfs_id = -1;
1465  	return NULL;
1466  }
1467  static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1468  					   int debugfs_id)
1469  {
1470  }
1471  #endif /* CONFIG_SHRINKER_DEBUG */
1472  
1473  /* Only track the nodes of mappings with shadow entries */
1474  void workingset_update_node(struct xa_node *node);
1475  extern struct list_lru shadow_nodes;
1476  
1477  /* mremap.c */
1478  unsigned long move_page_tables(struct vm_area_struct *vma,
1479  	unsigned long old_addr, struct vm_area_struct *new_vma,
1480  	unsigned long new_addr, unsigned long len,
1481  	bool need_rmap_locks, bool for_stack);
1482  
1483  #ifdef CONFIG_UNACCEPTED_MEMORY
1484  void accept_page(struct page *page);
1485  #else /* CONFIG_UNACCEPTED_MEMORY */
1486  static inline void accept_page(struct page *page)
1487  {
1488  }
1489  #endif /* CONFIG_UNACCEPTED_MEMORY */
1490  
1491  #endif	/* __MM_INTERNAL_H */
1492