1  /* SPDX-License-Identifier: GPL-2.0 */
2  #ifndef _LINUX_MM_H
3  #define _LINUX_MM_H
4  
5  #include <linux/errno.h>
6  #include <linux/mmdebug.h>
7  #include <linux/gfp.h>
8  #include <linux/pgalloc_tag.h>
9  #include <linux/bug.h>
10  #include <linux/list.h>
11  #include <linux/mmzone.h>
12  #include <linux/rbtree.h>
13  #include <linux/atomic.h>
14  #include <linux/debug_locks.h>
15  #include <linux/mm_types.h>
16  #include <linux/mmap_lock.h>
17  #include <linux/range.h>
18  #include <linux/pfn.h>
19  #include <linux/percpu-refcount.h>
20  #include <linux/bit_spinlock.h>
21  #include <linux/shrinker.h>
22  #include <linux/resource.h>
23  #include <linux/page_ext.h>
24  #include <linux/err.h>
25  #include <linux/page-flags.h>
26  #include <linux/page_ref.h>
27  #include <linux/overflow.h>
28  #include <linux/sizes.h>
29  #include <linux/sched.h>
30  #include <linux/pgtable.h>
31  #include <linux/kasan.h>
32  #include <linux/memremap.h>
33  #include <linux/slab.h>
34  
35  struct mempolicy;
36  struct anon_vma;
37  struct anon_vma_chain;
38  struct user_struct;
39  struct pt_regs;
40  struct folio_batch;
41  
42  extern int sysctl_page_lock_unfairness;
43  
44  void mm_core_init(void);
45  void init_mm_internals(void);
46  
47  #ifndef CONFIG_NUMA		/* Don't use mapnrs, do it properly */
48  extern unsigned long max_mapnr;
49  
50  static inline void set_max_mapnr(unsigned long limit)
51  {
52  	max_mapnr = limit;
53  }
54  #else
55  static inline void set_max_mapnr(unsigned long limit) { }
56  #endif
57  
58  extern atomic_long_t _totalram_pages;
59  static inline unsigned long totalram_pages(void)
60  {
61  	return (unsigned long)atomic_long_read(&_totalram_pages);
62  }
63  
64  static inline void totalram_pages_inc(void)
65  {
66  	atomic_long_inc(&_totalram_pages);
67  }
68  
69  static inline void totalram_pages_dec(void)
70  {
71  	atomic_long_dec(&_totalram_pages);
72  }
73  
74  static inline void totalram_pages_add(long count)
75  {
76  	atomic_long_add(count, &_totalram_pages);
77  }
78  
79  extern void * high_memory;
80  extern int page_cluster;
81  extern const int page_cluster_max;
82  
83  #ifdef CONFIG_SYSCTL
84  extern int sysctl_legacy_va_layout;
85  #else
86  #define sysctl_legacy_va_layout 0
87  #endif
88  
89  #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
90  extern const int mmap_rnd_bits_min;
91  extern int mmap_rnd_bits_max __ro_after_init;
92  extern int mmap_rnd_bits __read_mostly;
93  #endif
94  #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
95  extern const int mmap_rnd_compat_bits_min;
96  extern const int mmap_rnd_compat_bits_max;
97  extern int mmap_rnd_compat_bits __read_mostly;
98  #endif
99  
100  #ifndef PHYSMEM_END
101  # ifdef MAX_PHYSMEM_BITS
102  # define PHYSMEM_END	((1ULL << MAX_PHYSMEM_BITS) - 1)
103  # else
104  # define PHYSMEM_END	(((phys_addr_t)-1)&~(1ULL<<63))
105  # endif
106  #endif
107  
108  #include <asm/page.h>
109  #include <asm/processor.h>
110  
111  #ifndef __pa_symbol
112  #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
113  #endif
114  
115  #ifndef page_to_virt
116  #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
117  #endif
118  
119  #ifndef lm_alias
120  #define lm_alias(x)	__va(__pa_symbol(x))
121  #endif
122  
123  /*
124   * To prevent common memory management code from establishing
125   * a zero page mapping on a read fault.
126   * This macro should be defined within <asm/pgtable.h>.
127   * s390 does this to prevent multiplexing of hardware bits
128   * related to the physical page in case of virtualization.
129   */
130  #ifndef mm_forbids_zeropage
131  #define mm_forbids_zeropage(X)	(0)
132  #endif
133  
134  /*
135   * On some architectures it is expensive to call memset() for small sizes.
136   * If an architecture decides to implement its own version of
137   * mm_zero_struct_page it should wrap the define below in an #ifndef and
138   * define its own version of this macro in <asm/pgtable.h>
139   */
140  #if BITS_PER_LONG == 64
141  /* This function must be updated when the size of struct page grows above 96
142   * or shrinks below 56. The idea is that the compiler optimizes out the
143   * switch() statement and leaves only move/store instructions. The compiler
144   * can also combine write statements if they are both assignments and can be
145   * reordered; this can result in several of the writes here being dropped.
146   */
147  #define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
148  static inline void __mm_zero_struct_page(struct page *page)
149  {
150  	unsigned long *_pp = (void *)page;
151  
152  	 /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
153  	BUILD_BUG_ON(sizeof(struct page) & 7);
154  	BUILD_BUG_ON(sizeof(struct page) < 56);
155  	BUILD_BUG_ON(sizeof(struct page) > 96);
156  
157  	switch (sizeof(struct page)) {
158  	case 96:
159  		_pp[11] = 0;
160  		fallthrough;
161  	case 88:
162  		_pp[10] = 0;
163  		fallthrough;
164  	case 80:
165  		_pp[9] = 0;
166  		fallthrough;
167  	case 72:
168  		_pp[8] = 0;
169  		fallthrough;
170  	case 64:
171  		_pp[7] = 0;
172  		fallthrough;
173  	case 56:
174  		_pp[6] = 0;
175  		_pp[5] = 0;
176  		_pp[4] = 0;
177  		_pp[3] = 0;
178  		_pp[2] = 0;
179  		_pp[1] = 0;
180  		_pp[0] = 0;
181  	}
182  }
183  #else
184  #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
185  #endif
186  
187  /*
188   * Default maximum number of active map areas; this limits the number of vmas
189   * per mm struct. Users can override this number via sysctl, but there is a
190   * problem.
191   *
192   * When a program's coredump is generated in ELF format, a section is created
193   * per vma. In ELF, the number of sections is stored as an unsigned short.
194   * This means the number of sections must be smaller than 65535 at coredump
195   * time. Because the kernel adds some informative sections to the image of the
196   * program when generating the coredump, we need some margin. The number of
197   * extra sections is currently 1-3 and depends on the arch. We use "5" as a
198   * safe margin here.
199   *
200   * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
201   * is no longer a hard limit, although some userspace tools can be surprised by it.
202   */
203  #define MAPCOUNT_ELF_CORE_MARGIN	(5)
204  #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
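/* With USHRT_MAX == 65535, this works out to 65535 - 5 == 65530 map areas by default. */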
205  
206  extern int sysctl_max_map_count;
207  
208  extern unsigned long sysctl_user_reserve_kbytes;
209  extern unsigned long sysctl_admin_reserve_kbytes;
210  
211  extern int sysctl_overcommit_memory;
212  extern int sysctl_overcommit_ratio;
213  extern unsigned long sysctl_overcommit_kbytes;
214  
215  int overcommit_ratio_handler(const struct ctl_table *, int, void *, size_t *,
216  		loff_t *);
217  int overcommit_kbytes_handler(const struct ctl_table *, int, void *, size_t *,
218  		loff_t *);
219  int overcommit_policy_handler(const struct ctl_table *, int, void *, size_t *,
220  		loff_t *);
221  
222  #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
223  #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
224  #define folio_page_idx(folio, p)	(page_to_pfn(p) - folio_pfn(folio))
225  #else
226  #define nth_page(page,n) ((page) + (n))
227  #define folio_page_idx(folio, p)	((p) - &(folio)->page)
228  #endif
229  
230  /* to align the pointer to the (next) page boundary */
231  #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
232  
233  /* to align the pointer to the (prev) page boundary */
234  #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
235  
236  /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
237  #define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
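/*
 * Illustrative examples, assuming PAGE_SIZE == 4096:
 *
 *	PAGE_ALIGN(0x1234)	== 0x2000	(round up to the next page boundary)
 *	PAGE_ALIGN_DOWN(0x1234)	== 0x1000	(round down to the page start)
 *	PAGE_ALIGNED(0x2000)	== true
 *	PAGE_ALIGNED(0x1234)	== false
 */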
238  
239  static inline struct folio *lru_to_folio(struct list_head *head)
240  {
241  	return list_entry((head)->prev, struct folio, lru);
242  }
243  
244  void setup_initial_init_mm(void *start_code, void *end_code,
245  			   void *end_data, void *brk);
246  
247  /*
248   * Linux kernel virtual memory manager primitives.
249   * The idea being to have a "virtual" mm in the same way
250   * we have a virtual fs - giving a cleaner interface to the
251   * mm details, and allowing different kinds of memory mappings
252   * (from shared memory to executable loading to arbitrary
253   * mmap() functions).
254   */
255  
256  struct vm_area_struct *vm_area_alloc(struct mm_struct *);
257  struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
258  void vm_area_free(struct vm_area_struct *);
259  /* Use only if VMA has no other users */
260  void __vm_area_free(struct vm_area_struct *vma);
261  
262  #ifndef CONFIG_MMU
263  extern struct rb_root nommu_region_tree;
264  extern struct rw_semaphore nommu_region_sem;
265  
266  extern unsigned int kobjsize(const void *objp);
267  #endif
268  
269  /*
270   * vm_flags in vm_area_struct, see mm_types.h.
271   * When changing, update also include/trace/events/mmflags.h
272   */
273  #define VM_NONE		0x00000000
274  
275  #define VM_READ		0x00000001	/* currently active flags */
276  #define VM_WRITE	0x00000002
277  #define VM_EXEC		0x00000004
278  #define VM_SHARED	0x00000008
279  
280  /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
281  #define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
282  #define VM_MAYWRITE	0x00000020
283  #define VM_MAYEXEC	0x00000040
284  #define VM_MAYSHARE	0x00000080
285  
286  #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
287  #ifdef CONFIG_MMU
288  #define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
289  #else /* CONFIG_MMU */
290  #define VM_MAYOVERLAY	0x00000200	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
291  #define VM_UFFD_MISSING	0
292  #endif /* CONFIG_MMU */
293  #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
294  #define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */
295  
296  #define VM_LOCKED	0x00002000
297  #define VM_IO           0x00004000	/* Memory mapped I/O or similar */
298  
299  					/* Used by sys_madvise() */
300  #define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
301  #define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
302  
303  #define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
304  #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
305  #define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
306  #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
307  #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
308  #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
309  #define VM_SYNC		0x00800000	/* Synchronous page faults */
310  #define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
311  #define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
312  #define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
313  
314  #ifdef CONFIG_MEM_SOFT_DIRTY
315  # define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
316  #else
317  # define VM_SOFTDIRTY	0
318  #endif
319  
320  #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
321  #define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
322  #define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
323  #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
324  
325  #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
326  #define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
327  #define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
328  #define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
329  #define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
330  #define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
331  #define VM_HIGH_ARCH_BIT_5	37	/* bit only usable on 64-bit architectures */
332  #define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
333  #define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
334  #define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
335  #define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
336  #define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
337  #define VM_HIGH_ARCH_5	BIT(VM_HIGH_ARCH_BIT_5)
338  #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
339  
340  #ifdef CONFIG_ARCH_HAS_PKEYS
341  # define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
342  # define VM_PKEY_BIT0  VM_HIGH_ARCH_0
343  # define VM_PKEY_BIT1  VM_HIGH_ARCH_1
344  # define VM_PKEY_BIT2  VM_HIGH_ARCH_2
345  #if CONFIG_ARCH_PKEY_BITS > 3
346  # define VM_PKEY_BIT3  VM_HIGH_ARCH_3
347  #else
348  # define VM_PKEY_BIT3  0
349  #endif
350  #if CONFIG_ARCH_PKEY_BITS > 4
351  # define VM_PKEY_BIT4  VM_HIGH_ARCH_4
352  #else
353  # define VM_PKEY_BIT4  0
354  #endif
355  #endif /* CONFIG_ARCH_HAS_PKEYS */
356  
357  #ifdef CONFIG_X86_USER_SHADOW_STACK
358  /*
359   * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
360   * support in core mm.
361   *
362   * These VMAs will get a single end guard page. This helps userspace protect
363   * itself from attacks. A single page is enough for current shadow stack archs
364   * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
365   * for more details on the guard size.
366   */
367  # define VM_SHADOW_STACK	VM_HIGH_ARCH_5
368  #else
369  # define VM_SHADOW_STACK	VM_NONE
370  #endif
371  
372  #if defined(CONFIG_X86)
373  # define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
374  #elif defined(CONFIG_PPC64)
375  # define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
376  #elif defined(CONFIG_PARISC)
377  # define VM_GROWSUP	VM_ARCH_1
378  #elif defined(CONFIG_SPARC64)
379  # define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
380  # define VM_ARCH_CLEAR	VM_SPARC_ADI
381  #elif defined(CONFIG_ARM64)
382  # define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
383  # define VM_ARCH_CLEAR	VM_ARM64_BTI
384  #elif !defined(CONFIG_MMU)
385  # define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
386  #endif
387  
388  #if defined(CONFIG_ARM64_MTE)
389  # define VM_MTE		VM_HIGH_ARCH_4	/* Use Tagged memory for access control */
390  # define VM_MTE_ALLOWED	VM_HIGH_ARCH_5	/* Tagged memory permitted */
391  #else
392  # define VM_MTE		VM_NONE
393  # define VM_MTE_ALLOWED	VM_NONE
394  #endif
395  
396  #ifndef VM_GROWSUP
397  # define VM_GROWSUP	VM_NONE
398  #endif
399  
400  #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
401  # define VM_UFFD_MINOR_BIT	38
402  # define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
403  #else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
404  # define VM_UFFD_MINOR		VM_NONE
405  #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
406  
407  /*
408   * This flag is used to connect VFIO to arch specific KVM code. It
409   * indicates that the memory under this VMA is safe for use with any
410   * non-cacheable memory type inside KVM. Some VFIO devices, on some
411   * platforms, are thought to be unsafe and can cause machine crashes
412   * if KVM does not lock down the memory type.
413   */
414  #ifdef CONFIG_64BIT
415  #define VM_ALLOW_ANY_UNCACHED_BIT	39
416  #define VM_ALLOW_ANY_UNCACHED		BIT(VM_ALLOW_ANY_UNCACHED_BIT)
417  #else
418  #define VM_ALLOW_ANY_UNCACHED		VM_NONE
419  #endif
420  
421  #ifdef CONFIG_64BIT
422  #define VM_DROPPABLE_BIT	40
423  #define VM_DROPPABLE		BIT(VM_DROPPABLE_BIT)
424  #elif defined(CONFIG_PPC32)
425  #define VM_DROPPABLE		VM_ARCH_1
426  #else
427  #define VM_DROPPABLE		VM_NONE
428  #endif
429  
430  #ifdef CONFIG_64BIT
431  /* VM is sealed, in vm_flags */
432  #define VM_SEALED	_BITUL(63)
433  #endif
434  
435  /* Bits set in the VMA until the stack is in its final location */
436  #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
437  
438  #define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
439  
440  /* Common data flag combinations */
441  #define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
442  				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
443  #define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
444  				 VM_MAYWRITE | VM_MAYEXEC)
445  #define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
446  				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
447  
448  #ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
449  #define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
450  #endif
451  
452  #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
453  #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
454  #endif
455  
456  #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
457  
458  #ifdef CONFIG_STACK_GROWSUP
459  #define VM_STACK	VM_GROWSUP
460  #define VM_STACK_EARLY	VM_GROWSDOWN
461  #else
462  #define VM_STACK	VM_GROWSDOWN
463  #define VM_STACK_EARLY	0
464  #endif
465  
466  #define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
467  
468  /* VMA basic access permission flags */
469  #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
470  
471  
472  /*
473   * Special vmas that are non-mergeable, non-mlock()able.
474   */
475  #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
476  
477  /* This mask prevents VMA from being scanned with khugepaged */
478  #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
479  
480  /* This mask defines which mm->def_flags a process can inherit from its parent */
481  #define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
482  
483  /* This mask represents all the VMA flag bits used by mlock */
484  #define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
485  
486  /* Arch-specific flags to clear when updating VM flags on protection change */
487  #ifndef VM_ARCH_CLEAR
488  # define VM_ARCH_CLEAR	VM_NONE
489  #endif
490  #define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
491  
492  /*
493   * mapping from the currently active vm_flags protection bits (the
494   * low four bits) to a page protection mask.
495   */
496  
497  /*
498   * The default fault flags that should be used by most of the
499   * arch-specific page fault handlers.
500   */
501  #define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
502  			     FAULT_FLAG_KILLABLE | \
503  			     FAULT_FLAG_INTERRUPTIBLE)
504  
505  /**
506   * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
507   * @flags: Fault flags.
508   *
509   * This is mostly used in places where we want to avoid holding
510   * the mmap_lock for too long while waiting for another condition
511   * to change, in which case we can be polite and release the
512   * mmap_lock in the first round to avoid potential starvation of other
513   * processes that also want the mmap_lock.
514   *
515   * Return: true if the page fault allows retry and this is the first
516   * attempt of the fault handling; false otherwise.
517   */
518  static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
519  {
520  	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
521  	    (!(flags & FAULT_FLAG_TRIED));
522  }
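/*
 * Illustrative sketch (not taken from a specific caller) of the polite-retry
 * pattern described above: a fault path that must wait for some condition can
 * drop the lock on the first attempt and ask for a retry.
 * wait_for_condition() below is a hypothetical helper.
 *
 *	if (fault_flag_allow_retry_first(vmf->flags) &&
 *	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 *		release_fault_lock(vmf);
 *		wait_for_condition();
 *		return VM_FAULT_RETRY;
 *	}
 */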
523  
524  #define FAULT_FLAG_TRACE \
525  	{ FAULT_FLAG_WRITE,		"WRITE" }, \
526  	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
527  	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
528  	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
529  	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
530  	{ FAULT_FLAG_TRIED,		"TRIED" }, \
531  	{ FAULT_FLAG_USER,		"USER" }, \
532  	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
533  	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
534  	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }, \
535  	{ FAULT_FLAG_VMA_LOCK,		"VMA_LOCK" }
536  
537  /*
538   * vm_fault is filled by the pagefault handler and passed to the vma's
539   * ->fault function. The vma's ->fault is responsible for returning a bitmask
540   * of VM_FAULT_xxx flags that give details about how the fault was handled.
541   *
542   * The MM layer fills in gfp_mask for page allocations, but the fault handler
543   * might alter it if its implementation requires a different allocation context.
544   *
545   * pgoff should be used in favour of virtual_address, if possible.
546   */
547  struct vm_fault {
548  	const struct {
549  		struct vm_area_struct *vma;	/* Target VMA */
550  		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
551  		pgoff_t pgoff;			/* Logical page offset based on vma */
552  		unsigned long address;		/* Faulting virtual address - masked */
553  		unsigned long real_address;	/* Faulting virtual address - unmasked */
554  	};
555  	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
556  					 * XXX: should really be 'const' */
557  	pmd_t *pmd;			/* Pointer to pmd entry matching
558  					 * the 'address' */
559  	pud_t *pud;			/* Pointer to pud entry matching
560  					 * the 'address'
561  					 */
562  	union {
563  		pte_t orig_pte;		/* Value of PTE at the time of fault */
564  		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
565  					 * used by PMD fault only.
566  					 */
567  	};
568  
569  	struct page *cow_page;		/* Page handler may use for COW fault */
570  	struct page *page;		/* ->fault handlers should return a
571  					 * page here, unless VM_FAULT_NOPAGE
572  					 * is set (which is also implied by
573  					 * VM_FAULT_ERROR).
574  					 */
575  	/* These three entries are valid only while holding ptl lock */
576  	pte_t *pte;			/* Pointer to pte entry matching
577  					 * the 'address'. NULL if the page
578  					 * table hasn't been allocated.
579  					 */
580  	spinlock_t *ptl;		/* Page table lock.
581  					 * Protects pte page table if 'pte'
582  					 * is not NULL, otherwise pmd.
583  					 */
584  	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
585  					 * vm_ops->map_pages() sets up a page
586  					 * table from atomic context.
587  					 * do_fault_around() pre-allocates
588  					 * page table to avoid allocation from
589  					 * atomic context.
590  					 */
591  };
592  
593  /*
594   * These are the virtual MM functions - opening of an area, closing and
595   * unmapping it (needed to keep files on disk up-to-date etc), pointer
596   * to the functions called when a no-page or a wp-page exception occurs.
597   */
598  struct vm_operations_struct {
599  	void (*open)(struct vm_area_struct * area);
600  	/**
601  	 * @close: Called when the VMA is being removed from the MM.
602  	 * Context: User context.  May sleep.  Caller holds mmap_lock.
603  	 */
604  	void (*close)(struct vm_area_struct * area);
605  	/* Called any time before splitting to check if it's allowed */
606  	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
607  	int (*mremap)(struct vm_area_struct *area);
608  	/*
609  	 * Called by mprotect() to make driver-specific permission
610  	 * checks before mprotect() is finalised.   The VMA must not
611  	 * be modified.  Returns 0 if mprotect() can proceed.
612  	 */
613  	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
614  			unsigned long end, unsigned long newflags);
615  	vm_fault_t (*fault)(struct vm_fault *vmf);
616  	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
617  	vm_fault_t (*map_pages)(struct vm_fault *vmf,
618  			pgoff_t start_pgoff, pgoff_t end_pgoff);
619  	unsigned long (*pagesize)(struct vm_area_struct * area);
620  
621  	/* Notification that a previously read-only page is about to become
622  	 * writable; if an error is returned it will cause a SIGBUS. */
623  	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
624  
625  	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
626  	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
627  
628  	/* called by access_process_vm when get_user_pages() fails, typically
629  	 * for use by special VMAs. See also generic_access_phys() for a generic
630  	 * implementation useful for any iomem mapping.
631  	 */
632  	int (*access)(struct vm_area_struct *vma, unsigned long addr,
633  		      void *buf, int len, int write);
634  
635  	/* Called by the /proc/PID/maps code to ask the vma whether it
636  	 * has a special name.  Returning non-NULL will also cause this
637  	 * vma to be dumped unconditionally. */
638  	const char *(*name)(struct vm_area_struct *vma);
639  
640  #ifdef CONFIG_NUMA
641  	/*
642  	 * set_policy() op must add a reference to any non-NULL @new mempolicy
643  	 * to hold the policy upon return.  Caller should pass NULL @new to
644  	 * remove a policy and fall back to surrounding context--i.e. do not
645  	 * install a MPOL_DEFAULT policy, nor the task or system default
646  	 * mempolicy.
647  	 */
648  	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
649  
650  	/*
651  	 * get_policy() op must add reference [mpol_get()] to any policy at
652  	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
653  	 * in mm/mempolicy.c will do this automatically.
654  	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
655  	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
656  	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
657  	 * must return NULL--i.e., do not "fallback" to task or system default
658  	 * policy.
659  	 */
660  	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
661  					unsigned long addr, pgoff_t *ilx);
662  #endif
663  	/*
664  	 * Called by vm_normal_page() for special PTEs to find the
665  	 * page for @addr.  This is useful if the default behavior
666  	 * (using pte_page()) would not find the correct page.
667  	 */
668  	struct page *(*find_special_page)(struct vm_area_struct *vma,
669  					  unsigned long addr);
670  };
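/*
 * Illustrative sketch of a minimal implementation: a driver that only needs to
 * resolve faults could fill in just ->fault.  my_lookup_page() below is a
 * hypothetical helper, not a real API.
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 */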
671  
672  #ifdef CONFIG_NUMA_BALANCING
673  static inline void vma_numab_state_init(struct vm_area_struct *vma)
674  {
675  	vma->numab_state = NULL;
676  }
677  static inline void vma_numab_state_free(struct vm_area_struct *vma)
678  {
679  	kfree(vma->numab_state);
680  }
681  #else
682  static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
683  static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
684  #endif /* CONFIG_NUMA_BALANCING */
685  
686  #ifdef CONFIG_PER_VMA_LOCK
687  /*
688   * Try to read-lock a vma. The function is allowed to occasionally yield a
689   * false locked result to avoid performance overhead, in which case we fall
690   * back to using mmap_lock. The function should never yield a false unlocked result.
691   */
692  static inline bool vma_start_read(struct vm_area_struct *vma)
693  {
694  	/*
695  	 * Check before locking. A race might cause false locked result.
696  	 * We can use READ_ONCE() for the mm_lock_seq here, and don't need
697  	 * ACQUIRE semantics, because this is just a lockless check whose result
698  	 * we don't rely on for anything - the mm_lock_seq read against which we
699  	 * need ordering is below.
700  	 */
701  	if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
702  		return false;
703  
704  	if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
705  		return false;
706  
707  	/*
708  	 * Overflow might produce a false locked result.
709  	 * A false unlocked result is impossible because we modify and check
710  	 * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
711  	 * modification invalidates all existing locks.
712  	 *
713  	 * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
714  	 * racing with vma_end_write_all(), we only start reading from the VMA
715  	 * after it has been unlocked.
716  	 * This pairs with RELEASE semantics in vma_end_write_all().
717  	 */
718  	if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
719  		up_read(&vma->vm_lock->lock);
720  		return false;
721  	}
722  	return true;
723  }
724  
725  static inline void vma_end_read(struct vm_area_struct *vma)
726  {
727  	rcu_read_lock(); /* keeps vma alive till the end of up_read */
728  	up_read(&vma->vm_lock->lock);
729  	rcu_read_unlock();
730  }
731  
732  /* WARNING! Can only be used if mmap_lock is expected to be write-locked */
733  static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
734  {
735  	mmap_assert_write_locked(vma->vm_mm);
736  
737  	/*
738  	 * The current task is holding mmap_write_lock, so neither vma->vm_lock_seq
739  	 * nor mm->mm_lock_seq can be concurrently modified.
740  	 */
741  	*mm_lock_seq = vma->vm_mm->mm_lock_seq;
742  	return (vma->vm_lock_seq == *mm_lock_seq);
743  }
744  
745  /*
746   * Begin writing to a VMA.
747   * Exclude concurrent readers under the per-VMA lock until the currently
748   * write-locked mmap_lock is dropped or downgraded.
749   */
750  static inline void vma_start_write(struct vm_area_struct *vma)
751  {
752  	int mm_lock_seq;
753  
754  	if (__is_vma_write_locked(vma, &mm_lock_seq))
755  		return;
756  
757  	down_write(&vma->vm_lock->lock);
758  	/*
759  	 * We should use WRITE_ONCE() here because we can have concurrent reads
760  	 * from the early lockless pessimistic check in vma_start_read().
761  	 * We don't really care about the correctness of that early check, but
762  	 * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
763  	 */
764  	WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
765  	up_write(&vma->vm_lock->lock);
766  }
767  
768  static inline void vma_assert_write_locked(struct vm_area_struct *vma)
769  {
770  	int mm_lock_seq;
771  
772  	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
773  }
774  
775  static inline void vma_assert_locked(struct vm_area_struct *vma)
776  {
777  	if (!rwsem_is_locked(&vma->vm_lock->lock))
778  		vma_assert_write_locked(vma);
779  }
780  
781  static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
782  {
783  	/* When detaching, the vma should be write-locked */
784  	if (detached)
785  		vma_assert_write_locked(vma);
786  	vma->detached = detached;
787  }
788  
789  static inline void release_fault_lock(struct vm_fault *vmf)
790  {
791  	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
792  		vma_end_read(vmf->vma);
793  	else
794  		mmap_read_unlock(vmf->vma->vm_mm);
795  }
796  
797  static inline void assert_fault_locked(struct vm_fault *vmf)
798  {
799  	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
800  		vma_assert_locked(vmf->vma);
801  	else
802  		mmap_assert_locked(vmf->vma->vm_mm);
803  }
804  
805  struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
806  					  unsigned long address);
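/*
 * Illustrative sketch of how a page fault handler typically uses the per-VMA
 * lock, falling back to mmap_lock when the VMA cannot be read-locked:
 *
 *	vma = lock_vma_under_rcu(mm, address);
 *	if (vma) {
 *		// handle the fault with FAULT_FLAG_VMA_LOCK set ...
 *		vma_end_read(vma);
 *	} else {
 *		// fall back to mmap_read_lock(mm) and a VMA lookup
 *	}
 */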
807  
808  #else /* CONFIG_PER_VMA_LOCK */
809  
810  static inline bool vma_start_read(struct vm_area_struct *vma)
811  		{ return false; }
812  static inline void vma_end_read(struct vm_area_struct *vma) {}
813  static inline void vma_start_write(struct vm_area_struct *vma) {}
814  static inline void vma_assert_write_locked(struct vm_area_struct *vma)
815  		{ mmap_assert_write_locked(vma->vm_mm); }
816  static inline void vma_mark_detached(struct vm_area_struct *vma,
817  				     bool detached) {}
818  
819  static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
820  		unsigned long address)
821  {
822  	return NULL;
823  }
824  
825  static inline void vma_assert_locked(struct vm_area_struct *vma)
826  {
827  	mmap_assert_locked(vma->vm_mm);
828  }
829  
830  static inline void release_fault_lock(struct vm_fault *vmf)
831  {
832  	mmap_read_unlock(vmf->vma->vm_mm);
833  }
834  
835  static inline void assert_fault_locked(struct vm_fault *vmf)
836  {
837  	mmap_assert_locked(vmf->vma->vm_mm);
838  }
839  
840  #endif /* CONFIG_PER_VMA_LOCK */
841  
842  extern const struct vm_operations_struct vma_dummy_vm_ops;
843  
844  /*
845   * WARNING: vma_init does not initialize vma->vm_lock.
846   * Use vm_area_alloc()/vm_area_free() if vma needs locking.
847   */
848  static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
849  {
850  	memset(vma, 0, sizeof(*vma));
851  	vma->vm_mm = mm;
852  	vma->vm_ops = &vma_dummy_vm_ops;
853  	INIT_LIST_HEAD(&vma->anon_vma_chain);
854  	vma_mark_detached(vma, false);
855  	vma_numab_state_init(vma);
856  }
857  
858  /* Use when VMA is not part of the VMA tree and needs no locking */
859  static inline void vm_flags_init(struct vm_area_struct *vma,
860  				 vm_flags_t flags)
861  {
862  	ACCESS_PRIVATE(vma, __vm_flags) = flags;
863  }
864  
865  /*
866   * Use when VMA is part of the VMA tree and modifications need coordination
867   * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
868   * it should be locked explicitly beforehand.
869   */
870  static inline void vm_flags_reset(struct vm_area_struct *vma,
871  				  vm_flags_t flags)
872  {
873  	vma_assert_write_locked(vma);
874  	vm_flags_init(vma, flags);
875  }
876  
877  static inline void vm_flags_reset_once(struct vm_area_struct *vma,
878  				       vm_flags_t flags)
879  {
880  	vma_assert_write_locked(vma);
881  	WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
882  }
883  
884  static inline void vm_flags_set(struct vm_area_struct *vma,
885  				vm_flags_t flags)
886  {
887  	vma_start_write(vma);
888  	ACCESS_PRIVATE(vma, __vm_flags) |= flags;
889  }
890  
891  static inline void vm_flags_clear(struct vm_area_struct *vma,
892  				  vm_flags_t flags)
893  {
894  	vma_start_write(vma);
895  	ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
896  }
897  
898  /*
899   * Use only if VMA is not part of the VMA tree or has no other users and
900   * therefore needs no locking.
901   */
902  static inline void __vm_flags_mod(struct vm_area_struct *vma,
903  				  vm_flags_t set, vm_flags_t clear)
904  {
905  	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
906  }
907  
908  /*
909   * Use only when the order of set/clear operations is unimportant, otherwise
910   * use vm_flags_{set|clear} explicitly.
911   */
912  static inline void vm_flags_mod(struct vm_area_struct *vma,
913  				vm_flags_t set, vm_flags_t clear)
914  {
915  	vma_start_write(vma);
916  	__vm_flags_mod(vma, set, clear);
917  }
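/*
 * Illustrative sketch: a driver's ->mmap() handler (called with mmap_lock held
 * for write) commonly adjusts flags like this; vm_flags_set()/vm_flags_clear()
 * write-lock the VMA themselves:
 *
 *	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
 *	vm_flags_clear(vma, VM_MAYWRITE);
 */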
918  
919  static inline void vma_set_anonymous(struct vm_area_struct *vma)
920  {
921  	vma->vm_ops = NULL;
922  }
923  
924  static inline bool vma_is_anonymous(struct vm_area_struct *vma)
925  {
926  	return !vma->vm_ops;
927  }
928  
929  /*
930   * Indicate if the VMA is a heap for the given task; for
931   * /proc/PID/maps that is the heap of the main task.
932   */
933  static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
934  {
935  	return vma->vm_start < vma->vm_mm->brk &&
936  		vma->vm_end > vma->vm_mm->start_brk;
937  }
938  
939  /*
940   * Indicate if the VMA is a stack for the given task; for
941   * /proc/PID/maps that is the stack of the main task.
942   */
943  static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
944  {
945  	/*
946  	 * We make no effort to guess what a given thread considers to be
947  	 * its "stack".  It's not even well-defined for programs written in
948  	 * languages like Go.
949  	 */
950  	return vma->vm_start <= vma->vm_mm->start_stack &&
951  		vma->vm_end >= vma->vm_mm->start_stack;
952  }
953  
954  static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
955  {
956  	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
957  
958  	if (!maybe_stack)
959  		return false;
960  
961  	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
962  						VM_STACK_INCOMPLETE_SETUP)
963  		return true;
964  
965  	return false;
966  }
967  
968  static inline bool vma_is_foreign(struct vm_area_struct *vma)
969  {
970  	if (!current->mm)
971  		return true;
972  
973  	if (current->mm != vma->vm_mm)
974  		return true;
975  
976  	return false;
977  }
978  
979  static inline bool vma_is_accessible(struct vm_area_struct *vma)
980  {
981  	return vma->vm_flags & VM_ACCESS_FLAGS;
982  }
983  
984  static inline bool is_shared_maywrite(vm_flags_t vm_flags)
985  {
986  	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
987  		(VM_SHARED | VM_MAYWRITE);
988  }
989  
990  static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
991  {
992  	return is_shared_maywrite(vma->vm_flags);
993  }
994  
995  static inline
996  struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
997  {
998  	return mas_find(&vmi->mas, max - 1);
999  }
1000  
1001  static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
1002  {
1003  	/*
1004  	 * Uses mas_find() to get the first VMA when the iterator starts.
1005  	 * Calling mas_next() could skip the first entry.
1006  	 */
1007  	return mas_find(&vmi->mas, ULONG_MAX);
1008  }
1009  
1010  static inline
1011  struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
1012  {
1013  	return mas_next_range(&vmi->mas, ULONG_MAX);
1014  }
1015  
1016  
1017  static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
1018  {
1019  	return mas_prev(&vmi->mas, 0);
1020  }
1021  
1022  static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1023  			unsigned long start, unsigned long end, gfp_t gfp)
1024  {
1025  	__mas_set_range(&vmi->mas, start, end - 1);
1026  	mas_store_gfp(&vmi->mas, NULL, gfp);
1027  	if (unlikely(mas_is_err(&vmi->mas)))
1028  		return -ENOMEM;
1029  
1030  	return 0;
1031  }
1032  
1033  /* Free any unused preallocations */
1034  static inline void vma_iter_free(struct vma_iterator *vmi)
1035  {
1036  	mas_destroy(&vmi->mas);
1037  }
1038  
1039  static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
1040  				      struct vm_area_struct *vma)
1041  {
1042  	vmi->mas.index = vma->vm_start;
1043  	vmi->mas.last = vma->vm_end - 1;
1044  	mas_store(&vmi->mas, vma);
1045  	if (unlikely(mas_is_err(&vmi->mas)))
1046  		return -ENOMEM;
1047  
1048  	return 0;
1049  }
1050  
1051  static inline void vma_iter_invalidate(struct vma_iterator *vmi)
1052  {
1053  	mas_pause(&vmi->mas);
1054  }
1055  
1056  static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
1057  {
1058  	mas_set(&vmi->mas, addr);
1059  }
1060  
1061  #define for_each_vma(__vmi, __vma)					\
1062  	while (((__vma) = vma_next(&(__vmi))) != NULL)
1063  
1064  /* The MM code likes to work with exclusive end addresses */
1065  #define for_each_vma_range(__vmi, __vma, __end)				\
1066  	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
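/*
 * Illustrative sketch of walking every VMA of an mm with the iterator above
 * (VMA_ITERATOR() is defined elsewhere in the mm headers):
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *
 *	mmap_read_lock(mm);
 *	for_each_vma(vmi, vma)
 *		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
 *	mmap_read_unlock(mm);
 */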
1067  
1068  #ifdef CONFIG_SHMEM
1069  /*
1070   * vma_is_shmem() is not inline because it is used only by slow
1071   * paths in userfault.
1072   */
1073  bool vma_is_shmem(struct vm_area_struct *vma);
1074  bool vma_is_anon_shmem(struct vm_area_struct *vma);
1075  #else
1076  static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1077  static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
1078  #endif
1079  
1080  int vma_is_stack_for_current(struct vm_area_struct *vma);
1081  
1082  /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
1083  #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
1084  
1085  struct mmu_gather;
1086  struct inode;
1087  
1088  /*
1089   * compound_order() can be called without holding a reference, which means
1090   * that niceties like page_folio() don't work.  These callers should be
1091   * prepared to handle wild return values.  For example, PG_head may be
1092   * set before the order is initialised, or this may be a tail page.
1093   * See compaction.c for some good examples.
1094   */
1095  static inline unsigned int compound_order(struct page *page)
1096  {
1097  	struct folio *folio = (struct folio *)page;
1098  
1099  	if (!test_bit(PG_head, &folio->flags))
1100  		return 0;
1101  	return folio->_flags_1 & 0xff;
1102  }
1103  
1104  /**
1105   * folio_order - The allocation order of a folio.
1106   * @folio: The folio.
1107   *
1108   * A folio is composed of 2^order pages.  See get_order() for the definition
1109   * of order.
1110   *
1111   * Return: The order of the folio.
1112   */
1113  static inline unsigned int folio_order(const struct folio *folio)
1114  {
1115  	if (!folio_test_large(folio))
1116  		return 0;
1117  	return folio->_flags_1 & 0xff;
1118  }
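/* For example, an order-2 folio is composed of 2^2 == 4 pages. */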
1119  
1120  #include <linux/huge_mm.h>
1121  
1122  /*
1123   * Methods to modify the page usage count.
1124   *
1125   * What counts for a page usage:
1126   * - cache mapping   (page->mapping)
1127   * - private data    (page->private)
1128   * - page mapped in a task's page tables, each mapping
1129   *   is counted separately
1130   *
1131   * Also, many kernel routines increase the page count before a critical
1132   * routine so they can be sure the page doesn't go away from under them.
1133   */
1134  
1135  /*
1136   * Drop a ref, return true if the refcount fell to zero (the page has no users)
1137   */
1138  static inline int put_page_testzero(struct page *page)
1139  {
1140  	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
1141  	return page_ref_dec_and_test(page);
1142  }
1143  
1144  static inline int folio_put_testzero(struct folio *folio)
1145  {
1146  	return put_page_testzero(&folio->page);
1147  }
1148  
1149  /*
1150   * Try to grab a ref unless the page has a refcount of zero, return false if
1151   * that is the case.
1152   * This can be called when MMU is off so it must not access
1153   * any of the virtual mappings.
1154   */
1155  static inline bool get_page_unless_zero(struct page *page)
1156  {
1157  	return page_ref_add_unless(page, 1, 0);
1158  }
1159  
1160  static inline struct folio *folio_get_nontail_page(struct page *page)
1161  {
1162  	if (unlikely(!get_page_unless_zero(page)))
1163  		return NULL;
1164  	return (struct folio *)page;
1165  }
1166  
1167  extern int page_is_ram(unsigned long pfn);
1168  
1169  enum {
1170  	REGION_INTERSECTS,
1171  	REGION_DISJOINT,
1172  	REGION_MIXED,
1173  };
1174  
1175  int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
1176  		      unsigned long desc);
1177  
1178  /* Support for virtually mapped pages */
1179  struct page *vmalloc_to_page(const void *addr);
1180  unsigned long vmalloc_to_pfn(const void *addr);
1181  
1182  /*
1183   * Determine if an address is within the vmalloc range
1184   *
1185   * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
1186   * is no special casing required.
1187   */
1188  #ifdef CONFIG_MMU
1189  extern bool is_vmalloc_addr(const void *x);
1190  extern int is_vmalloc_or_module_addr(const void *x);
1191  #else
1192  static inline bool is_vmalloc_addr(const void *x)
1193  {
1194  	return false;
1195  }
1196  static inline int is_vmalloc_or_module_addr(const void *x)
1197  {
1198  	return 0;
1199  }
1200  #endif
1201  
1202  /*
1203   * How many times the entire folio is mapped as a single unit (eg by a
1204   * PMD or PUD entry).  This is probably not what you want, except for
1205   * debugging purposes or implementation of other core folio_*() primitives.
1206   */
1207  static inline int folio_entire_mapcount(const struct folio *folio)
1208  {
1209  	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1210  	return atomic_read(&folio->_entire_mapcount) + 1;
1211  }
1212  
1213  static inline int folio_large_mapcount(const struct folio *folio)
1214  {
1215  	VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
1216  	return atomic_read(&folio->_large_mapcount) + 1;
1217  }
1218  
1219  /**
1220   * folio_mapcount() - Number of mappings of this folio.
1221   * @folio: The folio.
1222   *
1223   * The folio mapcount corresponds to the number of present user page table
1224   * entries that reference any part of a folio. Each such present user page
1225   * table entry must be paired with exactly one folio reference.
1226   *
1227   * For ordinary folios, each user page table entry (PTE/PMD/PUD/...) counts
1228   * exactly once.
1229   *
1230   * For hugetlb folios, each abstracted "hugetlb" user page table entry that
1231   * references the entire folio counts exactly once, even when such special
1232   * page table entries are comprised of multiple ordinary page table entries.
1233   *
1234   * Will report 0 for pages which cannot be mapped into userspace, such as
1235   * slab, page tables and similar.
1236   *
1237   * Return: The number of times this folio is mapped.
1238   */
1239  static inline int folio_mapcount(const struct folio *folio)
1240  {
1241  	int mapcount;
1242  
1243  	if (likely(!folio_test_large(folio))) {
1244  		mapcount = atomic_read(&folio->_mapcount) + 1;
1245  		if (page_mapcount_is_type(mapcount))
1246  			mapcount = 0;
1247  		return mapcount;
1248  	}
1249  	return folio_large_mapcount(folio);
1250  }
1251  
1252  /**
1253   * folio_mapped - Is this folio mapped into userspace?
1254   * @folio: The folio.
1255   *
1256   * Return: True if any page in this folio is referenced by user page tables.
1257   */
1258  static inline bool folio_mapped(const struct folio *folio)
1259  {
1260  	return folio_mapcount(folio) >= 1;
1261  }
1262  
1263  /*
1264   * Return true if this page is mapped into pagetables.
1265   * For a compound page it returns true if any sub-page of the compound page is mapped,
1266   * even if this particular sub-page is not itself mapped by any PTE or PMD.
1267   */
1268  static inline bool page_mapped(const struct page *page)
1269  {
1270  	return folio_mapped(page_folio(page));
1271  }
1272  
1273  static inline struct page *virt_to_head_page(const void *x)
1274  {
1275  	struct page *page = virt_to_page(x);
1276  
1277  	return compound_head(page);
1278  }
1279  
1280  static inline struct folio *virt_to_folio(const void *x)
1281  {
1282  	struct page *page = virt_to_page(x);
1283  
1284  	return page_folio(page);
1285  }
1286  
1287  void __folio_put(struct folio *folio);
1288  
1289  void put_pages_list(struct list_head *pages);
1290  
1291  void split_page(struct page *page, unsigned int order);
1292  void folio_copy(struct folio *dst, struct folio *src);
1293  int folio_mc_copy(struct folio *dst, struct folio *src);
1294  
1295  unsigned long nr_free_buffer_pages(void);
1296  
1297  /* Returns the number of bytes in this potentially compound page. */
1298  static inline unsigned long page_size(struct page *page)
1299  {
1300  	return PAGE_SIZE << compound_order(page);
1301  }
1302  
1303  /* Returns the number of bits needed for the number of bytes in a page */
1304  static inline unsigned int page_shift(struct page *page)
1305  {
1306  	return PAGE_SHIFT + compound_order(page);
1307  }
1308  
1309  /**
1310   * thp_order - Order of a transparent huge page.
1311   * @page: Head page of a transparent huge page.
1312   */
1313  static inline unsigned int thp_order(struct page *page)
1314  {
1315  	VM_BUG_ON_PGFLAGS(PageTail(page), page);
1316  	return compound_order(page);
1317  }
1318  
1319  /**
1320   * thp_size - Size of a transparent huge page.
1321   * @page: Head page of a transparent huge page.
1322   *
1323   * Return: Number of bytes in this page.
1324   */
1325  static inline unsigned long thp_size(struct page *page)
1326  {
1327  	return PAGE_SIZE << thp_order(page);
1328  }
1329  
1330  #ifdef CONFIG_MMU
1331  /*
1332   * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1333   * servicing faults for write access.  In the normal case, do always want
1334   * servicing faults for write access.  In the normal case, we always want
1335   * that do not have writing enabled, when used by access_process_vm.
1336   */
1337  static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1338  {
1339  	if (likely(vma->vm_flags & VM_WRITE))
1340  		pte = pte_mkwrite(pte, vma);
1341  	return pte;
1342  }
1343  
1344  vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
1345  void set_pte_range(struct vm_fault *vmf, struct folio *folio,
1346  		struct page *page, unsigned int nr, unsigned long addr);
1347  
1348  vm_fault_t finish_fault(struct vm_fault *vmf);
1349  #endif
1350  
1351  /*
1352   * Multiple processes may "see" the same page. E.g. for untouched
1353   * mappings of /dev/null, all processes see the same page full of
1354   * zeroes, and text pages of executables and shared libraries have
1355   * only one copy in memory, at most, normally.
1356   *
1357   * For the non-reserved pages, page_count(page) denotes a reference count.
1358   *   page_count() == 0 means the page is free. page->lru is then used for
1359   *   freelist management in the buddy allocator.
1360   *   page_count() > 0  means the page has been allocated.
1361   *
1362   * Pages are allocated by the slab allocator in order to provide memory
1363   * to kmalloc and kmem_cache_alloc. In this case, the management of the
1364   * page, and the fields in 'struct page' are the responsibility of mm/slab.c
1365   * unless a particular usage is carefully commented. (the responsibility of
1366   * freeing the kmalloc memory is the caller's, of course).
1367   *
1368   * A page may be used by anyone else who does a __get_free_page().
1369   * In this case, page_count still tracks the references, and should only
1370   * be used through the normal accessor functions. The top bits of page->flags
1371   * and page->virtual store page management information, but all other fields
1372   * are unused and could be used privately, carefully. The management of this
1373   * page is the responsibility of the one who allocated it, and those who have
1374   * subsequently been given references to it.
1375   *
1376   * The other pages (we may call them "pagecache pages") are completely
1377   * managed by the Linux memory manager: I/O, buffers, swapping etc.
1378   * The following discussion applies only to them.
1379   *
1380   * A pagecache page contains an opaque `private' member, which belongs to the
1381   * page's address_space. Usually, this is the address of a circular list of
1382   * the page's disk buffers. PG_private must be set to tell the VM to call
1383   * into the filesystem to release these pages.
1384   *
1385   * A page may belong to an inode's memory mapping. In this case, page->mapping
1386   * is the pointer to the inode, and page->index is the file offset of the page,
1387   * in units of PAGE_SIZE.
1388   *
1389   * If pagecache pages are not associated with an inode, they are said to be
1390   * anonymous pages. These may become associated with the swapcache, and in that
1391   * case PG_swapcache is set, and page->private is an offset into the swapcache.
1392   *
1393   * In either case (swapcache or inode backed), the pagecache itself holds one
1394   * reference to the page. Setting PG_private should also increment the
1395   * refcount. Each user mapping also has a reference to the page.
1396   *
1397   * The pagecache pages are stored in a per-mapping radix tree, which is
1398   * rooted at mapping->i_pages, and indexed by offset.
1399   * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
1400   * lists, we instead now tag pages as dirty/writeback in the radix tree.
1401   *
1402   * All pagecache pages may be subject to I/O:
1403   * - inode pages may need to be read from disk,
1404   * - inode pages which have been modified and are MAP_SHARED may need
1405   *   to be written back to the inode on disk,
1406   * - anonymous pages (including MAP_PRIVATE file mappings) which have been
1407   *   modified may need to be swapped out to swap space and (later) to be read
1408   *   back into memory.
1409   */
1410  
1411  #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
1412  DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
1413  
1414  bool __put_devmap_managed_folio_refs(struct folio *folio, int refs);
1415  static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
1416  {
1417  	if (!static_branch_unlikely(&devmap_managed_key))
1418  		return false;
1419  	if (!folio_is_zone_device(folio))
1420  		return false;
1421  	return __put_devmap_managed_folio_refs(folio, refs);
1422  }
1423  #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
1424  static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
1425  {
1426  	return false;
1427  }
1428  #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
1429  
1430  /* 127: arbitrary random number, small enough to assemble well */
1431  #define folio_ref_zero_or_close_to_overflow(folio) \
1432  	((unsigned int) folio_ref_count(folio) + 127u <= 127u)
1433  
1434  /**
1435   * folio_get - Increment the reference count on a folio.
1436   * @folio: The folio.
1437   *
1438   * Context: May be called in any context, as long as you know that
1439   * you have a refcount on the folio.  If you do not already have one,
1440   * folio_try_get() may be the right interface for you to use.
1441   */
1442  static inline void folio_get(struct folio *folio)
1443  {
1444  	VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
1445  	folio_ref_inc(folio);
1446  }
1447  
1448  static inline void get_page(struct page *page)
1449  {
1450  	folio_get(page_folio(page));
1451  }
1452  
1453  static inline __must_check bool try_get_page(struct page *page)
1454  {
1455  	page = compound_head(page);
1456  	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
1457  		return false;
1458  	page_ref_inc(page);
1459  	return true;
1460  }
1461  
1462  /**
1463   * folio_put - Decrement the reference count on a folio.
1464   * @folio: The folio.
1465   *
1466   * If the folio's reference count reaches zero, the memory will be
1467   * released back to the page allocator and may be used by another
1468   * allocation immediately.  Do not access the memory or the struct folio
1469   * after calling folio_put() unless you can be sure that it wasn't the
1470   * last reference.
1471   *
1472   * Context: May be called in process or interrupt context, but not in NMI
1473   * context.  May be called while holding a spinlock.
1474   */
folio_put(struct folio * folio)1475  static inline void folio_put(struct folio *folio)
1476  {
1477  	if (folio_put_testzero(folio))
1478  		__folio_put(folio);
1479  }
1480  
1481  /**
1482   * folio_put_refs - Reduce the reference count on a folio.
1483   * @folio: The folio.
1484   * @refs: The amount to subtract from the folio's reference count.
1485   *
1486   * If the folio's reference count reaches zero, the memory will be
1487   * released back to the page allocator and may be used by another
1488   * allocation immediately.  Do not access the memory or the struct folio
1489   * after calling folio_put_refs() unless you can be sure that these weren't
1490   * the last references.
1491   *
1492   * Context: May be called in process or interrupt context, but not in NMI
1493   * context.  May be called while holding a spinlock.
1494   */
folio_put_refs(struct folio * folio,int refs)1495  static inline void folio_put_refs(struct folio *folio, int refs)
1496  {
1497  	if (folio_ref_sub_and_test(folio, refs))
1498  		__folio_put(folio);
1499  }
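
/*
 * Example (illustrative sketch, not part of this header): a caller that
 * already holds one reference and briefly takes two more must drop them
 * again, either one by one with folio_put() or in one go with
 * folio_put_refs().  The surrounding function is hypothetical.
 *
 *	void example_borrow_folio(struct folio *folio)
 *	{
 *		folio_get(folio);
 *		folio_get(folio);
 *		// ... use the folio from another context ...
 *		folio_put_refs(folio, 2);	// drop both extra references
 *	}
 */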
1500  
1501  void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
1502  
1503  /*
1504   * union release_pages_arg - an array of pages or folios
1505   *
1506   * release_pages() releases a simple array of multiple pages, and
1507   * accepts various different forms of said page array: either
1508   * a regular old boring array of pages, an array of folios, or
1509   * an array of encoded page pointers.
1510   *
1511   * The transparent union syntax for this kind of "any of these
1512   * argument types" is all kinds of ugly, so look away.
1513   */
1514  typedef union {
1515  	struct page **pages;
1516  	struct folio **folios;
1517  	struct encoded_page **encoded_pages;
1518  } release_pages_arg __attribute__ ((__transparent_union__));
1519  
1520  void release_pages(release_pages_arg, int nr);
1521  
1522  /**
1523   * folios_put - Decrement the reference count on an array of folios.
1524   * @folios: The folios.
1525   *
1526   * Like folio_put(), but for a batch of folios.  This is more efficient
1527   * than writing the loop yourself as it will optimise the locks which need
1528   * to be taken if the folios are freed.  The folios batch is returned
1529   * empty and ready to be reused for another batch; there is no need to
1530   * reinitialise it.
1531   *
1532   * Context: May be called in process or interrupt context, but not in NMI
1533   * context.  May be called while holding a spinlock.
1534   */
folios_put(struct folio_batch * folios)1535  static inline void folios_put(struct folio_batch *folios)
1536  {
1537  	folios_put_refs(folios, NULL);
1538  }
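
/*
 * Example (illustrative sketch): batching reference drops with a
 * struct folio_batch (from <linux/pagevec.h>).  Where the folios come from
 * is hypothetical; each one is assumed to carry a reference we own.
 *
 *	struct folio_batch fbatch;
 *
 *	folio_batch_init(&fbatch);
 *	// for each folio we want to release:
 *	if (!folio_batch_add(&fbatch, folio))
 *		folios_put(&fbatch);	// batch full: put refs, batch is reusable
 *	// after the loop:
 *	folios_put(&fbatch);		// put whatever remains
 */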
1539  
put_page(struct page * page)1540  static inline void put_page(struct page *page)
1541  {
1542  	struct folio *folio = page_folio(page);
1543  
1544  	/*
1545  	 * For some devmap managed pages we need to catch refcount transition
1546  	 * from 2 to 1:
1547  	 */
1548  	if (put_devmap_managed_folio_refs(folio, 1))
1549  		return;
1550  	folio_put(folio);
1551  }
1552  
1553  /*
1554   * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
1555   * the page's refcount so that two separate items are tracked: the original page
1556   * reference count, and also a new count of how many pin_user_pages() calls were
1557   * made against the page. ("gup-pinned" is another term for the latter).
1558   *
1559   * With this scheme, pin_user_pages() becomes special: such pages are marked as
1560   * distinct from normal pages. As such, the unpin_user_page() call (and its
1561   * variants) must be used in order to release gup-pinned pages.
1562   *
1563   * Choice of value:
1564   *
1565   * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
1566   * counts with respect to pin_user_pages() and unpin_user_page() becomes
1567   * simpler, due to the fact that adding an even power of two to the page
1568   * refcount has the effect of using only the upper N bits, for the code that
1569   * counts up using the bias value. This means that the lower bits are left for
1570   * the exclusive use of the original code that increments and decrements by one
1571   * (or at least, by much smaller values than the bias value).
1572   *
1573   * Of course, once the lower bits overflow into the upper bits (and this is
1574   * OK, because subtraction recovers the original values), then visual inspection
1575   * no longer suffices to directly view the separate counts. However, for normal
1576   * applications that don't have huge page reference counts, this won't be an
1577   * issue.
1578   *
1579   * Locking: the lockless algorithm described in folio_try_get_rcu()
1580   * provides safe operation for get_user_pages(), folio_mkclean() and
1581   * other calls that race to set up page table entries.
1582   */
1583  #define GUP_PIN_COUNTING_BIAS (1U << 10)
1584  
1585  void unpin_user_page(struct page *page);
1586  void unpin_folio(struct folio *folio);
1587  void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1588  				 bool make_dirty);
1589  void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1590  				      bool make_dirty);
1591  void unpin_user_pages(struct page **pages, unsigned long npages);
1592  void unpin_user_folio(struct folio *folio, unsigned long npages);
1593  void unpin_folios(struct folio **folios, unsigned long nfolios);
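
/*
 * Example (illustrative sketch): a pin_user_pages()/unpin_user_pages()
 * cycle, as a driver preparing user memory for DMA might do it.  The user
 * address, page count and gup flags are assumptions for illustration.
 *
 *	struct page *pages[16];
 *	long pinned;
 *
 *	pinned = pin_user_pages(user_addr, ARRAY_SIZE(pages),
 *				FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	// ... perform DMA to/from the pinned pages ...
 *	unpin_user_pages_dirty_lock(pages, pinned, true);
 */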
1594  
is_cow_mapping(vm_flags_t flags)1595  static inline bool is_cow_mapping(vm_flags_t flags)
1596  {
1597  	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1598  }
1599  
1600  #ifndef CONFIG_MMU
is_nommu_shared_mapping(vm_flags_t flags)1601  static inline bool is_nommu_shared_mapping(vm_flags_t flags)
1602  {
1603  	/*
1604  	 * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
1605  	 * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
1606  	 * a file mapping. R/O MAP_PRIVATE mappings might still modify
1607  	 * underlying memory if ptrace is active, so this is only possible if
1608  	 * ptrace does not apply. Note that there is no mprotect() to upgrade
1609  	 * write permissions later.
1610  	 */
1611  	return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
1612  }
1613  #endif
1614  
1615  #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1616  #define SECTION_IN_PAGE_FLAGS
1617  #endif
1618  
1619  /*
1620   * The identification function is mainly used by the buddy allocator for
1621   * determining if two pages could be buddies. We are not really identifying
1622   * the zone, since we may be using the section number instead when the
1623   * node id is not available in page flags.
1624   * We only guarantee that it will return the same value for two combinable
1625   * pages in a zone.
1626   */
page_zone_id(struct page * page)1627  static inline int page_zone_id(struct page *page)
1628  {
1629  	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
1630  }
1631  
1632  #ifdef NODE_NOT_IN_PAGE_FLAGS
1633  int page_to_nid(const struct page *page);
1634  #else
page_to_nid(const struct page * page)1635  static inline int page_to_nid(const struct page *page)
1636  {
1637  	return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
1638  }
1639  #endif
1640  
folio_nid(const struct folio * folio)1641  static inline int folio_nid(const struct folio *folio)
1642  {
1643  	return page_to_nid(&folio->page);
1644  }
1645  
1646  #ifdef CONFIG_NUMA_BALANCING
1647  /* page access time bits need to hold at least 4 seconds */
1648  #define PAGE_ACCESS_TIME_MIN_BITS	12
1649  #if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
1650  #define PAGE_ACCESS_TIME_BUCKETS				\
1651  	(PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
1652  #else
1653  #define PAGE_ACCESS_TIME_BUCKETS	0
1654  #endif
1655  
1656  #define PAGE_ACCESS_TIME_MASK				\
1657  	(LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
1658  
cpu_pid_to_cpupid(int cpu,int pid)1659  static inline int cpu_pid_to_cpupid(int cpu, int pid)
1660  {
1661  	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1662  }
1663  
cpupid_to_pid(int cpupid)1664  static inline int cpupid_to_pid(int cpupid)
1665  {
1666  	return cpupid & LAST__PID_MASK;
1667  }
1668  
cpupid_to_cpu(int cpupid)1669  static inline int cpupid_to_cpu(int cpupid)
1670  {
1671  	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1672  }
1673  
cpupid_to_nid(int cpupid)1674  static inline int cpupid_to_nid(int cpupid)
1675  {
1676  	return cpu_to_node(cpupid_to_cpu(cpupid));
1677  }
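
/*
 * Example (illustrative sketch): packing the current CPU and PID into a
 * cpupid and unpacking it again.  Note that both components are truncated
 * to LAST__CPU_MASK/LAST__PID_MASK bits, so the recovered values may not
 * equal the originals.
 *
 *	int cpupid = cpu_pid_to_cpupid(raw_smp_processor_id(), current->pid);
 *	int cpu = cpupid_to_cpu(cpupid);
 *	int nid = cpupid_to_nid(cpupid);
 *	bool same_task = cpupid_match_pid(current, cpupid);
 */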
1678  
cpupid_pid_unset(int cpupid)1679  static inline bool cpupid_pid_unset(int cpupid)
1680  {
1681  	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1682  }
1683  
cpupid_cpu_unset(int cpupid)1684  static inline bool cpupid_cpu_unset(int cpupid)
1685  {
1686  	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1687  }
1688  
__cpupid_match_pid(pid_t task_pid,int cpupid)1689  static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1690  {
1691  	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1692  }
1693  
1694  #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
1695  #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
folio_xchg_last_cpupid(struct folio * folio,int cpupid)1696  static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1697  {
1698  	return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1699  }
1700  
folio_last_cpupid(struct folio * folio)1701  static inline int folio_last_cpupid(struct folio *folio)
1702  {
1703  	return folio->_last_cpupid;
1704  }
page_cpupid_reset_last(struct page * page)1705  static inline void page_cpupid_reset_last(struct page *page)
1706  {
1707  	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1708  }
1709  #else
folio_last_cpupid(struct folio * folio)1710  static inline int folio_last_cpupid(struct folio *folio)
1711  {
1712  	return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1713  }
1714  
1715  int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
1716  
page_cpupid_reset_last(struct page * page)1717  static inline void page_cpupid_reset_last(struct page *page)
1718  {
1719  	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1720  }
1721  #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
1722  
folio_xchg_access_time(struct folio * folio,int time)1723  static inline int folio_xchg_access_time(struct folio *folio, int time)
1724  {
1725  	int last_time;
1726  
1727  	last_time = folio_xchg_last_cpupid(folio,
1728  					   time >> PAGE_ACCESS_TIME_BUCKETS);
1729  	return last_time << PAGE_ACCESS_TIME_BUCKETS;
1730  }
1731  
vma_set_access_pid_bit(struct vm_area_struct * vma)1732  static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1733  {
1734  	unsigned int pid_bit;
1735  
1736  	pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
1737  	if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
1738  		__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
1739  	}
1740  }
1741  
1742  bool folio_use_access_time(struct folio *folio);
1743  #else /* !CONFIG_NUMA_BALANCING */
folio_xchg_last_cpupid(struct folio * folio,int cpupid)1744  static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1745  {
1746  	return folio_nid(folio); /* XXX */
1747  }
1748  
folio_xchg_access_time(struct folio * folio,int time)1749  static inline int folio_xchg_access_time(struct folio *folio, int time)
1750  {
1751  	return 0;
1752  }
1753  
folio_last_cpupid(struct folio * folio)1754  static inline int folio_last_cpupid(struct folio *folio)
1755  {
1756  	return folio_nid(folio); /* XXX */
1757  }
1758  
cpupid_to_nid(int cpupid)1759  static inline int cpupid_to_nid(int cpupid)
1760  {
1761  	return -1;
1762  }
1763  
cpupid_to_pid(int cpupid)1764  static inline int cpupid_to_pid(int cpupid)
1765  {
1766  	return -1;
1767  }
1768  
cpupid_to_cpu(int cpupid)1769  static inline int cpupid_to_cpu(int cpupid)
1770  {
1771  	return -1;
1772  }
1773  
cpu_pid_to_cpupid(int nid,int pid)1774  static inline int cpu_pid_to_cpupid(int nid, int pid)
1775  {
1776  	return -1;
1777  }
1778  
cpupid_pid_unset(int cpupid)1779  static inline bool cpupid_pid_unset(int cpupid)
1780  {
1781  	return true;
1782  }
1783  
page_cpupid_reset_last(struct page * page)1784  static inline void page_cpupid_reset_last(struct page *page)
1785  {
1786  }
1787  
cpupid_match_pid(struct task_struct * task,int cpupid)1788  static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1789  {
1790  	return false;
1791  }
1792  
vma_set_access_pid_bit(struct vm_area_struct * vma)1793  static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1794  {
1795  }
folio_use_access_time(struct folio * folio)1796  static inline bool folio_use_access_time(struct folio *folio)
1797  {
1798  	return false;
1799  }
1800  #endif /* CONFIG_NUMA_BALANCING */
1801  
1802  #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
1803  
1804  /*
1805   * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to
1806   * set the tag for every page to the native kernel tag value 0xff, as the
1807   * default value 0x00 already maps to 0xff.
1808   */
1809  
page_kasan_tag(const struct page * page)1810  static inline u8 page_kasan_tag(const struct page *page)
1811  {
1812  	u8 tag = KASAN_TAG_KERNEL;
1813  
1814  	if (kasan_enabled()) {
1815  		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1816  		tag ^= 0xff;
1817  	}
1818  
1819  	return tag;
1820  }
1821  
page_kasan_tag_set(struct page * page,u8 tag)1822  static inline void page_kasan_tag_set(struct page *page, u8 tag)
1823  {
1824  	unsigned long old_flags, flags;
1825  
1826  	if (!kasan_enabled())
1827  		return;
1828  
1829  	tag ^= 0xff;
1830  	old_flags = READ_ONCE(page->flags);
1831  	do {
1832  		flags = old_flags;
1833  		flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1834  		flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1835  	} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
1836  }
1837  
page_kasan_tag_reset(struct page * page)1838  static inline void page_kasan_tag_reset(struct page *page)
1839  {
1840  	if (kasan_enabled())
1841  		page_kasan_tag_set(page, KASAN_TAG_KERNEL);
1842  }
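
/*
 * Example (illustrative sketch): stamping a page with a software tag and
 * reading it back.  The tag value is hypothetical; only the bits covered by
 * KASAN_TAG_MASK are preserved.
 *
 *	u8 tag = 0x2a & KASAN_TAG_MASK;
 *
 *	page_kasan_tag_set(page, tag);
 *	pr_debug("tag is now %#x\n", page_kasan_tag(page));
 *	page_kasan_tag_reset(page);	// back to KASAN_TAG_KERNEL
 */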
1843  
1844  #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1845  
page_kasan_tag(const struct page * page)1846  static inline u8 page_kasan_tag(const struct page *page)
1847  {
1848  	return 0xff;
1849  }
1850  
page_kasan_tag_set(struct page * page,u8 tag)1851  static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
page_kasan_tag_reset(struct page * page)1852  static inline void page_kasan_tag_reset(struct page *page) { }
1853  
1854  #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1855  
page_zone(const struct page * page)1856  static inline struct zone *page_zone(const struct page *page)
1857  {
1858  	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1859  }
1860  
page_pgdat(const struct page * page)1861  static inline pg_data_t *page_pgdat(const struct page *page)
1862  {
1863  	return NODE_DATA(page_to_nid(page));
1864  }
1865  
folio_zone(const struct folio * folio)1866  static inline struct zone *folio_zone(const struct folio *folio)
1867  {
1868  	return page_zone(&folio->page);
1869  }
1870  
folio_pgdat(const struct folio * folio)1871  static inline pg_data_t *folio_pgdat(const struct folio *folio)
1872  {
1873  	return page_pgdat(&folio->page);
1874  }
1875  
1876  #ifdef SECTION_IN_PAGE_FLAGS
set_page_section(struct page * page,unsigned long section)1877  static inline void set_page_section(struct page *page, unsigned long section)
1878  {
1879  	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1880  	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1881  }
1882  
page_to_section(const struct page * page)1883  static inline unsigned long page_to_section(const struct page *page)
1884  {
1885  	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1886  }
1887  #endif
1888  
1889  /**
1890   * folio_pfn - Return the Page Frame Number of a folio.
1891   * @folio: The folio.
1892   *
1893   * A folio may contain multiple pages.  The pages have consecutive
1894   * Page Frame Numbers.
1895   *
1896   * Return: The Page Frame Number of the first page in the folio.
1897   */
folio_pfn(struct folio * folio)1898  static inline unsigned long folio_pfn(struct folio *folio)
1899  {
1900  	return page_to_pfn(&folio->page);
1901  }
1902  
pfn_folio(unsigned long pfn)1903  static inline struct folio *pfn_folio(unsigned long pfn)
1904  {
1905  	return page_folio(pfn_to_page(pfn));
1906  }
1907  
1908  /**
1909   * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
1910   * @folio: The folio.
1911   *
1912   * This function checks if a folio has been pinned via a call to
1913   * a function in the pin_user_pages() family.
1914   *
1915   * For small folios, the return value is partially fuzzy: false is not fuzzy,
1916   * because it means "definitely not pinned for DMA", but true means "probably
1917   * pinned for DMA, but possibly a false positive due to having at least
1918   * GUP_PIN_COUNTING_BIAS worth of normal folio references".
1919   *
1920   * False positives are OK, because: a) it's unlikely for a folio to
1921   * get that many refcounts, and b) all the callers of this routine are
1922   * expected to be able to deal gracefully with a false positive.
1923   *
1924   * For large folios, the result will be exactly correct. That's because
1925   * we have more tracking data available: the _pincount field is used
1926   * instead of the GUP_PIN_COUNTING_BIAS scheme.
1927   *
1928   * For more information, please see Documentation/core-api/pin_user_pages.rst.
1929   *
1930   * Return: True, if it is likely that the folio has been "dma-pinned".
1931   * False, if the folio is definitely not dma-pinned.
1932   */
folio_maybe_dma_pinned(struct folio * folio)1933  static inline bool folio_maybe_dma_pinned(struct folio *folio)
1934  {
1935  	if (folio_test_large(folio))
1936  		return atomic_read(&folio->_pincount) > 0;
1937  
1938  	/*
1939  	 * folio_ref_count() is signed. If that refcount overflows, then
1940  	 * folio_ref_count() returns a negative value, and callers will avoid
1941  	 * further incrementing the refcount.
1942  	 *
1943  	 * Here, for that overflow case, use the sign bit to count a little
1944  	 * bit higher via unsigned math, and thus still get an accurate result.
1945  	 */
1946  	return ((unsigned int)folio_ref_count(folio)) >=
1947  		GUP_PIN_COUNTING_BIAS;
1948  }
1949  
1950  /*
1951   * This should most likely only be called during fork() to see whether we
1952   * should break the cow immediately for an anon page on the src mm.
1953   *
1954   * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
1955   */
folio_needs_cow_for_dma(struct vm_area_struct * vma,struct folio * folio)1956  static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
1957  					  struct folio *folio)
1958  {
1959  	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
1960  
1961  	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
1962  		return false;
1963  
1964  	return folio_maybe_dma_pinned(folio);
1965  }
1966  
1967  /**
1968   * is_zero_page - Query if a page is a zero page
1969   * @page: The page to query
1970   *
1971   * This returns true if @page is one of the permanent zero pages.
1972   */
is_zero_page(const struct page * page)1973  static inline bool is_zero_page(const struct page *page)
1974  {
1975  	return is_zero_pfn(page_to_pfn(page));
1976  }
1977  
1978  /**
1979   * is_zero_folio - Query if a folio is a zero page
1980   * @folio: The folio to query
1981   *
1982   * This returns true if @folio is one of the permanent zero pages.
1983   */
is_zero_folio(const struct folio * folio)1984  static inline bool is_zero_folio(const struct folio *folio)
1985  {
1986  	return is_zero_page(&folio->page);
1987  }
1988  
1989  /* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning folios */
1990  #ifdef CONFIG_MIGRATION
folio_is_longterm_pinnable(struct folio * folio)1991  static inline bool folio_is_longterm_pinnable(struct folio *folio)
1992  {
1993  #ifdef CONFIG_CMA
1994  	int mt = folio_migratetype(folio);
1995  
1996  	if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
1997  		return false;
1998  #endif
1999  	/* The zero page can be "pinned" but gets special handling. */
2000  	if (is_zero_folio(folio))
2001  		return true;
2002  
2003  	/* Coherent device memory must always allow eviction. */
2004  	if (folio_is_device_coherent(folio))
2005  		return false;
2006  
2007  	/* Otherwise, non-movable zone folios can be pinned. */
2008  	return !folio_is_zone_movable(folio);
2009  
2010  }
2011  #else
folio_is_longterm_pinnable(struct folio * folio)2012  static inline bool folio_is_longterm_pinnable(struct folio *folio)
2013  {
2014  	return true;
2015  }
2016  #endif
2017  
set_page_zone(struct page * page,enum zone_type zone)2018  static inline void set_page_zone(struct page *page, enum zone_type zone)
2019  {
2020  	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
2021  	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
2022  }
2023  
set_page_node(struct page * page,unsigned long node)2024  static inline void set_page_node(struct page *page, unsigned long node)
2025  {
2026  	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
2027  	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
2028  }
2029  
set_page_links(struct page * page,enum zone_type zone,unsigned long node,unsigned long pfn)2030  static inline void set_page_links(struct page *page, enum zone_type zone,
2031  	unsigned long node, unsigned long pfn)
2032  {
2033  	set_page_zone(page, zone);
2034  	set_page_node(page, node);
2035  #ifdef SECTION_IN_PAGE_FLAGS
2036  	set_page_section(page, pfn_to_section_nr(pfn));
2037  #endif
2038  }
2039  
2040  /**
2041   * folio_nr_pages - The number of pages in the folio.
2042   * @folio: The folio.
2043   *
2044   * Return: A positive power of two.
2045   */
folio_nr_pages(const struct folio * folio)2046  static inline long folio_nr_pages(const struct folio *folio)
2047  {
2048  	if (!folio_test_large(folio))
2049  		return 1;
2050  #ifdef CONFIG_64BIT
2051  	return folio->_folio_nr_pages;
2052  #else
2053  	return 1L << (folio->_flags_1 & 0xff);
2054  #endif
2055  }
2056  
2057  /* Only hugetlbfs can allocate folios larger than MAX_ORDER */
2058  #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
2059  #define MAX_FOLIO_NR_PAGES	(1UL << PUD_ORDER)
2060  #else
2061  #define MAX_FOLIO_NR_PAGES	MAX_ORDER_NR_PAGES
2062  #endif
2063  
2064  /*
2065   * compound_nr() returns the number of pages in this potentially compound
2066   * page.  compound_nr() can be called on a tail page, and is defined to
2067   * return 1 in that case.
2068   */
compound_nr(struct page * page)2069  static inline unsigned long compound_nr(struct page *page)
2070  {
2071  	struct folio *folio = (struct folio *)page;
2072  
2073  	if (!test_bit(PG_head, &folio->flags))
2074  		return 1;
2075  #ifdef CONFIG_64BIT
2076  	return folio->_folio_nr_pages;
2077  #else
2078  	return 1L << (folio->_flags_1 & 0xff);
2079  #endif
2080  }
2081  
2082  /**
2083   * thp_nr_pages - The number of regular pages in this huge page.
2084   * @page: The head page of a huge page.
2085   */
thp_nr_pages(struct page * page)2086  static inline int thp_nr_pages(struct page *page)
2087  {
2088  	return folio_nr_pages((struct folio *)page);
2089  }
2090  
2091  /**
2092   * folio_next - Move to the next physical folio.
2093   * @folio: The folio we're currently operating on.
2094   *
2095   * If you have physically contiguous memory which may span more than
2096   * one folio (eg a &struct bio_vec), use this function to move from one
2097   * folio to the next.  Do not use it if the memory is only virtually
2098   * contiguous as the folios are almost certainly not adjacent to each
2099   * other.  This is the folio equivalent to writing ``page++``.
2100   *
2101   * Context: We assume that the folios are refcounted and/or locked at a
2102   * higher level and do not adjust the reference counts.
2103   * Return: The next struct folio.
2104   */
folio_next(struct folio * folio)2105  static inline struct folio *folio_next(struct folio *folio)
2106  {
2107  	return (struct folio *)folio_page(folio, folio_nr_pages(folio));
2108  }
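
/*
 * Example (illustrative sketch): walking a physically contiguous range one
 * folio at a time.  'page' and 'nr_bytes' are hypothetical, and 'page' is
 * assumed to be the first page of a folio.
 *
 *	struct folio *folio = page_folio(page);
 *	size_t remaining = nr_bytes;
 *
 *	while (remaining) {
 *		size_t len = min(remaining, folio_size(folio));
 *
 *		// ... operate on 'len' bytes of this folio ...
 *		remaining -= len;
 *		folio = folio_next(folio);
 *	}
 */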
2109  
2110  /**
2111   * folio_shift - The size of the memory described by this folio.
2112   * @folio: The folio.
2113   *
2114   * A folio represents a number of bytes which is a power-of-two in size.
2115   * This function tells you which power-of-two the folio is.  See also
2116   * folio_size() and folio_order().
2117   *
2118   * Context: The caller should have a reference on the folio to prevent
2119   * it from being split.  It is not necessary for the folio to be locked.
2120   * Return: The base-2 logarithm of the size of this folio.
2121   */
folio_shift(const struct folio * folio)2122  static inline unsigned int folio_shift(const struct folio *folio)
2123  {
2124  	return PAGE_SHIFT + folio_order(folio);
2125  }
2126  
2127  /**
2128   * folio_size - The number of bytes in a folio.
2129   * @folio: The folio.
2130   *
2131   * Context: The caller should have a reference on the folio to prevent
2132   * it from being split.  It is not necessary for the folio to be locked.
2133   * Return: The number of bytes in this folio.
2134   */
folio_size(const struct folio * folio)2135  static inline size_t folio_size(const struct folio *folio)
2136  {
2137  	return PAGE_SIZE << folio_order(folio);
2138  }
2139  
2140  /**
2141   * folio_likely_mapped_shared - Estimate if the folio is mapped into the page
2142   *				tables of more than one MM
2143   * @folio: The folio.
2144   *
2145   * This function checks if the folio is currently mapped into more than one
2146   * MM ("mapped shared"), or if the folio is only mapped into a single MM
2147   * ("mapped exclusively").
2148   *
2149   * For KSM folios, this function also returns "mapped shared" when a folio is
2150   * mapped multiple times into the same MM, because the individual page mappings
2151   * are independent.
2152   *
2153   * As precise information is not easily available for all folios, this function
2154   * estimates the number of MMs ("sharers") that are currently mapping a folio
2155   * using the number of times the first page of the folio is currently mapped
2156   * into page tables.
2157   *
2158   * For small anonymous folios and anonymous hugetlb folios, the return
2159   * value will be exactly correct: non-KSM folios can only be mapped at most once
2160   * into an MM, and they cannot be partially mapped. KSM folios are
2161   * considered shared even if mapped multiple times into the same MM.
2162   *
2163   * For other folios, the result can be fuzzy:
2164   *    #. For partially-mappable large folios (THP), the return value can wrongly
2165   *       indicate "mapped exclusively" (false negative) when the folio is
2166   *       only partially mapped into at least one MM.
2167   *    #. For pagecache folios (including hugetlb), the return value can wrongly
2168   *       indicate "mapped shared" (false positive) when two VMAs in the same MM
2169   *       cover the same file range.
2170   *
2171   * Further, this function only considers current page table mappings that
2172   * are tracked using the folio mapcount(s).
2173   *
2174   * This function does not consider:
2175   *    #. If the folio might get mapped in the (near) future (e.g., swapcache,
2176   *       pagecache, temporary unmapping for migration).
2177   *    #. If the folio is mapped differently (VM_PFNMAP).
2178   *    #. If hugetlb page table sharing applies. Callers might want to check
2179   *       hugetlb_pmd_shared().
2180   *
2181   * Return: Whether the folio is estimated to be mapped into more than one MM.
2182   */
folio_likely_mapped_shared(struct folio * folio)2183  static inline bool folio_likely_mapped_shared(struct folio *folio)
2184  {
2185  	int mapcount = folio_mapcount(folio);
2186  
2187  	/* Only partially-mappable folios require more care. */
2188  	if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
2189  		return mapcount > 1;
2190  
2191  	/* A single mapping implies "mapped exclusively". */
2192  	if (mapcount <= 1)
2193  		return false;
2194  
2195  	/* If any page is mapped more than once we treat it "mapped shared". */
2196  	if (folio_entire_mapcount(folio) || mapcount > folio_nr_pages(folio))
2197  		return true;
2198  
2199  	/* Let's guess based on the first subpage. */
2200  	return atomic_read(&folio->_mapcount) > 0;
2201  }
2202  
2203  #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
arch_make_folio_accessible(struct folio * folio)2204  static inline int arch_make_folio_accessible(struct folio *folio)
2205  {
2206  	return 0;
2207  }
2208  #endif
2209  
2210  /*
2211   * Some inline functions in vmstat.h depend on page_zone()
2212   */
2213  #include <linux/vmstat.h>
2214  
2215  #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
2216  #define HASHED_PAGE_VIRTUAL
2217  #endif
2218  
2219  #if defined(WANT_PAGE_VIRTUAL)
page_address(const struct page * page)2220  static inline void *page_address(const struct page *page)
2221  {
2222  	return page->virtual;
2223  }
set_page_address(struct page * page,void * address)2224  static inline void set_page_address(struct page *page, void *address)
2225  {
2226  	page->virtual = address;
2227  }
2228  #define page_address_init()  do { } while(0)
2229  #endif
2230  
2231  #if defined(HASHED_PAGE_VIRTUAL)
2232  void *page_address(const struct page *page);
2233  void set_page_address(struct page *page, void *virtual);
2234  void page_address_init(void);
2235  #endif
2236  
lowmem_page_address(const struct page * page)2237  static __always_inline void *lowmem_page_address(const struct page *page)
2238  {
2239  	return page_to_virt(page);
2240  }
2241  
2242  #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
2243  #define page_address(page) lowmem_page_address(page)
2244  #define set_page_address(page, address)  do { } while(0)
2245  #define page_address_init()  do { } while(0)
2246  #endif
2247  
folio_address(const struct folio * folio)2248  static inline void *folio_address(const struct folio *folio)
2249  {
2250  	return page_address(&folio->page);
2251  }
2252  
2253  /*
2254   * Return true only if the page has been allocated with
2255   * ALLOC_NO_WATERMARKS and the low watermark was not
2256   * met, implying that the system is under some pressure.
2257   */
page_is_pfmemalloc(const struct page * page)2258  static inline bool page_is_pfmemalloc(const struct page *page)
2259  {
2260  	/*
2261  	 * lru.next has bit 1 set if the page is allocated from the
2262  	 * pfmemalloc reserves.  Callers may simply overwrite it if
2263  	 * they do not need to preserve that information.
2264  	 */
2265  	return (uintptr_t)page->lru.next & BIT(1);
2266  }
2267  
2268  /*
2269   * Return true only if the folio has been allocated with
2270   * ALLOC_NO_WATERMARKS and the low watermark was not
2271   * met, implying that the system is under some pressure.
2272   */
folio_is_pfmemalloc(const struct folio * folio)2273  static inline bool folio_is_pfmemalloc(const struct folio *folio)
2274  {
2275  	/*
2276  	 * lru.next has bit 1 set if the page is allocated from the
2277  	 * pfmemalloc reserves.  Callers may simply overwrite it if
2278  	 * they do not need to preserve that information.
2279  	 */
2280  	return (uintptr_t)folio->lru.next & BIT(1);
2281  }
2282  
2283  /*
2284   * Only to be called by the page allocator on a freshly allocated
2285   * page.
2286   */
set_page_pfmemalloc(struct page * page)2287  static inline void set_page_pfmemalloc(struct page *page)
2288  {
2289  	page->lru.next = (void *)BIT(1);
2290  }
2291  
clear_page_pfmemalloc(struct page * page)2292  static inline void clear_page_pfmemalloc(struct page *page)
2293  {
2294  	page->lru.next = NULL;
2295  }
2296  
2297  /*
2298   * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
2299   */
2300  extern void pagefault_out_of_memory(void);
2301  
2302  #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
2303  #define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))
2304  #define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
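
/*
 * Example (illustrative sketch): splitting a kernel virtual address into a
 * page plus offset, e.g. when building a scatterlist entry.  'buf', 'count'
 * and 'sg' are hypothetical.
 *
 *	struct page *page = virt_to_page(buf);
 *	unsigned int off = offset_in_page(buf);
 *	unsigned int len = min_t(size_t, count, PAGE_SIZE - off);
 *
 *	sg_set_page(sg, page, len, off);
 */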
2305  
2306  /*
2307   * Parameter block passed down to zap_pte_range in exceptional cases.
2308   */
2309  struct zap_details {
2310  	struct folio *single_folio;	/* Locked folio to be unmapped */
2311  	bool even_cows;			/* Zap COWed private pages too? */
2312  	zap_flags_t zap_flags;		/* Extra flags for zapping */
2313  };
2314  
2315  /*
2316   * Whether to drop the pte markers, for example, the uffd-wp information for
2317   * file-backed memory.  This should only be specified when we will completely
2318   * drop the page in the mm, either by truncation or unmapping of the vma.  By
2319   * default, the flag is not set.
2320   */
2321  #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
2322  /* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
2323  #define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
2324  
2325  #ifdef CONFIG_SCHED_MM_CID
2326  void sched_mm_cid_before_execve(struct task_struct *t);
2327  void sched_mm_cid_after_execve(struct task_struct *t);
2328  void sched_mm_cid_fork(struct task_struct *t);
2329  void sched_mm_cid_exit_signals(struct task_struct *t);
task_mm_cid(struct task_struct * t)2330  static inline int task_mm_cid(struct task_struct *t)
2331  {
2332  	return t->mm_cid;
2333  }
2334  #else
sched_mm_cid_before_execve(struct task_struct * t)2335  static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
sched_mm_cid_after_execve(struct task_struct * t)2336  static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
sched_mm_cid_fork(struct task_struct * t)2337  static inline void sched_mm_cid_fork(struct task_struct *t) { }
sched_mm_cid_exit_signals(struct task_struct * t)2338  static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
task_mm_cid(struct task_struct * t)2339  static inline int task_mm_cid(struct task_struct *t)
2340  {
2341  	/*
2342  	 * Use the processor id as a fall-back when the mm cid feature is
2343  	 * disabled. This provides functional per-cpu data structure accesses
2344  	 * in user-space, although it won't provide the memory usage benefits.
2345  	 */
2346  	return raw_smp_processor_id();
2347  }
2348  #endif
2349  
2350  #ifdef CONFIG_MMU
2351  extern bool can_do_mlock(void);
2352  #else
can_do_mlock(void)2353  static inline bool can_do_mlock(void) { return false; }
2354  #endif
2355  extern int user_shm_lock(size_t, struct ucounts *);
2356  extern void user_shm_unlock(size_t, struct ucounts *);
2357  
2358  struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
2359  			     pte_t pte);
2360  struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
2361  			     pte_t pte);
2362  struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
2363  				  unsigned long addr, pmd_t pmd);
2364  struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
2365  				pmd_t pmd);
2366  
2367  void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2368  		  unsigned long size);
2369  void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2370  			   unsigned long size, struct zap_details *details);
zap_vma_pages(struct vm_area_struct * vma)2371  static inline void zap_vma_pages(struct vm_area_struct *vma)
2372  {
2373  	zap_page_range_single(vma, vma->vm_start,
2374  			      vma->vm_end - vma->vm_start, NULL);
2375  }
2376  void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
2377  		struct vm_area_struct *start_vma, unsigned long start,
2378  		unsigned long end, unsigned long tree_end, bool mm_wr_locked);
2379  
2380  struct mmu_notifier_range;
2381  
2382  void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
2383  		unsigned long end, unsigned long floor, unsigned long ceiling);
2384  int
2385  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
2386  int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2387  			void *buf, int len, int write);
2388  
2389  struct follow_pfnmap_args {
2390  	/**
2391  	 * Inputs:
2392  	 * @vma: Pointer to the &struct vm_area_struct to walk
2393  	 * @address: the virtual address to walk
2394  	 */
2395  	struct vm_area_struct *vma;
2396  	unsigned long address;
2397  	/**
2398  	 * Internals:
2399  	 *
2400  	 * The caller shouldn't touch any of these.
2401  	 */
2402  	spinlock_t *lock;
2403  	pte_t *ptep;
2404  	/**
2405  	 * Outputs:
2406  	 *
2407  	 * @pfn: the PFN of the address
2408  	 * @pgprot: the pgprot_t of the mapping
2409  	 * @writable: whether the mapping is writable
2410  	 * @special: whether the mapping is a special mapping (real PFN maps)
2411  	 */
2412  	unsigned long pfn;
2413  	pgprot_t pgprot;
2414  	bool writable;
2415  	bool special;
2416  };
2417  int follow_pfnmap_start(struct follow_pfnmap_args *args);
2418  void follow_pfnmap_end(struct follow_pfnmap_args *args);
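
/*
 * Example (illustrative sketch): translating a single address in a PFN-mapped
 * VMA.  The caller is assumed to hold the VMA stable (e.g. the mmap read
 * lock); 'vma' and 'addr' are hypothetical.
 *
 *	struct follow_pfnmap_args args = { .vma = vma, .address = addr };
 *
 *	if (follow_pfnmap_start(&args))
 *		return -EFAULT;
 *	// args.pfn, args.pgprot and args.writable are valid here
 *	follow_pfnmap_end(&args);
 */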
2419  
2420  extern void truncate_pagecache(struct inode *inode, loff_t new);
2421  extern void truncate_setsize(struct inode *inode, loff_t newsize);
2422  void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
2423  void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
2424  int generic_error_remove_folio(struct address_space *mapping,
2425  		struct folio *folio);
2426  
2427  struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
2428  		unsigned long address, struct pt_regs *regs);
2429  
2430  #ifdef CONFIG_MMU
2431  extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2432  				  unsigned long address, unsigned int flags,
2433  				  struct pt_regs *regs);
2434  extern int fixup_user_fault(struct mm_struct *mm,
2435  			    unsigned long address, unsigned int fault_flags,
2436  			    bool *unlocked);
2437  void unmap_mapping_pages(struct address_space *mapping,
2438  		pgoff_t start, pgoff_t nr, bool even_cows);
2439  void unmap_mapping_range(struct address_space *mapping,
2440  		loff_t const holebegin, loff_t const holelen, int even_cows);
2441  #else
handle_mm_fault(struct vm_area_struct * vma,unsigned long address,unsigned int flags,struct pt_regs * regs)2442  static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2443  					 unsigned long address, unsigned int flags,
2444  					 struct pt_regs *regs)
2445  {
2446  	/* should never happen if there's no MMU */
2447  	BUG();
2448  	return VM_FAULT_SIGBUS;
2449  }
fixup_user_fault(struct mm_struct * mm,unsigned long address,unsigned int fault_flags,bool * unlocked)2450  static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
2451  		unsigned int fault_flags, bool *unlocked)
2452  {
2453  	/* should never happen if there's no MMU */
2454  	BUG();
2455  	return -EFAULT;
2456  }
unmap_mapping_pages(struct address_space * mapping,pgoff_t start,pgoff_t nr,bool even_cows)2457  static inline void unmap_mapping_pages(struct address_space *mapping,
2458  		pgoff_t start, pgoff_t nr, bool even_cows) { }
unmap_mapping_range(struct address_space * mapping,loff_t const holebegin,loff_t const holelen,int even_cows)2459  static inline void unmap_mapping_range(struct address_space *mapping,
2460  		loff_t const holebegin, loff_t const holelen, int even_cows) { }
2461  #endif
2462  
unmap_shared_mapping_range(struct address_space * mapping,loff_t const holebegin,loff_t const holelen)2463  static inline void unmap_shared_mapping_range(struct address_space *mapping,
2464  		loff_t const holebegin, loff_t const holelen)
2465  {
2466  	unmap_mapping_range(mapping, holebegin, holelen, 0);
2467  }
2468  
2469  static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
2470  						unsigned long addr);
2471  
2472  extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
2473  		void *buf, int len, unsigned int gup_flags);
2474  extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2475  		void *buf, int len, unsigned int gup_flags);
2476  
2477  long get_user_pages_remote(struct mm_struct *mm,
2478  			   unsigned long start, unsigned long nr_pages,
2479  			   unsigned int gup_flags, struct page **pages,
2480  			   int *locked);
2481  long pin_user_pages_remote(struct mm_struct *mm,
2482  			   unsigned long start, unsigned long nr_pages,
2483  			   unsigned int gup_flags, struct page **pages,
2484  			   int *locked);
2485  
2486  /*
2487   * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
2488   */
get_user_page_vma_remote(struct mm_struct * mm,unsigned long addr,int gup_flags,struct vm_area_struct ** vmap)2489  static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
2490  						    unsigned long addr,
2491  						    int gup_flags,
2492  						    struct vm_area_struct **vmap)
2493  {
2494  	struct page *page;
2495  	struct vm_area_struct *vma;
2496  	int got;
2497  
2498  	if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
2499  		return ERR_PTR(-EINVAL);
2500  
2501  	got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
2502  
2503  	if (got < 0)
2504  		return ERR_PTR(got);
2505  
2506  	vma = vma_lookup(mm, addr);
2507  	if (WARN_ON_ONCE(!vma)) {
2508  		put_page(page);
2509  		return ERR_PTR(-EINVAL);
2510  	}
2511  
2512  	*vmap = vma;
2513  	return page;
2514  }
2515  
2516  long get_user_pages(unsigned long start, unsigned long nr_pages,
2517  		    unsigned int gup_flags, struct page **pages);
2518  long pin_user_pages(unsigned long start, unsigned long nr_pages,
2519  		    unsigned int gup_flags, struct page **pages);
2520  long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2521  		    struct page **pages, unsigned int gup_flags);
2522  long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2523  		    struct page **pages, unsigned int gup_flags);
2524  long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
2525  		      struct folio **folios, unsigned int max_folios,
2526  		      pgoff_t *offset);
2527  
2528  int get_user_pages_fast(unsigned long start, int nr_pages,
2529  			unsigned int gup_flags, struct page **pages);
2530  int pin_user_pages_fast(unsigned long start, int nr_pages,
2531  			unsigned int gup_flags, struct page **pages);
2532  void folio_add_pin(struct folio *folio);
2533  
2534  int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
2535  int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
2536  			struct task_struct *task, bool bypass_rlim);
2537  
2538  struct kvec;
2539  struct page *get_dump_page(unsigned long addr);
2540  
2541  bool folio_mark_dirty(struct folio *folio);
2542  bool set_page_dirty(struct page *page);
2543  int set_page_dirty_lock(struct page *page);
2544  
2545  int get_cmdline(struct task_struct *task, char *buffer, int buflen);
2546  
2547  /*
2548   * Flags used by change_protection().  For now we make it a bitmap so
2549   * that we can pass in multiple flags just like parameters.  However,
2550   * for now all the callers only use one of the flags at any given
2551   * time.
2552   */
2553  /*
2554   * Whether we should manually check if we can map individual PTEs writable,
2555   * because something (e.g., COW, uffd-wp) blocks that from happening for all
2556   * PTEs automatically in a writable mapping.
2557   */
2558  #define  MM_CP_TRY_CHANGE_WRITABLE	   (1UL << 0)
2559  /* Whether this protection change is for NUMA hints */
2560  #define  MM_CP_PROT_NUMA                   (1UL << 1)
2561  /* Whether this change is for write protecting */
2562  #define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
2563  #define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
2564  #define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
2565  					    MM_CP_UFFD_WP_RESOLVE)
2566  
2567  bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
2568  			     pte_t pte);
2569  extern long change_protection(struct mmu_gather *tlb,
2570  			      struct vm_area_struct *vma, unsigned long start,
2571  			      unsigned long end, unsigned long cp_flags);
2572  extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
2573  	  struct vm_area_struct *vma, struct vm_area_struct **pprev,
2574  	  unsigned long start, unsigned long end, unsigned long newflags);
2575  
2576  /*
2577   * Doesn't attempt to fault and may return short (fewer pages than requested).
2578   */
2579  int get_user_pages_fast_only(unsigned long start, int nr_pages,
2580  			     unsigned int gup_flags, struct page **pages);
2581  
get_user_page_fast_only(unsigned long addr,unsigned int gup_flags,struct page ** pagep)2582  static inline bool get_user_page_fast_only(unsigned long addr,
2583  			unsigned int gup_flags, struct page **pagep)
2584  {
2585  	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
2586  }
2587  /*
2588   * per-process(per-mm_struct) statistics.
2589   */
get_mm_counter(struct mm_struct * mm,int member)2590  static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
2591  {
2592  	return percpu_counter_read_positive(&mm->rss_stat[member]);
2593  }
2594  
2595  void mm_trace_rss_stat(struct mm_struct *mm, int member);
2596  
add_mm_counter(struct mm_struct * mm,int member,long value)2597  static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
2598  {
2599  	percpu_counter_add(&mm->rss_stat[member], value);
2600  
2601  	mm_trace_rss_stat(mm, member);
2602  }
2603  
inc_mm_counter(struct mm_struct * mm,int member)2604  static inline void inc_mm_counter(struct mm_struct *mm, int member)
2605  {
2606  	percpu_counter_inc(&mm->rss_stat[member]);
2607  
2608  	mm_trace_rss_stat(mm, member);
2609  }
2610  
dec_mm_counter(struct mm_struct * mm,int member)2611  static inline void dec_mm_counter(struct mm_struct *mm, int member)
2612  {
2613  	percpu_counter_dec(&mm->rss_stat[member]);
2614  
2615  	mm_trace_rss_stat(mm, member);
2616  }
2617  
2618  /* Optimized variant when folio is already known not to be anon */
mm_counter_file(struct folio * folio)2619  static inline int mm_counter_file(struct folio *folio)
2620  {
2621  	if (folio_test_swapbacked(folio))
2622  		return MM_SHMEMPAGES;
2623  	return MM_FILEPAGES;
2624  }
2625  
mm_counter(struct folio * folio)2626  static inline int mm_counter(struct folio *folio)
2627  {
2628  	if (folio_test_anon(folio))
2629  		return MM_ANONPAGES;
2630  	return mm_counter_file(folio);
2631  }
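
/*
 * Example (illustrative sketch): keeping the RSS statistics in sync when a
 * folio is mapped into or unmapped from an mm, as the rmap code does.
 *
 *	add_mm_counter(mm, mm_counter(folio), folio_nr_pages(folio));	// map
 *	// ...
 *	add_mm_counter(mm, mm_counter(folio), -folio_nr_pages(folio));	// unmap
 */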
2632  
get_mm_rss(struct mm_struct * mm)2633  static inline unsigned long get_mm_rss(struct mm_struct *mm)
2634  {
2635  	return get_mm_counter(mm, MM_FILEPAGES) +
2636  		get_mm_counter(mm, MM_ANONPAGES) +
2637  		get_mm_counter(mm, MM_SHMEMPAGES);
2638  }
2639  
get_mm_hiwater_rss(struct mm_struct * mm)2640  static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2641  {
2642  	return max(mm->hiwater_rss, get_mm_rss(mm));
2643  }
2644  
get_mm_hiwater_vm(struct mm_struct * mm)2645  static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
2646  {
2647  	return max(mm->hiwater_vm, mm->total_vm);
2648  }
2649  
update_hiwater_rss(struct mm_struct * mm)2650  static inline void update_hiwater_rss(struct mm_struct *mm)
2651  {
2652  	unsigned long _rss = get_mm_rss(mm);
2653  
2654  	if ((mm)->hiwater_rss < _rss)
2655  		(mm)->hiwater_rss = _rss;
2656  }
2657  
update_hiwater_vm(struct mm_struct * mm)2658  static inline void update_hiwater_vm(struct mm_struct *mm)
2659  {
2660  	if (mm->hiwater_vm < mm->total_vm)
2661  		mm->hiwater_vm = mm->total_vm;
2662  }
2663  
reset_mm_hiwater_rss(struct mm_struct * mm)2664  static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2665  {
2666  	mm->hiwater_rss = get_mm_rss(mm);
2667  }
2668  
setmax_mm_hiwater_rss(unsigned long * maxrss,struct mm_struct * mm)2669  static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
2670  					 struct mm_struct *mm)
2671  {
2672  	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2673  
2674  	if (*maxrss < hiwater_rss)
2675  		*maxrss = hiwater_rss;
2676  }
2677  
2678  #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
pte_special(pte_t pte)2679  static inline int pte_special(pte_t pte)
2680  {
2681  	return 0;
2682  }
2683  
pte_mkspecial(pte_t pte)2684  static inline pte_t pte_mkspecial(pte_t pte)
2685  {
2686  	return pte;
2687  }
2688  #endif
2689  
2690  #ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
pmd_special(pmd_t pmd)2691  static inline bool pmd_special(pmd_t pmd)
2692  {
2693  	return false;
2694  }
2695  
pmd_mkspecial(pmd_t pmd)2696  static inline pmd_t pmd_mkspecial(pmd_t pmd)
2697  {
2698  	return pmd;
2699  }
2700  #endif	/* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
2701  
2702  #ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
pud_special(pud_t pud)2703  static inline bool pud_special(pud_t pud)
2704  {
2705  	return false;
2706  }
2707  
pud_mkspecial(pud_t pud)2708  static inline pud_t pud_mkspecial(pud_t pud)
2709  {
2710  	return pud;
2711  }
2712  #endif	/* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
2713  
2714  #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
pte_devmap(pte_t pte)2715  static inline int pte_devmap(pte_t pte)
2716  {
2717  	return 0;
2718  }
2719  #endif
2720  
2721  extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2722  			       spinlock_t **ptl);
get_locked_pte(struct mm_struct * mm,unsigned long addr,spinlock_t ** ptl)2723  static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2724  				    spinlock_t **ptl)
2725  {
2726  	pte_t *ptep;
2727  	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
2728  	return ptep;
2729  }
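
/*
 * Example (illustrative sketch): looking up (allocating if necessary) the PTE
 * for an address and updating it under the page-table lock.  'mm' and 'addr'
 * are hypothetical.
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = get_locked_pte(mm, addr, &ptl);
 *
 *	if (!pte)
 *		return -ENOMEM;
 *	// ... inspect or modify *pte ...
 *	pte_unmap_unlock(pte, ptl);
 */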
2730  
2731  #ifdef __PAGETABLE_P4D_FOLDED
__p4d_alloc(struct mm_struct * mm,pgd_t * pgd,unsigned long address)2732  static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2733  						unsigned long address)
2734  {
2735  	return 0;
2736  }
2737  #else
2738  int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2739  #endif
2740  
2741  #if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
__pud_alloc(struct mm_struct * mm,p4d_t * p4d,unsigned long address)2742  static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2743  						unsigned long address)
2744  {
2745  	return 0;
2746  }
mm_inc_nr_puds(struct mm_struct * mm)2747  static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
mm_dec_nr_puds(struct mm_struct * mm)2748  static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
2749  
2750  #else
2751  int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2752  
mm_inc_nr_puds(struct mm_struct * mm)2753  static inline void mm_inc_nr_puds(struct mm_struct *mm)
2754  {
2755  	if (mm_pud_folded(mm))
2756  		return;
2757  	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2758  }
2759  
mm_dec_nr_puds(struct mm_struct * mm)2760  static inline void mm_dec_nr_puds(struct mm_struct *mm)
2761  {
2762  	if (mm_pud_folded(mm))
2763  		return;
2764  	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2765  }
2766  #endif
2767  
2768  #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
__pmd_alloc(struct mm_struct * mm,pud_t * pud,unsigned long address)2769  static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
2770  						unsigned long address)
2771  {
2772  	return 0;
2773  }
2774  
mm_inc_nr_pmds(struct mm_struct * mm)2775  static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
mm_dec_nr_pmds(struct mm_struct * mm)2776  static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
2777  
2778  #else
2779  int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2780  
mm_inc_nr_pmds(struct mm_struct * mm)2781  static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2782  {
2783  	if (mm_pmd_folded(mm))
2784  		return;
2785  	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2786  }
2787  
mm_dec_nr_pmds(struct mm_struct * mm)2788  static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2789  {
2790  	if (mm_pmd_folded(mm))
2791  		return;
2792  	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2793  }
2794  #endif
2795  
2796  #ifdef CONFIG_MMU
mm_pgtables_bytes_init(struct mm_struct * mm)2797  static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2798  {
2799  	atomic_long_set(&mm->pgtables_bytes, 0);
2800  }
2801  
mm_pgtables_bytes(const struct mm_struct * mm)2802  static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2803  {
2804  	return atomic_long_read(&mm->pgtables_bytes);
2805  }
2806  
mm_inc_nr_ptes(struct mm_struct * mm)2807  static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2808  {
2809  	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2810  }
2811  
mm_dec_nr_ptes(struct mm_struct * mm)2812  static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2813  {
2814  	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2815  }
2816  #else
2817  
mm_pgtables_bytes_init(struct mm_struct * mm)2818  static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
mm_pgtables_bytes(const struct mm_struct * mm)2819  static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2820  {
2821  	return 0;
2822  }
2823  
mm_inc_nr_ptes(struct mm_struct * mm)2824  static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
mm_dec_nr_ptes(struct mm_struct * mm)2825  static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
2826  #endif
2827  
2828  int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
2829  int __pte_alloc_kernel(pmd_t *pmd);
2830  
2831  #if defined(CONFIG_MMU)
2832  
p4d_alloc(struct mm_struct * mm,pgd_t * pgd,unsigned long address)2833  static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2834  		unsigned long address)
2835  {
2836  	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
2837  		NULL : p4d_offset(pgd, address);
2838  }
2839  
pud_alloc(struct mm_struct * mm,p4d_t * p4d,unsigned long address)2840  static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2841  		unsigned long address)
2842  {
2843  	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
2844  		NULL : pud_offset(p4d, address);
2845  }
2846  
pmd_alloc(struct mm_struct * mm,pud_t * pud,unsigned long address)2847  static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2848  {
2849  	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
2850  		NULL: pmd_offset(pud, address);
2851  }
2852  #endif /* CONFIG_MMU */
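
/*
 * Example (illustrative sketch): allocating the intermediate levels down to a
 * PMD for one address, as fault handlers do.  Error handling is abbreviated;
 * 'mm' and 'addr' are hypothetical.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *	pud_t *pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
 *	pmd_t *pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
 *
 *	if (!pmd)
 *		return VM_FAULT_OOM;
 */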
2853  
virt_to_ptdesc(const void * x)2854  static inline struct ptdesc *virt_to_ptdesc(const void *x)
2855  {
2856  	return page_ptdesc(virt_to_page(x));
2857  }
2858  
ptdesc_to_virt(const struct ptdesc * pt)2859  static inline void *ptdesc_to_virt(const struct ptdesc *pt)
2860  {
2861  	return page_to_virt(ptdesc_page(pt));
2862  }
2863  
ptdesc_address(const struct ptdesc * pt)2864  static inline void *ptdesc_address(const struct ptdesc *pt)
2865  {
2866  	return folio_address(ptdesc_folio(pt));
2867  }
2868  
pagetable_is_reserved(struct ptdesc * pt)2869  static inline bool pagetable_is_reserved(struct ptdesc *pt)
2870  {
2871  	return folio_test_reserved(ptdesc_folio(pt));
2872  }
2873  
2874  /**
2875   * pagetable_alloc - Allocate pagetables
2876   * @gfp:    GFP flags
2877   * @order:  desired pagetable order
2878   *
2879   * pagetable_alloc allocates memory for page tables as well as a page table
2880   * descriptor to describe that memory.
2881   *
2882   * Return: The ptdesc describing the allocated page tables.
2883   */
2884  static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
2885  {
2886  	struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
2887  
2888  	return page_ptdesc(page);
2889  }
2890  #define pagetable_alloc(...)	alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
2891  
2892  /**
2893   * pagetable_free - Free pagetables
2894   * @pt:	The page table descriptor
2895   *
2896   * pagetable_free frees the memory of all page tables described by a page
2897   * table descriptor and the memory for the descriptor itself.
2898   */
2899  static inline void pagetable_free(struct ptdesc *pt)
2900  {
2901  	struct page *page = ptdesc_page(pt);
2902  
2903  	__free_pages(page, compound_order(page));
2904  }
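/*
 * Illustrative sketch (not part of the API above): allocate one page of
 * page-table memory and release it again.  Real users also run the matching
 * pagetable_*_ctor()/dtor() on the ptdesc; that step is omitted here and the
 * helper name is made up for the example.
 */
static inline bool example_pagetable_cycle(void)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);

	if (!ptdesc)
		return false;
	/* ... fill in entries via ptdesc_address(ptdesc) ... */
	pagetable_free(ptdesc);
	return true;
}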
2905  
2906  #if defined(CONFIG_SPLIT_PTE_PTLOCKS)
2907  #if ALLOC_SPLIT_PTLOCKS
2908  void __init ptlock_cache_init(void);
2909  bool ptlock_alloc(struct ptdesc *ptdesc);
2910  void ptlock_free(struct ptdesc *ptdesc);
2911  
2912  static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
2913  {
2914  	return ptdesc->ptl;
2915  }
2916  #else /* ALLOC_SPLIT_PTLOCKS */
2917  static inline void ptlock_cache_init(void)
2918  {
2919  }
2920  
2921  static inline bool ptlock_alloc(struct ptdesc *ptdesc)
2922  {
2923  	return true;
2924  }
2925  
2926  static inline void ptlock_free(struct ptdesc *ptdesc)
2927  {
2928  }
2929  
2930  static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
2931  {
2932  	return &ptdesc->ptl;
2933  }
2934  #endif /* ALLOC_SPLIT_PTLOCKS */
2935  
2936  static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2937  {
2938  	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
2939  }
2940  
2941  static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
2942  {
2943  	BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
2944  	BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
2945  	return ptlock_ptr(virt_to_ptdesc(pte));
2946  }
2947  
2948  static inline bool ptlock_init(struct ptdesc *ptdesc)
2949  {
2950  	/*
2951  	 * prep_new_page() initializes page->private (and therefore page->ptl)
2952  	 * to 0. Make sure nobody took it into use in the meantime.
2953  	 *
2954  	 * That can happen if an arch tries to use slab for page table allocation:
2955  	 * slab code uses page->slab_cache, which shares storage with page->ptl.
2956  	 */
2957  	VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
2958  	if (!ptlock_alloc(ptdesc))
2959  		return false;
2960  	spin_lock_init(ptlock_ptr(ptdesc));
2961  	return true;
2962  }
2963  
2964  #else	/* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
2965  /*
2966   * We use mm->page_table_lock to guard all pagetable pages of the mm.
2967   */
2968  static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2969  {
2970  	return &mm->page_table_lock;
2971  }
2972  static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
2973  {
2974  	return &mm->page_table_lock;
2975  }
2976  static inline void ptlock_cache_init(void) {}
2977  static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
2978  static inline void ptlock_free(struct ptdesc *ptdesc) {}
2979  #endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
2980  
2981  static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
2982  {
2983  	struct folio *folio = ptdesc_folio(ptdesc);
2984  
2985  	if (!ptlock_init(ptdesc))
2986  		return false;
2987  	__folio_set_pgtable(folio);
2988  	lruvec_stat_add_folio(folio, NR_PAGETABLE);
2989  	return true;
2990  }
2991  
2992  static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
2993  {
2994  	struct folio *folio = ptdesc_folio(ptdesc);
2995  
2996  	ptlock_free(ptdesc);
2997  	__folio_clear_pgtable(folio);
2998  	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
2999  }
3000  
3001  pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
3002  static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
3003  {
3004  	return __pte_offset_map(pmd, addr, NULL);
3005  }
3006  
3007  pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3008  			unsigned long addr, spinlock_t **ptlp);
3009  static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3010  			unsigned long addr, spinlock_t **ptlp)
3011  {
3012  	pte_t *pte;
3013  
3014  	__cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp));
3015  	return pte;
3016  }
3017  
3018  pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
3019  			unsigned long addr, spinlock_t **ptlp);
3020  
3021  #define pte_unmap_unlock(pte, ptl)	do {		\
3022  	spin_unlock(ptl);				\
3023  	pte_unmap(pte);					\
3024  } while (0)
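/*
 * Illustrative sketch (not part of the API above): the canonical pattern for
 * inspecting a single user PTE under its page-table lock, coping with the
 * map possibly failing.  The helper name is made up for the example.
 */
static inline bool example_pte_is_present(struct mm_struct *mm, pmd_t *pmd,
					  unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	bool present = false;

	if (pte) {
		present = pte_present(ptep_get(pte));
		pte_unmap_unlock(pte, ptl);
	}
	return present;
}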
3025  
3026  #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
3027  
3028  #define pte_alloc_map(mm, pmd, address)			\
3029  	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
3030  
3031  #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
3032  	(pte_alloc(mm, pmd) ?			\
3033  		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
3034  
3035  #define pte_alloc_kernel(pmd, address)			\
3036  	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
3037  		NULL: pte_offset_kernel(pmd, address))
3038  
3039  #if defined(CONFIG_SPLIT_PMD_PTLOCKS)
3040  
3041  static inline struct page *pmd_pgtable_page(pmd_t *pmd)
3042  {
3043  	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
3044  	return virt_to_page((void *)((unsigned long) pmd & mask));
3045  }
3046  
3047  static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
3048  {
3049  	return page_ptdesc(pmd_pgtable_page(pmd));
3050  }
3051  
3052  static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3053  {
3054  	return ptlock_ptr(pmd_ptdesc(pmd));
3055  }
3056  
3057  static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
3058  {
3059  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3060  	ptdesc->pmd_huge_pte = NULL;
3061  #endif
3062  	return ptlock_init(ptdesc);
3063  }
3064  
3065  static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
3066  {
3067  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3068  	VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
3069  #endif
3070  	ptlock_free(ptdesc);
3071  }
3072  
3073  #define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
3074  
3075  #else
3076  
3077  static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3078  {
3079  	return &mm->page_table_lock;
3080  }
3081  
3082  static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
3083  static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
3084  
3085  #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
3086  
3087  #endif
3088  
3089  static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
3090  {
3091  	spinlock_t *ptl = pmd_lockptr(mm, pmd);
3092  	spin_lock(ptl);
3093  	return ptl;
3094  }
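/*
 * Illustrative sketch (not part of the API above): take the PMD-level lock
 * around a read of a single PMD entry.  The helper name is made up for the
 * example.
 */
static inline bool example_pmd_is_none(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);
	bool none = pmd_none(*pmd);

	spin_unlock(ptl);
	return none;
}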
3095  
3096  static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
3097  {
3098  	struct folio *folio = ptdesc_folio(ptdesc);
3099  
3100  	if (!pmd_ptlock_init(ptdesc))
3101  		return false;
3102  	__folio_set_pgtable(folio);
3103  	lruvec_stat_add_folio(folio, NR_PAGETABLE);
3104  	return true;
3105  }
3106  
3107  static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
3108  {
3109  	struct folio *folio = ptdesc_folio(ptdesc);
3110  
3111  	pmd_ptlock_free(ptdesc);
3112  	__folio_clear_pgtable(folio);
3113  	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3114  }
3115  
3116  /*
3117   * No scalability reason to split PUD locks yet, but follow the same pattern
3118   * as the PMD locks to make it easier if we decide to.  The VM should not be
3119   * considered ready to switch to split PUD locks yet; there may be places
3120   * which need to be converted from page_table_lock.
3121   */
3122  static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
3123  {
3124  	return &mm->page_table_lock;
3125  }
3126  
3127  static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
3128  {
3129  	spinlock_t *ptl = pud_lockptr(mm, pud);
3130  
3131  	spin_lock(ptl);
3132  	return ptl;
3133  }
3134  
3135  static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
3136  {
3137  	struct folio *folio = ptdesc_folio(ptdesc);
3138  
3139  	__folio_set_pgtable(folio);
3140  	lruvec_stat_add_folio(folio, NR_PAGETABLE);
3141  }
3142  
3143  static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
3144  {
3145  	struct folio *folio = ptdesc_folio(ptdesc);
3146  
3147  	__folio_clear_pgtable(folio);
3148  	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3149  }
3150  
3151  extern void __init pagecache_init(void);
3152  extern void free_initmem(void);
3153  
3154  /*
3155   * Free reserved pages within the range [PAGE_ALIGN(start), end & PAGE_MASK)
3156   * into the buddy system. The freed pages will be poisoned with the pattern
3157   * "poison" if it is within the range [0, UCHAR_MAX].
3158   * Returns the number of pages freed into the buddy system.
3159   */
3160  extern unsigned long free_reserved_area(void *start, void *end,
3161  					int poison, const char *s);
3162  
3163  extern void adjust_managed_page_count(struct page *page, long count);
3164  
3165  extern void reserve_bootmem_region(phys_addr_t start,
3166  				   phys_addr_t end, int nid);
3167  
3168  /* Free the reserved page into the buddy system, so it gets managed. */
3169  void free_reserved_page(struct page *page);
3170  #define free_highmem_page(page) free_reserved_page(page)
3171  
3172  static inline void mark_page_reserved(struct page *page)
3173  {
3174  	SetPageReserved(page);
3175  	adjust_managed_page_count(page, -1);
3176  }
3177  
3178  static inline void free_reserved_ptdesc(struct ptdesc *pt)
3179  {
3180  	free_reserved_page(ptdesc_page(pt));
3181  }
3182  
3183  /*
3184   * Default method to free all the __init memory into the buddy system.
3185   * The freed pages will be poisoned with the pattern "poison" if it is
3186   * within the range [0, UCHAR_MAX].
3187   * Returns the number of pages freed into the buddy system.
3188   */
3189  static inline unsigned long free_initmem_default(int poison)
3190  {
3191  	extern char __init_begin[], __init_end[];
3192  
3193  	return free_reserved_area(&__init_begin, &__init_end,
3194  				  poison, "unused kernel image (initmem)");
3195  }
3196  
3197  static inline unsigned long get_num_physpages(void)
3198  {
3199  	int nid;
3200  	unsigned long phys_pages = 0;
3201  
3202  	for_each_online_node(nid)
3203  		phys_pages += node_present_pages(nid);
3204  
3205  	return phys_pages;
3206  }
3207  
3208  /*
3209   * Using memblock node mappings, an architecture may initialise its
3210   * zones, allocate the backing mem_map and account for memory holes in an
3211   * architecture-independent manner.
3212   *
3213   * An architecture is expected to register the ranges of page frames backed
3214   * by physical memory with memblock_add[_node]() before calling
3215   * free_area_init(), passing in the PFN each zone ends at. In the most basic
3216   * usage, an architecture is expected to do something like:
3217   *
3218   * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
3219   * 							 max_highmem_pfn};
3220   * for_each_valid_physical_page_range()
3221   *	memblock_add_node(base, size, nid, MEMBLOCK_NONE)
3222   * free_area_init(max_zone_pfns);
3223   */
3224  void free_area_init(unsigned long *max_zone_pfn);
3225  unsigned long node_map_pfn_alignment(void);
3226  extern unsigned long absent_pages_in_range(unsigned long start_pfn,
3227  						unsigned long end_pfn);
3228  extern void get_pfn_range_for_nid(unsigned int nid,
3229  			unsigned long *start_pfn, unsigned long *end_pfn);
3230  
3231  #ifndef CONFIG_NUMA
3232  static inline int early_pfn_to_nid(unsigned long pfn)
3233  {
3234  	return 0;
3235  }
3236  #else
3237  /* please see mm/page_alloc.c */
3238  extern int __meminit early_pfn_to_nid(unsigned long pfn);
3239  #endif
3240  
3241  extern void mem_init(void);
3242  extern void __init mmap_init(void);
3243  
3244  extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
3245  static inline void show_mem(void)
3246  {
3247  	__show_mem(0, NULL, MAX_NR_ZONES - 1);
3248  }
3249  extern long si_mem_available(void);
3250  extern void si_meminfo(struct sysinfo * val);
3251  extern void si_meminfo_node(struct sysinfo *val, int nid);
3252  
3253  extern __printf(3, 4)
3254  void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
3255  
3256  extern void setup_per_cpu_pageset(void);
3257  
3258  /* nommu.c */
3259  extern atomic_long_t mmap_pages_allocated;
3260  extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
3261  
3262  /* interval_tree.c */
3263  void vma_interval_tree_insert(struct vm_area_struct *node,
3264  			      struct rb_root_cached *root);
3265  void vma_interval_tree_insert_after(struct vm_area_struct *node,
3266  				    struct vm_area_struct *prev,
3267  				    struct rb_root_cached *root);
3268  void vma_interval_tree_remove(struct vm_area_struct *node,
3269  			      struct rb_root_cached *root);
3270  struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
3271  				unsigned long start, unsigned long last);
3272  struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
3273  				unsigned long start, unsigned long last);
3274  
3275  #define vma_interval_tree_foreach(vma, root, start, last)		\
3276  	for (vma = vma_interval_tree_iter_first(root, start, last);	\
3277  	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
3278  
3279  void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
3280  				   struct rb_root_cached *root);
3281  void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
3282  				   struct rb_root_cached *root);
3283  struct anon_vma_chain *
3284  anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
3285  				  unsigned long start, unsigned long last);
3286  struct anon_vma_chain *anon_vma_interval_tree_iter_next(
3287  	struct anon_vma_chain *node, unsigned long start, unsigned long last);
3288  #ifdef CONFIG_DEBUG_VM_RB
3289  void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
3290  #endif
3291  
3292  #define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
3293  	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
3294  	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
3295  
3296  /* mmap.c */
3297  extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
3298  extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
3299  extern void exit_mmap(struct mm_struct *);
3300  int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
3301  
3302  static inline int check_data_rlimit(unsigned long rlim,
3303  				    unsigned long new,
3304  				    unsigned long start,
3305  				    unsigned long end_data,
3306  				    unsigned long start_data)
3307  {
3308  	if (rlim < RLIM_INFINITY) {
3309  		if (((new - start) + (end_data - start_data)) > rlim)
3310  			return -ENOSPC;
3311  	}
3312  
3313  	return 0;
3314  }
3315  
3316  extern int mm_take_all_locks(struct mm_struct *mm);
3317  extern void mm_drop_all_locks(struct mm_struct *mm);
3318  
3319  extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3320  extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3321  extern struct file *get_mm_exe_file(struct mm_struct *mm);
3322  extern struct file *get_task_exe_file(struct task_struct *task);
3323  
3324  extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
3325  extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
3326  
3327  extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
3328  				   const struct vm_special_mapping *sm);
3329  extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
3330  				   unsigned long addr, unsigned long len,
3331  				   unsigned long flags,
3332  				   const struct vm_special_mapping *spec);
3333  
3334  unsigned long randomize_stack_top(unsigned long stack_top);
3335  unsigned long randomize_page(unsigned long start, unsigned long range);
3336  
3337  unsigned long
3338  __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
3339  		    unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
3340  
3341  static inline unsigned long
3342  get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
3343  		  unsigned long pgoff, unsigned long flags)
3344  {
3345  	return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
3346  }
3347  
3348  extern unsigned long mmap_region(struct file *file, unsigned long addr,
3349  	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
3350  	struct list_head *uf);
3351  extern unsigned long do_mmap(struct file *file, unsigned long addr,
3352  	unsigned long len, unsigned long prot, unsigned long flags,
3353  	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
3354  	struct list_head *uf);
3355  extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
3356  			 unsigned long start, size_t len, struct list_head *uf,
3357  			 bool unlock);
3358  int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3359  		    struct mm_struct *mm, unsigned long start,
3360  		    unsigned long end, struct list_head *uf, bool unlock);
3361  extern int do_munmap(struct mm_struct *, unsigned long, size_t,
3362  		     struct list_head *uf);
3363  extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
3364  
3365  #ifdef CONFIG_MMU
3366  extern int __mm_populate(unsigned long addr, unsigned long len,
3367  			 int ignore_errors);
3368  static inline void mm_populate(unsigned long addr, unsigned long len)
3369  {
3370  	/* Ignore errors */
3371  	(void) __mm_populate(addr, len, 1);
3372  }
3373  #else
3374  static inline void mm_populate(unsigned long addr, unsigned long len) {}
3375  #endif
3376  
3377  /* This takes the mm semaphore itself */
3378  extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
3379  extern int vm_munmap(unsigned long, size_t);
3380  extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
3381          unsigned long, unsigned long,
3382          unsigned long, unsigned long);
3383  
3384  struct vm_unmapped_area_info {
3385  #define VM_UNMAPPED_AREA_TOPDOWN 1
3386  	unsigned long flags;
3387  	unsigned long length;
3388  	unsigned long low_limit;
3389  	unsigned long high_limit;
3390  	unsigned long align_mask;
3391  	unsigned long align_offset;
3392  	unsigned long start_gap;
3393  };
3394  
3395  extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
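/*
 * Illustrative sketch (not part of the API above): how a get_unmapped_area()
 * style helper typically fills this structure for a bottom-up search.  The
 * helper name and the choice of limits are made up for the example.
 */
static inline unsigned long example_find_free_area(unsigned long len,
						   unsigned long low_limit,
						   unsigned long high_limit)
{
	struct vm_unmapped_area_info info = {};

	info.length = len;
	info.low_limit = low_limit;
	info.high_limit = high_limit;
	/* flags, align_mask, align_offset and start_gap stay zero */

	/* returns a free address, or a negative errno on failure */
	return vm_unmapped_area(&info);
}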
3396  
3397  /* truncate.c */
3398  extern void truncate_inode_pages(struct address_space *, loff_t);
3399  extern void truncate_inode_pages_range(struct address_space *,
3400  				       loff_t lstart, loff_t lend);
3401  extern void truncate_inode_pages_final(struct address_space *);
3402  
3403  /* generic vm_area_ops exported for stackable file systems */
3404  extern vm_fault_t filemap_fault(struct vm_fault *vmf);
3405  extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3406  		pgoff_t start_pgoff, pgoff_t end_pgoff);
3407  extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
3408  
3409  extern unsigned long stack_guard_gap;
3410  /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
3411  int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
3412  struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
3413  
3414  /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
3415  int expand_downwards(struct vm_area_struct *vma, unsigned long address);
3416  
3417  /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
3418  extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
3419  extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
3420  					     struct vm_area_struct **pprev);
3421  
3422  /*
3423   * Look up the first VMA which intersects the interval [start_addr, end_addr);
3424   * NULL if none.  Assumes start_addr < end_addr.
3425   */
3426  struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
3427  			unsigned long start_addr, unsigned long end_addr);
3428  
3429  /**
3430   * vma_lookup() - Find a VMA at a specific address
3431   * @mm: The process address space.
3432   * @addr: The user address.
3433   *
3434   * Return: The vm_area_struct at the given address, %NULL otherwise.
3435   */
3436  static inline
3437  struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
3438  {
3439  	return mtree_load(&mm->mm_mt, addr);
3440  }
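/*
 * Illustrative sketch (not part of the API above): vma_lookup() must be
 * called with the mmap lock held; a minimal read-side caller looks like
 * this.  The helper name is made up for the example.
 */
static inline bool example_addr_is_mapped(struct mm_struct *mm,
					  unsigned long addr)
{
	bool mapped;

	mmap_read_lock(mm);
	mapped = vma_lookup(mm, addr) != NULL;
	mmap_read_unlock(mm);

	return mapped;
}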
3441  
3442  static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
3443  {
3444  	if (vma->vm_flags & VM_GROWSDOWN)
3445  		return stack_guard_gap;
3446  
3447  	/* See reasoning around the VM_SHADOW_STACK definition */
3448  	if (vma->vm_flags & VM_SHADOW_STACK)
3449  		return PAGE_SIZE;
3450  
3451  	return 0;
3452  }
3453  
3454  static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
3455  {
3456  	unsigned long gap = stack_guard_start_gap(vma);
3457  	unsigned long vm_start = vma->vm_start;
3458  
3459  	vm_start -= gap;
3460  	if (vm_start > vma->vm_start)
3461  		vm_start = 0;
3462  	return vm_start;
3463  }
3464  
3465  static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
3466  {
3467  	unsigned long vm_end = vma->vm_end;
3468  
3469  	if (vma->vm_flags & VM_GROWSUP) {
3470  		vm_end += stack_guard_gap;
3471  		if (vm_end < vma->vm_end)
3472  			vm_end = -PAGE_SIZE;
3473  	}
3474  	return vm_end;
3475  }
3476  
3477  static inline unsigned long vma_pages(struct vm_area_struct *vma)
3478  {
3479  	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
3480  }
3481  
3482  /* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
3483  static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
3484  				unsigned long vm_start, unsigned long vm_end)
3485  {
3486  	struct vm_area_struct *vma = vma_lookup(mm, vm_start);
3487  
3488  	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
3489  		vma = NULL;
3490  
3491  	return vma;
3492  }
3493  
3494  static inline bool range_in_vma(struct vm_area_struct *vma,
3495  				unsigned long start, unsigned long end)
3496  {
3497  	return (vma && vma->vm_start <= start && end <= vma->vm_end);
3498  }
3499  
3500  #ifdef CONFIG_MMU
3501  pgprot_t vm_get_page_prot(unsigned long vm_flags);
3502  void vma_set_page_prot(struct vm_area_struct *vma);
3503  #else
3504  static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
3505  {
3506  	return __pgprot(0);
3507  }
3508  static inline void vma_set_page_prot(struct vm_area_struct *vma)
3509  {
3510  	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3511  }
3512  #endif
3513  
3514  void vma_set_file(struct vm_area_struct *vma, struct file *file);
3515  
3516  #ifdef CONFIG_NUMA_BALANCING
3517  unsigned long change_prot_numa(struct vm_area_struct *vma,
3518  			unsigned long start, unsigned long end);
3519  #endif
3520  
3521  struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
3522  		unsigned long addr);
3523  int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
3524  			unsigned long pfn, unsigned long size, pgprot_t);
3525  int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
3526  		unsigned long pfn, unsigned long size, pgprot_t prot);
3527  int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
3528  int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
3529  			struct page **pages, unsigned long *num);
3530  int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
3531  				unsigned long num);
3532  int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
3533  				unsigned long num);
3534  vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
3535  			unsigned long pfn);
3536  vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
3537  			unsigned long pfn, pgprot_t pgprot);
3538  vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
3539  			pfn_t pfn);
3540  vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
3541  		unsigned long addr, pfn_t pfn);
3542  int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
3543  
3544  static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
3545  				unsigned long addr, struct page *page)
3546  {
3547  	int err = vm_insert_page(vma, addr, page);
3548  
3549  	if (err == -ENOMEM)
3550  		return VM_FAULT_OOM;
3551  	if (err < 0 && err != -EBUSY)
3552  		return VM_FAULT_SIGBUS;
3553  
3554  	return VM_FAULT_NOPAGE;
3555  }
3556  
3557  #ifndef io_remap_pfn_range
3558  static inline int io_remap_pfn_range(struct vm_area_struct *vma,
3559  				     unsigned long addr, unsigned long pfn,
3560  				     unsigned long size, pgprot_t prot)
3561  {
3562  	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
3563  }
3564  #endif
3565  
3566  static inline vm_fault_t vmf_error(int err)
3567  {
3568  	if (err == -ENOMEM)
3569  		return VM_FAULT_OOM;
3570  	else if (err == -EHWPOISON)
3571  		return VM_FAULT_HWPOISON;
3572  	return VM_FAULT_SIGBUS;
3573  }
3574  
3575  /*
3576   * Convert errno to return value for ->page_mkwrite() calls.
3577   *
3578   * This should eventually be merged with vmf_error() above, but will need a
3579   * careful audit of all vmf_error() callers.
3580   */
3581  static inline vm_fault_t vmf_fs_error(int err)
3582  {
3583  	if (err == 0)
3584  		return VM_FAULT_LOCKED;
3585  	if (err == -EFAULT || err == -EAGAIN)
3586  		return VM_FAULT_NOPAGE;
3587  	if (err == -ENOMEM)
3588  		return VM_FAULT_OOM;
3589  	/* -ENOSPC, -EDQUOT, -EIO ... */
3590  	return VM_FAULT_SIGBUS;
3591  }
3592  
3593  static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
3594  {
3595  	if (vm_fault & VM_FAULT_OOM)
3596  		return -ENOMEM;
3597  	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
3598  		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
3599  	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
3600  		return -EFAULT;
3601  	return 0;
3602  }
3603  
3604  /*
3605   * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
3606   * a (NUMA hinting) fault is required.
3607   */
3608  static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
3609  					   unsigned int flags)
3610  {
3611  	/*
3612  	 * If callers don't want to honor NUMA hinting faults, no need to
3613  	 * determine if we would actually have to trigger a NUMA hinting fault.
3614  	 */
3615  	if (!(flags & FOLL_HONOR_NUMA_FAULT))
3616  		return true;
3617  
3618  	/*
3619  	 * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
3620  	 *
3621  	 * Requiring a fault here even for inaccessible VMAs would mean that
3622  	 * FOLL_FORCE cannot make any progress, because handle_mm_fault()
3623  	 * refuses to process NUMA hinting faults in inaccessible VMAs.
3624  	 */
3625  	return !vma_is_accessible(vma);
3626  }
3627  
3628  typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
3629  extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
3630  			       unsigned long size, pte_fn_t fn, void *data);
3631  extern int apply_to_existing_page_range(struct mm_struct *mm,
3632  				   unsigned long address, unsigned long size,
3633  				   pte_fn_t fn, void *data);
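/*
 * Illustrative sketch (not part of the API above): a pte_fn_t callback that
 * counts present entries, applied over an already-populated range.  Both
 * helper names are made up for the example.
 */
static inline int example_count_pte(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *nr_present = data;

	if (pte_present(ptep_get(pte)))
		(*nr_present)++;
	return 0;		/* a non-zero return would abort the walk */
}

static inline unsigned long example_count_present(struct mm_struct *mm,
						  unsigned long addr,
						  unsigned long size)
{
	unsigned long nr_present = 0;

	apply_to_existing_page_range(mm, addr, size, example_count_pte,
				     &nr_present);
	return nr_present;
}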
3634  
3635  #ifdef CONFIG_PAGE_POISONING
3636  extern void __kernel_poison_pages(struct page *page, int numpages);
3637  extern void __kernel_unpoison_pages(struct page *page, int numpages);
3638  extern bool _page_poisoning_enabled_early;
3639  DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
3640  static inline bool page_poisoning_enabled(void)
3641  {
3642  	return _page_poisoning_enabled_early;
3643  }
3644  /*
3645   * For use in fast paths after mem_debugging_and_hardening_init() has run,
3646   * or when a false negative result is not harmful when called too early.
3647   */
3648  static inline bool page_poisoning_enabled_static(void)
3649  {
3650  	return static_branch_unlikely(&_page_poisoning_enabled);
3651  }
3652  static inline void kernel_poison_pages(struct page *page, int numpages)
3653  {
3654  	if (page_poisoning_enabled_static())
3655  		__kernel_poison_pages(page, numpages);
3656  }
3657  static inline void kernel_unpoison_pages(struct page *page, int numpages)
3658  {
3659  	if (page_poisoning_enabled_static())
3660  		__kernel_unpoison_pages(page, numpages);
3661  }
3662  #else
3663  static inline bool page_poisoning_enabled(void) { return false; }
3664  static inline bool page_poisoning_enabled_static(void) { return false; }
3665  static inline void __kernel_poison_pages(struct page *page, int numpages) { }
3666  static inline void kernel_poison_pages(struct page *page, int numpages) { }
3667  static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
3668  #endif
3669  
3670  DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
3671  static inline bool want_init_on_alloc(gfp_t flags)
3672  {
3673  	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
3674  				&init_on_alloc))
3675  		return true;
3676  	return flags & __GFP_ZERO;
3677  }
3678  
3679  DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
3680  static inline bool want_init_on_free(void)
3681  {
3682  	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
3683  				   &init_on_free);
3684  }
3685  
3686  extern bool _debug_pagealloc_enabled_early;
3687  DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
3688  
3689  static inline bool debug_pagealloc_enabled(void)
3690  {
3691  	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
3692  		_debug_pagealloc_enabled_early;
3693  }
3694  
3695  /*
3696   * For use in fast paths after mem_debugging_and_hardening_init() has run,
3697   * or when a false negative result is not harmful when called too early.
3698   */
3699  static inline bool debug_pagealloc_enabled_static(void)
3700  {
3701  	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
3702  		return false;
3703  
3704  	return static_branch_unlikely(&_debug_pagealloc_enabled);
3705  }
3706  
3707  /*
3708   * To support DEBUG_PAGEALLOC, the architecture must ensure that
3709   * __kernel_map_pages() never fails.
3710   */
3711  extern void __kernel_map_pages(struct page *page, int numpages, int enable);
3712  #ifdef CONFIG_DEBUG_PAGEALLOC
3713  static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
3714  {
3715  	if (debug_pagealloc_enabled_static())
3716  		__kernel_map_pages(page, numpages, 1);
3717  }
3718  
3719  static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
3720  {
3721  	if (debug_pagealloc_enabled_static())
3722  		__kernel_map_pages(page, numpages, 0);
3723  }
3724  
3725  extern unsigned int _debug_guardpage_minorder;
3726  DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
3727  
3728  static inline unsigned int debug_guardpage_minorder(void)
3729  {
3730  	return _debug_guardpage_minorder;
3731  }
3732  
3733  static inline bool debug_guardpage_enabled(void)
3734  {
3735  	return static_branch_unlikely(&_debug_guardpage_enabled);
3736  }
3737  
3738  static inline bool page_is_guard(struct page *page)
3739  {
3740  	if (!debug_guardpage_enabled())
3741  		return false;
3742  
3743  	return PageGuard(page);
3744  }
3745  
3746  bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
3747  static inline bool set_page_guard(struct zone *zone, struct page *page,
3748  				  unsigned int order)
3749  {
3750  	if (!debug_guardpage_enabled())
3751  		return false;
3752  	return __set_page_guard(zone, page, order);
3753  }
3754  
3755  void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
3756  static inline void clear_page_guard(struct zone *zone, struct page *page,
3757  				    unsigned int order)
3758  {
3759  	if (!debug_guardpage_enabled())
3760  		return;
3761  	__clear_page_guard(zone, page, order);
3762  }
3763  
3764  #else	/* CONFIG_DEBUG_PAGEALLOC */
3765  static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
3766  static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
3767  static inline unsigned int debug_guardpage_minorder(void) { return 0; }
3768  static inline bool debug_guardpage_enabled(void) { return false; }
3769  static inline bool page_is_guard(struct page *page) { return false; }
3770  static inline bool set_page_guard(struct zone *zone, struct page *page,
3771  			unsigned int order) { return false; }
3772  static inline void clear_page_guard(struct zone *zone, struct page *page,
3773  				unsigned int order) {}
3774  #endif	/* CONFIG_DEBUG_PAGEALLOC */
3775  
3776  #ifdef __HAVE_ARCH_GATE_AREA
3777  extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
3778  extern int in_gate_area_no_mm(unsigned long addr);
3779  extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
3780  #else
3781  static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3782  {
3783  	return NULL;
3784  }
3785  static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
3786  static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
3787  {
3788  	return 0;
3789  }
3790  #endif	/* __HAVE_ARCH_GATE_AREA */
3791  
3792  extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
3793  
3794  #ifdef CONFIG_SYSCTL
3795  extern int sysctl_drop_caches;
3796  int drop_caches_sysctl_handler(const struct ctl_table *, int, void *, size_t *,
3797  		loff_t *);
3798  #endif
3799  
3800  void drop_slab(void);
3801  
3802  #ifndef CONFIG_MMU
3803  #define randomize_va_space 0
3804  #else
3805  extern int randomize_va_space;
3806  #endif
3807  
3808  const char * arch_vma_name(struct vm_area_struct *vma);
3809  #ifdef CONFIG_MMU
3810  void print_vma_addr(char *prefix, unsigned long rip);
3811  #else
3812  static inline void print_vma_addr(char *prefix, unsigned long rip)
3813  {
3814  }
3815  #endif
3816  
3817  void *sparse_buffer_alloc(unsigned long size);
3818  struct page * __populate_section_memmap(unsigned long pfn,
3819  		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
3820  		struct dev_pagemap *pgmap);
3821  void pud_init(void *addr);
3822  void pmd_init(void *addr);
3823  void kernel_pte_init(void *addr);
3824  pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
3825  p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
3826  pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
3827  pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
3828  pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
3829  			    struct vmem_altmap *altmap, struct page *reuse);
3830  void *vmemmap_alloc_block(unsigned long size, int node);
3831  struct vmem_altmap;
3832  void *vmemmap_alloc_block_buf(unsigned long size, int node,
3833  			      struct vmem_altmap *altmap);
3834  void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
3835  void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
3836  		     unsigned long addr, unsigned long next);
3837  int vmemmap_check_pmd(pmd_t *pmd, int node,
3838  		      unsigned long addr, unsigned long next);
3839  int vmemmap_populate_basepages(unsigned long start, unsigned long end,
3840  			       int node, struct vmem_altmap *altmap);
3841  int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
3842  			       int node, struct vmem_altmap *altmap);
3843  int vmemmap_populate(unsigned long start, unsigned long end, int node,
3844  		struct vmem_altmap *altmap);
3845  void vmemmap_populate_print_last(void);
3846  #ifdef CONFIG_MEMORY_HOTPLUG
3847  void vmemmap_free(unsigned long start, unsigned long end,
3848  		struct vmem_altmap *altmap);
3849  #endif
3850  
3851  #ifdef CONFIG_SPARSEMEM_VMEMMAP
3852  static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
3853  {
3854  	/* number of pfns from base where pfn_to_page() is valid */
3855  	if (altmap)
3856  		return altmap->reserve + altmap->free;
3857  	return 0;
3858  }
3859  
3860  static inline void vmem_altmap_free(struct vmem_altmap *altmap,
3861  				    unsigned long nr_pfns)
3862  {
3863  	altmap->alloc -= nr_pfns;
3864  }
3865  #else
3866  static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
3867  {
3868  	return 0;
3869  }
3870  
3871  static inline void vmem_altmap_free(struct vmem_altmap *altmap,
3872  				    unsigned long nr_pfns)
3873  {
3874  }
3875  #endif
3876  
3877  #define VMEMMAP_RESERVE_NR	2
3878  #ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
3879  static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
3880  					  struct dev_pagemap *pgmap)
3881  {
3882  	unsigned long nr_pages;
3883  	unsigned long nr_vmemmap_pages;
3884  
3885  	if (!pgmap || !is_power_of_2(sizeof(struct page)))
3886  		return false;
3887  
3888  	nr_pages = pgmap_vmemmap_nr(pgmap);
3889  	nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
3890  	/*
3891  	 * For vmemmap optimization with DAX we need a minimum of 2 vmemmap
3892  	 * pages. See the layout diagram in Documentation/mm/vmemmap_dedup.rst.
3893  	 */
3894  	return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
3895  }
3896  /*
3897   * If we don't have an architecture override, use the generic rule
3898   */
3899  #ifndef vmemmap_can_optimize
3900  #define vmemmap_can_optimize __vmemmap_can_optimize
3901  #endif
3902  
3903  #else
3904  static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
3905  					   struct dev_pagemap *pgmap)
3906  {
3907  	return false;
3908  }
3909  #endif
3910  
3911  void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
3912  				  unsigned long nr_pages);
3913  
3914  enum mf_flags {
3915  	MF_COUNT_INCREASED = 1 << 0,
3916  	MF_ACTION_REQUIRED = 1 << 1,
3917  	MF_MUST_KILL = 1 << 2,
3918  	MF_SOFT_OFFLINE = 1 << 3,
3919  	MF_UNPOISON = 1 << 4,
3920  	MF_SW_SIMULATED = 1 << 5,
3921  	MF_NO_RETRY = 1 << 6,
3922  	MF_MEM_PRE_REMOVE = 1 << 7,
3923  };
3924  int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
3925  		      unsigned long count, int mf_flags);
3926  extern int memory_failure(unsigned long pfn, int flags);
3927  extern void memory_failure_queue_kick(int cpu);
3928  extern int unpoison_memory(unsigned long pfn);
3929  extern atomic_long_t num_poisoned_pages __read_mostly;
3930  extern int soft_offline_page(unsigned long pfn, int flags);
3931  #ifdef CONFIG_MEMORY_FAILURE
3932  /*
3933   * Sysfs entries for memory failure handling statistics.
3934   */
3935  extern const struct attribute_group memory_failure_attr_group;
3936  extern void memory_failure_queue(unsigned long pfn, int flags);
3937  extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
3938  					bool *migratable_cleared);
3939  void num_poisoned_pages_inc(unsigned long pfn);
3940  void num_poisoned_pages_sub(unsigned long pfn, long i);
3941  #else
3942  static inline void memory_failure_queue(unsigned long pfn, int flags)
3943  {
3944  }
3945  
3946  static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
3947  					bool *migratable_cleared)
3948  {
3949  	return 0;
3950  }
3951  
3952  static inline void num_poisoned_pages_inc(unsigned long pfn)
3953  {
3954  }
3955  
3956  static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
3957  {
3958  }
3959  #endif
3960  
3961  #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
3962  extern void memblk_nr_poison_inc(unsigned long pfn);
3963  extern void memblk_nr_poison_sub(unsigned long pfn, long i);
3964  #else
3965  static inline void memblk_nr_poison_inc(unsigned long pfn)
3966  {
3967  }
3968  
3969  static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
3970  {
3971  }
3972  #endif
3973  
3974  #ifndef arch_memory_failure
3975  static inline int arch_memory_failure(unsigned long pfn, int flags)
3976  {
3977  	return -ENXIO;
3978  }
3979  #endif
3980  
3981  #ifndef arch_is_platform_page
3982  static inline bool arch_is_platform_page(u64 paddr)
3983  {
3984  	return false;
3985  }
3986  #endif
3987  
3988  /*
3989   * Error handlers for various types of pages.
3990   */
3991  enum mf_result {
3992  	MF_IGNORED,	/* Error: cannot be handled */
3993  	MF_FAILED,	/* Error: handling failed */
3994  	MF_DELAYED,	/* Will be handled later */
3995  	MF_RECOVERED,	/* Successfully recovered */
3996  };
3997  
3998  enum mf_action_page_type {
3999  	MF_MSG_KERNEL,
4000  	MF_MSG_KERNEL_HIGH_ORDER,
4001  	MF_MSG_DIFFERENT_COMPOUND,
4002  	MF_MSG_HUGE,
4003  	MF_MSG_FREE_HUGE,
4004  	MF_MSG_GET_HWPOISON,
4005  	MF_MSG_UNMAP_FAILED,
4006  	MF_MSG_DIRTY_SWAPCACHE,
4007  	MF_MSG_CLEAN_SWAPCACHE,
4008  	MF_MSG_DIRTY_MLOCKED_LRU,
4009  	MF_MSG_CLEAN_MLOCKED_LRU,
4010  	MF_MSG_DIRTY_UNEVICTABLE_LRU,
4011  	MF_MSG_CLEAN_UNEVICTABLE_LRU,
4012  	MF_MSG_DIRTY_LRU,
4013  	MF_MSG_CLEAN_LRU,
4014  	MF_MSG_TRUNCATED_LRU,
4015  	MF_MSG_BUDDY,
4016  	MF_MSG_DAX,
4017  	MF_MSG_UNSPLIT_THP,
4018  	MF_MSG_ALREADY_POISONED,
4019  	MF_MSG_UNKNOWN,
4020  };
4021  
4022  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
4023  void folio_zero_user(struct folio *folio, unsigned long addr_hint);
4024  int copy_user_large_folio(struct folio *dst, struct folio *src,
4025  			  unsigned long addr_hint,
4026  			  struct vm_area_struct *vma);
4027  long copy_folio_from_user(struct folio *dst_folio,
4028  			   const void __user *usr_src,
4029  			   bool allow_pagefault);
4030  
4031  /**
4032   * vma_is_special_huge - Are transhuge page-table entries considered special?
4033   * @vma: Pointer to the struct vm_area_struct to consider
4034   *
4035   * Whether transhuge page-table entries are considered "special" following
4036   * the definition in vm_normal_page().
4037   *
4038   * Return: true if transhuge page-table entries should be considered special,
4039   * false otherwise.
4040   */
4041  static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
4042  {
4043  	return vma_is_dax(vma) || (vma->vm_file &&
4044  				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
4045  }
4046  
4047  #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4048  
4049  #if MAX_NUMNODES > 1
4050  void __init setup_nr_node_ids(void);
4051  #else
4052  static inline void setup_nr_node_ids(void) {}
4053  #endif
4054  
4055  extern int memcmp_pages(struct page *page1, struct page *page2);
4056  
4057  static inline int pages_identical(struct page *page1, struct page *page2)
4058  {
4059  	return !memcmp_pages(page1, page2);
4060  }
4061  
4062  #ifdef CONFIG_MAPPING_DIRTY_HELPERS
4063  unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
4064  						pgoff_t first_index, pgoff_t nr,
4065  						pgoff_t bitmap_pgoff,
4066  						unsigned long *bitmap,
4067  						pgoff_t *start,
4068  						pgoff_t *end);
4069  
4070  unsigned long wp_shared_mapping_range(struct address_space *mapping,
4071  				      pgoff_t first_index, pgoff_t nr);
4072  #endif
4073  
4074  extern int sysctl_nr_trim_pages;
4075  
4076  #ifdef CONFIG_PRINTK
4077  void mem_dump_obj(void *object);
4078  #else
4079  static inline void mem_dump_obj(void *object) {}
4080  #endif
4081  
4082  /**
4083   * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
4084   *                    handle them.
4085   * @seals: the seals to check
4086   * @vma: the vma to operate on
4087   *
4088   * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do the
4089   * proper checks/handling on the vma flags.  Return 0 if the checks pass, or <0 on error.
4090   */
4091  static inline int seal_check_write(int seals, struct vm_area_struct *vma)
4092  {
4093  	if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
4094  		/*
4095  		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
4096  		 * write seals are active.
4097  		 */
4098  		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
4099  			return -EPERM;
4100  
4101  		/*
4102  		 * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
4103  		 * MAP_SHARED and read-only, take care to not allow mprotect to
4104  		 * revert protections on such mappings. Do this only for shared
4105  		 * mappings. For private mappings, there is no need to mask
4106  		 * VM_MAYWRITE as we still want them to be COW-writable.
4107  		 */
4108  		if (vma->vm_flags & VM_SHARED)
4109  			vm_flags_clear(vma, VM_MAYWRITE);
4110  	}
4111  
4112  	return 0;
4113  }
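/*
 * Illustrative sketch (not part of the API above): a memfd-like ->mmap()
 * handler would typically apply its write seals like this before completing
 * the mapping.  The helper name and how the seals are obtained are made up
 * for the example.
 */
static inline int example_sealed_mmap(int seals, struct vm_area_struct *vma)
{
	int error = seal_check_write(seals, vma);

	if (error)
		return error;
	/* ... continue with the normal mmap setup ... */
	return 0;
}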
4114  
4115  #ifdef CONFIG_ANON_VMA_NAME
4116  int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4117  			  unsigned long len_in,
4118  			  struct anon_vma_name *anon_name);
4119  #else
4120  static inline int
4121  madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4122  		      unsigned long len_in, struct anon_vma_name *anon_name) {
4123  	return 0;
4124  }
4125  #endif
4126  
4127  #ifdef CONFIG_UNACCEPTED_MEMORY
4128  
4129  bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
4130  void accept_memory(phys_addr_t start, unsigned long size);
4131  
4132  #else
4133  
4134  static inline bool range_contains_unaccepted_memory(phys_addr_t start,
4135  						    unsigned long size)
4136  {
4137  	return false;
4138  }
4139  
4140  static inline void accept_memory(phys_addr_t start, unsigned long size)
4141  {
4142  }
4143  
4144  #endif
4145  
4146  static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
4147  {
4148  	return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
4149  }
4150  
4151  void vma_pgtable_walk_begin(struct vm_area_struct *vma);
4152  void vma_pgtable_walk_end(struct vm_area_struct *vma);
4153  
4154  int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
4155  
4156  #ifdef CONFIG_64BIT
4157  int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
4158  #else
4159  static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
4160  {
4161  	/* noop on 32 bit */
4162  	return 0;
4163  }
4164  #endif
4165  
4166  #ifdef CONFIG_MEM_ALLOC_PROFILING
4167  static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
4168  {
4169  	int i;
4170  	struct alloc_tag *tag;
4171  	unsigned int nr_pages = 1 << new_order;
4172  
4173  	if (!mem_alloc_profiling_enabled())
4174  		return;
4175  
4176  	tag = pgalloc_tag_get(&folio->page);
4177  	if (!tag)
4178  		return;
4179  
4180  	for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
4181  		union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i));
4182  
4183  		if (ref) {
4184  			/* Set new reference to point to the original tag */
4185  			alloc_tag_ref_set(ref, tag);
4186  			put_page_tag_ref(ref);
4187  		}
4188  	}
4189  }
4190  
4191  static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
4192  {
4193  	struct alloc_tag *tag;
4194  	union codetag_ref *ref;
4195  
4196  	tag = pgalloc_tag_get(&old->page);
4197  	if (!tag)
4198  		return;
4199  
4200  	ref = get_page_tag_ref(&new->page);
4201  	if (!ref)
4202  		return;
4203  
4204  	/* Clear the old ref to the original allocation tag. */
4205  	clear_page_tag_ref(&old->page);
4206  	/* Decrement the counters of the tag on get_new_folio. */
4207  	alloc_tag_sub(ref, folio_nr_pages(new));
4208  
4209  	__alloc_tag_ref_set(ref, tag);
4210  
4211  	put_page_tag_ref(ref);
4212  }
4213  #else /* !CONFIG_MEM_ALLOC_PROFILING */
4214  static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
4215  {
4216  }
4217  
4218  static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
4219  {
4220  }
4221  #endif /* CONFIG_MEM_ALLOC_PROFILING */
4222  
4223  #endif /* _LINUX_MM_H */
4224