/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for clearing up the
	 * anon_vma if they are the last user on release
	 */
	atomic_t refcount;

	/*
	 * Count of child anon_vmas: the number of anon_vmas that have
	 * ->parent pointing to this one, including itself.
	 *
	 * This counter is used when deciding whether to reuse an anon_vma
	 * instead of forking a new one. See the comments in anon_vma_clone().
	 */
	unsigned long num_children;
	/* Count of VMAs whose ->anon_vma pointer points to this object. */
	unsigned long num_active_vmas;

	struct anon_vma *parent;	/* Parent of this anon_vma */

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */

	/* Interval tree of private "related" vmas */
	struct rb_root_cached rb_root;
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;   /* locked by mmap_lock & page_table_lock */
	struct rb_node rb;			/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};

enum ttu_flags {
	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
	TTU_HWPOISON		= 0x20,	/* do convert pte to hwpoison entry */
	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
					 * and caller guarantees they will
					 * do a final flush if necessary */
	TTU_RMAP_LOCKED		= 0x80,	/* do not grab rmap lock:
					 * caller holds it */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
{
	return down_write_trylock(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
{
	return down_read_trylock(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
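
/*
 * Illustrative sketch (not a real caller in this file): rmap users typically
 * pin an anon_vma, e.g. via folio_get_anon_vma(), so it cannot be freed
 * underneath them, then take the root rwsem for reading while scanning the
 * interval tree. Only the helpers and fields named below come from this
 * header; the surrounding logic is assumed context.
 *
 *	struct anon_vma *anon_vma = folio_get_anon_vma(folio);
 *
 *	if (anon_vma) {
 *		anon_vma_lock_read(anon_vma);
 *		... walk anon_vma->rb_root under the read lock ...
 *		anon_vma_unlock_read(anon_vma);
 *		put_anon_vma(anon_vma);
 *	}
 */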


/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}
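
/*
 * Illustrative sketch, not taken verbatim from any fault handler: before an
 * anonymous page can be mapped into a VMA, the fault path must make sure
 * vma->anon_vma exists, bailing out with OOM if the allocation fails. Only
 * anon_vma_prepare() is defined in this file; the rest is assumed context.
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	... allocate the folio and map it with folio_add_new_anon_rmap() ...
 */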

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}

struct anon_vma *folio_get_anon_vma(struct folio *folio);

/* RMAP flags, currently only relevant for some anon rmap operations. */
typedef int __bitwise rmap_t;

/*
 * No special request: A mapped anonymous (sub)page is possibly shared between
 * processes.
 */
#define RMAP_NONE		((__force rmap_t)0)

/* The anonymous (sub)page is exclusive to a single process. */
#define RMAP_EXCLUSIVE		((__force rmap_t)BIT(0))

/*
 * Internally, we're using an enum to specify the granularity. We make the
 * compiler emit specialized code for each granularity.
 */
enum rmap_level {
	RMAP_LEVEL_PTE = 0,
	RMAP_LEVEL_PMD,
};

static inline void __folio_rmap_sanity_checks(struct folio *folio,
		struct page *page, int nr_pages, enum rmap_level level)
{
	/* hugetlb folios are handled separately. */
	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);

	/* When (un)mapping zeropages, we should never touch ref+mapcount. */
	VM_WARN_ON_FOLIO(is_zero_folio(folio), folio);

	/*
	 * TODO: we get driver-allocated folios that have nothing to do with
	 * the rmap using vm_insert_page(); therefore, we cannot assume that
	 * folio_test_large_rmappable() holds for large folios. We should
	 * handle any desired mapcount+stats accounting for these folios in
	 * VM_MIXEDMAP VMAs separately, and then sanity-check here that
	 * we really only get rmappable folios.
	 */

	VM_WARN_ON_ONCE(nr_pages <= 0);
	VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
	VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);

	switch (level) {
	case RMAP_LEVEL_PTE:
		break;
	case RMAP_LEVEL_PMD:
		/*
		 * We don't support folios larger than a single PMD yet. So
		 * when RMAP_LEVEL_PMD is set, we assume that we are creating
		 * a single "entire" mapping of the folio.
		 */
		VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
		VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
		break;
	default:
		VM_WARN_ON_ONCE(true);
	}
}

/*
 * rmap interfaces called when adding or removing pte of page
 */
void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
		struct vm_area_struct *, unsigned long address, rmap_t flags);
#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
	folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
void folio_add_anon_rmap_pmd(struct folio *, struct page *,
		struct vm_area_struct *, unsigned long address, rmap_t flags);
void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
		unsigned long address, rmap_t flags);
void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
		struct vm_area_struct *);
#define folio_add_file_rmap_pte(folio, page, vma) \
	folio_add_file_rmap_ptes(folio, page, 1, vma)
void folio_add_file_rmap_pmd(struct folio *, struct page *,
		struct vm_area_struct *);
void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
		struct vm_area_struct *);
#define folio_remove_rmap_pte(folio, page, vma) \
	folio_remove_rmap_ptes(folio, page, 1, vma)
void folio_remove_rmap_pmd(struct folio *, struct page *,
		struct vm_area_struct *);
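
/*
 * Illustrative sketch of how callers pair the interfaces above (the
 * declarations are from this file; the surrounding steps are assumed context
 * and elide PTE setup, locking and statistics):
 *
 *	// Newly allocated, exclusive anonymous folio mapped at 'addr':
 *	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
 *
 *	// An existing anonymous page gaining one more PTE mapping:
 *	folio_add_anon_rmap_pte(folio, page, vma, addr, RMAP_NONE);
 *
 *	// When the PTE is torn down again (zap, unmap, migration):
 *	folio_remove_rmap_pte(folio, page, vma);
 */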

void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
		unsigned long address, rmap_t flags);
void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
		unsigned long address);

/* See folio_try_dup_anon_rmap_*() */
static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
		struct vm_area_struct *vma)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

	if (PageAnonExclusive(&folio->page)) {
		if (unlikely(folio_needs_cow_for_dma(vma, folio)))
			return -EBUSY;
		ClearPageAnonExclusive(&folio->page);
	}
	atomic_inc(&folio->_entire_mapcount);
	atomic_inc(&folio->_large_mapcount);
	return 0;
}

/* See folio_try_share_anon_rmap_*() */
static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
	VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);

	/* Paired with the memory barrier in try_grab_folio(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_mb();

	if (unlikely(folio_maybe_dma_pinned(folio)))
		return -EBUSY;
	ClearPageAnonExclusive(&folio->page);

	/*
	 * This is conceptually a smp_wmb() paired with the smp_rmb() in
	 * gup_must_unshare().
	 */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_mb__after_atomic();
	return 0;
}

static inline void hugetlb_add_file_rmap(struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);

	atomic_inc(&folio->_entire_mapcount);
	atomic_inc(&folio->_large_mapcount);
}

static inline void hugetlb_remove_rmap(struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);

	atomic_dec(&folio->_entire_mapcount);
	atomic_dec(&folio->_large_mapcount);
}

static __always_inline void __folio_dup_file_rmap(struct folio *folio,
		struct page *page, int nr_pages, enum rmap_level level)
{
	const int orig_nr_pages = nr_pages;

	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	switch (level) {
	case RMAP_LEVEL_PTE:
		if (!folio_test_large(folio)) {
			atomic_inc(&folio->_mapcount);
			break;
		}

		do {
			atomic_inc(&page->_mapcount);
		} while (page++, --nr_pages > 0);
		atomic_add(orig_nr_pages, &folio->_large_mapcount);
		break;
	case RMAP_LEVEL_PMD:
		atomic_inc(&folio->_entire_mapcount);
		atomic_inc(&folio->_large_mapcount);
		break;
	}
}

/**
 * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio
 * @folio:	The folio to duplicate the mappings of
 * @page:	The first page to duplicate the mappings of
 * @nr_pages:	The number of pages of which the mapping will be duplicated
 *
 * The page range of the folio is defined by [page, page + nr_pages)
 *
 * The caller needs to hold the page table lock.
 */
static inline void folio_dup_file_rmap_ptes(struct folio *folio,
		struct page *page, int nr_pages)
{
	__folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
}

static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
		struct page *page)
{
	__folio_dup_file_rmap(folio, page, 1, RMAP_LEVEL_PTE);
}

/**
 * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
 * @folio:	The folio to duplicate the mapping of
 * @page:	The first page to duplicate the mapping of
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock.
 */
static inline void folio_dup_file_rmap_pmd(struct folio *folio,
		struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}
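
/*
 * Illustrative sketch: fork() duplicates page-table mappings of file-backed
 * folios without any COW concerns, so the dup helpers above cannot fail.
 * Only the folio_dup_file_rmap_*() calls are from this file; the copy loop
 * around them is assumed context.
 *
 *	// While copying one present PTE from parent to child:
 *	folio_dup_file_rmap_pte(folio, page);
 *
 *	// Or, when a whole PTE-mapped range of a large folio is copied:
 *	folio_dup_file_rmap_ptes(folio, page, nr_pages);
 */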

static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *src_vma,
		enum rmap_level level)
{
	const int orig_nr_pages = nr_pages;
	bool maybe_pinned;
	int i;

	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	/*
	 * If this folio may have been pinned by the parent process, don't
	 * allow duplicating the mappings but instead require e.g. copying
	 * the subpage immediately for the child, so that we can always
	 * guarantee the pinned folio won't be randomly replaced in the
	 * future on write faults.
	 */
	maybe_pinned = likely(!folio_is_device_private(folio)) &&
		       unlikely(folio_needs_cow_for_dma(src_vma, folio));

	/*
	 * No need to check+clear for already shared PTEs/PMDs of the
	 * folio. But if any page is PageAnonExclusive, we must fall back
	 * to copying if the folio may be pinned.
	 */
	switch (level) {
	case RMAP_LEVEL_PTE:
		if (unlikely(maybe_pinned)) {
			for (i = 0; i < nr_pages; i++)
				if (PageAnonExclusive(page + i))
					return -EBUSY;
		}

		if (!folio_test_large(folio)) {
			if (PageAnonExclusive(page))
				ClearPageAnonExclusive(page);
			atomic_inc(&folio->_mapcount);
			break;
		}

		do {
			if (PageAnonExclusive(page))
				ClearPageAnonExclusive(page);
			atomic_inc(&page->_mapcount);
		} while (page++, --nr_pages > 0);
		atomic_add(orig_nr_pages, &folio->_large_mapcount);
		break;
	case RMAP_LEVEL_PMD:
		if (PageAnonExclusive(page)) {
			if (unlikely(maybe_pinned))
				return -EBUSY;
			ClearPageAnonExclusive(page);
		}
		atomic_inc(&folio->_entire_mapcount);
		atomic_inc(&folio->_large_mapcount);
		break;
	}
	return 0;
}

/**
 * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range
 *				  of a folio
 * @folio:	The folio to duplicate the mappings of
 * @page:	The first page to duplicate the mappings of
 * @nr_pages:	The number of pages of which the mapping will be duplicated
 * @src_vma:	The vm area from which the mappings are duplicated
 *
 * The page range of the folio is defined by [page, page + nr_pages)
 *
 * The caller needs to hold the page table lock and the
 * src_vma->vm_mm->write_protect_seq.
 *
 * Duplicating the mappings can only fail if the folio may be pinned; device
 * private folios cannot get pinned and consequently this function cannot fail
 * for them.
 *
 * If duplicating the mappings succeeded, the duplicated PTEs have to be R/O in
 * the parent and the child. They must *not* be writable after this call
 * succeeded.
 *
 * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
 */
static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
		struct page *page, int nr_pages, struct vm_area_struct *src_vma)
{
	return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
					 RMAP_LEVEL_PTE);
}

static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
		struct page *page, struct vm_area_struct *src_vma)
{
	return __folio_try_dup_anon_rmap(folio, page, 1, src_vma,
					 RMAP_LEVEL_PTE);
}

/**
 * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range
 *				 of a folio
 * @folio:	The folio to duplicate the mapping of
 * @page:	The first page to duplicate the mapping of
 * @src_vma:	The vm area from which the mapping is duplicated
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock and the
 * src_vma->vm_mm->write_protect_seq.
 *
 * Duplicating the mapping can only fail if the folio may be pinned; device
 * private folios cannot get pinned and consequently this function cannot fail
 * for them.
 *
 * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in
 * the parent and the child. They must *not* be writable after this call
 * succeeded.
 *
 * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
 */
static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
		struct page *page, struct vm_area_struct *src_vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
					 RMAP_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
	return -EBUSY;
#endif
}
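
/*
 * Illustrative sketch of the fork() copy path these helpers are written for.
 * Only the folio_try_dup_anon_rmap_pte() call is from this file;
 * copy_for_child() is a hypothetical placeholder, and the PTE handling is
 * assumed context.
 *
 *	if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
 *		// Folio may be pinned: don't share it with the child.
 *		// Fall back to allocating and copying a fresh page now.
 *		return copy_for_child(...);
 *	}
 *	// Success: the PTE must be mapped read-only in parent and child,
 *	// so that a later write fault triggers COW.
 */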

static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
		struct page *page, int nr_pages, enum rmap_level level)
{
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
	VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
	__folio_rmap_sanity_checks(folio, page, nr_pages, level);

	/* device private folios cannot get pinned via GUP. */
	if (unlikely(folio_is_device_private(folio))) {
		ClearPageAnonExclusive(page);
		return 0;
	}

	/*
	 * We have to make sure that when we clear PageAnonExclusive, the
	 * page is not pinned and that concurrent GUP-fast won't succeed in
	 * concurrently pinning the page.
	 *
	 * Conceptually, PageAnonExclusive clearing consists of:
	 * (A1) Clear PTE
	 * (A2) Check if the page is pinned; back off if so.
	 * (A3) Clear PageAnonExclusive
	 * (A4) Restore PTE (optional, but certainly not writable)
	 *
	 * When clearing PageAnonExclusive, we cannot possibly map the page
	 * writable again, because anon pages that may be shared must never
	 * be writable. So if the PTE was writable, it cannot be writable
	 * afterwards and there will be a PTE change; only if the PTE wasn't
	 * writable might there be no PTE change.
	 *
	 * Conceptually, GUP-fast pinning of an anon page consists of:
	 * (B1) Read the PTE
	 * (B2) FOLL_WRITE: check if the PTE is not writable; back off if so.
	 * (B3) Pin the mapped page
	 * (B4) Check if the PTE changed by re-reading it; back off if so.
	 * (B5) If the original PTE is not writable, check if
	 *	PageAnonExclusive is not set; back off if so.
	 *
	 * If the PTE was writable, we only have to make sure that GUP-fast
	 * observes a PTE change and properly backs off.
	 *
	 * If the PTE was not writable, we have to make sure that GUP-fast either
	 * detects a (temporary) PTE change or that PageAnonExclusive is cleared
	 * and properly backs off.
	 *
	 * Consequently, when clearing PageAnonExclusive(), we have to make
	 * sure that (A1), (A2)/(A3) and (A4) happen in the right memory
	 * order. In GUP-fast pinning code, we have to make sure that (B3),(B4)
	 * and (B5) happen in the right memory order.
	 *
	 * We assume that there might not be a memory barrier after
	 * clearing/invalidating the PTE (A1) and before restoring the PTE (A4),
	 * so we use explicit ones here.
	 */

	/* Paired with the memory barrier in try_grab_folio(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_mb();

	if (unlikely(folio_maybe_dma_pinned(folio)))
		return -EBUSY;
	ClearPageAnonExclusive(page);

	/*
	 * This is conceptually a smp_wmb() paired with the smp_rmb() in
	 * gup_must_unshare().
	 */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_mb__after_atomic();
	return 0;
}

/**
 * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page
 *				   mapped by a PTE possibly shared to prepare
 *				   for KSM or temporary unmapping
 * @folio:	The folio to share a mapping of
 * @page:	The mapped exclusive page
 *
 * The caller needs to hold the page table lock and has to have the page table
 * entries cleared/invalidated.
 *
 * This is similar to folio_try_dup_anon_rmap_pte(); however, it is not used
 * during fork() to duplicate mappings, but instead to prepare for KSM or for
 * temporarily unmapping parts of a folio (swap, migration) via
 * folio_remove_rmap_pte().
 *
 * Marking the mapped page shared can only fail if the folio may be pinned;
 * device private folios cannot get pinned and consequently this function
 * cannot fail for them.
 *
 * Returns 0 if marking the mapped page possibly shared succeeded. Returns
 * -EBUSY otherwise.
 */
static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
		struct page *page)
{
	return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
}
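
/*
 * Illustrative sketch of the unmap-for-swap pattern this helper is meant for.
 * Only folio_try_share_anon_rmap_pte() and folio_remove_rmap_pte() are from
 * this file; the PTE and swap-entry handling is assumed context.
 *
 *	// With the page table lock held and the PTE already cleared:
 *	if (folio_try_share_anon_rmap_pte(folio, page)) {
 *		// Folio may be pinned: restore the original PTE and give up
 *		// on swapping this page out for now.
 *	} else {
 *		// The page is no longer exclusive: install the swap entry
 *		// and drop the mapping with folio_remove_rmap_pte().
 *	}
 */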

/**
 * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page
 *				   range mapped by a PMD possibly shared to
 *				   prepare for temporary unmapping
 * @folio:	The folio to share the mapping of
 * @page:	The first page to share the mapping of
 *
 * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
 *
 * The caller needs to hold the page table lock and has to have the page table
 * entries cleared/invalidated.
 *
 * This is similar to folio_try_dup_anon_rmap_pmd(); however, it is not used
 * during fork() to duplicate a mapping, but instead to prepare for temporarily
 * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd().
 *
 * Marking the mapped pages shared can only fail if the folio may be pinned;
 * device private folios cannot get pinned and consequently this function
 * cannot fail for them.
 *
 * Returns 0 if marking the mapped pages possibly shared succeeded. Returns
 * -EBUSY otherwise.
 */
static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
		struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
					   RMAP_LEVEL_PMD);
#else
	WARN_ON_ONCE(true);
	return -EBUSY;
#endif
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int folio_referenced(struct folio *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);

void try_to_migrate(struct folio *folio, enum ttu_flags flags);
void try_to_unmap(struct folio *, enum ttu_flags flags);

int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, struct page **pages,
				void *arg);
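
/*
 * Illustrative sketch of how reclaim uses these entry points. The calls are
 * from this file; the surrounding reclaim policy is assumed context and
 * heavily simplified.
 *
 *	unsigned long vm_flags;
 *
 *	if (folio_referenced(folio, 1, memcg, &vm_flags))
 *		... folio was recently referenced: keep it ...
 *
 *	try_to_unmap(folio, TTU_BATCH_FLUSH);
 *	if (!folio_mapped(folio))
 *		... all mappings are gone: the folio can be written out ...
 */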

/* Avoid racy checks */
#define PVMW_SYNC		(1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION		(1 << 1)

struct page_vma_mapped_walk {
	unsigned long pfn;
	unsigned long nr_pages;
	pgoff_t pgoff;
	struct vm_area_struct *vma;
	unsigned long address;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	unsigned int flags;
};

#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags)	\
	struct page_vma_mapped_walk name = {				\
		.pfn = folio_pfn(_folio),				\
		.nr_pages = folio_nr_pages(_folio),			\
		.pgoff = folio_pgoff(_folio),				\
		.vma = _vma,						\
		.address = _address,					\
		.flags = _flags,					\
	}

static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
	/*
	 * A HugeTLB pte points directly at the relevant page table entry and
	 * is never pte_map()'d, so it must not be pte_unmap()'d either.
	 */
	if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
		pte_unmap(pvmw->pte);
	if (pvmw->ptl)
		spin_unlock(pvmw->ptl);
}

/**
 * page_vma_mapped_walk_restart - Restart the page table walk.
 * @pvmw: Pointer to struct page_vma_mapped_walk.
 *
 * It restarts the page table walk when changes occur in the page
 * table, such as splitting a PMD. Ensures that the PTL held during
 * the previous walk is released and resets the state to allow for
 * a new walk starting at the current address stored in pvmw->address.
 */
static inline void
page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw)
{
	WARN_ON_ONCE(!pvmw->pmd && !pvmw->pte);

	if (likely(pvmw->ptl))
		spin_unlock(pvmw->ptl);
	else
		WARN_ON_ONCE(1);

	pvmw->ptl = NULL;
	pvmw->pmd = NULL;
	pvmw->pte = NULL;
}

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
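
/*
 * Illustrative sketch of a page_vma_mapped_walk() loop as used by rmap
 * walkers. The types, macro and functions are from this file; what the body
 * does with each mapping is assumed context.
 *
 *	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte) {
 *			// One PTE mapping the folio, with pvmw.ptl held.
 *		} else {
 *			// A PMD-level mapping; pvmw.pmd is valid instead.
 *		}
 *		// To stop early, call page_vma_mapped_walk_done(&pvmw)
 *		// before breaking out of the loop.
 *	}
 */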

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int folio_mkclean(struct folio *);

int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
		      struct vm_area_struct *vma);

enum rmp_flags {
	RMP_LOCKED		= 1 << 0,
	RMP_USE_SHARED_ZEROPAGE	= 1 << 1,
};

void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);

/*
 * rmap_walk_control: To control rmap traversal for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * try_lock: bail out if the rmap lock is contended
 * contended: indicate the rmap traversal bailed out due to lock contention
 * rmap_one: executed on each vma where the page is mapped
 * done: checks whether the traversal should terminate
 * anon_lock: take the anon_vma lock in an optimized way instead of the default
 * invalid_vma: skip VMAs that are not of interest
 */
struct rmap_walk_control {
	void *arg;
	bool try_lock;
	bool contended;
	/*
	 * Return false if page table scanning in rmap_walk should be stopped.
	 * Otherwise, return true.
	 */
	bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
					unsigned long addr, void *arg);
	int (*done)(struct folio *folio);
	struct anon_vma *(*anon_lock)(struct folio *folio,
				      struct rmap_walk_control *rwc);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
					  struct rmap_walk_control *rwc);
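
/*
 * Illustrative sketch of an rmap_walk() user. struct rmap_walk_control and
 * rmap_walk() are from this file; the callback body and the 'my_' names are
 * hypothetical.
 *
 *	static bool my_rmap_one(struct folio *folio, struct vm_area_struct *vma,
 *				unsigned long addr, void *arg)
 *	{
 *		// Inspect or modify this mapping of the folio.
 *		return true;	// false stops the walk early
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *		.arg = my_arg,
 *	};
 *	rmap_walk(folio, &rwc);
 */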

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)

static inline int folio_referenced(struct folio *folio, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
}

static inline int folio_mkclean(struct folio *folio)
{
	return 0;
}
#endif	/* CONFIG_MMU */

#endif	/* _LINUX_RMAP_H */