1  // SPDX-License-Identifier: GPL-2.0-only
2  #include <linux/kernel.h>
3  #include <linux/errno.h>
4  #include <linux/err.h>
5  #include <linux/spinlock.h>
6  
7  #include <linux/mm.h>
8  #include <linux/memfd.h>
9  #include <linux/memremap.h>
10  #include <linux/pagemap.h>
11  #include <linux/rmap.h>
12  #include <linux/swap.h>
13  #include <linux/swapops.h>
14  #include <linux/secretmem.h>
15  
16  #include <linux/sched/signal.h>
17  #include <linux/rwsem.h>
18  #include <linux/hugetlb.h>
19  #include <linux/migrate.h>
20  #include <linux/mm_inline.h>
21  #include <linux/pagevec.h>
22  #include <linux/sched/mm.h>
23  #include <linux/shmem_fs.h>
24  
25  #include <asm/mmu_context.h>
26  #include <asm/tlbflush.h>
27  
28  #include "internal.h"
29  
30  struct follow_page_context {
31  	struct dev_pagemap *pgmap;
32  	unsigned int page_mask;
33  };
34  
sanity_check_pinned_pages(struct page ** pages,unsigned long npages)35  static inline void sanity_check_pinned_pages(struct page **pages,
36  					     unsigned long npages)
37  {
38  	if (!IS_ENABLED(CONFIG_DEBUG_VM))
39  		return;
40  
41  	/*
42  	 * We only pin anonymous pages if they are exclusive. Once pinned, we
43  	 * can no longer turn them possibly shared and PageAnonExclusive() will
44  	 * stick around until the page is freed.
45  	 *
46  	 * We'd like to verify that our pinned anonymous pages are still mapped
47  	 * exclusively. The issue with anon THP is that we don't know how
48  	 * they are/were mapped when pinning them. However, for anon
49  	 * THP we can assume that either the given page (PTE-mapped THP) or
50  	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
51  	 * neither is the case, there is certainly something wrong.
52  	 */
53  	for (; npages; npages--, pages++) {
54  		struct page *page = *pages;
55  		struct folio *folio = page_folio(page);
56  
57  		if (is_zero_page(page) ||
58  		    !folio_test_anon(folio))
59  			continue;
60  		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
61  			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
62  		else
63  			/* Either a PTE-mapped or a PMD-mapped THP. */
64  			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
65  				       !PageAnonExclusive(page), page);
66  	}
67  }
68  
69  /*
70   * Return the folio with ref appropriately incremented,
71   * or NULL if that failed.
72   */
try_get_folio(struct page * page,int refs)73  static inline struct folio *try_get_folio(struct page *page, int refs)
74  {
75  	struct folio *folio;
76  
77  retry:
78  	folio = page_folio(page);
79  	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
80  		return NULL;
81  	if (unlikely(!folio_ref_try_add(folio, refs)))
82  		return NULL;
83  
84  	/*
85  	 * At this point we have a stable reference to the folio; but it
86  	 * could be that between calling page_folio() and the refcount
87  	 * increment, the folio was split, in which case we'd end up
88  	 * holding a reference on a folio that has nothing to do with the page
89  	 * we were given anymore.
90  	 * So now that the folio is stable, recheck that the page still
91  	 * belongs to this folio.
92  	 */
93  	if (unlikely(page_folio(page) != folio)) {
94  		if (!put_devmap_managed_folio_refs(folio, refs))
95  			folio_put_refs(folio, refs);
96  		goto retry;
97  	}
98  
99  	return folio;
100  }
101  
gup_put_folio(struct folio * folio,int refs,unsigned int flags)102  static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
103  {
104  	if (flags & FOLL_PIN) {
105  		if (is_zero_folio(folio))
106  			return;
107  		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
108  		if (folio_test_large(folio))
109  			atomic_sub(refs, &folio->_pincount);
110  		else
111  			refs *= GUP_PIN_COUNTING_BIAS;
112  	}
113  
114  	if (!put_devmap_managed_folio_refs(folio, refs))
115  		folio_put_refs(folio, refs);
116  }
117  
118  /**
119   * try_grab_folio() - add a folio's refcount by a flag-dependent amount
120   * @folio:    pointer to folio to be grabbed
121   * @refs:     the value to (effectively) add to the folio's refcount
122   * @flags:    gup flags: these are the FOLL_* flag values
123   *
124   * This might not do anything at all, depending on the flags argument.
125   *
126   * "grab" names in this file mean, "look at flags to decide whether to use
127   * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
128   *
129   * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
130   * time.
131   *
132   * Return: 0 for success, or if no action was required (if neither FOLL_PIN
133   * nor FOLL_GET was set, nothing is done). A negative error code for failure:
134   *
135   *   -ENOMEM		FOLL_GET or FOLL_PIN was set, but the folio could not
136   *			be grabbed.
137   *
138   * It is called when we have a stable reference for the folio, typically in
139   * GUP slow path.
140   */
try_grab_folio(struct folio * folio,int refs,unsigned int flags)141  int __must_check try_grab_folio(struct folio *folio, int refs,
142  				unsigned int flags)
143  {
144  	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
145  		return -ENOMEM;
146  
147  	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page)))
148  		return -EREMOTEIO;
149  
150  	if (flags & FOLL_GET)
151  		folio_ref_add(folio, refs);
152  	else if (flags & FOLL_PIN) {
153  		/*
154  		 * Don't take a pin on the zero page - it's not going anywhere
155  		 * and it is used in a *lot* of places.
156  		 */
157  		if (is_zero_folio(folio))
158  			return 0;
159  
160  		/*
161  		 * Increment the normal page refcount field at least once,
162  		 * so that the page really is pinned.
163  		 */
164  		if (folio_test_large(folio)) {
165  			folio_ref_add(folio, refs);
166  			atomic_add(refs, &folio->_pincount);
167  		} else {
168  			folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
169  		}
170  
171  		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
172  	}
173  
174  	return 0;
175  }
176  
177  /**
178   * unpin_user_page() - release a dma-pinned page
179   * @page:            pointer to page to be released
180   *
181   * Pages that were pinned via pin_user_pages*() must be released via either
182   * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
183   * that such pages can be separately tracked and uniquely handled. In
184   * particular, interactions with RDMA and filesystems need special handling.
185   */
unpin_user_page(struct page * page)186  void unpin_user_page(struct page *page)
187  {
188  	sanity_check_pinned_pages(&page, 1);
189  	gup_put_folio(page_folio(page), 1, FOLL_PIN);
190  }
191  EXPORT_SYMBOL(unpin_user_page);
192  
193  /**
194   * unpin_folio() - release a dma-pinned folio
195   * @folio:         pointer to folio to be released
196   *
197   * Folios that were pinned via memfd_pin_folios() or other similar routines
198   * must be released either using unpin_folio() or unpin_folios().
199   */
unpin_folio(struct folio * folio)200  void unpin_folio(struct folio *folio)
201  {
202  	gup_put_folio(folio, 1, FOLL_PIN);
203  }
204  EXPORT_SYMBOL_GPL(unpin_folio);
205  
206  /**
207   * folio_add_pin - Try to get an additional pin on a pinned folio
208   * @folio: The folio to be pinned
209   *
210   * Get an additional pin on a folio we already have a pin on.  Makes no change
211   * if the folio is a zero_page.
212   */
folio_add_pin(struct folio * folio)213  void folio_add_pin(struct folio *folio)
214  {
215  	if (is_zero_folio(folio))
216  		return;
217  
218  	/*
219  	 * Similar to try_grab_folio(): be sure to *also* increment the normal
220  	 * page refcount field at least once, so that the page really is
221  	 * pinned.
222  	 */
223  	if (folio_test_large(folio)) {
224  		WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
225  		folio_ref_inc(folio);
226  		atomic_inc(&folio->_pincount);
227  	} else {
228  		WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
229  		folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
230  	}
231  }
232  
gup_folio_range_next(struct page * start,unsigned long npages,unsigned long i,unsigned int * ntails)233  static inline struct folio *gup_folio_range_next(struct page *start,
234  		unsigned long npages, unsigned long i, unsigned int *ntails)
235  {
236  	struct page *next = nth_page(start, i);
237  	struct folio *folio = page_folio(next);
238  	unsigned int nr = 1;
239  
240  	if (folio_test_large(folio))
241  		nr = min_t(unsigned int, npages - i,
242  			   folio_nr_pages(folio) - folio_page_idx(folio, next));
243  
244  	*ntails = nr;
245  	return folio;
246  }
247  
gup_folio_next(struct page ** list,unsigned long npages,unsigned long i,unsigned int * ntails)248  static inline struct folio *gup_folio_next(struct page **list,
249  		unsigned long npages, unsigned long i, unsigned int *ntails)
250  {
251  	struct folio *folio = page_folio(list[i]);
252  	unsigned int nr;
253  
254  	for (nr = i + 1; nr < npages; nr++) {
255  		if (page_folio(list[nr]) != folio)
256  			break;
257  	}
258  
259  	*ntails = nr - i;
260  	return folio;
261  }
262  
263  /**
264   * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
265   * @pages:  array of pages to be maybe marked dirty, and definitely released.
266   * @npages: number of pages in the @pages array.
267   * @make_dirty: whether to mark the pages dirty
268   *
269   * "gup-pinned page" refers to a page that has had one of the get_user_pages()
270   * variants called on that page.
271   *
272   * For each page in the @pages array, make that page (or its head page, if a
273   * compound page) dirty, if @make_dirty is true, and if the page was previously
274   * listed as clean. In any case, releases all pages using unpin_user_page(),
275   * possibly via unpin_user_pages(), for the non-dirty case.
276   *
277   * Please see the unpin_user_page() documentation for details.
278   *
279   * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
280   * required, then the caller should a) verify that this is really correct,
281   * because _lock() is usually required, and b) hand code it:
282   * set_page_dirty_lock(), unpin_user_page().
283   *
284   */
unpin_user_pages_dirty_lock(struct page ** pages,unsigned long npages,bool make_dirty)285  void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
286  				 bool make_dirty)
287  {
288  	unsigned long i;
289  	struct folio *folio;
290  	unsigned int nr;
291  
292  	if (!make_dirty) {
293  		unpin_user_pages(pages, npages);
294  		return;
295  	}
296  
297  	sanity_check_pinned_pages(pages, npages);
298  	for (i = 0; i < npages; i += nr) {
299  		folio = gup_folio_next(pages, npages, i, &nr);
300  		/*
301  		 * Checking PageDirty at this point may race with
302  		 * clear_page_dirty_for_io(), but that's OK. Two key
303  		 * cases:
304  		 *
305  		 * 1) This code sees the page as already dirty, so it
306  		 * skips the call to set_page_dirty(). That could happen
307  		 * because clear_page_dirty_for_io() called
308  		 * folio_mkclean(), followed by set_page_dirty().
309  		 * However, now the page is going to get written back,
310  		 * which meets the original intention of setting it
311  		 * dirty, so all is well: clear_page_dirty_for_io() goes
312  		 * on to call TestClearPageDirty(), and write the page
313  		 * back.
314  		 *
315  		 * 2) This code sees the page as clean, so it calls
316  		 * set_page_dirty(). The page stays dirty, despite being
317  		 * written back, so it gets written back again in the
318  		 * next writeback cycle. This is harmless.
319  		 */
320  		if (!folio_test_dirty(folio)) {
321  			folio_lock(folio);
322  			folio_mark_dirty(folio);
323  			folio_unlock(folio);
324  		}
325  		gup_put_folio(folio, nr, FOLL_PIN);
326  	}
327  }
328  EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
329  
330  /**
331   * unpin_user_page_range_dirty_lock() - release and optionally dirty
332   * gup-pinned page range
333   *
334   * @page:  the starting page of a range maybe marked dirty, and definitely released.
335   * @npages: number of consecutive pages to release.
336   * @make_dirty: whether to mark the pages dirty
337   *
338   * "gup-pinned page range" refers to a range of pages that has had one of the
339   * pin_user_pages() variants called on that page.
340   *
341   * For the page ranges defined by [page .. page+npages], make that range (or
342   * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
343   * page range was previously listed as clean.
344   *
345   * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
346   * required, then the caller should a) verify that this is really correct,
347   * because _lock() is usually required, and b) hand code it:
348   * set_page_dirty_lock(), unpin_user_page().
349   *
350   */
unpin_user_page_range_dirty_lock(struct page * page,unsigned long npages,bool make_dirty)351  void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
352  				      bool make_dirty)
353  {
354  	unsigned long i;
355  	struct folio *folio;
356  	unsigned int nr;
357  
358  	for (i = 0; i < npages; i += nr) {
359  		folio = gup_folio_range_next(page, npages, i, &nr);
360  		if (make_dirty && !folio_test_dirty(folio)) {
361  			folio_lock(folio);
362  			folio_mark_dirty(folio);
363  			folio_unlock(folio);
364  		}
365  		gup_put_folio(folio, nr, FOLL_PIN);
366  	}
367  }
368  EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
369  
gup_fast_unpin_user_pages(struct page ** pages,unsigned long npages)370  static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages)
371  {
372  	unsigned long i;
373  	struct folio *folio;
374  	unsigned int nr;
375  
376  	/*
377  	 * Don't perform any sanity checks because we might have raced with
378  	 * fork() and some anonymous pages might now actually be shared --
379  	 * which is why we're unpinning after all.
380  	 */
381  	for (i = 0; i < npages; i += nr) {
382  		folio = gup_folio_next(pages, npages, i, &nr);
383  		gup_put_folio(folio, nr, FOLL_PIN);
384  	}
385  }
386  
387  /**
388   * unpin_user_pages() - release an array of gup-pinned pages.
389   * @pages:  array of pages to be marked dirty and released.
390   * @npages: number of pages in the @pages array.
391   *
392   * For each page in the @pages array, release the page using unpin_user_page().
393   *
394   * Please see the unpin_user_page() documentation for details.
395   */
unpin_user_pages(struct page ** pages,unsigned long npages)396  void unpin_user_pages(struct page **pages, unsigned long npages)
397  {
398  	unsigned long i;
399  	struct folio *folio;
400  	unsigned int nr;
401  
402  	/*
403  	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
404  	 * leaving them pinned), but probably not. More likely, gup/pup returned
405  	 * a hard -ERRNO error to the caller, who erroneously passed it here.
406  	 */
407  	if (WARN_ON(IS_ERR_VALUE(npages)))
408  		return;
409  
410  	sanity_check_pinned_pages(pages, npages);
411  	for (i = 0; i < npages; i += nr) {
412  		folio = gup_folio_next(pages, npages, i, &nr);
413  		gup_put_folio(folio, nr, FOLL_PIN);
414  	}
415  }
416  EXPORT_SYMBOL(unpin_user_pages);
417  
418  /**
419   * unpin_user_folio() - release pages of a folio
420   * @folio:  pointer to folio to be released
421   * @npages: number of pages of same folio
422   *
423   * Release npages of the folio
424   */
unpin_user_folio(struct folio * folio,unsigned long npages)425  void unpin_user_folio(struct folio *folio, unsigned long npages)
426  {
427  	gup_put_folio(folio, npages, FOLL_PIN);
428  }
429  EXPORT_SYMBOL(unpin_user_folio);
430  
431  /**
432   * unpin_folios() - release an array of gup-pinned folios.
433   * @folios:  array of folios to be marked dirty and released.
434   * @nfolios: number of folios in the @folios array.
435   *
436   * For each folio in the @folios array, release the folio using gup_put_folio.
437   *
438   * Please see the unpin_folio() documentation for details.
439   */
unpin_folios(struct folio ** folios,unsigned long nfolios)440  void unpin_folios(struct folio **folios, unsigned long nfolios)
441  {
442  	unsigned long i = 0, j;
443  
444  	/*
445  	 * If this WARN_ON() fires, then the system *might* be leaking folios
446  	 * (by leaving them pinned), but probably not. More likely, gup/pup
447  	 * returned a hard -ERRNO error to the caller, who erroneously passed
448  	 * it here.
449  	 */
450  	if (WARN_ON(IS_ERR_VALUE(nfolios)))
451  		return;
452  
453  	while (i < nfolios) {
454  		for (j = i + 1; j < nfolios; j++)
455  			if (folios[i] != folios[j])
456  				break;
457  
458  		if (folios[i])
459  			gup_put_folio(folios[i], j - i, FOLL_PIN);
460  		i = j;
461  	}
462  }
463  EXPORT_SYMBOL_GPL(unpin_folios);
464  
465  /*
466   * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
467   * lifecycle.  Avoid setting the bit unless necessary, or it might cause write
468   * cache bouncing on large SMP machines for concurrent pinned gups.
469   */
mm_set_has_pinned_flag(unsigned long * mm_flags)470  static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
471  {
472  	if (!test_bit(MMF_HAS_PINNED, mm_flags))
473  		set_bit(MMF_HAS_PINNED, mm_flags);
474  }
475  
476  #ifdef CONFIG_MMU
477  
478  #ifdef CONFIG_HAVE_GUP_FAST
record_subpages(struct page * page,unsigned long sz,unsigned long addr,unsigned long end,struct page ** pages)479  static int record_subpages(struct page *page, unsigned long sz,
480  			   unsigned long addr, unsigned long end,
481  			   struct page **pages)
482  {
483  	struct page *start_page;
484  	int nr;
485  
486  	start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
487  	for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
488  		pages[nr] = nth_page(start_page, nr);
489  
490  	return nr;
491  }
492  
493  /**
494   * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
495   * @page:  pointer to page to be grabbed
496   * @refs:  the value to (effectively) add to the folio's refcount
497   * @flags: gup flags: these are the FOLL_* flag values.
498   *
499   * "grab" names in this file mean, "look at flags to decide whether to use
500   * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
501   *
502   * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
503   * same time. (That's true throughout the get_user_pages*() and
504   * pin_user_pages*() APIs.) Cases:
505   *
506   *    FOLL_GET: folio's refcount will be incremented by @refs.
507   *
508   *    FOLL_PIN on large folios: folio's refcount will be incremented by
509   *    @refs, and its pincount will be incremented by @refs.
510   *
511   *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
512   *    @refs * GUP_PIN_COUNTING_BIAS.
513   *
514   * Return: The folio containing @page (with refcount appropriately
515   * incremented) for success, or NULL upon failure. If neither FOLL_GET
516   * nor FOLL_PIN was set, that's considered failure, and furthermore,
517   * a likely bug in the caller, so a warning is also emitted.
518   *
519   * It uses add ref unless zero to elevate the folio refcount and must be called
520   * in fast path only.
521   */
try_grab_folio_fast(struct page * page,int refs,unsigned int flags)522  static struct folio *try_grab_folio_fast(struct page *page, int refs,
523  					 unsigned int flags)
524  {
525  	struct folio *folio;
526  
527  	/* Raise warn if it is not called in fast GUP */
528  	VM_WARN_ON_ONCE(!irqs_disabled());
529  
530  	if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
531  		return NULL;
532  
533  	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
534  		return NULL;
535  
536  	if (flags & FOLL_GET)
537  		return try_get_folio(page, refs);
538  
539  	/* FOLL_PIN is set */
540  
541  	/*
542  	 * Don't take a pin on the zero page - it's not going anywhere
543  	 * and it is used in a *lot* of places.
544  	 */
545  	if (is_zero_page(page))
546  		return page_folio(page);
547  
548  	folio = try_get_folio(page, refs);
549  	if (!folio)
550  		return NULL;
551  
552  	/*
553  	 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
554  	 * right zone, so fail and let the caller fall back to the slow
555  	 * path.
556  	 */
557  	if (unlikely((flags & FOLL_LONGTERM) &&
558  		     !folio_is_longterm_pinnable(folio))) {
559  		if (!put_devmap_managed_folio_refs(folio, refs))
560  			folio_put_refs(folio, refs);
561  		return NULL;
562  	}
563  
564  	/*
565  	 * When pinning a large folio, use an exact count to track it.
566  	 *
567  	 * However, be sure to *also* increment the normal folio
568  	 * refcount field at least once, so that the folio really
569  	 * is pinned.  That's why the refcount from the earlier
570  	 * try_get_folio() is left intact.
571  	 */
572  	if (folio_test_large(folio))
573  		atomic_add(refs, &folio->_pincount);
574  	else
575  		folio_ref_add(folio,
576  				refs * (GUP_PIN_COUNTING_BIAS - 1));
577  	/*
578  	 * Adjust the pincount before re-checking the PTE for changes.
579  	 * This is essentially a smp_mb() and is paired with a memory
580  	 * barrier in folio_try_share_anon_rmap_*().
581  	 */
582  	smp_mb__after_atomic();
583  
584  	node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
585  
586  	return folio;
587  }
588  #endif	/* CONFIG_HAVE_GUP_FAST */
589  
no_page_table(struct vm_area_struct * vma,unsigned int flags,unsigned long address)590  static struct page *no_page_table(struct vm_area_struct *vma,
591  				  unsigned int flags, unsigned long address)
592  {
593  	if (!(flags & FOLL_DUMP))
594  		return NULL;
595  
596  	/*
597  	 * When core dumping, we don't want to allocate unnecessary pages or
598  	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
599  	 * then get_dump_page() will return NULL to leave a hole in the dump.
600  	 * But we can only make this optimization where a hole would surely
601  	 * be zero-filled if handle_mm_fault() actually did handle it.
602  	 */
603  	if (is_vm_hugetlb_page(vma)) {
604  		struct hstate *h = hstate_vma(vma);
605  
606  		if (!hugetlbfs_pagecache_present(h, vma, address))
607  			return ERR_PTR(-EFAULT);
608  	} else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) {
609  		return ERR_PTR(-EFAULT);
610  	}
611  
612  	return NULL;
613  }
614  
615  #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
follow_huge_pud(struct vm_area_struct * vma,unsigned long addr,pud_t * pudp,int flags,struct follow_page_context * ctx)616  static struct page *follow_huge_pud(struct vm_area_struct *vma,
617  				    unsigned long addr, pud_t *pudp,
618  				    int flags, struct follow_page_context *ctx)
619  {
620  	struct mm_struct *mm = vma->vm_mm;
621  	struct page *page;
622  	pud_t pud = *pudp;
623  	unsigned long pfn = pud_pfn(pud);
624  	int ret;
625  
626  	assert_spin_locked(pud_lockptr(mm, pudp));
627  
628  	if ((flags & FOLL_WRITE) && !pud_write(pud))
629  		return NULL;
630  
631  	if (!pud_present(pud))
632  		return NULL;
633  
634  	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
635  
636  	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
637  	    pud_devmap(pud)) {
638  		/*
639  		 * device mapped pages can only be returned if the caller
640  		 * will manage the page reference count.
641  		 *
642  		 * At least one of FOLL_GET | FOLL_PIN must be set, so
643  		 * assert that here:
644  		 */
645  		if (!(flags & (FOLL_GET | FOLL_PIN)))
646  			return ERR_PTR(-EEXIST);
647  
648  		if (flags & FOLL_TOUCH)
649  			touch_pud(vma, addr, pudp, flags & FOLL_WRITE);
650  
651  		ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
652  		if (!ctx->pgmap)
653  			return ERR_PTR(-EFAULT);
654  	}
655  
656  	page = pfn_to_page(pfn);
657  
658  	if (!pud_devmap(pud) && !pud_write(pud) &&
659  	    gup_must_unshare(vma, flags, page))
660  		return ERR_PTR(-EMLINK);
661  
662  	ret = try_grab_folio(page_folio(page), 1, flags);
663  	if (ret)
664  		page = ERR_PTR(ret);
665  	else
666  		ctx->page_mask = HPAGE_PUD_NR - 1;
667  
668  	return page;
669  }
670  
671  /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
can_follow_write_pmd(pmd_t pmd,struct page * page,struct vm_area_struct * vma,unsigned int flags)672  static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
673  					struct vm_area_struct *vma,
674  					unsigned int flags)
675  {
676  	/* If the pmd is writable, we can write to the page. */
677  	if (pmd_write(pmd))
678  		return true;
679  
680  	/* Maybe FOLL_FORCE is set to override it? */
681  	if (!(flags & FOLL_FORCE))
682  		return false;
683  
684  	/* But FOLL_FORCE has no effect on shared mappings */
685  	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
686  		return false;
687  
688  	/* ... or read-only private ones */
689  	if (!(vma->vm_flags & VM_MAYWRITE))
690  		return false;
691  
692  	/* ... or already writable ones that just need to take a write fault */
693  	if (vma->vm_flags & VM_WRITE)
694  		return false;
695  
696  	/*
697  	 * See can_change_pte_writable(): we broke COW and could map the page
698  	 * writable if we have an exclusive anonymous page ...
699  	 */
700  	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
701  		return false;
702  
703  	/* ... and a write-fault isn't required for other reasons. */
704  	if (pmd_needs_soft_dirty_wp(vma, pmd))
705  		return false;
706  	return !userfaultfd_huge_pmd_wp(vma, pmd);
707  }
708  
follow_huge_pmd(struct vm_area_struct * vma,unsigned long addr,pmd_t * pmd,unsigned int flags,struct follow_page_context * ctx)709  static struct page *follow_huge_pmd(struct vm_area_struct *vma,
710  				    unsigned long addr, pmd_t *pmd,
711  				    unsigned int flags,
712  				    struct follow_page_context *ctx)
713  {
714  	struct mm_struct *mm = vma->vm_mm;
715  	pmd_t pmdval = *pmd;
716  	struct page *page;
717  	int ret;
718  
719  	assert_spin_locked(pmd_lockptr(mm, pmd));
720  
721  	page = pmd_page(pmdval);
722  	if ((flags & FOLL_WRITE) &&
723  	    !can_follow_write_pmd(pmdval, page, vma, flags))
724  		return NULL;
725  
726  	/* Avoid dumping huge zero page */
727  	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
728  		return ERR_PTR(-EFAULT);
729  
730  	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
731  		return NULL;
732  
733  	if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
734  		return ERR_PTR(-EMLINK);
735  
736  	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
737  			!PageAnonExclusive(page), page);
738  
739  	ret = try_grab_folio(page_folio(page), 1, flags);
740  	if (ret)
741  		return ERR_PTR(ret);
742  
743  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
744  	if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
745  		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
746  #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
747  
748  	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
749  	ctx->page_mask = HPAGE_PMD_NR - 1;
750  
751  	return page;
752  }
753  
754  #else  /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
follow_huge_pud(struct vm_area_struct * vma,unsigned long addr,pud_t * pudp,int flags,struct follow_page_context * ctx)755  static struct page *follow_huge_pud(struct vm_area_struct *vma,
756  				    unsigned long addr, pud_t *pudp,
757  				    int flags, struct follow_page_context *ctx)
758  {
759  	return NULL;
760  }
761  
follow_huge_pmd(struct vm_area_struct * vma,unsigned long addr,pmd_t * pmd,unsigned int flags,struct follow_page_context * ctx)762  static struct page *follow_huge_pmd(struct vm_area_struct *vma,
763  				    unsigned long addr, pmd_t *pmd,
764  				    unsigned int flags,
765  				    struct follow_page_context *ctx)
766  {
767  	return NULL;
768  }
769  #endif	/* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
770  
follow_pfn_pte(struct vm_area_struct * vma,unsigned long address,pte_t * pte,unsigned int flags)771  static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
772  		pte_t *pte, unsigned int flags)
773  {
774  	if (flags & FOLL_TOUCH) {
775  		pte_t orig_entry = ptep_get(pte);
776  		pte_t entry = orig_entry;
777  
778  		if (flags & FOLL_WRITE)
779  			entry = pte_mkdirty(entry);
780  		entry = pte_mkyoung(entry);
781  
782  		if (!pte_same(orig_entry, entry)) {
783  			set_pte_at(vma->vm_mm, address, pte, entry);
784  			update_mmu_cache(vma, address, pte);
785  		}
786  	}
787  
788  	/* Proper page table entry exists, but no corresponding struct page */
789  	return -EEXIST;
790  }
791  
792  /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
can_follow_write_pte(pte_t pte,struct page * page,struct vm_area_struct * vma,unsigned int flags)793  static inline bool can_follow_write_pte(pte_t pte, struct page *page,
794  					struct vm_area_struct *vma,
795  					unsigned int flags)
796  {
797  	/* If the pte is writable, we can write to the page. */
798  	if (pte_write(pte))
799  		return true;
800  
801  	/* Maybe FOLL_FORCE is set to override it? */
802  	if (!(flags & FOLL_FORCE))
803  		return false;
804  
805  	/* But FOLL_FORCE has no effect on shared mappings */
806  	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
807  		return false;
808  
809  	/* ... or read-only private ones */
810  	if (!(vma->vm_flags & VM_MAYWRITE))
811  		return false;
812  
813  	/* ... or already writable ones that just need to take a write fault */
814  	if (vma->vm_flags & VM_WRITE)
815  		return false;
816  
817  	/*
818  	 * See can_change_pte_writable(): we broke COW and could map the page
819  	 * writable if we have an exclusive anonymous page ...
820  	 */
821  	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
822  		return false;
823  
824  	/* ... and a write-fault isn't required for other reasons. */
825  	if (pte_needs_soft_dirty_wp(vma, pte))
826  		return false;
827  	return !userfaultfd_pte_wp(vma, pte);
828  }
829  
follow_page_pte(struct vm_area_struct * vma,unsigned long address,pmd_t * pmd,unsigned int flags,struct dev_pagemap ** pgmap)830  static struct page *follow_page_pte(struct vm_area_struct *vma,
831  		unsigned long address, pmd_t *pmd, unsigned int flags,
832  		struct dev_pagemap **pgmap)
833  {
834  	struct mm_struct *mm = vma->vm_mm;
835  	struct folio *folio;
836  	struct page *page;
837  	spinlock_t *ptl;
838  	pte_t *ptep, pte;
839  	int ret;
840  
841  	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
842  	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
843  			 (FOLL_PIN | FOLL_GET)))
844  		return ERR_PTR(-EINVAL);
845  
846  	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
847  	if (!ptep)
848  		return no_page_table(vma, flags, address);
849  	pte = ptep_get(ptep);
850  	if (!pte_present(pte))
851  		goto no_page;
852  	if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
853  		goto no_page;
854  
855  	page = vm_normal_page(vma, address, pte);
856  
857  	/*
858  	 * We only care about anon pages in can_follow_write_pte() and don't
859  	 * have to worry about pte_devmap() because they are never anon.
860  	 */
861  	if ((flags & FOLL_WRITE) &&
862  	    !can_follow_write_pte(pte, page, vma, flags)) {
863  		page = NULL;
864  		goto out;
865  	}
866  
867  	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
868  		/*
869  		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
870  		 * case since they are only valid while holding the pgmap
871  		 * reference.
872  		 */
873  		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
874  		if (*pgmap)
875  			page = pte_page(pte);
876  		else
877  			goto no_page;
878  	} else if (unlikely(!page)) {
879  		if (flags & FOLL_DUMP) {
880  			/* Avoid special (like zero) pages in core dumps */
881  			page = ERR_PTR(-EFAULT);
882  			goto out;
883  		}
884  
885  		if (is_zero_pfn(pte_pfn(pte))) {
886  			page = pte_page(pte);
887  		} else {
888  			ret = follow_pfn_pte(vma, address, ptep, flags);
889  			page = ERR_PTR(ret);
890  			goto out;
891  		}
892  	}
893  	folio = page_folio(page);
894  
895  	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
896  		page = ERR_PTR(-EMLINK);
897  		goto out;
898  	}
899  
900  	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
901  		       !PageAnonExclusive(page), page);
902  
903  	/* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
904  	ret = try_grab_folio(folio, 1, flags);
905  	if (unlikely(ret)) {
906  		page = ERR_PTR(ret);
907  		goto out;
908  	}
909  
910  	/*
911  	 * We need to make the page accessible if and only if we are going
912  	 * to access its content (the FOLL_PIN case).  Please see
913  	 * Documentation/core-api/pin_user_pages.rst for details.
914  	 */
915  	if (flags & FOLL_PIN) {
916  		ret = arch_make_folio_accessible(folio);
917  		if (ret) {
918  			unpin_user_page(page);
919  			page = ERR_PTR(ret);
920  			goto out;
921  		}
922  	}
923  	if (flags & FOLL_TOUCH) {
924  		if ((flags & FOLL_WRITE) &&
925  		    !pte_dirty(pte) && !PageDirty(page))
926  			set_page_dirty(page);
927  		/*
928  		 * pte_mkyoung() would be more correct here, but atomic care
929  		 * is needed to avoid losing the dirty bit: it is easier to use
930  		 * mark_page_accessed().
931  		 */
932  		mark_page_accessed(page);
933  	}
934  out:
935  	pte_unmap_unlock(ptep, ptl);
936  	return page;
937  no_page:
938  	pte_unmap_unlock(ptep, ptl);
939  	if (!pte_none(pte))
940  		return NULL;
941  	return no_page_table(vma, flags, address);
942  }
943  
follow_pmd_mask(struct vm_area_struct * vma,unsigned long address,pud_t * pudp,unsigned int flags,struct follow_page_context * ctx)944  static struct page *follow_pmd_mask(struct vm_area_struct *vma,
945  				    unsigned long address, pud_t *pudp,
946  				    unsigned int flags,
947  				    struct follow_page_context *ctx)
948  {
949  	pmd_t *pmd, pmdval;
950  	spinlock_t *ptl;
951  	struct page *page;
952  	struct mm_struct *mm = vma->vm_mm;
953  
954  	pmd = pmd_offset(pudp, address);
955  	pmdval = pmdp_get_lockless(pmd);
956  	if (pmd_none(pmdval))
957  		return no_page_table(vma, flags, address);
958  	if (!pmd_present(pmdval))
959  		return no_page_table(vma, flags, address);
960  	if (pmd_devmap(pmdval)) {
961  		ptl = pmd_lock(mm, pmd);
962  		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
963  		spin_unlock(ptl);
964  		if (page)
965  			return page;
966  		return no_page_table(vma, flags, address);
967  	}
968  	if (likely(!pmd_leaf(pmdval)))
969  		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
970  
971  	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
972  		return no_page_table(vma, flags, address);
973  
974  	ptl = pmd_lock(mm, pmd);
975  	pmdval = *pmd;
976  	if (unlikely(!pmd_present(pmdval))) {
977  		spin_unlock(ptl);
978  		return no_page_table(vma, flags, address);
979  	}
980  	if (unlikely(!pmd_leaf(pmdval))) {
981  		spin_unlock(ptl);
982  		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
983  	}
984  	if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
985  		spin_unlock(ptl);
986  		split_huge_pmd(vma, pmd, address);
987  		/* If pmd was left empty, stuff a page table in there quickly */
988  		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
989  			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
990  	}
991  	page = follow_huge_pmd(vma, address, pmd, flags, ctx);
992  	spin_unlock(ptl);
993  	return page;
994  }
995  
follow_pud_mask(struct vm_area_struct * vma,unsigned long address,p4d_t * p4dp,unsigned int flags,struct follow_page_context * ctx)996  static struct page *follow_pud_mask(struct vm_area_struct *vma,
997  				    unsigned long address, p4d_t *p4dp,
998  				    unsigned int flags,
999  				    struct follow_page_context *ctx)
1000  {
1001  	pud_t *pudp, pud;
1002  	spinlock_t *ptl;
1003  	struct page *page;
1004  	struct mm_struct *mm = vma->vm_mm;
1005  
1006  	pudp = pud_offset(p4dp, address);
1007  	pud = READ_ONCE(*pudp);
1008  	if (!pud_present(pud))
1009  		return no_page_table(vma, flags, address);
1010  	if (pud_leaf(pud)) {
1011  		ptl = pud_lock(mm, pudp);
1012  		page = follow_huge_pud(vma, address, pudp, flags, ctx);
1013  		spin_unlock(ptl);
1014  		if (page)
1015  			return page;
1016  		return no_page_table(vma, flags, address);
1017  	}
1018  	if (unlikely(pud_bad(pud)))
1019  		return no_page_table(vma, flags, address);
1020  
1021  	return follow_pmd_mask(vma, address, pudp, flags, ctx);
1022  }
1023  
follow_p4d_mask(struct vm_area_struct * vma,unsigned long address,pgd_t * pgdp,unsigned int flags,struct follow_page_context * ctx)1024  static struct page *follow_p4d_mask(struct vm_area_struct *vma,
1025  				    unsigned long address, pgd_t *pgdp,
1026  				    unsigned int flags,
1027  				    struct follow_page_context *ctx)
1028  {
1029  	p4d_t *p4dp, p4d;
1030  
1031  	p4dp = p4d_offset(pgdp, address);
1032  	p4d = READ_ONCE(*p4dp);
1033  	BUILD_BUG_ON(p4d_leaf(p4d));
1034  
1035  	if (!p4d_present(p4d) || p4d_bad(p4d))
1036  		return no_page_table(vma, flags, address);
1037  
1038  	return follow_pud_mask(vma, address, p4dp, flags, ctx);
1039  }
1040  
1041  /**
1042   * follow_page_mask - look up a page descriptor from a user-virtual address
1043   * @vma: vm_area_struct mapping @address
1044   * @address: virtual address to look up
1045   * @flags: flags modifying lookup behaviour
1046   * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
1047   *       pointer to output page_mask
1048   *
1049   * @flags can have FOLL_ flags set, defined in <linux/mm.h>
1050   *
1051   * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
1052   * the device's dev_pagemap metadata to avoid repeating expensive lookups.
1053   *
1054   * When getting an anonymous page and the caller has to trigger unsharing
1055   * of a shared anonymous page first, -EMLINK is returned. The caller should
1056   * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
1057   * relevant with FOLL_PIN and !FOLL_WRITE.
1058   *
1059   * On output, the @ctx->page_mask is set according to the size of the page.
1060   *
1061   * Return: the mapped (struct page *), %NULL if no mapping exists, or
1062   * an error pointer if there is a mapping to something not represented
1063   * by a page descriptor (see also vm_normal_page()).
1064   */
follow_page_mask(struct vm_area_struct * vma,unsigned long address,unsigned int flags,struct follow_page_context * ctx)1065  static struct page *follow_page_mask(struct vm_area_struct *vma,
1066  			      unsigned long address, unsigned int flags,
1067  			      struct follow_page_context *ctx)
1068  {
1069  	pgd_t *pgd;
1070  	struct mm_struct *mm = vma->vm_mm;
1071  	struct page *page;
1072  
1073  	vma_pgtable_walk_begin(vma);
1074  
1075  	ctx->page_mask = 0;
1076  	pgd = pgd_offset(mm, address);
1077  
1078  	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
1079  		page = no_page_table(vma, flags, address);
1080  	else
1081  		page = follow_p4d_mask(vma, address, pgd, flags, ctx);
1082  
1083  	vma_pgtable_walk_end(vma);
1084  
1085  	return page;
1086  }
1087  
get_gate_page(struct mm_struct * mm,unsigned long address,unsigned int gup_flags,struct vm_area_struct ** vma,struct page ** page)1088  static int get_gate_page(struct mm_struct *mm, unsigned long address,
1089  		unsigned int gup_flags, struct vm_area_struct **vma,
1090  		struct page **page)
1091  {
1092  	pgd_t *pgd;
1093  	p4d_t *p4d;
1094  	pud_t *pud;
1095  	pmd_t *pmd;
1096  	pte_t *pte;
1097  	pte_t entry;
1098  	int ret = -EFAULT;
1099  
1100  	/* user gate pages are read-only */
1101  	if (gup_flags & FOLL_WRITE)
1102  		return -EFAULT;
1103  	if (address > TASK_SIZE)
1104  		pgd = pgd_offset_k(address);
1105  	else
1106  		pgd = pgd_offset_gate(mm, address);
1107  	if (pgd_none(*pgd))
1108  		return -EFAULT;
1109  	p4d = p4d_offset(pgd, address);
1110  	if (p4d_none(*p4d))
1111  		return -EFAULT;
1112  	pud = pud_offset(p4d, address);
1113  	if (pud_none(*pud))
1114  		return -EFAULT;
1115  	pmd = pmd_offset(pud, address);
1116  	if (!pmd_present(*pmd))
1117  		return -EFAULT;
1118  	pte = pte_offset_map(pmd, address);
1119  	if (!pte)
1120  		return -EFAULT;
1121  	entry = ptep_get(pte);
1122  	if (pte_none(entry))
1123  		goto unmap;
1124  	*vma = get_gate_vma(mm);
1125  	if (!page)
1126  		goto out;
1127  	*page = vm_normal_page(*vma, address, entry);
1128  	if (!*page) {
1129  		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
1130  			goto unmap;
1131  		*page = pte_page(entry);
1132  	}
1133  	ret = try_grab_folio(page_folio(*page), 1, gup_flags);
1134  	if (unlikely(ret))
1135  		goto unmap;
1136  out:
1137  	ret = 0;
1138  unmap:
1139  	pte_unmap(pte);
1140  	return ret;
1141  }
1142  
1143  /*
1144   * mmap_lock must be held on entry.  If @flags has FOLL_UNLOCKABLE but not
1145   * FOLL_NOWAIT, the mmap_lock may be released.  If it is, *@locked will be set
1146   * to 0 and -EBUSY returned.
1147   */
faultin_page(struct vm_area_struct * vma,unsigned long address,unsigned int flags,bool unshare,int * locked)1148  static int faultin_page(struct vm_area_struct *vma,
1149  		unsigned long address, unsigned int flags, bool unshare,
1150  		int *locked)
1151  {
1152  	unsigned int fault_flags = 0;
1153  	vm_fault_t ret;
1154  
1155  	if (flags & FOLL_NOFAULT)
1156  		return -EFAULT;
1157  	if (flags & FOLL_WRITE)
1158  		fault_flags |= FAULT_FLAG_WRITE;
1159  	if (flags & FOLL_REMOTE)
1160  		fault_flags |= FAULT_FLAG_REMOTE;
1161  	if (flags & FOLL_UNLOCKABLE) {
1162  		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1163  		/*
1164  		 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
1165  		 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
1166  		 * That's because some callers may not be prepared to
1167  		 * handle early exits caused by non-fatal signals.
1168  		 */
1169  		if (flags & FOLL_INTERRUPTIBLE)
1170  			fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
1171  	}
1172  	if (flags & FOLL_NOWAIT)
1173  		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
1174  	if (flags & FOLL_TRIED) {
1175  		/*
1176  		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
1177  		 * can co-exist
1178  		 */
1179  		fault_flags |= FAULT_FLAG_TRIED;
1180  	}
1181  	if (unshare) {
1182  		fault_flags |= FAULT_FLAG_UNSHARE;
1183  		/* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
1184  		VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
1185  	}
1186  
1187  	ret = handle_mm_fault(vma, address, fault_flags, NULL);
1188  
1189  	if (ret & VM_FAULT_COMPLETED) {
1190  		/*
1191  		 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
1192  		 * mmap lock in the page fault handler. Sanity check this.
1193  		 */
1194  		WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
1195  		*locked = 0;
1196  
1197  		/*
1198  		 * We should do the same as VM_FAULT_RETRY, but let's not
1199  		 * return -EBUSY since that's not reflecting the reality of
1200  		 * what has happened - we've just fully completed a page
1201  		 * fault, with the mmap lock released.  Use -EAGAIN to show
1202  		 * that we want to take the mmap lock _again_.
1203  		 */
1204  		return -EAGAIN;
1205  	}
1206  
1207  	if (ret & VM_FAULT_ERROR) {
1208  		int err = vm_fault_to_errno(ret, flags);
1209  
1210  		if (err)
1211  			return err;
1212  		BUG();
1213  	}
1214  
1215  	if (ret & VM_FAULT_RETRY) {
1216  		if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
1217  			*locked = 0;
1218  		return -EBUSY;
1219  	}
1220  
1221  	return 0;
1222  }
1223  
1224  /*
1225   * Writing to file-backed mappings which require folio dirty tracking using GUP
1226   * is a fundamentally broken operation, as kernel write access to GUP mappings
1227   * do not adhere to the semantics expected by a file system.
1228   *
1229   * Consider the following scenario:-
1230   *
1231   * 1. A folio is written to via GUP which write-faults the memory, notifying
1232   *    the file system and dirtying the folio.
1233   * 2. Later, writeback is triggered, resulting in the folio being cleaned and
1234   *    the PTE being marked read-only.
1235   * 3. The GUP caller writes to the folio, as it is mapped read/write via the
1236   *    direct mapping.
1237   * 4. The GUP caller, now done with the page, unpins it and sets it dirty
1238   *    (though it does not have to).
1239   *
1240   * This results in both data being written to a folio without writenotify, and
1241   * the folio being dirtied unexpectedly (if the caller decides to do so).
1242   */
writable_file_mapping_allowed(struct vm_area_struct * vma,unsigned long gup_flags)1243  static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
1244  					  unsigned long gup_flags)
1245  {
1246  	/*
1247  	 * If we aren't pinning then no problematic write can occur. A long term
1248  	 * pin is the most egregious case so this is the case we disallow.
1249  	 */
1250  	if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
1251  	    (FOLL_PIN | FOLL_LONGTERM))
1252  		return true;
1253  
1254  	/*
1255  	 * If the VMA does not require dirty tracking then no problematic write
1256  	 * can occur either.
1257  	 */
1258  	return !vma_needs_dirty_tracking(vma);
1259  }
1260  
check_vma_flags(struct vm_area_struct * vma,unsigned long gup_flags)1261  static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
1262  {
1263  	vm_flags_t vm_flags = vma->vm_flags;
1264  	int write = (gup_flags & FOLL_WRITE);
1265  	int foreign = (gup_flags & FOLL_REMOTE);
1266  	bool vma_anon = vma_is_anonymous(vma);
1267  
1268  	if (vm_flags & (VM_IO | VM_PFNMAP))
1269  		return -EFAULT;
1270  
1271  	if ((gup_flags & FOLL_ANON) && !vma_anon)
1272  		return -EFAULT;
1273  
1274  	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
1275  		return -EOPNOTSUPP;
1276  
1277  	if (vma_is_secretmem(vma))
1278  		return -EFAULT;
1279  
1280  	if (write) {
1281  		if (!vma_anon &&
1282  		    !writable_file_mapping_allowed(vma, gup_flags))
1283  			return -EFAULT;
1284  
1285  		if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
1286  			if (!(gup_flags & FOLL_FORCE))
1287  				return -EFAULT;
1288  			/* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
1289  			if (is_vm_hugetlb_page(vma))
1290  				return -EFAULT;
1291  			/*
1292  			 * We used to let the write,force case do COW in a
1293  			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
1294  			 * set a breakpoint in a read-only mapping of an
1295  			 * executable, without corrupting the file (yet only
1296  			 * when that file had been opened for writing!).
1297  			 * Anon pages in shared mappings are surprising: now
1298  			 * just reject it.
1299  			 */
1300  			if (!is_cow_mapping(vm_flags))
1301  				return -EFAULT;
1302  		}
1303  	} else if (!(vm_flags & VM_READ)) {
1304  		if (!(gup_flags & FOLL_FORCE))
1305  			return -EFAULT;
1306  		/*
1307  		 * Is there actually any vma we can reach here which does not
1308  		 * have VM_MAYREAD set?
1309  		 */
1310  		if (!(vm_flags & VM_MAYREAD))
1311  			return -EFAULT;
1312  	}
1313  	/*
1314  	 * gups are always data accesses, not instruction
1315  	 * fetches, so execute=false here
1316  	 */
1317  	if (!arch_vma_access_permitted(vma, write, false, foreign))
1318  		return -EFAULT;
1319  	return 0;
1320  }
1321  
1322  /*
1323   * This is "vma_lookup()", but with a warning if we would have
1324   * historically expanded the stack in the GUP code.
1325   */
gup_vma_lookup(struct mm_struct * mm,unsigned long addr)1326  static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
1327  	 unsigned long addr)
1328  {
1329  #ifdef CONFIG_STACK_GROWSUP
1330  	return vma_lookup(mm, addr);
1331  #else
1332  	static volatile unsigned long next_warn;
1333  	struct vm_area_struct *vma;
1334  	unsigned long now, next;
1335  
1336  	vma = find_vma(mm, addr);
1337  	if (!vma || (addr >= vma->vm_start))
1338  		return vma;
1339  
1340  	/* Only warn for half-way relevant accesses */
1341  	if (!(vma->vm_flags & VM_GROWSDOWN))
1342  		return NULL;
1343  	if (vma->vm_start - addr > 65536)
1344  		return NULL;
1345  
1346  	/* Let's not warn more than once an hour.. */
1347  	now = jiffies; next = next_warn;
1348  	if (next && time_before(now, next))
1349  		return NULL;
1350  	next_warn = now + 60*60*HZ;
1351  
1352  	/* Let people know things may have changed. */
1353  	pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
1354  		current->comm, task_pid_nr(current),
1355  		vma->vm_start, vma->vm_end, addr);
1356  	dump_stack();
1357  	return NULL;
1358  #endif
1359  }
1360  
1361  /**
1362   * __get_user_pages() - pin user pages in memory
1363   * @mm:		mm_struct of target mm
1364   * @start:	starting user address
1365   * @nr_pages:	number of pages from start to pin
1366   * @gup_flags:	flags modifying pin behaviour
1367   * @pages:	array that receives pointers to the pages pinned.
1368   *		Should be at least nr_pages long. Or NULL, if caller
1369   *		only intends to ensure the pages are faulted in.
1370   * @locked:     whether we're still with the mmap_lock held
1371   *
1372   * Returns either number of pages pinned (which may be less than the
1373   * number requested), or an error. Details about the return value:
1374   *
1375   * -- If nr_pages is 0, returns 0.
1376   * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1377   * -- If nr_pages is >0, and some pages were pinned, returns the number of
1378   *    pages pinned. Again, this may be less than nr_pages.
1379   * -- 0 return value is possible when the fault would need to be retried.
1380   *
1381   * The caller is responsible for releasing returned @pages, via put_page().
1382   *
1383   * Must be called with mmap_lock held.  It may be released.  See below.
1384   *
1385   * __get_user_pages walks a process's page tables and takes a reference to
1386   * each struct page that each user address corresponds to at a given
1387   * instant. That is, it takes the page that would be accessed if a user
1388   * thread accesses the given user virtual address at that instant.
1389   *
1390   * This does not guarantee that the page exists in the user mappings when
1391   * __get_user_pages returns, and there may even be a completely different
1392   * page there in some cases (eg. if mmapped pagecache has been invalidated
1393   * and subsequently re-faulted). However it does guarantee that the page
1394   * won't be freed completely. And mostly callers simply care that the page
1395   * contains data that was valid *at some point in time*. Typically, an IO
1396   * or similar operation cannot guarantee anything stronger anyway because
1397   * locks can't be held over the syscall boundary.
1398   *
1399   * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1400   * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1401   * appropriate) must be called after the page is finished with, and
1402   * before put_page is called.
1403   *
1404   * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
1405   * be released. If this happens *@locked will be set to 0 on return.
1406   *
1407   * A caller using such a combination of @gup_flags must therefore hold the
1408   * mmap_lock for reading only, and recognize when it's been released. Otherwise,
1409   * it must be held for either reading or writing and will not be released.
1410   *
1411   * In most cases, get_user_pages or get_user_pages_fast should be used
1412   * instead of __get_user_pages. __get_user_pages should be used only if
1413   * you need some special @gup_flags.
1414   */
__get_user_pages(struct mm_struct * mm,unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages,int * locked)1415  static long __get_user_pages(struct mm_struct *mm,
1416  		unsigned long start, unsigned long nr_pages,
1417  		unsigned int gup_flags, struct page **pages,
1418  		int *locked)
1419  {
1420  	long ret = 0, i = 0;
1421  	struct vm_area_struct *vma = NULL;
1422  	struct follow_page_context ctx = { NULL };
1423  
1424  	if (!nr_pages)
1425  		return 0;
1426  
1427  	start = untagged_addr_remote(mm, start);
1428  
1429  	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1430  
1431  	do {
1432  		struct page *page;
1433  		unsigned int page_increm;
1434  
1435  		/* first iteration or cross vma bound */
1436  		if (!vma || start >= vma->vm_end) {
1437  			/*
1438  			 * MADV_POPULATE_(READ|WRITE) wants to handle VMA
1439  			 * lookups+error reporting differently.
1440  			 */
1441  			if (gup_flags & FOLL_MADV_POPULATE) {
1442  				vma = vma_lookup(mm, start);
1443  				if (!vma) {
1444  					ret = -ENOMEM;
1445  					goto out;
1446  				}
1447  				if (check_vma_flags(vma, gup_flags)) {
1448  					ret = -EINVAL;
1449  					goto out;
1450  				}
1451  				goto retry;
1452  			}
1453  			vma = gup_vma_lookup(mm, start);
1454  			if (!vma && in_gate_area(mm, start)) {
1455  				ret = get_gate_page(mm, start & PAGE_MASK,
1456  						gup_flags, &vma,
1457  						pages ? &page : NULL);
1458  				if (ret)
1459  					goto out;
1460  				ctx.page_mask = 0;
1461  				goto next_page;
1462  			}
1463  
1464  			if (!vma) {
1465  				ret = -EFAULT;
1466  				goto out;
1467  			}
1468  			ret = check_vma_flags(vma, gup_flags);
1469  			if (ret)
1470  				goto out;
1471  		}
1472  retry:
1473  		/*
1474  		 * If we have a pending SIGKILL, don't keep faulting pages and
1475  		 * potentially allocating memory.
1476  		 */
1477  		if (fatal_signal_pending(current)) {
1478  			ret = -EINTR;
1479  			goto out;
1480  		}
1481  		cond_resched();
1482  
1483  		page = follow_page_mask(vma, start, gup_flags, &ctx);
1484  		if (!page || PTR_ERR(page) == -EMLINK) {
1485  			ret = faultin_page(vma, start, gup_flags,
1486  					   PTR_ERR(page) == -EMLINK, locked);
1487  			switch (ret) {
1488  			case 0:
1489  				goto retry;
1490  			case -EBUSY:
1491  			case -EAGAIN:
1492  				ret = 0;
1493  				fallthrough;
1494  			case -EFAULT:
1495  			case -ENOMEM:
1496  			case -EHWPOISON:
1497  				goto out;
1498  			}
1499  			BUG();
1500  		} else if (PTR_ERR(page) == -EEXIST) {
1501  			/*
1502  			 * Proper page table entry exists, but no corresponding
1503  			 * struct page. If the caller expects **pages to be
1504  			 * filled in, bail out now, because that can't be done
1505  			 * for this page.
1506  			 */
1507  			if (pages) {
1508  				ret = PTR_ERR(page);
1509  				goto out;
1510  			}
1511  		} else if (IS_ERR(page)) {
1512  			ret = PTR_ERR(page);
1513  			goto out;
1514  		}
1515  next_page:
1516  		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
1517  		if (page_increm > nr_pages)
1518  			page_increm = nr_pages;
1519  
1520  		if (pages) {
1521  			struct page *subpage;
1522  			unsigned int j;
1523  
1524  			/*
1525  			 * This must be a large folio (and doesn't need to
1526  			 * be the whole folio; it can be part of it), do
1527  			 * the refcount work for all the subpages too.
1528  			 *
1529  			 * NOTE: here the page may not be the head page
1530  			 * e.g. when start addr is not thp-size aligned.
1531  			 * try_grab_folio() should have taken care of tail
1532  			 * pages.
1533  			 */
1534  			if (page_increm > 1) {
1535  				struct folio *folio = page_folio(page);
1536  
1537  				/*
1538  				 * Since we already hold refcount on the
1539  				 * large folio, this should never fail.
1540  				 */
1541  				if (try_grab_folio(folio, page_increm - 1,
1542  						   gup_flags)) {
1543  					/*
1544  					 * Release the 1st page ref if the
1545  					 * folio is problematic, fail hard.
1546  					 */
1547  					gup_put_folio(folio, 1, gup_flags);
1548  					ret = -EFAULT;
1549  					goto out;
1550  				}
1551  			}
1552  
1553  			for (j = 0; j < page_increm; j++) {
1554  				subpage = nth_page(page, j);
1555  				pages[i + j] = subpage;
1556  				flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
1557  				flush_dcache_page(subpage);
1558  			}
1559  		}
1560  
1561  		i += page_increm;
1562  		start += page_increm * PAGE_SIZE;
1563  		nr_pages -= page_increm;
1564  	} while (nr_pages);
1565  out:
1566  	if (ctx.pgmap)
1567  		put_dev_pagemap(ctx.pgmap);
1568  	return i ? i : ret;
1569  }
1570  
vma_permits_fault(struct vm_area_struct * vma,unsigned int fault_flags)1571  static bool vma_permits_fault(struct vm_area_struct *vma,
1572  			      unsigned int fault_flags)
1573  {
1574  	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
1575  	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
1576  	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
1577  
1578  	if (!(vm_flags & vma->vm_flags))
1579  		return false;
1580  
1581  	/*
1582  	 * The architecture might have a hardware protection
1583  	 * mechanism other than read/write that can deny access.
1584  	 *
1585  	 * gup always represents data access, not instruction
1586  	 * fetches, so execute=false here:
1587  	 */
1588  	if (!arch_vma_access_permitted(vma, write, false, foreign))
1589  		return false;
1590  
1591  	return true;
1592  }
1593  
1594  /**
1595   * fixup_user_fault() - manually resolve a user page fault
1596   * @mm:		mm_struct of target mm
1597   * @address:	user address
1598   * @fault_flags:flags to pass down to handle_mm_fault()
1599   * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
1600   *		does not allow retry. If NULL, the caller must guarantee
1601   *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1602   *
1603   * This is meant to be called in the specific scenario where for locking reasons
1604   * we try to access user memory in atomic context (within a pagefault_disable()
1605   * section), this returns -EFAULT, and we want to resolve the user fault before
1606   * trying again.
1607   *
1608   * Typically this is meant to be used by the futex code.
1609   *
1610   * The main difference with get_user_pages() is that this function will
1611   * unconditionally call handle_mm_fault() which will in turn perform all the
1612   * necessary SW fixup of the dirty and young bits in the PTE, while
1613   * get_user_pages() only guarantees to update these in the struct page.
1614   *
1615   * This is important for some architectures where those bits also gate the
1616   * access permission to the page because they are maintained in software.  On
1617   * such architectures, gup() will not be enough to make a subsequent access
1618   * succeed.
1619   *
1620   * This function will not return with an unlocked mmap_lock. So it has not the
1621   * same semantics wrt the @mm->mmap_lock as does filemap_fault().
1622   */
fixup_user_fault(struct mm_struct * mm,unsigned long address,unsigned int fault_flags,bool * unlocked)1623  int fixup_user_fault(struct mm_struct *mm,
1624  		     unsigned long address, unsigned int fault_flags,
1625  		     bool *unlocked)
1626  {
1627  	struct vm_area_struct *vma;
1628  	vm_fault_t ret;
1629  
1630  	address = untagged_addr_remote(mm, address);
1631  
1632  	if (unlocked)
1633  		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1634  
1635  retry:
1636  	vma = gup_vma_lookup(mm, address);
1637  	if (!vma)
1638  		return -EFAULT;
1639  
1640  	if (!vma_permits_fault(vma, fault_flags))
1641  		return -EFAULT;
1642  
1643  	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1644  	    fatal_signal_pending(current))
1645  		return -EINTR;
1646  
1647  	ret = handle_mm_fault(vma, address, fault_flags, NULL);
1648  
1649  	if (ret & VM_FAULT_COMPLETED) {
1650  		/*
1651  		 * NOTE: it's a pity that we need to retake the lock here
1652  		 * to pair with the unlock() in the callers. Ideally we
1653  		 * could tell the callers so they do not need to unlock.
1654  		 */
1655  		mmap_read_lock(mm);
1656  		*unlocked = true;
1657  		return 0;
1658  	}
1659  
1660  	if (ret & VM_FAULT_ERROR) {
1661  		int err = vm_fault_to_errno(ret, 0);
1662  
1663  		if (err)
1664  			return err;
1665  		BUG();
1666  	}
1667  
1668  	if (ret & VM_FAULT_RETRY) {
1669  		mmap_read_lock(mm);
1670  		*unlocked = true;
1671  		fault_flags |= FAULT_FLAG_TRIED;
1672  		goto retry;
1673  	}
1674  
1675  	return 0;
1676  }
1677  EXPORT_SYMBOL_GPL(fixup_user_fault);
1678  
1679  /*
1680   * GUP always responds to fatal signals.  When FOLL_INTERRUPTIBLE is
1681   * specified, it'll also respond to generic signals.  The caller of GUP
1682   * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
1683   */
gup_signal_pending(unsigned int flags)1684  static bool gup_signal_pending(unsigned int flags)
1685  {
1686  	if (fatal_signal_pending(current))
1687  		return true;
1688  
1689  	if (!(flags & FOLL_INTERRUPTIBLE))
1690  		return false;
1691  
1692  	return signal_pending(current);
1693  }
1694  
1695  /*
1696   * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
1697   * the caller. This function may drop the mmap_lock. If it does so, then it will
1698   * set (*locked = 0).
1699   *
1700   * (*locked == 0) means that the caller expects this function to acquire and
1701   * drop the mmap_lock. Therefore, the value of *locked will still be zero when
1702   * the function returns, even though it may have changed temporarily during
1703   * function execution.
1704   *
1705   * Please note that this function, unlike __get_user_pages(), will not return 0
1706   * for nr_pages > 0, unless FOLL_NOWAIT is used.
1707   */
__get_user_pages_locked(struct mm_struct * mm,unsigned long start,unsigned long nr_pages,struct page ** pages,int * locked,unsigned int flags)1708  static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
1709  						unsigned long start,
1710  						unsigned long nr_pages,
1711  						struct page **pages,
1712  						int *locked,
1713  						unsigned int flags)
1714  {
1715  	long ret, pages_done;
1716  	bool must_unlock = false;
1717  
1718  	if (!nr_pages)
1719  		return 0;
1720  
1721  	/*
1722  	 * The internal caller expects GUP to manage the lock internally and the
1723  	 * lock must be released when this returns.
1724  	 */
1725  	if (!*locked) {
1726  		if (mmap_read_lock_killable(mm))
1727  			return -EAGAIN;
1728  		must_unlock = true;
1729  		*locked = 1;
1730  	}
1731  	else
1732  		mmap_assert_locked(mm);
1733  
1734  	if (flags & FOLL_PIN)
1735  		mm_set_has_pinned_flag(&mm->flags);
1736  
1737  	/*
1738  	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1739  	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1740  	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
1741  	 * for FOLL_GET, not for the newer FOLL_PIN.
1742  	 *
1743  	 * FOLL_PIN always expects pages to be non-null, but no need to assert
1744  	 * that here, as any failures will be obvious enough.
1745  	 */
1746  	if (pages && !(flags & FOLL_PIN))
1747  		flags |= FOLL_GET;
1748  
1749  	pages_done = 0;
1750  	for (;;) {
1751  		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1752  				       locked);
1753  		if (!(flags & FOLL_UNLOCKABLE)) {
1754  			/* VM_FAULT_RETRY couldn't trigger, bypass */
1755  			pages_done = ret;
1756  			break;
1757  		}
1758  
1759  		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
1760  		if (!*locked) {
1761  			BUG_ON(ret < 0);
1762  			BUG_ON(ret >= nr_pages);
1763  		}
1764  
1765  		if (ret > 0) {
1766  			nr_pages -= ret;
1767  			pages_done += ret;
1768  			if (!nr_pages)
1769  				break;
1770  		}
1771  		if (*locked) {
1772  			/*
1773  			 * VM_FAULT_RETRY didn't trigger or it was a
1774  			 * FOLL_NOWAIT.
1775  			 */
1776  			if (!pages_done)
1777  				pages_done = ret;
1778  			break;
1779  		}
1780  		/*
1781  		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1782  		 * For the prefault case (!pages) we only update counts.
1783  		 */
1784  		if (likely(pages))
1785  			pages += ret;
1786  		start += ret << PAGE_SHIFT;
1787  
1788  		/* The lock was temporarily dropped, so we must unlock later */
1789  		must_unlock = true;
1790  
1791  retry:
1792  		/*
1793  		 * Repeat on the address that fired VM_FAULT_RETRY
1794  		 * with both FAULT_FLAG_ALLOW_RETRY and
1795  		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
1796  		 * by fatal signals of even common signals, depending on
1797  		 * the caller's request. So we need to check it before we
1798  		 * start trying again otherwise it can loop forever.
1799  		 */
1800  		if (gup_signal_pending(flags)) {
1801  			if (!pages_done)
1802  				pages_done = -EINTR;
1803  			break;
1804  		}
1805  
1806  		ret = mmap_read_lock_killable(mm);
1807  		if (ret) {
1808  			BUG_ON(ret > 0);
1809  			if (!pages_done)
1810  				pages_done = ret;
1811  			break;
1812  		}
1813  
1814  		*locked = 1;
1815  		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1816  				       pages, locked);
1817  		if (!*locked) {
1818  			/* Continue to retry until we succeeded */
1819  			BUG_ON(ret != 0);
1820  			goto retry;
1821  		}
1822  		if (ret != 1) {
1823  			BUG_ON(ret > 1);
1824  			if (!pages_done)
1825  				pages_done = ret;
1826  			break;
1827  		}
1828  		nr_pages--;
1829  		pages_done++;
1830  		if (!nr_pages)
1831  			break;
1832  		if (likely(pages))
1833  			pages++;
1834  		start += PAGE_SIZE;
1835  	}
1836  	if (must_unlock && *locked) {
1837  		/*
1838  		 * We either temporarily dropped the lock, or the caller
1839  		 * requested that we both acquire and drop the lock. Either way,
1840  		 * we must now unlock, and notify the caller of that state.
1841  		 */
1842  		mmap_read_unlock(mm);
1843  		*locked = 0;
1844  	}
1845  
1846  	/*
1847  	 * Failing to pin anything implies something has gone wrong (except when
1848  	 * FOLL_NOWAIT is specified).
1849  	 */
1850  	if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT)))
1851  		return -EFAULT;
1852  
1853  	return pages_done;
1854  }
1855  
1856  /**
1857   * populate_vma_page_range() -  populate a range of pages in the vma.
1858   * @vma:   target vma
1859   * @start: start address
1860   * @end:   end address
1861   * @locked: whether the mmap_lock is still held
1862   *
1863   * This takes care of mlocking the pages too if VM_LOCKED is set.
1864   *
1865   * Return either number of pages pinned in the vma, or a negative error
1866   * code on error.
1867   *
1868   * vma->vm_mm->mmap_lock must be held.
1869   *
1870   * If @locked is NULL, it may be held for read or write and will
1871   * be unperturbed.
1872   *
1873   * If @locked is non-NULL, it must held for read only and may be
1874   * released.  If it's released, *@locked will be set to 0.
1875   */
populate_vma_page_range(struct vm_area_struct * vma,unsigned long start,unsigned long end,int * locked)1876  long populate_vma_page_range(struct vm_area_struct *vma,
1877  		unsigned long start, unsigned long end, int *locked)
1878  {
1879  	struct mm_struct *mm = vma->vm_mm;
1880  	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1881  	int local_locked = 1;
1882  	int gup_flags;
1883  	long ret;
1884  
1885  	VM_BUG_ON(!PAGE_ALIGNED(start));
1886  	VM_BUG_ON(!PAGE_ALIGNED(end));
1887  	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1888  	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1889  	mmap_assert_locked(mm);
1890  
1891  	/*
1892  	 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
1893  	 * faultin_page() to break COW, so it has no work to do here.
1894  	 */
1895  	if (vma->vm_flags & VM_LOCKONFAULT)
1896  		return nr_pages;
1897  
1898  	/* ... similarly, we've never faulted in PROT_NONE pages */
1899  	if (!vma_is_accessible(vma))
1900  		return -EFAULT;
1901  
1902  	gup_flags = FOLL_TOUCH;
1903  	/*
1904  	 * We want to touch writable mappings with a write fault in order
1905  	 * to break COW, except for shared mappings because these don't COW
1906  	 * and we would not want to dirty them for nothing.
1907  	 *
1908  	 * Otherwise, do a read fault, and use FOLL_FORCE in case it's not
1909  	 * readable (ie write-only or executable).
1910  	 */
1911  	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1912  		gup_flags |= FOLL_WRITE;
1913  	else
1914  		gup_flags |= FOLL_FORCE;
1915  
1916  	if (locked)
1917  		gup_flags |= FOLL_UNLOCKABLE;
1918  
1919  	/*
1920  	 * We made sure addr is within a VMA, so the following will
1921  	 * not result in a stack expansion that recurses back here.
1922  	 */
1923  	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
1924  			       NULL, locked ? locked : &local_locked);
1925  	lru_add_drain();
1926  	return ret;
1927  }
1928  
1929  /*
1930   * faultin_page_range() - populate (prefault) page tables inside the
1931   *			  given range readable/writable
1932   *
1933   * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1934   *
1935   * @mm: the mm to populate page tables in
1936   * @start: start address
1937   * @end: end address
1938   * @write: whether to prefault readable or writable
1939   * @locked: whether the mmap_lock is still held
1940   *
1941   * Returns either number of processed pages in the MM, or a negative error
1942   * code on error (see __get_user_pages()). Note that this function reports
1943   * errors related to VMAs, such as incompatible mappings, as expected by
1944   * MADV_POPULATE_(READ|WRITE).
1945   *
1946   * The range must be page-aligned.
1947   *
1948   * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
1949   */
faultin_page_range(struct mm_struct * mm,unsigned long start,unsigned long end,bool write,int * locked)1950  long faultin_page_range(struct mm_struct *mm, unsigned long start,
1951  			unsigned long end, bool write, int *locked)
1952  {
1953  	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1954  	int gup_flags;
1955  	long ret;
1956  
1957  	VM_BUG_ON(!PAGE_ALIGNED(start));
1958  	VM_BUG_ON(!PAGE_ALIGNED(end));
1959  	mmap_assert_locked(mm);
1960  
1961  	/*
1962  	 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1963  	 *	       the page dirty with FOLL_WRITE -- which doesn't make a
1964  	 *	       difference with !FOLL_FORCE, because the page is writable
1965  	 *	       in the page table.
1966  	 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1967  	 *		  a poisoned page.
1968  	 * !FOLL_FORCE: Require proper access permissions.
1969  	 */
1970  	gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
1971  		    FOLL_MADV_POPULATE;
1972  	if (write)
1973  		gup_flags |= FOLL_WRITE;
1974  
1975  	ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
1976  				      gup_flags);
1977  	lru_add_drain();
1978  	return ret;
1979  }
1980  
1981  /*
1982   * __mm_populate - populate and/or mlock pages within a range of address space.
1983   *
1984   * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1985   * flags. VMAs must be already marked with the desired vm_flags, and
1986   * mmap_lock must not be held.
1987   */
__mm_populate(unsigned long start,unsigned long len,int ignore_errors)1988  int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1989  {
1990  	struct mm_struct *mm = current->mm;
1991  	unsigned long end, nstart, nend;
1992  	struct vm_area_struct *vma = NULL;
1993  	int locked = 0;
1994  	long ret = 0;
1995  
1996  	end = start + len;
1997  
1998  	for (nstart = start; nstart < end; nstart = nend) {
1999  		/*
2000  		 * We want to fault in pages for [nstart; end) address range.
2001  		 * Find first corresponding VMA.
2002  		 */
2003  		if (!locked) {
2004  			locked = 1;
2005  			mmap_read_lock(mm);
2006  			vma = find_vma_intersection(mm, nstart, end);
2007  		} else if (nstart >= vma->vm_end)
2008  			vma = find_vma_intersection(mm, vma->vm_end, end);
2009  
2010  		if (!vma)
2011  			break;
2012  		/*
2013  		 * Set [nstart; nend) to intersection of desired address
2014  		 * range with the first VMA. Also, skip undesirable VMA types.
2015  		 */
2016  		nend = min(end, vma->vm_end);
2017  		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
2018  			continue;
2019  		if (nstart < vma->vm_start)
2020  			nstart = vma->vm_start;
2021  		/*
2022  		 * Now fault in a range of pages. populate_vma_page_range()
2023  		 * double checks the vma flags, so that it won't mlock pages
2024  		 * if the vma was already munlocked.
2025  		 */
2026  		ret = populate_vma_page_range(vma, nstart, nend, &locked);
2027  		if (ret < 0) {
2028  			if (ignore_errors) {
2029  				ret = 0;
2030  				continue;	/* continue at next VMA */
2031  			}
2032  			break;
2033  		}
2034  		nend = nstart + ret * PAGE_SIZE;
2035  		ret = 0;
2036  	}
2037  	if (locked)
2038  		mmap_read_unlock(mm);
2039  	return ret;	/* 0 or negative error code */
2040  }
2041  #else /* CONFIG_MMU */
__get_user_pages_locked(struct mm_struct * mm,unsigned long start,unsigned long nr_pages,struct page ** pages,int * locked,unsigned int foll_flags)2042  static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
2043  		unsigned long nr_pages, struct page **pages,
2044  		int *locked, unsigned int foll_flags)
2045  {
2046  	struct vm_area_struct *vma;
2047  	bool must_unlock = false;
2048  	unsigned long vm_flags;
2049  	long i;
2050  
2051  	if (!nr_pages)
2052  		return 0;
2053  
2054  	/*
2055  	 * The internal caller expects GUP to manage the lock internally and the
2056  	 * lock must be released when this returns.
2057  	 */
2058  	if (!*locked) {
2059  		if (mmap_read_lock_killable(mm))
2060  			return -EAGAIN;
2061  		must_unlock = true;
2062  		*locked = 1;
2063  	}
2064  
2065  	/* calculate required read or write permissions.
2066  	 * If FOLL_FORCE is set, we only require the "MAY" flags.
2067  	 */
2068  	vm_flags  = (foll_flags & FOLL_WRITE) ?
2069  			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
2070  	vm_flags &= (foll_flags & FOLL_FORCE) ?
2071  			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
2072  
2073  	for (i = 0; i < nr_pages; i++) {
2074  		vma = find_vma(mm, start);
2075  		if (!vma)
2076  			break;
2077  
2078  		/* protect what we can, including chardevs */
2079  		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
2080  		    !(vm_flags & vma->vm_flags))
2081  			break;
2082  
2083  		if (pages) {
2084  			pages[i] = virt_to_page((void *)start);
2085  			if (pages[i])
2086  				get_page(pages[i]);
2087  		}
2088  
2089  		start = (start + PAGE_SIZE) & PAGE_MASK;
2090  	}
2091  
2092  	if (must_unlock && *locked) {
2093  		mmap_read_unlock(mm);
2094  		*locked = 0;
2095  	}
2096  
2097  	return i ? : -EFAULT;
2098  }
2099  #endif /* !CONFIG_MMU */
2100  
2101  /**
2102   * fault_in_writeable - fault in userspace address range for writing
2103   * @uaddr: start of address range
2104   * @size: size of address range
2105   *
2106   * Returns the number of bytes not faulted in (like copy_to_user() and
2107   * copy_from_user()).
2108   */
fault_in_writeable(char __user * uaddr,size_t size)2109  size_t fault_in_writeable(char __user *uaddr, size_t size)
2110  {
2111  	char __user *start = uaddr, *end;
2112  
2113  	if (unlikely(size == 0))
2114  		return 0;
2115  	if (!user_write_access_begin(uaddr, size))
2116  		return size;
2117  	if (!PAGE_ALIGNED(uaddr)) {
2118  		unsafe_put_user(0, uaddr, out);
2119  		uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
2120  	}
2121  	end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
2122  	if (unlikely(end < start))
2123  		end = NULL;
2124  	while (uaddr != end) {
2125  		unsafe_put_user(0, uaddr, out);
2126  		uaddr += PAGE_SIZE;
2127  	}
2128  
2129  out:
2130  	user_write_access_end();
2131  	if (size > uaddr - start)
2132  		return size - (uaddr - start);
2133  	return 0;
2134  }
2135  EXPORT_SYMBOL(fault_in_writeable);
2136  
2137  /**
2138   * fault_in_subpage_writeable - fault in an address range for writing
2139   * @uaddr: start of address range
2140   * @size: size of address range
2141   *
2142   * Fault in a user address range for writing while checking for permissions at
2143   * sub-page granularity (e.g. arm64 MTE). This function should be used when
2144   * the caller cannot guarantee forward progress of a copy_to_user() loop.
2145   *
2146   * Returns the number of bytes not faulted in (like copy_to_user() and
2147   * copy_from_user()).
2148   */
fault_in_subpage_writeable(char __user * uaddr,size_t size)2149  size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
2150  {
2151  	size_t faulted_in;
2152  
2153  	/*
2154  	 * Attempt faulting in at page granularity first for page table
2155  	 * permission checking. The arch-specific probe_subpage_writeable()
2156  	 * functions may not check for this.
2157  	 */
2158  	faulted_in = size - fault_in_writeable(uaddr, size);
2159  	if (faulted_in)
2160  		faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
2161  
2162  	return size - faulted_in;
2163  }
2164  EXPORT_SYMBOL(fault_in_subpage_writeable);
2165  
2166  /*
2167   * fault_in_safe_writeable - fault in an address range for writing
2168   * @uaddr: start of address range
2169   * @size: length of address range
2170   *
2171   * Faults in an address range for writing.  This is primarily useful when we
2172   * already know that some or all of the pages in the address range aren't in
2173   * memory.
2174   *
2175   * Unlike fault_in_writeable(), this function is non-destructive.
2176   *
2177   * Note that we don't pin or otherwise hold the pages referenced that we fault
2178   * in.  There's no guarantee that they'll stay in memory for any duration of
2179   * time.
2180   *
2181   * Returns the number of bytes not faulted in, like copy_to_user() and
2182   * copy_from_user().
2183   */
fault_in_safe_writeable(const char __user * uaddr,size_t size)2184  size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
2185  {
2186  	unsigned long start = (unsigned long)uaddr, end;
2187  	struct mm_struct *mm = current->mm;
2188  	bool unlocked = false;
2189  
2190  	if (unlikely(size == 0))
2191  		return 0;
2192  	end = PAGE_ALIGN(start + size);
2193  	if (end < start)
2194  		end = 0;
2195  
2196  	mmap_read_lock(mm);
2197  	do {
2198  		if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
2199  			break;
2200  		start = (start + PAGE_SIZE) & PAGE_MASK;
2201  	} while (start != end);
2202  	mmap_read_unlock(mm);
2203  
2204  	if (size > (unsigned long)uaddr - start)
2205  		return size - ((unsigned long)uaddr - start);
2206  	return 0;
2207  }
2208  EXPORT_SYMBOL(fault_in_safe_writeable);
2209  
2210  /**
2211   * fault_in_readable - fault in userspace address range for reading
2212   * @uaddr: start of user address range
2213   * @size: size of user address range
2214   *
2215   * Returns the number of bytes not faulted in (like copy_to_user() and
2216   * copy_from_user()).
2217   */
fault_in_readable(const char __user * uaddr,size_t size)2218  size_t fault_in_readable(const char __user *uaddr, size_t size)
2219  {
2220  	const char __user *start = uaddr, *end;
2221  	volatile char c;
2222  
2223  	if (unlikely(size == 0))
2224  		return 0;
2225  	if (!user_read_access_begin(uaddr, size))
2226  		return size;
2227  	if (!PAGE_ALIGNED(uaddr)) {
2228  		unsafe_get_user(c, uaddr, out);
2229  		uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
2230  	}
2231  	end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
2232  	if (unlikely(end < start))
2233  		end = NULL;
2234  	while (uaddr != end) {
2235  		unsafe_get_user(c, uaddr, out);
2236  		uaddr += PAGE_SIZE;
2237  	}
2238  
2239  out:
2240  	user_read_access_end();
2241  	(void)c;
2242  	if (size > uaddr - start)
2243  		return size - (uaddr - start);
2244  	return 0;
2245  }
2246  EXPORT_SYMBOL(fault_in_readable);
2247  
2248  /**
2249   * get_dump_page() - pin user page in memory while writing it to core dump
2250   * @addr: user address
2251   *
2252   * Returns struct page pointer of user page pinned for dump,
2253   * to be freed afterwards by put_page().
2254   *
2255   * Returns NULL on any kind of failure - a hole must then be inserted into
2256   * the corefile, to preserve alignment with its headers; and also returns
2257   * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
2258   * allowing a hole to be left in the corefile to save disk space.
2259   *
2260   * Called without mmap_lock (takes and releases the mmap_lock by itself).
2261   */
2262  #ifdef CONFIG_ELF_CORE
get_dump_page(unsigned long addr)2263  struct page *get_dump_page(unsigned long addr)
2264  {
2265  	struct page *page;
2266  	int locked = 0;
2267  	int ret;
2268  
2269  	ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
2270  				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
2271  	return (ret == 1) ? page : NULL;
2272  }
2273  #endif /* CONFIG_ELF_CORE */
2274  
2275  #ifdef CONFIG_MIGRATION
2276  
2277  /*
2278   * An array of either pages or folios ("pofs"). Although it may seem tempting to
2279   * avoid this complication, by simply interpreting a list of folios as a list of
2280   * pages, that approach won't work in the longer term, because eventually the
2281   * layouts of struct page and struct folio will become completely different.
2282   * Furthermore, this pof approach avoids excessive page_folio() calls.
2283   */
2284  struct pages_or_folios {
2285  	union {
2286  		struct page **pages;
2287  		struct folio **folios;
2288  		void **entries;
2289  	};
2290  	bool has_folios;
2291  	long nr_entries;
2292  };
2293  
pofs_get_folio(struct pages_or_folios * pofs,long i)2294  static struct folio *pofs_get_folio(struct pages_or_folios *pofs, long i)
2295  {
2296  	if (pofs->has_folios)
2297  		return pofs->folios[i];
2298  	return page_folio(pofs->pages[i]);
2299  }
2300  
pofs_clear_entry(struct pages_or_folios * pofs,long i)2301  static void pofs_clear_entry(struct pages_or_folios *pofs, long i)
2302  {
2303  	pofs->entries[i] = NULL;
2304  }
2305  
pofs_unpin(struct pages_or_folios * pofs)2306  static void pofs_unpin(struct pages_or_folios *pofs)
2307  {
2308  	if (pofs->has_folios)
2309  		unpin_folios(pofs->folios, pofs->nr_entries);
2310  	else
2311  		unpin_user_pages(pofs->pages, pofs->nr_entries);
2312  }
2313  
2314  /*
2315   * Returns the number of collected folios. Return value is always >= 0.
2316   */
collect_longterm_unpinnable_folios(struct list_head * movable_folio_list,struct pages_or_folios * pofs)2317  static unsigned long collect_longterm_unpinnable_folios(
2318  		struct list_head *movable_folio_list,
2319  		struct pages_or_folios *pofs)
2320  {
2321  	unsigned long i, collected = 0;
2322  	struct folio *prev_folio = NULL;
2323  	bool drain_allow = true;
2324  
2325  	for (i = 0; i < pofs->nr_entries; i++) {
2326  		struct folio *folio = pofs_get_folio(pofs, i);
2327  
2328  		if (folio == prev_folio)
2329  			continue;
2330  		prev_folio = folio;
2331  
2332  		if (folio_is_longterm_pinnable(folio))
2333  			continue;
2334  
2335  		collected++;
2336  
2337  		if (folio_is_device_coherent(folio))
2338  			continue;
2339  
2340  		if (folio_test_hugetlb(folio)) {
2341  			isolate_hugetlb(folio, movable_folio_list);
2342  			continue;
2343  		}
2344  
2345  		if (!folio_test_lru(folio) && drain_allow) {
2346  			lru_add_drain_all();
2347  			drain_allow = false;
2348  		}
2349  
2350  		if (!folio_isolate_lru(folio))
2351  			continue;
2352  
2353  		list_add_tail(&folio->lru, movable_folio_list);
2354  		node_stat_mod_folio(folio,
2355  				    NR_ISOLATED_ANON + folio_is_file_lru(folio),
2356  				    folio_nr_pages(folio));
2357  	}
2358  
2359  	return collected;
2360  }
2361  
2362  /*
2363   * Unpins all folios and migrates device coherent folios and movable_folio_list.
2364   * Returns -EAGAIN if all folios were successfully migrated or -errno for
2365   * failure (or partial success).
2366   */
2367  static int
migrate_longterm_unpinnable_folios(struct list_head * movable_folio_list,struct pages_or_folios * pofs)2368  migrate_longterm_unpinnable_folios(struct list_head *movable_folio_list,
2369  				   struct pages_or_folios *pofs)
2370  {
2371  	int ret;
2372  	unsigned long i;
2373  
2374  	for (i = 0; i < pofs->nr_entries; i++) {
2375  		struct folio *folio = pofs_get_folio(pofs, i);
2376  
2377  		if (folio_is_device_coherent(folio)) {
2378  			/*
2379  			 * Migration will fail if the folio is pinned, so
2380  			 * convert the pin on the source folio to a normal
2381  			 * reference.
2382  			 */
2383  			pofs_clear_entry(pofs, i);
2384  			folio_get(folio);
2385  			gup_put_folio(folio, 1, FOLL_PIN);
2386  
2387  			if (migrate_device_coherent_folio(folio)) {
2388  				ret = -EBUSY;
2389  				goto err;
2390  			}
2391  
2392  			continue;
2393  		}
2394  
2395  		/*
2396  		 * We can't migrate folios with unexpected references, so drop
2397  		 * the reference obtained by __get_user_pages_locked().
2398  		 * Migrating folios have been added to movable_folio_list after
2399  		 * calling folio_isolate_lru() which takes a reference so the
2400  		 * folio won't be freed if it's migrating.
2401  		 */
2402  		unpin_folio(folio);
2403  		pofs_clear_entry(pofs, i);
2404  	}
2405  
2406  	if (!list_empty(movable_folio_list)) {
2407  		struct migration_target_control mtc = {
2408  			.nid = NUMA_NO_NODE,
2409  			.gfp_mask = GFP_USER | __GFP_NOWARN,
2410  			.reason = MR_LONGTERM_PIN,
2411  		};
2412  
2413  		if (migrate_pages(movable_folio_list, alloc_migration_target,
2414  				  NULL, (unsigned long)&mtc, MIGRATE_SYNC,
2415  				  MR_LONGTERM_PIN, NULL)) {
2416  			ret = -ENOMEM;
2417  			goto err;
2418  		}
2419  	}
2420  
2421  	putback_movable_pages(movable_folio_list);
2422  
2423  	return -EAGAIN;
2424  
2425  err:
2426  	pofs_unpin(pofs);
2427  	putback_movable_pages(movable_folio_list);
2428  
2429  	return ret;
2430  }
2431  
2432  static long
check_and_migrate_movable_pages_or_folios(struct pages_or_folios * pofs)2433  check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
2434  {
2435  	LIST_HEAD(movable_folio_list);
2436  	unsigned long collected;
2437  
2438  	collected = collect_longterm_unpinnable_folios(&movable_folio_list,
2439  						       pofs);
2440  	if (!collected)
2441  		return 0;
2442  
2443  	return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
2444  }
2445  
2446  /*
2447   * Check whether all folios are *allowed* to be pinned indefinitely (long term).
2448   * Rather confusingly, all folios in the range are required to be pinned via
2449   * FOLL_PIN, before calling this routine.
2450   *
2451   * Return values:
2452   *
2453   * 0: if everything is OK and all folios in the range are allowed to be pinned,
2454   * then this routine leaves all folios pinned and returns zero for success.
2455   *
2456   * -EAGAIN: if any folios in the range are not allowed to be pinned, then this
2457   * routine will migrate those folios away, unpin all the folios in the range. If
2458   * migration of the entire set of folios succeeds, then -EAGAIN is returned. The
2459   * caller should re-pin the entire range with FOLL_PIN and then call this
2460   * routine again.
2461   *
2462   * -ENOMEM, or any other -errno: if an error *other* than -EAGAIN occurs, this
2463   * indicates a migration failure. The caller should give up, and propagate the
2464   * error back up the call stack. The caller does not need to unpin any folios in
2465   * that case, because this routine will do the unpinning.
2466   */
check_and_migrate_movable_folios(unsigned long nr_folios,struct folio ** folios)2467  static long check_and_migrate_movable_folios(unsigned long nr_folios,
2468  					     struct folio **folios)
2469  {
2470  	struct pages_or_folios pofs = {
2471  		.folios = folios,
2472  		.has_folios = true,
2473  		.nr_entries = nr_folios,
2474  	};
2475  
2476  	return check_and_migrate_movable_pages_or_folios(&pofs);
2477  }
2478  
2479  /*
2480   * Return values and behavior are the same as those for
2481   * check_and_migrate_movable_folios().
2482   */
check_and_migrate_movable_pages(unsigned long nr_pages,struct page ** pages)2483  static long check_and_migrate_movable_pages(unsigned long nr_pages,
2484  					    struct page **pages)
2485  {
2486  	struct pages_or_folios pofs = {
2487  		.pages = pages,
2488  		.has_folios = false,
2489  		.nr_entries = nr_pages,
2490  	};
2491  
2492  	return check_and_migrate_movable_pages_or_folios(&pofs);
2493  }
2494  #else
check_and_migrate_movable_pages(unsigned long nr_pages,struct page ** pages)2495  static long check_and_migrate_movable_pages(unsigned long nr_pages,
2496  					    struct page **pages)
2497  {
2498  	return 0;
2499  }
2500  
check_and_migrate_movable_folios(unsigned long nr_folios,struct folio ** folios)2501  static long check_and_migrate_movable_folios(unsigned long nr_folios,
2502  					     struct folio **folios)
2503  {
2504  	return 0;
2505  }
2506  #endif /* CONFIG_MIGRATION */
2507  
2508  /*
2509   * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
2510   * allows us to process the FOLL_LONGTERM flag.
2511   */
__gup_longterm_locked(struct mm_struct * mm,unsigned long start,unsigned long nr_pages,struct page ** pages,int * locked,unsigned int gup_flags)2512  static long __gup_longterm_locked(struct mm_struct *mm,
2513  				  unsigned long start,
2514  				  unsigned long nr_pages,
2515  				  struct page **pages,
2516  				  int *locked,
2517  				  unsigned int gup_flags)
2518  {
2519  	unsigned int flags;
2520  	long rc, nr_pinned_pages;
2521  
2522  	if (!(gup_flags & FOLL_LONGTERM))
2523  		return __get_user_pages_locked(mm, start, nr_pages, pages,
2524  					       locked, gup_flags);
2525  
2526  	flags = memalloc_pin_save();
2527  	do {
2528  		nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
2529  							  pages, locked,
2530  							  gup_flags);
2531  		if (nr_pinned_pages <= 0) {
2532  			rc = nr_pinned_pages;
2533  			break;
2534  		}
2535  
2536  		/* FOLL_LONGTERM implies FOLL_PIN */
2537  		rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
2538  	} while (rc == -EAGAIN);
2539  	memalloc_pin_restore(flags);
2540  	return rc ? rc : nr_pinned_pages;
2541  }
2542  
2543  /*
2544   * Check that the given flags are valid for the exported gup/pup interface, and
2545   * update them with the required flags that the caller must have set.
2546   */
is_valid_gup_args(struct page ** pages,int * locked,unsigned int * gup_flags_p,unsigned int to_set)2547  static bool is_valid_gup_args(struct page **pages, int *locked,
2548  			      unsigned int *gup_flags_p, unsigned int to_set)
2549  {
2550  	unsigned int gup_flags = *gup_flags_p;
2551  
2552  	/*
2553  	 * These flags not allowed to be specified externally to the gup
2554  	 * interfaces:
2555  	 * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
2556  	 * - FOLL_REMOTE is internal only, set in (get|pin)_user_pages_remote()
2557  	 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
2558  	 */
2559  	if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
2560  		return false;
2561  
2562  	gup_flags |= to_set;
2563  	if (locked) {
2564  		/* At the external interface locked must be set */
2565  		if (WARN_ON_ONCE(*locked != 1))
2566  			return false;
2567  
2568  		gup_flags |= FOLL_UNLOCKABLE;
2569  	}
2570  
2571  	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2572  	if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
2573  			 (FOLL_PIN | FOLL_GET)))
2574  		return false;
2575  
2576  	/* LONGTERM can only be specified when pinning */
2577  	if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
2578  		return false;
2579  
2580  	/* Pages input must be given if using GET/PIN */
2581  	if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
2582  		return false;
2583  
2584  	/* We want to allow the pgmap to be hot-unplugged at all times */
2585  	if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
2586  			 (gup_flags & FOLL_PCI_P2PDMA)))
2587  		return false;
2588  
2589  	*gup_flags_p = gup_flags;
2590  	return true;
2591  }
2592  
2593  #ifdef CONFIG_MMU
2594  /**
2595   * get_user_pages_remote() - pin user pages in memory
2596   * @mm:		mm_struct of target mm
2597   * @start:	starting user address
2598   * @nr_pages:	number of pages from start to pin
2599   * @gup_flags:	flags modifying lookup behaviour
2600   * @pages:	array that receives pointers to the pages pinned.
2601   *		Should be at least nr_pages long. Or NULL, if caller
2602   *		only intends to ensure the pages are faulted in.
2603   * @locked:	pointer to lock flag indicating whether lock is held and
2604   *		subsequently whether VM_FAULT_RETRY functionality can be
2605   *		utilised. Lock must initially be held.
2606   *
2607   * Returns either number of pages pinned (which may be less than the
2608   * number requested), or an error. Details about the return value:
2609   *
2610   * -- If nr_pages is 0, returns 0.
2611   * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2612   * -- If nr_pages is >0, and some pages were pinned, returns the number of
2613   *    pages pinned. Again, this may be less than nr_pages.
2614   *
2615   * The caller is responsible for releasing returned @pages, via put_page().
2616   *
2617   * Must be called with mmap_lock held for read or write.
2618   *
2619   * get_user_pages_remote walks a process's page tables and takes a reference
2620   * to each struct page that each user address corresponds to at a given
2621   * instant. That is, it takes the page that would be accessed if a user
2622   * thread accesses the given user virtual address at that instant.
2623   *
2624   * This does not guarantee that the page exists in the user mappings when
2625   * get_user_pages_remote returns, and there may even be a completely different
2626   * page there in some cases (eg. if mmapped pagecache has been invalidated
2627   * and subsequently re-faulted). However it does guarantee that the page
2628   * won't be freed completely. And mostly callers simply care that the page
2629   * contains data that was valid *at some point in time*. Typically, an IO
2630   * or similar operation cannot guarantee anything stronger anyway because
2631   * locks can't be held over the syscall boundary.
2632   *
2633   * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2634   * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2635   * be called after the page is finished with, and before put_page is called.
2636   *
2637   * get_user_pages_remote is typically used for fewer-copy IO operations,
2638   * to get a handle on the memory by some means other than accesses
2639   * via the user virtual addresses. The pages may be submitted for
2640   * DMA to devices or accessed via their kernel linear mapping (via the
2641   * kmap APIs). Care should be taken to use the correct cache flushing APIs.
2642   *
2643   * See also get_user_pages_fast, for performance critical applications.
2644   *
2645   * get_user_pages_remote should be phased out in favor of
2646   * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
2647   * should use get_user_pages_remote because it cannot pass
2648   * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2649   */
get_user_pages_remote(struct mm_struct * mm,unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages,int * locked)2650  long get_user_pages_remote(struct mm_struct *mm,
2651  		unsigned long start, unsigned long nr_pages,
2652  		unsigned int gup_flags, struct page **pages,
2653  		int *locked)
2654  {
2655  	int local_locked = 1;
2656  
2657  	if (!is_valid_gup_args(pages, locked, &gup_flags,
2658  			       FOLL_TOUCH | FOLL_REMOTE))
2659  		return -EINVAL;
2660  
2661  	return __get_user_pages_locked(mm, start, nr_pages, pages,
2662  				       locked ? locked : &local_locked,
2663  				       gup_flags);
2664  }
2665  EXPORT_SYMBOL(get_user_pages_remote);
2666  
2667  #else /* CONFIG_MMU */
get_user_pages_remote(struct mm_struct * mm,unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages,int * locked)2668  long get_user_pages_remote(struct mm_struct *mm,
2669  			   unsigned long start, unsigned long nr_pages,
2670  			   unsigned int gup_flags, struct page **pages,
2671  			   int *locked)
2672  {
2673  	return 0;
2674  }
2675  #endif /* !CONFIG_MMU */
2676  
2677  /**
2678   * get_user_pages() - pin user pages in memory
2679   * @start:      starting user address
2680   * @nr_pages:   number of pages from start to pin
2681   * @gup_flags:  flags modifying lookup behaviour
2682   * @pages:      array that receives pointers to the pages pinned.
2683   *              Should be at least nr_pages long. Or NULL, if caller
2684   *              only intends to ensure the pages are faulted in.
2685   *
2686   * This is the same as get_user_pages_remote(), just with a less-flexible
2687   * calling convention where we assume that the mm being operated on belongs to
2688   * the current task, and doesn't allow passing of a locked parameter.  We also
2689   * obviously don't pass FOLL_REMOTE in here.
2690   */
get_user_pages(unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages)2691  long get_user_pages(unsigned long start, unsigned long nr_pages,
2692  		    unsigned int gup_flags, struct page **pages)
2693  {
2694  	int locked = 1;
2695  
2696  	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
2697  		return -EINVAL;
2698  
2699  	return __get_user_pages_locked(current->mm, start, nr_pages, pages,
2700  				       &locked, gup_flags);
2701  }
2702  EXPORT_SYMBOL(get_user_pages);
2703  
2704  /*
2705   * get_user_pages_unlocked() is suitable to replace the form:
2706   *
2707   *      mmap_read_lock(mm);
2708   *      get_user_pages(mm, ..., pages, NULL);
2709   *      mmap_read_unlock(mm);
2710   *
2711   *  with:
2712   *
2713   *      get_user_pages_unlocked(mm, ..., pages);
2714   *
2715   * It is functionally equivalent to get_user_pages_fast so
2716   * get_user_pages_fast should be used instead if specific gup_flags
2717   * (e.g. FOLL_FORCE) are not required.
2718   */
get_user_pages_unlocked(unsigned long start,unsigned long nr_pages,struct page ** pages,unsigned int gup_flags)2719  long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2720  			     struct page **pages, unsigned int gup_flags)
2721  {
2722  	int locked = 0;
2723  
2724  	if (!is_valid_gup_args(pages, NULL, &gup_flags,
2725  			       FOLL_TOUCH | FOLL_UNLOCKABLE))
2726  		return -EINVAL;
2727  
2728  	return __get_user_pages_locked(current->mm, start, nr_pages, pages,
2729  				       &locked, gup_flags);
2730  }
2731  EXPORT_SYMBOL(get_user_pages_unlocked);
2732  
2733  /*
2734   * GUP-fast
2735   *
2736   * get_user_pages_fast attempts to pin user pages by walking the page
2737   * tables directly and avoids taking locks. Thus the walker needs to be
2738   * protected from page table pages being freed from under it, and should
2739   * block any THP splits.
2740   *
2741   * One way to achieve this is to have the walker disable interrupts, and
2742   * rely on IPIs from the TLB flushing code blocking before the page table
2743   * pages are freed. This is unsuitable for architectures that do not need
2744   * to broadcast an IPI when invalidating TLBs.
2745   *
2746   * Another way to achieve this is to batch up page table containing pages
2747   * belonging to more than one mm_user, then rcu_sched a callback to free those
2748   * pages. Disabling interrupts will allow the gup_fast() walker to both block
2749   * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2750   * (which is a relatively rare event). The code below adopts this strategy.
2751   *
2752   * Before activating this code, please be aware that the following assumptions
2753   * are currently made:
2754   *
2755   *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2756   *  free pages containing page tables or TLB flushing requires IPI broadcast.
2757   *
2758   *  *) ptes can be read atomically by the architecture.
2759   *
2760   *  *) access_ok is sufficient to validate userspace address ranges.
2761   *
2762   * The last two assumptions can be relaxed by the addition of helper functions.
2763   *
2764   * This code is based heavily on the PowerPC implementation by Nick Piggin.
2765   */
2766  #ifdef CONFIG_HAVE_GUP_FAST
2767  /*
2768   * Used in the GUP-fast path to determine whether GUP is permitted to work on
2769   * a specific folio.
2770   *
2771   * This call assumes the caller has pinned the folio, that the lowest page table
2772   * level still points to this folio, and that interrupts have been disabled.
2773   *
2774   * GUP-fast must reject all secretmem folios.
2775   *
2776   * Writing to pinned file-backed dirty tracked folios is inherently problematic
2777   * (see comment describing the writable_file_mapping_allowed() function). We
2778   * therefore try to avoid the most egregious case of a long-term mapping doing
2779   * so.
2780   *
2781   * This function cannot be as thorough as that one as the VMA is not available
2782   * in the fast path, so instead we whitelist known good cases and if in doubt,
2783   * fall back to the slow path.
2784   */
gup_fast_folio_allowed(struct folio * folio,unsigned int flags)2785  static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags)
2786  {
2787  	bool reject_file_backed = false;
2788  	struct address_space *mapping;
2789  	bool check_secretmem = false;
2790  	unsigned long mapping_flags;
2791  
2792  	/*
2793  	 * If we aren't pinning then no problematic write can occur. A long term
2794  	 * pin is the most egregious case so this is the one we disallow.
2795  	 */
2796  	if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) ==
2797  	    (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
2798  		reject_file_backed = true;
2799  
2800  	/* We hold a folio reference, so we can safely access folio fields. */
2801  
2802  	/* secretmem folios are always order-0 folios. */
2803  	if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio))
2804  		check_secretmem = true;
2805  
2806  	if (!reject_file_backed && !check_secretmem)
2807  		return true;
2808  
2809  	if (WARN_ON_ONCE(folio_test_slab(folio)))
2810  		return false;
2811  
2812  	/* hugetlb neither requires dirty-tracking nor can be secretmem. */
2813  	if (folio_test_hugetlb(folio))
2814  		return true;
2815  
2816  	/*
2817  	 * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods
2818  	 * cannot proceed, which means no actions performed under RCU can
2819  	 * proceed either.
2820  	 *
2821  	 * inodes and thus their mappings are freed under RCU, which means the
2822  	 * mapping cannot be freed beneath us and thus we can safely dereference
2823  	 * it.
2824  	 */
2825  	lockdep_assert_irqs_disabled();
2826  
2827  	/*
2828  	 * However, there may be operations which _alter_ the mapping, so ensure
2829  	 * we read it once and only once.
2830  	 */
2831  	mapping = READ_ONCE(folio->mapping);
2832  
2833  	/*
2834  	 * The mapping may have been truncated, in any case we cannot determine
2835  	 * if this mapping is safe - fall back to slow path to determine how to
2836  	 * proceed.
2837  	 */
2838  	if (!mapping)
2839  		return false;
2840  
2841  	/* Anonymous folios pose no problem. */
2842  	mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
2843  	if (mapping_flags)
2844  		return mapping_flags & PAGE_MAPPING_ANON;
2845  
2846  	/*
2847  	 * At this point, we know the mapping is non-null and points to an
2848  	 * address_space object.
2849  	 */
2850  	if (check_secretmem && secretmem_mapping(mapping))
2851  		return false;
2852  	/* The only remaining allowed file system is shmem. */
2853  	return !reject_file_backed || shmem_mapping(mapping);
2854  }
2855  
gup_fast_undo_dev_pagemap(int * nr,int nr_start,unsigned int flags,struct page ** pages)2856  static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start,
2857  		unsigned int flags, struct page **pages)
2858  {
2859  	while ((*nr) - nr_start) {
2860  		struct folio *folio = page_folio(pages[--(*nr)]);
2861  
2862  		folio_clear_referenced(folio);
2863  		gup_put_folio(folio, 1, flags);
2864  	}
2865  }
2866  
2867  #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2868  /*
2869   * GUP-fast relies on pte change detection to avoid concurrent pgtable
2870   * operations.
2871   *
2872   * To pin the page, GUP-fast needs to do below in order:
2873   * (1) pin the page (by prefetching pte), then (2) check pte not changed.
2874   *
2875   * For the rest of pgtable operations where pgtable updates can be racy
2876   * with GUP-fast, we need to do (1) clear pte, then (2) check whether page
2877   * is pinned.
2878   *
2879   * Above will work for all pte-level operations, including THP split.
2880   *
2881   * For THP collapse, it's a bit more complicated because GUP-fast may be
2882   * walking a pgtable page that is being freed (pte is still valid but pmd
2883   * can be cleared already).  To avoid race in such condition, we need to
2884   * also check pmd here to make sure pmd doesn't change (corresponds to
2885   * pmdp_collapse_flush() in the THP collapse code path).
2886   */
gup_fast_pte_range(pmd_t pmd,pmd_t * pmdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2887  static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2888  		unsigned long end, unsigned int flags, struct page **pages,
2889  		int *nr)
2890  {
2891  	struct dev_pagemap *pgmap = NULL;
2892  	int nr_start = *nr, ret = 0;
2893  	pte_t *ptep, *ptem;
2894  
2895  	ptem = ptep = pte_offset_map(&pmd, addr);
2896  	if (!ptep)
2897  		return 0;
2898  	do {
2899  		pte_t pte = ptep_get_lockless(ptep);
2900  		struct page *page;
2901  		struct folio *folio;
2902  
2903  		/*
2904  		 * Always fallback to ordinary GUP on PROT_NONE-mapped pages:
2905  		 * pte_access_permitted() better should reject these pages
2906  		 * either way: otherwise, GUP-fast might succeed in
2907  		 * cases where ordinary GUP would fail due to VMA access
2908  		 * permissions.
2909  		 */
2910  		if (pte_protnone(pte))
2911  			goto pte_unmap;
2912  
2913  		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2914  			goto pte_unmap;
2915  
2916  		if (pte_devmap(pte)) {
2917  			if (unlikely(flags & FOLL_LONGTERM))
2918  				goto pte_unmap;
2919  
2920  			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2921  			if (unlikely(!pgmap)) {
2922  				gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
2923  				goto pte_unmap;
2924  			}
2925  		} else if (pte_special(pte))
2926  			goto pte_unmap;
2927  
2928  		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2929  		page = pte_page(pte);
2930  
2931  		folio = try_grab_folio_fast(page, 1, flags);
2932  		if (!folio)
2933  			goto pte_unmap;
2934  
2935  		if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
2936  		    unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
2937  			gup_put_folio(folio, 1, flags);
2938  			goto pte_unmap;
2939  		}
2940  
2941  		if (!gup_fast_folio_allowed(folio, flags)) {
2942  			gup_put_folio(folio, 1, flags);
2943  			goto pte_unmap;
2944  		}
2945  
2946  		if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
2947  			gup_put_folio(folio, 1, flags);
2948  			goto pte_unmap;
2949  		}
2950  
2951  		/*
2952  		 * We need to make the page accessible if and only if we are
2953  		 * going to access its content (the FOLL_PIN case).  Please
2954  		 * see Documentation/core-api/pin_user_pages.rst for
2955  		 * details.
2956  		 */
2957  		if (flags & FOLL_PIN) {
2958  			ret = arch_make_folio_accessible(folio);
2959  			if (ret) {
2960  				gup_put_folio(folio, 1, flags);
2961  				goto pte_unmap;
2962  			}
2963  		}
2964  		folio_set_referenced(folio);
2965  		pages[*nr] = page;
2966  		(*nr)++;
2967  	} while (ptep++, addr += PAGE_SIZE, addr != end);
2968  
2969  	ret = 1;
2970  
2971  pte_unmap:
2972  	if (pgmap)
2973  		put_dev_pagemap(pgmap);
2974  	pte_unmap(ptem);
2975  	return ret;
2976  }
2977  #else
2978  
2979  /*
2980   * If we can't determine whether or not a pte is special, then fail immediately
2981   * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2982   * to be special.
2983   *
2984   * For a futex to be placed on a THP tail page, get_futex_key requires a
2985   * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2986   * useful to have gup_fast_pmd_leaf even if we can't operate on ptes.
2987   */
gup_fast_pte_range(pmd_t pmd,pmd_t * pmdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2988  static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2989  		unsigned long end, unsigned int flags, struct page **pages,
2990  		int *nr)
2991  {
2992  	return 0;
2993  }
2994  #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2995  
2996  #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
gup_fast_devmap_leaf(unsigned long pfn,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2997  static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr,
2998  	unsigned long end, unsigned int flags, struct page **pages, int *nr)
2999  {
3000  	int nr_start = *nr;
3001  	struct dev_pagemap *pgmap = NULL;
3002  
3003  	do {
3004  		struct folio *folio;
3005  		struct page *page = pfn_to_page(pfn);
3006  
3007  		pgmap = get_dev_pagemap(pfn, pgmap);
3008  		if (unlikely(!pgmap)) {
3009  			gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3010  			break;
3011  		}
3012  
3013  		if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
3014  			gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3015  			break;
3016  		}
3017  
3018  		folio = try_grab_folio_fast(page, 1, flags);
3019  		if (!folio) {
3020  			gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3021  			break;
3022  		}
3023  		folio_set_referenced(folio);
3024  		pages[*nr] = page;
3025  		(*nr)++;
3026  		pfn++;
3027  	} while (addr += PAGE_SIZE, addr != end);
3028  
3029  	put_dev_pagemap(pgmap);
3030  	return addr == end;
3031  }
3032  
gup_fast_devmap_pmd_leaf(pmd_t orig,pmd_t * pmdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3033  static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
3034  		unsigned long end, unsigned int flags, struct page **pages,
3035  		int *nr)
3036  {
3037  	unsigned long fault_pfn;
3038  	int nr_start = *nr;
3039  
3040  	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
3041  	if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
3042  		return 0;
3043  
3044  	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3045  		gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3046  		return 0;
3047  	}
3048  	return 1;
3049  }
3050  
gup_fast_devmap_pud_leaf(pud_t orig,pud_t * pudp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3051  static int gup_fast_devmap_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
3052  		unsigned long end, unsigned int flags, struct page **pages,
3053  		int *nr)
3054  {
3055  	unsigned long fault_pfn;
3056  	int nr_start = *nr;
3057  
3058  	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
3059  	if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
3060  		return 0;
3061  
3062  	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3063  		gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3064  		return 0;
3065  	}
3066  	return 1;
3067  }
3068  #else
gup_fast_devmap_pmd_leaf(pmd_t orig,pmd_t * pmdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3069  static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
3070  		unsigned long end, unsigned int flags, struct page **pages,
3071  		int *nr)
3072  {
3073  	BUILD_BUG();
3074  	return 0;
3075  }
3076  
gup_fast_devmap_pud_leaf(pud_t pud,pud_t * pudp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3077  static int gup_fast_devmap_pud_leaf(pud_t pud, pud_t *pudp, unsigned long addr,
3078  		unsigned long end, unsigned int flags, struct page **pages,
3079  		int *nr)
3080  {
3081  	BUILD_BUG();
3082  	return 0;
3083  }
3084  #endif
3085  
gup_fast_pmd_leaf(pmd_t orig,pmd_t * pmdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3086  static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
3087  		unsigned long end, unsigned int flags, struct page **pages,
3088  		int *nr)
3089  {
3090  	struct page *page;
3091  	struct folio *folio;
3092  	int refs;
3093  
3094  	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
3095  		return 0;
3096  
3097  	if (pmd_special(orig))
3098  		return 0;
3099  
3100  	if (pmd_devmap(orig)) {
3101  		if (unlikely(flags & FOLL_LONGTERM))
3102  			return 0;
3103  		return gup_fast_devmap_pmd_leaf(orig, pmdp, addr, end, flags,
3104  					        pages, nr);
3105  	}
3106  
3107  	page = pmd_page(orig);
3108  	refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);
3109  
3110  	folio = try_grab_folio_fast(page, refs, flags);
3111  	if (!folio)
3112  		return 0;
3113  
3114  	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3115  		gup_put_folio(folio, refs, flags);
3116  		return 0;
3117  	}
3118  
3119  	if (!gup_fast_folio_allowed(folio, flags)) {
3120  		gup_put_folio(folio, refs, flags);
3121  		return 0;
3122  	}
3123  	if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3124  		gup_put_folio(folio, refs, flags);
3125  		return 0;
3126  	}
3127  
3128  	*nr += refs;
3129  	folio_set_referenced(folio);
3130  	return 1;
3131  }
3132  
gup_fast_pud_leaf(pud_t orig,pud_t * pudp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3133  static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
3134  		unsigned long end, unsigned int flags, struct page **pages,
3135  		int *nr)
3136  {
3137  	struct page *page;
3138  	struct folio *folio;
3139  	int refs;
3140  
3141  	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
3142  		return 0;
3143  
3144  	if (pud_special(orig))
3145  		return 0;
3146  
3147  	if (pud_devmap(orig)) {
3148  		if (unlikely(flags & FOLL_LONGTERM))
3149  			return 0;
3150  		return gup_fast_devmap_pud_leaf(orig, pudp, addr, end, flags,
3151  					        pages, nr);
3152  	}
3153  
3154  	page = pud_page(orig);
3155  	refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr);
3156  
3157  	folio = try_grab_folio_fast(page, refs, flags);
3158  	if (!folio)
3159  		return 0;
3160  
3161  	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3162  		gup_put_folio(folio, refs, flags);
3163  		return 0;
3164  	}
3165  
3166  	if (!gup_fast_folio_allowed(folio, flags)) {
3167  		gup_put_folio(folio, refs, flags);
3168  		return 0;
3169  	}
3170  
3171  	if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3172  		gup_put_folio(folio, refs, flags);
3173  		return 0;
3174  	}
3175  
3176  	*nr += refs;
3177  	folio_set_referenced(folio);
3178  	return 1;
3179  }
3180  
gup_fast_pgd_leaf(pgd_t orig,pgd_t * pgdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3181  static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr,
3182  		unsigned long end, unsigned int flags, struct page **pages,
3183  		int *nr)
3184  {
3185  	int refs;
3186  	struct page *page;
3187  	struct folio *folio;
3188  
3189  	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
3190  		return 0;
3191  
3192  	BUILD_BUG_ON(pgd_devmap(orig));
3193  
3194  	page = pgd_page(orig);
3195  	refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);
3196  
3197  	folio = try_grab_folio_fast(page, refs, flags);
3198  	if (!folio)
3199  		return 0;
3200  
3201  	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
3202  		gup_put_folio(folio, refs, flags);
3203  		return 0;
3204  	}
3205  
3206  	if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3207  		gup_put_folio(folio, refs, flags);
3208  		return 0;
3209  	}
3210  
3211  	if (!gup_fast_folio_allowed(folio, flags)) {
3212  		gup_put_folio(folio, refs, flags);
3213  		return 0;
3214  	}
3215  
3216  	*nr += refs;
3217  	folio_set_referenced(folio);
3218  	return 1;
3219  }
3220  
gup_fast_pmd_range(pud_t * pudp,pud_t pud,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3221  static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
3222  		unsigned long end, unsigned int flags, struct page **pages,
3223  		int *nr)
3224  {
3225  	unsigned long next;
3226  	pmd_t *pmdp;
3227  
3228  	pmdp = pmd_offset_lockless(pudp, pud, addr);
3229  	do {
3230  		pmd_t pmd = pmdp_get_lockless(pmdp);
3231  
3232  		next = pmd_addr_end(addr, end);
3233  		if (!pmd_present(pmd))
3234  			return 0;
3235  
3236  		if (unlikely(pmd_leaf(pmd))) {
3237  			/* See gup_fast_pte_range() */
3238  			if (pmd_protnone(pmd))
3239  				return 0;
3240  
3241  			if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags,
3242  				pages, nr))
3243  				return 0;
3244  
3245  		} else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
3246  					       pages, nr))
3247  			return 0;
3248  	} while (pmdp++, addr = next, addr != end);
3249  
3250  	return 1;
3251  }
3252  
gup_fast_pud_range(p4d_t * p4dp,p4d_t p4d,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3253  static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
3254  		unsigned long end, unsigned int flags, struct page **pages,
3255  		int *nr)
3256  {
3257  	unsigned long next;
3258  	pud_t *pudp;
3259  
3260  	pudp = pud_offset_lockless(p4dp, p4d, addr);
3261  	do {
3262  		pud_t pud = READ_ONCE(*pudp);
3263  
3264  		next = pud_addr_end(addr, end);
3265  		if (unlikely(!pud_present(pud)))
3266  			return 0;
3267  		if (unlikely(pud_leaf(pud))) {
3268  			if (!gup_fast_pud_leaf(pud, pudp, addr, next, flags,
3269  					       pages, nr))
3270  				return 0;
3271  		} else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags,
3272  					       pages, nr))
3273  			return 0;
3274  	} while (pudp++, addr = next, addr != end);
3275  
3276  	return 1;
3277  }
3278  
gup_fast_p4d_range(pgd_t * pgdp,pgd_t pgd,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3279  static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
3280  		unsigned long end, unsigned int flags, struct page **pages,
3281  		int *nr)
3282  {
3283  	unsigned long next;
3284  	p4d_t *p4dp;
3285  
3286  	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
3287  	do {
3288  		p4d_t p4d = READ_ONCE(*p4dp);
3289  
3290  		next = p4d_addr_end(addr, end);
3291  		if (!p4d_present(p4d))
3292  			return 0;
3293  		BUILD_BUG_ON(p4d_leaf(p4d));
3294  		if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
3295  					pages, nr))
3296  			return 0;
3297  	} while (p4dp++, addr = next, addr != end);
3298  
3299  	return 1;
3300  }
3301  
gup_fast_pgd_range(unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3302  static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
3303  		unsigned int flags, struct page **pages, int *nr)
3304  {
3305  	unsigned long next;
3306  	pgd_t *pgdp;
3307  
3308  	pgdp = pgd_offset(current->mm, addr);
3309  	do {
3310  		pgd_t pgd = READ_ONCE(*pgdp);
3311  
3312  		next = pgd_addr_end(addr, end);
3313  		if (pgd_none(pgd))
3314  			return;
3315  		if (unlikely(pgd_leaf(pgd))) {
3316  			if (!gup_fast_pgd_leaf(pgd, pgdp, addr, next, flags,
3317  					       pages, nr))
3318  				return;
3319  		} else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
3320  					       pages, nr))
3321  			return;
3322  	} while (pgdp++, addr = next, addr != end);
3323  }
3324  #else
gup_fast_pgd_range(unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)3325  static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end,
3326  		unsigned int flags, struct page **pages, int *nr)
3327  {
3328  }
3329  #endif /* CONFIG_HAVE_GUP_FAST */
3330  
3331  #ifndef gup_fast_permitted
3332  /*
3333   * Check if it's allowed to use get_user_pages_fast_only() for the range, or
3334   * we need to fall back to the slow version:
3335   */
gup_fast_permitted(unsigned long start,unsigned long end)3336  static bool gup_fast_permitted(unsigned long start, unsigned long end)
3337  {
3338  	return true;
3339  }
3340  #endif
3341  
gup_fast(unsigned long start,unsigned long end,unsigned int gup_flags,struct page ** pages)3342  static unsigned long gup_fast(unsigned long start, unsigned long end,
3343  		unsigned int gup_flags, struct page **pages)
3344  {
3345  	unsigned long flags;
3346  	int nr_pinned = 0;
3347  	unsigned seq;
3348  
3349  	if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
3350  	    !gup_fast_permitted(start, end))
3351  		return 0;
3352  
3353  	if (gup_flags & FOLL_PIN) {
3354  		seq = raw_read_seqcount(&current->mm->write_protect_seq);
3355  		if (seq & 1)
3356  			return 0;
3357  	}
3358  
3359  	/*
3360  	 * Disable interrupts. The nested form is used, in order to allow full,
3361  	 * general purpose use of this routine.
3362  	 *
3363  	 * With interrupts disabled, we block page table pages from being freed
3364  	 * from under us. See struct mmu_table_batch comments in
3365  	 * include/asm-generic/tlb.h for more details.
3366  	 *
3367  	 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
3368  	 * that come from THPs splitting.
3369  	 */
3370  	local_irq_save(flags);
3371  	gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned);
3372  	local_irq_restore(flags);
3373  
3374  	/*
3375  	 * When pinning pages for DMA there could be a concurrent write protect
3376  	 * from fork() via copy_page_range(), in this case always fail GUP-fast.
3377  	 */
3378  	if (gup_flags & FOLL_PIN) {
3379  		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
3380  			gup_fast_unpin_user_pages(pages, nr_pinned);
3381  			return 0;
3382  		} else {
3383  			sanity_check_pinned_pages(pages, nr_pinned);
3384  		}
3385  	}
3386  	return nr_pinned;
3387  }
3388  
gup_fast_fallback(unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages)3389  static int gup_fast_fallback(unsigned long start, unsigned long nr_pages,
3390  		unsigned int gup_flags, struct page **pages)
3391  {
3392  	unsigned long len, end;
3393  	unsigned long nr_pinned;
3394  	int locked = 0;
3395  	int ret;
3396  
3397  	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
3398  				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
3399  				       FOLL_FAST_ONLY | FOLL_NOFAULT |
3400  				       FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
3401  		return -EINVAL;
3402  
3403  	if (gup_flags & FOLL_PIN)
3404  		mm_set_has_pinned_flag(&current->mm->flags);
3405  
3406  	if (!(gup_flags & FOLL_FAST_ONLY))
3407  		might_lock_read(&current->mm->mmap_lock);
3408  
3409  	start = untagged_addr(start) & PAGE_MASK;
3410  	len = nr_pages << PAGE_SHIFT;
3411  	if (check_add_overflow(start, len, &end))
3412  		return -EOVERFLOW;
3413  	if (end > TASK_SIZE_MAX)
3414  		return -EFAULT;
3415  	if (unlikely(!access_ok((void __user *)start, len)))
3416  		return -EFAULT;
3417  
3418  	nr_pinned = gup_fast(start, end, gup_flags, pages);
3419  	if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
3420  		return nr_pinned;
3421  
3422  	/* Slow path: try to get the remaining pages with get_user_pages */
3423  	start += nr_pinned << PAGE_SHIFT;
3424  	pages += nr_pinned;
3425  	ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
3426  				    pages, &locked,
3427  				    gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
3428  	if (ret < 0) {
3429  		/*
3430  		 * The caller has to unpin the pages we already pinned so
3431  		 * returning -errno is not an option
3432  		 */
3433  		if (nr_pinned)
3434  			return nr_pinned;
3435  		return ret;
3436  	}
3437  	return ret + nr_pinned;
3438  }
3439  
3440  /**
3441   * get_user_pages_fast_only() - pin user pages in memory
3442   * @start:      starting user address
3443   * @nr_pages:   number of pages from start to pin
3444   * @gup_flags:  flags modifying pin behaviour
3445   * @pages:      array that receives pointers to the pages pinned.
3446   *              Should be at least nr_pages long.
3447   *
3448   * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
3449   * the regular GUP.
3450   *
3451   * If the architecture does not support this function, simply return with no
3452   * pages pinned.
3453   *
3454   * Careful, careful! COW breaking can go either way, so a non-write
3455   * access can get ambiguous page results. If you call this function without
3456   * 'write' set, you'd better be sure that you're ok with that ambiguity.
3457   */
get_user_pages_fast_only(unsigned long start,int nr_pages,unsigned int gup_flags,struct page ** pages)3458  int get_user_pages_fast_only(unsigned long start, int nr_pages,
3459  			     unsigned int gup_flags, struct page **pages)
3460  {
3461  	/*
3462  	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
3463  	 * because gup fast is always a "pin with a +1 page refcount" request.
3464  	 *
3465  	 * FOLL_FAST_ONLY is required in order to match the API description of
3466  	 * this routine: no fall back to regular ("slow") GUP.
3467  	 */
3468  	if (!is_valid_gup_args(pages, NULL, &gup_flags,
3469  			       FOLL_GET | FOLL_FAST_ONLY))
3470  		return -EINVAL;
3471  
3472  	return gup_fast_fallback(start, nr_pages, gup_flags, pages);
3473  }
3474  EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
3475  
3476  /**
3477   * get_user_pages_fast() - pin user pages in memory
3478   * @start:      starting user address
3479   * @nr_pages:   number of pages from start to pin
3480   * @gup_flags:  flags modifying pin behaviour
3481   * @pages:      array that receives pointers to the pages pinned.
3482   *              Should be at least nr_pages long.
3483   *
3484   * Attempt to pin user pages in memory without taking mm->mmap_lock.
3485   * If not successful, it will fall back to taking the lock and
3486   * calling get_user_pages().
3487   *
3488   * Returns number of pages pinned. This may be fewer than the number requested.
3489   * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3490   * -errno.
3491   */
get_user_pages_fast(unsigned long start,int nr_pages,unsigned int gup_flags,struct page ** pages)3492  int get_user_pages_fast(unsigned long start, int nr_pages,
3493  			unsigned int gup_flags, struct page **pages)
3494  {
3495  	/*
3496  	 * The caller may or may not have explicitly set FOLL_GET; either way is
3497  	 * OK. However, internally (within mm/gup.c), gup fast variants must set
3498  	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3499  	 * request.
3500  	 */
3501  	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
3502  		return -EINVAL;
3503  	return gup_fast_fallback(start, nr_pages, gup_flags, pages);
3504  }
3505  EXPORT_SYMBOL_GPL(get_user_pages_fast);
3506  
3507  /**
3508   * pin_user_pages_fast() - pin user pages in memory without taking locks
3509   *
3510   * @start:      starting user address
3511   * @nr_pages:   number of pages from start to pin
3512   * @gup_flags:  flags modifying pin behaviour
3513   * @pages:      array that receives pointers to the pages pinned.
3514   *              Should be at least nr_pages long.
3515   *
3516   * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
3517   * get_user_pages_fast() for documentation on the function arguments, because
3518   * the arguments here are identical.
3519   *
3520   * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3521   * see Documentation/core-api/pin_user_pages.rst for further details.
3522   *
3523   * Note that if a zero_page is amongst the returned pages, it will not have
3524   * pins in it and unpin_user_page() will not remove pins from it.
3525   */
pin_user_pages_fast(unsigned long start,int nr_pages,unsigned int gup_flags,struct page ** pages)3526  int pin_user_pages_fast(unsigned long start, int nr_pages,
3527  			unsigned int gup_flags, struct page **pages)
3528  {
3529  	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3530  		return -EINVAL;
3531  	return gup_fast_fallback(start, nr_pages, gup_flags, pages);
3532  }
3533  EXPORT_SYMBOL_GPL(pin_user_pages_fast);
3534  
3535  /**
3536   * pin_user_pages_remote() - pin pages of a remote process
3537   *
3538   * @mm:		mm_struct of target mm
3539   * @start:	starting user address
3540   * @nr_pages:	number of pages from start to pin
3541   * @gup_flags:	flags modifying lookup behaviour
3542   * @pages:	array that receives pointers to the pages pinned.
3543   *		Should be at least nr_pages long.
3544   * @locked:	pointer to lock flag indicating whether lock is held and
3545   *		subsequently whether VM_FAULT_RETRY functionality can be
3546   *		utilised. Lock must initially be held.
3547   *
3548   * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3549   * get_user_pages_remote() for documentation on the function arguments, because
3550   * the arguments here are identical.
3551   *
3552   * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3553   * see Documentation/core-api/pin_user_pages.rst for details.
3554   *
3555   * Note that if a zero_page is amongst the returned pages, it will not have
3556   * pins in it and unpin_user_page*() will not remove pins from it.
3557   */
pin_user_pages_remote(struct mm_struct * mm,unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages,int * locked)3558  long pin_user_pages_remote(struct mm_struct *mm,
3559  			   unsigned long start, unsigned long nr_pages,
3560  			   unsigned int gup_flags, struct page **pages,
3561  			   int *locked)
3562  {
3563  	int local_locked = 1;
3564  
3565  	if (!is_valid_gup_args(pages, locked, &gup_flags,
3566  			       FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
3567  		return 0;
3568  	return __gup_longterm_locked(mm, start, nr_pages, pages,
3569  				     locked ? locked : &local_locked,
3570  				     gup_flags);
3571  }
3572  EXPORT_SYMBOL(pin_user_pages_remote);
3573  
3574  /**
3575   * pin_user_pages() - pin user pages in memory for use by other devices
3576   *
3577   * @start:	starting user address
3578   * @nr_pages:	number of pages from start to pin
3579   * @gup_flags:	flags modifying lookup behaviour
3580   * @pages:	array that receives pointers to the pages pinned.
3581   *		Should be at least nr_pages long.
3582   *
3583   * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3584   * FOLL_PIN is set.
3585   *
3586   * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3587   * see Documentation/core-api/pin_user_pages.rst for details.
3588   *
3589   * Note that if a zero_page is amongst the returned pages, it will not have
3590   * pins in it and unpin_user_page*() will not remove pins from it.
3591   */
pin_user_pages(unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages)3592  long pin_user_pages(unsigned long start, unsigned long nr_pages,
3593  		    unsigned int gup_flags, struct page **pages)
3594  {
3595  	int locked = 1;
3596  
3597  	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3598  		return 0;
3599  	return __gup_longterm_locked(current->mm, start, nr_pages,
3600  				     pages, &locked, gup_flags);
3601  }
3602  EXPORT_SYMBOL(pin_user_pages);
3603  
3604  /*
3605   * pin_user_pages_unlocked() is the FOLL_PIN variant of
3606   * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3607   * FOLL_PIN and rejects FOLL_GET.
3608   *
3609   * Note that if a zero_page is amongst the returned pages, it will not have
3610   * pins in it and unpin_user_page*() will not remove pins from it.
3611   */
pin_user_pages_unlocked(unsigned long start,unsigned long nr_pages,struct page ** pages,unsigned int gup_flags)3612  long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3613  			     struct page **pages, unsigned int gup_flags)
3614  {
3615  	int locked = 0;
3616  
3617  	if (!is_valid_gup_args(pages, NULL, &gup_flags,
3618  			       FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
3619  		return 0;
3620  
3621  	return __gup_longterm_locked(current->mm, start, nr_pages, pages,
3622  				     &locked, gup_flags);
3623  }
3624  EXPORT_SYMBOL(pin_user_pages_unlocked);
3625  
3626  /**
3627   * memfd_pin_folios() - pin folios associated with a memfd
3628   * @memfd:      the memfd whose folios are to be pinned
3629   * @start:      the first memfd offset
3630   * @end:        the last memfd offset (inclusive)
3631   * @folios:     array that receives pointers to the folios pinned
3632   * @max_folios: maximum number of entries in @folios
3633   * @offset:     the offset into the first folio
3634   *
3635   * Attempt to pin folios associated with a memfd in the contiguous range
3636   * [start, end]. Given that a memfd is either backed by shmem or hugetlb,
3637   * the folios can either be found in the page cache or need to be allocated
3638   * if necessary. Once the folios are located, they are all pinned via
3639   * FOLL_PIN and @offset is populatedwith the offset into the first folio.
3640   * And, eventually, these pinned folios must be released either using
3641   * unpin_folios() or unpin_folio().
3642   *
3643   * It must be noted that the folios may be pinned for an indefinite amount
3644   * of time. And, in most cases, the duration of time they may stay pinned
3645   * would be controlled by the userspace. This behavior is effectively the
3646   * same as using FOLL_LONGTERM with other GUP APIs.
3647   *
3648   * Returns number of folios pinned, which could be less than @max_folios
3649   * as it depends on the folio sizes that cover the range [start, end].
3650   * If no folios were pinned, it returns -errno.
3651   */
memfd_pin_folios(struct file * memfd,loff_t start,loff_t end,struct folio ** folios,unsigned int max_folios,pgoff_t * offset)3652  long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
3653  		      struct folio **folios, unsigned int max_folios,
3654  		      pgoff_t *offset)
3655  {
3656  	unsigned int flags, nr_folios, nr_found;
3657  	unsigned int i, pgshift = PAGE_SHIFT;
3658  	pgoff_t start_idx, end_idx, next_idx;
3659  	struct folio *folio = NULL;
3660  	struct folio_batch fbatch;
3661  	struct hstate *h;
3662  	long ret = -EINVAL;
3663  
3664  	if (start < 0 || start > end || !max_folios)
3665  		return -EINVAL;
3666  
3667  	if (!memfd)
3668  		return -EINVAL;
3669  
3670  	if (!shmem_file(memfd) && !is_file_hugepages(memfd))
3671  		return -EINVAL;
3672  
3673  	if (end >= i_size_read(file_inode(memfd)))
3674  		return -EINVAL;
3675  
3676  	if (is_file_hugepages(memfd)) {
3677  		h = hstate_file(memfd);
3678  		pgshift = huge_page_shift(h);
3679  	}
3680  
3681  	flags = memalloc_pin_save();
3682  	do {
3683  		nr_folios = 0;
3684  		start_idx = start >> pgshift;
3685  		end_idx = end >> pgshift;
3686  		if (is_file_hugepages(memfd)) {
3687  			start_idx <<= huge_page_order(h);
3688  			end_idx <<= huge_page_order(h);
3689  		}
3690  
3691  		folio_batch_init(&fbatch);
3692  		while (start_idx <= end_idx && nr_folios < max_folios) {
3693  			/*
3694  			 * In most cases, we should be able to find the folios
3695  			 * in the page cache. If we cannot find them for some
3696  			 * reason, we try to allocate them and add them to the
3697  			 * page cache.
3698  			 */
3699  			nr_found = filemap_get_folios_contig(memfd->f_mapping,
3700  							     &start_idx,
3701  							     end_idx,
3702  							     &fbatch);
3703  			if (folio) {
3704  				folio_put(folio);
3705  				folio = NULL;
3706  			}
3707  
3708  			next_idx = 0;
3709  			for (i = 0; i < nr_found; i++) {
3710  				/*
3711  				 * As there can be multiple entries for a
3712  				 * given folio in the batch returned by
3713  				 * filemap_get_folios_contig(), the below
3714  				 * check is to ensure that we pin and return a
3715  				 * unique set of folios between start and end.
3716  				 */
3717  				if (next_idx &&
3718  				    next_idx != folio_index(fbatch.folios[i]))
3719  					continue;
3720  
3721  				folio = page_folio(&fbatch.folios[i]->page);
3722  
3723  				if (try_grab_folio(folio, 1, FOLL_PIN)) {
3724  					folio_batch_release(&fbatch);
3725  					ret = -EINVAL;
3726  					goto err;
3727  				}
3728  
3729  				if (nr_folios == 0)
3730  					*offset = offset_in_folio(folio, start);
3731  
3732  				folios[nr_folios] = folio;
3733  				next_idx = folio_next_index(folio);
3734  				if (++nr_folios == max_folios)
3735  					break;
3736  			}
3737  
3738  			folio = NULL;
3739  			folio_batch_release(&fbatch);
3740  			if (!nr_found) {
3741  				folio = memfd_alloc_folio(memfd, start_idx);
3742  				if (IS_ERR(folio)) {
3743  					ret = PTR_ERR(folio);
3744  					if (ret != -EEXIST)
3745  						goto err;
3746  					folio = NULL;
3747  				}
3748  			}
3749  		}
3750  
3751  		ret = check_and_migrate_movable_folios(nr_folios, folios);
3752  	} while (ret == -EAGAIN);
3753  
3754  	memalloc_pin_restore(flags);
3755  	return ret ? ret : nr_folios;
3756  err:
3757  	memalloc_pin_restore(flags);
3758  	unpin_folios(folios, nr_folios);
3759  
3760  	return ret;
3761  }
3762  EXPORT_SYMBOL_GPL(memfd_pin_folios);
3763