/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/mm_types.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem-backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or any other
 * RAM- or swap-backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}
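
/*
 * Illustrative note: the integer return value is deliberately usable as an
 * offset. lru_gen_update_size() below does exactly that with
 * "type * LRU_INACTIVE_FILE", and folio_lru_list() uses it to choose between
 * LRU_INACTIVE_FILE and LRU_INACTIVE_ANON. A hypothetical caller that only
 * needs a file/anon split could likewise do:
 *
 *	if (folio_is_file_lru(folio))
 *		nr_file += folio_nr_pages(folio);
 *	else
 *		nr_anon += folio_nr_pages(folio);
 */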

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear the LRU flags before releasing a folio.
 * @folio: The folio that was on the LRU and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
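
/*
 * Sketch of typical use (see lruvec_add_folio() further down for the real
 * thing): the returned enum indexes lruvec->lists[], so a caller holding
 * lru_lock can do:
 *
 *	enum lru_list lru = folio_lru_list(folio);
 *
 *	update_lru_size(lruvec, lru, folio_zonenum(folio),
 *			folio_nr_pages(folio));
 *	if (lru != LRU_UNEVICTABLE)
 *		list_add(&folio->lru, &lruvec->lists[lru]);
 */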

#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment in folio_lru_refs() */
	return order_base_2(refs + 1);
}
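
/*
 * Worked example (the exact numbers depend on the configured MAX_NR_GENS and
 * LRU_REFS_WIDTH, so treat them as illustrative): assuming MAX_NR_GENS == 4,
 * lru_gen_from_seq() maps seq 0, 1, 2, 3, 4, 5 to gen 0, 1, 2, 3, 0, 1.
 * Assuming LRU_REFS_WIDTH == 2, lru_tier_from_refs() buckets refs as
 *
 *	refs 0    -> tier 0
 *	refs 1    -> tier 1
 *	refs 2-3  -> tier 2
 *	refs 4-7  -> tier 3
 *
 * i.e. order_base_2(refs + 1): each higher tier covers twice as many
 * accesses as the previous one.
 */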

static inline int folio_lru_refs(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);
	bool workingset = flags & BIT(PG_workingset);

	/*
	 * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
	 * total number of accesses is N>1, since N=0,1 both map to the first
	 * tier. lru_tier_from_refs() will account for this off-by-one. Also see
	 * the comment on MAX_NR_TIERS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}

static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
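
/*
 * Note on the encoding (see lru_gen_add_folio() and lru_gen_del_folio()
 * below): the generation is stored in folio->flags as gen + 1, so a stored
 * value of 0 decodes to -1 here and means "not on a multi-gen LRU list".
 * For example, right after lru_gen_add_folio() stores
 * (gen + 1UL) << LRU_GEN_PGOFF, this helper returns that gen; after
 * lru_gen_del_folio() clears LRU_GEN_MASK, it returns -1 again.
 */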

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;
	/*
	 * There are four common cases for this page:
	 * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
	 *    generation, and it's protected over the rest below.
	 * 2. If it can't be evicted immediately, i.e., a dirty page pending
	 *    writeback, add it to the second youngest generation.
	 * 3. If it should be evicted first, e.g., cold and clean from
	 *    folio_rotate_reclaimable(), add it to the oldest generation.
	 * 4. Everything else falls between 2 & 3 above and is added to the
	 *    second oldest generation if it's considered inactive, or the
	 *    oldest generation otherwise. See lru_gen_is_active().
	 */
	if (folio_test_active(folio))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		seq = lrugen->max_seq - 1;
	else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
		seq = lrugen->min_seq[type];
	else
		seq = lrugen->min_seq[type] + 1;

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}

#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}
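
/*
 * Typical pairing, sketched (illustrative only; the real callers in
 * mm/vmscan.c and mm/swap.c go through their own locking wrappers):
 *
 *	spin_lock_irq(&lruvec->lru_lock);
 *	lruvec_del_folio(lruvec, folio);	// isolate
 *	spin_unlock_irq(&lruvec->lru_lock);
 *	...
 *	spin_lock_irq(&lruvec->lru_lock);
 *	lruvec_add_folio(lruvec, folio);	// putback
 *	spin_unlock_irq(&lruvec->lru_lock);
 *
 * Both helpers transparently divert to the multi-gen LRU when
 * lru_gen_add_folio()/lru_gen_del_folio() accept the folio.
 */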

#ifdef CONFIG_ANON_VMA_NAME
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
		!strcmp(anon_name1->name, anon_name2->name);
}
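
/*
 * Example of the comparison semantics (hedged sketch; the helper name below
 * is hypothetical): two NULL names compare equal, a NULL and a non-NULL name
 * do not, and two distinct objects compare by string content. This is what
 * e.g. VMA merging relies on:
 *
 *	static bool hypothetical_names_match(struct vm_area_struct *a,
 *					     struct vm_area_struct *b)
 *	{
 *		return anon_vma_name_eq(anon_vma_name(a), anon_vma_name(b));
 *	}
 */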

#else /* CONFIG_ANON_VMA_NAME */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif  /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Since the increment is constrained by the PTL unlock, it is
	 * guaranteed to be visible if the PTE modification is visible. After
	 * all, if there is no PTE modification, nobody cares about TLB
	 * flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
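
/*
 * Hedged usage sketch, mirroring the ordering comment in
 * inc_tlb_flush_pending(): a caller that has just observed PTEs under the
 * PTL can check whether a batched flush is still in flight and, if so,
 * flush itself rather than rely on stale TLB contents:
 *
 *	spin_lock(ptl);
 *	... read or modify PTEs ...
 *	if (mm_tlb_flush_pending(mm))
 *		flush_tlb_range(vma, start, end);
 *	spin_unlock(ptl);
 *
 * mm_tlb_flush_nested() is the "> 1" variant for callers that have already
 * incremented tlb_flush_pending themselves and want to know whether any
 * other flush is also pending.
 */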

#ifdef CONFIG_MMU
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
		swp_entry_t entry, struct vm_area_struct *dst_vma)
{
	pte_marker srcm = pte_marker_get(entry);
	/* Always copy error entries. */
	pte_marker dstm = srcm & PTE_MARKER_POISONED;

	/* Only copy PTE markers if UFFD register matches. */
	if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
		dstm |= PTE_MARKER_UFFD_WP;

	return dstm;
}
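
/*
 * Hedged sketch of the intended call pattern (e.g. a fork-style path copying
 * a non-present source PTE): compute the marker to propagate and only
 * install a marker PTE in the destination if anything is left:
 *
 *	pte_marker marker = copy_pte_marker(entry, dst_vma);
 *
 *	if (marker)
 *		set_pte_at(dst_mm, addr, dst_pte, make_pte_marker(marker));
 *
 * A return value of 0 means the destination keeps a none pte, e.g. a uffd-wp
 * marker is dropped when dst_vma is not registered for uffd-wp.
 */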
#endif

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte.  NOTE!  This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable.  Meanwhile
 * a none pte also means we are not demoting the pte, so a tlb flush is not
 * needed either; e.g., when the pte was cleared, the caller should have taken
 * care of the tlb flush already.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(ptep_get(pte)));

	/*
	 * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
	 * thing, because when zapping either it means it's dropping the
	 * page, or in TTU where the present pte will be quickly replaced
	 * with a swap pte.  There's no way of leaking the bit.
	 */
	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte.  Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}
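
/*
 * Hedged sketch of how a zap path is expected to use this (see mm/memory.c
 * for the real callers): clear the PTE first, then re-arm a marker from the
 * old value so the uffd-wp protection is not silently lost:
 *
 *	pte_t old_pte = ptep_get_and_clear(mm, addr, pte);
 *	... flush and free the page as usual ...
 *	pte_install_uffd_wp_if_needed(vma, addr, pte, old_pte);
 *
 * With CONFIG_PTE_MARKER_UFFD_WP disabled this compiles to a no-op, so
 * callers do not need their own #ifdef.
 */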

static inline bool vma_has_recency(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;

	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
		return false;

	return true;
}
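
/*
 * Illustrative use: reclaim and the fault path can consult this to decide
 * whether an access should count toward a folio's recency, e.g.
 *
 *	if (vma_has_recency(vma))
 *		folio_mark_accessed(folio);
 *
 * VMAs marked with MADV_SEQUENTIAL/MADV_RANDOM, or backed by files opened
 * with FMODE_NOREUSE (e.g. via POSIX_FADV_NOREUSE), opt out of recency
 * tracking.
 */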

#endif