1  /* SPDX-License-Identifier: GPL-2.0 */
2  #ifndef _LINUX_PGTABLE_H
3  #define _LINUX_PGTABLE_H
4  
5  #include <linux/pfn.h>
6  #include <asm/pgtable.h>
7  
8  #define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
9  #define PUD_ORDER	(PUD_SHIFT - PAGE_SHIFT)
10  
11  #ifndef __ASSEMBLY__
12  #ifdef CONFIG_MMU
13  
14  #include <linux/mm_types.h>
15  #include <linux/bug.h>
16  #include <linux/errno.h>
17  #include <asm-generic/pgtable_uffd.h>
18  #include <linux/page_table_check.h>
19  
20  #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
21  	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
22  #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
23  #endif
24  
25  /*
26   * On almost all architectures and configurations, 0 can be used as the
27   * upper ceiling to free_pgtables(): on many architectures it has the same
28   * effect as using TASK_SIZE.  However, there is one configuration which
29   * must impose a more careful limit, to avoid freeing kernel pgtables.
30   */
31  #ifndef USER_PGTABLES_CEILING
32  #define USER_PGTABLES_CEILING	0UL
33  #endif
34  
35  /*
36   * This defines the first usable user address. Platforms
37   * can override its value with custom FIRST_USER_ADDRESS
38   * defined in their respective <asm/pgtable.h>.
39   */
40  #ifndef FIRST_USER_ADDRESS
41  #define FIRST_USER_ADDRESS	0UL
42  #endif
43  
44  /*
45   * This defines the generic helper for accessing the PMD
46   * page table page. Platforms can still override it via
47   * their respective <asm/pgtable.h>.
48   */
49  #ifndef pmd_pgtable
50  #define pmd_pgtable(pmd) pmd_page(pmd)
51  #endif
52  
53  #define pmd_folio(pmd) page_folio(pmd_page(pmd))
54  
55  /*
56   * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
57   *
58   * The pXx_index() functions return the index of the entry in the page
59   * table page which would control the given virtual address.
60   *
61   * As these functions may be used by the same code for different levels of
62   * the page table folding, they are always available, regardless of the
63   * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
64   * because in such cases PTRS_PER_PxD equals 1.
65   */
66  
67  static inline unsigned long pte_index(unsigned long address)
68  {
69  	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
70  }
71  
72  #ifndef pmd_index
73  static inline unsigned long pmd_index(unsigned long address)
74  {
75  	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
76  }
77  #define pmd_index pmd_index
78  #endif
79  
80  #ifndef pud_index
81  static inline unsigned long pud_index(unsigned long address)
82  {
83  	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
84  }
85  #define pud_index pud_index
86  #endif
87  
88  #ifndef pgd_index
89  /* Must be a compile-time constant, so implement it as a macro */
90  #define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
91  #endif
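/*
 * Illustrative sketch (not used by anything in this header): the index
 * helpers above decompose a virtual address into one index per table level,
 * e.g. for some address @addr:
 *
 *	pgd_index(addr);	// slot in the PGD page
 *	pud_index(addr);	// slot in the PUD page (0 if the level is folded)
 *	pmd_index(addr);	// slot in the PMD page (0 if the level is folded)
 *	pte_index(addr);	// slot in the PTE page
 *
 * Because folded levels always yield index 0, the same walking code works
 * for any CONFIG_PGTABLE_LEVELS value.
 */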
92  
93  #ifndef pte_offset_kernel
94  static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
95  {
96  	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
97  }
98  #define pte_offset_kernel pte_offset_kernel
99  #endif
100  
101  #ifdef CONFIG_HIGHPTE
102  #define __pte_map(pmd, address) \
103  	((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
104  #define pte_unmap(pte)	do {	\
105  	kunmap_local((pte));	\
106  	rcu_read_unlock();	\
107  } while (0)
108  #else
109  static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
110  {
111  	return pte_offset_kernel(pmd, address);
112  }
113  static inline void pte_unmap(pte_t *pte)
114  {
115  	rcu_read_unlock();
116  }
117  #endif
118  
119  void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
120  
121  /* Find an entry in the second-level page table.. */
122  #ifndef pmd_offset
123  static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
124  {
125  	return pud_pgtable(*pud) + pmd_index(address);
126  }
127  #define pmd_offset pmd_offset
128  #endif
129  
130  #ifndef pud_offset
131  static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
132  {
133  	return p4d_pgtable(*p4d) + pud_index(address);
134  }
135  #define pud_offset pud_offset
136  #endif
137  
138  static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
139  {
140  	return (pgd + pgd_index(address));
141  }
142  
143  /*
144   * a shortcut to get a pgd_t in a given mm
145   */
146  #ifndef pgd_offset
147  #define pgd_offset(mm, address)		pgd_offset_pgd((mm)->pgd, (address))
148  #endif
149  
150  /*
151   * a shortcut which implies the use of the kernel's pgd, instead
152   * of a process's
153   */
154  #define pgd_offset_k(address)		pgd_offset(&init_mm, (address))
155  
156  /*
157   * In many cases it is known that a virtual address is mapped at PMD or PTE
158   * level, so instead of traversing all the page table levels, we can get a
159   * pointer to the PMD entry in user or kernel page table or translate a virtual
160   * address to the pointer in the PTE in the kernel page tables with simple
161   * helpers.
162   */
163  static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
164  {
165  	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
166  }
167  
168  static inline pmd_t *pmd_off_k(unsigned long va)
169  {
170  	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
171  }
172  
173  static inline pte_t *virt_to_kpte(unsigned long vaddr)
174  {
175  	pmd_t *pmd = pmd_off_k(vaddr);
176  
177  	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
178  }
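/*
 * Illustrative sketch only (assumes @vaddr is a kernel address mapped at PTE
 * granularity; all variable names are placeholders): translating a kernel
 * virtual address to a physical address with the shortcut helpers above:
 *
 *	pte_t *ptep = virt_to_kpte(vaddr);
 *
 *	if (ptep && pte_present(ptep_get(ptep)))
 *		phys = (pte_pfn(ptep_get(ptep)) << PAGE_SHIFT) +
 *		       (vaddr & ~PAGE_MASK);
 */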
179  
180  #ifndef pmd_young
181  static inline int pmd_young(pmd_t pmd)
182  {
183  	return 0;
184  }
185  #endif
186  
187  #ifndef pmd_dirty
188  static inline int pmd_dirty(pmd_t pmd)
189  {
190  	return 0;
191  }
192  #endif
193  
194  /*
195   * A facility to provide lazy MMU batching.  This allows PTE updates and
196   * page invalidations to be delayed until a call to leave lazy MMU mode
197   * is issued.  Some architectures may benefit from doing this, and it is
198   * beneficial for both shadow and direct mode hypervisors, which may batch
199   * the PTE updates which happen during this window.  Note that using this
200   * interface requires that read hazards be removed from the code.  A read
201   * hazard could result in the direct mode hypervisor case, since the actual
202   * write to the page tables may not yet have taken place, so reads through
203   * a raw PTE pointer after it has been modified are not guaranteed to be
204   * up to date.  This mode can only be entered and left under the protection of
205   * the page table locks for all page tables which may be modified.  In the UP
206   * case, this is required so that preemption is disabled, and in the SMP case,
207   * it must synchronize the delayed page table writes properly on other CPUs.
208   */
209  #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
210  #define arch_enter_lazy_mmu_mode()	do {} while (0)
211  #define arch_leave_lazy_mmu_mode()	do {} while (0)
212  #define arch_flush_lazy_mmu_mode()	do {} while (0)
213  #endif
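/*
 * Sketch of the intended usage pattern (placeholders throughout, and the
 * caller must hold the page table lock as described above):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte_mkold(ptep_get(ptep)));
 *	arch_leave_lazy_mmu_mode();
 *
 * Each PTE is read before it is modified, so the loop has no read-after-write
 * hazard inside the lazy window.
 */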
214  
215  #ifndef pte_batch_hint
216  /**
217   * pte_batch_hint - Number of pages that can be added to batch without scanning.
218   * @ptep: Page table pointer for the entry.
219   * @pte: Page table entry.
220   *
221   * Some architectures know that a set of contiguous ptes all map the same
222   * contiguous memory with the same permissions. In this case, it can provide a
223   * hint to aid pte batching without the core code needing to scan every pte.
224   *
225   * An architecture implementation may ignore the PTE accessed state. Further,
226   * the dirty state must apply atomically to all the PTEs described by the hint.
227   *
228   * May be overridden by the architecture, else pte_batch_hint is always 1.
229   */
230  static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
231  {
232  	return 1;
233  }
234  #endif
235  
236  #ifndef pte_advance_pfn
237  static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
238  {
239  	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
240  }
241  #endif
242  
243  #define pte_next_pfn(pte) pte_advance_pfn(pte, 1)
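/*
 * Sketch of how a batching caller might consume the hint (one possible shape,
 * not a prescribed API; all names are placeholders):
 *
 *	while (nr_left) {
 *		unsigned int nr = min(nr_left, pte_batch_hint(ptep, pte));
 *
 *		... handle 'nr' ptes as a single batch ...
 *		ptep += nr;
 *		pte = pte_advance_pfn(pte, nr);
 *		nr_left -= nr;
 *	}
 *
 * With the generic pte_batch_hint() of 1 this degenerates to a per-pte walk.
 */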
244  
245  #ifndef set_ptes
246  /**
247   * set_ptes - Map consecutive pages to a contiguous range of addresses.
248   * @mm: Address space to map the pages into.
249   * @addr: Address to map the first page at.
250   * @ptep: Page table pointer for the first entry.
251   * @pte: Page table entry for the first page.
252   * @nr: Number of pages to map.
253   *
254   * When nr == 1, the initial state of the PTE may be present or not present,
255   * and so may the new state. When nr > 1, the initial state of all PTEs must
256   * be not present, and the new state must be present.
257   *
258   * May be overridden by the architecture, or the architecture can define
259   * set_pte() and PFN_PTE_SHIFT.
260   *
261   * Context: The caller holds the page table lock.  The pages all belong
262   * to the same folio.  The PTEs are all in the same PMD.
263   */
264  static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
265  		pte_t *ptep, pte_t pte, unsigned int nr)
266  {
267  	page_table_check_ptes_set(mm, ptep, pte, nr);
268  
269  	arch_enter_lazy_mmu_mode();
270  	for (;;) {
271  		set_pte(ptep, pte);
272  		if (--nr == 0)
273  			break;
274  		ptep++;
275  		pte = pte_next_pfn(pte);
276  	}
277  	arch_leave_lazy_mmu_mode();
278  }
279  #endif
280  #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
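/*
 * Illustrative sketch (assumes a not-yet-present mapping of a whole small
 * folio is being established; names are placeholders):
 *
 *	pte_t pte = mk_pte(&folio->page, vma->vm_page_prot);
 *
 *	set_ptes(vma->vm_mm, addr, ptep, pte, folio_nr_pages(folio));
 *
 * All of the PTEs must lie in one PMD table and the caller must hold the
 * page table lock, as documented above.
 */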
281  
282  #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
283  extern int ptep_set_access_flags(struct vm_area_struct *vma,
284  				 unsigned long address, pte_t *ptep,
285  				 pte_t entry, int dirty);
286  #endif
287  
288  #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
289  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
290  extern int pmdp_set_access_flags(struct vm_area_struct *vma,
291  				 unsigned long address, pmd_t *pmdp,
292  				 pmd_t entry, int dirty);
293  extern int pudp_set_access_flags(struct vm_area_struct *vma,
294  				 unsigned long address, pud_t *pudp,
295  				 pud_t entry, int dirty);
296  #else
297  static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
298  					unsigned long address, pmd_t *pmdp,
299  					pmd_t entry, int dirty)
300  {
301  	BUILD_BUG();
302  	return 0;
303  }
304  static inline int pudp_set_access_flags(struct vm_area_struct *vma,
305  					unsigned long address, pud_t *pudp,
306  					pud_t entry, int dirty)
307  {
308  	BUILD_BUG();
309  	return 0;
310  }
311  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
312  #endif
313  
314  #ifndef ptep_get
315  static inline pte_t ptep_get(pte_t *ptep)
316  {
317  	return READ_ONCE(*ptep);
318  }
319  #endif
320  
321  #ifndef pmdp_get
322  static inline pmd_t pmdp_get(pmd_t *pmdp)
323  {
324  	return READ_ONCE(*pmdp);
325  }
326  #endif
327  
328  #ifndef pudp_get
329  static inline pud_t pudp_get(pud_t *pudp)
330  {
331  	return READ_ONCE(*pudp);
332  }
333  #endif
334  
335  #ifndef p4dp_get
336  static inline p4d_t p4dp_get(p4d_t *p4dp)
337  {
338  	return READ_ONCE(*p4dp);
339  }
340  #endif
341  
342  #ifndef pgdp_get
343  static inline pgd_t pgdp_get(pgd_t *pgdp)
344  {
345  	return READ_ONCE(*pgdp);
346  }
347  #endif
348  
349  #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
350  static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
351  					    unsigned long address,
352  					    pte_t *ptep)
353  {
354  	pte_t pte = ptep_get(ptep);
355  	int r = 1;
356  	if (!pte_young(pte))
357  		r = 0;
358  	else
359  		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
360  	return r;
361  }
362  #endif
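/*
 * Sketch: page aging typically tests and clears the accessed bit to estimate
 * working-set membership, e.g.:
 *
 *	if (ptep_test_and_clear_young(vma, addr, ptep))
 *		referenced = true;	// accessed since the last scan
 *
 * This non-flushing variant is suitable when a stale TLB entry re-marking the
 * page young is acceptable; ptep_clear_flush_young() below also flushes.
 */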
363  
364  #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
365  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
366  static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
367  					    unsigned long address,
368  					    pmd_t *pmdp)
369  {
370  	pmd_t pmd = *pmdp;
371  	int r = 1;
372  	if (!pmd_young(pmd))
373  		r = 0;
374  	else
375  		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
376  	return r;
377  }
378  #else
379  static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
380  					    unsigned long address,
381  					    pmd_t *pmdp)
382  {
383  	BUILD_BUG();
384  	return 0;
385  }
386  #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
387  #endif
388  
389  #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
390  int ptep_clear_flush_young(struct vm_area_struct *vma,
391  			   unsigned long address, pte_t *ptep);
392  #endif
393  
394  #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
395  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
396  extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
397  				  unsigned long address, pmd_t *pmdp);
398  #else
399  /*
400   * Although relevant to THP only, this API is called from generic rmap code
401   * under PageTransHuge(), hence it needs a dummy implementation for !THP
402   */
403  static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
404  					 unsigned long address, pmd_t *pmdp)
405  {
406  	BUILD_BUG();
407  	return 0;
408  }
409  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
410  #endif
411  
412  #ifndef arch_has_hw_nonleaf_pmd_young
413  /*
414   * Return whether the accessed bit in non-leaf PMD entries is supported on the
415   * local CPU.
416   */
417  static inline bool arch_has_hw_nonleaf_pmd_young(void)
418  {
419  	return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
420  }
421  #endif
422  
423  #ifndef arch_has_hw_pte_young
424  /*
425   * Return whether the accessed bit is supported on the local CPU.
426   *
427   * This stub assumes accessing through an old PTE triggers a page fault.
428   * Architectures that automatically set the access bit should override this stub.
429   */
430  static inline bool arch_has_hw_pte_young(void)
431  {
432  	return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
433  }
434  #endif
435  
436  #ifndef arch_check_zapped_pte
437  static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
438  					 pte_t pte)
439  {
440  }
441  #endif
442  
443  #ifndef arch_check_zapped_pmd
444  static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
445  					 pmd_t pmd)
446  {
447  }
448  #endif
449  
450  #ifndef arch_check_zapped_pud
451  static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
452  {
453  }
454  #endif
455  
456  #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
457  static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
458  				       unsigned long address,
459  				       pte_t *ptep)
460  {
461  	pte_t pte = ptep_get(ptep);
462  	pte_clear(mm, address, ptep);
463  	page_table_check_pte_clear(mm, pte);
464  	return pte;
465  }
466  #endif
467  
468  #ifndef clear_young_dirty_ptes
469  /**
470   * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the
471   *		same folio as old/clean.
472   * @mm: Address space the pages are mapped into.
473   * @addr: Address the first page is mapped at.
474   * @ptep: Page table pointer for the first entry.
475   * @nr: Number of entries to mark old/clean.
476   * @flags: Flags to modify the PTE batch semantics.
477   *
478   * May be overridden by the architecture; otherwise, implemented by
479   * get_and_clear/modify/set for each pte in the range.
480   *
481   * Note that PTE bits in the PTE range besides the PFN can differ. For example,
482   * some PTEs might be write-protected.
483   *
484   * Context: The caller holds the page table lock.  The PTEs map consecutive
485   * pages that belong to the same folio.  The PTEs are all in the same PMD.
486   */
487  static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
488  					  unsigned long addr, pte_t *ptep,
489  					  unsigned int nr, cydp_t flags)
490  {
491  	pte_t pte;
492  
493  	for (;;) {
494  		if (flags == CYDP_CLEAR_YOUNG)
495  			ptep_test_and_clear_young(vma, addr, ptep);
496  		else {
497  			pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
498  			if (flags & CYDP_CLEAR_YOUNG)
499  				pte = pte_mkold(pte);
500  			if (flags & CYDP_CLEAR_DIRTY)
501  				pte = pte_mkclean(pte);
502  			set_pte_at(vma->vm_mm, addr, ptep, pte);
503  		}
504  		if (--nr == 0)
505  			break;
506  		ptep++;
507  		addr += PAGE_SIZE;
508  	}
509  }
510  #endif
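/*
 * Sketch (placeholders throughout): an MADV_FREE-style operation can mark a
 * whole batch of PTEs old and clean in a single call:
 *
 *	clear_young_dirty_ptes(vma, addr, ptep, nr,
 *			       CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY);
 */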
511  
512  static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
513  			      pte_t *ptep)
514  {
515  	ptep_get_and_clear(mm, addr, ptep);
516  }
517  
518  #ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
519  /*
520   * For walking the pagetables without holding any locks.  Some architectures
521   * (eg x86-32 PAE) cannot load the entries atomically without using expensive
522   * instructions.  We are guaranteed that a PTE will only either go from not
523   * present to present, or present to not present -- it will not switch to a
524   * completely different present page without a TLB flush in between, which we
525   * are blocking by holding interrupts off.
526   *
527   * Setting ptes from not present to present goes:
528   *
529   *   ptep->pte_high = h;
530   *   smp_wmb();
531   *   ptep->pte_low = l;
532   *
533   * And present to not present goes:
534   *
535   *   ptep->pte_low = 0;
536   *   smp_wmb();
537   *   ptep->pte_high = 0;
538   *
539   * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
540   * We load pte_high *after* loading pte_low, which ensures we don't see an older
541   * value of pte_high.  *Then* we recheck pte_low, which ensures that we haven't
542   * picked up a changed pte high. We might have gotten rubbish values from
543   * pte_low and pte_high, but we are guaranteed that pte_low will not have the
544   * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
545   * operates on present ptes we're safe.
546   */
547  static inline pte_t ptep_get_lockless(pte_t *ptep)
548  {
549  	pte_t pte;
550  
551  	do {
552  		pte.pte_low = ptep->pte_low;
553  		smp_rmb();
554  		pte.pte_high = ptep->pte_high;
555  		smp_rmb();
556  	} while (unlikely(pte.pte_low != ptep->pte_low));
557  
558  	return pte;
559  }
560  #define ptep_get_lockless ptep_get_lockless
561  
562  #if CONFIG_PGTABLE_LEVELS > 2
563  static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
564  {
565  	pmd_t pmd;
566  
567  	do {
568  		pmd.pmd_low = pmdp->pmd_low;
569  		smp_rmb();
570  		pmd.pmd_high = pmdp->pmd_high;
571  		smp_rmb();
572  	} while (unlikely(pmd.pmd_low != pmdp->pmd_low));
573  
574  	return pmd;
575  }
576  #define pmdp_get_lockless pmdp_get_lockless
577  #define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
578  #endif /* CONFIG_PGTABLE_LEVELS > 2 */
579  #endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
580  
581  /*
582   * We require that the PTE can be read atomically.
583   */
584  #ifndef ptep_get_lockless
585  static inline pte_t ptep_get_lockless(pte_t *ptep)
586  {
587  	return ptep_get(ptep);
588  }
589  #endif
590  
591  #ifndef pmdp_get_lockless
592  static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
593  {
594  	return pmdp_get(pmdp);
595  }
596  static inline void pmdp_get_lockless_sync(void)
597  {
598  }
599  #endif
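/*
 * Sketch of the lockless-walk pattern these helpers support (GUP-fast style;
 * names are placeholders):
 *
 *	pte = ptep_get_lockless(ptep);
 *	... speculatively take a reference on the page pte points at ...
 *	if (!pte_same(pte, ptep_get(ptep)))
 *		... the entry changed underneath us: drop the ref and retry ...
 *
 * Interrupts stay disabled across the walk so that a concurrent unmap cannot
 * complete its TLB flush (or IPI equivalent) while the entry is being used.
 */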
600  
601  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
602  #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
603  static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
604  					    unsigned long address,
605  					    pmd_t *pmdp)
606  {
607  	pmd_t pmd = *pmdp;
608  
609  	pmd_clear(pmdp);
610  	page_table_check_pmd_clear(mm, pmd);
611  
612  	return pmd;
613  }
614  #endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
615  #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
616  static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
617  					    unsigned long address,
618  					    pud_t *pudp)
619  {
620  	pud_t pud = *pudp;
621  
622  	pud_clear(pudp);
623  	page_table_check_pud_clear(mm, pud);
624  
625  	return pud;
626  }
627  #endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
628  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
629  
630  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
631  #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
632  static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
633  					    unsigned long address, pmd_t *pmdp,
634  					    int full)
635  {
636  	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
637  }
638  #endif
639  
640  #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
641  static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
642  					    unsigned long address, pud_t *pudp,
643  					    int full)
644  {
645  	return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
646  }
647  #endif
648  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
649  
650  #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
651  static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
652  					    unsigned long address, pte_t *ptep,
653  					    int full)
654  {
655  	return ptep_get_and_clear(mm, address, ptep);
656  }
657  #endif
658  
659  #ifndef get_and_clear_full_ptes
660  /**
661   * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of
662   *			     the same folio, collecting dirty/accessed bits.
663   * @mm: Address space the pages are mapped into.
664   * @addr: Address the first page is mapped at.
665   * @ptep: Page table pointer for the first entry.
666   * @nr: Number of entries to clear.
667   * @full: Whether we are clearing a full mm.
668   *
669   * May be overridden by the architecture; otherwise, implemented as a simple
670   * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the
671   * returned PTE.
672   *
673   * Note that PTE bits in the PTE range besides the PFN can differ. For example,
674   * some PTEs might be write-protected.
675   *
676   * Context: The caller holds the page table lock.  The PTEs map consecutive
677   * pages that belong to the same folio.  The PTEs are all in the same PMD.
678   */
679  static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
680  		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
681  {
682  	pte_t pte, tmp_pte;
683  
684  	pte = ptep_get_and_clear_full(mm, addr, ptep, full);
685  	while (--nr) {
686  		ptep++;
687  		addr += PAGE_SIZE;
688  		tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full);
689  		if (pte_dirty(tmp_pte))
690  			pte = pte_mkdirty(pte);
691  		if (pte_young(tmp_pte))
692  			pte = pte_mkyoung(pte);
693  	}
694  	return pte;
695  }
696  #endif
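/*
 * Sketch of a typical caller (e.g. zapping a fully mapped folio; names are
 * placeholders and 'full' mirrors the caller's full-mm teardown state):
 *
 *	pte = get_and_clear_full_ptes(mm, addr, ptep, nr, full);
 *	if (pte_dirty(pte))
 *		folio_mark_dirty(folio);
 *	if (pte_young(pte))
 *		folio_mark_accessed(folio);
 *
 * The accumulated dirty/accessed state stands in for the whole batch.
 */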
697  
698  #ifndef clear_full_ptes
699  /**
700   * clear_full_ptes - Clear present PTEs that map consecutive pages of the same
701   *		     folio.
702   * @mm: Address space the pages are mapped into.
703   * @addr: Address the first page is mapped at.
704   * @ptep: Page table pointer for the first entry.
705   * @nr: Number of entries to clear.
706   * @full: Whether we are clearing a full mm.
707   *
708   * May be overridden by the architecture; otherwise, implemented as a simple
709   * loop over ptep_get_and_clear_full().
710   *
711   * Note that PTE bits in the PTE range besides the PFN can differ. For example,
712   * some PTEs might be write-protected.
713   *
714   * Context: The caller holds the page table lock.  The PTEs map consecutive
715   * pages that belong to the same folio.  The PTEs are all in the same PMD.
716   */
717  static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
718  		pte_t *ptep, unsigned int nr, int full)
719  {
720  	for (;;) {
721  		ptep_get_and_clear_full(mm, addr, ptep, full);
722  		if (--nr == 0)
723  			break;
724  		ptep++;
725  		addr += PAGE_SIZE;
726  	}
727  }
728  #endif
729  
730  /*
731   * If two threads concurrently fault at the same page, the thread that
732   * won the race updates the PTE and its local TLB/cache. The other thread
733   * gives up, simply does nothing, and continues; on architectures where
734   * software can update the TLB, the local TLB can be updated here to avoid
735   * the next page fault. This function updates the TLB only; it does nothing
736   * with the cache or anything else, which is what distinguishes it from update_mmu_cache().
737   */
738  #ifndef update_mmu_tlb_range
739  static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
740  				unsigned long address, pte_t *ptep, unsigned int nr)
741  {
742  }
743  #endif
744  
745  static inline void update_mmu_tlb(struct vm_area_struct *vma,
746  				unsigned long address, pte_t *ptep)
747  {
748  	update_mmu_tlb_range(vma, address, ptep, 1);
749  }
750  
751  /*
752   * Some architectures may be able to avoid expensive synchronization
753   * primitives when modifications are made to PTEs which are already
754   * not present, or during destruction of the address space.
755   */
756  #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
757  static inline void pte_clear_not_present_full(struct mm_struct *mm,
758  					      unsigned long address,
759  					      pte_t *ptep,
760  					      int full)
761  {
762  	pte_clear(mm, address, ptep);
763  }
764  #endif
765  
766  #ifndef clear_not_present_full_ptes
767  /**
768   * clear_not_present_full_ptes - Clear multiple not present PTEs which are
769   *				 consecutive in the pgtable.
770   * @mm: Address space the ptes represent.
771   * @addr: Address of the first pte.
772   * @ptep: Page table pointer for the first entry.
773   * @nr: Number of entries to clear.
774   * @full: Whether we are clearing a full mm.
775   *
776   * May be overridden by the architecture; otherwise, implemented as a simple
777   * loop over pte_clear_not_present_full().
778   *
779   * Context: The caller holds the page table lock.  The PTEs are all not present.
780   * The PTEs are all in the same PMD.
781   */
782  static inline void clear_not_present_full_ptes(struct mm_struct *mm,
783  		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
784  {
785  	for (;;) {
786  		pte_clear_not_present_full(mm, addr, ptep, full);
787  		if (--nr == 0)
788  			break;
789  		ptep++;
790  		addr += PAGE_SIZE;
791  	}
792  }
793  #endif
794  
795  #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
796  extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
797  			      unsigned long address,
798  			      pte_t *ptep);
799  #endif
800  
801  #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
802  extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
803  			      unsigned long address,
804  			      pmd_t *pmdp);
805  extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
806  			      unsigned long address,
807  			      pud_t *pudp);
808  #endif
809  
810  #ifndef pte_mkwrite
811  static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
812  {
813  	return pte_mkwrite_novma(pte);
814  }
815  #endif
816  
817  #if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
818  static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
819  {
820  	return pmd_mkwrite_novma(pmd);
821  }
822  #endif
823  
824  #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
825  struct mm_struct;
826  static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
827  {
828  	pte_t old_pte = ptep_get(ptep);
829  	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
830  }
831  #endif
832  
833  #ifndef wrprotect_ptes
834  /**
835   * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
836   *		    folio.
837   * @mm: Address space the pages are mapped into.
838   * @addr: Address the first page is mapped at.
839   * @ptep: Page table pointer for the first entry.
840   * @nr: Number of entries to write-protect.
841   *
842   * May be overridden by the architecture; otherwise, implemented as a simple
843   * loop over ptep_set_wrprotect().
844   *
845   * Note that PTE bits in the PTE range besides the PFN can differ. For example,
846   * some PTEs might be write-protected.
847   *
848   * Context: The caller holds the page table lock.  The PTEs map consecutive
849   * pages that belong to the same folio.  The PTEs are all in the same PMD.
850   */
851  static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
852  		pte_t *ptep, unsigned int nr)
853  {
854  	for (;;) {
855  		ptep_set_wrprotect(mm, addr, ptep);
856  		if (--nr == 0)
857  			break;
858  		ptep++;
859  		addr += PAGE_SIZE;
860  	}
861  }
862  #endif
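/*
 * Sketch: when duplicating an anonymous mapping for fork(), the parent's PTEs
 * covering a folio can be write-protected in one go before the entries are
 * copied to the child (names are placeholders):
 *
 *	wrprotect_ptes(src_mm, addr, src_ptep, nr);
 *
 * so that a later write by either process takes a copy-on-write fault.
 */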
863  
864  /*
865   * On some architectures the hardware does not set the page access bit when a
866   * memory page is accessed; it is the responsibility of software to set it,
867   * which incurs an extra page fault penalty for tracking the access bit. As an
868   * optimization, the access bit can be set during the whole page fault flow on
869   * these architectures. To differentiate it from pte_mkyoung(), this macro is
870   * used on platforms where software maintains the page access bit.
871   */
872  #ifndef pte_sw_mkyoung
873  static inline pte_t pte_sw_mkyoung(pte_t pte)
874  {
875  	return pte;
876  }
877  #define pte_sw_mkyoung	pte_sw_mkyoung
878  #endif
879  
880  #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
881  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
882  static inline void pmdp_set_wrprotect(struct mm_struct *mm,
883  				      unsigned long address, pmd_t *pmdp)
884  {
885  	pmd_t old_pmd = *pmdp;
886  	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
887  }
888  #else
889  static inline void pmdp_set_wrprotect(struct mm_struct *mm,
890  				      unsigned long address, pmd_t *pmdp)
891  {
892  	BUILD_BUG();
893  }
894  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
895  #endif
896  #ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
897  #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
898  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
899  static inline void pudp_set_wrprotect(struct mm_struct *mm,
900  				      unsigned long address, pud_t *pudp)
901  {
902  	pud_t old_pud = *pudp;
903  
904  	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
905  }
906  #else
907  static inline void pudp_set_wrprotect(struct mm_struct *mm,
908  				      unsigned long address, pud_t *pudp)
909  {
910  	BUILD_BUG();
911  }
912  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
913  #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
914  #endif
915  
916  #ifndef pmdp_collapse_flush
917  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
918  extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
919  				 unsigned long address, pmd_t *pmdp);
920  #else
921  static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
922  					unsigned long address,
923  					pmd_t *pmdp)
924  {
925  	BUILD_BUG();
926  	return *pmdp;
927  }
928  #define pmdp_collapse_flush pmdp_collapse_flush
929  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
930  #endif
931  
932  #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
933  extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
934  				       pgtable_t pgtable);
935  #endif
936  
937  #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
938  extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
939  #endif
940  
941  #ifndef arch_needs_pgtable_deposit
942  #define arch_needs_pgtable_deposit() (false)
943  #endif
944  
945  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
946  /*
947   * This is an implementation of pmdp_establish() that is only suitable for an
948   * architecture that doesn't have hardware dirty/accessed bits. In this case we
949   * cannot race with a CPU that sets these bits, so a non-atomic approach is fine.
950   */
951  static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
952  		unsigned long address, pmd_t *pmdp, pmd_t pmd)
953  {
954  	pmd_t old_pmd = *pmdp;
955  	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
956  	return old_pmd;
957  }
958  #endif
959  
960  #ifndef __HAVE_ARCH_PMDP_INVALIDATE
961  extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
962  			    pmd_t *pmdp);
963  #endif
964  
965  #ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
966  
967  /*
968   * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
969   * hugepage mapping in the page tables. This function is similar to
970   * pmdp_invalidate(), but should only be used if the access and dirty bits would
971   * not be cleared by the software in the new PMD value. The function ensures
972   * that hardware updates of the access and dirty bits are not lost.
973   *
974   * Doing so allows certain architectures to avoid a TLB flush in most
975   * cases. Another TLB flush might still be necessary later if the PMD update
976   * itself requires one (e.g., if protection was made stricter). Even when a
977   * TLB flush is needed because of the update, the caller may be able to
978   * batch these TLB flushing operations, so that fewer TLB flushes are
979   * needed overall.
980   */
981  extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
982  				unsigned long address, pmd_t *pmdp);
983  #endif
984  
985  #ifndef __HAVE_ARCH_PTE_SAME
986  static inline int pte_same(pte_t pte_a, pte_t pte_b)
987  {
988  	return pte_val(pte_a) == pte_val(pte_b);
989  }
990  #endif
991  
992  #ifndef __HAVE_ARCH_PTE_UNUSED
993  /*
994   * Some architectures provide facilities to virtualization guests
995   * so that they can flag allocated pages as unused. This allows the
996   * host to transparently reclaim unused pages. This function returns
997   * whether the pte's page is unused.
998   */
999  static inline int pte_unused(pte_t pte)
1000  {
1001  	return 0;
1002  }
1003  #endif
1004  
1005  #ifndef pte_access_permitted
1006  #define pte_access_permitted(pte, write) \
1007  	(pte_present(pte) && (!(write) || pte_write(pte)))
1008  #endif
1009  
1010  #ifndef pmd_access_permitted
1011  #define pmd_access_permitted(pmd, write) \
1012  	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
1013  #endif
1014  
1015  #ifndef pud_access_permitted
1016  #define pud_access_permitted(pud, write) \
1017  	(pud_present(pud) && (!(write) || pud_write(pud)))
1018  #endif
1019  
1020  #ifndef p4d_access_permitted
1021  #define p4d_access_permitted(p4d, write) \
1022  	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
1023  #endif
1024  
1025  #ifndef pgd_access_permitted
1026  #define pgd_access_permitted(pgd, write) \
1027  	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
1028  #endif
1029  
1030  #ifndef __HAVE_ARCH_PMD_SAME
1031  static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
1032  {
1033  	return pmd_val(pmd_a) == pmd_val(pmd_b);
1034  }
1035  #endif
1036  
1037  #ifndef pud_same
1038  static inline int pud_same(pud_t pud_a, pud_t pud_b)
1039  {
1040  	return pud_val(pud_a) == pud_val(pud_b);
1041  }
1042  #define pud_same pud_same
1043  #endif
1044  
1045  #ifndef __HAVE_ARCH_P4D_SAME
1046  static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
1047  {
1048  	return p4d_val(p4d_a) == p4d_val(p4d_b);
1049  }
1050  #endif
1051  
1052  #ifndef __HAVE_ARCH_PGD_SAME
1053  static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
1054  {
1055  	return pgd_val(pgd_a) == pgd_val(pgd_b);
1056  }
1057  #endif
1058  
1059  /*
1060   * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
1061   * TLB flush will be required as a result of the "set". For example, use
1062   * in scenarios where it is known ahead of time that the routine is
1063   * setting non-present entries, or re-setting an existing entry to the
1064   * same value. Otherwise, use the typical "set" helpers and flush the
1065   * TLB.
1066   */
1067  #define set_pte_safe(ptep, pte) \
1068  ({ \
1069  	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
1070  	set_pte(ptep, pte); \
1071  })
1072  
1073  #define set_pmd_safe(pmdp, pmd) \
1074  ({ \
1075  	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
1076  	set_pmd(pmdp, pmd); \
1077  })
1078  
1079  #define set_pud_safe(pudp, pud) \
1080  ({ \
1081  	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
1082  	set_pud(pudp, pud); \
1083  })
1084  
1085  #define set_p4d_safe(p4dp, p4d) \
1086  ({ \
1087  	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
1088  	set_p4d(p4dp, p4d); \
1089  })
1090  
1091  #define set_pgd_safe(pgdp, pgd) \
1092  ({ \
1093  	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
1094  	set_pgd(pgdp, pgd); \
1095  })
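/*
 * Example (sketch, assuming the architecture provides pfn_pte() and
 * PAGE_KERNEL): populating a brand-new mapping where no entry can be present
 * yet, so no TLB flush is required:
 *
 *	set_pte_safe(ptep, pfn_pte(pfn, PAGE_KERNEL));
 *
 * If the entry might already be present with a different value, use the plain
 * set_pte() and flush the TLB instead.
 */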
1096  
1097  #ifndef __HAVE_ARCH_DO_SWAP_PAGE
1098  static inline void arch_do_swap_page_nr(struct mm_struct *mm,
1099  				     struct vm_area_struct *vma,
1100  				     unsigned long addr,
1101  				     pte_t pte, pte_t oldpte,
1102  				     int nr)
1103  {
1104  
1105  }
1106  #else
1107  /*
1108   * Some architectures support metadata associated with a page. When a
1109   * page is being swapped out, this metadata must be saved so it can be
1110   * restored when the page is swapped back in. SPARC M7 and newer
1111   * processors support an ADI (Application Data Integrity) tag for the
1112   * page as metadata for the page. arch_do_swap_page() can restore this
1113   * metadata when a page is swapped back in.
1114   */
1115  static inline void arch_do_swap_page_nr(struct mm_struct *mm,
1116  					struct vm_area_struct *vma,
1117  					unsigned long addr,
1118  					pte_t pte, pte_t oldpte,
1119  					int nr)
1120  {
1121  	for (int i = 0; i < nr; i++) {
1122  		arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE,
1123  				pte_advance_pfn(pte, i),
1124  				pte_advance_pfn(oldpte, i));
1125  	}
1126  }
1127  #endif
1128  
1129  #ifndef __HAVE_ARCH_UNMAP_ONE
1130  /*
1131   * Some architectures support metadata associated with a page. When a
1132   * page is being swapped out, this metadata must be saved so it can be
1133   * restored when the page is swapped back in. SPARC M7 and newer
1134   * processors support an ADI (Application Data Integrity) tag for the
1135   * page as metadata for the page. arch_unmap_one() can save this
1136   * metadata on a swap-out of a page.
1137   */
1138  static inline int arch_unmap_one(struct mm_struct *mm,
1139  				  struct vm_area_struct *vma,
1140  				  unsigned long addr,
1141  				  pte_t orig_pte)
1142  {
1143  	return 0;
1144  }
1145  #endif
1146  
1147  /*
1148   * Allow architectures to preserve additional metadata associated with
1149   * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
1150   * prototypes must be defined in the arch-specific asm/pgtable.h file.
1151   */
1152  #ifndef __HAVE_ARCH_PREPARE_TO_SWAP
1153  static inline int arch_prepare_to_swap(struct folio *folio)
1154  {
1155  	return 0;
1156  }
1157  #endif
1158  
1159  #ifndef __HAVE_ARCH_SWAP_INVALIDATE
1160  static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
1161  {
1162  }
1163  
1164  static inline void arch_swap_invalidate_area(int type)
1165  {
1166  }
1167  #endif
1168  
1169  #ifndef __HAVE_ARCH_SWAP_RESTORE
1170  static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
1171  {
1172  }
1173  #endif
1174  
1175  #ifndef __HAVE_ARCH_PGD_OFFSET_GATE
1176  #define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
1177  #endif
1178  
1179  #ifndef __HAVE_ARCH_MOVE_PTE
1180  #define move_pte(pte, old_addr, new_addr)	(pte)
1181  #endif
1182  
1183  #ifndef pte_accessible
1184  # define pte_accessible(mm, pte)	((void)(pte), 1)
1185  #endif
1186  
1187  #ifndef flush_tlb_fix_spurious_fault
1188  #define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
1189  #endif
1190  
1191  /*
1192   * When walking page tables, get the address of the next boundary,
1193   * or the end address of the range if that comes earlier.  Although no
1194   * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
1195   */
1196  
1197  #define pgd_addr_end(addr, end)						\
1198  ({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
1199  	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1200  })
1201  
1202  #ifndef p4d_addr_end
1203  #define p4d_addr_end(addr, end)						\
1204  ({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
1205  	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1206  })
1207  #endif
1208  
1209  #ifndef pud_addr_end
1210  #define pud_addr_end(addr, end)						\
1211  ({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
1212  	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1213  })
1214  #endif
1215  
1216  #ifndef pmd_addr_end
1217  #define pmd_addr_end(addr, end)						\
1218  ({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
1219  	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1220  })
1221  #endif
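/*
 * Sketch of the canonical walking loop these helpers support: iterate over
 * [addr, end) one PMD table at a time without stepping past 'end':
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	do {
 *		unsigned long next = pmd_addr_end(addr, end);
 *
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... operate on the PTE table at *pmd for [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 */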
1222  
1223  /*
1224   * When walking page tables, we usually want to skip any p?d_none entries;
1225   * and any p?d_bad entries - reporting the error before resetting to none.
1226   * Do the tests inline, but report and clear the bad entry in mm/memory.c.
1227   */
1228  void pgd_clear_bad(pgd_t *);
1229  
1230  #ifndef __PAGETABLE_P4D_FOLDED
1231  void p4d_clear_bad(p4d_t *);
1232  #else
1233  #define p4d_clear_bad(p4d)        do { } while (0)
1234  #endif
1235  
1236  #ifndef __PAGETABLE_PUD_FOLDED
1237  void pud_clear_bad(pud_t *);
1238  #else
1239  #define pud_clear_bad(p4d)        do { } while (0)
1240  #endif
1241  
1242  void pmd_clear_bad(pmd_t *);
1243  
1244  static inline int pgd_none_or_clear_bad(pgd_t *pgd)
1245  {
1246  	if (pgd_none(*pgd))
1247  		return 1;
1248  	if (unlikely(pgd_bad(*pgd))) {
1249  		pgd_clear_bad(pgd);
1250  		return 1;
1251  	}
1252  	return 0;
1253  }
1254  
1255  static inline int p4d_none_or_clear_bad(p4d_t *p4d)
1256  {
1257  	if (p4d_none(*p4d))
1258  		return 1;
1259  	if (unlikely(p4d_bad(*p4d))) {
1260  		p4d_clear_bad(p4d);
1261  		return 1;
1262  	}
1263  	return 0;
1264  }
1265  
1266  static inline int pud_none_or_clear_bad(pud_t *pud)
1267  {
1268  	if (pud_none(*pud))
1269  		return 1;
1270  	if (unlikely(pud_bad(*pud))) {
1271  		pud_clear_bad(pud);
1272  		return 1;
1273  	}
1274  	return 0;
1275  }
1276  
1277  static inline int pmd_none_or_clear_bad(pmd_t *pmd)
1278  {
1279  	if (pmd_none(*pmd))
1280  		return 1;
1281  	if (unlikely(pmd_bad(*pmd))) {
1282  		pmd_clear_bad(pmd);
1283  		return 1;
1284  	}
1285  	return 0;
1286  }
1287  
1288  static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
1289  					     unsigned long addr,
1290  					     pte_t *ptep)
1291  {
1292  	/*
1293  	 * Get the current pte state, but zero it out to make it
1294  	 * non-present, preventing the hardware from asynchronously
1295  	 * updating it.
1296  	 */
1297  	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
1298  }
1299  
1300  static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
1301  					     unsigned long addr,
1302  					     pte_t *ptep, pte_t pte)
1303  {
1304  	/*
1305  	 * The pte is non-present, so there's no hardware state to
1306  	 * preserve.
1307  	 */
1308  	set_pte_at(vma->vm_mm, addr, ptep, pte);
1309  }
1310  
1311  #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1312  /*
1313   * Start a pte protection read-modify-write transaction, which
1314   * protects against asynchronous hardware modifications to the pte.
1315   * The intention is not to prevent the hardware from making pte
1316   * updates, but to prevent any updates it may make from being lost.
1317   *
1318   * This does not protect against other software modifications of the
1319   * pte; the appropriate pte lock must be held over the transaction.
1320   *
1321   * Note that this interface is intended to be batchable, meaning that
1322   * ptep_modify_prot_commit may not actually update the pte, but merely
1323   * queue the update to be done at some later time.  The update must be
1324   * actually committed before the pte lock is released, however.
1325   */
1326  static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
1327  					   unsigned long addr,
1328  					   pte_t *ptep)
1329  {
1330  	return __ptep_modify_prot_start(vma, addr, ptep);
1331  }
1332  
1333  /*
1334   * Commit an update to a pte, leaving any hardware-controlled bits in
1335   * the PTE unmodified.
1336   */
1337  static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
1338  					   unsigned long addr,
1339  					   pte_t *ptep, pte_t old_pte, pte_t pte)
1340  {
1341  	__ptep_modify_prot_commit(vma, addr, ptep, pte);
1342  }
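/*
 * Sketch of the intended transaction pattern (e.g. tightening protections on
 * one pte while holding the pte lock; 'newprot' is a placeholder):
 *
 *	old = ptep_modify_prot_start(vma, addr, ptep);
 *	new = pte_modify(old, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, old, new);
 *
 * Hardware A/D updates racing with the transaction are captured in 'old' by
 * the start operation rather than being lost.
 */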
1343  #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
1344  #endif /* CONFIG_MMU */
1345  
1346  /*
1347   * No-op macros that just return the current protection value. Defined here
1348   * because these macros can be used even if CONFIG_MMU is not defined.
1349   */
1350  
1351  #ifndef pgprot_nx
1352  #define pgprot_nx(prot)	(prot)
1353  #endif
1354  
1355  #ifndef pgprot_noncached
1356  #define pgprot_noncached(prot)	(prot)
1357  #endif
1358  
1359  #ifndef pgprot_writecombine
1360  #define pgprot_writecombine pgprot_noncached
1361  #endif
1362  
1363  #ifndef pgprot_writethrough
1364  #define pgprot_writethrough pgprot_noncached
1365  #endif
1366  
1367  #ifndef pgprot_device
1368  #define pgprot_device pgprot_noncached
1369  #endif
1370  
1371  #ifndef pgprot_mhp
1372  #define pgprot_mhp(prot)	(prot)
1373  #endif
1374  
1375  #ifdef CONFIG_MMU
1376  #ifndef pgprot_modify
1377  #define pgprot_modify pgprot_modify
1378  static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
1379  {
1380  	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
1381  		newprot = pgprot_noncached(newprot);
1382  	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
1383  		newprot = pgprot_writecombine(newprot);
1384  	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
1385  		newprot = pgprot_device(newprot);
1386  	return newprot;
1387  }
1388  #endif
1389  #endif /* CONFIG_MMU */
1390  
1391  #ifndef pgprot_encrypted
1392  #define pgprot_encrypted(prot)	(prot)
1393  #endif
1394  
1395  #ifndef pgprot_decrypted
1396  #define pgprot_decrypted(prot)	(prot)
1397  #endif
1398  
1399  /*
1400   * A facility to provide batching of the reload of page tables and
1401   * other process state with the actual context switch code for
1402   * paravirtualized guests.  By convention, only one of the batched
1403   * update (lazy) modes (CPU, MMU) should be active at any given time,
1404   * entry should never be nested, and entry and exits should always be
1405   * paired.  This is for sanity of maintaining and reasoning about the
1406   * kernel code.  In this case, the exit (end of the context switch) is
1407   * in architecture-specific code, and so doesn't need a generic
1408   * definition.
1409   */
1410  #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
1411  #define arch_start_context_switch(prev)	do {} while (0)
1412  #endif
1413  
1414  #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
1415  #ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
1416  static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1417  {
1418  	return pmd;
1419  }
1420  
1421  static inline int pmd_swp_soft_dirty(pmd_t pmd)
1422  {
1423  	return 0;
1424  }
1425  
1426  static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1427  {
1428  	return pmd;
1429  }
1430  #endif
1431  #else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
1432  static inline int pte_soft_dirty(pte_t pte)
1433  {
1434  	return 0;
1435  }
1436  
1437  static inline int pmd_soft_dirty(pmd_t pmd)
1438  {
1439  	return 0;
1440  }
1441  
1442  static inline pte_t pte_mksoft_dirty(pte_t pte)
1443  {
1444  	return pte;
1445  }
1446  
1447  static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
1448  {
1449  	return pmd;
1450  }
1451  
1452  static inline pte_t pte_clear_soft_dirty(pte_t pte)
1453  {
1454  	return pte;
1455  }
1456  
1457  static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
1458  {
1459  	return pmd;
1460  }
1461  
1462  static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
1463  {
1464  	return pte;
1465  }
1466  
1467  static inline int pte_swp_soft_dirty(pte_t pte)
1468  {
1469  	return 0;
1470  }
1471  
1472  static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
1473  {
1474  	return pte;
1475  }
1476  
1477  static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1478  {
1479  	return pmd;
1480  }
1481  
1482  static inline int pmd_swp_soft_dirty(pmd_t pmd)
1483  {
1484  	return 0;
1485  }
1486  
1487  static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1488  {
1489  	return pmd;
1490  }
1491  #endif
1492  
1493  #ifndef __HAVE_PFNMAP_TRACKING
1494  /*
1495   * Interfaces that can be used by architecture code to keep track of the
1496   * memory type of pfn mappings specified by remap_pfn_range() and
1497   * vmf_insert_pfn().
1498   */
1499  
1500  /*
1501   * track_pfn_remap is called when a _new_ pfn mapping is being established
1502   * by remap_pfn_range() for the physical range indicated by pfn and size.
1503   */
1504  static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
1505  				  unsigned long pfn, unsigned long addr,
1506  				  unsigned long size)
1507  {
1508  	return 0;
1509  }
1510  
1511  /*
1512   * track_pfn_insert is called when a _new_ single pfn is established
1513   * by vmf_insert_pfn().
1514   */
1515  static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
1516  				    pfn_t pfn)
1517  {
1518  }
1519  
1520  /*
1521   * track_pfn_copy is called when a vma that covers the pfnmap gets
1522   * copied through copy_page_range().
1523   */
1524  static inline int track_pfn_copy(struct vm_area_struct *vma)
1525  {
1526  	return 0;
1527  }
1528  
1529  /*
1530   * untrack_pfn is called while unmapping a pfnmap for a region.
1531   * untrack can be called for a specific region indicated by pfn and size or
1532   * can be for the entire vma (in which case pfn, size are zero).
1533   */
1534  static inline void untrack_pfn(struct vm_area_struct *vma,
1535  			       unsigned long pfn, unsigned long size,
1536  			       bool mm_wr_locked)
1537  {
1538  }
1539  
1540  /*
1541   * untrack_pfn_clear is called when mremapping a pfnmap to a new region,
1542   * or when copying the page tables fails while duplicating a vm area.
1543   */
1544  static inline void untrack_pfn_clear(struct vm_area_struct *vma)
1545  {
1546  }
1547  #else
1548  extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
1549  			   unsigned long pfn, unsigned long addr,
1550  			   unsigned long size);
1551  extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
1552  			     pfn_t pfn);
1553  extern int track_pfn_copy(struct vm_area_struct *vma);
1554  extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
1555  			unsigned long size, bool mm_wr_locked);
1556  extern void untrack_pfn_clear(struct vm_area_struct *vma);
1557  #endif
1558  
1559  #ifdef CONFIG_MMU
1560  #ifdef __HAVE_COLOR_ZERO_PAGE
1561  static inline int is_zero_pfn(unsigned long pfn)
1562  {
1563  	extern unsigned long zero_pfn;
1564  	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
1565  	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
1566  }
1567  
1568  #define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
1569  
1570  #else
1571  static inline int is_zero_pfn(unsigned long pfn)
1572  {
1573  	extern unsigned long zero_pfn;
1574  	return pfn == zero_pfn;
1575  }
1576  
1577  static inline unsigned long my_zero_pfn(unsigned long addr)
1578  {
1579  	extern unsigned long zero_pfn;
1580  	return zero_pfn;
1581  }
1582  #endif
1583  #else
1584  static inline int is_zero_pfn(unsigned long pfn)
1585  {
1586  	return 0;
1587  }
1588  
1589  static inline unsigned long my_zero_pfn(unsigned long addr)
1590  {
1591  	return 0;
1592  }
1593  #endif /* CONFIG_MMU */
1594  
1595  #ifdef CONFIG_MMU
1596  
1597  #ifndef CONFIG_TRANSPARENT_HUGEPAGE
1598  static inline int pmd_trans_huge(pmd_t pmd)
1599  {
1600  	return 0;
1601  }
1602  #ifndef pmd_write
1603  static inline int pmd_write(pmd_t pmd)
1604  {
1605  	BUG();
1606  	return 0;
1607  }
1608  #endif /* pmd_write */
1609  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1610  
1611  #ifndef pud_write
1612  static inline int pud_write(pud_t pud)
1613  {
1614  	BUG();
1615  	return 0;
1616  }
1617  #endif /* pud_write */
1618  
1619  #if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
1620  static inline int pmd_devmap(pmd_t pmd)
1621  {
1622  	return 0;
1623  }
1624  static inline int pud_devmap(pud_t pud)
1625  {
1626  	return 0;
1627  }
1628  static inline int pgd_devmap(pgd_t pgd)
1629  {
1630  	return 0;
1631  }
1632  #endif
1633  
1634  #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
1635  	!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1636  static inline int pud_trans_huge(pud_t pud)
1637  {
1638  	return 0;
1639  }
1640  #endif
1641  
1642  static inline int pud_trans_unstable(pud_t *pud)
1643  {
1644  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
1645  	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1646  	pud_t pudval = READ_ONCE(*pud);
1647  
1648  	if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
1649  		return 1;
1650  	if (unlikely(pud_bad(pudval))) {
1651  		pud_clear_bad(pud);
1652  		return 1;
1653  	}
1654  #endif
1655  	return 0;
1656  }
1657  
1658  #ifndef CONFIG_NUMA_BALANCING
1659  /*
1660   * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
1661   * perfectly valid to indicate "no" in that case, which is why the default
1662   * implementation here always answers "no".
1663   *
1664   * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
1665   * page protection due to NUMA hinting. NUMA hinting faults only apply in
1666   * accessible VMAs.
1667   *
1668   * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
1669   * looking at the VMA accessibility is sufficient; see the sketch after this block.
1670   */
1671  static inline int pte_protnone(pte_t pte)
1672  {
1673  	return 0;
1674  }
1675  
1676  static inline int pmd_protnone(pmd_t pmd)
1677  {
1678  	return 0;
1679  }
1680  #endif /* CONFIG_NUMA_BALANCING */
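
/*
 * Illustrative sketch (fault-handling style usage; handle_numa_hint() is a
 * hypothetical helper): because NUMA hinting faults only apply to accessible
 * VMAs, callers combine the two checks:
 *
 *	if (pte_protnone(pte) && vma_is_accessible(vma))
 *		return handle_numa_hint(vma, pte);
 *
 * Any other PROT_NONE-looking entry is then handled as a regular protection
 * fault.
 */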
1681  
1682  #endif /* CONFIG_MMU */
1683  
1684  #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
1685  
1686  #ifndef __PAGETABLE_P4D_FOLDED
1687  int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
1688  void p4d_clear_huge(p4d_t *p4d);
1689  #else
1690  static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1691  {
1692  	return 0;
1693  }
1694  static inline void p4d_clear_huge(p4d_t *p4d) { }
1695  #endif /* !__PAGETABLE_P4D_FOLDED */
1696  
1697  int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
1698  int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
1699  int pud_clear_huge(pud_t *pud);
1700  int pmd_clear_huge(pmd_t *pmd);
1701  int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
1702  int pud_free_pmd_page(pud_t *pud, unsigned long addr);
1703  int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
1704  #else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
1705  static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1706  {
1707  	return 0;
1708  }
1709  static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1710  {
1711  	return 0;
1712  }
1713  static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1714  {
1715  	return 0;
1716  }
1717  static inline void p4d_clear_huge(p4d_t *p4d) { }
1718  static inline int pud_clear_huge(pud_t *pud)
1719  {
1720  	return 0;
1721  }
1722  static inline int pmd_clear_huge(pmd_t *pmd)
1723  {
1724  	return 0;
1725  }
1726  static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
1727  {
1728  	return 0;
1729  }
1730  static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1731  {
1732  	return 0;
1733  }
1734  static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1735  {
1736  	return 0;
1737  }
1738  #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
1739  
1740  #ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
1741  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1742  /*
1743   * ARCHes with special requirements for evicting THP backing TLB entries can
1744   * implement this. It can also help optimize TLB flushes in the THP regime:
1745   * the stock flush_tlb_range() typically nukes the entire TLB once the flush
1746   * span exceeds a threshold, which is likely to be the case for a single
1747   * huge page. A single THP flush would therefore invalidate the entire TLB,
1748   * which is not desirable.
1749   * e.g. see arch/arc: flush_pmd_tlb_range; an override sketch follows this block.
1750   */
1751  #define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1752  #define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1753  #else
1754  #define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
1755  #define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
1756  #endif
1757  #endif
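
/*
 * Sketch of how an architecture opts out of the generic fallback above
 * (illustrative; arch/arc does something along these lines in its headers):
 * it defines the guard and supplies its own ranged flush:
 *
 *	#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 *	void flush_pmd_tlb_range(struct vm_area_struct *vma,
 *				 unsigned long start, unsigned long end);
 */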
1758  
1759  struct file;
1760  int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
1761  			unsigned long size, pgprot_t *vma_prot);
1762  
1763  #ifndef CONFIG_X86_ESPFIX64
1764  static inline void init_espfix_bsp(void) { }
1765  #endif
1766  
1767  extern void __init pgtable_cache_init(void);
1768  
1769  #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
1770  static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
1771  {
1772  	return true;
1773  }
1774  
1775  static inline bool arch_has_pfn_modify_check(void)
1776  {
1777  	return false;
1778  }
1779  #endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
1780  
1781  /*
1782   * Architecture PAGE_KERNEL_* fallbacks
1783   *
1784   * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
1785   * because they really don't support them, or the port needs to be updated to
1786   * reflect the required functionality. Below is a set of relatively safe,
1787   * best-effort fallbacks that can be relied on until the architectures
1788   * define these flags on their own.
1789   */
1790  
1791  #ifndef PAGE_KERNEL_RO
1792  # define PAGE_KERNEL_RO PAGE_KERNEL
1793  #endif
1794  
1795  #ifndef PAGE_KERNEL_EXEC
1796  # define PAGE_KERNEL_EXEC PAGE_KERNEL
1797  #endif
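
/*
 * Illustrative sketch: generic code may rely on the fallbacks above, e.g.
 * when mapping an array of pages read-only into vmalloc space:
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL_RO);
 *
 * On an architecture without a dedicated read-only kernel protection this
 * silently degrades to PAGE_KERNEL.
 */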
1798  
1799  /*
1800   * Page Table Modification bits for pgtbl_mod_mask.
1801   *
1802   * These are used by the p?d_alloc_track*() set of functions and in the generic
1803   * vmalloc/ioremap code to track at which page-table levels entries have been
1804   * modified. Based on that, the code can better decide when vmalloc and ioremap
1805   * mapping changes need to be synchronized to other page-tables in the system.
1806   */
1807  #define		__PGTBL_PGD_MODIFIED	0
1808  #define		__PGTBL_P4D_MODIFIED	1
1809  #define		__PGTBL_PUD_MODIFIED	2
1810  #define		__PGTBL_PMD_MODIFIED	3
1811  #define		__PGTBL_PTE_MODIFIED	4
1812  
1813  #define		PGTBL_PGD_MODIFIED	BIT(__PGTBL_PGD_MODIFIED)
1814  #define		PGTBL_P4D_MODIFIED	BIT(__PGTBL_P4D_MODIFIED)
1815  #define		PGTBL_PUD_MODIFIED	BIT(__PGTBL_PUD_MODIFIED)
1816  #define		PGTBL_PMD_MODIFIED	BIT(__PGTBL_PMD_MODIFIED)
1817  #define		PGTBL_PTE_MODIFIED	BIT(__PGTBL_PTE_MODIFIED)
1818  
1819  /* Page-Table Modification Mask */
1820  typedef unsigned int pgtbl_mod_mask;
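
/*
 * Illustrative sketch (populate_range() is a hypothetical helper; the sync
 * mask and arch_sync_kernel_mappings() come from the vmalloc side): each
 * level a populate helper touches ORs its bit into a caller-provided mask,
 * and the caller syncs once at the end if any tracked level changed:
 *
 *	pgtbl_mod_mask mask = 0;
 *
 *	populate_range(start, end, prot, &mask);
 *	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 *		arch_sync_kernel_mappings(start, end);
 */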
1821  
1822  #endif /* !__ASSEMBLY__ */
1823  
1824  #if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
1825  #ifdef CONFIG_PHYS_ADDR_T_64BIT
1826  /*
1827   * ZSMALLOC needs to know the highest PFN on 32-bit architectures
1828   * with physical address space extension, but falls back to
1829   * BITS_PER_LONG otherwise.
1830   */
1831  #error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
1832  #else
1833  #define MAX_POSSIBLE_PHYSMEM_BITS 32
1834  #endif
1835  #endif
1836  
1837  #ifndef has_transparent_hugepage
1838  #define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
1839  #endif
1840  
1841  #ifndef has_transparent_pud_hugepage
1842  #define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1843  #endif
1844  /*
1845   * On some architectures, whether the p4d/pud or pmd layer of the page
1846   * table hierarchy is folded depends on the mm.
1847   */
1848  #ifndef mm_p4d_folded
1849  #define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
1850  #endif
1851  
1852  #ifndef mm_pud_folded
1853  #define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
1854  #endif
1855  
1856  #ifndef mm_pmd_folded
1857  #define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
1858  #endif
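
/*
 * Illustrative sketch (similar in spirit to the mm_inc_nr_puds() accounting
 * helper): callers consult the mm-specific predicate before charging a
 * level, so folded levels are never accounted:
 *
 *	if (mm_pud_folded(mm))
 *		return;
 *	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 */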
1859  
1860  #ifndef p4d_offset_lockless
1861  #define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
1862  #endif
1863  #ifndef pud_offset_lockless
1864  #define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
1865  #endif
1866  #ifndef pmd_offset_lockless
1867  #define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
1868  #endif
1869  
1870  /*
1871   * pXd_leaf() is the API to check whether a pgtable entry is a huge page
1872   * mapping.  It should work globally across all archs, without any
1873   * dependency on CONFIG_* options.  For architectures that do not support
1874   * huge mappings on specific levels, the fallbacks below will be used.
1875   *
1876   * A leaf pgtable entry should always imply the following:
1877   *
1878   * - It is a "present" entry.  IOW, before using this API, please check it
1879   *   with pXd_present() first.  NOTE: this may not always mean the "present
1880   *   bit" is set.  For example, PROT_NONE entries are always "present".
1881   *
1882   * - It should _never_ be a swap entry of any type.  The "present" check
1883   *   above should have guarded against this, but let's be crystal clear.
1884   *
1885   * - It should contain a huge PFN, which points to a huge page larger than
1886   *   the platform's PAGE_SIZE.  The PFN format isn't important here.
1887   *
1888   * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(),
1889   *   pXd_devmap(), or hugetlb mappings).  A usage sketch follows the fallbacks below.
1890   */
1891  #ifndef pgd_leaf
1892  #define pgd_leaf(x)	false
1893  #endif
1894  #ifndef p4d_leaf
1895  #define p4d_leaf(x)	false
1896  #endif
1897  #ifndef pud_leaf
1898  #define pud_leaf(x)	false
1899  #endif
1900  #ifndef pmd_leaf
1901  #define pmd_leaf(x)	false
1902  #endif
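
/*
 * Illustrative walk sketch (handle_huge() and walk_pte_level() are
 * hypothetical helpers): presence is checked before asking whether the
 * entry is a leaf, per the contract above:
 *
 *	pmd_t pmd = pmdp_get_lockless(pmdp);
 *
 *	if (!pmd_present(pmd))
 *		return 0;
 *	if (pmd_leaf(pmd))
 *		return handle_huge(pmd_pfn(pmd), pmd_leaf_size(pmd));
 *	return walk_pte_level(pmdp, addr);
 */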
1903  
1904  #ifndef pgd_leaf_size
1905  #define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
1906  #endif
1907  #ifndef p4d_leaf_size
1908  #define p4d_leaf_size(x) P4D_SIZE
1909  #endif
1910  #ifndef pud_leaf_size
1911  #define pud_leaf_size(x) PUD_SIZE
1912  #endif
1913  #ifndef pmd_leaf_size
1914  #define pmd_leaf_size(x) PMD_SIZE
1915  #endif
1916  #ifndef __pte_leaf_size
1917  #ifndef pte_leaf_size
1918  #define pte_leaf_size(x) PAGE_SIZE
1919  #endif
1920  #define __pte_leaf_size(x,y) pte_leaf_size(y)
1921  #endif
1922  
1923  /*
1924   * We always define pmd_pfn for all archs as it's used in lots of generic
1925   * code.  The same now applies to pud_pfn (and may apply to larger
1926   * mappings too in the future; we're not there yet).  Instead of defining
1927   * it for all archs (like pmd_pfn), provide a fallback.
1928   *
1929   * Note that returning 0 here means any arch that didn't define this can
1930   * go severely wrong when it hits a real pud leaf.  It is the arch's
1931   * responsibility to properly define it when a huge pud is possible.
1932   */
1933  #ifndef pud_pfn
1934  #define pud_pfn(x) 0
1935  #endif
1936  
1937  /*
1938   * Some architectures have MMUs that are configurable or selectable at boot
1939   * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
1940   * helps to have a static maximum value.
1941   */
1942  
1943  #ifndef MAX_PTRS_PER_PTE
1944  #define MAX_PTRS_PER_PTE PTRS_PER_PTE
1945  #endif
1946  
1947  #ifndef MAX_PTRS_PER_PMD
1948  #define MAX_PTRS_PER_PMD PTRS_PER_PMD
1949  #endif
1950  
1951  #ifndef MAX_PTRS_PER_PUD
1952  #define MAX_PTRS_PER_PUD PTRS_PER_PUD
1953  #endif
1954  
1955  #ifndef MAX_PTRS_PER_P4D
1956  #define MAX_PTRS_PER_P4D PTRS_PER_P4D
1957  #endif
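
/*
 * Illustrative sketch (early_shadow_pte is a hypothetical array; KASAN's
 * early shadow tables are sized in a similar way): statically allocated
 * tables must use the MAX_ values rather than a possibly boot-time-variable
 * PTRS_PER_x:
 *
 *	static pte_t early_shadow_pte[MAX_PTRS_PER_PTE] __page_aligned_bss;
 */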
1958  
1959  #ifndef pte_pgprot
1960  #define pte_pgprot(x) ((pgprot_t) {0})
1961  #endif
1962  
1963  #ifndef pmd_pgprot
1964  #define pmd_pgprot(x) ((pgprot_t) {0})
1965  #endif
1966  
1967  #ifndef pud_pgprot
1968  #define pud_pgprot(x) ((pgprot_t) {0})
1969  #endif
1970  
1971  /* Description of the effects of mapping type and prot in the current
1972   * implementation.  This is due to the limited x86 page protection hardware.
1973   * The expected behavior is in parens:
1974   *
1975   * map_type	prot
1976   *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
1977   * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
1978   *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
1979   *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
1980   *
1981   * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
1982   *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
1983   *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
1984   *
1985   * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
1986   * MAP_PRIVATE (with Enhanced PAN supported):
1987   *								r: (no) no
1988   *								w: (no) no
1989   *								x: (yes) yes
1990   */
1991  #define DECLARE_VM_GET_PAGE_PROT					\
1992  pgprot_t vm_get_page_prot(unsigned long vm_flags)			\
1993  {									\
1994  		return protection_map[vm_flags &			\
1995  			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];	\
1996  }									\
1997  EXPORT_SYMBOL(vm_get_page_prot);
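
/*
 * Illustrative sketch (the exact pgprot values are arch-specific): an
 * architecture that relies on the generic accessor defines its
 * protection_map[] and then instantiates vm_get_page_prot() with the
 * macro above:
 *
 *	static pgprot_t protection_map[16] __ro_after_init = {
 *		[VM_NONE]	= PAGE_NONE,
 *		[VM_READ]	= PAGE_READONLY,
 *		(... the remaining VM_WRITE/VM_EXEC/VM_SHARED combinations ...)
 *	};
 *	DECLARE_VM_GET_PAGE_PROT
 */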
1998  
1999  #endif /* _LINUX_PGTABLE_H */
2000