/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *  - __tlb_remove_folio_pages()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both return a
 *    boolean indicating whether the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *    __tlb_remove_folio_pages() is similar to __tlb_remove_page(); however,
 *    instead of removing a single page, it removes the given number of
 *    consecutive pages that are all part of the same (large) folio, just like
 *    calling __tlb_remove_page() on each page individually.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *			whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma() because we don't
 *      care about ranges; everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force a
 *    flush of the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * The API also allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size-specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_FLUSH_CACHE
 *
 *  Indicates the architecture has flush_cache_range() but it need *NOT* be
 *  called before unmapping a VMA.
 *
 *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
 *	  flush_cache_range() being a NOP, except Sparc64 seems to be
 *	  different here.
 *
 *  MMU_GATHER_MERGE_VMAS
 *
 *  Indicates the architecture wants to merge ranges over VMAs; typical when
 *  multiple range invalidates are more expensive than a full invalidate.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range(). This
 *  option implies MMU_GATHER_MERGE_VMAS above.
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If this option is set, the mmu_gather will not track individual pages for
 *  delayed page freeing anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 *
 *  A sketch of a typical caller of this API is shown in the comment below.
 */
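/*
 * For illustration only (not part of the API description above): a minimal,
 * hedged sketch of how a caller typically drives an mmu_gather while tearing
 * down user mappings.  The page-table walking, locking, vm_normal_page()
 * lookup and error handling that real callers such as mm/memory.c need are
 * omitted, and @page stands in for the page frame backing the cleared PTE.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);		// start the gather for @mm
 *	tlb_start_vma(&tlb, vma);		// per-VMA: flush caches, note flags
 *	for (addr = start; addr < end; addr += PAGE_SIZE, pte++) {
 *		pteval = ptep_get_and_clear(mm, addr, pte);	// 1) unhook page
 *		tlb_remove_tlb_entry(&tlb, pte, addr);		// 2) track for TLB invalidate
 *		if (pte_present(pteval))
 *			tlb_remove_page(&tlb, page);		// 3) queue page for freeing
 *	}
 *	tlb_end_vma(&tlb, vma);			// may flush at the VMA boundary
 *	tlb_finish_mmu(&tlb);			// final invalidate + free queued pages
 */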

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
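/*
 * Rough worked example (assuming a common 64-bit configuration, not a
 * guarantee): with 4 KiB pages, 8-byte pointers and the rcu_head present,
 * sizeof(struct mmu_table_batch) is about 24 bytes, so MAX_TABLE_BATCH comes
 * out to roughly (4096 - 24) / 8 = 509 page-table pages queued per batch page.
 */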

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page-based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct encoded_page	*encoded_pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
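/*
 * Worked example under an assumed common configuration: with 4 KiB pages and
 * 8-byte pointers, sizeof(struct mmu_gather_batch) is 16 bytes, so
 * MAX_GATHER_BATCH is (4096 - 16) / 8 = 510 encoded pages per batch and
 * MAX_GATHER_BATCH_COUNT is 10000 / 510 = 19, capping a single gather at
 * just under the 10K-page budget mentioned above before further batch
 * allocation is refused and the caller has to flush.
 */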

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
		bool delay_rmap, int page_size);
bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
		unsigned int nr_pages, bool delay_rmap);

#ifdef CONFIG_SMP
/*
 * This both sets 'delayed_rmap', and returns true. It would be an inline
 * function, except we define it before the 'struct mmu_gather'.
 */
#define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
#endif

#endif

/*
 * We have a no-op version of the rmap removal that doesn't
 * delay anything. That is used on S390, which flushes remote
 * TLBs synchronously, and on UP, which doesn't have any
 * remote TLBs to flush and is not preemptible due to this
 * all happening under the page table lock.
 */
#ifndef tlb_delay_rmap
#define tlb_delay_rmap(tlb) (false)
static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * Do we have pending delayed rmap removals?
	 */
	unsigned int		delayed_rmap : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;
	unsigned int		vma_pfn  : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif

/*
 * When an architecture does not have an efficient means of flushing a range
 * of TLB entries, there is no point in doing intermediate flushes on
 * tlb_end_vma() to keep the range small. We equally don't have to worry about
 * page granularity or other things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation,
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */
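/*
 * For reference, a hedged sketch of how an architecture typically supplies
 * its own tlb_flush() instead of the defaults above: its asm/tlb.h defines
 * the symbol before including this header, so the #ifndef above skips the
 * generic version.  "foo" and foo_flush_range() below are made-up names for
 * illustration, not any real port's API:
 *
 *	// arch/foo/include/asm/tlb.h (illustrative only)
 *	#define tlb_flush tlb_flush
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all)
 *			flush_tlb_mm(tlb->mm);
 *		else if (tlb->end)
 *			foo_flush_range(tlb->mm, tlb->start, tlb->end);
 *	}
 *
 *	#include <asm-generic/tlb.h>
 */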

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, false, page_size))
		tlb_flush_mmu(tlb);
}

static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
		struct page *page, bool delay_rmap)
{
	return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
	tlb_remove_table(tlb, pt);
}

/* Like tlb_remove_ptdesc, but for page-like page directories. */
static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
	tlb_remove_page(tlb, ptdesc_page(pt));
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
						     unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
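/*
 * Worked example (assuming a typical configuration with 4 KiB base pages and
 * 2 MiB PMD mappings): if any PTEs were cleared, the shift is PAGE_SHIFT and
 * tlb_get_unmap_size() is 4 KiB; if only PMD-level entries were cleared, the
 * shift is PMD_SHIFT and the size is 2 MiB, letting an architecture's
 * tlb_flush() pick a coarser invalidation stride.
 */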

/*
 * For tlb vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap, the vmas are adjusted to only
 * cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs after
	 * all. Force flush TLBs for such ranges to avoid munmap() vs
	 * unmap_mapping_range() races.
	 */
	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
		/*
		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
		 * the ranges growing with the unused space between consecutive VMAs.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust tlb->start and tlb->end,
 * and set the corresponding cleared_* bit.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
}
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_tlb_entries - remember unmapping of multiple consecutive ptes for
 *			    later tlb invalidation.
 *
 * Similar to tlb_remove_tlb_entry(), but remember unmapping of multiple
 * consecutive ptes instead of only a single one.
 */
static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb,
		pte_t *ptep, unsigned int nr, unsigned long address)
{
	tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr);
	for (;;) {
		__tlb_remove_tlb_entry(tlb, ptep, address);
		if (--nr == 0)
			break;
		ptep++;
		address += PAGE_SIZE;
	}
}

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
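/*
 * For context, a hedged sketch of the architecture half that pairs with the
 * p*_free_tlb() wrappers above.  On a port whose PTE pages are ordinary
 * pages this commonly boils down to destructing the page-table metadata and
 * handing the page to the gather; "foo" is a made-up architecture name and
 * the exact helpers vary between ports:
 *
 *	// arch/foo/include/asm/pgalloc.h (illustrative only)
 *	#define __pte_free_tlb(tlb, pte, address)			\
 *	do {								\
 *		pagetable_pte_dtor(page_ptdesc(pte));			\
 *		tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));	\
 *	} while (0)
 */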

#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return true;
}
#endif
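/*
 * These conservative defaults always request a flush.  An architecture that
 * can compare the old and new PTE bits may override them to skip flushes for
 * changes the TLB cannot be caching anyway.  A hedged sketch, where
 * foo_pte_flags_changed() is an illustrative, made-up helper rather than a
 * real port's API:
 *
 *	#define pte_needs_flush pte_needs_flush
 *	static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
 *	{
 *		// nothing could be cached for a non-present old PTE
 *		if (!pte_present(oldpte))
 *			return false;
 *		return foo_pte_flags_changed(oldpte, newpte);
 *	}
 */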

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */