// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_folio_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_ORDER_CEILING	5

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
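
/*
 * Illustrative worked example (not part of the original source, and it
 * assumes 4KiB pages, i.e. PAGE_SHIFT == 12): SWAP_RA_WIN_SHIFT is then 6,
 * so the low 6 bits of vma->swap_readahead_info hold the hit count, bits
 * 6-11 hold the readahead window, and the page-aligned high bits hold the
 * previous fault address.  For instance:
 *
 *	SWAP_RA_VAL(0x7f1234, 8, 3)
 *		== (0x7f1234 & PAGE_MASK) | (8 << 6) | 3
 *		== 0x7f1000 | 0x200 | 0x3 == 0x7f1203
 *
 * and SWAP_RA_ADDR(), SWAP_RA_WIN() and SWAP_RA_HITS() recover the three
 * fields from that packed value.
 */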

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	void *shadow;

	shadow = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(shadow))
		return shadow;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio->swap = entry;

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			if (shadowp) {
				old = xas_load(&xas);
				if (xa_is_value(old))
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}
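
/*
 * Minimal caller sketch (illustrative only; it mirrors add_to_swap() and
 * __read_swap_cache_async() below): the folio must be locked, swap-backed
 * and not yet in the swap cache before the call, and a failed insertion
 * must drop the SWAP_HAS_CACHE reference taken on the entry:
 *
 *	__folio_set_locked(new_folio);
 *	__folio_set_swapbacked(new_folio);
 *	if (add_to_swap_cache(new_folio, entry, gfp & GFP_RECLAIM_MASK, &shadow))
 *		put_swap_folio(new_folio, entry);
 */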

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	folio->swap.val = 0;
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}
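
/*
 * Callers of __delete_from_swap_cache() are expected to hold the swap
 * address space's i_pages lock (delete_from_swap_cache() below takes
 * xa_lock_irq() around the call) and to drop the entry's swap cache
 * reference with put_swap_folio() afterwards.
 */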

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is the MADV_FREE page. The
	 * page's pte could have its dirty bit cleared but the folio's
	 * SwapBacked flag is still set because clearing the dirty bit
	 * and the SwapBacked flag is not protected by a lock. For such a
	 * folio, unmap will not set the dirty bit, so folio reclaim will
	 * not write the folio out. This can cause data corruption when
	 * the folio is swapped in later. Always setting the dirty flag
	 * for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio->swap;
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		unsigned long index = curr & SWAP_ADDRESS_SPACE_MASK;
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, index);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, min(index + (end - curr), SWAP_ADDRESS_SPACE_PAGES)) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}
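
/*
 * Worked example (illustrative, assuming the default
 * SWAP_ADDRESS_SPACE_SHIFT of 14, i.e. 16384 slots per swap address
 * space): with begin = 16000 and end = 40000 the loop above makes three
 * passes, clearing shadows in the tail of the first address space, all of
 * the second, and the head of the third.  The shift/increment/shift
 * sequence simply rounds curr up to the start of the next address space
 * on each pass.
 */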

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 * 					- Marcelo
 */
void free_swap_cache(struct folio *folio)
{
	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	free_swap_cache(folio);
	if (!is_huge_zero_folio(folio))
		folio_put(folio);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	struct folio_batch folios;
	unsigned int refs[PAGEVEC_SIZE];

	lru_add_drain();
	folio_batch_init(&folios);
	for (int i = 0; i < nr; i++) {
		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));

		free_swap_cache(folio);
		refs[folios.nr] = 1;
		if (unlikely(encoded_page_flags(pages[i]) &
			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
			refs[folios.nr] = encoded_nr_pages(pages[++i]);

		if (folio_batch_add(&folios, folio) == 0)
			folios_put_refs(&folios, refs);
	}
	if (folios.nr)
		folios_put_refs(&folios, refs);
}
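
/*
 * Note on the encoding used above: when ENCODED_PAGE_BIT_NR_PAGES_NEXT is
 * set on an element, the following array slot carries the number of
 * references to drop for this folio rather than a page pointer, which is
 * why the loop consumes it with pages[++i].
 */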

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or %NULL.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swap_cache_index(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}

struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
		bool skip_if_exists)
{
	struct swap_info_struct *si;
	struct folio *folio;
	struct folio *new_folio = NULL;
	struct folio *result = NULL;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
					  swap_cache_index(entry));
		if (!IS_ERR(folio))
			goto got_folio;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto put_and_return;

		/*
		 * Get a new folio to read into from swap.  Allocate it now if
		 * new_folio does not exist yet, before marking swap_map
		 * SWAP_HAS_CACHE, when -EEXIST will cause any racers to loop
		 * around until we add it to the cache.
		 */
		if (!new_folio) {
			new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
			if (!new_folio)
				goto put_and_return;
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry, 1);
		if (!err)
			break;
		else if (err != -EEXIST)
			goto put_and_return;

		/*
		 * Protect against a recursive call to __read_swap_cache_async()
		 * on the same entry waiting forever here because SWAP_HAS_CACHE
		 * is set but the folio is not in the swap cache yet. This can
		 * happen today if mem_cgroup_swapin_charge_folio() below
		 * triggers reclaim through zswap, which may call
		 * __read_swap_cache_async() in the writeback path.
		 */
		if (skip_if_exists)
			goto put_and_return;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its folio to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new folio.
	 */
	__folio_set_locked(new_folio);
	__folio_set_swapbacked(new_folio);

	if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry, 1);

	if (shadow)
		workingset_refault(new_folio, shadow);

	/* Caller will initiate read into locked new_folio */
	folio_add_lru(new_folio);
	*new_page_allocated = true;
	folio = new_folio;
got_folio:
	result = folio;
	goto put_and_return;

fail_unlock:
	put_swap_folio(new_folio, entry);
	folio_unlock(new_folio);
put_and_return:
	put_swap_device(si);
	if (!(*new_page_allocated) && new_folio)
		folio_put(new_folio);
	return result;
}
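
/*
 * Summary of the flow above (descriptive only): each loop iteration first
 * looks the entry up in the swap cache, bails out if the slot is no longer
 * in use, allocates new_folio if needed, and then tries to claim the slot
 * with swapcache_prepare().  Only the winner of that race falls out of the
 * loop to charge, insert and LRU-add the folio; -EEXIST losers wait a tick
 * and retry, or give up immediately when skip_if_exists is set.
 */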

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_read_folio() holds the
 * swap cache folio lock.
 */
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		struct swap_iocb **plug)
{
	bool page_allocated;
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vma, addr, 0, &ilx);
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	mpol_cond_put(mpol);

	if (page_allocated)
		swap_read_folio(folio, plug);
	return folio;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
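
/*
 * Worked example (illustrative): with 5 readahead hits the heuristic
 * starts from 5 + 2 = 7 pages and rounds up to the next power of two, 8.
 * With no hits it falls back to a single page unless the offset is
 * adjacent to the previous fault, in which case it keeps two.  The result
 * is clamped to max_pages and never drops below half of the previous
 * window, so the window shrinks gradually rather than collapsing after
 * one miss.
 */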

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				    struct mempolicy *mpol, pgoff_t ilx)
{
	struct folio *folio;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		folio = __read_swap_cache_async(
				swp_entry(swp_type(entry), offset),
				gfp_mask, mpol, ilx, &page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (offset != entry_offset) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	if (unlikely(page_allocated))
		swap_read_folio(folio, NULL);
	return folio;
}
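
/*
 * Illustrative example: if swapin_nr_pages() returns 8 for a fault at swap
 * offset 0x1234, mask is 7, so the cluster read covers offsets
 * 0x1230-0x1237 (further clipped to skip the swap header and to stay below
 * si->max).  The faulting offset is read together with its neighbours
 * inside the plug, and the lookup after the skip label then normally finds
 * it already in the swap cache.
 */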

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}
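
/*
 * Sizing example (illustrative, assuming SWAP_ADDRESS_SPACE_PAGES is
 * 16384, i.e. 64MB of swap per address space with 4KiB pages): a 1GiB
 * swap device has 262144 slots and therefore gets
 * DIV_ROUND_UP(262144, 16384) = 16 separate address spaces, which keeps
 * the i_pages locks finer grained than a single xarray for the whole
 * device.
 */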

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
			   unsigned long *end)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, prev_faddr, left, right;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
	if (max_win == 1)
		return 1;

	faddr = vmf->address;
	ra_val = GET_SWAP_RA_VAL(vma);
	prev_faddr = SWAP_RA_ADDR(ra_val);
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
				max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return 1;

	if (faddr == prev_faddr + PAGE_SIZE)
		left = faddr;
	else if (prev_faddr == faddr + PAGE_SIZE)
		left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
	else
		left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
	right = left + (win << PAGE_SHIFT);
	if ((long)left < 0)
		left = 0;
	*start = max3(left, vma->vm_start, faddr & PMD_MASK);
	*end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);

	return win;
}
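
/*
 * Placement example (illustrative): with win == 8 and a fault one page
 * above the previous one (forward scan), the window starts at the faulting
 * address and extends 8 pages upwards; for a backward scan it ends just
 * past the faulting address; otherwise it is roughly centred on the fault.
 * The result is always clipped to the VMA and to the PMD that contains the
 * fault, so readahead never crosses a page-table page.
 */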

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct folio *folio;
	pte_t *pte = NULL, pentry;
	int win;
	unsigned long start, end, addr;
	swp_entry_t entry;
	pgoff_t ilx;
	bool page_allocated;

	win = swap_vma_ra_win(vmf, &start, &end);
	if (win == 1)
		goto skip;

	ilx = targ_ilx - PFN_DOWN(vmf->address - start);

	blk_start_plug(&plug);
	for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
						&page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (addr != vmf->address) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The folio was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
					&page_allocated, false);
	if (unlikely(page_allocated))
		swap_read_folio(folio, NULL);
	return folio;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * It's the main entry point for swap readahead. Depending on the
 * configuration, it performs either cluster-based (i.e. physical disk
 * based) or VMA-based (i.e. based on virtual addresses around the
 * faulting address) readahead.
 */
struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
	folio = swap_use_vma_readahead() ?
		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
	mpol_cond_put(mpol);

	return folio;
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif