// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/memory.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/pagewalk.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

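/*
 * Attempt to isolate a non-LRU movable page for migration. On success,
 * return true with the page marked isolated and an extra reference held;
 * the driver's ->isolate_page() callback has accepted the page. Return
 * false if the page was freed under us, is not (or no longer) movable,
 * is already isolated, or could not be locked.
 */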
bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
        struct folio *folio = folio_get_nontail_page(page);
        const struct movable_operations *mops;

        /*
         * Avoid burning cycles with pages that are yet under __free_pages(),
         * or just got freed under us.
         *
         * In case we 'win' a race for a movable page being freed under us and
         * raise its refcount, preventing __free_pages() from doing its job,
         * the folio_put() at the end of this block will take care of
         * releasing this page, thus avoiding a nasty leakage.
         */
        if (!folio)
                goto out;

        if (unlikely(folio_test_slab(folio)))
                goto out_putfolio;
        /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
        smp_rmb();
        /*
         * Check the movable flag before taking the page lock because
         * we use non-atomic bitops on newly allocated page flags, so
         * unconditionally grabbing the lock ruins the page owner's side.
         */
        if (unlikely(!__folio_test_movable(folio)))
                goto out_putfolio;
        /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
        smp_rmb();
        if (unlikely(folio_test_slab(folio)))
                goto out_putfolio;

        /*
         * As movable pages are not isolated from LRU lists, concurrent
         * compaction threads can race against page migration functions
         * as well as against a page being released.
         *
         * In order to avoid having an already isolated movable page
         * being (wrongly) re-isolated while it is under migration,
         * or to avoid attempting to isolate pages being released,
         * let's be sure we have the page lock
         * before proceeding with the movable page isolation steps.
         */
        if (unlikely(!folio_trylock(folio)))
                goto out_putfolio;

        if (!folio_test_movable(folio) || folio_test_isolated(folio))
                goto out_no_isolated;

        mops = folio_movable_ops(folio);
        VM_BUG_ON_FOLIO(!mops, folio);

        if (!mops->isolate_page(&folio->page, mode))
                goto out_no_isolated;

        /* Driver shouldn't use the isolated flag */
        WARN_ON_ONCE(folio_test_isolated(folio));
        folio_set_isolated(folio);
        folio_unlock(folio);

        return true;

out_no_isolated:
        folio_unlock(folio);
out_putfolio:
        folio_put(folio);
out:
        return false;
}

static void putback_movable_folio(struct folio *folio)
{
        const struct movable_operations *mops = folio_movable_ops(folio);

        mops->putback_page(&folio->page);
        folio_clear_isolated(folio);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
        struct folio *folio;
        struct folio *folio2;

        list_for_each_entry_safe(folio, folio2, l, lru) {
                if (unlikely(folio_test_hugetlb(folio))) {
                        folio_putback_active_hugetlb(folio);
                        continue;
                }
                list_del(&folio->lru);
                /*
                 * We isolated a non-LRU movable folio, so here we can use
                 * __folio_test_movable because an LRU folio's mapping cannot
                 * have PAGE_MAPPING_MOVABLE.
                 */
                if (unlikely(__folio_test_movable(folio))) {
                        VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
                        folio_lock(folio);
                        if (folio_test_movable(folio))
                                putback_movable_folio(folio);
                        else
                                folio_clear_isolated(folio);
                        folio_unlock(folio);
                        folio_put(folio);
                } else {
                        node_stat_mod_folio(folio, NR_ISOLATED_ANON +
                                        folio_is_file_lru(folio), -folio_nr_pages(folio));
                        folio_putback_lru(folio);
                }
        }
}

/* Must be called with an elevated refcount on the non-hugetlb folio */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
        bool isolated, lru;

        if (folio_test_hugetlb(folio))
                return isolate_hugetlb(folio, list);

        lru = !__folio_test_movable(folio);
        if (lru)
                isolated = folio_isolate_lru(folio);
        else
                isolated = isolate_movable_page(&folio->page,
                                                ISOLATE_UNEVICTABLE);

        if (!isolated)
                return false;

        list_add(&folio->lru, list);
        if (lru)
                node_stat_add_folio(folio, NR_ISOLATED_ANON +
                                    folio_is_file_lru(folio));

        return true;
}

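/*
 * Called while removing migration ptes when RMP_USE_SHARED_ZEROPAGE was
 * requested: if the subpage at @idx of the (split) large @folio contains
 * only zeros, map the shared zeropage at the faulting address instead of
 * the subpage and drop the corresponding mm counter.
 */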
static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
                                          struct folio *folio,
                                          unsigned long idx)
{
        struct page *page = folio_page(folio, idx);
        bool contains_data;
        pte_t newpte;
        void *addr;

        if (PageCompound(page))
                return false;
        VM_BUG_ON_PAGE(!PageAnon(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);

        if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
            mm_forbids_zeropage(pvmw->vma->vm_mm))
                return false;

        /*
         * The pmd entry mapping the old thp was flushed and the pte mapping
         * this subpage has become non-present. If the subpage contains only
         * zeros, map it to the shared zeropage.
         */
        addr = kmap_local_page(page);
        contains_data = memchr_inv(addr, 0, PAGE_SIZE);
        kunmap_local(addr);

        if (contains_data)
                return false;

        newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
                                       pvmw->vma->vm_page_prot));
        set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);

        dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
        return true;
}

struct rmap_walk_arg {
        struct folio *folio;
        bool map_unused_to_zeropage;
};

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
                struct vm_area_struct *vma, unsigned long addr, void *arg)
{
        struct rmap_walk_arg *rmap_walk_arg = arg;
        DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

        while (page_vma_mapped_walk(&pvmw)) {
                rmap_t rmap_flags = RMAP_NONE;
                pte_t old_pte;
                pte_t pte;
                swp_entry_t entry;
                struct page *new;
                unsigned long idx = 0;

                /* pgoff is invalid for ksm pages, but they are never large */
                if (folio_test_large(folio) && !folio_test_hugetlb(folio))
                        idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
                new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
                /* PMD-mapped THP migration entry */
                if (!pvmw.pte) {
                        VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
                                        !folio_test_pmd_mappable(folio), folio);
                        remove_migration_pmd(&pvmw, new);
                        continue;
                }
#endif
                if (rmap_walk_arg->map_unused_to_zeropage &&
                    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
                        continue;

                folio_get(folio);
                pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
                old_pte = ptep_get(pvmw.pte);

                entry = pte_to_swp_entry(old_pte);
                if (!is_migration_entry_young(entry))
                        pte = pte_mkold(pte);
                if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
                        pte = pte_mkdirty(pte);
                if (pte_swp_soft_dirty(old_pte))
                        pte = pte_mksoft_dirty(pte);
                else
                        pte = pte_clear_soft_dirty(pte);

                if (is_writable_migration_entry(entry))
                        pte = pte_mkwrite(pte, vma);
                else if (pte_swp_uffd_wp(old_pte))
                        pte = pte_mkuffd_wp(pte);

                if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
                        rmap_flags |= RMAP_EXCLUSIVE;

                if (unlikely(is_device_private_page(new))) {
                        if (pte_write(pte))
                                entry = make_writable_device_private_entry(
                                                        page_to_pfn(new));
                        else
                                entry = make_readable_device_private_entry(
                                                        page_to_pfn(new));
                        pte = swp_entry_to_pte(entry);
                        if (pte_swp_soft_dirty(old_pte))
                                pte = pte_swp_mksoft_dirty(pte);
                        if (pte_swp_uffd_wp(old_pte))
                                pte = pte_swp_mkuffd_wp(pte);
                }

#ifdef CONFIG_HUGETLB_PAGE
                if (folio_test_hugetlb(folio)) {
                        struct hstate *h = hstate_vma(vma);
                        unsigned int shift = huge_page_shift(h);
                        unsigned long psize = huge_page_size(h);

                        pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
                        if (folio_test_anon(folio))
                                hugetlb_add_anon_rmap(folio, vma, pvmw.address,
                                                      rmap_flags);
                        else
                                hugetlb_add_file_rmap(folio);
                        set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
                                        psize);
                } else
#endif
                {
                        if (folio_test_anon(folio))
                                folio_add_anon_rmap_pte(folio, new, vma,
                                                        pvmw.address, rmap_flags);
                        else
                                folio_add_file_rmap_pte(folio, new, vma);
                        set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
                }
                if (vma->vm_flags & VM_LOCKED)
                        mlock_drain_local();

                trace_remove_migration_pte(pvmw.address, pte_val(pte),
                                           compound_order(new));

                /* No need to invalidate - it was non-present before */
                update_mmu_cache(vma, pvmw.address, pvmw.pte);
        }

        return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
{
        struct rmap_walk_arg rmap_walk_arg = {
                .folio = src,
                .map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
        };

        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
                .arg = &rmap_walk_arg,
        };

        VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);

        if (flags & RMP_LOCKED)
                rmap_walk_locked(dst, &rwc);
        else
                rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                          unsigned long address)
{
        spinlock_t *ptl;
        pte_t *ptep;
        pte_t pte;
        swp_entry_t entry;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!ptep)
                return;

        pte = ptep_get(ptep);
        pte_unmap(ptep);

        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        migration_entry_wait_on_locked(entry, ptl);
        return;
out:
        spin_unlock(ptl);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
        pte_t pte;

        hugetlb_vma_assert_locked(vma);
        spin_lock(ptl);
        pte = huge_ptep_get(vma->vm_mm, addr, ptep);

        if (unlikely(!is_hugetlb_entry_migration(pte))) {
                spin_unlock(ptl);
                hugetlb_vma_unlock_read(vma);
        } else {
                /*
                 * If a migration entry existed, it is safe to release the vma
                 * lock here because the pgtable page won't be freed without
                 * the pgtable lock being released. See the comment right above
                 * the pgtable lock release in migration_entry_wait_on_locked().
                 */
                hugetlb_vma_unlock_read(vma);
                migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
        }
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl;

        ptl = pmd_lock(mm, pmd);
        if (!is_pmd_migration_entry(*pmd))
                goto unlock;
        migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
        return;
unlock:
        spin_unlock(ptl);
}
#endif

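/*
 * The number of references that the caller should see on an isolated
 * folio: one for the isolating caller itself plus, when there is a
 * mapping, one per base page held by the page cache or swap cache, and
 * one more if private data (e.g. buffer heads) is attached.
 */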
static int folio_expected_refs(struct address_space *mapping,
                               struct folio *folio)
{
        int refs = 1;

        if (!mapping)
                return refs;

        refs += folio_nr_pages(folio);
        if (folio_test_private(folio))
                refs++;

        return refs;
}

/*
 * Replace the folio in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous folios without a mapping
 * 2 for folios with a mapping
 * 3 for folios with a mapping and PagePrivate/PagePrivate2 set.
 */
static int __folio_migrate_mapping(struct address_space *mapping,
                struct folio *newfolio, struct folio *folio, int expected_count)
{
        XA_STATE(xas, &mapping->i_pages, folio_index(folio));
        struct zone *oldzone, *newzone;
        int dirty;
        long nr = folio_nr_pages(folio);
        long entries, i;

        if (!mapping) {
                /* Take off deferred split queue while frozen and memcg set */
                if (folio_test_large(folio) &&
                    folio_test_large_rmappable(folio)) {
                        if (!folio_ref_freeze(folio, expected_count))
                                return -EAGAIN;
                        folio_unqueue_deferred_split(folio);
                        folio_ref_unfreeze(folio, expected_count);
                }

                /* No turning back from here */
                newfolio->index = folio->index;
                newfolio->mapping = folio->mapping;
                if (folio_test_anon(folio) && folio_test_large(folio))
                        mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
                if (folio_test_swapbacked(folio))
                        __folio_set_swapbacked(newfolio);

                return MIGRATEPAGE_SUCCESS;
        }

        oldzone = folio_zone(folio);
        newzone = folio_zone(newfolio);

        xas_lock_irq(&xas);
        if (!folio_ref_freeze(folio, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        /* Take off deferred split queue while frozen and memcg set */
        folio_unqueue_deferred_split(folio);

        /*
         * Now we know that no one else is looking at the folio:
         * no turning back from here.
         */
        newfolio->index = folio->index;
        newfolio->mapping = folio->mapping;
        if (folio_test_anon(folio) && folio_test_large(folio))
                mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
        folio_ref_add(newfolio, nr); /* add cache reference */
        if (folio_test_swapbacked(folio)) {
                __folio_set_swapbacked(newfolio);
                if (folio_test_swapcache(folio)) {
                        folio_set_swapcache(newfolio);
                        newfolio->private = folio_get_private(folio);
                }
                entries = nr;
        } else {
                VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
                entries = 1;
        }

        /* Move dirty while folio refs frozen and newfolio not yet exposed */
        dirty = folio_test_dirty(folio);
        if (dirty) {
                folio_clear_dirty(folio);
                folio_set_dirty(newfolio);
        }

        /* Swap cache still stores N entries instead of a high-order entry */
        for (i = 0; i < entries; i++) {
                xas_store(&xas, newfolio);
                xas_next(&xas);
        }

        /*
         * Drop cache reference from old folio by unfreezing
         * to one less reference.
         * We know this isn't the last reference.
         */
        folio_ref_unfreeze(folio, expected_count - nr);

        xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */

        /*
         * If moved to a different zone then also account
         * the folio for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new folio and drop references to the old folio.
         *
         * Note that anonymous folios are accounted for
         * via NR_FILE_PAGES and NR_ANON_MAPPED if they
         * are mapped to swap space.
         */
        if (newzone != oldzone) {
                struct lruvec *old_lruvec, *new_lruvec;
                struct mem_cgroup *memcg;

                memcg = folio_memcg(folio);
                old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
                new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

                __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
                __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
                if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
                        __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
                        __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);

                        if (folio_test_pmd_mappable(folio)) {
                                __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
                                __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
                        }
                }
#ifdef CONFIG_SWAP
                if (folio_test_swapcache(folio)) {
                        __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
                        __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
                }
#endif
                if (dirty && mapping_can_writeback(mapping)) {
                        __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
                        __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
                        __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
                        __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
                }
        }
        local_irq_enable();

        return MIGRATEPAGE_SUCCESS;
}

int folio_migrate_mapping(struct address_space *mapping,
                struct folio *newfolio, struct folio *folio, int extra_count)
{
        int expected_count = folio_expected_refs(mapping, folio) + extra_count;

        if (folio_ref_count(folio) != expected_count)
                return -EAGAIN;

        return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
                                   struct folio *dst, struct folio *src)
{
        XA_STATE(xas, &mapping->i_pages, folio_index(src));
        int rc, expected_count = folio_expected_refs(mapping, src);

        if (folio_ref_count(src) != expected_count)
                return -EAGAIN;

        rc = folio_mc_copy(dst, src);
        if (unlikely(rc))
                return rc;

        xas_lock_irq(&xas);
        if (!folio_ref_freeze(src, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        dst->index = src->index;
        dst->mapping = src->mapping;

        folio_ref_add(dst, folio_nr_pages(dst));

        xas_store(&xas, dst);

        folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));

        xas_unlock_irq(&xas);

        return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
        int cpupid;

        if (folio_test_referenced(folio))
                folio_set_referenced(newfolio);
        if (folio_test_uptodate(folio))
                folio_mark_uptodate(newfolio);
        if (folio_test_clear_active(folio)) {
                VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
                folio_set_active(newfolio);
        } else if (folio_test_clear_unevictable(folio))
                folio_set_unevictable(newfolio);
        if (folio_test_workingset(folio))
                folio_set_workingset(newfolio);
        if (folio_test_checked(folio))
                folio_set_checked(newfolio);
        /*
         * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
         * migration entries. We can still have PG_anon_exclusive set on an
         * effectively unmapped and unreferenced first sub-page of an
         * anonymous THP: we can simply copy it here via PG_mappedtodisk.
         */
        if (folio_test_mappedtodisk(folio))
                folio_set_mappedtodisk(newfolio);

        /* Move dirty on folios not done by folio_migrate_mapping() */
        if (folio_test_dirty(folio))
                folio_set_dirty(newfolio);

        if (folio_test_young(folio))
                folio_set_young(newfolio);
        if (folio_test_idle(folio))
                folio_set_idle(newfolio);

        /*
         * Copy NUMA information to the new page, to prevent over-eager
         * future migrations of this same page.
         */
        cpupid = folio_xchg_last_cpupid(folio, -1);
        /*
         * For memory tiering mode, when migrating between slow and fast
         * memory nodes, reset cpupid, because it is used to record
         * page access time in slow memory nodes.
         */
        if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
                bool f_toptier = node_is_toptier(folio_nid(folio));
                bool t_toptier = node_is_toptier(folio_nid(newfolio));

                if (f_toptier != t_toptier)
                        cpupid = -1;
        }
        folio_xchg_last_cpupid(newfolio, cpupid);

        folio_migrate_ksm(newfolio, folio);
        /*
         * Please do not reorder this without considering how mm/ksm.c's
         * ksm_get_folio() depends upon ksm_migrate_page() and the
         * swapcache flag.
         */
        if (folio_test_swapcache(folio))
                folio_clear_swapcache(folio);
        folio_clear_private(folio);

        /* page->private contains hugetlb specific flags */
        if (!folio_test_hugetlb(folio))
                folio->private = NULL;

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (folio_test_writeback(newfolio))
                folio_end_writeback(newfolio);

        /*
         * PG_readahead shares the same bit with PG_reclaim. The above
         * folio_end_writeback() may clear PG_readahead mistakenly, so set
         * the bit after that.
         */
        if (folio_test_readahead(folio))
                folio_set_readahead(newfolio);

        folio_copy_owner(newfolio, folio);
        pgalloc_tag_copy(newfolio, folio);

        mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

/************************************************************
 *                    Migration functions
 ***********************************************************/

static int __migrate_folio(struct address_space *mapping, struct folio *dst,
                           struct folio *src, void *src_private,
                           enum migrate_mode mode)
{
        int rc, expected_count = folio_expected_refs(mapping, src);

        /* Check whether src does not have extra refs before we do more work */
        if (folio_ref_count(src) != expected_count)
                return -EAGAIN;

        rc = folio_mc_copy(dst, src);
        if (unlikely(rc))
                return rc;

        rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        if (src_private)
                folio_attach_private(dst, folio_detach_private(src));

        folio_migrate_flags(dst, src);
        return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
                  struct folio *src, enum migrate_mode mode)
{
        BUG_ON(folio_test_writeback(src));      /* Writeback must be complete */
        return __migrate_folio(mapping, dst, src, NULL, mode);
}
EXPORT_SYMBOL(migrate_folio);

#ifdef CONFIG_BUFFER_HEAD
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
                                        enum migrate_mode mode)
{
        struct buffer_head *bh = head;
        struct buffer_head *failed_bh;

        do {
                if (!trylock_buffer(bh)) {
                        if (mode == MIGRATE_ASYNC)
                                goto unlock;
                        if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
                                goto unlock;
                        lock_buffer(bh);
                }

                bh = bh->b_this_page;
        } while (bh != head);

        return true;

unlock:
        /* We failed to lock the buffer and cannot stall. */
        failed_bh = bh;
        bh = head;
        while (bh != failed_bh) {
                unlock_buffer(bh);
                bh = bh->b_this_page;
        }

        return false;
}

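/*
 * Common implementation for buffer_migrate_folio() and
 * buffer_migrate_folio_norefs(): when @check_refs is true, additionally
 * verify under i_private_lock that no buffer head has an elevated b_count,
 * retrying once after invalidating the buffer head LRUs.
 */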
static int __buffer_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode,
                bool check_refs)
{
        struct buffer_head *bh, *head;
        int rc;
        int expected_count;

        head = folio_buffers(src);
        if (!head)
                return migrate_folio(mapping, dst, src, mode);

        /* Check whether page does not have extra refs before we do more work */
        expected_count = folio_expected_refs(mapping, src);
        if (folio_ref_count(src) != expected_count)
                return -EAGAIN;

        if (!buffer_migrate_lock_buffers(head, mode))
                return -EAGAIN;

        if (check_refs) {
                bool busy;
                bool invalidated = false;

recheck_buffers:
                busy = false;
                spin_lock(&mapping->i_private_lock);
                bh = head;
                do {
                        if (atomic_read(&bh->b_count)) {
                                busy = true;
                                break;
                        }
                        bh = bh->b_this_page;
                } while (bh != head);
                if (busy) {
                        if (invalidated) {
                                rc = -EAGAIN;
                                goto unlock_buffers;
                        }
                        spin_unlock(&mapping->i_private_lock);
                        invalidate_bh_lrus();
                        invalidated = true;
                        goto recheck_buffers;
                }
        }

        rc = filemap_migrate_folio(mapping, dst, src, mode);
        if (rc != MIGRATEPAGE_SUCCESS)
                goto unlock_buffers;

        bh = head;
        do {
                folio_set_bh(bh, dst, bh_offset(bh));
                bh = bh->b_this_page;
        } while (bh != head);

unlock_buffers:
        if (check_refs)
                spin_unlock(&mapping->i_private_lock);
        bh = head;
        do {
                unlock_buffer(bh);
                bh = bh->b_this_page;
        } while (bh != head);

        return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example, attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif /* CONFIG_BUFFER_HEAD */

int filemap_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);

/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .for_reclaim = 1
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!folio_clear_dirty_for_io(folio))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty folio may imply that the underlying filesystem has
         * the folio on some queue. So the folio must be clean for
         * migration. Writeout may mean we lose the lock and the
         * folio state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(folio, folio, 0);

        rc = mapping->a_ops->writepage(&folio->page, &wbc);

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                folio_lock(folio);

        return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        if (folio_test_dirty(src)) {
                /* Only writeback folios in full synchronous migration */
                switch (mode) {
                case MIGRATE_SYNC:
                        break;
                default:
                        return -EBUSY;
                }
                return writeout(mapping, src);
        }

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (!filemap_release_folio(src, GFP_KERNEL))
                return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

        return migrate_folio(mapping, dst, src, mode);
}

/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
                             enum migrate_mode mode)
{
        int rc = -EAGAIN;
        bool is_lru = !__folio_test_movable(src);

        VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
        VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

        if (likely(is_lru)) {
                struct address_space *mapping = folio_mapping(src);

                if (!mapping)
                        rc = migrate_folio(mapping, dst, src, mode);
                else if (mapping_inaccessible(mapping))
                        rc = -EOPNOTSUPP;
                else if (mapping->a_ops->migrate_folio)
                        /*
                         * Most folios have a mapping and most filesystems
                         * provide a migrate_folio callback. Anonymous folios
                         * are part of swap space which also has its own
                         * migrate_folio callback. This is the most common path
                         * for page migration.
                         */
                        rc = mapping->a_ops->migrate_folio(mapping, dst, src,
                                                           mode);
                else
                        rc = fallback_migrate_folio(mapping, dst, src, mode);
        } else {
                const struct movable_operations *mops;

                /*
                 * A non-LRU page could be released after the isolation
                 * step. In that case, we shouldn't try migration.
                 */
                VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
                if (!folio_test_movable(src)) {
                        rc = MIGRATEPAGE_SUCCESS;
                        folio_clear_isolated(src);
                        goto out;
                }

                mops = folio_movable_ops(src);
                rc = mops->migrate_page(&dst->page, &src->page, mode);
                WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
                             !folio_test_isolated(src));
        }

        /*
         * When successful, old pagecache src->mapping must be cleared before
         * src is freed; but stats require that PageAnon be left as PageAnon.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
                if (__folio_test_movable(src)) {
                        VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

                        /*
                         * We clear PG_movable under the page lock so that no
                         * compactor can try to migrate this page.
                         */
                        folio_clear_isolated(src);
                }

                /*
                 * Anonymous and movable src->mapping will be cleared by
                 * free_pages_prepare(), so don't reset it here; keeping it
                 * preserves the type so that checks like PageAnon still work.
                 */
                if (!folio_mapping_flags(src))
                        src->mapping = NULL;

                if (likely(!folio_is_zone_device(dst)))
                        flush_dcache_folio(dst);
        }
out:
        return rc;
}

/*
 * To record some information during migration, we use the unused private
 * field of struct folio of the newly allocated destination folio.
 * This is safe because nobody is using it except us.
 */
enum {
        PAGE_WAS_MAPPED = BIT(0),
        PAGE_WAS_MLOCKED = BIT(1),
        PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
};

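/*
 * Pack the anon_vma pointer and the PAGE_OLD_STATES bits into dst->private.
 * This relies on struct anon_vma allocations being at least 4-byte aligned,
 * which leaves the two low bits of the pointer free for the state flags.
 */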
static void __migrate_folio_record(struct folio *dst,
                                   int old_page_state,
                                   struct anon_vma *anon_vma)
{
        dst->private = (void *)anon_vma + old_page_state;
}

static void __migrate_folio_extract(struct folio *dst,
                                    int *old_page_state,
                                    struct anon_vma **anon_vmap)
{
        unsigned long private = (unsigned long)dst->private;

        *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
        *old_page_state = private & PAGE_OLD_STATES;
        dst->private = NULL;
}

/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
                                   int page_was_mapped,
                                   struct anon_vma *anon_vma,
                                   bool locked,
                                   struct list_head *ret)
{
        if (page_was_mapped)
                remove_migration_ptes(src, src, 0);
        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);
        if (locked)
                folio_unlock(src);
        if (ret)
                list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
                                   free_folio_t put_new_folio, unsigned long private)
{
        if (locked)
                folio_unlock(dst);
        if (put_new_folio)
                put_new_folio(dst, private);
        else
                folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
                               enum migrate_reason reason)
{
        /*
         * Compaction can also migrate non-LRU pages, which are not
         * accounted in NR_ISOLATED_*. They can be recognized via
         * __folio_test_movable().
         */
        if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
                mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
                                    folio_is_file_lru(src), -folio_nr_pages(src));

        if (reason != MR_MEMORY_FAILURE)
                /* We release the page in page_handle_poison() */
                folio_put(src);
}

/* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_folio_t get_new_folio,
                free_folio_t put_new_folio, unsigned long private,
                struct folio *src, struct folio **dstp, enum migrate_mode mode,
                enum migrate_reason reason, struct list_head *ret)
{
        struct folio *dst;
        int rc = -EAGAIN;
        int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = data_race(!__folio_test_movable(src));
        bool locked = false;
        bool dst_locked = false;

        if (folio_ref_count(src) == 1) {
                /* Folio was freed from under us. So we are done. */
                folio_clear_active(src);
                folio_clear_unevictable(src);
                /* free_pages_prepare() will clear PG_isolated. */
                list_del(&src->lru);
                migrate_folio_done(src, reason);
                return MIGRATEPAGE_SUCCESS;
        }

        dst = get_new_folio(src, private);
        if (!dst)
                return -ENOMEM;
        *dstp = dst;

        dst->private = NULL;

        if (!folio_trylock(src)) {
                if (mode == MIGRATE_ASYNC)
                        goto out;

                /*
                 * It's not safe for direct compaction to call lock_page.
                 * For example, during page readahead pages are added locked
                 * to the LRU. Later, when the IO completes the pages are
                 * marked uptodate and unlocked. However, the queueing
                 * could be merging multiple pages for one bio (e.g.
                 * mpage_readahead). If an allocation happens for the
                 * second or third page, the process can end up locking
                 * the same page twice and deadlocking. Rather than
                 * trying to be clever about what pages can be locked,
                 * avoid the use of lock_page for direct compaction
                 * altogether.
                 */
                if (current->flags & PF_MEMALLOC)
                        goto out;

                /*
                 * In "light" mode, we can wait for transient locks (eg
                 * inserting a page into the page table), but it's not
                 * worth waiting for I/O.
                 */
                if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
                        goto out;

                folio_lock(src);
        }
        locked = true;
        if (folio_test_mlocked(src))
                old_page_state |= PAGE_WAS_MLOCKED;

        if (folio_test_writeback(src)) {
                /*
                 * Only in the case of a full synchronous migration is it
                 * necessary to wait for PageWriteback. In the async case,
                 * the retry loop is too short and in the sync-light case,
                 * the overhead of stalling is too much.
                 */
                switch (mode) {
                case MIGRATE_SYNC:
                        break;
                default:
                        rc = -EBUSY;
                        goto out;
                }
                folio_wait_writeback(src);
        }

        /*
         * By try_to_migrate(), src->mapcount goes down to 0 here. In this
         * case, we cannot notice that anon_vma is freed while we migrate a
         * page. This get_anon_vma() delays freeing the anon_vma pointer
         * until the end of migration. File cache pages are no problem
         * because of the page lock: file caches may use writepage() or
         * lock_page() during migration, so only anon pages need care here.
         *
         * Only folio_get_anon_vma() understands the subtleties of
         * getting a hold on an anon_vma from outside one of its mms.
         * But if we cannot get anon_vma, then we won't need it anyway,
         * because that implies that the anon page is no longer mapped
         * (and cannot be remapped so long as we hold the page lock).
         */
        if (folio_test_anon(src) && !folio_test_ksm(src))
                anon_vma = folio_get_anon_vma(src);

        /*
         * Block others from accessing the new page when we get around to
         * establishing additional references. We are usually the only one
         * holding a reference to dst at this point. We used to have a BUG
         * here if folio_trylock(dst) fails, but would like to allow for
         * cases where there might be a race with the previous use of dst.
         * This is much like races on the refcount of oldpage: just don't BUG().
         */
        if (unlikely(!folio_trylock(dst)))
                goto out;
        dst_locked = true;

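        /*
         * Non-LRU movable pages are never mapped into user page tables,
         * so there is nothing to unmap: record the old state and report
         * the folio as ready to be moved.
         */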
        if (unlikely(!is_lru)) {
                __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is being read in, it is added to
         *    the LRU and treated as swapcache but it has no rmap yet.
         *    Calling try_to_unmap() against a src->mapping==NULL page will
         *    trigger a BUG. So handle it here.
         * 2. An orphaned page (see truncate_cleanup_page) might have
         *    fs-private metadata. The page can be picked up due to memory
         *    offlining. Everywhere else except page reclaim, the page is
         *    invisible to the vm, so the page can not be migrated. So try to
         *    free the metadata, so the page can be freed.
         */
        if (!src->mapping) {
                if (folio_test_private(src)) {
                        try_to_free_buffers(src);
                        goto out;
                }
        } else if (folio_mapped(src)) {
                /* Establish migration ptes */
                VM_BUG_ON_FOLIO(folio_test_anon(src) &&
                                !folio_test_ksm(src) && !anon_vma, src);
                try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
                old_page_state |= PAGE_WAS_MAPPED;
        }

        if (!folio_mapped(src)) {
                __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }

out:
        /*
         * A folio that has not been unmapped will be restored to the
         * right list unless we want to retry.
         */
        if (rc == -EAGAIN)
                ret = NULL;

        migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
                               anon_vma, locked, ret);
        migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);

        return rc;
}

/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
                              struct folio *src, struct folio *dst,
                              enum migrate_mode mode, enum migrate_reason reason,
                              struct list_head *ret)
{
        int rc;
        int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = !__folio_test_movable(src);
        struct list_head *prev;

        __migrate_folio_extract(dst, &old_page_state, &anon_vma);
        prev = dst->lru.prev;
        list_del(&dst->lru);

        rc = move_to_new_folio(dst, src, mode);
        if (rc)
                goto out;

        if (unlikely(!is_lru))
                goto out_unlock_both;

        /*
         * When successful, push dst to LRU immediately: so that if it
         * turns out to be an mlocked page, remove_migration_ptes() will
         * automatically build up the correct dst->mlock_count for it.
         *
         * We would like to do something similar for the old page, when
         * unsuccessful, and other cases when a page has been temporarily
         * isolated from the unevictable LRU: but this case is the easiest.
         */
        folio_add_lru(dst);
        if (old_page_state & PAGE_WAS_MLOCKED)
                lru_add_drain();

        if (old_page_state & PAGE_WAS_MAPPED)
                remove_migration_ptes(src, dst, 0);

out_unlock_both:
        folio_unlock(dst);
        set_page_owner_migrate_reason(&dst->page, reason);
        /*
         * If migration is successful, drop our reference to dst; this
         * will not free the page because the new page owner holds a
         * reference.
         */
        folio_put(dst);

        /*
         * A folio that has been migrated has all references removed
         * and will be freed.
         */
        list_del(&src->lru);
        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);
        folio_unlock(src);
        migrate_folio_done(src, reason);

        return rc;
out:
        /*
         * A folio that has not been migrated will be restored to the
         * right list unless we want to retry.
         */
        if (rc == -EAGAIN) {
                list_add(&dst->lru, prev);
                __migrate_folio_record(dst, old_page_state, anon_vma);
                return rc;
        }

        migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
                               anon_vma, true, ret);
        migrate_folio_undo_dst(dst, true, put_new_folio, private);

        return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference count of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
                free_folio_t put_new_folio, unsigned long private,
                struct folio *src, int force, enum migrate_mode mode,
                int reason, struct list_head *ret)
{
        struct folio *dst;
        int rc = -EAGAIN;
        int page_was_mapped = 0;
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;

        if (folio_ref_count(src) == 1) {
                /* page was freed from under us. So we are done. */
                folio_putback_active_hugetlb(src);
                return MIGRATEPAGE_SUCCESS;
        }

        dst = get_new_folio(src, private);
        if (!dst)
                return -ENOMEM;

        if (!folio_trylock(src)) {
                if (!force)
                        goto out;
                switch (mode) {
                case MIGRATE_SYNC:
                        break;
                default:
                        goto out;
                }
                folio_lock(src);
        }

        /*
         * Check for pages which are in the process of being freed. Without
         * folio_mapping() set, the hugetlbfs specific move page routine will
         * not be called and we could leak usage counts for subpools.
         */
        if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
                rc = -EBUSY;
                goto out_unlock;
        }

        if (folio_test_anon(src))
                anon_vma = folio_get_anon_vma(src);

        if (unlikely(!folio_trylock(dst)))
                goto put_anon;

        if (folio_mapped(src)) {
                enum ttu_flags ttu = 0;

                if (!folio_test_anon(src)) {
                        /*
                         * In shared mappings, try_to_unmap could potentially
                         * call huge_pmd_unshare. Because of this, take the
                         * semaphore in write mode here and set TTU_RMAP_LOCKED
                         * to let lower levels know we have taken the lock.
                         */
                        mapping = hugetlb_folio_mapping_lock_write(src);
                        if (unlikely(!mapping))
                                goto unlock_put_anon;

                        ttu = TTU_RMAP_LOCKED;
                }

                try_to_migrate(src, ttu);
                page_was_mapped = 1;

                if (ttu & TTU_RMAP_LOCKED)
                        i_mmap_unlock_write(mapping);
        }

        if (!folio_mapped(src))
                rc = move_to_new_folio(dst, src, mode);

        if (page_was_mapped)
                remove_migration_ptes(src,
                        rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);

unlock_put_anon:
        folio_unlock(dst);

put_anon:
        if (anon_vma)
                put_anon_vma(anon_vma);

        if (rc == MIGRATEPAGE_SUCCESS) {
                move_hugetlb_state(src, dst, reason);
                put_new_folio = NULL;
        }

out_unlock:
        folio_unlock(src);
out:
        if (rc == MIGRATEPAGE_SUCCESS)
                folio_putback_active_hugetlb(src);
        else if (rc != -EAGAIN)
                list_move_tail(&src->lru, ret);

        /*
         * If migration was not successful and there's a freeing callback, use
         * it. Otherwise, put_page() will drop the reference grabbed during
         * isolation.
         */
        if (put_new_folio)
                put_new_folio(dst, private);
        else
                folio_putback_active_hugetlb(dst);

        return rc;
}

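/*
 * Split a large folio onto @split_folios as a list of order-0 folios. In
 * MIGRATE_ASYNC mode only a trylock is attempted so we never sleep on the
 * folio lock; on success the remaining head folio is moved onto
 * @split_folios together with the new tails.
 */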
static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
                                  enum migrate_mode mode)
{
        int rc;

        if (mode == MIGRATE_ASYNC) {
                if (!folio_trylock(folio))
                        return -EAGAIN;
        } else {
                folio_lock(folio);
        }
        rc = split_folio_to_list(folio, split_folios);
        folio_unlock(folio);
        if (!rc)
                list_move_tail(&folio->lru, split_folios);

        return rc;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
#else
#define NR_MAX_BATCHED_MIGRATION	512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY	10
#define NR_MAX_MIGRATE_ASYNC_RETRY	3
#define NR_MAX_MIGRATE_SYNC_RETRY					\
	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
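/*
 * The retry budget is split between the async and sync phases: when a sync
 * migration follows failed async passes, the async passes have already
 * consumed NR_MAX_MIGRATE_ASYNC_RETRY attempts out of the overall
 * NR_MAX_MIGRATE_PAGES_RETRY budget, so only the remainder is used for
 * the sync retries.
 */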

struct migrate_pages_stats {
        int nr_succeeded;       /* Normal and large folios migrated successfully, in
                                   units of base pages */
        int nr_failed_pages;    /* Normal and large folios failed to be migrated, in
                                   units of base pages. Untried folios aren't counted */
        int nr_thp_succeeded;   /* THP migrated successfully */
        int nr_thp_failed;      /* THP failed to be migrated */
        int nr_thp_split;       /* THP split before migrating */
        int nr_split;           /* Large folio (including THP) split before migrating */
};

/*
 * Returns the number of hugetlb folios that were not migrated, or an error code
 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
 * any more because the list has become empty or no retryable hugetlb folios
 * exist any more. It is the caller's responsibility to call
 * putback_movable_pages() only if ret != 0.
 */
static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
                            free_folio_t put_new_folio, unsigned long private,
                            enum migrate_mode mode, int reason,
                            struct migrate_pages_stats *stats,
                            struct list_head *ret_folios)
{
        int retry = 1;
        int nr_failed = 0;
        int nr_retry_pages = 0;
        int pass = 0;
        struct folio *folio, *folio2;
        int rc, nr_pages;

        for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
                retry = 0;
                nr_retry_pages = 0;

                list_for_each_entry_safe(folio, folio2, from, lru) {
                        if (!folio_test_hugetlb(folio))
                                continue;

                        nr_pages = folio_nr_pages(folio);

                        cond_resched();

                        /*
                         * Migratability of hugepages depends on the architecture
                         * and their size. This check is necessary because some
                         * callers of hugepage migration like soft offline and
                         * memory hotremove don't walk through page tables or
                         * check whether the hugepage is pmd-based or not before
                         * kicking migration.
                         */
                        if (!hugepage_migration_supported(folio_hstate(folio))) {
                                nr_failed++;
                                stats->nr_failed_pages += nr_pages;
                                list_move_tail(&folio->lru, ret_folios);
                                continue;
                        }

                        rc = unmap_and_move_huge_page(get_new_folio,
                                                      put_new_folio, private,
                                                      folio, pass > 2, mode,
                                                      reason, ret_folios);
                        /*
                         * The rules are:
                         *	Success: hugetlb folio will be put back
                         *	-EAGAIN: stay on the from list
                         *	-ENOMEM: stay on the from list
                         *	Other errno: put on ret_folios list
                         */
                        switch(rc) {
                        case -ENOMEM:
                                /*
                                 * When memory is low, don't bother to try to
                                 * migrate other folios, just exit.
                                 */
                                stats->nr_failed_pages += nr_pages + nr_retry_pages;
                                return -ENOMEM;
                        case -EAGAIN:
                                retry++;
                                nr_retry_pages += nr_pages;
                                break;
                        case MIGRATEPAGE_SUCCESS:
                                stats->nr_succeeded += nr_pages;
                                break;
                        default:
                                /*
                                 * Permanent failure (-EBUSY, etc.):
                                 * unlike the -EAGAIN case, the failed folio is
                                 * removed from the migration folio list and not
                                 * retried in the next outer loop.
                                 */
                                nr_failed++;
                                stats->nr_failed_pages += nr_pages;
                                break;
                        }
                }
        }
        /*
         * nr_failed is the number of hugetlb folios that failed to be migrated.
         * After NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried
         * hugetlb folios as failed.
         */
        nr_failed += retry;
        stats->nr_failed_pages += nr_retry_pages;

        return nr_failed;
}
1696
1697 /*
1698 * migrate_pages_batch() first unmaps folios in the from list as many as
1699 * possible, then move the unmapped folios.
1700 *
1701 * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a
1702 * lock or bit when we have locked more than one folio. Which may cause
1703 * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the
1704 * length of the from list must be <= 1.
1705 */
migrate_pages_batch(struct list_head * from,new_folio_t get_new_folio,free_folio_t put_new_folio,unsigned long private,enum migrate_mode mode,int reason,struct list_head * ret_folios,struct list_head * split_folios,struct migrate_pages_stats * stats,int nr_pass)1706 static int migrate_pages_batch(struct list_head *from,
1707 new_folio_t get_new_folio, free_folio_t put_new_folio,
1708 unsigned long private, enum migrate_mode mode, int reason,
1709 struct list_head *ret_folios, struct list_head *split_folios,
1710 struct migrate_pages_stats *stats, int nr_pass)
1711 {
1712 int retry = 1;
1713 int thp_retry = 1;
1714 int nr_failed = 0;
1715 int nr_retry_pages = 0;
1716 int pass = 0;
1717 bool is_thp = false;
1718 bool is_large = false;
1719 struct folio *folio, *folio2, *dst = NULL, *dst2;
1720 int rc, rc_saved = 0, nr_pages;
1721 LIST_HEAD(unmap_folios);
1722 LIST_HEAD(dst_folios);
1723 bool nosplit = (reason == MR_NUMA_MISPLACED);
1724
1725 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1726 !list_empty(from) && !list_is_singular(from));
1727
1728 for (pass = 0; pass < nr_pass && retry; pass++) {
1729 retry = 0;
1730 thp_retry = 0;
1731 nr_retry_pages = 0;
1732
1733 list_for_each_entry_safe(folio, folio2, from, lru) {
1734 is_large = folio_test_large(folio);
1735 is_thp = is_large && folio_test_pmd_mappable(folio);
1736 nr_pages = folio_nr_pages(folio);
1737
1738 cond_resched();
1739
1740 /*
1741 * The rare folio on the deferred split list should
1742 * be split now. It should not count as a failure,
1743 * but increment nr_failed because, without doing so,
1744 * migrate_pages() may report success with (split but
1745 * unmigrated) pages still on its fromlist; whereas it
1746 * always reports success when its fromlist is empty.
1747 * stats->nr_thp_failed should be increased too,
1748 * otherwise stats inconsistency will happen when
1749 * migrate_pages_batch() is called via migrate_pages()
1750 * with MIGRATE_SYNC and MIGRATE_ASYNC.
1751 *
1752 * Only check the flag without removing the folio from
1753 * the list, since the folio can be on a local list of
1754 * deferred_split_scan() and removing it there would
1755 * corrupt that local list. The folio split below can
1756 * handle this case with the help of folio_ref_freeze().
1757 *
1758 * nr_pages > 2 is needed to avoid checking order-1
1759 * page cache folios. They exist, in contrast to
1760 * non-existent order-1 anonymous folios, and do not
1761 * use _deferred_list.
1762 */
1763 if (nr_pages > 2 &&
1764 !list_empty(&folio->_deferred_list) &&
1765 folio_test_partially_mapped(folio)) {
1766 if (!try_split_folio(folio, split_folios, mode)) {
1767 nr_failed++;
1768 stats->nr_thp_failed += is_thp;
1769 stats->nr_thp_split += is_thp;
1770 stats->nr_split++;
1771 continue;
1772 }
1773 }
1774
1775 /*
1776 * Large folio migration might be unsupported or
1777 * the allocation might fail, so we should retry
1778 * the same folio after splitting the large folio
1779 * into normal folios.
1780 *
1781 * Split folios are put in split_folios, and
1782 * we will migrate them after the rest of the
1783 * list is processed.
1784 */
1785 if (!thp_migration_supported() && is_thp) {
1786 nr_failed++;
1787 stats->nr_thp_failed++;
1788 if (!try_split_folio(folio, split_folios, mode)) {
1789 stats->nr_thp_split++;
1790 stats->nr_split++;
1791 continue;
1792 }
1793 stats->nr_failed_pages += nr_pages;
1794 list_move_tail(&folio->lru, ret_folios);
1795 continue;
1796 }
1797
1798 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1799 private, folio, &dst, mode, reason,
1800 ret_folios);
1801 /*
1802 * The rules are:
1803 * Success: folio will be freed
1804 * Unmap: folio will be put on unmap_folios list,
1805 * dst folio put on dst_folios list
1806 * -EAGAIN: stay on the from list
1807 * -ENOMEM: stay on the from list
1808 * Other errno: put on ret_folios list
1809 */
1810 switch (rc) {
1811 case -ENOMEM:
1812 /*
1813 * When memory is low, don't bother trying to migrate other
1814 * folios: move the already-unmapped folios, then exit.
1815 */
1816 nr_failed++;
1817 stats->nr_thp_failed += is_thp;
1818 /* Large folio NUMA faulting doesn't split to retry. */
1819 if (is_large && !nosplit) {
1820 int ret = try_split_folio(folio, split_folios, mode);
1821
1822 if (!ret) {
1823 stats->nr_thp_split += is_thp;
1824 stats->nr_split++;
1825 break;
1826 } else if (reason == MR_LONGTERM_PIN &&
1827 ret == -EAGAIN) {
1828 /*
1829 * Try again to split the large folio to
1830 * mitigate the failure of longterm pinning.
1831 */
1832 retry++;
1833 thp_retry += is_thp;
1834 nr_retry_pages += nr_pages;
1835 /* Undo duplicated failure counting. */
1836 nr_failed--;
1837 stats->nr_thp_failed -= is_thp;
1838 break;
1839 }
1840 }
1841
1842 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1843 /* nr_failed isn't updated here because it isn't used after this point */
1844 stats->nr_thp_failed += thp_retry;
1845 rc_saved = rc;
1846 if (list_empty(&unmap_folios))
1847 goto out;
1848 else
1849 goto move;
1850 case -EAGAIN:
1851 retry++;
1852 thp_retry += is_thp;
1853 nr_retry_pages += nr_pages;
1854 break;
1855 case MIGRATEPAGE_SUCCESS:
1856 stats->nr_succeeded += nr_pages;
1857 stats->nr_thp_succeeded += is_thp;
1858 break;
1859 case MIGRATEPAGE_UNMAP:
1860 list_move_tail(&folio->lru, &unmap_folios);
1861 list_add_tail(&dst->lru, &dst_folios);
1862 break;
1863 default:
1864 /*
1865 * Permanent failure (-EBUSY, etc.):
1866 * unlike -EAGAIN case, the failed folio is
1867 * removed from migration folio list and not
1868 * retried in the next outer loop.
1869 */
1870 nr_failed++;
1871 stats->nr_thp_failed += is_thp;
1872 stats->nr_failed_pages += nr_pages;
1873 break;
1874 }
1875 }
1876 }
1877 nr_failed += retry;
1878 stats->nr_thp_failed += thp_retry;
1879 stats->nr_failed_pages += nr_retry_pages;
1880 move:
1881 /* Flush TLBs for all unmapped folios */
1882 try_to_unmap_flush();
1883
1884 retry = 1;
1885 for (pass = 0; pass < nr_pass && retry; pass++) {
1886 retry = 0;
1887 thp_retry = 0;
1888 nr_retry_pages = 0;
1889
1890 dst = list_first_entry(&dst_folios, struct folio, lru);
1891 dst2 = list_next_entry(dst, lru);
1892 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1893 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1894 nr_pages = folio_nr_pages(folio);
1895
1896 cond_resched();
1897
1898 rc = migrate_folio_move(put_new_folio, private,
1899 folio, dst, mode,
1900 reason, ret_folios);
1901 /*
1902 * The rules are:
1903 * Success: folio will be freed
1904 * -EAGAIN: stay on the unmap_folios list
1905 * Other errno: put on ret_folios list
1906 */
1907 switch (rc) {
1908 case -EAGAIN:
1909 retry++;
1910 thp_retry += is_thp;
1911 nr_retry_pages += nr_pages;
1912 break;
1913 case MIGRATEPAGE_SUCCESS:
1914 stats->nr_succeeded += nr_pages;
1915 stats->nr_thp_succeeded += is_thp;
1916 break;
1917 default:
1918 nr_failed++;
1919 stats->nr_thp_failed += is_thp;
1920 stats->nr_failed_pages += nr_pages;
1921 break;
1922 }
1923 dst = dst2;
1924 dst2 = list_next_entry(dst, lru);
1925 }
1926 }
1927 nr_failed += retry;
1928 stats->nr_thp_failed += thp_retry;
1929 stats->nr_failed_pages += nr_retry_pages;
1930
1931 rc = rc_saved ? : nr_failed;
1932 out:
1933 /* Cleanup remaining folios */
1934 dst = list_first_entry(&dst_folios, struct folio, lru);
1935 dst2 = list_next_entry(dst, lru);
1936 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1937 int old_page_state = 0;
1938 struct anon_vma *anon_vma = NULL;
1939
1940 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1941 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1942 anon_vma, true, ret_folios);
1943 list_del(&dst->lru);
1944 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1945 dst = dst2;
1946 dst2 = list_next_entry(dst, lru);
1947 }
1948
1949 return rc;
1950 }
1951
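/*
 * A sketch of the locking rule documented above, using a hypothetical
 * wrapper name: a synchronous caller keeps the batch singular so that at
 * most one folio lock is held while blocking. migrate_pages_sync() below
 * follows the same pattern.
 */
static int __maybe_unused migrate_one_by_one(struct list_head *from,
		new_folio_t get_new_folio, free_folio_t put_new_folio,
		unsigned long private, int reason,
		struct list_head *ret_folios, struct list_head *split_folios,
		struct migrate_pages_stats *stats)
{
	LIST_HEAD(one);
	int rc, nr_failed = 0;

	while (!list_empty(from)) {
		/* Move a single folio over: the batch stays singular. */
		list_move(from->next, &one);
		rc = migrate_pages_batch(&one, get_new_folio, put_new_folio,
					 private, MIGRATE_SYNC, reason,
					 ret_folios, split_folios, stats,
					 NR_MAX_MIGRATE_SYNC_RETRY);
		list_splice_tail_init(&one, ret_folios);
		if (rc < 0)
			return rc;
		nr_failed += rc;
	}
	return nr_failed;
}
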
1952 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1953 free_folio_t put_new_folio, unsigned long private,
1954 enum migrate_mode mode, int reason,
1955 struct list_head *ret_folios, struct list_head *split_folios,
1956 struct migrate_pages_stats *stats)
1957 {
1958 int rc, nr_failed = 0;
1959 LIST_HEAD(folios);
1960 struct migrate_pages_stats astats;
1961
1962 memset(&astats, 0, sizeof(astats));
1963 /* First, try to migrate folios in batch with MIGRATE_ASYNC mode */
1964 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1965 reason, &folios, split_folios, &astats,
1966 NR_MAX_MIGRATE_ASYNC_RETRY);
1967 stats->nr_succeeded += astats.nr_succeeded;
1968 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1969 stats->nr_thp_split += astats.nr_thp_split;
1970 stats->nr_split += astats.nr_split;
1971 if (rc < 0) {
1972 stats->nr_failed_pages += astats.nr_failed_pages;
1973 stats->nr_thp_failed += astats.nr_thp_failed;
1974 list_splice_tail(&folios, ret_folios);
1975 return rc;
1976 }
1977 stats->nr_thp_failed += astats.nr_thp_split;
1978 /*
1979 * Do not count rc, as pages will be retried below.
1980 * Count nr_split only, since it includes nr_thp_split.
1981 */
1982 nr_failed += astats.nr_split;
1983 /*
1984 * Fall back to migrating the failed folios one by one synchronously. All
1985 * failed folios except split THPs will be retried, so their failure
1986 * isn't counted.
1987 */
1988 list_splice_tail_init(&folios, from);
1989 while (!list_empty(from)) {
1990 list_move(from->next, &folios);
1991 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1992 private, mode, reason, ret_folios,
1993 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1994 list_splice_tail_init(&folios, ret_folios);
1995 if (rc < 0)
1996 return rc;
1997 nr_failed += rc;
1998 }
1999
2000 return nr_failed;
2001 }
2002
2003 /*
2004 * migrate_pages - migrate the folios specified in a list, to the free folios
2005 * supplied as the target for the page migration
2006 *
2007 * @from: The list of folios to be migrated.
2008 * @get_new_folio: The function used to allocate free folios to be used
2009 * as the target of the folio migration.
2010 * @put_new_folio: The function used to free target folios if migration
2011 * fails, or NULL if no special handling is necessary.
2012 * @private: Private data to be passed on to get_new_folio()
2013 * @mode: The migration mode that specifies the constraints for
2014 * folio migration, if any.
2015 * @reason: The reason for folio migration.
2016 * @ret_succeeded: Set to the number of folios migrated successfully if
2017 * the caller passes a non-NULL pointer.
2018 *
2019 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
2020 * are movable any more because the list has become empty or no retryable folios
2021 * exist any more. It is the caller's responsibility to call putback_movable_pages()
2022 * only if ret != 0. An example caller is sketched after this function.
2023 *
2024 * Returns the number of {normal, large, hugetlb} folios that were not
2025 * migrated, or an error code. The number of large folio splits will be
2026 * considered as the number of non-migrated large folios, no matter how many
2027 * split folios of the large folio are migrated successfully.
2028 */
2029 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
2030 free_folio_t put_new_folio, unsigned long private,
2031 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
2032 {
2033 int rc, rc_gather;
2034 int nr_pages;
2035 struct folio *folio, *folio2;
2036 LIST_HEAD(folios);
2037 LIST_HEAD(ret_folios);
2038 LIST_HEAD(split_folios);
2039 struct migrate_pages_stats stats;
2040
2041 trace_mm_migrate_pages_start(mode, reason);
2042
2043 memset(&stats, 0, sizeof(stats));
2044
2045 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
2046 mode, reason, &stats, &ret_folios);
2047 if (rc_gather < 0)
2048 goto out;
2049
2050 again:
2051 nr_pages = 0;
2052 list_for_each_entry_safe(folio, folio2, from, lru) {
2053 /* Retried hugetlb folios will be kept on the list */
2054 if (folio_test_hugetlb(folio)) {
2055 list_move_tail(&folio->lru, &ret_folios);
2056 continue;
2057 }
2058
2059 nr_pages += folio_nr_pages(folio);
2060 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2061 break;
2062 }
2063 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2064 list_cut_before(&folios, from, &folio2->lru);
2065 else
2066 list_splice_init(from, &folios);
2067 if (mode == MIGRATE_ASYNC)
2068 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2069 private, mode, reason, &ret_folios,
2070 &split_folios, &stats,
2071 NR_MAX_MIGRATE_PAGES_RETRY);
2072 else
2073 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
2074 private, mode, reason, &ret_folios,
2075 &split_folios, &stats);
2076 list_splice_tail_init(&folios, &ret_folios);
2077 if (rc < 0) {
2078 rc_gather = rc;
2079 list_splice_tail(&split_folios, &ret_folios);
2080 goto out;
2081 }
2082 if (!list_empty(&split_folios)) {
2083 /*
2084 * Failure isn't counted here, since all split folios of a large folio
2085 * are already counted as one failure. And we only try to migrate
2086 * with minimal effort: force MIGRATE_ASYNC mode and retry once.
2087 */
2088 migrate_pages_batch(&split_folios, get_new_folio,
2089 put_new_folio, private, MIGRATE_ASYNC, reason,
2090 &ret_folios, NULL, &stats, 1);
2091 list_splice_tail_init(&split_folios, &ret_folios);
2092 }
2093 rc_gather += rc;
2094 if (!list_empty(from))
2095 goto again;
2096 out:
2097 /*
2098 * Put the permanently failed folios back on the migration list; they
2099 * will be moved to the right list by the caller.
2100 */
2101 list_splice(&ret_folios, from);
2102
2103 /*
2104 * Return 0 in case all split folios of fail-to-migrate large folios
2105 * are migrated successfully.
2106 */
2107 if (list_empty(from))
2108 rc_gather = 0;
2109
2110 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2111 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2112 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2113 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2114 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2115 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2116 stats.nr_thp_succeeded, stats.nr_thp_failed,
2117 stats.nr_thp_split, stats.nr_split, mode,
2118 reason);
2119
2120 if (ret_succeeded)
2121 *ret_succeeded = stats.nr_succeeded;
2122
2123 return rc_gather;
2124 }
2125
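/*
 * A sketch of a typical migrate_pages() caller, using hypothetical names
 * (example_alloc_dst(), example_migrate_list()) and ignoring hugetlb;
 * real callers usually pass alloc_migration_target() below together with
 * a struct migration_target_control instead.
 */
static struct folio *example_alloc_dst(struct folio *src, unsigned long private)
{
	int nid = (int)private;

	/* Allocate a same-order destination folio on the requested node. */
	return __folio_alloc_node(GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
				  folio_order(src), nid);
}

static int __maybe_unused example_migrate_list(struct list_head *folio_list,
					       int nid)
{
	unsigned int nr_succeeded;
	int err;

	err = migrate_pages(folio_list, example_alloc_dst, NULL,
			    (unsigned long)nid, MIGRATE_SYNC, MR_SYSCALL,
			    &nr_succeeded);
	/* Non-zero return: put back whatever was not migrated. */
	if (err)
		putback_movable_pages(folio_list);
	return err;
}
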
2126 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2127 {
2128 struct migration_target_control *mtc;
2129 gfp_t gfp_mask;
2130 unsigned int order = 0;
2131 int nid;
2132 int zidx;
2133
2134 mtc = (struct migration_target_control *)private;
2135 gfp_mask = mtc->gfp_mask;
2136 nid = mtc->nid;
2137 if (nid == NUMA_NO_NODE)
2138 nid = folio_nid(src);
2139
2140 if (folio_test_hugetlb(src)) {
2141 struct hstate *h = folio_hstate(src);
2142
2143 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2144 return alloc_hugetlb_folio_nodemask(h, nid,
2145 mtc->nmask, gfp_mask,
2146 htlb_allow_alloc_fallback(mtc->reason));
2147 }
2148
2149 if (folio_test_large(src)) {
2150 /*
2151 * Clear __GFP_RECLAIM to make the migration callback
2152 * consistent with regular THP allocations.
2153 */
2154 gfp_mask &= ~__GFP_RECLAIM;
2155 gfp_mask |= GFP_TRANSHUGE;
2156 order = folio_order(src);
2157 }
2158 zidx = zone_idx(folio_zone(src));
2159 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2160 gfp_mask |= __GFP_HIGHMEM;
2161
2162 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2163 }
2164
2165 #ifdef CONFIG_NUMA
2166
2167 static int store_status(int __user *status, int start, int value, int nr)
2168 {
2169 while (nr-- > 0) {
2170 if (put_user(value, status + start))
2171 return -EFAULT;
2172 start++;
2173 }
2174
2175 return 0;
2176 }
2177
2178 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2179 {
2180 int err;
2181 struct migration_target_control mtc = {
2182 .nid = node,
2183 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2184 .reason = MR_SYSCALL,
2185 };
2186
2187 err = migrate_pages(pagelist, alloc_migration_target, NULL,
2188 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2189 if (err)
2190 putback_movable_pages(pagelist);
2191 return err;
2192 }
2193
2194 static int __add_folio_for_migration(struct folio *folio, int node,
2195 struct list_head *pagelist, bool migrate_all)
2196 {
2197 if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2198 return -EFAULT;
2199
2200 if (folio_is_zone_device(folio))
2201 return -ENOENT;
2202
2203 if (folio_nid(folio) == node)
2204 return 0;
2205
2206 if (folio_likely_mapped_shared(folio) && !migrate_all)
2207 return -EACCES;
2208
2209 if (folio_test_hugetlb(folio)) {
2210 if (isolate_hugetlb(folio, pagelist))
2211 return 1;
2212 } else if (folio_isolate_lru(folio)) {
2213 list_add_tail(&folio->lru, pagelist);
2214 node_stat_mod_folio(folio,
2215 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2216 folio_nr_pages(folio));
2217 return 1;
2218 }
2219 return -EBUSY;
2220 }
2221
2222 /*
2223 * Resolves the given address to a struct folio, isolates it from the LRU and
2224 * puts it on the given pagelist.
2225 * Returns:
2226 * errno - if the folio cannot be found/isolated
2227 * 0 - when it doesn't have to be migrated because it is already on the
2228 * target node
2229 * 1 - when it has been queued; see the sketch after this function
2230 */
2231 static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
2232 int node, struct list_head *pagelist, bool migrate_all)
2233 {
2234 struct vm_area_struct *vma;
2235 struct folio_walk fw;
2236 struct folio *folio;
2237 unsigned long addr;
2238 int err = -EFAULT;
2239
2240 mmap_read_lock(mm);
2241 addr = (unsigned long)untagged_addr_remote(mm, p);
2242
2243 vma = vma_lookup(mm, addr);
2244 if (vma && vma_migratable(vma)) {
2245 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2246 if (folio) {
2247 err = __add_folio_for_migration(folio, node, pagelist,
2248 migrate_all);
2249 folio_walk_end(&fw, vma);
2250 } else {
2251 err = -ENOENT;
2252 }
2253 }
2254 mmap_read_unlock(mm);
2255 return err;
2256 }
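
/*
 * A sketch (hypothetical caller) of consuming the tri-state return value
 * documented above; do_pages_move() below does the real handling.
 */
static int __maybe_unused example_queue_folio(struct mm_struct *mm,
		const void __user *p, int node, struct list_head *pagelist)
{
	int err = add_folio_for_migration(mm, p, node, pagelist, true);

	if (err > 0)
		return 0;	/* queued on @pagelist for batch migration */
	if (!err)
		return node;	/* already resident on the target node */
	return err;		/* lookup/isolation error, reported as-is */
}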
2257
2258 static int move_pages_and_store_status(int node,
2259 struct list_head *pagelist, int __user *status,
2260 int start, int i, unsigned long nr_pages)
2261 {
2262 int err;
2263
2264 if (list_empty(pagelist))
2265 return 0;
2266
2267 err = do_move_pages_to_node(pagelist, node);
2268 if (err) {
2269 /*
2270 * A positive err means the number of pages that
2271 * failed to migrate. Since we are going to
2272 * abort and return the number of non-migrated
2273 * pages, we need to include the rest of the
2274 * nr_pages that have not been attempted as
2275 * well.
2276 */
2277 if (err > 0)
2278 err += nr_pages - i;
2279 return err;
2280 }
2281 return store_status(status, start, node, i - start);
2282 }
2283
2284 /*
2285 * Migrate an array of page addresses onto an array of nodes and fill
2286 * the corresponding array of status values.
2287 */
2288 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2289 unsigned long nr_pages,
2290 const void __user * __user *pages,
2291 const int __user *nodes,
2292 int __user *status, int flags)
2293 {
2294 compat_uptr_t __user *compat_pages = (void __user *)pages;
2295 int current_node = NUMA_NO_NODE;
2296 LIST_HEAD(pagelist);
2297 int start, i;
2298 int err = 0, err1;
2299
2300 lru_cache_disable();
2301
2302 for (i = start = 0; i < nr_pages; i++) {
2303 const void __user *p;
2304 int node;
2305
2306 err = -EFAULT;
2307 if (in_compat_syscall()) {
2308 compat_uptr_t cp;
2309
2310 if (get_user(cp, compat_pages + i))
2311 goto out_flush;
2312
2313 p = compat_ptr(cp);
2314 } else {
2315 if (get_user(p, pages + i))
2316 goto out_flush;
2317 }
2318 if (get_user(node, nodes + i))
2319 goto out_flush;
2320
2321 err = -ENODEV;
2322 if (node < 0 || node >= MAX_NUMNODES)
2323 goto out_flush;
2324 if (!node_state(node, N_MEMORY))
2325 goto out_flush;
2326
2327 err = -EACCES;
2328 if (!node_isset(node, task_nodes))
2329 goto out_flush;
2330
2331 if (current_node == NUMA_NO_NODE) {
2332 current_node = node;
2333 start = i;
2334 } else if (node != current_node) {
2335 err = move_pages_and_store_status(current_node,
2336 &pagelist, status, start, i, nr_pages);
2337 if (err)
2338 goto out;
2339 start = i;
2340 current_node = node;
2341 }
2342
2343 /*
2344 * Errors in the page lookup or isolation are not fatal; we simply
2345 * report them via status.
2346 */
2347 err = add_folio_for_migration(mm, p, current_node, &pagelist,
2348 flags & MPOL_MF_MOVE_ALL);
2349
2350 if (err > 0) {
2351 /* The page is successfully queued for migration */
2352 continue;
2353 }
2354
2355 /*
2356 * The move_pages() man page does not have an -EEXIST choice, so
2357 * use -EFAULT instead.
2358 */
2359 if (err == -EEXIST)
2360 err = -EFAULT;
2361
2362 /*
2363 * If the page is already on the target node (!err), store the
2364 * node, otherwise, store the err.
2365 */
2366 err = store_status(status, i, err ? : current_node, 1);
2367 if (err)
2368 goto out_flush;
2369
2370 err = move_pages_and_store_status(current_node, &pagelist,
2371 status, start, i, nr_pages);
2372 if (err) {
2373 /* We have accounted for page i */
2374 if (err > 0)
2375 err--;
2376 goto out;
2377 }
2378 current_node = NUMA_NO_NODE;
2379 }
2380 out_flush:
2381 /* Make sure we do not overwrite the existing error */
2382 err1 = move_pages_and_store_status(current_node, &pagelist,
2383 status, start, i, nr_pages);
2384 if (err >= 0)
2385 err = err1;
2386 out:
2387 lru_cache_enable();
2388 return err;
2389 }
2390
2391 /*
2392 * Determine the nodes of an array of pages and store them in an array of status.
2393 */
2394 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2395 const void __user **pages, int *status)
2396 {
2397 unsigned long i;
2398
2399 mmap_read_lock(mm);
2400
2401 for (i = 0; i < nr_pages; i++) {
2402 unsigned long addr = (unsigned long)(*pages);
2403 struct vm_area_struct *vma;
2404 struct folio_walk fw;
2405 struct folio *folio;
2406 int err = -EFAULT;
2407
2408 vma = vma_lookup(mm, addr);
2409 if (!vma)
2410 goto set_status;
2411
2412 folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2413 if (folio) {
2414 if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2415 err = -EFAULT;
2416 else if (folio_is_zone_device(folio))
2417 err = -ENOENT;
2418 else
2419 err = folio_nid(folio);
2420 folio_walk_end(&fw, vma);
2421 } else {
2422 err = -ENOENT;
2423 }
2424 set_status:
2425 *status = err;
2426
2427 pages++;
2428 status++;
2429 }
2430
2431 mmap_read_unlock(mm);
2432 }
2433
2434 static int get_compat_pages_array(const void __user *chunk_pages[],
2435 const void __user * __user *pages,
2436 unsigned long chunk_nr)
2437 {
2438 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2439 compat_uptr_t p;
2440 int i;
2441
2442 for (i = 0; i < chunk_nr; i++) {
2443 if (get_user(p, pages32 + i))
2444 return -EFAULT;
2445 chunk_pages[i] = compat_ptr(p);
2446 }
2447
2448 return 0;
2449 }
2450
2451 /*
2452 * Determine the nodes of a user array of pages and store them in
2453 * a user array of status values.
2454 */
2455 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2456 const void __user * __user *pages,
2457 int __user *status)
2458 {
2459 #define DO_PAGES_STAT_CHUNK_NR 16UL
2460 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2461 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2462
2463 while (nr_pages) {
2464 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2465
2466 if (in_compat_syscall()) {
2467 if (get_compat_pages_array(chunk_pages, pages,
2468 chunk_nr))
2469 break;
2470 } else {
2471 if (copy_from_user(chunk_pages, pages,
2472 chunk_nr * sizeof(*chunk_pages)))
2473 break;
2474 }
2475
2476 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2477
2478 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2479 break;
2480
2481 pages += chunk_nr;
2482 status += chunk_nr;
2483 nr_pages -= chunk_nr;
2484 }
2485 return nr_pages ? -EFAULT : 0;
2486 }
2487
2488 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2489 {
2490 struct task_struct *task;
2491 struct mm_struct *mm;
2492
2493 /*
2494 * There is no need to check if the current process has the right to modify
2495 * the specified process when they are the same.
2496 */
2497 if (!pid) {
2498 mmget(current->mm);
2499 *mem_nodes = cpuset_mems_allowed(current);
2500 return current->mm;
2501 }
2502
2503 task = find_get_task_by_vpid(pid);
2504 if (!task)
2505 return ERR_PTR(-ESRCH);
2507
2508 /*
2509 * Check if this process has the right to modify the specified
2510 * process. Use the regular "ptrace_may_access()" checks.
2511 */
2512 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2513 mm = ERR_PTR(-EPERM);
2514 goto out;
2515 }
2516
2517 mm = ERR_PTR(security_task_movememory(task));
2518 if (IS_ERR(mm))
2519 goto out;
2520 *mem_nodes = cpuset_mems_allowed(task);
2521 mm = get_task_mm(task);
2522 out:
2523 put_task_struct(task);
2524 if (!mm)
2525 mm = ERR_PTR(-EINVAL);
2526 return mm;
2527 }
2528
2529 /*
2530 * Move a list of pages in the address space of the currently executing
2531 * process.
2532 */
2533 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2534 const void __user * __user *pages,
2535 const int __user *nodes,
2536 int __user *status, int flags)
2537 {
2538 struct mm_struct *mm;
2539 int err;
2540 nodemask_t task_nodes;
2541
2542 /* Check flags */
2543 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2544 return -EINVAL;
2545
2546 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2547 return -EPERM;
2548
2549 mm = find_mm_struct(pid, &task_nodes);
2550 if (IS_ERR(mm))
2551 return PTR_ERR(mm);
2552
2553 if (nodes)
2554 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2555 nodes, status, flags);
2556 else
2557 err = do_pages_stat(mm, nr_pages, pages, status);
2558
2559 mmput(mm);
2560 return err;
2561 }
2562
2563 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2564 const void __user * __user *, pages,
2565 const int __user *, nodes,
2566 int __user *, status, int, flags)
2567 {
2568 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2569 }
2570
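/*
 * For reference, the userspace view of this syscall: an illustrative
 * snippet using libnuma's <numaif.h> wrapper, with error handling and
 * the addr0/addr1 page pointers left out. Passing a nodes array moves
 * the pages; passing nodes == NULL only queries their current nodes via
 * do_pages_stat():
 *
 *	#include <numaif.h>
 *
 *	void *pages[2] = { addr0, addr1 };
 *	int nodes[2] = { 1, 1 };	// desired target node for each page
 *	int status[2];
 *
 *	// Move both pages to node 1; per-page results land in status[].
 *	move_pages(0, 2, pages, nodes, status, MPOL_MF_MOVE);
 *
 *	// Query only: status[i] becomes a node ID or a negative errno.
 *	move_pages(0, 2, pages, NULL, status, 0);
 */
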
2571 #ifdef CONFIG_NUMA_BALANCING
2572 /*
2573 * Returns true if this is a safe migration target node for misplaced NUMA
2574 * pages. Currently it only checks the watermarks, which is crude.
2575 */
2576 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2577 unsigned long nr_migrate_pages)
2578 {
2579 int z;
2580
2581 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2582 struct zone *zone = pgdat->node_zones + z;
2583
2584 if (!managed_zone(zone))
2585 continue;
2586
2587 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
2588 if (!zone_watermark_ok(zone, 0,
2589 high_wmark_pages(zone) +
2590 nr_migrate_pages,
2591 ZONE_MOVABLE, ALLOC_CMA))
2592 continue;
2593 return true;
2594 }
2595 return false;
2596 }
2597
2598 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2599 unsigned long data)
2600 {
2601 int nid = (int) data;
2602 int order = folio_order(src);
2603 gfp_t gfp = __GFP_THISNODE;
2604
2605 if (order > 0)
2606 gfp |= GFP_TRANSHUGE_LIGHT;
2607 else {
2608 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2609 __GFP_NOWARN;
2610 gfp &= ~__GFP_RECLAIM;
2611 }
2612 return __folio_alloc_node(gfp, order, nid);
2613 }
2614
2615 /*
2616 * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2617 * permitted. Must be called with the PTL still held.
2618 */
2619 int migrate_misplaced_folio_prepare(struct folio *folio,
2620 struct vm_area_struct *vma, int node)
2621 {
2622 int nr_pages = folio_nr_pages(folio);
2623 pg_data_t *pgdat = NODE_DATA(node);
2624
2625 if (folio_is_file_lru(folio)) {
2626 /*
2627 * Do not migrate file folios that are mapped in multiple
2628 * processes with execute permissions as they are probably
2629 * shared libraries.
2630 *
2631 * See folio_likely_mapped_shared() on possible imprecision
2632 * when we cannot easily detect if a folio is shared.
2633 */
2634 if ((vma->vm_flags & VM_EXEC) &&
2635 folio_likely_mapped_shared(folio))
2636 return -EACCES;
2637
2638 /*
2639 * Do not migrate dirty folios as not all filesystems can move
2640 * dirty folios in MIGRATE_ASYNC mode, which is a waste of
2641 * cycles.
2642 */
2643 if (folio_test_dirty(folio))
2644 return -EAGAIN;
2645 }
2646
2647 /* Avoid migrating to a node that is nearly full */
2648 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2649 int z;
2650
2651 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2652 return -EAGAIN;
2653 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2654 if (managed_zone(pgdat->node_zones + z))
2655 break;
2656 }
2657
2658 /*
2659 * If there are no managed zones, it should not proceed
2660 * further.
2661 */
2662 if (z < 0)
2663 return -EAGAIN;
2664
2665 wakeup_kswapd(pgdat->node_zones + z, 0,
2666 folio_order(folio), ZONE_MOVABLE);
2667 return -EAGAIN;
2668 }
2669
2670 if (!folio_isolate_lru(folio))
2671 return -EAGAIN;
2672
2673 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2674 nr_pages);
2675 return 0;
2676 }
2677
2678 /*
2679 * Attempt to migrate a misplaced folio to the specified destination node.
2680 * The caller is expected to have isolated the folio by calling
2681 * migrate_misplaced_folio_prepare(), which results in an elevated reference
2682 * count on the folio. This function will un-isolate the folio, releasing
2683 * the reference before returning; see the sketch after this function.
2684 */
2685 int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2686 int node)
2687 {
2688 pg_data_t *pgdat = NODE_DATA(node);
2689 int nr_remaining;
2690 unsigned int nr_succeeded;
2691 LIST_HEAD(migratepages);
2692 struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
2693 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2694
2695 list_add(&folio->lru, &migratepages);
2696 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2697 NULL, node, MIGRATE_ASYNC,
2698 MR_NUMA_MISPLACED, &nr_succeeded);
2699 if (nr_remaining && !list_empty(&migratepages))
2700 putback_movable_pages(&migratepages);
2701 if (nr_succeeded) {
2702 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2703 count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
2704 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
2705 && !node_is_toptier(folio_nid(folio))
2706 && node_is_toptier(node))
2707 mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
2708 }
2709 mem_cgroup_put(memcg);
2710 BUG_ON(!list_empty(&migratepages));
2711 return nr_remaining ? -EAGAIN : 0;
2712 }
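
/*
 * A sketch of the two-step protocol above, with a hypothetical NUMA
 * hinting-fault helper: isolate while the PTL is held, then migrate once
 * it is dropped. The real callers live in memory.c and huge_memory.c and
 * also restore the PTE on the failure paths, which is omitted here.
 */
static int __maybe_unused example_numa_migrate(struct folio *folio,
		struct vm_area_struct *vma, pte_t *ptep, spinlock_t *ptl,
		int target_nid)
{
	int err;

	/* Step 1: must be called with the PTL still held. */
	err = migrate_misplaced_folio_prepare(folio, vma, target_nid);
	pte_unmap_unlock(ptep, ptl);
	if (err)
		return err;

	/* Step 2: folio is isolated; migrate it and drop the reference. */
	return migrate_misplaced_folio(folio, vma, target_nid);
}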
2713 #endif /* CONFIG_NUMA_BALANCING */
2714 #endif /* CONFIG_NUMA */
2715