// SPDX-License-Identifier: GPL-2.0
/*
 * mm/mremap.c
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"

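/*
 * Walk the existing page tables of the source range down to the PUD level
 * without allocating anything; returns NULL if no entry is present at any
 * level.
 */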
static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

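/*
 * As above, but descend one further level to the source PMD. Returns NULL
 * if the PUD walk failed or the PMD entry is empty.
 */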
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

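/*
 * Allocate the P4D and PUD levels for the destination address if they are
 * not already present. Returns the PUD entry to populate, or NULL if an
 * allocation failed.
 */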
static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

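/*
 * As above, but also allocate the destination PMD. The returned PMD must
 * not already map a transparent huge page.
 */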
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

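/*
 * Take the rmap locks (i_mmap_rwsem for file mappings, the anon_vma lock
 * for anonymous memory) so that rmap walkers always observe either the old
 * or the new ptes while they are being moved; see the comment in
 * move_ptes() for when this is required.
 */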
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

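/*
 * Move a batch of ptes from the old PMD to the new one, flushing the TLB
 * before the page table locks are dropped if any present pte was moved.
 * Returns 0 on success, or -EAGAIN if either pte page could not be mapped
 * (e.g. because it raced with a THP collapse); the caller retries.
 */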
static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;
	int err = 0;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	if (!old_pte) {
		err = -EAGAIN;
		goto out;
	}
	new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl);
	if (!new_pte) {
		pte_unmap_unlock(old_pte, old_ptl);
		err = -EAGAIN;
		goto out;
	}
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(ptep_get(old_pte)))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with folio_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
out:
	if (need_rmap_locks)
		drop_rmap_locks(vma);
	return err;
}

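/*
 * Architectures can override this to veto moving entire PMD/PUD entries;
 * by default it is allowed whenever HAVE_MOVE_PMD or HAVE_MOVE_PUD is
 * enabled. Everything else falls back to moving the range pte by pte.
 */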
#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	bool res = false;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	pmd = *old_pmd;

	/* Racing with collapse? */
	if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
		goto out_unlock;
	/* Clear the pmd */
	pmd_clear(old_pmd);
	res = true;

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
out_unlock:
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return res;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

/*
 * Attempts to speed up the move by moving the entry at the level
 * corresponding to pgt_entry. Returns true if the move was successful,
 * else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

/*
 * A helper to check if aligning down is OK. The aligned address should fall
 * on *no mapping*. For the stack moving down, that's a special move within
 * the VMA that is created to span the source and destination of the move,
 * so we make an exception for it.
 */
static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
			   unsigned long mask, bool for_stack)
{
	unsigned long addr_masked = addr_to_align & mask;

	/*
	 * If @addr_to_align of either source or destination is not the beginning
	 * of the corresponding VMA, we can't align down or we will destroy part
	 * of the current mapping.
	 */
	if (!for_stack && vma->vm_start != addr_to_align)
		return false;

	/* In the stack case we explicitly permit in-VMA alignment. */
	if (for_stack && addr_masked >= vma->vm_start)
		return true;

	/*
	 * Make sure the realignment doesn't cause the address to fall on an
	 * existing mapping.
	 */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}

/* Opportunistically realign to specified boundary for faster copy. */
static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma,
			     unsigned long *new_addr, struct vm_area_struct *new_vma,
			     unsigned long mask, bool for_stack)
{
	/* Skip if the addresses are already aligned. */
	if ((*old_addr & ~mask) == 0)
		return;

	/* Only realign if the new and old addresses are mutually aligned. */
	if ((*old_addr & ~mask) != (*new_addr & ~mask))
		return;

	/* Ensure realignment doesn't cause overlap with existing mappings. */
	if (!can_align_down(old_vma, *old_addr, mask, for_stack) ||
	    !can_align_down(new_vma, *new_addr, mask, for_stack))
		return;

	*old_addr = *old_addr & mask;
	*new_addr = *new_addr & mask;
}

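/*
 * Move the page table entries covering [old_addr, old_addr + len) in @vma
 * to the corresponding range of @new_vma, using the largest page table
 * level possible for each extent. Returns how many bytes were successfully
 * moved, which may be less than @len if a destination table could not be
 * allocated.
 */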
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks, bool for_stack)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;

	if (!len)
		return 0;

	old_end = old_addr + len;

	if (is_vm_hugetlb_page(vma))
		return move_hugetlb_page_tables(vma, new_vma, old_addr,
						new_addr, len);

	/*
	 * If possible, realign addresses to PMD boundary for faster copy.
	 * Only realign if the mremap copying hits a PMD boundary.
	 */
	if (len >= PMD_SIZE - (old_addr & ~PMD_MASK))
		try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
				 for_stack);

	flush_cache_range(vma, old_addr, old_end);
	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);

		old_pud = get_old_pud(vma->vm_mm, old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
					       old_pud, new_pud, need_rmap_locks);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {

			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
again:
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}
		if (pmd_none(*old_pmd))
			continue;
		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			      new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
			goto again;
	}

	mmu_notifier_invalidate_range_end(&range);

	/*
	 * Prevent negative return values when {old,new}_addr was realigned
	 * but we broke out of the above loop for the first PMD itself.
	 */
	if (old_addr < old_end - len)
		return 0;

	return len + old_addr - old_end;	/* how much done */
}

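/*
 * Move the mapping itself: set up a copy of the VMA at the new address,
 * move the page tables across, then unmap the old range (unless
 * MREMAP_DONTUNMAP was given) and fix up locked-memory and VM_ACCOUNT
 * accounting. Returns the new address on success or a negative error code.
 */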
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	long to_account = new_len - old_len;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long account_start = 0;
	unsigned long account_end = 0;
	unsigned long hiwater_vm;
	int err = 0;
	bool need_rmap_locks;
	struct vma_iterator vmi;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (unlikely(flags & MREMAP_DONTUNMAP))
		to_account = new_len;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (vm_flags & VM_ACCOUNT) {
		if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
			return -ENOMEM;
	}

	vma_start_write(vma);
	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (vm_flags & VM_ACCOUNT)
			vm_unacct_memory(to_account >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks, false);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true, false);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
	}

	if (is_vm_hugetlb_page(vma)) {
		clear_vma_resv_huge_pages(vma);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vm_flags_clear(vma, VM_ACCOUNT);
		if (vma->vm_start < old_addr)
			account_start = vma->vm_start;
		if (vma->vm_end > old_addr + old_len)
			account_end = vma->vm_end;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the PFN tracking code that the pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_clear(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vm_flags_clear(vma, VM_LOCKED_MASK);

		/*
		 * anon_vma links of the old vma are no longer needed after its
		 * page table has been moved.
		 */
		if (new_vma != vma && vma->vm_start == old_addr &&
			vma->vm_end == (old_addr + old_len))
			unlink_anon_vmas(vma);

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	vma_iter_init(&vmi, mm, old_addr);
	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(old_len >> PAGE_SHIFT);
		account_start = account_end = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (account_start) {
		vma = vma_prev(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	if (account_end) {
		vma = vma_next(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	return new_addr;
}

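/*
 * Look up and sanity-check the VMA at @addr for a resize from @old_len to
 * @new_len, verifying locked-memory and address-space limits when the
 * mapping grows. Returns the VMA or an ERR_PTR().
 */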
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long pgoff;

	vma = vma_lookup(mm, addr);
	if (!vma)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len))
		return ERR_PTR(-EAGAIN);

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	return vma;
}

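/*
 * Handle the MREMAP_FIXED and MREMAP_DONTUNMAP cases: validate the
 * destination range, unmap whatever currently occupies it when
 * MREMAP_FIXED is given, and move the mapping there via move_vma().
 * Returns the new address or an error value.
 */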
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the VMAs after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute if the whole
	 * operation has a good chance of succeeding map-wise.
	 * The worst-case scenario is when both VMAs (new_addr and old_addr)
	 * get split into 3 before unmapping.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether current map count plus 2 still leads us to 4 maps below
	 * the threshold, otherwise return -ENOMEM here to be more safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		/*
		 * In mremap_to() the VMA is moved to the destination address,
		 * so munmap the destination first; do_munmap() will check
		 * whether the destination is sealed.
		 */
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len > new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

out:
	return ret;
}

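/*
 * Check whether @vma can simply be grown in place by @delta bytes: the
 * extension must not overflow, must not hit another mapping, and the
 * architecture's get_unmapped_area() must accept the enlarged range.
 */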
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	bool locked = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arch/arm64/tagged-address-abi.rst for more
	 * information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	vma = vma_lookup(mm, addr);
	if (!vma) {
		ret = -EFAULT;
		goto out;
	}

	/* Don't allow remapping vmas when they have already been sealed */
	if (!can_modify_vma(vma)) {
		ret = -EPERM;
		goto out;
	}

	if (is_vm_hugetlb_page(vma)) {
		struct hstate *h __maybe_unused = hstate_vma(vma);

		old_len = ALIGN(old_len, huge_page_size(h));
		new_len = ALIGN(new_len, huge_page_size(h));

		/* addrs must be huge page aligned */
		if (addr & ~huge_page_mask(h))
			goto out;
		if (new_addr & ~huge_page_mask(h))
			goto out;

		/*
		 * Don't allow remap expansion, because the underlying hugetlb
		 * reservation cannot yet handle a split reservation.
		 */
		if (new_len > old_len)
			goto out;
	}

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_vmi_munmap does all the needed commit accounting, and
	 * unlocks the mmap_lock if so directed.
	 */
	if (old_len >= new_len) {
		VMA_ITERATOR(vmi, mm, addr + new_len);

		if (old_len == new_len) {
			ret = addr;
			goto out;
		}

		ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
				    &uf_unmap, true);
		if (ret)
			goto out;

		ret = addr;
		goto out_unlocked;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		unsigned long delta = new_len - old_len;

		/* can we just expand the current mapping? */
		if (vma_expandable(vma, delta)) {
			long pages = delta >> PAGE_SHIFT;
			VMA_ITERATOR(vmi, mm, vma->vm_end);
			long charged = 0;

			if (vma->vm_flags & VM_ACCOUNT) {
				if (security_vm_enough_memory_mm(mm, pages)) {
					ret = -ENOMEM;
					goto out;
				}
				charged = pages;
			}

			/*
			 * vma_merge_extend() is called on the extension we
			 * are adding to the already existing vma; it will
			 * merge this extension with the existing vma (the
			 * expand operation itself) and possibly also with
			 * the next vma if it becomes adjacent to the
			 * expanded vma and is otherwise compatible.
			 */
			vma = vma_merge_extend(&vmi, vma, delta);
			if (!vma) {
				vm_unacct_memory(charged);
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret))
		locked = false;
	mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
out_unlocked:
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}