Lines matching refs: new_vma
In move_ptes():
  138   struct vm_area_struct *new_vma, pmd_t *new_pmd,                  (argument)

In try_realign_addr():
  531   unsigned long *new_addr, struct vm_area_struct *new_vma,         (argument)
  544   !can_align_down(new_vma, *new_addr, mask, for_stack))

In move_page_tables():
  552   unsigned long old_addr, struct vm_area_struct *new_vma,          (argument)
  567   return move_hugetlb_page_tables(vma, new_vma, old_addr,
  575   try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
  638   if (pte_alloc(new_vma->vm_mm, new_pmd))
  641   new_vma, new_pmd, new_addr, need_rmap_locks) < 0)

In move_vma():
  665   struct vm_area_struct *new_vma;                                  (local)
  714   new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
  716   if (!new_vma) {
  722   moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
  727   err = vma->vm_ops->mremap(new_vma);
  736   move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
  738   vma = new_vma;
  743   mremap_userfaultfd_prep(new_vma, uf);
  783   if (new_vma != vma && vma->vm_start == old_addr &&
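Read together, the matches trace mremap()'s move path as it appears in the kernel's
mremap implementation: move_vma() obtains new_vma from copy_vma() (line 714) and
migrates the page-table entries with move_page_tables() (line 722); that function
either hands hugetlb VMAs to move_hugetlb_page_tables() (line 567) or walks the
range and calls a per-range mover whose argument list continues on line 641 and
matches the move_ptes() signature on line 138. Line 736 repeats the
move_page_tables() call with source and destination swapped relative to line 722,
which reads like the error path moving already-moved entries back. These are
kernel-internal functions and cannot be called from a standalone program, so the
sketch below instead exercises the same path from userspace through the mremap(2)
syscall with MREMAP_MAYMOVE. The mapping sizes and the 0x5a fill pattern are
arbitrary illustration choices, not values taken from the listing above.

/*
 * Minimal userspace exercise of the move path referenced above: map an
 * anonymous region, then grow it with mremap(MREMAP_MAYMOVE).  If the
 * kernel cannot extend the mapping in place it relocates it, which is the
 * case serviced by move_vma()/move_page_tables(); the data must survive.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t old_len = 4 * page;
    size_t new_len = 8 * page;

    char *old_addr = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (old_addr == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }
    memset(old_addr, 0x5a, old_len);

    /*
     * With MREMAP_MAYMOVE the returned address may differ from old_addr;
     * when it does, the mapping was moved rather than extended in place.
     */
    char *new_addr = mremap(old_addr, old_len, new_len, MREMAP_MAYMOVE);
    if (new_addr == MAP_FAILED) {
        perror("mremap");
        return EXIT_FAILURE;
    }

    printf("moved: %s (old=%p, new=%p)\n",
           new_addr == old_addr ? "no" : "yes",
           (void *)old_addr, (void *)new_addr);
    printf("data preserved: %s\n",
           new_addr[old_len - 1] == 0x5a ? "yes" : "no");

    munmap(new_addr, new_len);
    return EXIT_SUCCESS;
}

Build with, e.g., gcc -Wall demo.c -o demo.  Note that if the kernel can grow the
mapping in place, no move happens and the move path above is never entered; a
relocation can be made more likely by occupying the address space just past the
old mapping before calling mremap().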