Lines matching refs:vma (references to the vma identifier in mm/mremap.c)

72 static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,  in alloc_new_pud()  argument
86 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
92 pud = alloc_new_pud(mm, vma, addr); in alloc_new_pmd()
105 static void take_rmap_locks(struct vm_area_struct *vma) in take_rmap_locks() argument
107 if (vma->vm_file) in take_rmap_locks()
108 i_mmap_lock_write(vma->vm_file->f_mapping); in take_rmap_locks()
109 if (vma->anon_vma) in take_rmap_locks()
110 anon_vma_lock_write(vma->anon_vma); in take_rmap_locks()
113 static void drop_rmap_locks(struct vm_area_struct *vma) in drop_rmap_locks() argument
115 if (vma->anon_vma) in drop_rmap_locks()
116 anon_vma_unlock_write(vma->anon_vma); in drop_rmap_locks()
117 if (vma->vm_file) in drop_rmap_locks()
118 i_mmap_unlock_write(vma->vm_file->f_mapping); in drop_rmap_locks()
136 static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, in move_ptes() argument
141 struct mm_struct *mm = vma->vm_mm; in move_ptes()
167 take_rmap_locks(vma); in move_ptes()
186 flush_tlb_batched_pending(vma->vm_mm); in move_ptes()
215 flush_tlb_range(vma, old_end - len, old_end); in move_ptes()
222 drop_rmap_locks(vma); in move_ptes()
236 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, in move_normal_pmd() argument
240 struct mm_struct *mm = vma->vm_mm; in move_normal_pmd()
276 old_ptl = pmd_lock(vma->vm_mm, old_pmd); in move_normal_pmd()
293 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); in move_normal_pmd()
302 static inline bool move_normal_pmd(struct vm_area_struct *vma, in move_normal_pmd() argument
311 static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, in move_normal_pud() argument
315 struct mm_struct *mm = vma->vm_mm; in move_normal_pud()
331 old_ptl = pud_lock(vma->vm_mm, old_pud); in move_normal_pud()
343 flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE); in move_normal_pud()
351 static inline bool move_normal_pud(struct vm_area_struct *vma, in move_normal_pud() argument
360 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pud() argument
364 struct mm_struct *mm = vma->vm_mm; in move_huge_pud()
378 old_ptl = pud_lock(vma->vm_mm, old_pud); in move_huge_pud()
392 flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE); in move_huge_pud()
400 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pud() argument
458 static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, in move_pgt_entry() argument
466 take_rmap_locks(vma); in move_pgt_entry()
470 moved = move_normal_pmd(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
474 moved = move_normal_pud(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
479 move_huge_pmd(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
484 move_huge_pud(vma, old_addr, new_addr, old_entry, in move_pgt_entry()
494 drop_rmap_locks(vma); in move_pgt_entry()
505 static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align, in can_align_down() argument
515 if (!for_stack && vma->vm_start != addr_to_align) in can_align_down()
519 if (for_stack && addr_masked >= vma->vm_start) in can_align_down()
526 return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL; in can_align_down()
551 unsigned long move_page_tables(struct vm_area_struct *vma, in move_page_tables() argument
566 if (is_vm_hugetlb_page(vma)) in move_page_tables()
567 return move_hugetlb_page_tables(vma, new_vma, old_addr, in move_page_tables()
575 try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK, in move_page_tables()
578 flush_cache_range(vma, old_addr, old_end); in move_page_tables()
579 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, in move_page_tables()
591 old_pud = get_old_pud(vma->vm_mm, old_addr); in move_page_tables()
594 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); in move_page_tables()
599 move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr, in move_page_tables()
606 if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, in move_page_tables()
612 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
615 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
622 move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr, in move_page_tables()
625 split_huge_pmd(vma, old_pmd, old_addr); in move_page_tables()
632 if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr, in move_page_tables()
640 if (move_ptes(vma, old_pmd, old_addr, old_addr + extent, in move_page_tables()
657 static unsigned long move_vma(struct vm_area_struct *vma, in move_vma() argument
664 struct mm_struct *mm = vma->vm_mm; in move_vma()
666 unsigned long vm_flags = vma->vm_flags; in move_vma()
686 if (vma->vm_ops && vma->vm_ops->may_split) { in move_vma()
687 if (vma->vm_start != old_addr) in move_vma()
688 err = vma->vm_ops->may_split(vma, old_addr); in move_vma()
689 if (!err && vma->vm_end != old_addr + old_len) in move_vma()
690 err = vma->vm_ops->may_split(vma, old_addr + old_len); in move_vma()
702 err = ksm_madvise(vma, old_addr, old_addr + old_len, in move_vma()
712 vma_start_write(vma); in move_vma()
713 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); in move_vma()
714 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, in move_vma()
722 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, in move_vma()
726 } else if (vma->vm_ops && vma->vm_ops->mremap) { in move_vma()
727 err = vma->vm_ops->mremap(new_vma); in move_vma()
736 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, in move_vma()
738 vma = new_vma; in move_vma()
746 if (is_vm_hugetlb_page(vma)) { in move_vma()
747 clear_vma_resv_huge_pages(vma); in move_vma()
752 vm_flags_clear(vma, VM_ACCOUNT); in move_vma()
753 if (vma->vm_start < old_addr) in move_vma()
754 account_start = vma->vm_start; in move_vma()
755 if (vma->vm_end > old_addr + old_len) in move_vma()
756 account_end = vma->vm_end; in move_vma()
769 vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT); in move_vma()
772 if (unlikely(vma->vm_flags & VM_PFNMAP)) in move_vma()
773 untrack_pfn_clear(vma); in move_vma()
777 vm_flags_clear(vma, VM_LOCKED_MASK); in move_vma()
783 if (new_vma != vma && vma->vm_start == old_addr && in move_vma()
784 vma->vm_end == (old_addr + old_len)) in move_vma()
785 unlink_anon_vmas(vma); in move_vma()
808 vma = vma_prev(&vmi); in move_vma()
809 vm_flags_set(vma, VM_ACCOUNT); in move_vma()
813 vma = vma_next(&vmi); in move_vma()
814 vm_flags_set(vma, VM_ACCOUNT); in move_vma()
824 struct vm_area_struct *vma; in vma_to_resize() local
827 vma = vma_lookup(mm, addr); in vma_to_resize()
828 if (!vma) in vma_to_resize()
839 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { in vma_to_resize()
845 (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))) in vma_to_resize()
849 if (old_len > vma->vm_end - addr) in vma_to_resize()
853 return vma; in vma_to_resize()
856 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; in vma_to_resize()
857 pgoff += vma->vm_pgoff; in vma_to_resize()
861 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) in vma_to_resize()
864 if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len)) in vma_to_resize()
867 if (!may_expand_vm(mm, vma->vm_flags, in vma_to_resize()
871 return vma; in vma_to_resize()
881 struct vm_area_struct *vma; in mremap_to() local
930 vma = vma_to_resize(addr, old_len, new_len, flags); in mremap_to()
931 if (IS_ERR(vma)) { in mremap_to()
932 ret = PTR_ERR(vma); in mremap_to()
938 !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) { in mremap_to()
946 if (vma->vm_flags & VM_MAYSHARE) in mremap_to()
949 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + in mremap_to()
950 ((addr - vma->vm_start) >> PAGE_SHIFT), in mremap_to()
959 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf, in mremap_to()
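
The mremap_to() references above are the path taken for MREMAP_FIXED: userspace names the destination, vma_to_resize() validates the source VMA, get_unmapped_area() checks the requested target, and move_vma() relocates the pages. A minimal user-space sketch that drives this path is below; the sizes and the anonymous-mapping setup are illustrative choices, not taken from the source.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	size_t len = 4 * pg;

	/* Source mapping: the VMA that vma_to_resize()/move_vma() operate on. */
	char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED) { perror("mmap src"); return 1; }
	memset(src, 0xab, len);

	/* Reserve a destination range, then ask the kernel to move the pages
	 * there. MREMAP_FIXED requires MREMAP_MAYMOVE. */
	char *dst_hint = mmap(NULL, len, PROT_NONE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst_hint == MAP_FAILED) { perror("mmap dst"); return 1; }

	char *dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, dst_hint);
	if (dst == MAP_FAILED) { perror("mremap"); return 1; }

	printf("moved %p -> %p, first byte 0x%02x\n",
	       (void *)src, (void *)dst, (unsigned char)dst[0]);
	munmap(dst, len);
	return 0;
}

The PROT_NONE mapping only reserves the destination range; MREMAP_FIXED, like MAP_FIXED, replaces whatever is mapped there, and the moved pages keep the protections of the source VMA.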
966 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) in vma_expandable() argument
968 unsigned long end = vma->vm_end + delta; in vma_expandable()
970 if (end < vma->vm_end) /* overflow */ in vma_expandable()
972 if (find_vma_intersection(vma->vm_mm, vma->vm_end, end)) in vma_expandable()
974 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, in vma_expandable()
992 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
1043 vma = vma_lookup(mm, addr); in SYSCALL_DEFINE5()
1044 if (!vma) { in SYSCALL_DEFINE5()
1050 if (!can_modify_vma(vma)) { in SYSCALL_DEFINE5()
1055 if (is_vm_hugetlb_page(vma)) { in SYSCALL_DEFINE5()
1056 struct hstate *h __maybe_unused = hstate_vma(vma); in SYSCALL_DEFINE5()
1108 vma = vma_to_resize(addr, old_len, new_len, flags); in SYSCALL_DEFINE5()
1109 if (IS_ERR(vma)) { in SYSCALL_DEFINE5()
1110 ret = PTR_ERR(vma); in SYSCALL_DEFINE5()
1116 if (old_len == vma->vm_end - addr) { in SYSCALL_DEFINE5()
1120 if (vma_expandable(vma, delta)) { in SYSCALL_DEFINE5()
1122 VMA_ITERATOR(vmi, mm, vma->vm_end); in SYSCALL_DEFINE5()
1125 if (vma->vm_flags & VM_ACCOUNT) { in SYSCALL_DEFINE5()
1142 vma = vma_merge_extend(&vmi, vma, delta); in SYSCALL_DEFINE5()
1143 if (!vma) { in SYSCALL_DEFINE5()
1149 vm_stat_account(mm, vma->vm_flags, pages); in SYSCALL_DEFINE5()
1150 if (vma->vm_flags & VM_LOCKED) { in SYSCALL_DEFINE5()
1167 if (vma->vm_flags & VM_MAYSHARE) in SYSCALL_DEFINE5()
1170 new_addr = get_unmapped_area(vma->vm_file, 0, new_len, in SYSCALL_DEFINE5()
1171 vma->vm_pgoff + in SYSCALL_DEFINE5()
1172 ((addr - vma->vm_start) >> PAGE_SHIFT), in SYSCALL_DEFINE5()
1179 ret = move_vma(vma, addr, old_len, new_len, new_addr, in SYSCALL_DEFINE5()
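
The SYSCALL_DEFINE5(mremap, ...) references above are the flag-less resize path: when old_len covers the whole tail of the VMA and vma_expandable() finds room after it, the mapping grows in place; otherwise, with MREMAP_MAYMOVE, a new area is picked with get_unmapped_area() and move_vma() relocates it. A small sketch from userspace follows; it makes no assumption about which of the two outcomes the kernel chooses, and the sizes are arbitrary illustration.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	size_t old_len = pg, new_len = 16 * pg;

	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	strcpy(p, "contents survive the resize");

	/* Without MREMAP_MAYMOVE this fails with ENOMEM unless the VMA can be
	 * extended in place (the vma_expandable() check in the listing); with
	 * the flag, the kernel may fall back to relocating it via move_vma(). */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) { perror("mremap"); return 1; }

	printf("%s; data: \"%s\"\n",
	       q == p ? "grown in place" : "moved to a new address", q);
	munmap(q, new_len);
	return 0;
}

Which branch runs depends on what already sits above the old mapping, which is what the find_vma_intersection() call in vma_expandable() checks.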