Lines matching refs: vma
(cross-reference listing: the leading number on each line is the line number in the indexed kernel source, apparently mm/memory.c; each match ends with the enclosing function, or is tagged as an argument, local, or member declaration)

117 	if (!userfaultfd_wp(vmf->vma))  in vmf_orig_pte_uffd_wp()
365 struct vm_area_struct *vma, unsigned long floor, in free_pgtables() argument
371 unsigned long addr = vma->vm_start; in free_pgtables()
387 vma_start_write(vma); in free_pgtables()
388 unlink_anon_vmas(vma); in free_pgtables()
390 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
391 unlink_file_vma(vma); in free_pgtables()
392 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
396 unlink_file_vma_batch_add(&vb, vma); in free_pgtables()
401 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
403 vma = next; in free_pgtables()
408 vma_start_write(vma); in free_pgtables()
409 unlink_anon_vmas(vma); in free_pgtables()
410 unlink_file_vma_batch_add(&vb, vma); in free_pgtables()
413 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
416 vma = next; in free_pgtables()
417 } while (vma); in free_pgtables()
497 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
500 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
529 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
530 index = linear_page_index(vma, addr); in print_bad_pte()
538 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
540 vma->vm_file, in print_bad_pte()
541 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
542 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_pte()
593 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page() argument
601 if (vma->vm_ops && vma->vm_ops->find_special_page) in vm_normal_page()
602 return vma->vm_ops->find_special_page(vma, addr); in vm_normal_page()
603 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vm_normal_page()
618 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
624 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page()
625 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page()
633 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page()
634 if (pfn == vma->vm_pgoff + off) in vm_normal_page()
636 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page()
646 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
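
vm_normal_page() above is the helper that maps a (vma, address, pte) triple back to its struct page, returning NULL for the special cases the listed checks handle (pte_special() entries, VM_PFNMAP/VM_MIXEDMAP mappings without a backing page, the zero page). Below is a minimal sketch of the usual calling convention under the PTE lock; walk_one_pte_table() and inspect_page() are hypothetical names, not kernel APIs.

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: resolve normal pages in one PTE table.  The caller is assumed
 * to hold mmap_lock for read; inspect_page() is a hypothetical callback.
 */
static void walk_one_pte_table(struct vm_area_struct *vma, pmd_t *pmd,
			       unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *start_pte, *pte;

	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte)
		return;
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		pte_t ptent = ptep_get(pte);
		struct page *page;

		if (!pte_present(ptent))
			continue;
		page = vm_normal_page(vma, addr, ptent);
		if (!page)		/* zero page or raw PFN mapping */
			continue;
		inspect_page(page);	/* hypothetical */
	}
	pte_unmap_unlock(start_pte, ptl);
}
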
659 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, in vm_normal_folio() argument
662 struct page *page = vm_normal_page(vma, addr, pte); in vm_normal_folio()
670 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page_pmd() argument
679 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page_pmd()
680 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page_pmd()
686 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page_pmd()
687 if (pfn == vma->vm_pgoff + off) in vm_normal_page_pmd()
689 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page_pmd()
709 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, in vm_normal_folio_pmd() argument
712 struct page *page = vm_normal_page_pmd(vma, addr, pmd); in vm_normal_folio_pmd()
720 static void restore_exclusive_pte(struct vm_area_struct *vma, in restore_exclusive_pte() argument
730 pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); in restore_exclusive_pte()
738 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in restore_exclusive_pte()
748 folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE); in restore_exclusive_pte()
756 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
762 update_mmu_cache(vma, address, ptep); in restore_exclusive_pte()
770 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, in try_restore_exclusive_pte() argument
777 restore_exclusive_pte(vma, page, addr, src_pte); in try_restore_exclusive_pte()
1057 struct vm_area_struct *vma, unsigned long addr, bool need_zero) in folio_prealloc() argument
1062 new_folio = vma_alloc_zeroed_movable_folio(vma, addr); in folio_prealloc()
1064 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, in folio_prealloc()
1465 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, in zap_install_uffd_wp_if_needed() argument
1470 if (vma_is_anonymous(vma)) in zap_install_uffd_wp_if_needed()
1478 pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); in zap_install_uffd_wp_if_needed()
1487 struct vm_area_struct *vma, struct folio *folio, in zap_present_folio_ptes() argument
1504 if (pte_young(ptent) && likely(vma_has_recency(vma))) in zap_present_folio_ptes()
1513 arch_check_zapped_pte(vma, ptent); in zap_present_folio_ptes()
1515 if (unlikely(userfaultfd_pte_wp(vma, ptent))) in zap_present_folio_ptes()
1516 zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, in zap_present_folio_ptes()
1520 folio_remove_rmap_ptes(folio, page, nr, vma); in zap_present_folio_ptes()
1523 print_bad_pte(vma, addr, ptent, page); in zap_present_folio_ptes()
1538 struct vm_area_struct *vma, pte_t *pte, pte_t ptent, in zap_present_ptes() argument
1549 page = vm_normal_page(vma, addr, ptent); in zap_present_ptes()
1553 arch_check_zapped_pte(vma, ptent); in zap_present_ptes()
1555 if (userfaultfd_pte_wp(vma, ptent)) in zap_present_ptes()
1556 zap_install_uffd_wp_if_needed(vma, addr, pte, 1, in zap_present_ptes()
1574 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr, in zap_present_ptes()
1579 zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr, in zap_present_ptes()
1585 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1621 nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr, in zap_pte_range()
1644 WARN_ON_ONCE(!vma_is_anonymous(vma)); in zap_pte_range()
1647 folio_remove_rmap_pte(folio, page, vma); in zap_pte_range()
1667 if (!vma_is_anonymous(vma) && in zap_pte_range()
1680 zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); in zap_pte_range()
1689 tlb_flush_rmaps(tlb, vma); in zap_pte_range()
1706 struct vm_area_struct *vma, pud_t *pud, in zap_pmd_range() argument
1718 __split_huge_pmd(vma, pmd, addr, false, NULL); in zap_pmd_range()
1719 else if (zap_huge_pmd(tlb, vma, pmd, addr)) { in zap_pmd_range()
1739 addr = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1748 struct vm_area_struct *vma, p4d_t *p4d, in zap_pud_range() argument
1761 split_huge_pud(vma, pud, addr); in zap_pud_range()
1762 } else if (zap_huge_pud(tlb, vma, pud, addr)) in zap_pud_range()
1768 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1777 struct vm_area_struct *vma, pgd_t *pgd, in zap_p4d_range() argument
1789 next = zap_pud_range(tlb, vma, p4d, addr, next, details); in zap_p4d_range()
1796 struct vm_area_struct *vma, in unmap_page_range() argument
1804 tlb_start_vma(tlb, vma); in unmap_page_range()
1805 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1810 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1812 tlb_end_vma(tlb, vma); in unmap_page_range()
1817 struct vm_area_struct *vma, unsigned long start_addr, in unmap_single_vma() argument
1821 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
1824 if (start >= vma->vm_end) in unmap_single_vma()
1826 end = min(vma->vm_end, end_addr); in unmap_single_vma()
1827 if (end <= vma->vm_start) in unmap_single_vma()
1830 if (vma->vm_file) in unmap_single_vma()
1831 uprobe_munmap(vma, start, end); in unmap_single_vma()
1833 if (unlikely(vma->vm_flags & VM_PFNMAP)) in unmap_single_vma()
1834 untrack_pfn(vma, 0, 0, mm_wr_locked); in unmap_single_vma()
1837 if (unlikely(is_vm_hugetlb_page(vma))) { in unmap_single_vma()
1849 if (vma->vm_file) { in unmap_single_vma()
1852 __unmap_hugepage_range(tlb, vma, start, end, in unmap_single_vma()
1856 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1882 struct vm_area_struct *vma, unsigned long start_addr, in unmap_vmas() argument
1893 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, in unmap_vmas()
1899 hugetlb_zap_begin(vma, &start, &end); in unmap_vmas()
1900 unmap_single_vma(tlb, vma, start, end, &details, in unmap_vmas()
1902 hugetlb_zap_end(vma, &details); in unmap_vmas()
1903 vma = mas_find(mas, tree_end - 1); in unmap_vmas()
1904 } while (vma && likely(!xa_is_zero(vma))); in unmap_vmas()
1917 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1925 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in zap_page_range_single()
1927 hugetlb_zap_begin(vma, &range.start, &range.end); in zap_page_range_single()
1928 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range_single()
1929 update_hiwater_rss(vma->vm_mm); in zap_page_range_single()
1935 unmap_single_vma(&tlb, vma, address, end, details, false); in zap_page_range_single()
1938 hugetlb_zap_end(vma, details); in zap_page_range_single()
1952 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1955 if (!range_in_vma(vma, address, address + size) || in zap_vma_ptes()
1956 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
1959 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
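
zap_vma_ptes() is the exported entry point a driver uses to tear down page-table entries inside a VM_PFNMAP mapping it populated itself; as the listing shows, it only validates the range and flags before delegating to zap_page_range_single(). A minimal, hypothetical sketch of a caller follows (lifetime management of the saved vma is omitted).

#include <linux/mm.h>

struct my_pfnmap_buf {			/* hypothetical driver state */
	struct vm_area_struct *vma;	/* remembered in the driver's mmap() */
};

/* Sketch: revoke every PTE the driver installed in its VM_PFNMAP mapping. */
static void my_pfnmap_revoke(struct my_pfnmap_buf *buf)
{
	struct vm_area_struct *vma = buf->vma;

	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
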
1995 static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma) in vm_mixed_zeropage_allowed() argument
1997 VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP); in vm_mixed_zeropage_allowed()
2004 if (mm_forbids_zeropage(vma->vm_mm)) in vm_mixed_zeropage_allowed()
2007 if (is_cow_mapping(vma->vm_flags)) in vm_mixed_zeropage_allowed()
2010 if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) in vm_mixed_zeropage_allowed()
2021 return vma->vm_ops && vma->vm_ops->pfn_mkwrite && in vm_mixed_zeropage_allowed()
2022 (vma_is_fsdax(vma) || vma->vm_flags & VM_IO); in vm_mixed_zeropage_allowed()
2025 static int validate_page_before_insert(struct vm_area_struct *vma, in validate_page_before_insert() argument
2033 if (!vm_mixed_zeropage_allowed(vma)) in validate_page_before_insert()
2044 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, in insert_page_into_pte_locked() argument
2058 inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); in insert_page_into_pte_locked()
2059 folio_add_file_rmap_pte(folio, page, vma); in insert_page_into_pte_locked()
2061 set_pte_at(vma->vm_mm, addr, pte, pteval); in insert_page_into_pte_locked()
2065 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
2072 retval = validate_page_before_insert(vma, page); in insert_page()
2076 pte = get_locked_pte(vma->vm_mm, addr, &ptl); in insert_page()
2079 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); in insert_page()
2085 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, in insert_page_in_batch_locked() argument
2090 err = validate_page_before_insert(vma, page); in insert_page_in_batch_locked()
2093 return insert_page_into_pte_locked(vma, pte, addr, page, prot); in insert_page_in_batch_locked()
2099 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, in insert_pages() argument
2105 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
2134 int err = insert_page_in_batch_locked(vma, pte, in insert_pages()
2172 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pages() argument
2177 if (addr < vma->vm_start || end_addr >= vma->vm_end) in vm_insert_pages()
2179 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_pages()
2180 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
2181 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_pages()
2182 vm_flags_set(vma, VM_MIXEDMAP); in vm_insert_pages()
2185 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); in vm_insert_pages()
2219 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
2222 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
2224 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
2225 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
2226 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
2227 vm_flags_set(vma, VM_MIXEDMAP); in vm_insert_page()
2229 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
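
vm_insert_page() is the exported way to back a shared mapping with pages the driver already owns; the lines above show it switching the vma to VM_MIXEDMAP on first use and refusing VM_PFNMAP vmas. A hedged sketch of the usual mmap() handler loop follows; struct my_buffer and its fields are hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>

struct my_buffer {			/* hypothetical: pages allocated by the driver */
	struct page **pages;
	unsigned long npages;
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_buffer *buf = file->private_data;
	unsigned long npages = vma_pages(vma);
	unsigned long addr = vma->vm_start;
	unsigned long i;
	int err;

	/* Sketch only: a real handler also validates vma->vm_pgoff. */
	if (npages > buf->npages)
		return -EINVAL;

	for (i = 0; i < npages; i++, addr += PAGE_SIZE) {
		err = vm_insert_page(vma, addr, buf->pages[i]);
		if (err)
			return err;
	}
	return 0;
}

vm_insert_pages() above and vm_map_pages()/vm_map_pages_zero() below are batched forms of the same loop.
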
2246 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, in __vm_map_pages() argument
2249 unsigned long count = vma_pages(vma); in __vm_map_pages()
2250 unsigned long uaddr = vma->vm_start; in __vm_map_pages()
2262 ret = vm_insert_page(vma, uaddr, pages[offset + i]); in __vm_map_pages()
2289 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, in vm_map_pages() argument
2292 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); in vm_map_pages()
2309 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, in vm_map_pages_zero() argument
2312 return __vm_map_pages(vma, pages, num, 0); in vm_map_pages_zero()
2316 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
2319 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2344 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
2345 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) in insert_pfn()
2346 update_mmu_cache(vma, addr, pte); in insert_pfn()
2359 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in insert_pfn()
2363 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
2403 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_prot() argument
2412 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_prot()
2413 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_prot()
2415 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_prot()
2416 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vmf_insert_pfn_prot()
2418 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_prot()
2424 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); in vmf_insert_pfn_prot()
2426 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, in vmf_insert_pfn_prot()
2451 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn() argument
2454 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); in vmf_insert_pfn()
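
vmf_insert_pfn() is what a .fault handler for a VM_PFNMAP mapping calls to install a raw PFN using the vma's own vm_page_prot. A minimal sketch follows; my_pfnmap_fault(), struct my_pfn_dev and my_pfn_lookup() are hypothetical.

#include <linux/mm.h>

struct my_pfn_dev {			/* hypothetical device state */
	unsigned long base_pfn;
	unsigned long nr_pages;
};

static unsigned long my_pfn_lookup(struct my_pfn_dev *dev, pgoff_t pgoff)
{
	if (pgoff >= dev->nr_pages)
		return 0;
	return dev->base_pfn + pgoff;
}

static vm_fault_t my_pfnmap_fault(struct vm_fault *vmf)
{
	struct my_pfn_dev *dev = vmf->vma->vm_private_data;
	unsigned long pfn = my_pfn_lookup(dev, vmf->pgoff);

	if (!pfn)
		return VM_FAULT_SIGBUS;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct my_pfnmap_ops = {
	.fault = my_pfnmap_fault,
};
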
2458 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite) in vm_mixed_ok() argument
2461 (mkwrite || !vm_mixed_zeropage_allowed(vma))) in vm_mixed_ok()
2464 if (vma->vm_flags & VM_MIXEDMAP) in vm_mixed_ok()
2475 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, in __vm_insert_mixed() argument
2478 pgprot_t pgprot = vma->vm_page_prot; in __vm_insert_mixed()
2481 if (!vm_mixed_ok(vma, pfn, mkwrite)) in __vm_insert_mixed()
2484 if (addr < vma->vm_start || addr >= vma->vm_end) in __vm_insert_mixed()
2487 track_pfn_insert(vma, &pgprot, pfn); in __vm_insert_mixed()
2509 err = insert_page(vma, addr, page, pgprot); in __vm_insert_mixed()
2511 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); in __vm_insert_mixed()
2522 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_mixed() argument
2525 return __vm_insert_mixed(vma, addr, pfn, false); in vmf_insert_mixed()
2534 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, in vmf_insert_mixed_mkwrite() argument
2537 return __vm_insert_mixed(vma, addr, pfn, true); in vmf_insert_mixed_mkwrite()
2638 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_internal() argument
2644 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range_internal()
2668 if (is_cow_mapping(vma->vm_flags)) { in remap_pfn_range_internal()
2669 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range_internal()
2671 vma->vm_pgoff = pfn; in remap_pfn_range_internal()
2674 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); in remap_pfn_range_internal()
2679 flush_cache_range(vma, addr, end); in remap_pfn_range_internal()
2695 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range_notrack() argument
2698 int error = remap_pfn_range_internal(vma, addr, pfn, size, prot); in remap_pfn_range_notrack()
2708 zap_page_range_single(vma, addr, size, NULL); in remap_pfn_range_notrack()
2724 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
2729 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
2733 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); in remap_pfn_range()
2735 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true); in remap_pfn_range()
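
remap_pfn_range() is the one-shot way to map a contiguous physical range at mmap() time, with the PFN tracking (track_pfn_remap/untrack_pfn) and the zap-on-failure visible above handled internally. A hedged sketch of the classic caller; struct my_iodev and its fields are hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>

struct my_iodev {			/* hypothetical device state */
	phys_addr_t phys_base;
	resource_size_t phys_size;
};

static int my_iomem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_iodev *dev = file->private_data;
	unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vma->vm_pgoff;

	if (vma->vm_pgoff + vma_pages(vma) > (dev->phys_size >> PAGE_SHIFT))
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
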
2755 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
2774 if (vma->vm_pgoff > pages) in vm_iomap_memory()
2776 pfn += vma->vm_pgoff; in vm_iomap_memory()
2777 pages -= vma->vm_pgoff; in vm_iomap_memory()
2780 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
2785 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
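
vm_iomap_memory() wraps the same operation for the common case: the caller passes the whole physical aperture and the helper performs the vm_pgoff and length arithmetic shown above before calling io_remap_pfn_range(). The previous sketch reduces to the following (same hypothetical struct my_iodev).

static int my_iomem_mmap_simple(struct file *file, struct vm_area_struct *vma)
{
	struct my_iodev *dev = file->private_data;

	return vm_iomap_memory(vma, dev->phys_base, dev->phys_size);
}
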
3039 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user() local
3040 struct mm_struct *mm = vma->vm_mm; in __wp_page_copy_user()
3044 if (copy_mc_user_highpage(dst, src, addr, vma)) in __wp_page_copy_user()
3074 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
3080 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) in __wp_page_copy_user()
3081 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); in __wp_page_copy_user()
3099 update_mmu_tlb(vma, addr, vmf->pte); in __wp_page_copy_user()
3131 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) in __get_fault_gfp_mask() argument
3133 struct file *vm_file = vma->vm_file; in __get_fault_gfp_mask()
3158 if (vmf->vma->vm_file && in do_page_mkwrite()
3159 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
3162 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
3186 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page() local
3190 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; in fault_dirty_shared_page()
3204 file_update_time(vma->vm_file); in fault_dirty_shared_page()
3240 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse() local
3257 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_reuse()
3259 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_reuse()
3260 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) in wp_page_reuse()
3261 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_reuse()
3273 struct vm_area_struct *vma = vmf->vma; in vmf_can_call_fault() local
3275 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) in vmf_can_call_fault()
3277 vma_end_read(vma); in vmf_can_call_fault()
3298 struct vm_area_struct *vma = vmf->vma; in __vmf_anon_prepare() local
3301 if (likely(vma->anon_vma)) in __vmf_anon_prepare()
3304 if (!mmap_read_trylock(vma->vm_mm)) in __vmf_anon_prepare()
3307 if (__anon_vma_prepare(vma)) in __vmf_anon_prepare()
3310 mmap_read_unlock(vma->vm_mm); in __vmf_anon_prepare()
3334 struct vm_area_struct *vma = vmf->vma; in wp_page_copy() local
3335 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3353 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); in wp_page_copy()
3400 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); in wp_page_copy()
3401 entry = mk_pte(&new_folio->page, vma->vm_page_prot); in wp_page_copy()
3409 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_copy()
3419 ptep_clear_flush(vma, vmf->address, vmf->pte); in wp_page_copy()
3420 folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); in wp_page_copy()
3421 folio_add_lru_vma(new_folio, vma); in wp_page_copy()
3424 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); in wp_page_copy()
3448 folio_remove_rmap_pte(old_folio, vmf->page, vma); in wp_page_copy()
3456 update_mmu_tlb(vma, vmf->address, vmf->pte); in wp_page_copy()
3501 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); in finish_mkwrite_fault()
3502 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3511 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in finish_mkwrite_fault()
3525 struct vm_area_struct *vma = vmf->vma; in wp_pfn_shared() local
3527 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
3536 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3548 struct vm_area_struct *vma = vmf->vma; in wp_page_shared() local
3553 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
3586 struct vm_area_struct *vma) in wp_can_reuse_anon_folio() argument
3628 folio_move_anon_rmap(folio, vma); in wp_can_reuse_anon_folio()
3659 struct vm_area_struct *vma = vmf->vma; in do_wp_page() local
3664 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { in do_wp_page()
3665 if (!userfaultfd_wp_async(vma)) { in do_wp_page()
3677 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_wp_page()
3689 if (unlikely(userfaultfd_wp(vmf->vma) && in do_wp_page()
3690 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3691 flush_tlb_page(vmf->vma, vmf->address); in do_wp_page()
3694 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); in do_wp_page()
3703 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in do_wp_page()
3724 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { in do_wp_page()
3748 static void unmap_mapping_range_vma(struct vm_area_struct *vma, in unmap_mapping_range_vma() argument
3752 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
3760 struct vm_area_struct *vma; in unmap_mapping_range_tree() local
3763 vma_interval_tree_foreach(vma, root, first_index, last_index) { in unmap_mapping_range_tree()
3764 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
3765 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
3769 unmap_mapping_range_vma(vma, in unmap_mapping_range_tree()
3770 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3771 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
3882 struct vm_area_struct *vma = vmf->vma; in remove_device_exclusive_entry() local
3903 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
3907 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
3910 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); in remove_device_exclusive_entry()
3922 struct vm_area_struct *vma, in should_try_to_free_swap() argument
3927 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || in should_try_to_free_swap()
3942 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
3955 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
3962 if (vma_is_anonymous(vmf->vma)) in do_pte_missing()
3978 if (unlikely(!userfaultfd_wp(vmf->vma))) in pte_marker_handle_uffd_wp()
4009 struct vm_area_struct *vma = vmf->vma; in __alloc_swap_folio() local
4013 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, in __alloc_swap_folio()
4019 if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, in __alloc_swap_folio()
4108 struct vm_area_struct *vma = vmf->vma; in alloc_swap_folio() local
4122 if (unlikely(userfaultfd_armed(vma))) in alloc_swap_folio()
4138 orders = thp_vma_allowable_orders(vma, vma->vm_flags, in alloc_swap_folio()
4140 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_swap_folio()
4147 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in alloc_swap_folio()
4167 gfp = vma_thp_gfp_mask(vma); in alloc_swap_folio()
4170 folio = vma_alloc_folio(gfp, order, vma, addr, true); in alloc_swap_folio()
4172 if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, in alloc_swap_folio()
4202 struct vm_area_struct *vma = vmf->vma; in do_swap_page() local
4225 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
4236 vma_end_read(vma); in do_swap_page()
4242 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4262 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); in do_swap_page()
4273 folio = swap_cache_get_folio(entry, vma, vmf->address); in do_swap_page()
4334 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4345 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
4377 folio = ksm_might_need_to_copy(folio, vma, vmf->address); in do_swap_page()
4406 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
4446 if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start))) in do_swap_page()
4448 if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end))) in do_swap_page()
4526 if (should_try_to_free_swap(folio, vma, vmf->flags)) in do_swap_page()
4529 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); in do_swap_page()
4530 add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages); in do_swap_page()
4531 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
4545 if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) && in do_swap_page()
4546 !pte_needs_soft_dirty_wp(vma, pte)) { in do_swap_page()
4547 pte = pte_mkwrite(pte, vma); in do_swap_page()
4556 flush_icache_pages(vma, page, nr_pages); in do_swap_page()
4561 folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); in do_swap_page()
4562 folio_add_lru_vma(folio, vma); in do_swap_page()
4572 folio_add_new_anon_rmap(folio, vma, address, rmap_flags); in do_swap_page()
4574 folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, in do_swap_page()
4580 set_ptes(vma->vm_mm, address, ptep, pte, nr_pages); in do_swap_page()
4581 arch_do_swap_page_nr(vma->vm_mm, vma, address, in do_swap_page()
4606 update_mmu_cache_range(vmf, vma, address, ptep, nr_pages); in do_swap_page()
4655 struct vm_area_struct *vma = vmf->vma; in alloc_anon_folio() local
4668 if (unlikely(userfaultfd_armed(vma))) in alloc_anon_folio()
4676 orders = thp_vma_allowable_orders(vma, vma->vm_flags, in alloc_anon_folio()
4678 orders = thp_vma_suitable_orders(vma, vmf->address, orders); in alloc_anon_folio()
4706 gfp = vma_thp_gfp_mask(vma); in alloc_anon_folio()
4709 folio = vma_alloc_folio(gfp, order, vma, addr, true); in alloc_anon_folio()
4711 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { in alloc_anon_folio()
4727 return folio_prealloc(vma->vm_mm, vma, vmf->address, true); in alloc_anon_folio()
4737 struct vm_area_struct *vma = vmf->vma; in do_anonymous_page() local
4745 if (vma->vm_flags & VM_SHARED) in do_anonymous_page()
4752 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
4757 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
4759 vma->vm_page_prot)); in do_anonymous_page()
4760 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
4765 update_mmu_tlb(vma, vmf->address, vmf->pte); in do_anonymous_page()
4768 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4772 if (userfaultfd_missing(vma)) { in do_anonymous_page()
4800 entry = mk_pte(&folio->page, vma->vm_page_prot); in do_anonymous_page()
4802 if (vma->vm_flags & VM_WRITE) in do_anonymous_page()
4803 entry = pte_mkwrite(pte_mkdirty(entry), vma); in do_anonymous_page()
4805 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in do_anonymous_page()
4809 update_mmu_tlb(vma, addr, vmf->pte); in do_anonymous_page()
4812 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
4816 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4821 if (userfaultfd_missing(vma)) { in do_anonymous_page()
4828 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); in do_anonymous_page()
4830 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); in do_anonymous_page()
4831 folio_add_lru_vma(folio, vma); in do_anonymous_page()
4835 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); in do_anonymous_page()
4838 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); in do_anonymous_page()
4857 struct vm_area_struct *vma = vmf->vma; in __do_fault() local
4877 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4882 ret = vma->vm_ops->fault(vmf); in __do_fault()
4914 struct vm_area_struct *vma = vmf->vma; in deposit_prealloc_pte() local
4916 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4921 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
4928 struct vm_area_struct *vma = vmf->vma; in do_set_pmd() local
4940 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags)) in do_set_pmd()
4943 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) in do_set_pmd()
4964 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4969 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4973 flush_icache_pages(vma, page, HPAGE_PMD_NR); in do_set_pmd()
4975 entry = mk_huge_pmd(page, vma->vm_page_prot); in do_set_pmd()
4977 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_set_pmd()
4979 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR); in do_set_pmd()
4980 folio_add_file_rmap_pmd(folio, page, vma); in do_set_pmd()
4988 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4990 update_mmu_cache_pmd(vma, haddr, vmf->pmd); in do_set_pmd()
5017 struct vm_area_struct *vma = vmf->vma; in set_pte_range() local
5022 flush_icache_pages(vma, page, nr); in set_pte_range()
5023 entry = mk_pte(page, vma->vm_page_prot); in set_pte_range()
5031 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in set_pte_range()
5035 if (write && !(vma->vm_flags & VM_SHARED)) { in set_pte_range()
5037 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); in set_pte_range()
5038 folio_add_lru_vma(folio, vma); in set_pte_range()
5040 folio_add_file_rmap_ptes(folio, page, nr, vma); in set_pte_range()
5042 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); in set_pte_range()
5045 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); in set_pte_range()
5073 struct vm_area_struct *vma = vmf->vma; in finish_fault() local
5078 !(vma->vm_flags & VM_SHARED); in finish_fault()
5092 if (!(vma->vm_flags & VM_SHARED)) { in finish_fault()
5093 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
5106 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
5107 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
5119 if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) { in finish_fault()
5124 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in finish_fault()
5133 vma_off + (nr_pages - idx) > vma_pages(vma) || in finish_fault()
5144 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
5151 update_mmu_tlb(vma, addr, vmf->pte); in finish_fault()
5155 update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); in finish_fault()
5163 add_mm_counter(vma->vm_mm, type, nr_pages); in finish_fault()
5236 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; in do_fault_around()
5246 pte_off + vma_pages(vmf->vma) - vma_off) - 1; in do_fault_around()
5249 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
5255 ret = vmf->vma->vm_ops->map_pages(vmf, in do_fault_around()
5267 if (!vmf->vma->vm_ops->map_pages) in should_fault_around()
5270 if (uffd_disable_fault_around(vmf->vma)) in should_fault_around()
5311 struct vm_area_struct *vma = vmf->vma; in do_cow_fault() local
5321 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); in do_cow_fault()
5333 if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) { in do_cow_fault()
5353 struct vm_area_struct *vma = vmf->vma; in do_shared_fault() local
5371 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
5403 struct vm_area_struct *vma = vmf->vma; in do_fault() local
5404 struct mm_struct *vm_mm = vma->vm_mm; in do_fault()
5410 if (!vma->vm_ops->fault) { in do_fault()
5411 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
5432 else if (!(vma->vm_flags & VM_SHARED)) in do_fault()
5449 struct vm_area_struct *vma = vmf->vma; in numa_migrate_check() local
5466 if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) in numa_migrate_check()
5478 vma_set_access_pid_bit(vma); in numa_migrate_check()
5492 static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, in numa_rebuild_single_mapping() argument
5498 old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte); in numa_rebuild_single_mapping()
5499 pte = pte_modify(old_pte, vma->vm_page_prot); in numa_rebuild_single_mapping()
5502 pte = pte_mkwrite(pte, vma); in numa_rebuild_single_mapping()
5503 ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte); in numa_rebuild_single_mapping()
5504 update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); in numa_rebuild_single_mapping()
5507 static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, in numa_rebuild_large_mapping() argument
5518 start = max3(addr_start, pt_start, vma->vm_start); in numa_rebuild_large_mapping()
5520 vma->vm_end); in numa_rebuild_large_mapping()
5535 ptent = pte_modify(ptent, vma->vm_page_prot); in numa_rebuild_large_mapping()
5538 can_change_pte_writable(vma, addr, ptent)) in numa_rebuild_large_mapping()
5542 numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); in numa_rebuild_large_mapping()
5548 struct vm_area_struct *vma = vmf->vma; in do_numa_page() local
5552 bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma); in do_numa_page()
5571 pte = pte_modify(old_pte, vma->vm_page_prot); in do_numa_page()
5579 can_change_pte_writable(vma, vmf->address, pte)) in do_numa_page()
5582 folio = vm_normal_folio(vma, vmf->address, pte); in do_numa_page()
5593 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { in do_numa_page()
5603 if (!migrate_misplaced_folio(folio, vma, target_nid)) { in do_numa_page()
5611 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
5625 numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, in do_numa_page()
5628 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, in do_numa_page()
5639 struct vm_area_struct *vma = vmf->vma; in create_huge_pmd() local
5640 if (vma_is_anonymous(vma)) in create_huge_pmd()
5642 if (vma->vm_ops->huge_fault) in create_huge_pmd()
5643 return vma->vm_ops->huge_fault(vmf, PMD_ORDER); in create_huge_pmd()
5650 struct vm_area_struct *vma = vmf->vma; in wp_huge_pmd() local
5654 if (vma_is_anonymous(vma)) { in wp_huge_pmd()
5656 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { in wp_huge_pmd()
5657 if (userfaultfd_wp_async(vmf->vma)) in wp_huge_pmd()
5664 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in wp_huge_pmd()
5665 if (vma->vm_ops->huge_fault) { in wp_huge_pmd()
5666 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); in wp_huge_pmd()
5674 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in wp_huge_pmd()
5683 struct vm_area_struct *vma = vmf->vma; in create_huge_pud() local
5685 if (vma_is_anonymous(vma)) in create_huge_pud()
5687 if (vma->vm_ops->huge_fault) in create_huge_pud()
5688 return vma->vm_ops->huge_fault(vmf, PUD_ORDER); in create_huge_pud()
5697 struct vm_area_struct *vma = vmf->vma; in wp_huge_pud() local
5701 if (vma_is_anonymous(vma)) in wp_huge_pud()
5703 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { in wp_huge_pud()
5704 if (vma->vm_ops->huge_fault) { in wp_huge_pud()
5705 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); in wp_huge_pud()
5712 __split_huge_pud(vma, vmf->pud, vmf->address); in wp_huge_pud()
5752 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
5771 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) in handle_pte_fault()
5777 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); in handle_pte_fault()
5787 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, in handle_pte_fault()
5789 update_mmu_cache_range(vmf, vmf->vma, vmf->address, in handle_pte_fault()
5802 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, in handle_pte_fault()
5816 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, in __handle_mm_fault() argument
5820 .vma = vma, in __handle_mm_fault()
5824 .pgoff = linear_page_index(vma, address), in __handle_mm_fault()
5825 .gfp_mask = __get_fault_gfp_mask(vma), in __handle_mm_fault()
5827 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
5828 unsigned long vm_flags = vma->vm_flags; in __handle_mm_fault()
5843 thp_vma_allowable_order(vma, vm_flags, in __handle_mm_fault()
5878 thp_vma_allowable_order(vma, vm_flags, in __handle_mm_fault()
5894 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) in __handle_mm_fault()
5981 static void lru_gen_enter_fault(struct vm_area_struct *vma) in lru_gen_enter_fault() argument
5984 current->in_lru_fault = vma_has_recency(vma); in lru_gen_enter_fault()
5992 static void lru_gen_enter_fault(struct vm_area_struct *vma) in lru_gen_enter_fault() argument
6001 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, in sanitize_fault_flags() argument
6011 if (!is_cow_mapping(vma->vm_flags)) in sanitize_fault_flags()
6015 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE))) in sanitize_fault_flags()
6018 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) && in sanitize_fault_flags()
6019 !is_cow_mapping(vma->vm_flags))) in sanitize_fault_flags()
6042 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, in handle_mm_fault() argument
6046 struct mm_struct *mm = vma->vm_mm; in handle_mm_fault()
6052 ret = sanitize_fault_flags(vma, &flags); in handle_mm_fault()
6056 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, in handle_mm_fault()
6063 is_droppable = !!(vma->vm_flags & VM_DROPPABLE); in handle_mm_fault()
6072 lru_gen_enter_fault(vma); in handle_mm_fault()
6074 if (unlikely(is_vm_hugetlb_page(vma))) in handle_mm_fault()
6075 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
6077 ret = __handle_mm_fault(vma, address, flags); in handle_mm_fault()
6172 struct vm_area_struct *vma; in lock_mm_and_find_vma() local
6177 vma = find_vma(mm, addr); in lock_mm_and_find_vma()
6178 if (likely(vma && (vma->vm_start <= addr))) in lock_mm_and_find_vma()
6179 return vma; in lock_mm_and_find_vma()
6185 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) { in lock_mm_and_find_vma()
6203 vma = find_vma(mm, addr); in lock_mm_and_find_vma()
6204 if (!vma) in lock_mm_and_find_vma()
6206 if (vma->vm_start <= addr) in lock_mm_and_find_vma()
6208 if (!(vma->vm_flags & VM_GROWSDOWN)) in lock_mm_and_find_vma()
6212 if (expand_stack_locked(vma, addr)) in lock_mm_and_find_vma()
6217 return vma; in lock_mm_and_find_vma()
6235 struct vm_area_struct *vma; in lock_vma_under_rcu() local
6239 vma = mas_walk(&mas); in lock_vma_under_rcu()
6240 if (!vma) in lock_vma_under_rcu()
6243 if (!vma_start_read(vma)) in lock_vma_under_rcu()
6247 if (vma->detached) { in lock_vma_under_rcu()
6248 vma_end_read(vma); in lock_vma_under_rcu()
6261 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in lock_vma_under_rcu()
6265 return vma; in lock_vma_under_rcu()
6268 vma_end_read(vma); in lock_vma_under_rcu()
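
handle_mm_fault() above is the entry point the architecture fault handlers call once a vma has been located; lock_vma_under_rcu() is the per-VMA-lock fast path and lock_mm_and_find_vma() the mmap_lock fallback. Below is a heavily simplified sketch of how they fit together, loosely modeled on the arch handlers and assuming CONFIG_PER_VMA_LOCK; access checks, signal delivery and retry accounting are omitted.

#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

/* Sketch only: not a real fault handler for any architecture. */
static void sketch_do_page_fault(struct pt_regs *regs, unsigned long addr,
				 bool is_write)
{
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	struct vm_area_struct *vma;
	vm_fault_t fault;

	if (is_write)
		flags |= FAULT_FLAG_WRITE;

	/* Fast path: per-VMA read lock, no mmap_lock taken. */
	vma = lock_vma_under_rcu(mm, addr);
	if (vma) {
		fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK,
					regs);
		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
			vma_end_read(vma);
		if (!(fault & VM_FAULT_RETRY))
			return;
		/* Retry requested: fall back to the mmap_lock path. */
	}

	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (!vma)
		return;		/* no vma: arch code would raise SIGSEGV */

	fault = handle_mm_fault(vma, addr, flags, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		mmap_read_unlock(mm);
}
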
6361 static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma) in pfnmap_lockdep_assert() argument
6364 struct file *file = vma->vm_file; in pfnmap_lockdep_assert()
6368 lockdep_assert(lockdep_is_held(&vma->vm_file->f_mapping->i_mmap_rwsem) || in pfnmap_lockdep_assert()
6369 lockdep_is_held(&vma->vm_mm->mmap_lock)); in pfnmap_lockdep_assert()
6371 lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock)); in pfnmap_lockdep_assert()
6408 struct vm_area_struct *vma = args->vma; in follow_pfnmap_start() local
6410 struct mm_struct *mm = vma->vm_mm; in follow_pfnmap_start()
6418 pfnmap_lockdep_assert(vma); in follow_pfnmap_start()
6420 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in follow_pfnmap_start()
6423 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfnmap_start()
6511 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
6520 struct follow_pfnmap_args args = { .vma = vma, .address = addr }; in generic_access_phys()
6585 struct vm_area_struct *vma = NULL; in __access_remote_vm() local
6587 gup_flags, &vma); in __access_remote_vm()
6591 vma = vma_lookup(mm, addr); in __access_remote_vm()
6592 if (!vma) { in __access_remote_vm()
6593 vma = expand_stack(mm, addr); in __access_remote_vm()
6596 if (!vma) in __access_remote_vm()
6609 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
6610 bytes = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
6623 copy_to_user_page(vma, page, addr, in __access_remote_vm()
6627 copy_from_user_page(vma, page, addr, in __access_remote_vm()
6688 struct vm_area_struct *vma; in print_vma_addr() local
6696 vma = vma_lookup(mm, ip); in print_vma_addr()
6697 if (vma && vma->vm_file) { in print_vma_addr()
6698 struct file *f = vma->vm_file; in print_vma_addr()
6699 ip -= vma->vm_start; in print_vma_addr()
6700 ip += vma->vm_pgoff << PAGE_SHIFT; in print_vma_addr()
6702 vma->vm_start, in print_vma_addr()
6703 vma->vm_end - vma->vm_start); in print_vma_addr()
6820 struct vm_area_struct *vma, in copy_user_gigantic_page() argument
6833 addr + i*PAGE_SIZE, vma)) in copy_user_gigantic_page()
6842 struct vm_area_struct *vma; member
6851 if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma)) in copy_subpage()
6857 unsigned long addr_hint, struct vm_area_struct *vma) in copy_user_large_folio() argument
6863 .vma = vma, in copy_user_large_folio()
6867 return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages); in copy_user_large_folio()
6931 void vma_pgtable_walk_begin(struct vm_area_struct *vma) in vma_pgtable_walk_begin() argument
6933 if (is_vm_hugetlb_page(vma)) in vma_pgtable_walk_begin()
6934 hugetlb_vma_lock_read(vma); in vma_pgtable_walk_begin()
6937 void vma_pgtable_walk_end(struct vm_area_struct *vma) in vma_pgtable_walk_end() argument
6939 if (is_vm_hugetlb_page(vma)) in vma_pgtable_walk_end()
6940 hugetlb_vma_unlock_read(vma); in vma_pgtable_walk_end()