Lines matching refs: vm_mm
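Every hit below dereferences vma->vm_mm, the back-pointer from a struct vm_area_struct to the struct mm_struct that owns it. Judging by the function names (copy_page_range(), do_swap_page(), handle_mm_fault(), ...), the file being indexed is mm/memory.c.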
500 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
756 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
945 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_page()
953 struct mm_struct *src_mm = src_vma->vm_mm; in __copy_present_ptes()
969 set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr); in __copy_present_ptes()
1084 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pte_range()
1085 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pte_range()
1230 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pmd_range()
1231 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pmd_range()
1267 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pud_range()
1268 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pud_range()
1304 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_p4d_range()
1362 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_page_range()
1363 struct mm_struct *src_mm = src_vma->vm_mm; in copy_page_range()
1805 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1893 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, in unmap_vmas()
1925 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in zap_page_range_single()
1928 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range_single()
1929 update_hiwater_rss(vma->vm_mm); in zap_page_range_single()
2004 if (mm_forbids_zeropage(vma->vm_mm)) in vm_mixed_zeropage_allowed()
2058 inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); in insert_page_into_pte_locked()
2061 set_pte_at(vma->vm_mm, addr, pte, pteval); in insert_page_into_pte_locked()
2076 pte = get_locked_pte(vma->vm_mm, addr, &ptl); in insert_page()
2105 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
2180 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
2225 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
2319 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2644 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range_internal()
3040 struct mm_struct *mm = vma->vm_mm; in __wp_page_copy_user()
3304 if (!mmap_read_trylock(vma->vm_mm)) in __vmf_anon_prepare()
3310 mmap_read_unlock(vma->vm_mm); in __vmf_anon_prepare()
3335 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3502 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3677 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_wp_page()
3690 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3903 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
3907 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
3942 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
3955 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
4019 if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, in __alloc_swap_folio()
4147 pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in alloc_swap_folio()
4172 if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, in alloc_swap_folio()
4225 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
4242 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4334 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
4345 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
4406 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
4529 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); in do_swap_page()
4530 add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages); in do_swap_page()
4580 set_ptes(vma->vm_mm, address, ptep, pte, nr_pages); in do_swap_page()
4581 arch_do_swap_page_nr(vma->vm_mm, vma, address, in do_swap_page()
4711 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { in alloc_anon_folio()
4727 return folio_prealloc(vma->vm_mm, vma, vmf->address, true); in alloc_anon_folio()
4752 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
4757 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
4760 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
4768 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4805 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in do_anonymous_page()
4816 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4828 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); in do_anonymous_page()
4835 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); in do_anonymous_page()
4877 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4916 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4921 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
4964 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4969 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4979 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR); in do_set_pmd()
4988 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
5042 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); in set_pte_range()
5093 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
5106 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
5107 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
5144 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
5163 add_mm_counter(vma->vm_mm, type, nr_pages); in finish_fault()
5249 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
5321 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); in do_cow_fault()
5404 struct mm_struct *vm_mm = vma->vm_mm; in do_fault() local
5411 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
5439 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
5611 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
5752 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
5827 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
6046 struct mm_struct *mm = vma->vm_mm; in handle_mm_fault()
6075 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
6369 lockdep_is_held(&vma->vm_mm->mmap_lock)); in pfnmap_lockdep_assert()
6371 lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock)); in pfnmap_lockdep_assert()
6410 struct mm_struct *mm = vma->vm_mm; in follow_pfnmap_start()
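Taken together, the matches show a single idiom: a VMA owns no page tables, page-table locks, or RSS counters of its own, so every walk, PTE update, and counter adjustment starts by dereferencing vma->vm_mm. A minimal sketch of that pattern follows, assuming the standard <linux/mm.h> helpers; example_touch_pte() is a hypothetical name for illustration, not a function from the tree.

/*
 * Hypothetical helper, not kernel code: a minimal sketch of the idiom
 * shared by the matches above.  Assumes the standard <linux/mm.h> APIs;
 * pte_offset_map_lock() can fail and return NULL on current kernels.
 */
#include <linux/mm.h>

static vm_fault_t example_touch_pte(struct vm_area_struct *vma,
				    pmd_t *pmd, unsigned long addr)
{
	/* Per-process state (page tables, PTL, counters) hangs off the mm. */
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *pte;

	/* The split PTE lock is looked up through the mm/pmd pair. */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return VM_FAULT_NOPAGE;

	if (pte_none(ptep_get(pte))) {
		/*
		 * A real handler would install a PTE here, e.g.
		 *   set_pte_at(mm, addr, pte, newpte);
		 * and account for it, e.g.
		 *   add_mm_counter(mm, MM_ANONPAGES, 1);
		 * Both operations take the mm, not the VMA.
		 */
	}

	pte_unmap_unlock(pte, ptl);
	return 0;
}

The fault handlers listed above follow the same shape: the mm/pmd pair locates the PTE page and its lock, while the VMA itself only contributes policy (vm_flags, vm_page_prot) and the address range.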