/linux-6.12.1/mm/
D | userfaultfd.c |
      23  bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)  in validate_dst_vma() argument
      26  if (dst_end > dst_vma->vm_end)  in validate_dst_vma()
      34  if (!dst_vma->vm_userfaultfd_ctx.ctx)  in validate_dst_vma()
     106  struct vm_area_struct *dst_vma;  in uffd_mfill_lock() local
     108  dst_vma = uffd_lock_vma(dst_mm, dst_start);  in uffd_mfill_lock()
     109  if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))  in uffd_mfill_lock()
     110  return dst_vma;  in uffd_mfill_lock()
     112  vma_end_read(dst_vma);  in uffd_mfill_lock()
     127  struct vm_area_struct *dst_vma;  in uffd_mfill_lock() local
     130  dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);  in uffd_mfill_lock()
     [all …]
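
The validate_dst_vma() hits above show the two checks userfaultfd applies before filling a destination range: the range must end inside dst_vma, and dst_vma must have a userfaultfd context registered on it. A minimal userspace model of those two checks (the struct and field names are simplified stand-ins, not the kernel's types):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct vm_area_struct: only the fields the
 * checks above look at (vm_end and the userfaultfd context pointer). */
struct vma_model {
	unsigned long vm_start;
	unsigned long vm_end;
	void *uffd_ctx;		/* models dst_vma->vm_userfaultfd_ctx.ctx */
};

/* Models validate_dst_vma(): the destination range must not run past the
 * end of the VMA, and the VMA must be registered with userfaultfd. */
static bool validate_dst_vma_model(const struct vma_model *dst_vma,
				   unsigned long dst_end)
{
	if (dst_end > dst_vma->vm_end)
		return false;
	if (!dst_vma->uffd_ctx)
		return false;
	return true;
}

int main(void)
{
	struct vma_model vma = { 0x1000, 0x5000, (void *)1 };

	printf("%d\n", validate_dst_vma_model(&vma, 0x3000)); /* 1: fits, registered */
	printf("%d\n", validate_dst_vma_model(&vma, 0x6000)); /* 0: runs past vm_end */
	return 0;
}

From the fragments, uffd_mfill_lock() runs the real check right after the VMA is looked up and locked (lines 108-109 above), and appears to drop the lock again when validation fails (line 112), so an unregistered or too-short VMA is rejected before any fill work starts.
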
D | memory.c |
     793  pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,  in copy_nonpresent_pte() argument
     796  unsigned long vm_flags = dst_vma->vm_flags;  in copy_nonpresent_pte()
     888  pte_marker marker = copy_pte_marker(entry, dst_vma);  in copy_nonpresent_pte()
     895  if (!userfaultfd_wp(dst_vma))  in copy_nonpresent_pte()
     914  copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  in copy_present_page() argument
     935  folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);  in copy_present_page()
     936  folio_add_lru_vma(new_folio, dst_vma);  in copy_present_page()
     940  pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);  in copy_present_page()
     941  pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);  in copy_present_page()
     942  if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))  in copy_present_page()
     [all …]
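
copy_nonpresent_pte() and copy_present_page() are part of the fork-time page-table copy, and the hits at lines 895 and 942 show the same policy in both: the software uffd-wp protection follows a PTE into the child only if the destination VMA is still registered for write-protect tracking. A toy model of that decision on plain flag words (the kernel applies it to real pte_t values through its pte helpers; the bit values here are illustrative):

#include <stdio.h>

/* Toy PTE and VMA flag bits, named after the idea in the fragments above. */
#define PTE_PRESENT	(1u << 0)
#define PTE_DIRTY	(1u << 1)
#define PTE_UFFD_WP	(1u << 2)	/* software write-protect marker */

#define VMA_UFFD_WP	(1u << 0)	/* VMA registered for uffd-wp */

/* Models the decision around lines 895/942: a copied PTE keeps the uffd-wp
 * bit only if the destination VMA is itself registered for write-protect
 * tracking; otherwise the bit is dropped during the copy. */
static unsigned int copy_pte_flags_model(unsigned int src_pte,
					 unsigned int dst_vma_flags)
{
	unsigned int pte = src_pte;

	if (!(dst_vma_flags & VMA_UFFD_WP))
		pte &= ~PTE_UFFD_WP;
	return pte;
}

int main(void)
{
	unsigned int src = PTE_PRESENT | PTE_DIRTY | PTE_UFFD_WP;

	/* Destination VMA still uffd-wp registered: the marker survives. */
	printf("%#x\n", copy_pte_flags_model(src, VMA_UFFD_WP));
	/* Not registered: the marker is cleared so the child never traps on it. */
	printf("%#x\n", copy_pte_flags_model(src, 0));
	return 0;
}
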
D | hugetlb.c |
    5207  struct vm_area_struct *dst_vma,  in copy_hugetlb_page_range() argument
    5246  dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);  in copy_hugetlb_page_range()
    5276  if (!userfaultfd_wp(dst_vma))  in copy_hugetlb_page_range()
    5295  if (!userfaultfd_wp(dst_vma))  in copy_hugetlb_page_range()
    5300  pte_to_swp_entry(entry), dst_vma);  in copy_hugetlb_page_range()
    5329  new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);  in copy_hugetlb_page_range()
    5336  ALIGN_DOWN(addr, sz), dst_vma);  in copy_hugetlb_page_range()
    5349  restore_reserve_on_error(h, dst_vma, addr,  in copy_hugetlb_page_range()
    5355  hugetlb_install_folio(dst_vma, dst_pte, addr,  in copy_hugetlb_page_range()
    5374  if (!userfaultfd_wp(dst_vma))  in copy_hugetlb_page_range()
    [all …]
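
copy_hugetlb_page_range() does the same fork-time copy for hugetlb VMAs, one huge page of size sz per iteration, and helpers in the hits above (e.g. line 5336) are handed the ALIGN_DOWN(addr, sz) start of the current huge page. A small sketch of that walk, assuming sz is a power of two as real huge page sizes are; the addresses and the loop body are illustrative only:

#include <stdio.h>

/* Round addr down to a multiple of sz (sz must be a power of two,
 * e.g. a 2 MiB or 1 GiB huge page). */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

/* Models the walk in copy_hugetlb_page_range(): the source range is copied
 * one huge page at a time, and whole-huge-page helpers are given the
 * aligned start of the page the current address falls in. */
static void walk_huge_range_model(unsigned long start, unsigned long end,
				  unsigned long sz)
{
	for (unsigned long addr = start; addr < end; addr += sz)
		printf("copy huge page at %#lx (aligned: %#lx)\n",
		       addr, ALIGN_DOWN(addr, sz));
}

int main(void)
{
	/* 2 MiB huge pages over an 8 MiB range (illustrative addresses). */
	walk_huge_range_model(0x40000000UL, 0x40800000UL, 0x200000UL);
	return 0;
}
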
D | huge_memory.c |
    1568  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)  in copy_huge_pmd() argument
    1596  if (!vma_is_anonymous(dst_vma))  in copy_huge_pmd()
    1628  if (!userfaultfd_wp(dst_vma))  in copy_huge_pmd()
    1674  if (!userfaultfd_wp(dst_vma))  in copy_huge_pmd()
    2378  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  in move_pages_huge_pmd() argument
    2395  vma_assert_locked(dst_vma);  in move_pages_huge_pmd()
    2478  folio_move_anon_rmap(src_folio, dst_vma);  in move_pages_huge_pmd()
    2479  src_folio->index = linear_page_index(dst_vma, dst_addr);  in move_pages_huge_pmd()
    2481  _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);  in move_pages_huge_pmd()
    2483  _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);  in move_pages_huge_pmd()
    [all …]
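
Here copy_huge_pmd() is again the fork path, while move_pages_huge_pmd() is the huge-PMD side of UFFDIO_MOVE: the hits at lines 2478-2483 show the source folio being re-homed onto dst_vma, its index recomputed for the destination address, and a new PMD built from dst_vma->vm_page_prot. A simplified model of that index calculation, following the non-hugetlb definition of linear_page_index() (page offset within the VMA plus the VMA's starting pgoff); the struct and addresses are illustrative:

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages, as on x86-64 */

/* Only the two fields the index calculation needs. */
struct vma_model {
	unsigned long vm_start;
	unsigned long vm_pgoff;	/* offset of vm_start into the backing object, in pages */
};

/* Models linear_page_index() for the non-hugetlb case: which page index a
 * virtual address corresponds to within the VMA's backing object. When
 * UFFDIO_MOVE re-homes a folio to dst_vma (line 2479 above), its index has
 * to be recomputed against the destination address. */
static unsigned long linear_page_index_model(const struct vma_model *vma,
					     unsigned long address)
{
	return ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}

int main(void)
{
	struct vma_model src = { 0x10000000UL, 0 };
	struct vma_model dst = { 0x70000000UL, 0 };
	unsigned long src_addr = 0x10200000UL;
	unsigned long dst_addr = 0x70200000UL;

	printf("src index %lu -> dst index %lu\n",
	       linear_page_index_model(&src, src_addr),
	       linear_page_index_model(&dst, dst_addr));
	return 0;
}
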
D | shmem.c |
    2900  struct vm_area_struct *dst_vma,  in shmem_mfill_atomic_pte() argument
    2906  struct inode *inode = file_inode(dst_vma->vm_file);  in shmem_mfill_atomic_pte()
    2910  pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);  in shmem_mfill_atomic_pte()
    2988  ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);  in shmem_mfill_atomic_pte()
    2995  ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,  in shmem_mfill_atomic_pte()
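
shmem_mfill_atomic_pte() is the shmem backend for userfaultfd's fill ioctls: it derives the page offset from (dst_vma, dst_addr), charges the memory cgroup, and installs the PTE via mfill_atomic_install_pte(). A minimal userspace sketch of the request that ends up here, issuing UFFDIO_COPY against a memfd-backed MAP_SHARED mapping; error handling is mostly omitted, and unprivileged use may be disabled via the vm.unprivileged_userfaultfd sysctl on some systems:

/* Build (Linux only): cc -D_GNU_SOURCE -o uffd_copy uffd_copy.c */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* A shmem-backed MAP_SHARED region: missing-page faults on it are
	 * resolved by shmem_mfill_atomic_pte() once UFFDIO_COPY is issued. */
	int memfd = memfd_create("uffd-demo", 0);
	ftruncate(memfd, page);
	char *dst = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_SHARED, memfd, 0);

	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC);
	if (uffd == -1) {
		perror("userfaultfd");
		return 1;
	}
	struct uffdio_api api = { .api = UFFD_API };
	ioctl(uffd, UFFDIO_API, &api);

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)dst, .len = page },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* Source page prepared in ordinary anonymous memory... */
	char *src = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	strcpy(src, "hello from UFFDIO_COPY");

	/* ...and atomically installed into the registered range. */
	struct uffdio_copy copy = {
		.dst = (unsigned long)dst,
		.src = (unsigned long)src,
		.len = page,
	};
	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
		perror("UFFDIO_COPY");
		return 1;
	}
	printf("%s\n", dst);	/* the page is now present, no fault */
	return 0;
}
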
/linux-6.12.1/include/linux/ |
D | shmem_fs.h |
     191  struct vm_area_struct *dst_vma,
     197  #define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \  argument
D | userfaultfd_k.h |
     119  struct vm_area_struct *dst_vma,
     144  struct vm_area_struct *dst_vma,
D | mm_inline.h |
     520  swp_entry_t entry, struct vm_area_struct *dst_vma)  in copy_pte_marker() argument
     527  if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))  in copy_pte_marker()
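
copy_pte_marker() decides which PTE markers (installed in place of non-present PTEs) survive a copy into dst_vma; the hit at line 527 shows the uffd-wp marker being kept only when the destination VMA is uffd-wp registered. A toy model of that filtering: the unconditional copying of the poison marker is my reading of the surrounding kernel code, and the bit values here are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Toy marker bits; the names mirror the fragment above, not the kernel ABI. */
#define MARKER_UFFD_WP	(1u << 0)
#define MARKER_POISONED	(1u << 1)

/* Models copy_pte_marker(): the poison (error) marker is propagated to the
 * destination unconditionally, while the uffd-wp marker is carried over
 * only when the destination VMA is registered for write-protect tracking. */
static unsigned int copy_pte_marker_model(unsigned int srcm, bool dst_vma_uffd_wp)
{
	unsigned int dstm = srcm & MARKER_POISONED;

	if ((srcm & MARKER_UFFD_WP) && dst_vma_uffd_wp)
		dstm |= MARKER_UFFD_WP;
	return dstm;
}

int main(void)
{
	unsigned int src = MARKER_UFFD_WP | MARKER_POISONED;

	printf("%#x\n", copy_pte_marker_model(src, true));  /* both bits kept */
	printf("%#x\n", copy_pte_marker_model(src, false)); /* only poison kept */
	return 0;
}
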
D | hugetlb.h |
     145  struct vm_area_struct *dst_vma,
     324  struct vm_area_struct *dst_vma,  in copy_hugetlb_page_range() argument
     400  struct vm_area_struct *dst_vma,  in hugetlb_mfill_atomic_pte() argument
D | huge_mm.h |
      14  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
D | mm.h |
    2385  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);