
Searched refs:src_vma (Results 1 – 9 of 9) sorted by relevance

/linux-6.12.1/mm/
userfaultfd.c
1026 struct vm_area_struct *src_vma, in move_present_pte() argument
1049 orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte); in move_present_pte()
1096 struct vm_area_struct *src_vma, in move_zeropage_pte() argument
1113 ptep_clear_flush(src_vma, src_addr, src_pte); in move_zeropage_pte()
1128 struct vm_area_struct *src_vma, in move_pages_pte() argument
1144 flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE); in move_pages_pte()
1204 err = move_zeropage_pte(mm, dst_vma, src_vma, in move_pages_pte()
1230 folio = vm_normal_folio(src_vma, src_addr, orig_src_pte); in move_pages_pte()
1295 err = move_present_pte(mm, dst_vma, src_vma, in move_pages_pte()
1362 struct vm_area_struct *src_vma, in validate_move_areas() argument
[all …]
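
Note: the move_present_pte()/move_pages_pte() hits above are the kernel side of the UFFDIO_MOVE ioctl, which remaps already-faulted anonymous pages from src_vma into a userfaultfd-registered destination instead of copying them. A minimal userspace sketch of that ioctl follows; it assumes a v6.8+ kernel with matching uapi headers, and error handling is trimmed:

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    size_t len = sysconf(_SC_PAGESIZE);

    /* UFFD_USER_MODE_ONLY lets unprivileged users create the fd */
    int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY);

    /* handshake: negotiate UFFD_FEATURE_MOVE, required for UFFDIO_MOVE */
    struct uffdio_api api = { .api = UFFD_API, .features = UFFD_FEATURE_MOVE };
    if (ioctl(uffd, UFFDIO_API, &api))
        return perror("UFFDIO_API"), 1;

    char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    char *dst = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    /* the destination range is registered with the userfaultfd first */
    struct uffdio_register reg = {
        .range = { .start = (uintptr_t)dst, .len = len },
        .mode  = UFFDIO_REGISTER_MODE_MISSING,
    };
    if (ioctl(uffd, UFFDIO_REGISTER, &reg))
        return perror("UFFDIO_REGISTER"), 1;

    memset(src, 0x5a, len);    /* fault the source page in */

    /* this ioctl is what ends up in move_pages_pte() above */
    struct uffdio_move mv = {
        .dst = (uintptr_t)dst, .src = (uintptr_t)src,
        .len = len, .mode = 0,
    };
    if (ioctl(uffd, UFFDIO_MOVE, &mv) || mv.move != (long long)len)
        return perror("UFFDIO_MOVE"), 1;

    printf("moved: dst[0] = 0x%x\n", dst[0]);    /* 0x5a */
    return 0;
}
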
memory.c
794 struct vm_area_struct *src_vma, unsigned long addr, int *rss) in copy_nonpresent_pte() argument
858 folio_try_dup_anon_rmap_pte(folio, page, src_vma); in copy_nonpresent_pte()
883 VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags)); in copy_nonpresent_pte()
884 if (try_restore_exclusive_pte(src_pte, src_vma, addr)) in copy_nonpresent_pte()
914 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, in copy_present_page() argument
930 if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma)) in copy_present_page()
950 struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, in __copy_present_ptes() argument
953 struct mm_struct *src_mm = src_vma->vm_mm; in __copy_present_ptes()
956 if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) { in __copy_present_ptes()
962 if (src_vma->vm_flags & VM_SHARED) in __copy_present_ptes()
[all …]
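
Note: copy_nonpresent_pte()/__copy_present_ptes() above run at fork() via copy_page_range(), where is_cow_mapping(src_vma->vm_flags) decides whether to write-protect the parent's PTEs rather than copy pages eagerly. The user-visible effect is ordinary copy-on-write, as in this small runnable demo:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    /* MAP_PRIVATE|MAP_ANONYMOUS is a COW mapping in is_cow_mapping() terms */
    char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    strcpy(p, "parent data");

    if (fork() == 0) {           /* copy_page_range() wrprotects, no copy yet */
        strcpy(p, "child data"); /* write fault copies the page for the child */
        _exit(0);
    }
    wait(NULL);
    printf("parent still sees: \"%s\"\n", p);    /* "parent data" */
    return 0;
}
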
hugetlb.c
5208 struct vm_area_struct *src_vma) in copy_hugetlb_page_range() argument
5213 bool cow = is_cow_mapping(src_vma->vm_flags); in copy_hugetlb_page_range()
5214 struct hstate *h = hstate_vma(src_vma); in copy_hugetlb_page_range()
5223 src_vma->vm_start, in copy_hugetlb_page_range()
5224 src_vma->vm_end); in copy_hugetlb_page_range()
5226 vma_assert_write_locked(src_vma); in copy_hugetlb_page_range()
5235 hugetlb_vma_lock_read(src_vma); in copy_hugetlb_page_range()
5239 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
5241 src_pte = hugetlb_walk(src_vma, addr, sz); in copy_hugetlb_page_range()
5268 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); in copy_hugetlb_page_range()
[all …]
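
Note: copy_hugetlb_page_range() above is the hugetlb counterpart of the same fork-time copy, walking src_vma from vm_start to vm_end in huge-page-sized steps under the VMA lock. A minimal demo of the path it serves; this assumes 2 MiB huge pages and a non-empty pool (vm.nr_hugepages > 0):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    size_t sz = 2UL << 20;    /* one 2 MiB huge page (arch-dependent) */
    char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    if (p == MAP_FAILED)
        return perror("mmap (is vm.nr_hugepages > 0?)"), 1;

    strcpy(p, "parent data");
    if (fork() == 0) {        /* fork() runs copy_hugetlb_page_range() here */
        strcpy(p, "child data");
        _exit(0);
    }
    wait(NULL);
    printf("parent still sees: \"%s\"\n", p);  /* COW at huge-page granularity */
    return 0;
}
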
huge_memory.c
1568 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) in copy_huge_pmd() argument
1591 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd)); in copy_huge_pmd()
1660 if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) { in copy_huge_pmd()
1666 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); in copy_huge_pmd()
2378 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, in move_pages_huge_pmd() argument
2394 vma_assert_locked(src_vma); in move_pages_huge_pmd()
2428 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE); in move_pages_huge_pmd()
2470 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); in move_pages_huge_pmd()
2485 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); in move_pages_huge_pmd()
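
Note: copy_huge_pmd() is the THP analogue of the PTE copy in memory.c (one PMD entry covers the whole huge folio), and move_pages_huge_pmd() is the PMD-sized path of UFFDIO_MOVE. A best-effort THP demo; this assumes transparent_hugepage is enabled, and the kernel may still fall back to 4 KiB pages:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    size_t sz = 2UL << 20;
    /* over-allocate so we can carve out a 2 MiB-aligned window */
    char *raw = mmap(NULL, 2 * sz, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    char *p = (char *)(((unsigned long)raw + sz - 1) & ~(sz - 1));

    madvise(p, sz, MADV_HUGEPAGE);    /* ask for a PMD-mapped huge page */
    memset(p, 0x42, sz);              /* first touch may install a huge PMD */

    if (fork() == 0) {     /* copy_huge_pmd() write-protects the shared PMD */
        p[0] = 0x24;       /* write fault: huge-page COW in the child */
        _exit(0);
    }
    wait(NULL);
    printf("parent byte: 0x%x\n", p[0]);    /* still 0x42 */
    return 0;
}
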
/linux-6.12.1/include/linux/
rmap.h
392 struct page *page, int nr_pages, struct vm_area_struct *src_vma, in __folio_try_dup_anon_rmap() argument
410 unlikely(folio_needs_cow_for_dma(src_vma, folio)); in __folio_try_dup_anon_rmap()
476 struct page *page, int nr_pages, struct vm_area_struct *src_vma) in folio_try_dup_anon_rmap_ptes() argument
478 return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma, in folio_try_dup_anon_rmap_ptes()
483 struct page *page, struct vm_area_struct *src_vma) in folio_try_dup_anon_rmap_pte() argument
485 return __folio_try_dup_anon_rmap(folio, page, 1, src_vma, in folio_try_dup_anon_rmap_pte()
512 struct page *page, struct vm_area_struct *src_vma) in folio_try_dup_anon_rmap_pmd() argument
515 return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma, in folio_try_dup_anon_rmap_pmd()
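
Note: the rmap.h hits show that folio_try_dup_anon_rmap_pte()/_ptes()/_pmd() are thin wrappers that all funnel into __folio_try_dup_anon_rmap() with a page-table-level argument. Below is a standalone model of that dispatch only; the struct definitions and the mapcount body are stubs for illustration, not the kernel's:

#include <stdio.h>

/* Stub types: illustrative only, not the kernel's definitions. */
enum rmap_level { RMAP_LEVEL_PTE, RMAP_LEVEL_PMD };
struct page { int dummy; };
struct folio { int mapcount; };
struct vm_area_struct { unsigned long vm_flags; };

#define HPAGE_PMD_NR 512    /* 2 MiB / 4 KiB on x86-64 */

/* Single worker, as at rmap.h line 392 above; the real one also handles
 * PageAnonExclusive and folio_needs_cow_for_dma(src_vma, folio). */
static int __folio_try_dup_anon_rmap(struct folio *folio, struct page *page,
                                     int nr_pages,
                                     struct vm_area_struct *src_vma,
                                     enum rmap_level level)
{
    (void)page; (void)src_vma; (void)level;
    folio->mapcount += nr_pages;    /* stand-in for real rmap bookkeeping */
    return 0;
}

static int folio_try_dup_anon_rmap_ptes(struct folio *f, struct page *pg,
                                        int nr, struct vm_area_struct *v)
{
    return __folio_try_dup_anon_rmap(f, pg, nr, v, RMAP_LEVEL_PTE);
}

static int folio_try_dup_anon_rmap_pte(struct folio *f, struct page *pg,
                                       struct vm_area_struct *v)
{
    return __folio_try_dup_anon_rmap(f, pg, 1, v, RMAP_LEVEL_PTE);
}

static int folio_try_dup_anon_rmap_pmd(struct folio *f, struct page *pg,
                                       struct vm_area_struct *v)
{
    return __folio_try_dup_anon_rmap(f, pg, HPAGE_PMD_NR, v, RMAP_LEVEL_PMD);
}

int main(void)
{
    struct folio f = { 0 };
    struct page pg = { 0 };
    struct vm_area_struct vma = { 0 };

    folio_try_dup_anon_rmap_pmd(&f, &pg, &vma);
    printf("mapcount after PMD dup: %d\n", f.mapcount);    /* 512 */
    return 0;
}
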
userfaultfd_k.h
145 struct vm_area_struct *src_vma,
huge_mm.h
14 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
hugetlb.h
325 struct vm_area_struct *src_vma) in copy_hugetlb_page_range() argument
mm.h
2385 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);