/linux-6.12.1/mm/ |
D | rmap.c |
     91  static inline struct anon_vma *anon_vma_alloc(void)  in anon_vma_alloc()
     93  struct anon_vma *anon_vma;  in anon_vma_alloc() local
     95  anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);  in anon_vma_alloc()
     96  if (anon_vma) {  in anon_vma_alloc()
     97  atomic_set(&anon_vma->refcount, 1);  in anon_vma_alloc()
     98  anon_vma->num_children = 0;  in anon_vma_alloc()
     99  anon_vma->num_active_vmas = 0;  in anon_vma_alloc()
    100  anon_vma->parent = anon_vma;  in anon_vma_alloc()
    105  anon_vma->root = anon_vma;  in anon_vma_alloc()
    108  return anon_vma;  in anon_vma_alloc()
    [all …]
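
The matches above capture the whole of anon_vma_alloc(): a fresh anon_vma starts life as a single-node tree, with refcount 1 and both parent and root pointing back at itself. Below is a minimal userspace model of that initialization pattern, assuming malloc() in place of kmem_cache_alloc() and C11 atomics in place of atomic_t; the _model names are illustrative, not kernel API.

    #include <stdatomic.h>
    #include <stdlib.h>

    struct anon_vma_model {
            struct anon_vma_model *root;    /* root of the anon_vma tree */
            struct anon_vma_model *parent;  /* parent in the tree */
            atomic_int refcount;
            int num_children;
            int num_active_vmas;
    };

    static struct anon_vma_model *anon_vma_alloc_model(void)
    {
            struct anon_vma_model *anon_vma = malloc(sizeof(*anon_vma));

            if (anon_vma) {
                    atomic_init(&anon_vma->refcount, 1);  /* caller owns one reference */
                    anon_vma->num_children = 0;
                    anon_vma->num_active_vmas = 0;
                    anon_vma->parent = anon_vma;  /* self-parented until linked into a tree */
                    anon_vma->root = anon_vma;    /* a new anon_vma roots its own tree */
            }
            return anon_vma;
    }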
|
D | vma.c |
     35  static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,  in is_mergeable_anon_vma()
     36  struct anon_vma *anon_vma2, struct vm_area_struct *vma)  in is_mergeable_anon_vma()
     52  return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);  in are_anon_vmas_compatible()
     71  vp->anon_vma = vma->anon_vma;  in init_multi_vma_prep()
     75  if (!vp->anon_vma && next)  in init_multi_vma_prep()
     76  vp->anon_vma = next->anon_vma;  in init_multi_vma_prep()
    102  is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {  in can_vma_merge_before()
    122  is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {  in can_vma_merge_after()
    180  if (vp->anon_vma) {  in vma_prepare()
    181  anon_vma_lock_write(vp->anon_vma);  in vma_prepare()
    [all …]
|
D | migrate.c |
   1126  struct anon_vma *anon_vma)  in __migrate_folio_record() argument
   1128  dst->private = (void *)anon_vma + old_page_state;  in __migrate_folio_record()
   1133  struct anon_vma **anon_vmap)  in __migrate_folio_extract()
   1137  *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);  in __migrate_folio_extract()
   1145  struct anon_vma *anon_vma,  in migrate_folio_undo_src() argument
   1152  if (anon_vma)  in migrate_folio_undo_src()
   1153  put_anon_vma(anon_vma);  in migrate_folio_undo_src()
   1199  struct anon_vma *anon_vma = NULL;  in migrate_folio_unmap() local
   1287  anon_vma = folio_get_anon_vma(src);  in migrate_folio_unmap()
   1302  __migrate_folio_record(dst, old_page_state, anon_vma);  in migrate_folio_unmap()
   [all …]
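
__migrate_folio_record() and __migrate_folio_extract() above rely on pointer tagging: slab-allocated anon_vma pointers are word-aligned, so their low bits are zero and can carry the old page state, letting dst->private hold both values in a single word. A userspace sketch of the same trick follows; the _model/_stub names are stand-ins, and the mask width is an assumption (the real PAGE_OLD_STATES is defined in mm/migrate.c).

    #include <stdint.h>

    #define PAGE_OLD_STATES_MODEL 0x3UL  /* assumed: flags fit in the alignment bits */

    struct anon_vma_stub { long dummy; };  /* aligned, so low bits of its address are free */

    /* Pack small state flags into the low bits of an aligned pointer. */
    static void *migrate_record_model(struct anon_vma_stub *anon_vma,
                                      unsigned long old_page_state)
    {
            return (void *)((uintptr_t)anon_vma + old_page_state);
    }

    /* Recover the pointer and the flags from the packed word. */
    static unsigned long migrate_extract_model(void *private,
                                               struct anon_vma_stub **anon_vmap)
    {
            uintptr_t v = (uintptr_t)private;

            *anon_vmap = (struct anon_vma_stub *)(v & ~PAGE_OLD_STATES_MODEL);
            return v & PAGE_OLD_STATES_MODEL;
    }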
|
D | vma.h |
     18  struct anon_vma *anon_vma;  member
     84  struct anon_vma *anon_vma;  member
    128  .anon_vma = vma_->anon_vma, \
    331  struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);
|
D | ksm.c |
    205  struct anon_vma *anon_vma;  /* when stable */  member
    710  if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)  in find_mergeable_vma()
    725  put_anon_vma(rmap_item->anon_vma);  in break_cow()
    837  put_anon_vma(rmap_item->anon_vma);  in remove_node_from_stable_tree()
    990  put_anon_vma(rmap_item->anon_vma);  in remove_rmap_item_from_tree()
   1194  if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)  in unmerge_and_remove_all_rmap_items()
   1566  rmap_item->anon_vma = vma->anon_vma;  in try_to_merge_with_ksm_page()
   1567  get_anon_vma(vma->anon_vma);  in try_to_merge_with_ksm_page()
   2533  if (!vma->anon_vma)  in scan_get_next_rmap_item()
   2710  if (vma->anon_vma) {  in __ksm_del_vma()
   [all …]
|
D | mremap.c |
    109  if (vma->anon_vma)  in take_rmap_locks()
    110  anon_vma_lock_write(vma->anon_vma);  in take_rmap_locks()
    115  if (vma->anon_vma)  in drop_rmap_locks()
    116  anon_vma_unlock_write(vma->anon_vma);  in drop_rmap_locks()
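
take_rmap_locks()/drop_rmap_locks() show the conditional-locking idiom used throughout mm/: vma->anon_vma stays NULL until the VMA first faults in an anonymous page, so the rmap write lock is taken only when an anon_vma exists. A pthread-based model of the idiom, with the _m types as stand-ins; note that in the kernel, anon_vma_lock_write() actually takes the rwsem of anon_vma->root, so one lock serializes the whole anon_vma tree.

    #include <pthread.h>
    #include <stddef.h>

    struct anon_vma_m { pthread_rwlock_t rwsem; };
    struct vma_m { struct anon_vma_m *anon_vma; };  /* NULL until first anon fault */

    static void take_rmap_locks_model(struct vma_m *vma)
    {
            if (vma->anon_vma)
                    pthread_rwlock_wrlock(&vma->anon_vma->rwsem);
    }

    static void drop_rmap_locks_model(struct vma_m *vma)
    {
            if (vma->anon_vma)
                    pthread_rwlock_unlock(&vma->anon_vma->rwsem);
    }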
|
D | huge_memory.c |
    191  if (!vma->anon_vma)  in __thp_vma_allowable_orders()
   1776  VM_BUG_ON_VMA(!vma->anon_vma, vma);  in do_huge_pmd_wp_page()
   2384  struct anon_vma *src_anon_vma;  in move_pages_huge_pmd()
   3357  struct anon_vma *anon_vma = NULL;  in split_huge_page_to_list_to_order() local
   3418  anon_vma = folio_get_anon_vma(folio);  in split_huge_page_to_list_to_order()
   3419  if (!anon_vma) {  in split_huge_page_to_list_to_order()
   3425  anon_vma_lock_write(anon_vma);  in split_huge_page_to_list_to_order()
   3460  anon_vma = NULL;  in split_huge_page_to_list_to_order()
   3553  if (anon_vma) {  in split_huge_page_to_list_to_order()
   3554  anon_vma_unlock_write(anon_vma);  in split_huge_page_to_list_to_order()
   [all …]
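
The split_huge_page_to_list_to_order() matches trace a common rmap idiom: pin the folio's anon_vma with a reference so it cannot be freed mid-operation, hold its write lock across the unmap/split/remap, then unlock and unpin. A condensed outline of that idiom follows; the API names are real, but this is a sketch of the locking shape, not the actual function body, and error paths are elided.

    struct anon_vma *anon_vma;

    anon_vma = folio_get_anon_vma(folio);   /* takes a reference on the anon_vma */
    if (!anon_vma)
            return -EBUSY;                  /* folio is no longer anonymously mapped */

    anon_vma_lock_write(anon_vma);          /* keep rmap walkers out during the split */

    /* ... unmap the folio, split it, remap the pieces ... */

    anon_vma_unlock_write(anon_vma);
    put_anon_vma(anon_vma);                 /* drop the reference taken above */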
|
D | khugepaged.c |
    942  if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))  in hugepage_vma_revalidate()
   1164  anon_vma_lock_write(vma->anon_vma);  in collapse_huge_page()
   1205  anon_vma_unlock_write(vma->anon_vma);  in collapse_huge_page()
   1213  anon_vma_unlock_write(vma->anon_vma);  in collapse_huge_page()
   1715  if (READ_ONCE(vma->anon_vma))  in retract_page_tables()
   1758  if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {  in retract_page_tables()
|
D | debug.c | 184 vma->anon_vma, vma->vm_ops, vma->vm_pgoff, in dump_vma()
|
D | mmap.c |
   1094  anon_vma_lock_write(vma->anon_vma);  in expand_upwards()
   1131  anon_vma_unlock_write(vma->anon_vma);  in expand_upwards()
   1186  anon_vma_lock_write(vma->anon_vma);  in expand_downwards()
   1224  anon_vma_unlock_write(vma->anon_vma);  in expand_downwards()
   2002  BUG_ON(vma->anon_vma);  in insert_vm_struct()
|
D | userfaultfd.c |
     79  if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma))  in uffd_lock_vma()
   1140  struct anon_vma *src_anon_vma = NULL;  in move_pages_pte()
|
D | mprotect.c | 660 !vma->anon_vma) { in mprotect_fixup()
|
D | util.c | 823 struct anon_vma *folio_anon_vma(struct folio *folio) in folio_anon_vma()
|
D | internal.h | 844 struct anon_vma *folio_anon_vma(struct folio *folio);
|
D | memory.c |
    538  (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);  in print_bad_pte()
   1343  if (src_vma->anon_vma)  in vma_needs_copy()
   3301  if (likely(vma->anon_vma))  in __vmf_anon_prepare()
|
D | memory-failure.c | 613 struct anon_vma *av; in collect_procs_anon()
|
/linux-6.12.1/include/linux/ |
D | rmap.h |
     31  struct anon_vma {  struct
     32  struct anon_vma *root;  /* Root of this anon_vma tree */  argument
     54  struct anon_vma *parent;  /* Parent of this anon_vma */  argument
     84  struct anon_vma *anon_vma;  member
    106  static inline void get_anon_vma(struct anon_vma *anon_vma)  in get_anon_vma() argument
    108  atomic_inc(&anon_vma->refcount);  in get_anon_vma()
    111  void __put_anon_vma(struct anon_vma *anon_vma);
    113  static inline void put_anon_vma(struct anon_vma *anon_vma)  in put_anon_vma() argument
    115  if (atomic_dec_and_test(&anon_vma->refcount))  in put_anon_vma()
    116  __put_anon_vma(anon_vma);  in put_anon_vma()
    [all …]
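
The rmap.h matches show the complete lifetime interface in three pieces: get_anon_vma() bumps the refcount, put_anon_vma() drops it, and only the final drop calls __put_anon_vma() to tear the structure down. A userspace model of the pair, using C11 atomics; av_model and the function names are illustrative.

    #include <stdatomic.h>
    #include <stdlib.h>

    struct av_model { atomic_int refcount; };

    static void get_av(struct av_model *av)
    {
            atomic_fetch_add(&av->refcount, 1);     /* models atomic_inc() */
    }

    static void put_av(struct av_model *av)
    {
            /* atomic_dec_and_test() is true only for the final reference. */
            if (atomic_fetch_sub(&av->refcount, 1) == 1)
                    free(av);       /* models __put_anon_vma() */
    }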
|
D | mm_types.h | 741 struct anon_vma *anon_vma; /* Serialized by page_table_lock */ member
|
/linux-6.12.1/tools/testing/vma/ |
D | vma.c |
     26  static struct anon_vma dummy_anon_vma;
    379  .anon_vma = &dummy_anon_vma,  in test_merge_new()
    382  .anon_vma = &dummy_anon_vma,  in test_merge_new()
    385  .anon_vma = &dummy_anon_vma,  in test_merge_new()
    388  .anon_vma = &dummy_anon_vma,  in test_merge_new()
    437  vma_b->anon_vma = &dummy_anon_vma;  in test_merge_new()
    445  ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);  in test_merge_new()
    462  ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);  in test_merge_new()
    472  vma_d->anon_vma = &dummy_anon_vma;  in test_merge_new()
    481  ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);  in test_merge_new()
    [all …]
|
D | vma_internal.h |
    124  struct anon_vma {  struct
    125  struct anon_vma *root;  argument
    134  struct anon_vma *anon_vma;  argument
    256  struct anon_vma *anon_vma;  /* Serialized by page_table_lock */  member
    713  if (src->anon_vma != NULL) {  in anon_vma_clone()
    714  dst->anon_vma = src->anon_vma;  in anon_vma_clone()
    715  dst->anon_vma->was_cloned = true;  in anon_vma_clone()
    793  static inline void anon_vma_lock_write(struct anon_vma *)  in anon_vma_lock_write() argument
    804  vma->anon_vma->was_unlinked = true;  in unlink_anon_vmas()
    807  static inline void anon_vma_unlock_write(struct anon_vma *)  in anon_vma_unlock_write() argument
    [all …]
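
vma_internal.h is the userspace shim behind the tests above: real rmap functions are replaced by stubs that merely record side effects (was_cloned, was_unlinked), which the ASSERT_EQ checks in tools/testing/vma/vma.c then inspect. A self-contained sketch of that stub-and-assert technique, with fake_* names standing in for the harness's types:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct fake_anon_vma { bool was_cloned; };
    struct fake_vma { struct fake_anon_vma *anon_vma; };

    /* Stub: share the anon_vma and record that cloning happened. */
    static int anon_vma_clone_stub(struct fake_vma *dst, struct fake_vma *src)
    {
            if (src->anon_vma != NULL) {
                    dst->anon_vma = src->anon_vma;
                    dst->anon_vma->was_cloned = true;
            }
            return 0;
    }

    int main(void)
    {
            struct fake_anon_vma av = { .was_cloned = false };
            struct fake_vma src = { .anon_vma = &av };
            struct fake_vma dst = { .anon_vma = NULL };

            anon_vma_clone_stub(&dst, &src);
            assert(dst.anon_vma == &av);
            assert(av.was_cloned);
            return 0;
    }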
|
/linux-6.12.1/Documentation/translations/zh_CN/admin-guide/mm/ |
D | ksm.rst | 194 …because do_swap_page() cannot do all the locking needed to reconstitute a cross-anon_vma KSM page.
|
/linux-6.12.1/Documentation/translations/zh_TW/admin-guide/mm/ |
D | ksm.rst | 194 …because do_swap_page() cannot do all the locking needed to reconstitute a cross-anon_vma KSM page.
|
/linux-6.12.1/Documentation/mm/ |
D | transhuge.rst | 96 takes the mmap_lock in write mode in addition to the anon_vma lock). If
|
/linux-6.12.1/fs/ |
D | coredump.c | 1117 if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE)) in vma_dump_size()
|
/linux-6.12.1/Documentation/admin-guide/mm/ |
D | ksm.rst | 290 cannot do all the locking needed to reconstitute a cross-anon_vma KSM page.
|