Lines from the Linux kernel's mm/ksm.c matching: +full:time +full:- +full:slot
1 // SPDX-License-Identifier: GPL-2.0-only
8 * Copyright (C) 2008-2009 Red Hat, Inc.
73 * Therefore KSM uses two data structures - the stable and the unstable tree.
76 * by their contents. Because each such page is write-protected, searching on
97 * be "unchanged for a period of time". The unstable tree sorts these pages
98 * by their contents, but since they are not write-protected, KSM cannot rely
99 * upon the unstable tree to work correctly - the unstable tree is liable to
104 * 1) The unstable tree is flushed every time KSM completes scanning all
108 * 3) The unstable tree is a red-black tree - so its balancing is based on the
110 * the tree gets "corrupted" it won't get out of balance, so scanning time
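Taken together, the points above describe the per-page scan step (cmp_and_merge_page(), whose fragments appear further down in this listing). The sketch below is a condensed illustration of that flow, not the kernel's exact control flow; helper names are the real ones from this file, but error handling and locking are omitted:

/* Sketch of one KSM scan step (simplified). */
static void scan_one_page(struct page *page, struct ksm_rmap_item *rmap_item)
{
	struct page *kpage;
	u32 checksum;

	/* 1) Identical page already in the stable tree? Merge into it. */
	kpage = stable_tree_search(page);
	if (kpage) {
		try_to_merge_with_ksm_page(rmap_item, page, kpage);
		return;
	}

	/* 2) Checksum changed since the last pass: the page is volatile
	 *    and not worth inserting into either tree yet. */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	/* 3) Otherwise look for a match in the unstable tree; when two
	 *    identical pages meet there, they are merged and the result
	 *    is promoted into the stable tree via stable_tree_insert(). */
}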
123 * struct ksm_mm_slot - ksm information per mm that is being scanned
124 * @slot: hash lookup from mm to mm_slot
125 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
128 struct mm_slot slot; member
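For reference, the whole structure is just the two documented fields (as defined in recent mm/ksm.c; check your tree):

struct ksm_mm_slot {
	struct mm_slot slot;			/* hash lookup from mm to mm_slot */
	struct ksm_rmap_item *rmap_list;	/* head of this mm's singly-linked rmap_item list */
};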
133 * struct ksm_scan - cursor for scanning
149 * struct ksm_stable_node - node of the stable rbtree
152 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
156 * @chain_prune_time: time of the last full garbage collection
178 * rmap_hlist_len negative range, but better not -1 to be able to reliably detect underflows
181 #define STABLE_NODE_CHAIN -1024
189 * struct ksm_rmap_item - reverse mapping item for virtual addresses
190 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
242 .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
294 /* Skip pages that couldn't be de-duplicated previously */
313 /* Target scan time in seconds to analyze all KSM candidate pages. */
320 * struct advisor_ctx - metadata for KSM advisor
321 * @start_scan: start time of the current scan
322 * @scan_time: scan time of previous scan
324 * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
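Assembling the documented fields gives a small bookkeeping struct; this sketch follows the kernel-doc above and omits any fields the search query did not match:

struct advisor_ctx {
	ktime_t start_scan;		/* start time of the current scan */
	unsigned long scan_time;	/* scan time of the previous scan */
	unsigned long long cpu_time;	/* ksmd cpu time during the previous scan */
	/* ... */
};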
367 * Use previous scan time if available, otherwise use current scan time as an
368 * approximation for the previous scan time.
373 return ctx->scan_time ? ctx->scan_time : scan_time; in prev_scan_time()
379 return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100; in ewma()
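Worked example: with EWMA_WEIGHT = 30 (the value I'd expect in recent trees; treat the exact constant as tree-dependent), a previous average of 100 and a new sample of 200 give (70 * 100 + 30 * 200) / 100 = 130, i.e. the average moves 30% of the way toward each new sample.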
383 * The scan time advisor is based on the current scan rate and the target
416 /* Convert scan time to seconds */ in scan_time_advisor()
423 cpu_time_diff = cpu_time - advisor_ctx.cpu_time; in scan_time_advisor()
430 /* Calculate scan time as percentage of target scan time */ in scan_time_advisor()
435 * Calculate scan time as percentage of last scan time and use in scan_time_advisor()
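The proportional core of scan_time_advisor() can be modelled as below. This is a simplified sketch with illustrative names, not the kernel's exact code, which additionally smooths the ratio with ewma() and applies the advisor's min/max pages and max-CPU caps:

/* Simplified model of the scan-time advisor's update step. */
static unsigned long next_pages_to_scan(unsigned long pages_to_scan,
					unsigned long scan_time, /* last full pass, seconds */
					unsigned long target)    /* ksm_advisor_target_scan_time */
{
	/* If the last full pass took twice the target time, scan twice as
	 * many pages per wakeup; if it finished early, back off
	 * proportionally. */
	return pages_to_scan * scan_time / target;
}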
512 return -ENOMEM; in ksm_slab_init()
525 return chain->rmap_hlist_len == STABLE_NODE_CHAIN; in is_stable_node_chain()
530 return dup->head == STABLE_NODE_DUP_HEAD; in is_stable_node_dup()
537 dup->head = STABLE_NODE_DUP_HEAD; in stable_node_chain_add_dup()
539 hlist_add_head(&dup->hlist_dup, &chain->hlist); in stable_node_chain_add_dup()
546 hlist_del(&dup->hlist_dup); in __stable_node_dup_del()
547 ksm_stable_node_dups--; in __stable_node_dup_del()
556 rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid)); in stable_node_dup_del()
558 dup->head = NULL; in stable_node_dup_del()
575 ksm_rmap_items--; in free_rmap_item()
576 rmap_item->mm->ksm_rmap_items--; in free_rmap_item()
577 rmap_item->mm = NULL; /* debug safety */ in free_rmap_item()
593 VM_BUG_ON(stable_node->rmap_hlist_len && in free_stable_node()
600 * page tables after it has passed through ksm_exit() - which, if necessary,
608 return atomic_read(&mm->mm_users) == 0; in ksm_test_exit()
662 * VM_FAULT_OOM: at the time of writing (late July 2009), setting in break_ksm()
669 * even ksmd can fail in this way - though it's usually breaking ksm in break_ksm()
678 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; in break_ksm()
683 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP | in vma_ksm_compatible()
692 if (vma->vm_flags & VM_SAO) in vma_ksm_compatible()
696 if (vma->vm_flags & VM_SPARC_ADI) in vma_ksm_compatible()
710 if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in find_mergeable_vma()
717 struct mm_struct *mm = rmap_item->mm; in break_cow()
718 unsigned long addr = rmap_item->address; in break_cow()
725 put_anon_vma(rmap_item->anon_vma); in break_cow()
736 struct mm_struct *mm = rmap_item->mm; in get_mergeable_page()
737 unsigned long addr = rmap_item->address; in get_mergeable_page()
768 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
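Concretely, every tree lookup indexes a per-NUMA-node array of rb_root, and the knob collapses that index to 0. This is the helper as I recall it from mm/ksm.c (NUMA(x) expands to (x) under CONFIG_NUMA and to 0 otherwise):

static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}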
783 INIT_HLIST_HEAD(&chain->hlist); in alloc_stable_node_chain()
784 chain->chain_prune_time = jiffies; in alloc_stable_node_chain()
785 chain->rmap_hlist_len = STABLE_NODE_CHAIN; in alloc_stable_node_chain()
787 chain->nid = NUMA_NO_NODE; /* debug */ in alloc_stable_node_chain()
793 * the stable tree and at the same time remove the old in alloc_stable_node_chain()
796 rb_replace_node(&dup->node, &chain->node, root); in alloc_stable_node_chain()
801 * dup stable_nodes in the chain->hlist point to pages in alloc_stable_node_chain()
813 rb_erase(&chain->node, root); in free_stable_node_chain()
815 ksm_stable_node_chains--; in free_stable_node_chain()
823 BUG_ON(stable_node->rmap_hlist_len < 0); in remove_node_from_stable_tree()
825 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { in remove_node_from_stable_tree()
826 if (rmap_item->hlist.next) { in remove_node_from_stable_tree()
827 ksm_pages_sharing--; in remove_node_from_stable_tree()
828 trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm); in remove_node_from_stable_tree()
830 ksm_pages_shared--; in remove_node_from_stable_tree()
833 rmap_item->mm->ksm_merging_pages--; in remove_node_from_stable_tree()
835 VM_BUG_ON(stable_node->rmap_hlist_len <= 0); in remove_node_from_stable_tree()
836 stable_node->rmap_hlist_len--; in remove_node_from_stable_tree()
837 put_anon_vma(rmap_item->anon_vma); in remove_node_from_stable_tree()
838 rmap_item->address &= PAGE_MASK; in remove_node_from_stable_tree()
852 trace_ksm_remove_ksm_page(stable_node->kpfn); in remove_node_from_stable_tree()
853 if (stable_node->head == &migrate_nodes) in remove_node_from_stable_tree()
854 list_del(&stable_node->list); in remove_node_from_stable_tree()
881 * page to reset its page->mapping to NULL, and relies on no other use of
882 * a page to put something that might look like our key in page->mapping.
895 kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */ in ksm_get_folio()
897 if (READ_ONCE(folio->mapping) != expected_mapping) in ksm_get_folio()
902 * Usually 0 means free, or tail of a higher-order page: in which in ksm_get_folio()
912 * Another check for folio->mapping != expected_mapping in ksm_get_folio()
917 * section of __remove_mapping(); but anon folio->mapping in ksm_get_folio()
925 if (READ_ONCE(folio->mapping) != expected_mapping) { in ksm_get_folio()
933 return ERR_PTR(-EBUSY); in ksm_get_folio()
939 if (READ_ONCE(folio->mapping) != expected_mapping) { in ksm_get_folio()
949 * We come here from above when folio->mapping or the swapcache flag in ksm_get_folio()
952 * before checking whether node->kpfn has been changed. in ksm_get_folio()
955 if (READ_ONCE(stable_node->kpfn) != kpfn) in ksm_get_folio()
967 if (rmap_item->address & STABLE_FLAG) { in remove_rmap_item_from_tree()
971 stable_node = rmap_item->head; in remove_rmap_item_from_tree()
976 hlist_del(&rmap_item->hlist); in remove_rmap_item_from_tree()
980 if (!hlist_empty(&stable_node->hlist)) in remove_rmap_item_from_tree()
981 ksm_pages_sharing--; in remove_rmap_item_from_tree()
983 ksm_pages_shared--; in remove_rmap_item_from_tree()
985 rmap_item->mm->ksm_merging_pages--; in remove_rmap_item_from_tree()
987 VM_BUG_ON(stable_node->rmap_hlist_len <= 0); in remove_rmap_item_from_tree()
988 stable_node->rmap_hlist_len--; in remove_rmap_item_from_tree()
990 put_anon_vma(rmap_item->anon_vma); in remove_rmap_item_from_tree()
991 rmap_item->head = NULL; in remove_rmap_item_from_tree()
992 rmap_item->address &= PAGE_MASK; in remove_rmap_item_from_tree()
994 } else if (rmap_item->address & UNSTABLE_FLAG) { in remove_rmap_item_from_tree()
1003 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); in remove_rmap_item_from_tree()
1006 rb_erase(&rmap_item->node, in remove_rmap_item_from_tree()
1007 root_unstable_tree + NUMA(rmap_item->nid)); in remove_rmap_item_from_tree()
1008 ksm_pages_unshared--; in remove_rmap_item_from_tree()
1009 rmap_item->address &= PAGE_MASK; in remove_rmap_item_from_tree()
1019 *rmap_list = rmap_item->rmap_list; in remove_trailing_rmap_items()
1028 * that - an rmap_item is assigned to the stable tree after inserting ksm
1030 * rmap_items from parent to child at fork time (so as not to waste time
1035 * to the next pass of ksmd - consider, for example, how ksmd might be
1045 if (ksm_test_exit(vma->vm_mm)) in unmerge_ksm_pages()
1048 err = -ERESTARTSYS; in unmerge_ksm_pages()
1068 VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio); in folio_set_stable_node()
1069 folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); in folio_set_stable_node()
1094 err = -EBUSY; in remove_stable_node()
1129 &stable_node->hlist, hlist_dup) { in remove_stable_node_chain()
1134 BUG_ON(!hlist_empty(&stable_node->hlist)); in remove_stable_node_chain()
1151 err = -EBUSY; in remove_all_stable_nodes()
1159 err = -EBUSY; in remove_all_stable_nodes()
1168 struct mm_slot *slot; in unmerge_and_remove_all_rmap_items() local
1174 slot = list_entry(ksm_mm_head.slot.mm_node.next, in unmerge_and_remove_all_rmap_items()
1176 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); in unmerge_and_remove_all_rmap_items()
1181 VMA_ITERATOR(vmi, mm_slot->slot.mm, 0); in unmerge_and_remove_all_rmap_items()
1183 mm = mm_slot->slot.mm; in unmerge_and_remove_all_rmap_items()
1194 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in unmerge_and_remove_all_rmap_items()
1197 vma->vm_start, vma->vm_end, false); in unmerge_and_remove_all_rmap_items()
1203 remove_trailing_rmap_items(&mm_slot->rmap_list); in unmerge_and_remove_all_rmap_items()
1207 slot = list_entry(mm_slot->slot.mm_node.next, in unmerge_and_remove_all_rmap_items()
1209 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); in unmerge_and_remove_all_rmap_items()
1211 hash_del(&mm_slot->slot.hash); in unmerge_and_remove_all_rmap_items()
1212 list_del(&mm_slot->slot.mm_node); in unmerge_and_remove_all_rmap_items()
1216 clear_bit(MMF_VM_MERGEABLE, &mm->flags); in unmerge_and_remove_all_rmap_items()
1217 clear_bit(MMF_VM_MERGE_ANY, &mm->flags); in unmerge_and_remove_all_rmap_items()
1249 struct mm_struct *mm = vma->vm_mm; in write_protect_page()
1252 int err = -EFAULT; in write_protect_page()
1260 pvmw.address = page_address_in_vma(&folio->page, vma); in write_protect_page()
1261 if (pvmw.address == -EFAULT) in write_protect_page()
1273 anon_exclusive = PageAnonExclusive(&folio->page); in write_protect_page()
1305 folio_try_share_anon_rmap_pte(folio, &folio->page)) { in write_protect_page()
1331 * replace_page - replace page in vma by new ksm page
1337 * Returns 0 on success, -EFAULT on failure.
1343 struct mm_struct *mm = vma->vm_mm; in replace_page()
1351 int err = -EFAULT; in replace_page()
1355 if (addr == -EFAULT) in replace_page()
1392 newpte = mk_pte(kpage, vma->vm_page_prot); in replace_page()
1396 * we can easily track all KSM-placed zero pages by checking if in replace_page()
1399 newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot))); in replace_page()
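The consumer side of this trick: a KSM-placed zero page is recognizable as a pte of the zero pfn with the dirty bit set, a combination that never otherwise occurs for the shared zero page. include/linux/ksm.h defines the check roughly as below (verify against your tree):

#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))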
1435 * try_to_merge_one_page - take two pages and merge them into one
1439 * or NULL the first time when we want to use page as kpage.
1441 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1447 int err = -EFAULT; in try_to_merge_one_page()
1458 * lock_page() because we don't want to wait here - we in try_to_merge_one_page()
1472 * to be write-protected. If it's mapped elsewhere, all of its in try_to_merge_one_page()
1473 * ptes are necessarily already write-protected. But in either in try_to_merge_one_page()
1504 * no longer merging candidates (e.g., VMA stale), -EFAULT otherwise.
1509 struct mm_struct *mm = rmap_item->mm; in try_to_merge_with_zero_page()
1510 int err = -EFAULT; in try_to_merge_with_zero_page()
1516 if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) { in try_to_merge_with_zero_page()
1520 vma = find_mergeable_vma(mm, rmap_item->address); in try_to_merge_with_zero_page()
1523 ZERO_PAGE(rmap_item->address)); in try_to_merge_with_zero_page()
1525 page_to_pfn(ZERO_PAGE(rmap_item->address)), in try_to_merge_with_zero_page()
1541 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
1544 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1549 struct mm_struct *mm = rmap_item->mm; in try_to_merge_with_ksm_page()
1551 int err = -EFAULT; in try_to_merge_with_ksm_page()
1554 vma = find_mergeable_vma(mm, rmap_item->address); in try_to_merge_with_ksm_page()
1566 rmap_item->anon_vma = vma->anon_vma; in try_to_merge_with_ksm_page()
1567 get_anon_vma(vma->anon_vma); in try_to_merge_with_ksm_page()
1576 * try_to_merge_two_pages - take two identical pages and prepare them
1609 VM_BUG_ON(stable_node->rmap_hlist_len < 0); in __is_page_sharing_candidate()
1616 return stable_node->rmap_hlist_len && in __is_page_sharing_candidate()
1617 stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; in __is_page_sharing_candidate()
1637 time_before(jiffies, stable_node->chain_prune_time + in stable_node_dup()
1642 stable_node->chain_prune_time = jiffies; in stable_node_dup()
1645 &stable_node->hlist, hlist_dup) { in stable_node_dup()
1652 * stable_node->hlist if they point to freed pages in stable_node_dup()
1663 dup->rmap_hlist_len > found_rmap_hlist_len))) { in stable_node_dup()
1667 found_rmap_hlist_len = found->rmap_hlist_len; in stable_node_dup()
1679 if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) { in stable_node_dup()
1686 BUG_ON(stable_node->hlist.first->next); in stable_node_dup()
1692 rb_replace_node(&stable_node->node, &found->node, in stable_node_dup()
1695 ksm_stable_node_chains--; in stable_node_dup()
1696 ksm_stable_node_dups--; in stable_node_dup()
1707 * time. in stable_node_dup()
1710 } else if (stable_node->hlist.first != &found->hlist_dup && in stable_node_dup()
1727 hlist_del(&found->hlist_dup); in stable_node_dup()
1728 hlist_add_head(&found->hlist_dup, in stable_node_dup()
1729 &stable_node->hlist); in stable_node_dup()
1784 * stable_tree_search - search for page inside the stable tree
1804 if (page_node && page_node->head != &migrate_nodes) { in stable_tree_search()
1807 return &folio->page; in stable_tree_search()
1813 new = &root->rb_node; in stable_tree_search()
1836 ret = memcmp_pages(page, &tree_folio->page); in stable_tree_search()
1841 new = &parent->rb_left; in stable_tree_search()
1843 new = &parent->rb_right; in stable_tree_search()
1846 VM_BUG_ON(page_node->head != &migrate_nodes); in stable_tree_search()
1886 if (PTR_ERR(tree_folio) == -EBUSY) in stable_tree_search()
1887 return ERR_PTR(-EBUSY); in stable_tree_search()
1892 * so re-evaluate parent and new. in stable_tree_search()
1897 if (get_kpfn_nid(stable_node_dup->kpfn) != in stable_tree_search()
1898 NUMA(stable_node_dup->nid)) { in stable_tree_search()
1902 return &tree_folio->page; in stable_tree_search()
1909 list_del(&page_node->list); in stable_tree_search()
1910 DO_NUMA(page_node->nid = nid); in stable_tree_search()
1911 rb_link_node(&page_node->node, parent, new); in stable_tree_search()
1912 rb_insert_color(&page_node->node, root); in stable_tree_search()
1916 return &folio->page; in stable_tree_search()
1934 VM_BUG_ON(page_node->head != &migrate_nodes); in stable_tree_search()
1935 list_del(&page_node->list); in stable_tree_search()
1936 DO_NUMA(page_node->nid = nid); in stable_tree_search()
1937 rb_replace_node(&stable_node_dup->node, in stable_tree_search()
1938 &page_node->node, in stable_tree_search()
1945 rb_erase(&stable_node_dup->node, root); in stable_tree_search()
1952 VM_BUG_ON(page_node->head != &migrate_nodes); in stable_tree_search()
1953 list_del(&page_node->list); in stable_tree_search()
1954 DO_NUMA(page_node->nid = nid); in stable_tree_search()
1964 stable_node_dup->head = &migrate_nodes; in stable_tree_search()
1965 list_add(&stable_node_dup->list, stable_node_dup->head); in stable_tree_search()
1966 return &folio->page; in stable_tree_search()
1992 VM_BUG_ON(page_node->head != &migrate_nodes); in stable_tree_search()
1993 list_del(&page_node->list); in stable_tree_search()
1994 DO_NUMA(page_node->nid = nid); in stable_tree_search()
2000 * stable_tree_insert - insert stable tree node pointing to new ksm page
2021 new = &root->rb_node; in stable_tree_insert()
2043 ret = memcmp_pages(&kfolio->page, &tree_folio->page); in stable_tree_insert()
2048 new = &parent->rb_left; in stable_tree_insert()
2050 new = &parent->rb_right; in stable_tree_insert()
2061 INIT_HLIST_HEAD(&stable_node_dup->hlist); in stable_tree_insert()
2062 stable_node_dup->kpfn = kpfn; in stable_tree_insert()
2063 stable_node_dup->rmap_hlist_len = 0; in stable_tree_insert()
2064 DO_NUMA(stable_node_dup->nid = nid); in stable_tree_insert()
2066 rb_link_node(&stable_node_dup->node, parent, new); in stable_tree_insert()
2067 rb_insert_color(&stable_node_dup->node, root); in stable_tree_insert()
2087 * unstable_tree_search_insert - search for identical page,
2112 new = &root->rb_node; in unstable_tree_search_insert()
2138 new = &parent->rb_left; in unstable_tree_search_insert()
2141 new = &parent->rb_right; in unstable_tree_search_insert()
2147 * tree next time: only merge with it when across_nodes. in unstable_tree_search_insert()
2157 rmap_item->address |= UNSTABLE_FLAG; in unstable_tree_search_insert()
2158 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); in unstable_tree_search_insert()
2159 DO_NUMA(rmap_item->nid = nid); in unstable_tree_search_insert()
2160 rb_link_node(&rmap_item->node, parent, new); in unstable_tree_search_insert()
2161 rb_insert_color(&rmap_item->node, root); in unstable_tree_search_insert()
2168 * stable_tree_append - add another rmap_item to the linked list of
2183 * for the first time (and not when decreasing rmap_hlist_len) in stable_tree_append()
2186 BUG_ON(stable_node->rmap_hlist_len < 0); in stable_tree_append()
2188 stable_node->rmap_hlist_len++; in stable_tree_append()
2191 WARN_ON_ONCE(stable_node->rmap_hlist_len > in stable_tree_append()
2194 rmap_item->head = stable_node; in stable_tree_append()
2195 rmap_item->address |= STABLE_FLAG; in stable_tree_append()
2196 hlist_add_head(&rmap_item->hlist, &stable_node->hlist); in stable_tree_append()
2198 if (rmap_item->hlist.next) in stable_tree_append()
2203 rmap_item->mm->ksm_merging_pages++; in stable_tree_append()
2207 * cmp_and_merge_page - first see if page can be merged into the stable tree;
2227 if (stable_node->head != &migrate_nodes && in cmp_and_merge_page()
2228 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != in cmp_and_merge_page()
2229 NUMA(stable_node->nid)) { in cmp_and_merge_page()
2231 stable_node->head = &migrate_nodes; in cmp_and_merge_page()
2232 list_add(&stable_node->list, stable_node->head); in cmp_and_merge_page()
2234 if (stable_node->head != &migrate_nodes && in cmp_and_merge_page()
2235 rmap_item->head == stable_node) in cmp_and_merge_page()
2247 * If the hash value of the page has changed from the last time in cmp_and_merge_page()
2250 * to waste our time searching for something identical to it there. in cmp_and_merge_page()
2253 if (rmap_item->oldchecksum != checksum) { in cmp_and_merge_page()
2254 rmap_item->oldchecksum = checksum; in cmp_and_merge_page()
2264 if (kpage == page && rmap_item->head == stable_node) { in cmp_and_merge_page()
2272 if (PTR_ERR(kpage) == -EBUSY) in cmp_and_merge_page()
2361 if ((rmap_item->address & PAGE_MASK) == addr) in get_next_rmap_item()
2363 if (rmap_item->address > addr) in get_next_rmap_item()
2365 *rmap_list = rmap_item->rmap_list; in get_next_rmap_item()
2373 rmap_item->mm = mm_slot->slot.mm; in get_next_rmap_item()
2374 rmap_item->mm->ksm_rmap_items++; in get_next_rmap_item()
2375 rmap_item->address = addr; in get_next_rmap_item()
2376 rmap_item->rmap_list = *rmap_list; in get_next_rmap_item()
2384 * de-duplicating has already been tried unsuccessfully. If the age is
2423 age = rmap_item->age; in should_skip_rmap_item()
2425 rmap_item->age++; in should_skip_rmap_item()
2438 if (!rmap_item->remaining_skips) { in should_skip_rmap_item()
2439 rmap_item->remaining_skips = skip_age(age); in should_skip_rmap_item()
2445 rmap_item->remaining_skips--; in should_skip_rmap_item()
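The backoff is deliberately coarse: an item's age counts scan passes in which it failed to merge, and skip_age() maps higher ages to more skipped passes. An illustrative schedule follows; the real thresholds live in skip_age() and the numbers here are a sketch, not the kernel's:

static unsigned int skip_age(rmap_age_t age)
{
	if (age <= 3)		/* failed only recently: skip a single pass */
		return 1;
	if (age <= 8)		/* repeatedly unmergeable: skip several passes */
		return 4;
	return 8;		/* long-term unmergeable: retry rarely */
}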
2454 struct mm_slot *slot; in scan_get_next_rmap_item() local
2460 if (list_empty(&ksm_mm_head.slot.mm_node)) in scan_get_next_rmap_item()
2469 * A number of pages can hang around indefinitely in per-cpu in scan_get_next_rmap_item()
2504 slot = list_entry(mm_slot->slot.mm_node.next, in scan_get_next_rmap_item()
2506 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); in scan_get_next_rmap_item()
2517 ksm_scan.rmap_list = &mm_slot->rmap_list; in scan_get_next_rmap_item()
2520 slot = &mm_slot->slot; in scan_get_next_rmap_item()
2521 mm = slot->mm; in scan_get_next_rmap_item()
2529 if (!(vma->vm_flags & VM_MERGEABLE)) in scan_get_next_rmap_item()
2531 if (ksm_scan.address < vma->vm_start) in scan_get_next_rmap_item()
2532 ksm_scan.address = vma->vm_start; in scan_get_next_rmap_item()
2533 if (!vma->anon_vma) in scan_get_next_rmap_item()
2534 ksm_scan.address = vma->vm_end; in scan_get_next_rmap_item()
2536 while (ksm_scan.address < vma->vm_end) { in scan_get_next_rmap_item()
2561 &rmap_item->rmap_list; in scan_get_next_rmap_item()
2585 ksm_scan.rmap_list = &mm_slot->rmap_list; in scan_get_next_rmap_item()
2594 slot = list_entry(mm_slot->slot.mm_node.next, in scan_get_next_rmap_item()
2596 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); in scan_get_next_rmap_item()
2607 hash_del(&mm_slot->slot.hash); in scan_get_next_rmap_item()
2608 list_del(&mm_slot->slot.mm_node); in scan_get_next_rmap_item()
2612 clear_bit(MMF_VM_MERGEABLE, &mm->flags); in scan_get_next_rmap_item()
2613 clear_bit(MMF_VM_MERGE_ANY, &mm->flags); in scan_get_next_rmap_item()
2641 * ksm_do_scan - the ksm scanner main worker function.
2649 while (scan_npages-- && likely(!freezing(current))) { in ksm_do_scan()
2662 return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node); in ksmd_should_run()
2694 unsigned long vm_flags = vma->vm_flags; in __ksm_add_vma()
2707 if (!(vma->vm_flags & VM_MERGEABLE)) in __ksm_del_vma()
2710 if (vma->anon_vma) { in __ksm_del_vma()
2711 err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true); in __ksm_del_vma()
2720 * ksm_add_vma - Mark vma as mergeable if compatible
2726 struct mm_struct *mm = vma->vm_mm; in ksm_add_vma()
2728 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) in ksm_add_vma()
2756 * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
2767 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) in ksm_enable_merge_any()
2770 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { in ksm_enable_merge_any()
2776 set_bit(MMF_VM_MERGE_ANY, &mm->flags); in ksm_enable_merge_any()
2783 * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm,
2798 if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags)) in ksm_disable_merge_any()
2807 clear_bit(MMF_VM_MERGE_ANY, &mm->flags); in ksm_disable_merge_any()
2815 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) in ksm_disable()
2817 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) in ksm_disable()
2825 struct mm_struct *mm = vma->vm_mm; in ksm_madvise()
2830 if (vma->vm_flags & VM_MERGEABLE) in ksm_madvise()
2835 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { in ksm_madvise()
2848 if (vma->anon_vma) { in ksm_madvise()
2865 struct mm_slot *slot; in __ksm_enter() local
2870 return -ENOMEM; in __ksm_enter()
2872 slot = &mm_slot->slot; in __ksm_enter()
2875 needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node); in __ksm_enter()
2878 mm_slot_insert(mm_slots_hash, mm, slot); in __ksm_enter()
2883 * want ksmd to waste time setting up and tearing down an rmap_list. in __ksm_enter()
2890 list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node); in __ksm_enter()
2892 list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); in __ksm_enter()
2895 set_bit(MMF_VM_MERGEABLE, &mm->flags); in __ksm_enter()
2908 struct mm_slot *slot; in __ksm_exit() local
2917 * Beware: ksm may already have noticed it exiting and freed the slot. in __ksm_exit()
2921 slot = mm_slot_lookup(mm_slots_hash, mm); in __ksm_exit()
2922 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); in __ksm_exit()
2924 if (!mm_slot->rmap_list) { in __ksm_exit()
2925 hash_del(&slot->hash); in __ksm_exit()
2926 list_del(&slot->mm_node); in __ksm_exit()
2929 list_move(&slot->mm_node, in __ksm_exit()
2930 &ksm_scan.mm_slot->slot.mm_node); in __ksm_exit()
2937 clear_bit(MMF_VM_MERGE_ANY, &mm->flags); in __ksm_exit()
2938 clear_bit(MMF_VM_MERGEABLE, &mm->flags); in __ksm_exit()
2964 } else if (folio->index == linear_page_index(vma, addr) && in ksm_might_need_to_copy()
2965 anon_vma->root == vma->anon_vma->root) { in ksm_might_need_to_copy()
2969 return ERR_PTR(-EHWPOISON); in ksm_might_need_to_copy()
2975 mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) { in ksm_might_need_to_copy()
2983 return ERR_PTR(-EHWPOISON); in ksm_might_need_to_copy()
3014 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { in rmap_walk_ksm()
3015 struct anon_vma *anon_vma = rmap_item->anon_vma; in rmap_walk_ksm()
3021 if (rwc->try_lock) { in rmap_walk_ksm()
3022 rwc->contended = true; in rmap_walk_ksm()
3027 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, in rmap_walk_ksm()
3032 vma = vmac->vma; in rmap_walk_ksm()
3035 addr = rmap_item->address & PAGE_MASK; in rmap_walk_ksm()
3037 if (addr < vma->vm_start || addr >= vma->vm_end) in rmap_walk_ksm()
3045 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) in rmap_walk_ksm()
3048 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_ksm()
3051 if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { in rmap_walk_ksm()
3055 if (rwc->done && rwc->done(folio)) { in rmap_walk_ksm()
3081 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { in collect_procs_ksm()
3082 struct anon_vma *av = rmap_item->anon_vma; in collect_procs_ksm()
3093 anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0, in collect_procs_ksm()
3096 vma = vmac->vma; in collect_procs_ksm()
3097 if (vma->vm_mm == t->mm) { in collect_procs_ksm()
3098 addr = rmap_item->address & PAGE_MASK; in collect_procs_ksm()
3117 VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio); in folio_migrate_ksm()
3121 VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio); in folio_migrate_ksm()
3122 stable_node->kpfn = folio_pfn(newfolio); in folio_migrate_ksm()
3124 * newfolio->mapping was set in advance; now we need smp_wmb() in folio_migrate_ksm()
3125 * to make sure that the new stable_node->kpfn is visible in folio_migrate_ksm()
3126 * to ksm_get_folio() before it can see that folio->mapping in folio_migrate_ksm()
3150 if (stable_node->kpfn >= start_pfn && in stable_node_dup_remove_range()
3151 stable_node->kpfn < end_pfn) { in stable_node_dup_remove_range()
3177 &stable_node->hlist, hlist_dup) { in stable_node_chain_remove_range()
3181 if (hlist_empty(&stable_node->hlist)) { in stable_node_chain_remove_range()
3210 if (stable_node->kpfn >= start_pfn && in ksm_check_stable_tree()
3211 stable_node->kpfn < end_pfn) in ksm_check_stable_tree()
3227 * it is unsafe for them to touch the stable tree at this time. in ksm_memory_callback()
3242 * non-existent struct page. in ksm_memory_callback()
3244 ksm_check_stable_tree(mn->start_pfn, in ksm_memory_callback()
3245 mn->start_pfn + mn->nr_pages); in ksm_memory_callback()
3267 return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE - in ksm_process_profit()
3268 mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); in ksm_process_profit()
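Worked example: a process with 1000 merged pages, no KSM-placed zero pages, 4 KiB pages and 3000 rmap_items at roughly 64 bytes each (the struct size is arch- and config-dependent) has a profit of 1000 * 4096 - 3000 * 64 = 3,904,000 bytes, about 3.7 MiB; a negative value means KSM's metadata costs that mm more than the sharing saves.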
3297 return -EINVAL; in sleep_millisecs_store()
3320 return -EINVAL; in pages_to_scan_store()
3324 return -EINVAL; in pages_to_scan_store()
3346 return -EINVAL; in run_store()
3348 return -EINVAL; in run_store()
3398 return -EINVAL; in merge_across_nodes_store()
3404 err = -EBUSY; in merge_across_nodes_store()
3408 * This is the first time that we switch away from the in merge_across_nodes_store()
3418 err = -ENOMEM; in merge_across_nodes_store()
3452 return -EINVAL; in use_zero_pages_store()
3482 return -EINVAL; in max_page_sharing_store()
3491 err = -EBUSY; in max_page_sharing_store()
3534 ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared in pages_volatile_show()
3535 - ksm_pages_sharing - ksm_pages_unshared; in pages_volatile_show()
3565 general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE - in general_profit_show()
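Both sysfs figures are identities over the counters above: every rmap_item is in exactly one of the shared, sharing, unshared or volatile states, so e.g. 10000 rmap_items - 500 shared - 4000 sharing - 3000 unshared = 2500 volatile; and general_profit charges sizeof(struct ksm_rmap_item) for every rmap_item in the system against the pages saved by sharing and by zero-page dedup.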
3604 return -EINVAL; in stable_node_chains_prune_millisecs_store()
3634 return -EINVAL; in smart_scan_store()
3647 output = "[none] scan-time"; in advisor_mode_show()
3649 output = "none [scan-time]"; in advisor_mode_show()
3660 if (sysfs_streq("scan-time", buf)) in advisor_mode_store()
3665 return -EINVAL; in advisor_mode_store()
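As the show/store pair above implies, the knob is /sys/kernel/mm/ksm/advisor_mode and accepts exactly the strings "none" (the default) and "scan-time".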
3690 return -EINVAL; in advisor_max_cpu_store()
3712 return -EINVAL; in advisor_min_pages_to_scan_store()
3734 return -EINVAL; in advisor_max_pages_to_scan_store()
3756 return -EINVAL; in advisor_target_scan_time_store()
3758 return -EINVAL; in advisor_target_scan_time_store()