/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
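
/*
 * Usage sketch (illustrative only, not part of this header): the entry
 * points above are normally reached from userspace rather than called
 * directly.  Assuming a kernel built with CONFIG_KSM, a process can ask
 * for merging of one region via madvise(), which ends up in ksm_madvise(),
 * or opt in its whole address space via prctl(), which ends up in
 * ksm_enable_merge_any():
 *
 *	// userspace side, error handling omitted
 *	#include <sys/mman.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	madvise(addr, length, MADV_MERGEABLE);	// per-VMA opt-in
 *	prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);	// per-process opt-in
 */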

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern atomic_long_t ksm_zero_pages;
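
/*
 * Illustrative sketch (hypothetical caller, not lifted from mm/ code): with
 * the dirty-bit convention above, a walker that holds the PTE stable can
 * tell a KSM-placed zero page apart from one installed by an ordinary read
 * fault; the helpers below keep the global and per-mm counters in sync when
 * such a PTE is installed or removed:
 *
 *	pte_t pte = ptep_get(ptep);
 *
 *	if (is_ksm_zero_pte(pte)) {
 *		// shared zeropage placed by KSM (dirty bit set), accounted
 *		// in ksm_zero_pages and in mm->ksm_zero_pages
 *	} else if (is_zero_pfn(pte_pfn(pte))) {
 *		// ordinary zeropage from a read fault, not KSM's
 *	}
 */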

static inline void ksm_map_zero_page(struct mm_struct *mm)
{
	atomic_long_inc(&ksm_zero_pages);
	atomic_long_inc(&mm->ksm_zero_pages);
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		atomic_long_dec(&ksm_zero_pages);
		atomic_long_dec(&mm->ksm_zero_pages);
	}
}

static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
	return atomic_long_read(&mm->ksm_zero_pages);
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	/* Adding mm to ksm is best effort on fork. */
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		__ksm_enter(mm);
}

static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);
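
/*
 * Sketch of the handling a do_swap_page()-style caller needs (illustrative
 * only; the precise return contract lives in mm/ksm.c and can vary between
 * kernel versions):
 *
 *	folio = ksm_might_need_to_copy(folio, vma, addr);
 *	if (!folio) {
 *		// allocating the private copy failed: treat as OOM
 *	} else if (folio == ERR_PTR(-EHWPOISON)) {
 *		// the source page is hwpoisoned: fail the fault accordingly
 *	} else {
 *		// either the original folio (no copy was needed) or a
 *		// freshly copied anonymous folio to map into this vma
 *	}
 */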

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(struct folio *folio, struct page *page,
		struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
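/*
 * Rough profit model (see Documentation/admin-guide/mm/ksm.rst; the exact
 * accounting is in mm/ksm.c and has grown over time, e.g. KSM-placed zero
 * pages also count as savings):
 *
 *	profit ~= pages_saved * PAGE_SIZE -
 *		  ksm_rmap_items * sizeof(struct ksm_rmap_item)
 */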

#else  /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

static inline void collect_procs_ksm(struct folio *folio, struct page *page,
		struct list_head *to_kill, int force_early)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */