/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * vma_internal.h
 *
 * Header providing userland wrappers and shims for the functionality provided
 * by mm/vma_internal.h.
 *
 * We make the header guard the same as mm/vma_internal.h, so if this shim
 * header is included, it precludes the inclusion of the kernel one.
 */

#ifndef __MM_VMA_INTERNAL_H
#define __MM_VMA_INTERNAL_H

#define __private
#define __bitwise
#define __randomize_layout

#define CONFIG_MMU
#define CONFIG_PER_VMA_LOCK

#include <stdlib.h>

#include <linux/list.h>
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>

#define VM_WARN_ON(_expr) (WARN_ON(_expr))
#define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr))
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))

#define VM_NONE		0x00000000
#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008
#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000
#define VM_DONTEXPAND	0x00040000
#define VM_ACCOUNT	0x00100000
#define VM_MIXEDMAP	0x10000000
#define VM_STACK	VM_GROWSDOWN
#define VM_SHADOW_STACK	VM_NONE
#define VM_SOFTDIRTY	0

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

#define for_each_vma(__vmi, __vma) \
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end) \
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
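
/*
 * Illustrative sketch (not part of the original header): walking all VMAs,
 * then only those overlapping [addr, 0x3000) (the end address is exclusive),
 * given a vma_iterator set up via VMA_ITERATOR() below. use_vma() stands in
 * for arbitrary caller code:
 *
 *	struct vm_area_struct *vma;
 *
 *	for_each_vma(vmi, vma)
 *		use_vma(vma);
 *
 *	vma_iter_set(&vmi, addr);
 *	for_each_vma_range(vmi, vma, 0x3000)
 *		use_vma(vma);
 */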

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

#define test_and_set_bit(nr, addr) __test_and_set_bit(nr, addr)
#define test_and_clear_bit(nr, addr) __test_and_clear_bit(nr, addr)

/* Assume an x86-64-style 47-bit user address space. */
#define TASK_SIZE ((1ul << 47)-PAGE_SIZE)

#define AS_MM_ALL_LOCKS 2

/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL

#define pgoff_t unsigned long
typedef unsigned long	pgprotval_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef unsigned long	vm_flags_t;
typedef __bitwise unsigned int vm_fault_t;

/*
 * The shared stubs do not implement this; it amounts to an fprintf(stderr, ...)
 * either way :)
 */
#define pr_warn_once pr_err

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

struct kref {
	refcount_t refcount;
};

/*
 * Define the task command name length as enum, then it can be visible to
 * BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

struct task_struct {
	char comm[TASK_COMM_LEN];
	pid_t pid;
	struct mm_struct *mm;
};

struct task_struct *get_current(void);
#define current get_current()

struct anon_vma {
	struct anon_vma *root;
	struct rb_root_cached rb_root;

	/* Test fields. */
	bool was_cloned;
	bool was_unlinked;
};

struct anon_vma_chain {
	struct anon_vma *anon_vma;
	struct list_head same_vma;
};

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

struct vma_iterator {
	struct ma_state mas;
};

#define VMA_ITERATOR(name, __mm, __addr)	\
	struct vma_iterator name = {		\
		.mas = {			\
			.tree = &(__mm)->mm_mt,	\
			.index = __addr,	\
			.node = NULL,		\
			.status = ma_start,	\
		},				\
	}
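
/*
 * Illustrative sketch (not part of the original header): declaring an
 * iterator over @mm starting from address 0 and fetching the first VMA via
 * vma_next(), which is defined further below:
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma = vma_next(&vmi);
 */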

struct address_space {
	struct rb_root_cached	i_mmap;
	unsigned long		flags;
	atomic_t		i_mmap_writable;
};

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = {}

struct mm_struct {
	struct maple_tree mm_mt;
	int map_count;			/* number of VMAs */
	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
};

struct vma_lock {
	struct rw_semaphore lock;
};

struct file {
	struct address_space	*f_mapping;
};

struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	union {
		struct {
			/* VMA covers [vm_start; vm_end) addresses within mm */
			unsigned long vm_start;
			unsigned long vm_end;
		};
#ifdef CONFIG_PER_VMA_LOCK
		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
#endif
	};

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */

	/*
	 * Flags, see mm.h.
	 * To modify use vm_flags_{init|reset|set|clear|mod} functions.
	 */
	union {
		const vm_flags_t vm_flags;
		vm_flags_t __private __vm_flags;
	};
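
	/*
	 * Illustrative sketch (assumption: this mirrors the kernel's
	 * vm_flags_init() semantics, and do_something() is a placeholder):
	 * writers go through the private alias, readers through the
	 * const one:
	 *
	 *	vma->__vm_flags = VM_READ | VM_WRITE;
	 *	if (vma->vm_flags & VM_WRITE)
	 *		do_something();
	 */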

#ifdef CONFIG_PER_VMA_LOCK
	/* Flag to indicate areas detached from the mm->mm_mt tree */
	bool detached;

	/*
	 * Can only be written (using WRITE_ONCE()) while holding both:
	 *  - mmap_lock (in write mode)
	 *  - vm_lock->lock (in write mode)
	 * Can be read reliably while holding one of:
	 *  - mmap_lock (in read or write mode)
	 *  - vm_lock->lock (in read or write mode)
	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
	 * while holding nothing (except RCU to keep the VMA struct allocated).
	 *
	 * This sequence counter is explicitly allowed to overflow; sequence
	 * counter reuse can only lead to occasional unnecessary use of the
	 * slowpath.
	 */
	int vm_lock_seq;
	struct vma_lock *vm_lock;
#endif

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_ANON_VMA_NAME
	/*
	 * For private and shared anonymous mappings, a pointer to a null
	 * terminated string containing the name given to the vma, or NULL if
	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
	 */
	struct anon_vma_name *anon_name;
#endif
#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
#ifdef CONFIG_NUMA_BALANCING
	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct vm_fault {};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.  The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return __pgprot(pgprot_val(oldprot) | pgprot_val(newprot));
}

static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(vm_flags);
}

static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

static inline bool vma_lock_alloc(struct vm_area_struct *vma)
{
	vma->vm_lock = calloc(1, sizeof(struct vma_lock));

	if (!vma->vm_lock)
		return false;

	init_rwsem(&vma->vm_lock->lock);
	vma->vm_lock_seq = -1;

	return true;
}
413
414 static inline void vma_assert_write_locked(struct vm_area_struct *);
vma_mark_detached(struct vm_area_struct * vma,bool detached)415 static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
416 {
417 /* When detaching vma should be write-locked */
418 if (detached)
419 vma_assert_write_locked(vma);
420 vma->detached = detached;
421 }

extern const struct vm_operations_struct vma_dummy_vm_ops;

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma_mark_detached(vma, false);
}

static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma = calloc(1, sizeof(struct vm_area_struct));

	if (!vma)
		return NULL;

	vma_init(vma, mm);
	if (!vma_lock_alloc(vma)) {
		free(vma);
		return NULL;
	}

	return vma;
}

static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = calloc(1, sizeof(struct vm_area_struct));

	if (!new)
		return NULL;

	memcpy(new, orig, sizeof(*new));
	if (!vma_lock_alloc(new)) {
		free(new);
		return NULL;
	}
	INIT_LIST_HEAD(&new->anon_vma_chain);

	return new;
}
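
/*
 * Illustrative lifecycle sketch (not part of the original header), pairing
 * the allocators above with vma_set_range() and vm_area_free(), both defined
 * further below:
 *
 *	struct vm_area_struct *vma = vm_area_alloc(mm);
 *
 *	if (vma) {
 *		vma_set_range(vma, 0x1000, 0x2000, 0);
 *		...
 *		vm_area_free(vma);
 *	}
 */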

/*
 * These are defined in vma.h, but sadly vm_stat_account() is referenced by
 * kernel/fork.c, so we have to make these broadly available there, and
 * temporarily define them here to resolve the dependency cycle.
 */

#define is_exec_mapping(flags) \
	((flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC)

#define is_stack_mapping(flags) \
	(((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK))

#define is_data_mapping(flags) \
	((flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE)

static inline void vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				   long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

#undef is_exec_mapping
#undef is_stack_mapping
#undef is_data_mapping
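
/*
 * Worked example (illustrative): under the classification above,
 * VM_READ | VM_EXEC counts towards exec_vm, VM_READ | VM_WRITE towards
 * data_vm, and VM_WRITE | VM_SHARED towards neither (total_vm only); any
 * mapping with VM_STACK set counts towards stack_vm.
 */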

/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}

static inline void mapping_allow_writable(struct address_space *mapping)
{
	atomic_inc(&mapping->i_mmap_writable);
}

static inline void vma_set_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	/* Callers pass an exclusive end; maple tree limits are inclusive. */
	return mas_find(&vmi->mas, max - 1);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void mmap_assert_locked(struct mm_struct *);
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						unsigned long start_addr,
						unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}

static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/* Defined in vma.h, so temporarily define here to avoid circular dependency. */
#define vma_iter_load(vmi) \
	mas_walk(&(vmi)->mas)

static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}
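
/*
 * Example (illustrative): with VMAs [0x1000, 0x2000) and [0x3000, 0x4000)
 * mapped, find_vma_prev(mm, 0x3500, &prev) returns the second VMA with prev
 * pointing at the first; find_vma_prev(mm, 0x2500, &prev) also returns the
 * second VMA (the next one above the unmapped address), again with prev set
 * to the first.
 */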

#undef vma_iter_load

static inline void vma_iter_init(struct vma_iterator *vmi,
		struct mm_struct *mm, unsigned long addr)
{
	mas_init(&vmi->mas, &mm->mm_mt, addr);
}

/* Stubbed functions. */

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline void fput(struct file *)
{
}

static inline void mpol_put(struct mempolicy *)
{
}

static inline void vma_lock_free(struct vm_area_struct *vma)
{
	free(vma->vm_lock);
}

static inline void __vm_area_free(struct vm_area_struct *vma)
{
	vma_lock_free(vma);
	free(vma);
}

static inline void vm_area_free(struct vm_area_struct *vma)
{
	__vm_area_free(vma);
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *, struct mm_struct *)
{
}

static inline void update_hiwater_rss(struct mm_struct *)
{
}

static inline void update_hiwater_vm(struct mm_struct *)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
		      struct vm_area_struct *vma, unsigned long start_addr,
		      unsigned long end_addr, unsigned long tree_end,
		      bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)start_addr;
	(void)end_addr;
	(void)tree_end;
	(void)mm_wr_locked;
}

static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	(void)tlb;
	(void)mas;
	(void)vma;
	(void)floor;
	(void)ceiling;
	(void)mm_wr_locked;
}

static inline void mapping_unmap_writable(struct address_space *)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *)
{
}

static inline void get_file(struct file *)
{
}

static inline int vma_dup_policy(struct vm_area_struct *, struct vm_area_struct *)
{
	return 0;
}

static inline int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	/* For testing purposes: we indicate that an anon_vma has been cloned. */
	if (src->anon_vma != NULL) {
		dst->anon_vma = src->anon_vma;
		dst->anon_vma->was_cloned = true;
	}

	return 0;
}

static inline void vma_start_write(struct vm_area_struct *vma)
{
	/* Used to indicate to tests that a write operation has begun. */
	vma->vm_lock_seq++;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)adjust_next;
}

static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *,
					    struct rb_root_cached *)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain*,
						 struct rb_root_cached *)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain*,
						 struct rb_root_cached *)
{
}

static inline void uprobe_mmap(struct vm_area_struct *)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	(void)vma;
	(void)start;
	(void)end;
}

static inline void i_mmap_lock_write(struct address_space *)
{
}

static inline void anon_vma_lock_write(struct anon_vma *)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *)
{
}

static inline void unlink_anon_vmas(struct vm_area_struct *vma)
{
	/* For testing purposes, indicate that the anon_vma was unlinked. */
	vma->anon_vma->was_unlinked = true;
}

static inline void anon_vma_unlock_write(struct anon_vma *)
{
}

static inline void i_mmap_unlock_write(struct address_space *)
{
}

static inline void anon_vma_merge(struct vm_area_struct *,
				  struct vm_area_struct *)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	(void)vma;
	(void)start;
	(void)end;
	(void)unmaps;

	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *)
{
}

static inline void mmap_read_unlock(struct mm_struct *)
{
}

static inline void mmap_write_unlock(struct mm_struct *)
{
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;

	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start,
			      unsigned long end)
{
	(void)mm;
	(void)start;
	(void)end;
}

static inline void mmap_assert_locked(struct mm_struct *)
{
}

static inline bool mpol_equal(struct mempolicy *, struct mempolicy *)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	(void)vma;
	(void)vm_flags;
}

static inline bool mapping_can_writeback(struct address_space *)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *)
{
}

static inline void mutex_lock(struct mutex *)
{
}

static inline void mutex_unlock(struct mutex *)
{
}

static inline bool mutex_is_locked(struct mutex *)
{
	return true;
}

static inline bool signal_pending(void *)
{
	return false;
}

#endif	/* __MM_VMA_INTERNAL_H */