/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
#include <linux/sched/coredump.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}
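
/*
 * Example (illustrative sketch only; the surrounding deferred-work context
 * is hypothetical and not part of this header): keep an mm_struct alive
 * across a context that may outlive the owning task, then take a temporary
 * reference on the address space before touching it.
 *
 *	struct mm_struct *mm = current->mm;
 *
 *	mmgrab(mm);			// mm_count ref: the struct won't be freed
 *	...
 *	if (mmget_not_zero(mm)) {	// address space still around?
 *		// safe to access the address space here
 *		mmput(mm);
 *	}
 *	mmdrop(mm);			// release the mmgrab() reference
 */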

static inline void smp_mb__after_mmgrab(void)
{
	smp_mb__after_atomic();
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
 * by far the least expensive way to do that.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
 * kernels via RCU.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
	/* Provides a full memory barrier. See mmdrop() */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif

/* Helpers for lazy TLB mm refcounting */
static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmgrab(mm);
}

static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
		mmdrop(mm);
	} else {
		/*
		 * mmdrop_lazy_tlb must provide a full memory barrier, see the
		 * membarrier comment in finish_task_switch() which relies on this.
		 */
		smp_mb();
	}
}

static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmdrop_sched(mm);
	else
		smp_mb(); /* see mmdrop_lazy_tlb() above */
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
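
/*
 * Example (illustrative sketch; error handling trimmed): the usual pattern
 * for /proc-style readers that inspect another task's address space, using
 * get_task_mm() and mmput() declared below.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		mmap_read_lock(mm);
 *		// walk the VMAs, read statistics, ...
 *		mmap_read_unlock(mm);
 *		mmput(mm);
 *	}
 */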

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* Same as above, but performs the slow path from async context. Can
 * also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr, len, flags)	(TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags, vm_flags_t vm_flags);
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags, vm_flags_t vm_flags);

unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp,
				   unsigned long addr, unsigned long len,
				   unsigned long pgoff, unsigned long flags);

unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
					   struct file *filp,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags,
					   vm_flags_t vm_flags);

unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags, vm_flags_t vm_flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}
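
/*
 * Example (illustrative): with PF_MEMALLOC_NOFS set on the current task, a
 * nominal GFP_KERNEL request is narrowed before it reaches the page
 * allocator:
 *
 *	current_gfp_context(GFP_KERNEL) == GFP_KERNEL & ~__GFP_FS == GFP_NOFS
 *
 * i.e. callers keep passing their usual gfp mask and the per-task scope
 * flags are applied for them.
 */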

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/* Any memory-allocation retry loop should use
 * memalloc_retry_wait(), and pass the flags for the most
 * constrained allocation attempt that might have failed.
 * This provides useful documentation of where loops are,
 * and a central place to fine tune the waiting as the MM
 * implementation changes.
 */
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
	/* We use io_schedule_timeout because waiting for memory
	 * typically includes waiting for dirty pages to be
	 * written out, which requires IO.
	 */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	gfp_flags = current_gfp_context(gfp_flags);
	if (gfpflags_allow_blocking(gfp_flags) &&
	    !(gfp_flags & __GFP_NORETRY))
		/* Probably waited already, no need for much more */
		io_schedule_timeout(1);
	else
		/* Probably didn't wait, and has now released a lock,
		 * so now is a good time to wait
		 */
		io_schedule_timeout(HZ/50);
}
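
/*
 * Example (illustrative sketch; alloc_thing() is a hypothetical helper):
 * a retry loop that documents what it is waiting for and lets the MM tune
 * the back-off in one place.
 *
 *	while (!(thing = alloc_thing(GFP_NOFS)))
 *		memalloc_retry_wait(GFP_NOFS);
 */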

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in functions
 * that might allocate, but often don't. Compiles to nothing without
 * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
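
/*
 * Example (illustrative sketch; struct cache and cache_lookup() are
 * hypothetical): annotate the allocation that usually does not happen, so
 * lockdep still exercises the reclaim dependencies on every call.
 *
 *	void *lookup_or_create(struct cache *c, size_t size, gfp_t gfp)
 *	{
 *		void *obj;
 *
 *		might_alloc(gfp);	// checked even when the lookup hits
 *
 *		obj = cache_lookup(c);
 *		if (!obj)
 *			obj = kmalloc(size, gfp);	// the rare path
 *		return obj;
 *	}
 */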

/**
 * memalloc_flags_save - Add a PF_* flag to current->flags, save old value
 *
 * This allows PF_* flags to be conveniently added, irrespective of current
 * value, and then the old version restored with memalloc_flags_restore().
 */
static inline unsigned memalloc_flags_save(unsigned flags)
{
	unsigned oldflags = ~current->flags & flags;
	current->flags |= flags;
	return oldflags;
}

static inline void memalloc_flags_restore(unsigned flags)
{
	current->flags &= ~flags;
}

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with flags
 * returned by this function.
 *
 * Context: This function is safe to be used from any context.
 * Return: The saved flags to be passed to memalloc_noio_restore.
 */
static inline unsigned int memalloc_noio_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_NOIO);
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save().
 * Always make sure that the given flags are the return value from the
 * pairing memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}
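
/*
 * Example (illustrative sketch; the device-resume callback is hypothetical):
 * the whole IO-sensitive section runs in GFP_NOIO scope, so helpers deep in
 * the call chain can keep using GFP_KERNEL without deadlocking on the device
 * they are bringing back up.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		unsigned int noio_flags;
 *		int ret;
 *
 *		noio_flags = memalloc_noio_save();
 *		ret = foo_reinit_queues(dev);	// may allocate with GFP_KERNEL
 *		memalloc_noio_restore(noio_flags);
 *		return ret;
 *	}
 */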

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with flags
 * returned by this function.
 *
 * Context: This function is safe to be used from any context.
 * Return: The saved flags to be passed to memalloc_nofs_restore.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_NOFS);
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save().
 * Always make sure that the given flags are the return value from the
 * pairing memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}
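
/*
 * Example (illustrative sketch; the transaction helpers are hypothetical):
 * a filesystem holding transaction state makes all nested allocations
 * implicitly GFP_NOFS instead of sprinkling GFP_NOFS at every call site.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	fs_start_transaction(sb);
 *	...	// any GFP_KERNEL allocation here behaves as GFP_NOFS
 *	fs_commit_transaction(sb);
 *
 *	memalloc_nofs_restore(nofs_flags);
 */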

/**
 * memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope.
 *
 * This function marks the beginning of the __GFP_MEMALLOC allocation scope.
 * All further allocations will implicitly add the __GFP_MEMALLOC flag, which
 * prevents entering reclaim and allows access to all memory reserves. This
 * should only be used when the caller guarantees the allocation will allow more
 * memory to be freed very shortly, i.e. it needs to allocate some memory in
 * the process of freeing memory, and cannot reclaim due to potential recursion.
 *
 * Users of this scope have to be extremely careful to not deplete the reserves
 * completely and implement a throttling mechanism which controls the
 * consumption of the reserve based on the amount of freed memory. Usage of a
 * pre-allocated pool (e.g. mempool) should always be considered before using
 * this scope.
 *
 * Individual allocations under the scope can opt out using __GFP_NOMEMALLOC.
 *
 * Context: This function should not be used in an interrupt context as that
 * does not give PF_MEMALLOC access to reserves.
 * See __gfp_pfmemalloc_flags().
 * Return: The saved flags to be passed to memalloc_noreclaim_restore.
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC);
}

/**
 * memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit __GFP_MEMALLOC scope started by memalloc_noreclaim_save().
 * Always make sure that the given flags are the return value from the
 * pairing memalloc_noreclaim_save() call.
 */
static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}
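
/*
 * Example (illustrative sketch; build_writeback_request() is a hypothetical
 * helper): a memory-freeing path that must allocate a small control
 * structure first dips into the reserves for that step only, then drops the
 * scope again.
 *
 *	unsigned int noreclaim_flags = memalloc_noreclaim_save();
 *
 *	req = build_writeback_request(folio);	// small, short-lived allocation
 *
 *	memalloc_noreclaim_restore(noreclaim_flags);
 */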

/**
 * memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope.
 *
 * This function marks the beginning of the ~__GFP_MOVABLE allocation scope.
 * All further allocations will implicitly remove the __GFP_MOVABLE flag, which
 * will constrain the allocations to zones that allow long term pinning, i.e.
 * not ZONE_MOVABLE zones.
 *
 * Return: The saved flags to be passed to memalloc_pin_restore.
 */
static inline unsigned int memalloc_pin_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_PIN);
}

/**
 * memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit ~__GFP_MOVABLE scope started by memalloc_pin_save().
 * Always make sure that the given flags are the return value from the pairing
 * memalloc_pin_save() call.
 */
static inline void memalloc_pin_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}
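
/*
 * Example (illustrative sketch, modelled on how the long-term GUP path uses
 * the scope internally; collect_and_migrate_pages() is a hypothetical
 * helper): any page allocated while the scope is active (e.g. a migration
 * target for a page that has to leave ZONE_MOVABLE) comes from a zone that
 * allows long term pinning.
 *
 *	unsigned int pin_flags = memalloc_pin_save();
 *
 *	ret = collect_and_migrate_pages(pages, nr_pages);
 *
 *	memalloc_pin_restore(pin_flags);
 */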

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
 * given memcg.
 *
 * Please make sure that the caller has a reference to the passed memcg structure,
 * so its lifetime is guaranteed to exceed the scope between two
 * set_active_memcg() calls.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
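
/*
 * Example (illustrative sketch): charge an allocation made on behalf of
 * another cgroup, e.g. from a workqueue, to that cgroup rather than to the
 * worker's own. The memcg pointer is assumed to be already referenced by
 * the caller, as required above.
 *
 *	struct mem_cgroup *old_memcg;
 *
 *	old_memcg = set_active_memcg(memcg);
 *	buf = kmalloc(len, GFP_KERNEL | __GFP_ACCOUNT);
 *	set_active_memcg(old_memcg);
 */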

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
	MEMBARRIER_FLAG_RSEQ = (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */