Lines Matching "fine-tune" (include/linux/sched/mm.h)

1 /* SPDX-License-Identifier: GPL-2.0 */
19 * mmgrab() - Pin a &struct mm_struct.
32 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
37 atomic_inc(&mm->mm_count); in mmgrab()
52 * user-space, after storing to rq->curr. in mmdrop()
54 if (unlikely(atomic_dec_and_test(&mm->mm_count))) in mmdrop()
77 if (atomic_dec_and_test(&mm->mm_count)) in mmdrop_sched()
78 call_rcu(&mm->delayed_drop, __mmdrop_delayed); in mmdrop_sched()
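For context, a minimal sketch (not from the matched file; the struct and function names are made up) of the pinning pattern mmgrab()/mmdrop() implement: the reference is taken on mm->mm_count, so the struct mm_struct stays allocated, while the address space itself may still be torn down underneath.

#include <linux/sched/mm.h>

/* Hypothetical holder that keeps a struct mm_struct alive for later use. */
struct mm_tracker {
	struct mm_struct *mm;
};

static void mm_tracker_attach(struct mm_tracker *t, struct mm_struct *mm)
{
	mmgrab(mm);		/* pins mm->mm_count, not the mappings */
	t->mm = mm;
}

static void mm_tracker_detach(struct mm_tracker *t)
{
	mmdrop(t->mm);		/* last reference frees the struct mm_struct */
	t->mm = NULL;
}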
116 * mmget() - Pin the address space associated with a &struct mm_struct.
128 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
133 atomic_inc(&mm->mm_users); in mmget()
138 return atomic_inc_not_zero(&mm->mm_users); in mmget_not_zero()
141 /* mmput gets rid of the mappings and all user-space */
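mmget()/mmput(), by contrast, pin mm->mm_users, i.e. the mappings themselves. A hedged sketch of the usual pattern: get_task_mm() is the real helper that takes this reference on another task's mm, while walk_remote_mm() is a made-up name.

#include <linux/errno.h>
#include <linux/sched/mm.h>

static int walk_remote_mm(struct task_struct *task)
{
	/* get_task_mm() returns the mm with mm_users elevated, or NULL. */
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		return -EINVAL;

	/* ... inspect the address space here (with mmap_lock held) ... */

	mmput(mm);		/* drop the pin; may tear the mappings down */
	return 0;
}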
222 * need RCU to access ->real_parent if CLONE_VM was used along with in in_vfork()
225 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not in in_vfork()
229 * ->real_parent is not necessarily the task doing vfork(), so in in in_vfork()
232 * And in this case we can't trust the real_parent->mm == tsk->mm in in_vfork()
234 * another oom-unkillable task does this it should blame itself. in in_vfork()
237 ret = tsk->vfork_done && in in_vfork()
238 rcu_dereference(tsk->real_parent)->mm == tsk->mm; in in_vfork()
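For readability, here is the in_vfork() body the matched lines come from, reconstructed as a sketch following the surrounding source: ->real_parent may only be dereferenced under rcu_read_lock(), and the ->mm comparison can be a false negative, which the callers tolerate per the comment above.

#include <linux/rcupdate.h>
#include <linux/sched.h>

static bool in_vfork_sketch(struct task_struct *tsk)
{
	bool ret;

	rcu_read_lock();
	ret = tsk->vfork_done &&
	      rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}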
245 * Applies per-task gfp context to the given allocation flags.
252 unsigned int pflags = READ_ONCE(current->flags); in current_gfp_context()
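A sketch of the consumer side of current_gfp_context(): an allocation wrapper filters its gfp mask through it so the scoped PF_MEMALLOC_NOIO/NOFS/PIN flags on current take effect. my_alloc_page() is a made-up name.

#include <linux/gfp.h>
#include <linux/sched/mm.h>

static struct page *my_alloc_page(gfp_t gfp)
{
	/* Drops __GFP_IO/__GFP_FS/__GFP_MOVABLE per current's scope flags. */
	return alloc_pages(current_gfp_context(gfp), 0);
}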
282 /* Any memory-allocation retry loop should use
286 * and a central place to fine tune the waiting as the MM
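The "fine tune" match above is the comment documenting memalloc_retry_wait(). A sketch of the retry-loop shape it asks for; alloc_retrying() is illustrative.

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_retrying(size_t size)
{
	void *p;

	do {
		p = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
		if (p)
			return p;
		/* Central place for the MM to tune how retries wait. */
		memalloc_retry_wait(GFP_NOFS);
	} while (1);
}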
309 * might_alloc - Mark possible allocation sites
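might_alloc() annotates a path that may allocate even when its fast path usually does not, so debug builds catch atomic-context callers regardless of which branch runs. fetch_buf() is a made-up example.

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *fetch_buf(void **cache, size_t size, gfp_t gfp)
{
	might_alloc(gfp);	/* checks fire even when the cache hits */

	if (*cache)
		return *cache;

	return kmalloc(size, gfp);
}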
325 * memalloc_flags_save - Add a PF_* flag to current->flags, save old value
332 unsigned oldflags = ~current->flags & flags; in memalloc_flags_save()
333 current->flags |= flags; in memalloc_flags_save()
339 current->flags &= ~flags; in memalloc_flags_restore()
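memalloc_flags_save() returns only the bits of @flags that were not already set, and memalloc_flags_restore() clears exactly those bits, so nested scopes unwind correctly. A sketch of how the scope helpers below sit on top of this primitive; scoped_noio_begin()/end() are made-up names, but recent kernels implement memalloc_noio_save()/restore() in essentially this way.

#include <linux/sched.h>
#include <linux/sched/mm.h>

static inline unsigned int scoped_noio_begin(void)
{
	return memalloc_flags_save(PF_MEMALLOC_NOIO);
}

static inline void scoped_noio_end(unsigned int flags)
{
	memalloc_flags_restore(flags);
}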
343 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
360 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
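A usage sketch for the NOIO scope: a resume/reset style path brackets code that may allocate, so reclaim triggered by those allocations cannot issue I/O back into the not-yet-ready device. Both functions are hypothetical; the point is that build_config() keeps using GFP_KERNEL without knowing about the caller's constraint.

#include <linux/errno.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Library code that knows nothing about the caller's I/O constraint. */
static int build_config(void)
{
	void *buf = kzalloc(4096, GFP_KERNEL);	/* behaves as GFP_NOIO here */

	if (!buf)
		return -ENOMEM;
	kfree(buf);
	return 0;
}

static int resume_device(void)
{
	unsigned int noio = memalloc_noio_save();
	int ret = build_config();

	memalloc_noio_restore(noio);
	return ret;
}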
373 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
390 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
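The NOFS scope follows the same shape, typically spanning a filesystem transaction so reclaim cannot recurse into the filesystem and deadlock on it. A sketch with a made-up transaction object carrying the saved state (real filesystems such as XFS stash it similarly); start and commit must run in the same task, since the flag lives in current->flags.

#include <linux/sched/mm.h>

/* Hypothetical transaction object carrying the saved scope state. */
struct txn {
	unsigned int saved_nofs;
};

static void txn_start(struct txn *tp)
{
	tp->saved_nofs = memalloc_nofs_save();	/* scope spans the transaction */
}

static void txn_commit(struct txn *tp)
{
	/* ... write back metadata ... */
	memalloc_nofs_restore(tp->saved_nofs);
}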
403 * memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope.
415 * pre-allocated pool (e.g. mempool) should always be considered before using
431 * memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope.
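A cautious sketch for the __GFP_MEMALLOC scope: only for paths whose allocations directly help free memory, and, as the documentation above stresses, a pre-allocated mempool should always be considered first. reclaim_path_work() is illustrative.

#include <linux/errno.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

static int reclaim_path_work(void)
{
	unsigned int flags = memalloc_noreclaim_save();
	void *tmp;
	int ret = 0;

	/* May dip into memory reserves: keep it small and short-lived. */
	tmp = kmalloc(64, GFP_NOWAIT);
	if (!tmp)
		ret = -ENOMEM;
	kfree(tmp);

	memalloc_noreclaim_restore(flags);
	return ret;
}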
444 * memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope.
459 * memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope.
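A sketch for the pinning scope: while it is active, allocations lose __GFP_MOVABLE, so newly allocated pages cannot land in CMA or ZONE_MOVABLE where a long-term pin would block migration. In mainline this bracketing lives inside the FOLL_LONGTERM GUP path; longterm_pin_scope() is only an illustrative wrapper.

#include <linux/sched/mm.h>

static int longterm_pin_scope(int (*do_pin)(void *arg), void *arg)
{
	unsigned int flags = memalloc_pin_save();
	int ret;

	ret = do_pin(arg);	/* allocations in here lack __GFP_MOVABLE */

	memalloc_pin_restore(flags);
	return ret;
}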
474 * set_active_memcg - Starts the remote memcg charging scope.
497 old = current->active_memcg; in set_active_memcg()
498 current->active_memcg = memcg; in set_active_memcg()
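A sketch of remote charging with set_active_memcg(): __GFP_ACCOUNT allocations made inside the scope are charged to the given memcg instead of current's. alloc_on_behalf_of() is a made-up helper; kernel users such as the bpf map allocators use the same bracket.

#include <linux/memcontrol.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_on_behalf_of(struct mem_cgroup *memcg, size_t size)
{
	struct mem_cgroup *old;
	void *p;

	old = set_active_memcg(memcg);
	p = kmalloc(size, GFP_KERNEL_ACCOUNT);	/* charged to @memcg */
	set_active_memcg(old);

	return p;
}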
534 if (current->mm != mm) in membarrier_mm_sync_core_before_usermode()
536 if (likely(!(atomic_read(&mm->membarrier_state) & in membarrier_mm_sync_core_before_usermode()