1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   *  linux/mm/oom_kill.c
4   *
5   *  Copyright (C)  1998,2000  Rik van Riel
6   *	Thanks go out to Claus Fischer for some serious inspiration and
7   *	for goading me into coding this file...
8   *  Copyright (C)  2010  Google, Inc.
9   *	Rewritten by David Rientjes
10   *
11   *  The routines in this file are used to kill a process when
12   *  we're seriously out of memory. This gets called from __alloc_pages()
13   *  in mm/page_alloc.c when we really run out of memory.
14   *
15   *  Since we won't call these routines often (on a well-configured
16   *  machine) this file will double as a 'coding guide' and a signpost
17   *  for newbie kernel hackers. It features several pointers to major
18   *  kernel subsystems and hints as to where to find out what things do.
19   */
20  
21  #include <linux/oom.h>
22  #include <linux/mm.h>
23  #include <linux/err.h>
24  #include <linux/gfp.h>
25  #include <linux/sched.h>
26  #include <linux/sched/mm.h>
27  #include <linux/sched/coredump.h>
28  #include <linux/sched/task.h>
29  #include <linux/sched/debug.h>
30  #include <linux/swap.h>
31  #include <linux/syscalls.h>
32  #include <linux/timex.h>
33  #include <linux/jiffies.h>
34  #include <linux/cpuset.h>
35  #include <linux/export.h>
36  #include <linux/notifier.h>
37  #include <linux/memcontrol.h>
38  #include <linux/mempolicy.h>
39  #include <linux/security.h>
40  #include <linux/ptrace.h>
41  #include <linux/freezer.h>
42  #include <linux/ftrace.h>
43  #include <linux/ratelimit.h>
44  #include <linux/kthread.h>
45  #include <linux/init.h>
46  #include <linux/mmu_notifier.h>
47  #include <linux/cred.h>
48  
49  #include <asm/tlb.h>
50  #include "internal.h"
51  #include "slab.h"
52  
53  #define CREATE_TRACE_POINTS
54  #include <trace/events/oom.h>
55  
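/*
 * OOM sysctls, documented in Documentation/admin-guide/sysctl/vm.rst:
 * panic_on_oom panics instead of killing (2 panics even for constrained
 * ooms), oom_kill_allocating_task kills current instead of scanning for the
 * best victim, and oom_dump_tasks (on by default) dumps the eligible task
 * list when the oom killer fires.
 */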
56  static int sysctl_panic_on_oom;
57  static int sysctl_oom_kill_allocating_task;
58  static int sysctl_oom_dump_tasks = 1;
59  
60  /*
61   * Serializes oom killer invocations (out_of_memory()) from all contexts to
62   * prevent overly eager oom killing (e.g. when the oom killer is invoked
63   * from different domains).
64   *
65   * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
66   * and mark_oom_victim
67   */
68  DEFINE_MUTEX(oom_lock);
69  /* Serializes oom_score_adj and oom_score_adj_min updates */
70  DEFINE_MUTEX(oom_adj_mutex);
71  
72  static inline bool is_memcg_oom(struct oom_control *oc)
73  {
74  	return oc->memcg != NULL;
75  }
76  
77  #ifdef CONFIG_NUMA
78  /**
79   * oom_cpuset_eligible() - check task eligibility for kill
80   * @start: task struct of the task to consider
81   * @oc: pointer to struct oom_control
82   *
83   * Task eligibility is determined by whether or not a candidate thread of
84   * @start shares mempolicy nodes with current, if it is bound by such a
85   * policy, and whether or not its allowed cpuset nodes intersect current's.
86   *
87   * This function assumes oom-killer context and that 'current' has
88   * triggered the oom-killer.
89   */
90  static bool oom_cpuset_eligible(struct task_struct *start,
91  				struct oom_control *oc)
92  {
93  	struct task_struct *tsk;
94  	bool ret = false;
95  	const nodemask_t *mask = oc->nodemask;
96  
97  	rcu_read_lock();
98  	for_each_thread(start, tsk) {
99  		if (mask) {
100  			/*
101  			 * If this is a mempolicy constrained oom, tsk's
102  			 * cpuset is irrelevant.  Only return true if its
103  			 * mempolicy intersects current, otherwise it may be
104  			 * needlessly killed.
105  			 */
106  			ret = mempolicy_in_oom_domain(tsk, mask);
107  		} else {
108  			/*
109  			 * This is not a mempolicy constrained oom, so only
110  			 * check the mems of tsk's cpuset.
111  			 */
112  			ret = cpuset_mems_allowed_intersects(current, tsk);
113  		}
114  		if (ret)
115  			break;
116  	}
117  	rcu_read_unlock();
118  
119  	return ret;
120  }
121  #else
122  static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
123  {
124  	return true;
125  }
126  #endif /* CONFIG_NUMA */
127  
128  /*
129   * The process p may have detached its own ->mm while exiting or through
130   * kthread_use_mm(), but one or more of its subthreads may still have a valid
131   * pointer.  Return p, or any of its subthreads with a valid ->mm, with
132   * task_lock() held.
133   */
134  struct task_struct *find_lock_task_mm(struct task_struct *p)
135  {
136  	struct task_struct *t;
137  
138  	rcu_read_lock();
139  
140  	for_each_thread(p, t) {
141  		task_lock(t);
142  		if (likely(t->mm))
143  			goto found;
144  		task_unlock(t);
145  	}
146  	t = NULL;
147  found:
148  	rcu_read_unlock();
149  
150  	return t;
151  }
152  
153  /*
154   * order == -1 means the oom kill was requested via sysrq; otherwise the
155   * order is only used for display purposes.
156   */
157  static inline bool is_sysrq_oom(struct oom_control *oc)
158  {
159  	return oc->order == -1;
160  }
161  
162  /* return true if the task is not adequate as candidate victim task. */
163  static bool oom_unkillable_task(struct task_struct *p)
164  {
165  	if (is_global_init(p))
166  		return true;
167  	if (p->flags & PF_KTHREAD)
168  		return true;
169  	return false;
170  }
171  
172  /*
173   * Check whether the amount of unreclaimable slab is greater than
174   * all user memory (LRU pages).
175   * dump_unreclaimable_slab() could help in the case that the oom is
176   * due to too much unreclaimable slab used by the kernel.
177   */
178  static bool should_dump_unreclaim_slab(void)
179  {
180  	unsigned long nr_lru;
181  
182  	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
183  		 global_node_page_state(NR_INACTIVE_ANON) +
184  		 global_node_page_state(NR_ACTIVE_FILE) +
185  		 global_node_page_state(NR_INACTIVE_FILE) +
186  		 global_node_page_state(NR_ISOLATED_ANON) +
187  		 global_node_page_state(NR_ISOLATED_FILE) +
188  		 global_node_page_state(NR_UNEVICTABLE);
189  
190  	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
191  }
192  
193  /**
194   * oom_badness - heuristic function to determine which candidate task to kill
195   * @p: task struct of the task whose badness score we should calculate
196   * @totalpages: total present RAM allowed for page allocation
197   *
198   * The heuristic for determining which task to kill is made to be as simple and
199   * predictable as possible.  The goal is to return the highest value for the
200   * task consuming the most memory to avoid subsequent oom failures.
201   */
202  long oom_badness(struct task_struct *p, unsigned long totalpages)
203  {
204  	long points;
205  	long adj;
206  
207  	if (oom_unkillable_task(p))
208  		return LONG_MIN;
209  
210  	p = find_lock_task_mm(p);
211  	if (!p)
212  		return LONG_MIN;
213  
214  	/*
215  	 * Do not even consider tasks which are explicitly marked oom
216  	 * unkillable, have already been oom reaped, or are in the middle
217  	 * of a vfork.
218  	 */
219  	adj = (long)p->signal->oom_score_adj;
220  	if (adj == OOM_SCORE_ADJ_MIN ||
221  			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
222  			in_vfork(p)) {
223  		task_unlock(p);
224  		return LONG_MIN;
225  	}
226  
227  	/*
228  	 * The baseline for the badness score is the proportion of RAM that each
229  	 * task's rss, pagetable and swap space use.
230  	 */
231  	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
232  		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
233  	task_unlock(p);
234  
235  	/* Normalize to oom_score_adj units: each unit is worth totalpages / 1000 */
236  	adj *= totalpages / 1000;
237  	points += adj;
238  
239  	return points;
240  }
241  
242  static const char * const oom_constraint_text[] = {
243  	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
244  	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
245  	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
246  	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
247  };
248  
249  /*
250   * Determine the type of allocation constraint.
251   */
252  static enum oom_constraint constrained_alloc(struct oom_control *oc)
253  {
254  	struct zone *zone;
255  	struct zoneref *z;
256  	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
257  	bool cpuset_limited = false;
258  	int nid;
259  
260  	if (is_memcg_oom(oc)) {
261  		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
262  		return CONSTRAINT_MEMCG;
263  	}
264  
265  	/* Default to all available memory */
266  	oc->totalpages = totalram_pages() + total_swap_pages;
267  
268  	if (!IS_ENABLED(CONFIG_NUMA))
269  		return CONSTRAINT_NONE;
270  
271  	if (!oc->zonelist)
272  		return CONSTRAINT_NONE;
273  	/*
274  	 * We only reach here when __GFP_NOFAIL is used, so we should avoid
275  	 * killing current; we have to kill a random task in this case.
276  	 * Ideally CONSTRAINT_THISNODE...but there is no way to handle it now.
277  	 */
278  	if (oc->gfp_mask & __GFP_THISNODE)
279  		return CONSTRAINT_NONE;
280  
281  	/*
282  	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
283  	 * the page allocator means a mempolicy is in effect.  Cpuset policy
284  	 * is enforced in get_page_from_freelist().
285  	 */
286  	if (oc->nodemask &&
287  	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
288  		oc->totalpages = total_swap_pages;
289  		for_each_node_mask(nid, *oc->nodemask)
290  			oc->totalpages += node_present_pages(nid);
291  		return CONSTRAINT_MEMORY_POLICY;
292  	}
293  
294  	/* Check whether this allocation failure is caused by cpuset's wall function */
295  	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
296  			highest_zoneidx, oc->nodemask)
297  		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
298  			cpuset_limited = true;
299  
300  	if (cpuset_limited) {
301  		oc->totalpages = total_swap_pages;
302  		for_each_node_mask(nid, cpuset_current_mems_allowed)
303  			oc->totalpages += node_present_pages(nid);
304  		return CONSTRAINT_CPUSET;
305  	}
306  	return CONSTRAINT_NONE;
307  }
308  
309  static int oom_evaluate_task(struct task_struct *task, void *arg)
310  {
311  	struct oom_control *oc = arg;
312  	long points;
313  
314  	if (oom_unkillable_task(task))
315  		goto next;
316  
317  	/* p may not have freeable memory in nodemask */
318  	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
319  		goto next;
320  
321  	/*
322  	 * This task already has access to memory reserves and is being killed.
323  	 * Don't allow any other task to have access to the reserves unless
324  	 * the task has MMF_OOM_SKIP because the chances that it would release
325  	 * any memory are quite low.
326  	 */
327  	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
328  		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
329  			goto next;
330  		goto abort;
331  	}
332  
333  	/*
334  	 * If task is allocating a lot of memory and has been marked to be
335  	 * killed first if it triggers an oom, then select it.
336  	 */
337  	if (oom_task_origin(task)) {
338  		points = LONG_MAX;
339  		goto select;
340  	}
341  
342  	points = oom_badness(task, oc->totalpages);
343  	if (points == LONG_MIN || points < oc->chosen_points)
344  		goto next;
345  
346  select:
347  	if (oc->chosen)
348  		put_task_struct(oc->chosen);
349  	get_task_struct(task);
350  	oc->chosen = task;
351  	oc->chosen_points = points;
352  next:
353  	return 0;
354  abort:
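	/*
	 * An existing oom victim is still exiting, so no new victim is needed.
	 * Abort the scan; the (void *)-1UL sentinel in oc->chosen tells
	 * out_of_memory() that a kill is already in flight.
	 */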
355  	if (oc->chosen)
356  		put_task_struct(oc->chosen);
357  	oc->chosen = (void *)-1UL;
358  	return 1;
359  }
360  
361  /*
362   * Simple selection loop. We choose the process with the highest number of
363   * 'points'. In case scan was aborted, oc->chosen is set to -1.
364   */
365  static void select_bad_process(struct oom_control *oc)
366  {
367  	oc->chosen_points = LONG_MIN;
368  
369  	if (is_memcg_oom(oc))
370  		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
371  	else {
372  		struct task_struct *p;
373  
374  		rcu_read_lock();
375  		for_each_process(p)
376  			if (oom_evaluate_task(p, oc))
377  				break;
378  		rcu_read_unlock();
379  	}
380  }
381  
382  static int dump_task(struct task_struct *p, void *arg)
383  {
384  	struct oom_control *oc = arg;
385  	struct task_struct *task;
386  
387  	if (oom_unkillable_task(p))
388  		return 0;
389  
390  	/* p may not have freeable memory in nodemask */
391  	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
392  		return 0;
393  
394  	task = find_lock_task_mm(p);
395  	if (!task) {
396  		/*
397  		 * All of p's threads have already detached their mm's. There's
398  		 * no need to report them; they can't be oom killed anyway.
399  		 */
400  		return 0;
401  	}
402  
403  	pr_info("[%7d] %5d %5d %8lu %8lu %8lu %8lu %9lu %8ld %8lu         %5hd %s\n",
404  		task->pid, from_kuid(&init_user_ns, task_uid(task)),
405  		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
406  		get_mm_counter(task->mm, MM_ANONPAGES), get_mm_counter(task->mm, MM_FILEPAGES),
407  		get_mm_counter(task->mm, MM_SHMEMPAGES), mm_pgtables_bytes(task->mm),
408  		get_mm_counter(task->mm, MM_SWAPENTS),
409  		task->signal->oom_score_adj, task->comm);
410  	task_unlock(task);
411  
412  	return 0;
413  }
414  
415  /**
416   * dump_tasks - dump current memory state of all system tasks
417   * @oc: pointer to struct oom_control
418   *
419   * Dumps the current memory state of all eligible tasks.  Tasks not in the same
420   * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
421   * are not shown.
422   * State information includes task's pid, uid, tgid, vm size, rss,
423   * pgtables_bytes, swapents, oom_score_adj value, and name.
424   */
425  static void dump_tasks(struct oom_control *oc)
426  {
427  	pr_info("Tasks state (memory values in pages):\n");
428  	pr_info("[  pid  ]   uid  tgid total_vm      rss rss_anon rss_file rss_shmem pgtables_bytes swapents oom_score_adj name\n");
429  
430  	if (is_memcg_oom(oc))
431  		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
432  	else {
433  		struct task_struct *p;
434  
435  		rcu_read_lock();
436  		for_each_process(p)
437  			dump_task(p, oc);
438  		rcu_read_unlock();
439  	}
440  }
441  
442  static void dump_oom_victim(struct oom_control *oc, struct task_struct *victim)
443  {
444  	/* one line summary of the oom killer context. */
445  	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
446  			oom_constraint_text[oc->constraint],
447  			nodemask_pr_args(oc->nodemask));
448  	cpuset_print_current_mems_allowed();
449  	mem_cgroup_print_oom_context(oc->memcg, victim);
450  	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
451  		from_kuid(&init_user_ns, task_uid(victim)));
452  }
453  
454  static void dump_header(struct oom_control *oc)
455  {
456  	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
457  		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
458  			current->signal->oom_score_adj);
459  	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
460  		pr_warn("COMPACTION is disabled!!!\n");
461  
462  	dump_stack();
463  	if (is_memcg_oom(oc))
464  		mem_cgroup_print_oom_meminfo(oc->memcg);
465  	else {
466  		__show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask, gfp_zone(oc->gfp_mask));
467  		if (should_dump_unreclaim_slab())
468  			dump_unreclaimable_slab();
469  	}
470  	if (sysctl_oom_dump_tasks)
471  		dump_tasks(oc);
472  }
473  
474  /*
475   * Number of OOM victims in flight
476   */
477  static atomic_t oom_victims = ATOMIC_INIT(0);
478  static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
479  
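/*
 * Set by oom_killer_disable() (e.g. from the freezer during suspend and
 * hibernation) so that allocations fail instead of invoking the oom killer.
 */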
480  static bool oom_killer_disabled __read_mostly;
481  
482  /*
483   * task->mm can be NULL if the task is the exited group leader.  So to
484   * determine whether the task is using a particular mm, we examine all the
485   * task's threads: if one of those is using this mm then this task was also
486   * using it.
487   */
488  bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
489  {
490  	struct task_struct *t;
491  
492  	for_each_thread(p, t) {
493  		struct mm_struct *t_mm = READ_ONCE(t->mm);
494  		if (t_mm)
495  			return t_mm == mm;
496  	}
497  	return false;
498  }
499  
500  #ifdef CONFIG_MMU
501  /*
502   * OOM Reaper kernel thread which tries to reap the memory used by the OOM
503   * victim (if that is possible) to help the OOM killer to move on.
504   */
505  static struct task_struct *oom_reaper_th;
506  static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
507  static struct task_struct *oom_reaper_list;
508  static DEFINE_SPINLOCK(oom_reaper_lock);
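/*
 * Victims are queued as a singly linked list through
 * task_struct::oom_reaper_list, protected by oom_reaper_lock, and handed to
 * the oom_reaper kthread one at a time.
 */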
509  
510  static bool __oom_reap_task_mm(struct mm_struct *mm)
511  {
512  	struct vm_area_struct *vma;
513  	bool ret = true;
514  	VMA_ITERATOR(vmi, mm, 0);
515  
516  	/*
517  	 * Tell all users of get_user/copy_from_user etc... that the content
518  	 * is no longer stable. No barriers really needed because unmapping
519  	 * should imply barriers already and the reader would hit a page fault
520  	 * if it stumbled over reaped memory.
521  	 */
522  	set_bit(MMF_UNSTABLE, &mm->flags);
523  
524  	for_each_vma(vmi, vma) {
525  		if (vma->vm_flags & (VM_HUGETLB|VM_PFNMAP))
526  			continue;
527  
528  		/*
529  		 * Only anonymous pages have a good chance to be dropped
530  		 * without additional steps which we cannot afford as we
531  		 * are OOM already.
532  		 *
533  		 * We do not even care about fs backed pages because all
534  		 * which are reclaimable have already been reclaimed and
535  		 * we do not want to block exit_mmap by keeping mm ref
536  		 * count elevated without a good reason.
537  		 */
538  		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
539  			struct mmu_notifier_range range;
540  			struct mmu_gather tlb;
541  
542  			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
543  						mm, vma->vm_start,
544  						vma->vm_end);
545  			tlb_gather_mmu(&tlb, mm);
546  			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
547  				tlb_finish_mmu(&tlb);
548  				ret = false;
549  				continue;
550  			}
551  			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
552  			mmu_notifier_invalidate_range_end(&range);
553  			tlb_finish_mmu(&tlb);
554  		}
555  	}
556  
557  	return ret;
558  }
559  
560  /*
561   * Reaps the address space of the given task.
562   *
563   * Returns true on success and false if only part (or none) of the address
564   * space could be reclaimed, in which case the caller should retry later.
565   */
566  static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
567  {
568  	bool ret = true;
569  
570  	if (!mmap_read_trylock(mm)) {
571  		trace_skip_task_reaping(tsk->pid);
572  		return false;
573  	}
574  
575  	/*
576  	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
577  	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
578  	 * under mmap_lock for reading because it serializes against the
579  	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
580  	 */
581  	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
582  		trace_skip_task_reaping(tsk->pid);
583  		goto out_unlock;
584  	}
585  
586  	trace_start_task_reaping(tsk->pid);
587  
588  	/* failed to reap part of the address space. Try again later */
589  	ret = __oom_reap_task_mm(mm);
590  	if (!ret)
591  		goto out_finish;
592  
593  	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
594  			task_pid_nr(tsk), tsk->comm,
595  			K(get_mm_counter(mm, MM_ANONPAGES)),
596  			K(get_mm_counter(mm, MM_FILEPAGES)),
597  			K(get_mm_counter(mm, MM_SHMEMPAGES)));
598  out_finish:
599  	trace_finish_task_reaping(tsk->pid);
600  out_unlock:
601  	mmap_read_unlock(mm);
602  
603  	return ret;
604  }
605  
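/*
 * oom_reap_task_mm() may fail to take mmap_lock; retry a few times, sleeping
 * HZ/10 between attempts (roughly one second in total), before giving up and
 * setting MMF_OOM_SKIP anyway.
 */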
606  #define MAX_OOM_REAP_RETRIES 10
607  static void oom_reap_task(struct task_struct *tsk)
608  {
609  	int attempts = 0;
610  	struct mm_struct *mm = tsk->signal->oom_mm;
611  
612  	/* Retry the mmap_read_trylock(mm) a few times */
613  	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
614  		schedule_timeout_idle(HZ/10);
615  
616  	if (attempts <= MAX_OOM_REAP_RETRIES ||
617  	    test_bit(MMF_OOM_SKIP, &mm->flags))
618  		goto done;
619  
620  	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
621  		task_pid_nr(tsk), tsk->comm);
622  	sched_show_task(tsk);
623  	debug_show_all_locks();
624  
625  done:
626  	tsk->oom_reaper_list = NULL;
627  
628  	/*
629  	 * Hide this mm from the OOM killer because it has either been reaped or
630  	 * somebody can't call mmap_write_unlock(mm).
631  	 */
632  	set_bit(MMF_OOM_SKIP, &mm->flags);
633  
634  	/* Drop a reference taken by queue_oom_reaper */
635  	put_task_struct(tsk);
636  }
637  
638  static int oom_reaper(void *unused)
639  {
640  	set_freezable();
641  
642  	while (true) {
643  		struct task_struct *tsk = NULL;
644  
645  		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
646  		spin_lock_irq(&oom_reaper_lock);
647  		if (oom_reaper_list != NULL) {
648  			tsk = oom_reaper_list;
649  			oom_reaper_list = tsk->oom_reaper_list;
650  		}
651  		spin_unlock_irq(&oom_reaper_lock);
652  
653  		if (tsk)
654  			oom_reap_task(tsk);
655  	}
656  
657  	return 0;
658  }
659  
660  static void wake_oom_reaper(struct timer_list *timer)
661  {
662  	struct task_struct *tsk = container_of(timer, struct task_struct,
663  			oom_reaper_timer);
664  	struct mm_struct *mm = tsk->signal->oom_mm;
665  	unsigned long flags;
666  
667  	/* The victim managed to terminate on its own - see exit_mmap */
668  	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
669  		put_task_struct(tsk);
670  		return;
671  	}
672  
673  	spin_lock_irqsave(&oom_reaper_lock, flags);
674  	tsk->oom_reaper_list = oom_reaper_list;
675  	oom_reaper_list = tsk;
676  	spin_unlock_irqrestore(&oom_reaper_lock, flags);
677  	trace_wake_reaper(tsk->pid);
678  	wake_up(&oom_reaper_wait);
679  }
680  
681  /*
682   * Give the OOM victim time to exit naturally before invoking the oom reaper.
683   * The timer's timeout is arbitrary... the longer it is, the longer the worst
684   * case scenario for the OOM can take. If it is too small, the oom_reaper can
685   * get in the way and release resources needed by the process exit path.
686   * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
687   * before the exit path is able to wake the futex waiters.
688   */
689  #define OOM_REAPER_DELAY (2*HZ)
690  static void queue_oom_reaper(struct task_struct *tsk)
691  {
692  	/* mm is already queued? */
693  	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
694  		return;
695  
696  	get_task_struct(tsk);
697  	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
698  	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
699  	add_timer(&tsk->oom_reaper_timer);
700  }
701  
702  #ifdef CONFIG_SYSCTL
703  static struct ctl_table vm_oom_kill_table[] = {
704  	{
705  		.procname	= "panic_on_oom",
706  		.data		= &sysctl_panic_on_oom,
707  		.maxlen		= sizeof(sysctl_panic_on_oom),
708  		.mode		= 0644,
709  		.proc_handler	= proc_dointvec_minmax,
710  		.extra1		= SYSCTL_ZERO,
711  		.extra2		= SYSCTL_TWO,
712  	},
713  	{
714  		.procname	= "oom_kill_allocating_task",
715  		.data		= &sysctl_oom_kill_allocating_task,
716  		.maxlen		= sizeof(sysctl_oom_kill_allocating_task),
717  		.mode		= 0644,
718  		.proc_handler	= proc_dointvec,
719  	},
720  	{
721  		.procname	= "oom_dump_tasks",
722  		.data		= &sysctl_oom_dump_tasks,
723  		.maxlen		= sizeof(sysctl_oom_dump_tasks),
724  		.mode		= 0644,
725  		.proc_handler	= proc_dointvec,
726  	},
727  };
728  #endif
729  
730  static int __init oom_init(void)
731  {
732  	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
733  #ifdef CONFIG_SYSCTL
734  	register_sysctl_init("vm", vm_oom_kill_table);
735  #endif
736  	return 0;
737  }
738  subsys_initcall(oom_init)
739  #else
740  static inline void queue_oom_reaper(struct task_struct *tsk)
741  {
742  }
743  #endif /* CONFIG_MMU */
744  
745  /**
746   * mark_oom_victim - mark the given task as OOM victim
747   * @tsk: task to mark
748   *
749   * Has to be called with oom_lock held and never after the oom killer
750   * has been disabled.
751   *
752   * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
753   * (either by holding task_lock or by operating on current).
754   */
755  static void mark_oom_victim(struct task_struct *tsk)
756  {
757  	const struct cred *cred;
758  	struct mm_struct *mm = tsk->mm;
759  
760  	WARN_ON(oom_killer_disabled);
761  	/* OOM killer might race with memcg OOM */
762  	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
763  		return;
764  
765  	/* oom_mm is bound to the signal struct life time. */
766  	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
767  		mmgrab(tsk->signal->oom_mm);
768  
769  	/*
770  	 * Make sure that the task is woken up from uninterruptible sleep
771  	 * if it is frozen because the OOM killer wouldn't be able to free
772  	 * any memory and would livelock. freezing_slow_path will tell the freezer
773  	 * that TIF_MEMDIE tasks should be ignored.
774  	 */
775  	__thaw_task(tsk);
776  	atomic_inc(&oom_victims);
777  	cred = get_task_cred(tsk);
778  	trace_mark_victim(tsk, cred->uid.val);
779  	put_cred(cred);
780  }
781  
782  /**
783   * exit_oom_victim - note the exit of an OOM victim
784   */
785  void exit_oom_victim(void)
786  {
787  	clear_thread_flag(TIF_MEMDIE);
788  
789  	if (!atomic_dec_return(&oom_victims))
790  		wake_up_all(&oom_victims_wait);
791  }
792  
793  /**
794   * oom_killer_enable - enable OOM killer
795   */
796  void oom_killer_enable(void)
797  {
798  	oom_killer_disabled = false;
799  	pr_info("OOM killer enabled.\n");
800  }
801  
802  /**
803   * oom_killer_disable - disable OOM killer
804   * @timeout: maximum timeout to wait for oom victims in jiffies
805   *
806   * Forces all page allocations to fail rather than trigger OOM killer.
807   * Will block and wait until all OOM victims are killed or the given
808   * timeout expires.
809   *
810   * The function cannot be called when there are runnable user tasks because
811   * userspace would see unexpected allocation failures as a result. Any
812   * new usage of this function should be discussed with the MM people.
813   *
814   * Returns true if successful and false if the OOM killer cannot be
815   * disabled.
816   */
817  bool oom_killer_disable(signed long timeout)
818  {
819  	signed long ret;
820  
821  	/*
822  	 * Make sure not to race with an ongoing OOM killer. Check that
823  	 * current is not killed (possibly due to sharing the victim's memory).
824  	 */
825  	if (mutex_lock_killable(&oom_lock))
826  		return false;
827  	oom_killer_disabled = true;
828  	mutex_unlock(&oom_lock);
829  
830  	ret = wait_event_interruptible_timeout(oom_victims_wait,
831  			!atomic_read(&oom_victims), timeout);
832  	if (ret <= 0) {
833  		oom_killer_enable();
834  		return false;
835  	}
836  	pr_info("OOM killer disabled.\n");
837  
838  	return true;
839  }
840  
841  static inline bool __task_will_free_mem(struct task_struct *task)
842  {
843  	struct signal_struct *sig = task->signal;
844  
845  	/*
846  	 * A coredumping process may sleep for an extended period in
847  	 * coredump_task_exit(), so the oom killer cannot assume that
848  	 * the process will promptly exit and release memory.
849  	 */
850  	if (sig->core_state)
851  		return false;
852  
853  	if (sig->flags & SIGNAL_GROUP_EXIT)
854  		return true;
855  
856  	if (thread_group_empty(task) && (task->flags & PF_EXITING))
857  		return true;
858  
859  	return false;
860  }
861  
862  /*
863   * Checks whether the given task is dying or exiting and likely to
864   * release its address space. This means that all threads and processes
865   * sharing the same mm have to be killed or exiting.
866   * The caller has to make sure that task->mm is stable (hold task_lock or
867   * operate on current).
868   */
869  static bool task_will_free_mem(struct task_struct *task)
870  {
871  	struct mm_struct *mm = task->mm;
872  	struct task_struct *p;
873  	bool ret = true;
874  
875  	/*
876  	 * Skip tasks without an mm because the task might have already passed
877  	 * exit_mm and exit_oom_victim. oom_reaper could have rescued that, but
878  	 * do not rely on it for now. We can consider find_lock_task_mm in the future.
879  	 */
880  	if (!mm)
881  		return false;
882  
883  	if (!__task_will_free_mem(task))
884  		return false;
885  
886  	/*
887  	 * This task has already been drained by the oom reaper so there are
888  	 * only small chances it will free some more
889  	 */
890  	if (test_bit(MMF_OOM_SKIP, &mm->flags))
891  		return false;
892  
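	/* A single mm user means no other process shares this mm. */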
893  	if (atomic_read(&mm->mm_users) <= 1)
894  		return true;
895  
896  	/*
897  	 * Make sure that all tasks which share the mm with the given task
898  	 * are dying as well, so that a) nobody pins its mm and
899  	 * b) the task is also reapable by the oom reaper.
900  	 */
901  	rcu_read_lock();
902  	for_each_process(p) {
903  		if (!process_shares_mm(p, mm))
904  			continue;
905  		if (same_thread_group(task, p))
906  			continue;
907  		ret = __task_will_free_mem(p);
908  		if (!ret)
909  			break;
910  	}
911  	rcu_read_unlock();
912  
913  	return ret;
914  }
915  
916  static void __oom_kill_process(struct task_struct *victim, const char *message)
917  {
918  	struct task_struct *p;
919  	struct mm_struct *mm;
920  	bool can_oom_reap = true;
921  
922  	p = find_lock_task_mm(victim);
923  	if (!p) {
924  		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
925  			message, task_pid_nr(victim), victim->comm);
926  		put_task_struct(victim);
927  		return;
928  	} else if (victim != p) {
929  		get_task_struct(p);
930  		put_task_struct(victim);
931  		victim = p;
932  	}
933  
934  	/* Get a reference to safely compare mm after task_unlock(victim) */
935  	mm = victim->mm;
936  	mmgrab(mm);
937  
938  	/* Raise event before sending signal: task reaper must see this */
939  	count_vm_event(OOM_KILL);
940  	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);
941  
942  	/*
943  	 * We should send SIGKILL before granting access to memory reserves
944  	 * in order to prevent the OOM victim from depleting the memory
945  	 * reserves from the user space under its control.
946  	 */
947  	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
948  	mark_oom_victim(victim);
949  	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
950  		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
951  		K(get_mm_counter(mm, MM_ANONPAGES)),
952  		K(get_mm_counter(mm, MM_FILEPAGES)),
953  		K(get_mm_counter(mm, MM_SHMEMPAGES)),
954  		from_kuid(&init_user_ns, task_uid(victim)),
955  		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
956  	task_unlock(victim);
957  
958  	/*
959  	 * Kill all user processes sharing victim->mm in other thread groups, if
960  	 * any.  They don't get access to memory reserves, though, to avoid
961  	 * depletion of all memory.  This prevents mm->mmap_lock livelock when an
962  	 * oom killed thread cannot exit because it requires the semaphore and
963  	 * it's contended by another thread trying to allocate memory itself.
964  	 * That thread will now get access to memory reserves since it has a
965  	 * pending fatal signal.
966  	 */
967  	rcu_read_lock();
968  	for_each_process(p) {
969  		if (!process_shares_mm(p, mm))
970  			continue;
971  		if (same_thread_group(p, victim))
972  			continue;
973  		if (is_global_init(p)) {
974  			can_oom_reap = false;
975  			set_bit(MMF_OOM_SKIP, &mm->flags);
976  			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
977  					task_pid_nr(victim), victim->comm,
978  					task_pid_nr(p), p->comm);
979  			continue;
980  		}
981  		/*
982  		 * No kthread_use_mm() user needs to read from the userspace so
983  		 * we are ok to reap it.
984  		 */
985  		if (unlikely(p->flags & PF_KTHREAD))
986  			continue;
987  		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
988  	}
989  	rcu_read_unlock();
990  
991  	if (can_oom_reap)
992  		queue_oom_reaper(victim);
993  
994  	mmdrop(mm);
995  	put_task_struct(victim);
996  }
997  
998  /*
999   * Kill provided task unless it's secured by setting
1000   * oom_score_adj to OOM_SCORE_ADJ_MIN.
1001   */
1002  static int oom_kill_memcg_member(struct task_struct *task, void *message)
1003  {
1004  	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
1005  	    !is_global_init(task)) {
1006  		get_task_struct(task);
1007  		__oom_kill_process(task, message);
1008  	}
1009  	return 0;
1010  }
1011  
1012  static void oom_kill_process(struct oom_control *oc, const char *message)
1013  {
1014  	struct task_struct *victim = oc->chosen;
1015  	struct mem_cgroup *oom_group;
1016  	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1017  					      DEFAULT_RATELIMIT_BURST);
1018  
1019  	/*
1020  	 * If the task is already exiting, don't alarm the sysadmin or kill
1021  	 * its children or threads, just give it access to memory reserves
1022  	 * so it can die quickly
1023  	 */
1024  	task_lock(victim);
1025  	if (task_will_free_mem(victim)) {
1026  		mark_oom_victim(victim);
1027  		queue_oom_reaper(victim);
1028  		task_unlock(victim);
1029  		put_task_struct(victim);
1030  		return;
1031  	}
1032  	task_unlock(victim);
1033  
1034  	if (__ratelimit(&oom_rs)) {
1035  		dump_header(oc);
1036  		dump_oom_victim(oc, victim);
1037  	}
1038  
1039  	/*
1040  	 * Do we need to kill the entire memory cgroup?
1041  	 * Or even one of the ancestor memory cgroups?
1042  	 * Check this out before killing the victim task.
1043  	 */
1044  	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
1045  
1046  	__oom_kill_process(victim, message);
1047  
1048  	/*
1049  	 * If necessary, kill all tasks in the selected memory cgroup.
1050  	 */
1051  	if (oom_group) {
1052  		memcg_memory_event(oom_group, MEMCG_OOM_GROUP_KILL);
1053  		mem_cgroup_print_oom_group(oom_group);
1054  		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
1055  				      (void *)message);
1056  		mem_cgroup_put(oom_group);
1057  	}
1058  }
1059  
1060  /*
1061   * Determines whether the kernel must panic because of the panic_on_oom sysctl.
1062   */
1063  static void check_panic_on_oom(struct oom_control *oc)
1064  {
1065  	if (likely(!sysctl_panic_on_oom))
1066  		return;
1067  	if (sysctl_panic_on_oom != 2) {
1068  		/*
1069  		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
1070  		 * does not panic for cpuset, mempolicy, or memcg allocation
1071  		 * failures.
1072  		 */
1073  		if (oc->constraint != CONSTRAINT_NONE)
1074  			return;
1075  	}
1076  	/* Do not panic for oom kills triggered by sysrq */
1077  	if (is_sysrq_oom(oc))
1078  		return;
1079  	dump_header(oc);
1080  	panic("Out of memory: %s panic_on_oom is enabled\n",
1081  		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
1082  }
1083  
1084  static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
1085  
1086  int register_oom_notifier(struct notifier_block *nb)
1087  {
1088  	return blocking_notifier_chain_register(&oom_notify_list, nb);
1089  }
1090  EXPORT_SYMBOL_GPL(register_oom_notifier);
1091  
1092  int unregister_oom_notifier(struct notifier_block *nb)
1093  {
1094  	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
1095  }
1096  EXPORT_SYMBOL_GPL(unregister_oom_notifier);
1097  
1098  /**
1099   * out_of_memory - kill the "best" process when we run out of memory
1100   * @oc: pointer to struct oom_control
1101   *
1102   * If we run out of memory, we have the choice between either
1103   * killing a random task (bad), letting the system crash (worse)
1104   * OR trying to be smart about which process to kill. Note that we
1105   * don't have to be perfect here, we just have to be good.
1106   */
1107  bool out_of_memory(struct oom_control *oc)
1108  {
1109  	unsigned long freed = 0;
1110  
1111  	if (oom_killer_disabled)
1112  		return false;
1113  
1114  	if (!is_memcg_oom(oc)) {
1115  		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
1116  		if (freed > 0 && !is_sysrq_oom(oc))
1117  			/* Got some memory back in the last second. */
1118  			return true;
1119  	}
1120  
1121  	/*
1122  	 * If current has a pending SIGKILL or is exiting, then automatically
1123  	 * select it.  The goal is to allow it to allocate so that it may
1124  	 * quickly exit and free its memory.
1125  	 */
1126  	if (task_will_free_mem(current)) {
1127  		mark_oom_victim(current);
1128  		queue_oom_reaper(current);
1129  		return true;
1130  	}
1131  
1132  	/*
1133  	 * The OOM killer does not compensate for IO-less reclaim.
1134  	 * But mem_cgroup_oom() has to invoke the OOM killer even
1135  	 * if it is a GFP_NOFS allocation.
1136  	 */
1137  	if (!(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
1138  		return true;
1139  
1140  	/*
1141  	 * Check if there were limitations on the allocation (only relevant for
1142  	 * NUMA and memcg) that may require different handling.
1143  	 */
1144  	oc->constraint = constrained_alloc(oc);
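	/*
	 * Only a mempolicy-constrained oom restricts victim selection to the
	 * nodemask; for everything else drop it so that oom_cpuset_eligible()
	 * falls back to cpuset-based checks.
	 */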
1145  	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
1146  		oc->nodemask = NULL;
1147  	check_panic_on_oom(oc);
1148  
1149  	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
1150  	    current->mm && !oom_unkillable_task(current) &&
1151  	    oom_cpuset_eligible(current, oc) &&
1152  	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
1153  		get_task_struct(current);
1154  		oc->chosen = current;
1155  		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
1156  		return true;
1157  	}
1158  
1159  	select_bad_process(oc);
1160  	/* Found nothing?!?! */
1161  	if (!oc->chosen) {
1162  		dump_header(oc);
1163  		pr_warn("Out of memory and no killable processes...\n");
1164  		/*
1165  		 * If we got here due to an actual allocation at the
1166  		 * system level, we cannot survive this and will enter
1167  		 * an endless loop in the allocator. Bail out now.
1168  		 */
1169  		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
1170  			panic("System is deadlocked on memory\n");
1171  	}
1172  	if (oc->chosen && oc->chosen != (void *)-1UL)
1173  		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
1174  				 "Memory cgroup out of memory");
1175  	return !!oc->chosen;
1176  }
1177  
1178  /*
1179   * The pagefault handler calls here because some allocation has failed. We have
1180   * to take care of the memcg OOM here because this is the only safe context without
1181   * any locks held, but we let the oom killer triggered from the allocation context
1182   * take care of the global OOM.
1183   */
1184  void pagefault_out_of_memory(void)
1185  {
1186  	static DEFINE_RATELIMIT_STATE(pfoom_rs, DEFAULT_RATELIMIT_INTERVAL,
1187  				      DEFAULT_RATELIMIT_BURST);
1188  
1189  	if (mem_cgroup_oom_synchronize(true))
1190  		return;
1191  
1192  	if (fatal_signal_pending(current))
1193  		return;
1194  
1195  	if (__ratelimit(&pfoom_rs))
1196  		pr_warn("Huh VM_FAULT_OOM leaked out to the #PF handler. Retrying PF\n");
1197  }
1198  
1199  SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
1200  {
1201  #ifdef CONFIG_MMU
1202  	struct mm_struct *mm = NULL;
1203  	struct task_struct *task;
1204  	struct task_struct *p;
1205  	unsigned int f_flags;
1206  	bool reap = false;
1207  	long ret = 0;
1208  
1209  	if (flags)
1210  		return -EINVAL;
1211  
1212  	task = pidfd_get_task(pidfd, &f_flags);
1213  	if (IS_ERR(task))
1214  		return PTR_ERR(task);
1215  
1216  	/*
1217  	 * Make sure to choose a thread which still has a reference to mm
1218  	 * during the group exit
1219  	 */
1220  	p = find_lock_task_mm(task);
1221  	if (!p) {
1222  		ret = -ESRCH;
1223  		goto put_task;
1224  	}
1225  
1226  	mm = p->mm;
1227  	mmgrab(mm);
1228  
1229  	if (task_will_free_mem(p))
1230  		reap = true;
1231  	else {
1232  		/* Error only if the work has not been done already */
1233  		if (!test_bit(MMF_OOM_SKIP, &mm->flags))
1234  			ret = -EINVAL;
1235  	}
1236  	task_unlock(p);
1237  
1238  	if (!reap)
1239  		goto drop_mm;
1240  
1241  	if (mmap_read_lock_killable(mm)) {
1242  		ret = -EINTR;
1243  		goto drop_mm;
1244  	}
1245  	/*
1246  	 * Check MMF_OOM_SKIP again under mmap_read_lock protection to ensure
1247  	 * possible change in exit_mmap is seen
1248  	 */
1249  	if (!test_bit(MMF_OOM_SKIP, &mm->flags) && !__oom_reap_task_mm(mm))
1250  		ret = -EAGAIN;
1251  	mmap_read_unlock(mm);
1252  
1253  drop_mm:
1254  	mmdrop(mm);
1255  put_task:
1256  	put_task_struct(task);
1257  	return ret;
1258  #else
1259  	return -ENOSYS;
1260  #endif /* CONFIG_MMU */
1261  }
1262