1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/exit.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  */
7 
8 #include <linux/mm.h>
9 #include <linux/slab.h>
10 #include <linux/sched/autogroup.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/stat.h>
13 #include <linux/sched/task.h>
14 #include <linux/sched/task_stack.h>
15 #include <linux/sched/cputime.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/capability.h>
19 #include <linux/completion.h>
20 #include <linux/personality.h>
21 #include <linux/tty.h>
22 #include <linux/iocontext.h>
23 #include <linux/key.h>
24 #include <linux/cpu.h>
25 #include <linux/acct.h>
26 #include <linux/tsacct_kern.h>
27 #include <linux/file.h>
28 #include <linux/fdtable.h>
29 #include <linux/freezer.h>
30 #include <linux/binfmts.h>
31 #include <linux/nsproxy.h>
32 #include <linux/pid_namespace.h>
33 #include <linux/ptrace.h>
34 #include <linux/profile.h>
35 #include <linux/mount.h>
36 #include <linux/proc_fs.h>
37 #include <linux/kthread.h>
38 #include <linux/mempolicy.h>
39 #include <linux/taskstats_kern.h>
40 #include <linux/delayacct.h>
41 #include <linux/cgroup.h>
42 #include <linux/syscalls.h>
43 #include <linux/signal.h>
44 #include <linux/posix-timers.h>
45 #include <linux/cn_proc.h>
46 #include <linux/mutex.h>
47 #include <linux/futex.h>
48 #include <linux/pipe_fs_i.h>
49 #include <linux/audit.h> /* for audit_free() */
50 #include <linux/resource.h>
51 #include <linux/task_io_accounting_ops.h>
52 #include <linux/blkdev.h>
53 #include <linux/task_work.h>
54 #include <linux/fs_struct.h>
55 #include <linux/init_task.h>
56 #include <linux/perf_event.h>
57 #include <trace/events/sched.h>
58 #include <linux/hw_breakpoint.h>
59 #include <linux/oom.h>
60 #include <linux/writeback.h>
61 #include <linux/shm.h>
62 #include <linux/kcov.h>
63 #include <linux/kmsan.h>
64 #include <linux/random.h>
65 #include <linux/rcuwait.h>
66 #include <linux/compat.h>
67 #include <linux/io_uring.h>
68 #include <linux/kprobes.h>
69 #include <linux/rethook.h>
70 #include <linux/sysfs.h>
71 #include <linux/user_events.h>
72 #include <linux/uaccess.h>
73 
74 #include <uapi/linux/wait.h>
75 
76 #include <asm/unistd.h>
77 #include <asm/mmu_context.h>
78 
79 #include "exit.h"
80 
81 /*
82  * The default value should be high enough to not crash a system that randomly
83  * crashes its kernel from time to time, but low enough to at least not permit
84  * overflowing 32-bit refcounts or the ldsem writer count.
85  */
86 static unsigned int oops_limit = 10000;
87 
88 #ifdef CONFIG_SYSCTL
89 static struct ctl_table kern_exit_table[] = {
90 	{
91 		.procname       = "oops_limit",
92 		.data           = &oops_limit,
93 		.maxlen         = sizeof(oops_limit),
94 		.mode           = 0644,
95 		.proc_handler   = proc_douintvec,
96 	},
97 };
98 
99 static __init int kernel_exit_sysctls_init(void)
100 {
101 	register_sysctl_init("kernel", kern_exit_table);
102 	return 0;
103 }
104 late_initcall(kernel_exit_sysctls_init);
105 #endif
106 
107 static atomic_t oops_count = ATOMIC_INIT(0);
108 
109 #ifdef CONFIG_SYSFS
110 static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
111 			       char *page)
112 {
113 	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
114 }
115 
116 static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
117 
118 static __init int kernel_exit_sysfs_init(void)
119 {
120 	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
121 	return 0;
122 }
123 late_initcall(kernel_exit_sysfs_init);
124 #endif
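
/*
 * Illustrative userspace sketch (not part of this file): the two knobs above
 * surface as /proc/sys/kernel/oops_limit (writable, via the sysctl table) and
 * /sys/kernel/oops_count (read-only, via the sysfs attribute). A minimal
 * reader, assuming both files exist on the running kernel:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		const char *files[] = {
 *			"/proc/sys/kernel/oops_limit",
 *			"/sys/kernel/oops_count",
 *		};
 *		char buf[64];
 *
 *		for (int i = 0; i < 2; i++) {
 *			FILE *f = fopen(files[i], "r");
 *
 *			if (f && fgets(buf, sizeof(buf), f))
 *				printf("%s: %s", files[i], buf);
 *			if (f)
 *				fclose(f);
 *		}
 *		return 0;
 *	}
 */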
125 
126 static void __unhash_process(struct task_struct *p, bool group_dead)
127 {
128 	nr_threads--;
129 	detach_pid(p, PIDTYPE_PID);
130 	if (group_dead) {
131 		detach_pid(p, PIDTYPE_TGID);
132 		detach_pid(p, PIDTYPE_PGID);
133 		detach_pid(p, PIDTYPE_SID);
134 
135 		list_del_rcu(&p->tasks);
136 		list_del_init(&p->sibling);
137 		__this_cpu_dec(process_counts);
138 	}
139 	list_del_rcu(&p->thread_node);
140 }
141 
142 /*
143  * This function expects the tasklist_lock write-locked.
144  */
145 static void __exit_signal(struct task_struct *tsk)
146 {
147 	struct signal_struct *sig = tsk->signal;
148 	bool group_dead = thread_group_leader(tsk);
149 	struct sighand_struct *sighand;
150 	struct tty_struct *tty;
151 	u64 utime, stime;
152 
153 	sighand = rcu_dereference_check(tsk->sighand,
154 					lockdep_tasklist_lock_is_held());
155 	spin_lock(&sighand->siglock);
156 
157 #ifdef CONFIG_POSIX_TIMERS
158 	posix_cpu_timers_exit(tsk);
159 	if (group_dead)
160 		posix_cpu_timers_exit_group(tsk);
161 #endif
162 
163 	if (group_dead) {
164 		tty = sig->tty;
165 		sig->tty = NULL;
166 	} else {
167 		/*
168 		 * If there is any task waiting for the group exit
169 		 * then notify it:
170 		 */
171 		if (sig->notify_count > 0 && !--sig->notify_count)
172 			wake_up_process(sig->group_exec_task);
173 
174 		if (tsk == sig->curr_target)
175 			sig->curr_target = next_thread(tsk);
176 	}
177 
178 	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
179 			      sizeof(unsigned long long));
180 
181 	/*
182 	 * Accumulate here the counters for all threads as they die. We could
183 	 * skip the group leader because it is the last user of signal_struct,
184 	 * but we want to avoid the race with thread_group_cputime() which can
185 	 * see the empty ->thread_head list.
186 	 */
187 	task_cputime(tsk, &utime, &stime);
188 	write_seqlock(&sig->stats_lock);
189 	sig->utime += utime;
190 	sig->stime += stime;
191 	sig->gtime += task_gtime(tsk);
192 	sig->min_flt += tsk->min_flt;
193 	sig->maj_flt += tsk->maj_flt;
194 	sig->nvcsw += tsk->nvcsw;
195 	sig->nivcsw += tsk->nivcsw;
196 	sig->inblock += task_io_get_inblock(tsk);
197 	sig->oublock += task_io_get_oublock(tsk);
198 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
199 	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
200 	sig->nr_threads--;
201 	__unhash_process(tsk, group_dead);
202 	write_sequnlock(&sig->stats_lock);
203 
204 	/*
205 	 * Do this under ->siglock, we can race with another thread
206 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
207 	 */
208 	flush_sigqueue(&tsk->pending);
209 	tsk->sighand = NULL;
210 	spin_unlock(&sighand->siglock);
211 
212 	__cleanup_sighand(sighand);
213 	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
214 	if (group_dead) {
215 		flush_sigqueue(&sig->shared_pending);
216 		tty_kref_put(tty);
217 	}
218 }
219 
220 static void delayed_put_task_struct(struct rcu_head *rhp)
221 {
222 	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
223 
224 	kprobe_flush_task(tsk);
225 	rethook_flush_task(tsk);
226 	perf_event_delayed_put(tsk);
227 	trace_sched_process_free(tsk);
228 	put_task_struct(tsk);
229 }
230 
231 void put_task_struct_rcu_user(struct task_struct *task)
232 {
233 	if (refcount_dec_and_test(&task->rcu_users))
234 		call_rcu(&task->rcu, delayed_put_task_struct);
235 }
236 
237 void __weak release_thread(struct task_struct *dead_task)
238 {
239 }
240 
241 void release_task(struct task_struct *p)
242 {
243 	struct task_struct *leader;
244 	struct pid *thread_pid;
245 	int zap_leader;
246 repeat:
247 	/* don't need to get the RCU readlock here - the process is dead and
248 	 * can't be modifying its own credentials. But shut RCU-lockdep up */
249 	rcu_read_lock();
250 	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
251 	rcu_read_unlock();
252 
253 	cgroup_release(p);
254 
255 	write_lock_irq(&tasklist_lock);
256 	ptrace_release_task(p);
257 	thread_pid = get_pid(p->thread_pid);
258 	__exit_signal(p);
259 
260 	/*
261 	 * If we are the last non-leader member of the thread
262 	 * group, and the leader is zombie, then notify the
263 	 * group leader's parent process. (if it wants notification.)
264 	 */
265 	zap_leader = 0;
266 	leader = p->group_leader;
267 	if (leader != p && thread_group_empty(leader)
268 			&& leader->exit_state == EXIT_ZOMBIE) {
269 		/*
270 		 * If we were the last child thread and the leader has
271 		 * exited already, and the leader's parent ignores SIGCHLD,
272 		 * then we are the one who should release the leader.
273 		 */
274 		zap_leader = do_notify_parent(leader, leader->exit_signal);
275 		if (zap_leader)
276 			leader->exit_state = EXIT_DEAD;
277 	}
278 
279 	write_unlock_irq(&tasklist_lock);
280 	proc_flush_pid(thread_pid);
281 	put_pid(thread_pid);
282 	release_thread(p);
283 	put_task_struct_rcu_user(p);
284 
285 	p = leader;
286 	if (unlikely(zap_leader))
287 		goto repeat;
288 }
289 
290 int rcuwait_wake_up(struct rcuwait *w)
291 {
292 	int ret = 0;
293 	struct task_struct *task;
294 
295 	rcu_read_lock();
296 
297 	/*
298 	 * Order condition vs @task, such that everything prior to the load
299 	 * of @task is visible. This is the condition as to why the user called
300 	 * rcuwait_wake() in the first place. Pairs with set_current_state()
301 	 * barrier (A) in rcuwait_wait_event().
302 	 *
303 	 *    WAIT                WAKE
304 	 *    [S] tsk = current	  [S] cond = true
305 	 *        MB (A)	      MB (B)
306 	 *    [L] cond		  [L] tsk
307 	 */
308 	smp_mb(); /* (B) */
309 
310 	task = rcu_dereference(w->task);
311 	if (task)
312 		ret = wake_up_process(task);
313 	rcu_read_unlock();
314 
315 	return ret;
316 }
317 EXPORT_SYMBOL_GPL(rcuwait_wake_up);
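
/*
 * Minimal usage sketch of the wait/wake pairing described above (hypothetical
 * caller, API as declared in <linux/rcuwait.h>): the waiter publishes itself
 * via rcuwait_wait_event(), the waker publishes the condition and then calls
 * rcuwait_wake_up(), matching barriers (A) and (B) in the diagram.
 *
 *	static struct rcuwait w;
 *	static bool done;
 *
 *	Waiter:
 *		rcuwait_init(&w);
 *		rcuwait_wait_event(&w, READ_ONCE(done), TASK_UNINTERRUPTIBLE);
 *
 *	Waker:
 *		WRITE_ONCE(done, true);
 *		rcuwait_wake_up(&w);
 */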
318 
319 /*
320  * Determine if a process group is "orphaned", according to the POSIX
321  * definition in 2.2.2.52.  Orphaned process groups are not to be affected
322  * by terminal-generated stop signals.  Newly orphaned process groups are
323  * to receive a SIGHUP and a SIGCONT.
324  *
325  * "I ask you, have you ever known what it is to be an orphan?"
326  */
327 static int will_become_orphaned_pgrp(struct pid *pgrp,
328 					struct task_struct *ignored_task)
329 {
330 	struct task_struct *p;
331 
332 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
333 		if ((p == ignored_task) ||
334 		    (p->exit_state && thread_group_empty(p)) ||
335 		    is_global_init(p->real_parent))
336 			continue;
337 
338 		if (task_pgrp(p->real_parent) != pgrp &&
339 		    task_session(p->real_parent) == task_session(p))
340 			return 0;
341 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
342 
343 	return 1;
344 }
345 
346 int is_current_pgrp_orphaned(void)
347 {
348 	int retval;
349 
350 	read_lock(&tasklist_lock);
351 	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
352 	read_unlock(&tasklist_lock);
353 
354 	return retval;
355 }
356 
357 static bool has_stopped_jobs(struct pid *pgrp)
358 {
359 	struct task_struct *p;
360 
361 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
362 		if (p->signal->flags & SIGNAL_STOP_STOPPED)
363 			return true;
364 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
365 
366 	return false;
367 }
368 
369 /*
370  * Check to see if any process groups have become orphaned as
371  * a result of our exiting, and if they have any stopped jobs,
372  * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
373  */
374 static void
375 kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
376 {
377 	struct pid *pgrp = task_pgrp(tsk);
378 	struct task_struct *ignored_task = tsk;
379 
380 	if (!parent)
381 		/* exit: our father is in a different pgrp than
382 		 * we are and we were the only connection outside.
383 		 */
384 		parent = tsk->real_parent;
385 	else
386 		/* reparent: our child is in a different pgrp than
387 		 * we are, and it was the only connection outside.
388 		 */
389 		ignored_task = NULL;
390 
391 	if (task_pgrp(parent) != pgrp &&
392 	    task_session(parent) == task_session(tsk) &&
393 	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
394 	    has_stopped_jobs(pgrp)) {
395 		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
396 		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
397 	}
398 }
399 
400 static void coredump_task_exit(struct task_struct *tsk)
401 {
402 	struct core_state *core_state;
403 
404 	/*
405 	 * Serialize with any possible pending coredump.
406 	 * We must hold siglock around checking core_state
407 	 * and setting PF_POSTCOREDUMP.  The core-inducing thread
408 	 * will increment ->nr_threads for each thread in the
409 	 * group without PF_POSTCOREDUMP set.
410 	 */
411 	spin_lock_irq(&tsk->sighand->siglock);
412 	tsk->flags |= PF_POSTCOREDUMP;
413 	core_state = tsk->signal->core_state;
414 	spin_unlock_irq(&tsk->sighand->siglock);
415 	if (core_state) {
416 		struct core_thread self;
417 
418 		self.task = current;
419 		if (self.task->flags & PF_SIGNALED)
420 			self.next = xchg(&core_state->dumper.next, &self);
421 		else
422 			self.task = NULL;
423 		/*
424 		 * Implies mb(), the result of xchg() must be visible
425 		 * to core_state->dumper.
426 		 */
427 		if (atomic_dec_and_test(&core_state->nr_threads))
428 			complete(&core_state->startup);
429 
430 		for (;;) {
431 			set_current_state(TASK_IDLE|TASK_FREEZABLE);
432 			if (!self.task) /* see coredump_finish() */
433 				break;
434 			schedule();
435 		}
436 		__set_current_state(TASK_RUNNING);
437 	}
438 }
439 
440 #ifdef CONFIG_MEMCG
441 /* drops tasklist_lock if succeeds */
442 static bool __try_to_set_owner(struct task_struct *tsk, struct mm_struct *mm)
443 {
444 	bool ret = false;
445 
446 	task_lock(tsk);
447 	if (likely(tsk->mm == mm)) {
448 		/* tsk can't pass exit_mm/exec_mmap and exit */
449 		read_unlock(&tasklist_lock);
450 		WRITE_ONCE(mm->owner, tsk);
451 		lru_gen_migrate_mm(mm);
452 		ret = true;
453 	}
454 	task_unlock(tsk);
455 	return ret;
456 }
457 
458 static bool try_to_set_owner(struct task_struct *g, struct mm_struct *mm)
459 {
460 	struct task_struct *t;
461 
462 	for_each_thread(g, t) {
463 		struct mm_struct *t_mm = READ_ONCE(t->mm);
464 		if (t_mm == mm) {
465 			if (__try_to_set_owner(t, mm))
466 				return true;
467 		} else if (t_mm)
468 			break;
469 	}
470 
471 	return false;
472 }
473 
474 /*
475  * A task is exiting.   If it owned this mm, find a new owner for the mm.
476  */
477 void mm_update_next_owner(struct mm_struct *mm)
478 {
479 	struct task_struct *g, *p = current;
480 
481 	/*
482 	 * If the exiting or execing task is not the owner, it's
483 	 * someone else's problem.
484 	 */
485 	if (mm->owner != p)
486 		return;
487 	/*
488 	 * The current owner is exiting/execing and there are no other
489 	 * candidates.  Do not leave the mm pointing to a possibly
490 	 * freed task structure.
491 	 */
492 	if (atomic_read(&mm->mm_users) <= 1) {
493 		WRITE_ONCE(mm->owner, NULL);
494 		return;
495 	}
496 
497 	read_lock(&tasklist_lock);
498 	/*
499 	 * Search in the children
500 	 */
501 	list_for_each_entry(g, &p->children, sibling) {
502 		if (try_to_set_owner(g, mm))
503 			goto ret;
504 	}
505 	/*
506 	 * Search in the siblings
507 	 */
508 	list_for_each_entry(g, &p->real_parent->children, sibling) {
509 		if (try_to_set_owner(g, mm))
510 			goto ret;
511 	}
512 	/*
513 	 * Search through everything else, we should not get here often.
514 	 */
515 	for_each_process(g) {
516 		if (atomic_read(&mm->mm_users) <= 1)
517 			break;
518 		if (g->flags & PF_KTHREAD)
519 			continue;
520 		if (try_to_set_owner(g, mm))
521 			goto ret;
522 	}
523 	read_unlock(&tasklist_lock);
524 	/*
525 	 * We found no owner yet mm_users > 1: this implies that we are
526 	 * most likely racing with swapoff (try_to_unuse()) or /proc or
527 	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
528 	 */
529 	WRITE_ONCE(mm->owner, NULL);
530  ret:
531 	return;
532 
533 }
534 #endif /* CONFIG_MEMCG */
535 
536 /*
537  * Turn us into a lazy TLB process if we
538  * aren't already..
539  */
540 static void exit_mm(void)
541 {
542 	struct mm_struct *mm = current->mm;
543 
544 	exit_mm_release(current, mm);
545 	if (!mm)
546 		return;
547 	mmap_read_lock(mm);
548 	mmgrab_lazy_tlb(mm);
549 	BUG_ON(mm != current->active_mm);
550 	/* more a memory barrier than a real lock */
551 	task_lock(current);
552 	/*
553 	 * When a thread stops operating on an address space, the loop
554 	 * in membarrier_private_expedited() may not observe that
555 	 * tsk->mm, and the loop in membarrier_global_expedited() may
556 	 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
557 	 * rq->membarrier_state, so those would not issue an IPI.
558 	 * Membarrier requires a memory barrier after accessing
559 	 * user-space memory, before clearing tsk->mm or the
560 	 * rq->membarrier_state.
561 	 */
562 	smp_mb__after_spinlock();
563 	local_irq_disable();
564 	current->mm = NULL;
565 	membarrier_update_current_mm(NULL);
566 	enter_lazy_tlb(mm, current);
567 	local_irq_enable();
568 	task_unlock(current);
569 	mmap_read_unlock(mm);
570 	mm_update_next_owner(mm);
571 	mmput(mm);
572 	if (test_thread_flag(TIF_MEMDIE))
573 		exit_oom_victim();
574 }
575 
576 static struct task_struct *find_alive_thread(struct task_struct *p)
577 {
578 	struct task_struct *t;
579 
580 	for_each_thread(p, t) {
581 		if (!(t->flags & PF_EXITING))
582 			return t;
583 	}
584 	return NULL;
585 }
586 
587 static struct task_struct *find_child_reaper(struct task_struct *father,
588 						struct list_head *dead)
589 	__releases(&tasklist_lock)
590 	__acquires(&tasklist_lock)
591 {
592 	struct pid_namespace *pid_ns = task_active_pid_ns(father);
593 	struct task_struct *reaper = pid_ns->child_reaper;
594 	struct task_struct *p, *n;
595 
596 	if (likely(reaper != father))
597 		return reaper;
598 
599 	reaper = find_alive_thread(father);
600 	if (reaper) {
601 		pid_ns->child_reaper = reaper;
602 		return reaper;
603 	}
604 
605 	write_unlock_irq(&tasklist_lock);
606 
607 	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
608 		list_del_init(&p->ptrace_entry);
609 		release_task(p);
610 	}
611 
612 	zap_pid_ns_processes(pid_ns);
613 	write_lock_irq(&tasklist_lock);
614 
615 	return father;
616 }
617 
618 /*
619  * When we die, we re-parent all our children, and try to:
620  * 1. give them to another thread in our thread group, if such a member exists
621  * 2. give it to the first ancestor process which prctl'd itself as a
622  *    child_subreaper for its children (like a service manager)
623  * 3. give it to the init process (PID 1) in our pid namespace
624  */
625 static struct task_struct *find_new_reaper(struct task_struct *father,
626 					   struct task_struct *child_reaper)
627 {
628 	struct task_struct *thread, *reaper;
629 
630 	thread = find_alive_thread(father);
631 	if (thread)
632 		return thread;
633 
634 	if (father->signal->has_child_subreaper) {
635 		unsigned int ns_level = task_pid(father)->level;
636 		/*
637 		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
638 		 * We can't check reaper != child_reaper to ensure we do not
639 		 * cross the namespaces, the exiting parent could be injected
640 		 * by setns() + fork().
641 		 * We check pid->level, this is slightly more efficient than
642 		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
643 		 */
644 		for (reaper = father->real_parent;
645 		     task_pid(reaper)->level == ns_level;
646 		     reaper = reaper->real_parent) {
647 			if (reaper == &init_task)
648 				break;
649 			if (!reaper->signal->is_child_subreaper)
650 				continue;
651 			thread = find_alive_thread(reaper);
652 			if (thread)
653 				return thread;
654 		}
655 	}
656 
657 	return child_reaper;
658 }
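
/*
 * Userspace side of case 2 above, as a hedged sketch (not part of this file):
 * a service manager marks itself as a subreaper with prctl(2), after which
 * orphaned descendants are reparented to it rather than to init and can be
 * collected with an ordinary wait loop.
 *
 *	#include <sys/prctl.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1);
 *	...
 *	while (waitpid(-1, &status, WNOHANG) > 0)
 *		;	reap whatever got reparented to us
 */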
659 
660 /*
661  * Any that need to be release_task'd are put on the @dead list.
662  */
663 static void reparent_leader(struct task_struct *father, struct task_struct *p,
664 				struct list_head *dead)
665 {
666 	if (unlikely(p->exit_state == EXIT_DEAD))
667 		return;
668 
669 	/* We don't want people slaying init. */
670 	p->exit_signal = SIGCHLD;
671 
672 	/* If it has exited notify the new parent about this child's death. */
673 	if (!p->ptrace &&
674 	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
675 		if (do_notify_parent(p, p->exit_signal)) {
676 			p->exit_state = EXIT_DEAD;
677 			list_add(&p->ptrace_entry, dead);
678 		}
679 	}
680 
681 	kill_orphaned_pgrp(p, father);
682 }
683 
684 /*
685  * This does two things:
686  *
687  * A.  Make init inherit all the child processes
688  * B.  Check to see if any process groups have become orphaned
689  *	as a result of our exiting, and if they have any stopped
690  *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
691  */
692 static void forget_original_parent(struct task_struct *father,
693 					struct list_head *dead)
694 {
695 	struct task_struct *p, *t, *reaper;
696 
697 	if (unlikely(!list_empty(&father->ptraced)))
698 		exit_ptrace(father, dead);
699 
700 	/* Can drop and reacquire tasklist_lock */
701 	reaper = find_child_reaper(father, dead);
702 	if (list_empty(&father->children))
703 		return;
704 
705 	reaper = find_new_reaper(father, reaper);
706 	list_for_each_entry(p, &father->children, sibling) {
707 		for_each_thread(p, t) {
708 			RCU_INIT_POINTER(t->real_parent, reaper);
709 			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
710 			if (likely(!t->ptrace))
711 				t->parent = t->real_parent;
712 			if (t->pdeath_signal)
713 				group_send_sig_info(t->pdeath_signal,
714 						    SEND_SIG_NOINFO, t,
715 						    PIDTYPE_TGID);
716 		}
717 		/*
718 		 * If this is a threaded reparent there is no need to
719 		 * notify anyone anything has happened.
720 		 */
721 		if (!same_thread_group(reaper, father))
722 			reparent_leader(father, p, dead);
723 	}
724 	list_splice_tail_init(&father->children, &reaper->children);
725 }
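
/*
 * The pdeath_signal delivery above is the kernel half of PR_SET_PDEATHSIG.
 * Illustrative userspace sketch (assumes Linux prctl(2); not part of this
 * file): a child asks to be signalled when its parent dies, then re-checks
 * getppid() to close the race where the parent exited before the prctl call
 * took effect.
 *
 *	#include <signal.h>
 *	#include <sys/prctl.h>
 *	#include <unistd.h>
 *
 *	if (fork() == 0) {
 *		prctl(PR_SET_PDEATHSIG, SIGTERM);
 *		if (getppid() == 1)		(parent already gone)
 *			raise(SIGTERM);
 *		... child work ...
 *	}
 */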
726 
727 /*
728  * Send signals to all our closest relatives so that they know
729  * to properly mourn us..
730  */
731 static void exit_notify(struct task_struct *tsk, int group_dead)
732 {
733 	bool autoreap;
734 	struct task_struct *p, *n;
735 	LIST_HEAD(dead);
736 
737 	write_lock_irq(&tasklist_lock);
738 	forget_original_parent(tsk, &dead);
739 
740 	if (group_dead)
741 		kill_orphaned_pgrp(tsk->group_leader, NULL);
742 
743 	tsk->exit_state = EXIT_ZOMBIE;
744 	/*
745 	 * sub-thread or delay_group_leader(), wake up the
746 	 * PIDFD_THREAD waiters.
747 	 */
748 	if (!thread_group_empty(tsk))
749 		do_notify_pidfd(tsk);
750 
751 	if (unlikely(tsk->ptrace)) {
752 		int sig = thread_group_leader(tsk) &&
753 				thread_group_empty(tsk) &&
754 				!ptrace_reparented(tsk) ?
755 			tsk->exit_signal : SIGCHLD;
756 		autoreap = do_notify_parent(tsk, sig);
757 	} else if (thread_group_leader(tsk)) {
758 		autoreap = thread_group_empty(tsk) &&
759 			do_notify_parent(tsk, tsk->exit_signal);
760 	} else {
761 		autoreap = true;
762 	}
763 
764 	if (autoreap) {
765 		tsk->exit_state = EXIT_DEAD;
766 		list_add(&tsk->ptrace_entry, &dead);
767 	}
768 
769 	/* mt-exec, de_thread() is waiting for group leader */
770 	if (unlikely(tsk->signal->notify_count < 0))
771 		wake_up_process(tsk->signal->group_exec_task);
772 	write_unlock_irq(&tasklist_lock);
773 
774 	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
775 		list_del_init(&p->ptrace_entry);
776 		release_task(p);
777 	}
778 }
779 
780 #ifdef CONFIG_DEBUG_STACK_USAGE
781 unsigned long stack_not_used(struct task_struct *p)
782 {
783 	unsigned long *n = end_of_stack(p);
784 
785 	do {	/* Skip over canary */
786 # ifdef CONFIG_STACK_GROWSUP
787 		n--;
788 # else
789 		n++;
790 # endif
791 	} while (!*n);
792 
793 # ifdef CONFIG_STACK_GROWSUP
794 	return (unsigned long)end_of_stack(p) - (unsigned long)n;
795 # else
796 	return (unsigned long)n - (unsigned long)end_of_stack(p);
797 # endif
798 }
799 
800 /* Count the maximum pages reached in kernel stacks */
801 static inline void kstack_histogram(unsigned long used_stack)
802 {
803 #ifdef CONFIG_VM_EVENT_COUNTERS
804 	if (used_stack <= 1024)
805 		count_vm_event(KSTACK_1K);
806 #if THREAD_SIZE > 1024
807 	else if (used_stack <= 2048)
808 		count_vm_event(KSTACK_2K);
809 #endif
810 #if THREAD_SIZE > 2048
811 	else if (used_stack <= 4096)
812 		count_vm_event(KSTACK_4K);
813 #endif
814 #if THREAD_SIZE > 4096
815 	else if (used_stack <= 8192)
816 		count_vm_event(KSTACK_8K);
817 #endif
818 #if THREAD_SIZE > 8192
819 	else if (used_stack <= 16384)
820 		count_vm_event(KSTACK_16K);
821 #endif
822 #if THREAD_SIZE > 16384
823 	else if (used_stack <= 32768)
824 		count_vm_event(KSTACK_32K);
825 #endif
826 #if THREAD_SIZE > 32768
827 	else if (used_stack <= 65536)
828 		count_vm_event(KSTACK_64K);
829 #endif
830 #if THREAD_SIZE > 65536
831 	else
832 		count_vm_event(KSTACK_REST);
833 #endif
834 #endif /* CONFIG_VM_EVENT_COUNTERS */
835 }
836 
837 static void check_stack_usage(void)
838 {
839 	static DEFINE_SPINLOCK(low_water_lock);
840 	static int lowest_to_date = THREAD_SIZE;
841 	unsigned long free;
842 
843 	free = stack_not_used(current);
844 	kstack_histogram(THREAD_SIZE - free);
845 
846 	if (free >= lowest_to_date)
847 		return;
848 
849 	spin_lock(&low_water_lock);
850 	if (free < lowest_to_date) {
851 		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
852 			current->comm, task_pid_nr(current), free);
853 		lowest_to_date = free;
854 	}
855 	spin_unlock(&low_water_lock);
856 }
857 #else
858 static inline void check_stack_usage(void) {}
859 #endif
860 
861 static void synchronize_group_exit(struct task_struct *tsk, long code)
862 {
863 	struct sighand_struct *sighand = tsk->sighand;
864 	struct signal_struct *signal = tsk->signal;
865 
866 	spin_lock_irq(&sighand->siglock);
867 	signal->quick_threads--;
868 	if ((signal->quick_threads == 0) &&
869 	    !(signal->flags & SIGNAL_GROUP_EXIT)) {
870 		signal->flags = SIGNAL_GROUP_EXIT;
871 		signal->group_exit_code = code;
872 		signal->group_stop_count = 0;
873 	}
874 	spin_unlock_irq(&sighand->siglock);
875 }
876 
877 void __noreturn do_exit(long code)
878 {
879 	struct task_struct *tsk = current;
880 	int group_dead;
881 
882 	WARN_ON(irqs_disabled());
883 
884 	synchronize_group_exit(tsk, code);
885 
886 	WARN_ON(tsk->plug);
887 
888 	kcov_task_exit(tsk);
889 	kmsan_task_exit(tsk);
890 
891 	coredump_task_exit(tsk);
892 	ptrace_event(PTRACE_EVENT_EXIT, code);
893 	user_events_exit(tsk);
894 
895 	io_uring_files_cancel();
896 	exit_signals(tsk);  /* sets PF_EXITING */
897 
898 	seccomp_filter_release(tsk);
899 
900 	acct_update_integrals(tsk);
901 	group_dead = atomic_dec_and_test(&tsk->signal->live);
902 	if (group_dead) {
903 		/*
904 		 * If the last thread of global init has exited, panic
905 		 * immediately to get a useable coredump.
906 		 */
907 		if (unlikely(is_global_init(tsk)))
908 			panic("Attempted to kill init! exitcode=0x%08x\n",
909 				tsk->signal->group_exit_code ?: (int)code);
910 
911 #ifdef CONFIG_POSIX_TIMERS
912 		hrtimer_cancel(&tsk->signal->real_timer);
913 		exit_itimers(tsk);
914 #endif
915 		if (tsk->mm)
916 			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
917 	}
918 	acct_collect(code, group_dead);
919 	if (group_dead)
920 		tty_audit_exit();
921 	audit_free(tsk);
922 
923 	tsk->exit_code = code;
924 	taskstats_exit(tsk, group_dead);
925 
926 	exit_mm();
927 
928 	if (group_dead)
929 		acct_process();
930 	trace_sched_process_exit(tsk);
931 
932 	exit_sem(tsk);
933 	exit_shm(tsk);
934 	exit_files(tsk);
935 	exit_fs(tsk);
936 	if (group_dead)
937 		disassociate_ctty(1);
938 	exit_task_namespaces(tsk);
939 	exit_task_work(tsk);
940 	exit_thread(tsk);
941 
942 	/*
943 	 * Flush inherited counters to the parent - before the parent
944 	 * gets woken up by child-exit notifications.
945 	 *
946 	 * because of cgroup mode, must be called before cgroup_exit()
947 	 */
948 	perf_event_exit_task(tsk);
949 
950 	sched_autogroup_exit_task(tsk);
951 	cgroup_exit(tsk);
952 
953 	/*
954 	 * FIXME: do that only when needed, using sched_exit tracepoint
955 	 */
956 	flush_ptrace_hw_breakpoint(tsk);
957 
958 	exit_tasks_rcu_start();
959 	exit_notify(tsk, group_dead);
960 	proc_exit_connector(tsk);
961 	mpol_put_task_policy(tsk);
962 #ifdef CONFIG_FUTEX
963 	if (unlikely(current->pi_state_cache))
964 		kfree(current->pi_state_cache);
965 #endif
966 	/*
967 	 * Make sure we are holding no locks:
968 	 */
969 	debug_check_no_locks_held();
970 
971 	if (tsk->io_context)
972 		exit_io_context(tsk);
973 
974 	if (tsk->splice_pipe)
975 		free_pipe_info(tsk->splice_pipe);
976 
977 	if (tsk->task_frag.page)
978 		put_page(tsk->task_frag.page);
979 
980 	exit_task_stack_account(tsk);
981 
982 	check_stack_usage();
983 	preempt_disable();
984 	if (tsk->nr_dirtied)
985 		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
986 	exit_rcu();
987 	exit_tasks_rcu_finish();
988 
989 	lockdep_free_task(tsk);
990 	do_task_dead();
991 }
992 
993 void __noreturn make_task_dead(int signr)
994 {
995 	/*
996 	 * Take the task off the cpu after something catastrophic has
997 	 * happened.
998 	 *
999 	 * We can get here from a kernel oops, sometimes with preemption off.
1000 	 * Start by checking for critical errors.
1001 	 * Then fix up important state like USER_DS and preemption.
1002 	 * Then do everything else.
1003 	 */
1004 	struct task_struct *tsk = current;
1005 	unsigned int limit;
1006 
1007 	if (unlikely(in_interrupt()))
1008 		panic("Aiee, killing interrupt handler!");
1009 	if (unlikely(!tsk->pid))
1010 		panic("Attempted to kill the idle task!");
1011 
1012 	if (unlikely(irqs_disabled())) {
1013 		pr_info("note: %s[%d] exited with irqs disabled\n",
1014 			current->comm, task_pid_nr(current));
1015 		local_irq_enable();
1016 	}
1017 	if (unlikely(in_atomic())) {
1018 		pr_info("note: %s[%d] exited with preempt_count %d\n",
1019 			current->comm, task_pid_nr(current),
1020 			preempt_count());
1021 		preempt_count_set(PREEMPT_ENABLED);
1022 	}
1023 
1024 	/*
1025 	 * Every time the system oopses, if the oops happens while a reference
1026 	 * to an object was held, the reference leaks.
1027 	 * If the oops doesn't also leak memory, repeated oopsing can cause
1028 	 * reference counters to wrap around (if they're not using refcount_t).
1029 	 * This means that repeated oopsing can make unexploitable-looking bugs
1030 	 * exploitable through repeated oopsing.
1031 	 * To make sure this can't happen, place an upper bound on how often the
1032 	 * kernel may oops without panic().
1033 	 */
1034 	limit = READ_ONCE(oops_limit);
1035 	if (atomic_inc_return(&oops_count) >= limit && limit)
1036 		panic("Oopsed too often (kernel.oops_limit is %d)", limit);
1037 
1038 	/*
1039 	 * We're taking recursive faults here in make_task_dead. Safest is to just
1040 	 * leave this task alone and wait for reboot.
1041 	 */
1042 	if (unlikely(tsk->flags & PF_EXITING)) {
1043 		pr_alert("Fixing recursive fault but reboot is needed!\n");
1044 		futex_exit_recursive(tsk);
1045 		tsk->exit_state = EXIT_DEAD;
1046 		refcount_inc(&tsk->rcu_users);
1047 		do_task_dead();
1048 	}
1049 
1050 	do_exit(signr);
1051 }
1052 
1053 SYSCALL_DEFINE1(exit, int, error_code)
1054 {
1055 	do_exit((error_code&0xff)<<8);
1056 }
1057 
1058 /*
1059  * Take down every thread in the group.  This is called by fatal signals
1060  * as well as by sys_exit_group (below).
1061  */
1062 void __noreturn
1063 do_group_exit(int exit_code)
1064 {
1065 	struct signal_struct *sig = current->signal;
1066 
1067 	if (sig->flags & SIGNAL_GROUP_EXIT)
1068 		exit_code = sig->group_exit_code;
1069 	else if (sig->group_exec_task)
1070 		exit_code = 0;
1071 	else {
1072 		struct sighand_struct *const sighand = current->sighand;
1073 
1074 		spin_lock_irq(&sighand->siglock);
1075 		if (sig->flags & SIGNAL_GROUP_EXIT)
1076 			/* Another thread got here before we took the lock.  */
1077 			exit_code = sig->group_exit_code;
1078 		else if (sig->group_exec_task)
1079 			exit_code = 0;
1080 		else {
1081 			sig->group_exit_code = exit_code;
1082 			sig->flags = SIGNAL_GROUP_EXIT;
1083 			zap_other_threads(current);
1084 		}
1085 		spin_unlock_irq(&sighand->siglock);
1086 	}
1087 
1088 	do_exit(exit_code);
1089 	/* NOTREACHED */
1090 }
1091 
1092 /*
1093  * this kills every thread in the thread group. Note that any externally
1094  * wait4()-ing process will get the correct exit code - even if this
1095  * thread is not the thread group leader.
1096  */
1097 SYSCALL_DEFINE1(exit_group, int, error_code)
1098 {
1099 	do_group_exit((error_code & 0xff) << 8);
1100 	/* NOTREACHED */
1101 	return 0;
1102 }
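
/*
 * Both sys_exit() and sys_exit_group() pack the low byte of the exit code
 * into bits 8..15, which is exactly what the POSIX wait macros undo.
 * Illustrative userspace sketch (not part of this file):
 *
 *	#include <stdio.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int status;
 *		pid_t pid = fork();
 *
 *		if (pid == 0)
 *			_exit(42);	arrives here as (42 & 0xff) << 8
 *		waitpid(pid, &status, 0);
 *		if (WIFEXITED(status))
 *			printf("exit code %d\n", WEXITSTATUS(status));
 *		return 0;
 *	}
 */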
1103 
1104 static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
1105 {
1106 	return	wo->wo_type == PIDTYPE_MAX ||
1107 		task_pid_type(p, wo->wo_type) == wo->wo_pid;
1108 }
1109 
1110 static int
1111 eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
1112 {
1113 	if (!eligible_pid(wo, p))
1114 		return 0;
1115 
1116 	/*
1117 	 * Wait for all children (clone and not) if __WALL is set or
1118 	 * if it is traced by us.
1119 	 */
1120 	if (ptrace || (wo->wo_flags & __WALL))
1121 		return 1;
1122 
1123 	/*
1124 	 * Otherwise, wait for clone children *only* if __WCLONE is set;
1125 	 * otherwise, wait for non-clone children *only*.
1126 	 *
1127 	 * Note: a "clone" child here is one that reports to its parent
1128 	 * using a signal other than SIGCHLD, or a non-leader thread which
1129 	 * we can only see if it is traced by us.
1130 	 */
1131 	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
1132 		return 0;
1133 
1134 	return 1;
1135 }
1136 
1137 /*
1138  * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
1139  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1140  * the lock and this task is uninteresting.  If we return nonzero, we have
1141  * released the lock and the system call should return.
1142  */
1143 static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1144 {
1145 	int state, status;
1146 	pid_t pid = task_pid_vnr(p);
1147 	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
1148 	struct waitid_info *infop;
1149 
1150 	if (!likely(wo->wo_flags & WEXITED))
1151 		return 0;
1152 
1153 	if (unlikely(wo->wo_flags & WNOWAIT)) {
1154 		status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1155 			? p->signal->group_exit_code : p->exit_code;
1156 		get_task_struct(p);
1157 		read_unlock(&tasklist_lock);
1158 		sched_annotate_sleep();
1159 		if (wo->wo_rusage)
1160 			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1161 		put_task_struct(p);
1162 		goto out_info;
1163 	}
1164 	/*
1165 	 * Move the task's state to DEAD/TRACE, only one thread can do this.
1166 	 */
1167 	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
1168 		EXIT_TRACE : EXIT_DEAD;
1169 	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
1170 		return 0;
1171 	/*
1172 	 * We own this thread, nobody else can reap it.
1173 	 */
1174 	read_unlock(&tasklist_lock);
1175 	sched_annotate_sleep();
1176 
1177 	/*
1178 	 * Check thread_group_leader() to exclude the traced sub-threads.
1179 	 */
1180 	if (state == EXIT_DEAD && thread_group_leader(p)) {
1181 		struct signal_struct *sig = p->signal;
1182 		struct signal_struct *psig = current->signal;
1183 		unsigned long maxrss;
1184 		u64 tgutime, tgstime;
1185 
1186 		/*
1187 		 * The resource counters for the group leader are in its
1188 		 * own task_struct.  Those for dead threads in the group
1189 		 * are in its signal_struct, as are those for the child
1190 		 * processes it has previously reaped.  All these
1191 		 * accumulate in the parent's signal_struct c* fields.
1192 		 *
1193 		 * We don't bother to take a lock here to protect these
1194 		 * p->signal fields because the whole thread group is dead
1195 		 * and nobody can change them.
1196 		 *
1197 		 * psig->stats_lock also protects us from our sub-threads
1198 		 * which can reap other children at the same time.
1199 		 *
1200 		 * We use thread_group_cputime_adjusted() to get times for
1201 		 * the thread group, which consolidates times for all threads
1202 		 * in the group including the group leader.
1203 		 */
1204 		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1205 		write_seqlock_irq(&psig->stats_lock);
1206 		psig->cutime += tgutime + sig->cutime;
1207 		psig->cstime += tgstime + sig->cstime;
1208 		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
1209 		psig->cmin_flt +=
1210 			p->min_flt + sig->min_flt + sig->cmin_flt;
1211 		psig->cmaj_flt +=
1212 			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
1213 		psig->cnvcsw +=
1214 			p->nvcsw + sig->nvcsw + sig->cnvcsw;
1215 		psig->cnivcsw +=
1216 			p->nivcsw + sig->nivcsw + sig->cnivcsw;
1217 		psig->cinblock +=
1218 			task_io_get_inblock(p) +
1219 			sig->inblock + sig->cinblock;
1220 		psig->coublock +=
1221 			task_io_get_oublock(p) +
1222 			sig->oublock + sig->coublock;
1223 		maxrss = max(sig->maxrss, sig->cmaxrss);
1224 		if (psig->cmaxrss < maxrss)
1225 			psig->cmaxrss = maxrss;
1226 		task_io_accounting_add(&psig->ioac, &p->ioac);
1227 		task_io_accounting_add(&psig->ioac, &sig->ioac);
1228 		write_sequnlock_irq(&psig->stats_lock);
1229 	}
1230 
1231 	if (wo->wo_rusage)
1232 		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1233 	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1234 		? p->signal->group_exit_code : p->exit_code;
1235 	wo->wo_stat = status;
1236 
1237 	if (state == EXIT_TRACE) {
1238 		write_lock_irq(&tasklist_lock);
1239 		/* We dropped tasklist, ptracer could die and untrace */
1240 		ptrace_unlink(p);
1241 
1242 		/* If parent wants a zombie, don't release it now */
1243 		state = EXIT_ZOMBIE;
1244 		if (do_notify_parent(p, p->exit_signal))
1245 			state = EXIT_DEAD;
1246 		p->exit_state = state;
1247 		write_unlock_irq(&tasklist_lock);
1248 	}
1249 	if (state == EXIT_DEAD)
1250 		release_task(p);
1251 
1252 out_info:
1253 	infop = wo->wo_info;
1254 	if (infop) {
1255 		if ((status & 0x7f) == 0) {
1256 			infop->cause = CLD_EXITED;
1257 			infop->status = status >> 8;
1258 		} else {
1259 			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
1260 			infop->status = status & 0x7f;
1261 		}
1262 		infop->pid = pid;
1263 		infop->uid = uid;
1264 	}
1265 
1266 	return pid;
1267 }
1268 
1269 static int *task_stopped_code(struct task_struct *p, bool ptrace)
1270 {
1271 	if (ptrace) {
1272 		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
1273 			return &p->exit_code;
1274 	} else {
1275 		if (p->signal->flags & SIGNAL_STOP_STOPPED)
1276 			return &p->signal->group_exit_code;
1277 	}
1278 	return NULL;
1279 }
1280 
1281 /**
1282  * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
1283  * @wo: wait options
1284  * @ptrace: is the wait for ptrace
1285  * @p: task to wait for
1286  *
1287  * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
1288  *
1289  * CONTEXT:
1290  * read_lock(&tasklist_lock), which is released if return value is
1291  * non-zero.  Also, grabs and releases @p->sighand->siglock.
1292  *
1293  * RETURNS:
1294  * 0 if wait condition didn't exist and search for other wait conditions
1295  * should continue.  Non-zero return, -errno on failure and @p's pid on
1296  * success, implies that tasklist_lock is released and wait condition
1297  * search should terminate.
1298  */
1299 static int wait_task_stopped(struct wait_opts *wo,
1300 				int ptrace, struct task_struct *p)
1301 {
1302 	struct waitid_info *infop;
1303 	int exit_code, *p_code, why;
1304 	uid_t uid = 0; /* unneeded, required by compiler */
1305 	pid_t pid;
1306 
1307 	/*
1308 	 * Traditionally we see ptrace'd stopped tasks regardless of options.
1309 	 */
1310 	if (!ptrace && !(wo->wo_flags & WUNTRACED))
1311 		return 0;
1312 
1313 	if (!task_stopped_code(p, ptrace))
1314 		return 0;
1315 
1316 	exit_code = 0;
1317 	spin_lock_irq(&p->sighand->siglock);
1318 
1319 	p_code = task_stopped_code(p, ptrace);
1320 	if (unlikely(!p_code))
1321 		goto unlock_sig;
1322 
1323 	exit_code = *p_code;
1324 	if (!exit_code)
1325 		goto unlock_sig;
1326 
1327 	if (!unlikely(wo->wo_flags & WNOWAIT))
1328 		*p_code = 0;
1329 
1330 	uid = from_kuid_munged(current_user_ns(), task_uid(p));
1331 unlock_sig:
1332 	spin_unlock_irq(&p->sighand->siglock);
1333 	if (!exit_code)
1334 		return 0;
1335 
1336 	/*
1337 	 * Now we are pretty sure this task is interesting.
1338 	 * Make sure it doesn't get reaped out from under us while we
1339 	 * give up the lock and then examine it below.  We don't want to
1340 	 * keep holding onto the tasklist_lock while we call getrusage and
1341 	 * possibly take page faults for user memory.
1342 	 */
1343 	get_task_struct(p);
1344 	pid = task_pid_vnr(p);
1345 	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
1346 	read_unlock(&tasklist_lock);
1347 	sched_annotate_sleep();
1348 	if (wo->wo_rusage)
1349 		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1350 	put_task_struct(p);
1351 
1352 	if (likely(!(wo->wo_flags & WNOWAIT)))
1353 		wo->wo_stat = (exit_code << 8) | 0x7f;
1354 
1355 	infop = wo->wo_info;
1356 	if (infop) {
1357 		infop->cause = why;
1358 		infop->status = exit_code;
1359 		infop->pid = pid;
1360 		infop->uid = uid;
1361 	}
1362 	return pid;
1363 }
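
/*
 * The (exit_code << 8) | 0x7f encoding above is what WIFSTOPPED()/WSTOPSIG()
 * decode. Hedged userspace sketch (not part of this file):
 *
 *	kill(child, SIGSTOP);
 *	waitpid(child, &status, WUNTRACED);
 *	if (WIFSTOPPED(status))
 *		printf("stopped by signal %d\n", WSTOPSIG(status));
 */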
1364 
1365 /*
1366  * Handle do_wait work for one task in a live, non-stopped state.
1367  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1368  * the lock and this task is uninteresting.  If we return nonzero, we have
1369  * released the lock and the system call should return.
1370  */
1371 static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
1372 {
1373 	struct waitid_info *infop;
1374 	pid_t pid;
1375 	uid_t uid;
1376 
1377 	if (!unlikely(wo->wo_flags & WCONTINUED))
1378 		return 0;
1379 
1380 	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
1381 		return 0;
1382 
1383 	spin_lock_irq(&p->sighand->siglock);
1384 	/* Re-check with the lock held.  */
1385 	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
1386 		spin_unlock_irq(&p->sighand->siglock);
1387 		return 0;
1388 	}
1389 	if (!unlikely(wo->wo_flags & WNOWAIT))
1390 		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
1391 	uid = from_kuid_munged(current_user_ns(), task_uid(p));
1392 	spin_unlock_irq(&p->sighand->siglock);
1393 
1394 	pid = task_pid_vnr(p);
1395 	get_task_struct(p);
1396 	read_unlock(&tasklist_lock);
1397 	sched_annotate_sleep();
1398 	if (wo->wo_rusage)
1399 		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1400 	put_task_struct(p);
1401 
1402 	infop = wo->wo_info;
1403 	if (!infop) {
1404 		wo->wo_stat = 0xffff;
1405 	} else {
1406 		infop->cause = CLD_CONTINUED;
1407 		infop->pid = pid;
1408 		infop->uid = uid;
1409 		infop->status = SIGCONT;
1410 	}
1411 	return pid;
1412 }
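
/*
 * The 0xffff status stored above is the value WIFCONTINUED() tests for.
 * Hedged userspace sketch (not part of this file):
 *
 *	kill(child, SIGCONT);
 *	waitpid(child, &status, WCONTINUED);
 *	if (WIFCONTINUED(status))
 *		printf("child resumed\n");
 */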
1413 
1414 /*
1415  * Consider @p for a wait by @parent.
1416  *
1417  * -ECHILD should be in ->notask_error before the first call.
1418  * Returns nonzero for a final return, when we have unlocked tasklist_lock.
1419  * Returns zero if the search for a child should continue;
1420  * then ->notask_error is 0 if @p is an eligible child,
1421  * or still -ECHILD.
1422  */
1423 static int wait_consider_task(struct wait_opts *wo, int ptrace,
1424 				struct task_struct *p)
1425 {
1426 	/*
1427 	 * We can race with wait_task_zombie() from another thread.
1428 	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
1429 	 * can't confuse the checks below.
1430 	 */
1431 	int exit_state = READ_ONCE(p->exit_state);
1432 	int ret;
1433 
1434 	if (unlikely(exit_state == EXIT_DEAD))
1435 		return 0;
1436 
1437 	ret = eligible_child(wo, ptrace, p);
1438 	if (!ret)
1439 		return ret;
1440 
1441 	if (unlikely(exit_state == EXIT_TRACE)) {
1442 		/*
1443 		 * ptrace == 0 means we are the natural parent. In this case
1444 		 * we should clear notask_error, debugger will notify us.
1445 		 */
1446 		if (likely(!ptrace))
1447 			wo->notask_error = 0;
1448 		return 0;
1449 	}
1450 
1451 	if (likely(!ptrace) && unlikely(p->ptrace)) {
1452 		/*
1453 		 * If it is traced by its real parent's group, just pretend
1454 		 * the caller is ptrace_do_wait() and reap this child if it
1455 		 * is zombie.
1456 		 *
1457 		 * This also hides group stop state from real parent; otherwise
1458 		 * a single stop can be reported twice as group and ptrace stop.
1459 		 * If a ptracer wants to distinguish these two events for its
1460 		 * own children it should create a separate process which takes
1461 		 * the role of real parent.
1462 		 */
1463 		if (!ptrace_reparented(p))
1464 			ptrace = 1;
1465 	}
1466 
1467 	/* slay zombie? */
1468 	if (exit_state == EXIT_ZOMBIE) {
1469 		/* we don't reap group leaders with subthreads */
1470 		if (!delay_group_leader(p)) {
1471 			/*
1472 			 * A zombie ptracee is only visible to its ptracer.
1473 			 * Notification and reaping will be cascaded to the
1474 			 * real parent when the ptracer detaches.
1475 			 */
1476 			if (unlikely(ptrace) || likely(!p->ptrace))
1477 				return wait_task_zombie(wo, p);
1478 		}
1479 
1480 		/*
1481 		 * Allow access to stopped/continued state via zombie by
1482 		 * falling through.  Clearing of notask_error is complex.
1483 		 *
1484 		 * When !@ptrace:
1485 		 *
1486 		 * If WEXITED is set, notask_error should naturally be
1487 		 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is set,
1488 		 * so, if there are live subthreads, there are events to
1489 		 * wait for.  If all subthreads are dead, it's still safe
1490 		 * to clear - this function will be called again in a finite
1491 		 * amount of time once all the subthreads are released and
1492 		 * will then return without clearing.
1493 		 *
1494 		 * When @ptrace:
1495 		 *
1496 		 * Stopped state is per-task and thus can't change once the
1497 		 * target task dies.  Only continued and exited can happen.
1498 		 * Clear notask_error if WCONTINUED | WEXITED.
1499 		 */
1500 		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
1501 			wo->notask_error = 0;
1502 	} else {
1503 		/*
1504 		 * @p is alive and it's gonna stop, continue or exit, so
1505 		 * there always is something to wait for.
1506 		 */
1507 		wo->notask_error = 0;
1508 	}
1509 
1510 	/*
1511 	 * Wait for stopped.  Depending on @ptrace, different stopped state
1512 	 * is used and the two don't interact with each other.
1513 	 */
1514 	ret = wait_task_stopped(wo, ptrace, p);
1515 	if (ret)
1516 		return ret;
1517 
1518 	/*
1519 	 * Wait for continued.  There's only one continued state and the
1520 	 * ptracer can consume it which can confuse the real parent.  Don't
1521 	 * use WCONTINUED from ptracer.  You don't need or want it.
1522 	 */
1523 	return wait_task_continued(wo, p);
1524 }
1525 
1526 /*
1527  * Do the work of do_wait() for one thread in the group, @tsk.
1528  *
1529  * -ECHILD should be in ->notask_error before the first call.
1530  * Returns nonzero for a final return, when we have unlocked tasklist_lock.
1531  * Returns zero if the search for a child should continue; then
1532  * ->notask_error is 0 if there were any eligible children,
1533  * or still -ECHILD.
1534  */
1535 static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
1536 {
1537 	struct task_struct *p;
1538 
1539 	list_for_each_entry(p, &tsk->children, sibling) {
1540 		int ret = wait_consider_task(wo, 0, p);
1541 
1542 		if (ret)
1543 			return ret;
1544 	}
1545 
1546 	return 0;
1547 }
1548 
1549 static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
1550 {
1551 	struct task_struct *p;
1552 
1553 	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
1554 		int ret = wait_consider_task(wo, 1, p);
1555 
1556 		if (ret)
1557 			return ret;
1558 	}
1559 
1560 	return 0;
1561 }
1562 
1563 bool pid_child_should_wake(struct wait_opts *wo, struct task_struct *p)
1564 {
1565 	if (!eligible_pid(wo, p))
1566 		return false;
1567 
1568 	if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent)
1569 		return false;
1570 
1571 	return true;
1572 }
1573 
1574 static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
1575 				int sync, void *key)
1576 {
1577 	struct wait_opts *wo = container_of(wait, struct wait_opts,
1578 						child_wait);
1579 	struct task_struct *p = key;
1580 
1581 	if (pid_child_should_wake(wo, p))
1582 		return default_wake_function(wait, mode, sync, key);
1583 
1584 	return 0;
1585 }
1586 
1587 void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
1588 {
1589 	__wake_up_sync_key(&parent->signal->wait_chldexit,
1590 			   TASK_INTERRUPTIBLE, p);
1591 }
1592 
1593 static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
1594 				 struct task_struct *target)
1595 {
1596 	struct task_struct *parent =
1597 		!ptrace ? target->real_parent : target->parent;
1598 
1599 	return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
1600 				     same_thread_group(current, parent));
1601 }
1602 
1603 /*
1604  * Optimization for waiting on PIDTYPE_PID. No need to iterate through child
1605  * and tracee lists to find the target task.
1606  */
1607 static int do_wait_pid(struct wait_opts *wo)
1608 {
1609 	bool ptrace;
1610 	struct task_struct *target;
1611 	int retval;
1612 
1613 	ptrace = false;
1614 	target = pid_task(wo->wo_pid, PIDTYPE_TGID);
1615 	if (target && is_effectively_child(wo, ptrace, target)) {
1616 		retval = wait_consider_task(wo, ptrace, target);
1617 		if (retval)
1618 			return retval;
1619 	}
1620 
1621 	ptrace = true;
1622 	target = pid_task(wo->wo_pid, PIDTYPE_PID);
1623 	if (target && target->ptrace &&
1624 	    is_effectively_child(wo, ptrace, target)) {
1625 		retval = wait_consider_task(wo, ptrace, target);
1626 		if (retval)
1627 			return retval;
1628 	}
1629 
1630 	return 0;
1631 }
1632 
1633 long __do_wait(struct wait_opts *wo)
1634 {
1635 	long retval;
1636 
1637 	/*
1638 	 * If there is nothing that can match our criteria, just get out.
1639 	 * We will clear ->notask_error to zero if we see any child that
1640 	 * might later match our criteria, even if we are not able to reap
1641 	 * it yet.
1642 	 */
1643 	wo->notask_error = -ECHILD;
1644 	if ((wo->wo_type < PIDTYPE_MAX) &&
1645 	   (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
1646 		goto notask;
1647 
1648 	read_lock(&tasklist_lock);
1649 
1650 	if (wo->wo_type == PIDTYPE_PID) {
1651 		retval = do_wait_pid(wo);
1652 		if (retval)
1653 			return retval;
1654 	} else {
1655 		struct task_struct *tsk = current;
1656 
1657 		do {
1658 			retval = do_wait_thread(wo, tsk);
1659 			if (retval)
1660 				return retval;
1661 
1662 			retval = ptrace_do_wait(wo, tsk);
1663 			if (retval)
1664 				return retval;
1665 
1666 			if (wo->wo_flags & __WNOTHREAD)
1667 				break;
1668 		} while_each_thread(current, tsk);
1669 	}
1670 	read_unlock(&tasklist_lock);
1671 
1672 notask:
1673 	retval = wo->notask_error;
1674 	if (!retval && !(wo->wo_flags & WNOHANG))
1675 		return -ERESTARTSYS;
1676 
1677 	return retval;
1678 }
1679 
1680 static long do_wait(struct wait_opts *wo)
1681 {
1682 	int retval;
1683 
1684 	trace_sched_process_wait(wo->wo_pid);
1685 
1686 	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
1687 	wo->child_wait.private = current;
1688 	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1689 
1690 	do {
1691 		set_current_state(TASK_INTERRUPTIBLE);
1692 		retval = __do_wait(wo);
1693 		if (retval != -ERESTARTSYS)
1694 			break;
1695 		if (signal_pending(current))
1696 			break;
1697 		schedule();
1698 	} while (1);
1699 
1700 	__set_current_state(TASK_RUNNING);
1701 	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1702 	return retval;
1703 }
1704 
1705 int kernel_waitid_prepare(struct wait_opts *wo, int which, pid_t upid,
1706 			  struct waitid_info *infop, int options,
1707 			  struct rusage *ru)
1708 {
1709 	unsigned int f_flags = 0;
1710 	struct pid *pid = NULL;
1711 	enum pid_type type;
1712 
1713 	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
1714 			__WNOTHREAD|__WCLONE|__WALL))
1715 		return -EINVAL;
1716 	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
1717 		return -EINVAL;
1718 
1719 	switch (which) {
1720 	case P_ALL:
1721 		type = PIDTYPE_MAX;
1722 		break;
1723 	case P_PID:
1724 		type = PIDTYPE_PID;
1725 		if (upid <= 0)
1726 			return -EINVAL;
1727 
1728 		pid = find_get_pid(upid);
1729 		break;
1730 	case P_PGID:
1731 		type = PIDTYPE_PGID;
1732 		if (upid < 0)
1733 			return -EINVAL;
1734 
1735 		if (upid)
1736 			pid = find_get_pid(upid);
1737 		else
1738 			pid = get_task_pid(current, PIDTYPE_PGID);
1739 		break;
1740 	case P_PIDFD:
1741 		type = PIDTYPE_PID;
1742 		if (upid < 0)
1743 			return -EINVAL;
1744 
1745 		pid = pidfd_get_pid(upid, &f_flags);
1746 		if (IS_ERR(pid))
1747 			return PTR_ERR(pid);
1748 
1749 		break;
1750 	default:
1751 		return -EINVAL;
1752 	}
1753 
1754 	wo->wo_type	= type;
1755 	wo->wo_pid	= pid;
1756 	wo->wo_flags	= options;
1757 	wo->wo_info	= infop;
1758 	wo->wo_rusage	= ru;
1759 	if (f_flags & O_NONBLOCK)
1760 		wo->wo_flags |= WNOHANG;
1761 
1762 	return 0;
1763 }
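
/*
 * Illustrative userspace counterpart of the P_PIDFD branch above, assuming a
 * libc that exposes P_PIDFD and a kernel with pidfd_open(2) (sketch only,
 * child_pid is a placeholder; not part of this file). Note that O_NONBLOCK
 * on the pidfd is translated into WNOHANG right above.
 *
 *	#include <sys/syscall.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, child_pid, 0);
 *	siginfo_t info = { 0 };
 *
 *	if (waitid(P_PIDFD, pidfd, &info, WEXITED) == 0)
 *		info.si_code / info.si_status describe how the child died
 */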
1764 
1765 static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
1766 			  int options, struct rusage *ru)
1767 {
1768 	struct wait_opts wo;
1769 	long ret;
1770 
1771 	ret = kernel_waitid_prepare(&wo, which, upid, infop, options, ru);
1772 	if (ret)
1773 		return ret;
1774 
1775 	ret = do_wait(&wo);
1776 	if (!ret && !(options & WNOHANG) && (wo.wo_flags & WNOHANG))
1777 		ret = -EAGAIN;
1778 
1779 	put_pid(wo.wo_pid);
1780 	return ret;
1781 }
1782 
1783 SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
1784 		infop, int, options, struct rusage __user *, ru)
1785 {
1786 	struct rusage r;
1787 	struct waitid_info info = {.status = 0};
1788 	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
1789 	int signo = 0;
1790 
1791 	if (err > 0) {
1792 		signo = SIGCHLD;
1793 		err = 0;
1794 		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1795 			return -EFAULT;
1796 	}
1797 	if (!infop)
1798 		return err;
1799 
1800 	if (!user_write_access_begin(infop, sizeof(*infop)))
1801 		return -EFAULT;
1802 
1803 	unsafe_put_user(signo, &infop->si_signo, Efault);
1804 	unsafe_put_user(0, &infop->si_errno, Efault);
1805 	unsafe_put_user(info.cause, &infop->si_code, Efault);
1806 	unsafe_put_user(info.pid, &infop->si_pid, Efault);
1807 	unsafe_put_user(info.uid, &infop->si_uid, Efault);
1808 	unsafe_put_user(info.status, &infop->si_status, Efault);
1809 	user_write_access_end();
1810 	return err;
1811 Efault:
1812 	user_write_access_end();
1813 	return -EFAULT;
1814 }
1815 
1816 long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
1817 		  struct rusage *ru)
1818 {
1819 	struct wait_opts wo;
1820 	struct pid *pid = NULL;
1821 	enum pid_type type;
1822 	long ret;
1823 
1824 	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1825 			__WNOTHREAD|__WCLONE|__WALL))
1826 		return -EINVAL;
1827 
1828 	/* -INT_MIN is not defined */
1829 	if (upid == INT_MIN)
1830 		return -ESRCH;
1831 
1832 	if (upid == -1)
1833 		type = PIDTYPE_MAX;
1834 	else if (upid < 0) {
1835 		type = PIDTYPE_PGID;
1836 		pid = find_get_pid(-upid);
1837 	} else if (upid == 0) {
1838 		type = PIDTYPE_PGID;
1839 		pid = get_task_pid(current, PIDTYPE_PGID);
1840 	} else /* upid > 0 */ {
1841 		type = PIDTYPE_PID;
1842 		pid = find_get_pid(upid);
1843 	}
1844 
1845 	wo.wo_type	= type;
1846 	wo.wo_pid	= pid;
1847 	wo.wo_flags	= options | WEXITED;
1848 	wo.wo_info	= NULL;
1849 	wo.wo_stat	= 0;
1850 	wo.wo_rusage	= ru;
1851 	ret = do_wait(&wo);
1852 	put_pid(pid);
1853 	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
1854 		ret = -EFAULT;
1855 
1856 	return ret;
1857 }
1858 
1859 int kernel_wait(pid_t pid, int *stat)
1860 {
1861 	struct wait_opts wo = {
1862 		.wo_type	= PIDTYPE_PID,
1863 		.wo_pid		= find_get_pid(pid),
1864 		.wo_flags	= WEXITED,
1865 	};
1866 	int ret;
1867 
1868 	ret = do_wait(&wo);
1869 	if (ret > 0 && wo.wo_stat)
1870 		*stat = wo.wo_stat;
1871 	put_pid(wo.wo_pid);
1872 	return ret;
1873 }
1874 
1875 SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
1876 		int, options, struct rusage __user *, ru)
1877 {
1878 	struct rusage r;
1879 	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);
1880 
1881 	if (err > 0) {
1882 		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1883 			return -EFAULT;
1884 	}
1885 	return err;
1886 }
1887 
1888 #ifdef __ARCH_WANT_SYS_WAITPID
1889 
1890 /*
1891  * sys_waitpid() remains for compatibility. waitpid() should be
1892  * implemented by calling sys_wait4() from libc.a.
1893  */
1894 SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
1895 {
1896 	return kernel_wait4(pid, stat_addr, options, NULL);
1897 }
1898 
1899 #endif
1900 
1901 #ifdef CONFIG_COMPAT
1902 COMPAT_SYSCALL_DEFINE4(wait4,
1903 	compat_pid_t, pid,
1904 	compat_uint_t __user *, stat_addr,
1905 	int, options,
1906 	struct compat_rusage __user *, ru)
1907 {
1908 	struct rusage r;
1909 	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
1910 	if (err > 0) {
1911 		if (ru && put_compat_rusage(&r, ru))
1912 			return -EFAULT;
1913 	}
1914 	return err;
1915 }
1916 
1917 COMPAT_SYSCALL_DEFINE5(waitid,
1918 		int, which, compat_pid_t, pid,
1919 		struct compat_siginfo __user *, infop, int, options,
1920 		struct compat_rusage __user *, uru)
1921 {
1922 	struct rusage ru;
1923 	struct waitid_info info = {.status = 0};
1924 	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
1925 	int signo = 0;
1926 	if (err > 0) {
1927 		signo = SIGCHLD;
1928 		err = 0;
1929 		if (uru) {
1930 			/* kernel_waitid() overwrites everything in ru */
1931 			if (COMPAT_USE_64BIT_TIME)
1932 				err = copy_to_user(uru, &ru, sizeof(ru));
1933 			else
1934 				err = put_compat_rusage(&ru, uru);
1935 			if (err)
1936 				return -EFAULT;
1937 		}
1938 	}
1939 
1940 	if (!infop)
1941 		return err;
1942 
1943 	if (!user_write_access_begin(infop, sizeof(*infop)))
1944 		return -EFAULT;
1945 
1946 	unsafe_put_user(signo, &infop->si_signo, Efault);
1947 	unsafe_put_user(0, &infop->si_errno, Efault);
1948 	unsafe_put_user(info.cause, &infop->si_code, Efault);
1949 	unsafe_put_user(info.pid, &infop->si_pid, Efault);
1950 	unsafe_put_user(info.uid, &infop->si_uid, Efault);
1951 	unsafe_put_user(info.status, &infop->si_status, Efault);
1952 	user_write_access_end();
1953 	return err;
1954 Efault:
1955 	user_write_access_end();
1956 	return -EFAULT;
1957 }
1958 #endif
1959 
1960 /*
1961  * This needs to be __function_aligned as GCC implicitly makes any
1962  * implementation of abort() cold and drops alignment specified by
1963  * -falign-functions=N.
1964  *
1965  * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11
1966  */
1967 __weak __function_aligned void abort(void)
1968 {
1969 	BUG();
1970 
1971 	/* if that doesn't kill us, halt */
1972 	panic("Oops failed to kill thread");
1973 }
1974 EXPORT_SYMBOL(abort);
1975