/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/pid.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	spinlock_t		siglock;
	refcount_t		count;
	wait_queue_head_t	signalfd_wqh;
	struct k_sigaction	action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	u64			ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	u64 expires;
	u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};
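
/*
 * Example (editor's sketch, not from the kernel tree): a
 * thread_group_cputimer whose atomic clock starts at zero can be
 * initialized with the macro above:
 *
 *	struct thread_group_cputimer cputimer = {
 *		.cputime_atomic = INIT_CPUTIME_ATOMIC,
 *	};
 */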

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	refcount_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	int			quick_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head	multiprocess;

	/* thread group exit support */
	int			group_exit_code;
	/* notify group_exec_task when notify_count is less than or equal to 0 */
	int			notify_count;
	struct task_struct	*group_exec_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	struct core_state *core_state; /* coredumping support */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphaned (double-forking) child processes
	 * to this process instead of 'init'. The service manager can
	 * receive SIGCHLD signals and can investigate the process until
	 * it calls wait(). All children of this process will inherit a
	 * flag telling them to look for a child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	unsigned int		next_posix_timer_id;
	struct hlist_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process. We use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
	 * values are defined as 0 and 1 respectively.
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	u64 gtime;
	u64 cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom.
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace).
					 * Deprecated: do not use in new code.
					 * Use exec_update_lock instead.
					 */
	struct rw_semaphore exec_update_lock;	/* Held while task_struct is
						 * being updated during exec,
						 * and may have inconsistent
						 * permissions.
						 */
} __randomize_layout;

/*
 * Bits in the flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}
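
/*
 * Example (editor's sketch, not from the kernel tree): the flags word is
 * protected by sighand->siglock, so signal_set_stop_flags() must be called
 * with that lock held, e.g. when recording a completed group stop (tsk is
 * any task in the group):
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	signal_set_stop_flags(tsk->signal, SIGNAL_STOP_STOPPED);
 *	spin_unlock_irq(&tsk->sighand->siglock);
 */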

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	enum pid_type __type;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(&task->blocked, &__info, &__type);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
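
/*
 * Example (editor's sketch): a kernel thread that has enabled delivery of
 * a signal with allow_signal() typically drains it this way once it sees
 * it pending:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		// ... do work ...
 *		if (signal_pending(current))
 *			kernel_dequeue_signal();
 *	}
 */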

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
		current->jobctl |= JOBCTL_STOPPED;
		set_special_state(TASK_STOPPED);
	}
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}

int force_sig_fault_to_task(int sig, int code, void __user *addr,
			    struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr);
int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
int send_sig_perf(void __user *addr, u32 type, u64 sig_data);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			struct task_struct *t);
int force_sig_seccomp(int syscall, int reason, bool force_coredump);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline void clear_notify_signal(void)
{
	clear_thread_flag(TIF_NOTIFY_SIGNAL);
	smp_mb__after_atomic();
}

/*
 * Returns 'true' if kick_process() is needed to force a transition from
 * user -> kernel, to guarantee an expedient run of TWA_SIGNAL-based
 * task_work.
 */
static inline bool __set_notify_signal(struct task_struct *task)
{
	return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
	       !wake_up_state(task, TASK_INTERRUPTIBLE);
}

/*
 * Called to break out of interruptible wait loops, and enter the
 * exit_to_user_mode_loop().
 */
static inline void set_notify_signal(struct task_struct *task)
{
	if (__set_notify_signal(task))
		kick_process(task);
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}
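
/*
 * Example (editor's sketch): a syscall that must re-run from scratch after
 * its wait was disturbed can return restart_syscall(); the forced
 * TIF_SIGPENDING sends the task through the signal path, where
 * -ERESTARTNOINTR transparently restarts the syscall:
 *
 *	if (signal_pending(current))
 *		return restart_syscall();
 */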

static inline int task_sigpending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending(struct task_struct *p)
{
	/*
	 * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
	 * behavior in terms of ensuring that we break out of wait loops
	 * so that notify signal callbacks can be processed.
	 */
	if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
		return 1;
	return task_sigpending(p);
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return task_sigpending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
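
/*
 * Example (editor's sketch of the core scheduler's usage): a task about
 * to block checks this before sleeping, so an already-pending (fatal)
 * signal keeps it runnable instead:
 *
 *	if (signal_pending_state(state, current))
 *		state = TASK_RUNNING;	// don't actually go to sleep
 */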

/*
 * This should only be used in fault handlers, to decide whether we
 * should stop the current fault routine and handle the signals
 * instead, especially in the case where we have been interrupted by
 * a VM_FAULT_RETRY.
 */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}
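
/*
 * Example (editor's sketch of a typical arch page-fault handler; names
 * follow the generic MM API, the label is arch-specific):
 *
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault_signal_pending(fault, regs)) {
 *		if (!user_mode(regs))
 *			goto no_context;	// kernel-mode fixup
 *		return;
 *	}
 */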

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
	unsigned int state = 0;
	if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
		t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
		state = TASK_WAKEKILL | __TASK_TRACED;
	}
	signal_wake_up_state(t, state);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	unsigned int state = 0;
	if (resume) {
		t->jobctl &= ~JOBCTL_TRACED;
		state = __TASK_TRACED;
	}
	signal_wake_up_state(t, state);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors.  These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag.  For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce.  We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else	/* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}

static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}

static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}

static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!signal_pending(current));
	else
		restore_saved_sigmask();
}
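
/*
 * Example (editor's sketch): the ppoll()-style pattern these helpers
 * exist for; the user-supplied mask is installed for the duration of the
 * wait and restored afterwards, unless a signal must first be delivered
 * with the temporary mask still in place:
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_sys_poll(ufds, nfds, to);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 */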

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)

static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

	return __on_sig_stack(sp);
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}
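
/*
 * Example (editor's sketch): an arch's signal-frame setup usually starts
 * from sigsp(), so SA_ONSTACK handlers land on the alternate stack. The
 * stack-pointer field name, frame type, and alignment are arch-specific:
 *
 *	unsigned long sp = sigsp(regs->sp, ksig);
 *	sp = round_down(sp - sizeof(struct rt_sigframe), 16);
 */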

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Without tasklist/siglock it is only rcu-safe if g can't exit/exec,
 * otherwise next_thread(t) will never reach g after list_del_rcu(g).
 */
#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define for_other_threads(p, t)	\
	for (t = p; (t = next_thread(t)) != p; )

#define __for_each_thread(signal, t)	\
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
		lockdep_is_held(&tasklist_lock))

#define for_each_thread(p, t)		\
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)
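
/*
 * Example (editor's sketch): visiting every thread in the system; the
 * whole traversal must be covered by rcu_read_lock() (or tasklist_lock):
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t) {
 *		// inspect t; don't block or drop the RCU lock here
 *	}
 *	rcu_read_unlock();
 */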

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without the tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}

static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

/*
 * Returns NULL if p is the last thread in the thread group.
 */
static inline struct task_struct *__next_thread(struct task_struct *p)
{
	return list_next_or_null_rcu(&p->signal->thread_head,
					&p->thread_node,
					struct task_struct,
					thread_node);
}

static inline struct task_struct *next_thread(struct task_struct *p)
{
	return __next_thread(p) ?: p->group_leader;
}

static inline int thread_group_empty(struct task_struct *p)
{
	return thread_group_leader(p) &&
	       list_is_last(&p->thread_node, &p->signal->thread_head);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(task, flags);
	(void)__cond_lock(&task->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
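
/*
 * Example (editor's sketch): __lock_task_sighand() returns NULL once the
 * task's sighand is gone, so the result must always be checked:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		// task->sighand is stable and siglock is held here
 *		unlock_task_sighand(task, &flags);
 *	}
 */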

#ifdef CONFIG_LOCKDEP
extern void lockdep_assert_task_sighand_held(struct task_struct *task);
#else
static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
#endif

static inline unsigned long task_rlimit(const struct task_struct *task,
		unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
		unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
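
/*
 * Example (editor's sketch, hypothetical variable): checking a request
 * against the caller's soft limit:
 *
 *	if (locked_bytes > rlimit(RLIMIT_MEMLOCK))
 *		return -ENOMEM;
 */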

#endif /* _LINUX_SCHED_SIGNAL_H */