// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/syscalls.c
 *
 *  Core kernel scheduler syscalls related code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/sched.h>
#include <linux/cpuset.h>
#include <linux/sched/debug.h>

#include <uapi/linux/sched/types.h>

#include "sched.h"
#include "autogroup.h"

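/*
 * __normal_prio - map a (policy, RT priority, nice) triple onto the
 * kernel's single priority scale: deadline tasks sit below all RT
 * priorities, RT tasks are ordered by inverted rt_prio, and fair tasks
 * use their nice-derived static priority.
 */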
static inline int __normal_prio(int policy, int rt_prio, int nice)
{
	int prio;

	if (dl_policy(policy))
		prio = MAX_DL_PRIO - 1;
	else if (rt_policy(policy))
		prio = MAX_RT_PRIO - 1 - rt_prio;
	else
		prio = NICE_TO_PRIO(nice);

	return prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_or_dl_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

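/**
 * set_user_nice - set the static nice value of a task from kernel space
 * @p: the task in question
 * @nice: the new nice value
 *
 * No-op updates and out-of-range nice values are silently ignored. A
 * usage sketch (hypothetical caller deprioritizing itself):
 *
 *	set_user_nice(current, 10);
 */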
void set_user_nice(struct task_struct *p, long nice)
{
	bool queued, running;
	struct rq *rq;
	int old_prio;

	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	CLASS(task_rq_lock, rq_guard)(p);
	rq = rq_guard.rq;

	update_rq_clock(rq);

	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling as long as the task is
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		return;
	}

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p, true);
	old_prio = p->prio;
	p->prio = effective_prio(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/*
	 * If the task increased its priority or is running and
	 * lowered its priority, then reschedule its CPU:
	 */
	p->sched_class->prio_changed(rq, p, old_prio);
}
EXPORT_SYMBOL(set_user_nice);

/*
 * is_nice_reduction - check if nice value is an actual reduction
 *
 * Similar to can_nice() but does not perform a capability check.
 *
 * @p: task
 * @nice: nice value
 */
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
	int nice_rlim = nice_to_rlimit(nice);

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
	nice = task_nice(current) + increment;

	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 * deadline                     -101             -1           0
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (rq->ttwu_pending)
		return 0;
#endif

	return 1;
}

/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

#ifdef CONFIG_SCHED_CORE
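/*
 * sched_core_idle_cpu - like idle_cpu(), except that with core
 * scheduling enabled a CPU running the idle task is reported as idle
 * even when it was forced idle on behalf of an SMT sibling.
 */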
int sched_core_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (sched_core_enabled(rq) && rq->curr == rq->idle)
		return 1;

	return idle_cpu(cpu);
}

#endif

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

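/*
 * find_get_task - RCU-protected task lookup that also takes a reference
 * on the found task, so it remains valid after the RCU read section
 * ends. Callers must drop the reference with put_task_struct().
 */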
static struct task_struct *find_get_task(pid_t pid)
{
	struct task_struct *p;
	guard(rcu)();

	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);

	return p;
}

DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
	     find_get_task(pid), pid_t pid)

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

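/*
 * Apply the policy and parameters in @attr to @p. This only updates the
 * task's fields; moving the task between runqueues or scheduling
 * classes is the caller's job (see __sched_setscheduler()).
 */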
static void __setscheduler_params(struct task_struct *p,
		const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy)) {
		__setparam_dl(p, attr);
	} else if (fair_policy(policy)) {
		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
		if (attr->sched_runtime) {
			p->se.custom_slice = 1;
			p->se.slice = clamp_t(u64, attr->sched_runtime,
					      NSEC_PER_MSEC/10,   /* HZ=1000 * 10 */
					      NSEC_PER_MSEC*100); /* HZ=100  / 10 */
		} else {
			p->se.custom_slice = 0;
			p->se.slice = sysctl_sched_base_slice;
		}
	}

	/* rt-policy tasks do not have a timerslack */
	if (rt_or_dl_task_policy(p)) {
		p->timer_slack_ns = 0;
	} else if (p->timer_slack_ns == 0) {
		/* when switching back to non-rt policy, restore timerslack */
		p->timer_slack_ns = p->default_timer_slack_ns;
	}

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	guard(rcu)();

	pcred = __task_cred(p);
	return (uid_eq(cred->euid, pcred->euid) ||
		uid_eq(cred->euid, pcred->uid));
}

#ifdef CONFIG_UCLAMP_TASK

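/*
 * Validate the requested utilization clamps: requested values must lie
 * in [0, SCHED_CAPACITY_SCALE] (-1 means "reset to default"), and a
 * requested minimum must not exceed a requested maximum.
 */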
static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

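/*
 * Update the task's requested clamp values: clamps being reset (per
 * uclamp_reset()) go back to their defaults, then any user-supplied
 * SCHED_FLAG_UTIL_CLAMP_{MIN,MAX} values are applied on top.
 */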
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT tasks by default have a 100% boost value that can be
		 * modified at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);
	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

#else /* !CONFIG_UCLAMP_TASK: */

static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
#endif

/*
 * Allow unprivileged RT tasks to decrease priority.
 * Only issue a capable test if needed and only once to avoid an audit
 * event on permitted non-privileged operations:
 */
static int user_check_sched_setscheduler(struct task_struct *p,
					 const struct sched_attr *attr,
					 int policy, int reset_on_fork)
{
	if (fair_policy(policy)) {
		if (attr->sched_nice < task_nice(p) &&
		    !is_nice_reduction(p, attr->sched_nice))
			goto req_priv;
	}

	if (rt_policy(policy)) {
		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);

		/* Can't set/change the rt policy: */
		if (policy != p->policy && !rlim_rtprio)
			goto req_priv;

		/* Can't increase priority: */
		if (attr->sched_priority > p->rt_priority &&
		    attr->sched_priority > rlim_rtprio)
			goto req_priv;
	}

	/*
	 * Can't set/change SCHED_DEADLINE policy at all for now
	 * (safest behavior); in the future we would like to allow
	 * unprivileged DL tasks to increase their relative deadline
	 * or reduce their runtime (both ways reducing utilization)
	 */
	if (dl_policy(policy))
		goto req_priv;

	/*
	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
	 */
	if (task_has_idle_policy(p) && !idle_policy(policy)) {
		if (!is_nice_reduction(p, task_nice(p)))
			goto req_priv;
	}

	/* Can't change other user's priorities: */
	if (!check_same_owner(p))
		goto req_priv;

	/* Normal users shall not reset the sched_reset_on_fork flag: */
	if (p->sched_reset_on_fork && !reset_on_fork)
		goto req_priv;

	return 0;

req_priv:
	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}

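/*
 * __sched_setscheduler - the workhorse behind all setscheduler()
 * variants: @user selects whether permission and security checks apply
 * (false for kernel-internal callers), @pi whether priority-inheritance
 * boosting is factored into the effective priority.
 */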
int __sched_setscheduler(struct task_struct *p,
			 const struct sched_attr *attr,
			 bool user, bool pi)
{
	int oldpolicy = -1, policy = attr->sched_policy;
	int retval, oldprio, newprio, queued, running;
	const struct sched_class *prev_class, *next_class;
	struct balance_callback *head;
	struct rq_flags rf;
	int reset_on_fork;
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq *rq;
	bool cpuset_locked = false;

	/* The pi code expects interrupts enabled */
	BUG_ON(pi && in_interrupt());
recheck:
	/* Double check policy once rq lock held: */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (!valid_policy(policy))
			return -EINVAL;
	}

	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	if (user) {
		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
		if (retval)
			return retval;

		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}

	/*
	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
	 * information.
	 */
	if (dl_policy(policy) || dl_policy(p->policy)) {
		cpuset_locked = true;
		cpuset_lock();
	}

	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}

	retval = scx_check_setscheduler(p, policy);
	if (retval)
		goto unlock;

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) &&
		    (attr->sched_nice != task_nice(p) ||
		     (attr->sched_runtime != p->se.slice)))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow real-time tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (cpuset_locked)
			cpuset_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	prev_class = p->sched_class;
	next_class = __setscheduler_class(policy, newprio);

	if (prev_class != next_class && p->se.sched_delayed)
		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
		__setscheduler_params(p, attr);
		p->sched_class = next_class;
		p->prio = newprio;
	}
	__setscheduler_uclamp(p, attr);
	check_class_changing(rq, p, prev_class);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Prevent the rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi) {
		if (cpuset_locked)
			cpuset_unlock();
		rt_mutex_adjust_pi(p);
	}

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	if (cpuset_locked)
		cpuset_unlock();
	return retval;
}

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice	= PRIO_TO_NICE(p->static_prio),
	};

	if (p->se.custom_slice)
		attr.sched_runtime = p->se.slice;

	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check, true);
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may already be dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);
}

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission.  For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still; it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);
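
/*
 * Usage sketch for the above (hypothetical driver thread; worker_fn and
 * the "worker" name are illustrative only):
 *
 *	struct task_struct *tsk = kthread_run(worker_fn, NULL, "worker");
 *
 *	if (!IS_ERR(tsk))
 *		sched_set_fifo(tsk);
 */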

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);

void sched_set_normal(struct task_struct *p, int nice)
{
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
		.sched_nice = nice,
	};
	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;

	if (!param || pid < 0)
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	return sched_setscheduler(p, policy, &lparam);
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
	u32 size;
	int ret;

	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

	return 0;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	return -E2BIG;
}

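/*
 * Fill @attr with the policy-specific scheduling parameters of @p;
 * roughly the read-side counterpart of __setscheduler_params().
 */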
static void get_params(struct task_struct *p, struct sched_attr *attr)
{
	if (task_has_dl_policy(p)) {
		__getparam_dl(p, attr);
	} else if (task_has_rt_policy(p)) {
		attr->sched_priority = p->rt_priority;
	} else {
		attr->sched_nice = task_nice(p);
		attr->sched_runtime = p->se.slice;
	}
}

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	int retval;

	if (!uattr || pid < 0 || flags)
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
		get_params(p, &attr);

	return sched_setattr(p, &attr);
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (!retval) {
		retval = p->policy;
		if (p->sched_reset_on_fork)
			retval |= SCHED_RESET_ON_FORK;
	}
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (!param || pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		if (task_has_rt_policy(p))
			lp.sched_priority = p->rt_priority;
	}

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
}

/*
 * Copy the kernel-sized attribute structure (which might be larger
 * than what user-space knows about) to user-space.
 *
 * Note that all cases are valid: the user-space buffer can be larger or
 * smaller than the kernel-space buffer. The usual case is that both
 * have the same size.
 */
static int
sched_attr_copy_to_user(struct sched_attr __user *uattr,
			struct sched_attr *kattr,
			unsigned int usize)
{
	unsigned int ksize = sizeof(*kattr);

	if (!access_ok(uattr, usize))
		return -EFAULT;

	/*
	 * sched_getattr() ABI forwards and backwards compatibility:
	 *
	 * If usize == ksize then we just copy everything to user-space and all is good.
	 *
	 * If usize < ksize then we only copy as much as user-space has space for,
	 * this keeps ABI compatibility as well. We skip the rest.
	 *
	 * If usize > ksize then user-space is using a newer version of the ABI,
	 * parts of which the kernel doesn't know about. Just ignore them - tooling
	 * can detect the kernel's knowledge of attributes from the attr->size value
	 * which is set to ksize in this case.
	 */
	kattr->size = min(usize, ksize);

	if (copy_to_user(uattr, kattr, kattr->size))
		return -EFAULT;

	return 0;
}

/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, usize, unsigned int, flags)
{
	struct sched_attr kattr = { };
	struct task_struct *p;
	int retval;

	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
	    usize < SCHED_ATTR_SIZE_VER0 || flags)
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		kattr.sched_policy = p->policy;
		if (p->sched_reset_on_fork)
			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		get_params(p, &kattr);
		kattr.sched_flags &= SCHED_FLAG_ALL;

#ifdef CONFIG_UCLAMP_TASK
		/*
		 * This could race with another potential updater, but this is fine
		 * because it'll correctly read the old or the new value. We don't need
		 * to guarantee who wins the race as long as it doesn't return garbage.
		 */
		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif
	}

	return sched_attr_copy_to_user(uattr, &kattr, usize);
}

#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	/*
	 * If the task isn't a deadline task or admission control is
	 * disabled then we don't care about affinity changes.
	 */
	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
		return 0;

	/*
	 * Since bandwidth control happens on a root_domain basis, if
	 * admission testing is enabled we only admit -deadline tasks
	 * that are allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
	guard(rcu)();
	if (!cpumask_subset(task_rq(p)->rd->span, mask))
		return -EBUSY;

	return 0;
}
#endif /* CONFIG_SMP */

int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);

	ctx->new_mask = new_mask;
	ctx->flags |= SCA_CHECK;

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;

	retval = __set_cpus_allowed_ptr(p, ctx);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	if (!cpumask_subset(new_mask, cpus_allowed)) {
		/*
		 * We must have raced with a concurrent cpuset update.
		 * Just reset the cpumask to the cpuset's cpus_allowed.
		 */
		cpumask_copy(new_mask, cpus_allowed);

		/*
		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
		 * will restore the previous user_cpus_ptr value.
		 *
		 * In the unlikely event a previous user_cpus_ptr exists,
		 * we need to further restrict the mask to what is allowed
		 * by that old user_cpus_ptr.
		 */
		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
			bool empty = !cpumask_and(new_mask, new_mask,
						  ctx->user_mask);

			if (WARN_ON_ONCE(empty))
				cpumask_copy(new_mask, cpus_allowed);
		}
		__set_cpus_allowed_ptr(p, ctx);
		retval = -EINVAL;
	}

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	struct affinity_context ac;
	struct cpumask *user_mask;
	int retval;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (p->flags & PF_NO_SETAFFINITY)
		return -EINVAL;

	if (!check_same_owner(p)) {
		guard(rcu)();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
			return -EPERM;
	}

	retval = security_task_setscheduler(p);
	if (retval)
		return retval;

	/*
	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
	 * alloc_user_cpus_ptr() returns NULL.
	 */
	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
	if (user_mask) {
		cpumask_copy(user_mask, in_mask);
	} else if (IS_ENABLED(CONFIG_SMP)) {
		return -ENOMEM;
	}

	ac = (struct affinity_context){
		.new_mask  = in_mask,
		.user_mask = user_mask,
		.flags     = SCA_USER,
	};

	retval = __sched_setaffinity(p, &ac);
	kfree(ac.user_mask);

	return retval;
}

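/*
 * Copy a user-space CPU mask of @len bytes into @new_mask: shorter
 * masks are zero-padded, longer ones are truncated to cpumask_size().
 */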
static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	int retval;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (retval)
		return retval;

	guard(raw_spinlock_irqsave)(&p->pi_lock);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);

	return 0;
}

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	int yielded = 0;

	scoped_guard (irqsave) {
		rq = this_rq();

again:
		p_rq = task_rq(p);
		/*
		 * If we're the only runnable task on the rq and target rq also
		 * has only one task, there's absolutely no point in yielding.
		 */
		if (rq->nr_running == 1 && p_rq->nr_running == 1)
			return -ESRCH;

		guard(double_rq_lock)(rq, p_rq);
		if (task_rq(p) != p_rq)
			goto again;

		if (!curr->sched_class->yield_to_task)
			return 0;

		if (curr->sched_class != p->sched_class)
			return 0;

		if (task_on_cpu(p_rq, p) || !task_is_running(p))
			return 0;

		yielded = curr->sched_class->yield_to_task(rq, p);
		if (yielded) {
			schedstat_inc(rq->yld_count);
			/*
			 * Make p's CPU reschedule; pick_next_entity
			 * takes care of fairness.
			 */
			if (preempt && rq != p_rq)
				resched_curr(p_rq);
		}
	}

	if (yielded)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling policy.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling policy.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling policy.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling policy.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
	}
	return ret;
}

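/*
 * Common implementation of the sched_rr_get_interval() syscalls: ask
 * the task's scheduling class for its round-robin interval; classes
 * without a get_rr_interval() method report 0, i.e. infinity.
 */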
static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
	unsigned int time_slice = 0;
	int retval;

	if (pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		struct task_struct *p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		scoped_guard (task_rq_lock, p) {
			struct rq *rq = scope.rq;
			if (p->sched_class->get_rr_interval)
				time_slice = p->sched_class->get_rr_interval(rq, p);
		}
	}

	jiffies_to_timespec64(time_slice, t);
	return 0;
}

/**
 * sys_sched_rr_get_interval - return the default time-slice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the time-slice value.
 *
 * This syscall writes the default time-slice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the time-slice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct __kernel_timespec __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_timespec64(&t, interval);

	return retval;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
		struct old_timespec32 __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_old_timespec32(&t, interval);
	return retval;
}
#endif