1  /* SPDX-License-Identifier: GPL-2.0 */
2  /*
3   * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4   *
5   * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6   * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7   * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8   */
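
/*
 * Convert a sched_ext_ops member into an index by dividing its offset by the
 * size of a function pointer. For example, SCX_OP_IDX(select_cpu) is 0 and
 * SCX_OP_IDX(enqueue) is 1; see SCX_HAS_OP() and scx_has_op[] below for how
 * the indices are used.
 */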
9  #define SCX_OP_IDX(op)		(offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
10  
11  enum scx_consts {
12  	SCX_DSP_DFL_MAX_BATCH		= 32,
13  	SCX_DSP_MAX_LOOPS		= 32,
14  	SCX_WATCHDOG_MAX_TIMEOUT	= 30 * HZ,
15  
16  	SCX_EXIT_BT_LEN			= 64,
17  	SCX_EXIT_MSG_LEN		= 1024,
18  	SCX_EXIT_DUMP_DFL_LEN		= 32768,
19  
20  	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,
21  
22  	/*
23  	 * Iterating all tasks may take a while. Periodically drop
24  	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
25  	 */
26  	SCX_OPS_TASK_ITER_BATCH		= 32,
27  };
28  
29  enum scx_exit_kind {
30  	SCX_EXIT_NONE,
31  	SCX_EXIT_DONE,
32  
33  	SCX_EXIT_UNREG = 64,	/* user-space initiated unregistration */
34  	SCX_EXIT_UNREG_BPF,	/* BPF-initiated unregistration */
35  	SCX_EXIT_UNREG_KERN,	/* kernel-initiated unregistration */
36  	SCX_EXIT_SYSRQ,		/* requested by 'S' sysrq */
37  
38  	SCX_EXIT_ERROR = 1024,	/* runtime error, error msg contains details */
39  	SCX_EXIT_ERROR_BPF,	/* ERROR but triggered through scx_bpf_error() */
40  	SCX_EXIT_ERROR_STALL,	/* watchdog detected stalled runnable tasks */
41  };
42  
43  /*
44   * An exit code can be specified when exiting with scx_bpf_exit() or
45   * scx_ops_exit(), corresponding to exit_kind UNREG_BPF and UNREG_KERN
46   * respectively. The codes are 64-bit values in the following format:
47   *
48   *   Bits: [63  ..  48 47   ..  32 31 .. 0]
49   *         [ SYS ACT ] [ SYS RSN ] [ USR  ]
50   *
51   *   SYS ACT: System-defined exit actions
52   *   SYS RSN: System-defined exit reasons
53   *   USR    : User-defined exit codes and reasons
54   *
55   * Using the above, users may communicate intention and context by ORing system
56   * actions and/or system reasons with a user-defined exit code.
57   */
58  enum scx_exit_code {
59  	/* Reasons */
60  	SCX_ECODE_RSN_HOTPLUG	= 1LLU << 32,
61  
62  	/* Actions */
63  	SCX_ECODE_ACT_RESTART	= 1LLU << 48,
64  };
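
/*
 * Example (illustrative sketch, not kernel code): a BPF scheduler that wants
 * userspace to restart it after a hotplug event might exit with something
 * like the following, where MY_ECODE is a hypothetical user-defined value
 * occupying bits [31:0]:
 *
 *	scx_bpf_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG | MY_ECODE,
 *		     "cpu %d came online", cpu);
 *
 * The userspace loader can then match on %SCX_ECODE_ACT_RESTART in
 * scx_exit_info.exit_code and reload the scheduler.
 */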
65  
66  /*
67   * scx_exit_info is passed to ops.exit() to describe why the BPF scheduler is
68   * being disabled.
69   */
70  struct scx_exit_info {
71  	/* %SCX_EXIT_* - broad category of the exit reason */
72  	enum scx_exit_kind	kind;
73  
74  	/* exit code if gracefully exiting */
75  	s64			exit_code;
76  
77  	/* textual representation of the above */
78  	const char		*reason;
79  
80  	/* backtrace if exiting due to an error */
81  	unsigned long		*bt;
82  	u32			bt_len;
83  
84  	/* informational message */
85  	char			*msg;
86  
87  	/* debug dump */
88  	char			*dump;
89  };
90  
91  /* sched_ext_ops.flags */
92  enum scx_ops_flags {
93  	/*
94  	 * Keep built-in idle tracking even if ops.update_idle() is implemented.
95  	 */
96  	SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
97  
98  	/*
99  	 * By default, if there are no other tasks to run on the CPU, the ext core
100  	 * keeps running the current task even after its slice expires. If this
101  	 * flag is specified, such tasks are passed to ops.enqueue() with
102  	 * %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
103  	 */
104  	SCX_OPS_ENQ_LAST	= 1LLU << 1,
105  
106  	/*
107  	 * An exiting task may schedule after PF_EXITING is set. In such cases,
108  	 * bpf_task_from_pid() may not be able to find the task and if the BPF
109  	 * scheduler depends on pid lookup for dispatching, the task will be
110  	 * lost leading to various issues including RCU grace period stalls.
111  	 *
112  	 * To mask this problem, by default, unhashed tasks are automatically
113  	 * dispatched to the local DSQ on enqueue. If the BPF scheduler doesn't
114  	 * depend on pid lookups and wants to handle these tasks directly, the
115  	 * following flag can be used.
116  	 */
117  	SCX_OPS_ENQ_EXITING	= 1LLU << 2,
118  
119  	/*
120  	 * If set, only tasks with policy set to SCHED_EXT are attached to
121  	 * sched_ext. If clear, SCHED_NORMAL tasks are also included.
122  	 */
123  	SCX_OPS_SWITCH_PARTIAL	= 1LLU << 3,
124  
125  	/*
126  	 * CPU cgroup support flags
127  	 */
128  	SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16,	/* cpu.weight */
129  
130  	SCX_OPS_ALL_FLAGS	= SCX_OPS_KEEP_BUILTIN_IDLE |
131  				  SCX_OPS_ENQ_LAST |
132  				  SCX_OPS_ENQ_EXITING |
133  				  SCX_OPS_SWITCH_PARTIAL |
134  				  SCX_OPS_HAS_CGROUP_WEIGHT,
135  };
136  
137  /* argument container for ops.init_task() */
138  struct scx_init_task_args {
139  	/*
140  	 * Set if ops.init_task() is being invoked on the fork path, as opposed
141  	 * to the scheduler transition path.
142  	 */
143  	bool			fork;
144  #ifdef CONFIG_EXT_GROUP_SCHED
145  	/* the cgroup the task is joining */
146  	struct cgroup		*cgroup;
147  #endif
148  };
149  
150  /* argument container for ops.exit_task() */
151  struct scx_exit_task_args {
152  	/* Whether the task exited before running on sched_ext. */
153  	bool cancelled;
154  };
155  
156  /* argument container for ops->cgroup_init() */
157  struct scx_cgroup_init_args {
158  	/* the weight of the cgroup [1..10000] */
159  	u32			weight;
160  };
161  
162  enum scx_cpu_preempt_reason {
163  	/* next task is being scheduled by &rt_sched_class */
164  	SCX_CPU_PREEMPT_RT,
165  	/* next task is being scheduled by &dl_sched_class */
166  	SCX_CPU_PREEMPT_DL,
167  	/* next task is being scheduled by &stop_sched_class */
168  	SCX_CPU_PREEMPT_STOP,
169  	/* unknown reason for SCX being preempted */
170  	SCX_CPU_PREEMPT_UNKNOWN,
171  };
172  
173  /*
174   * Argument container for ops->cpu_acquire(). Currently empty, but may be
175   * expanded in the future.
176   */
177  struct scx_cpu_acquire_args {};
178  
179  /* argument container for ops->cpu_release() */
180  struct scx_cpu_release_args {
181  	/* the reason the CPU was preempted */
182  	enum scx_cpu_preempt_reason reason;
183  
184  	/* the task that's going to be scheduled on the CPU */
185  	struct task_struct	*task;
186  };
187  
188  /*
189   * Informational context provided to dump operations.
190   */
191  struct scx_dump_ctx {
192  	enum scx_exit_kind	kind;
193  	s64			exit_code;
194  	const char		*reason;
195  	u64			at_ns;
196  	u64			at_jiffies;
197  };
198  
199  /**
200   * struct sched_ext_ops - Operation table for BPF scheduler implementation
201   *
202   * Userland can implement an arbitrary scheduling policy by implementing and
203   * loading operations in this table.
204   */
205  struct sched_ext_ops {
206  	/**
207  	 * select_cpu - Pick the target CPU for a task which is being woken up
208  	 * @p: task being woken up
209  	 * @prev_cpu: the cpu @p was on before sleeping
210  	 * @wake_flags: SCX_WAKE_*
211  	 *
212  	 * The decision made here isn't final. @p may be moved to any CPU while it
213  	 * is getting dispatched for execution later. However, as @p is not on
214  	 * the rq at this point, getting the eventual execution CPU right here
215  	 * saves a small bit of overhead down the line.
216  	 *
217  	 * If an idle CPU is returned, the CPU is kicked and will try to
218  	 * dispatch. While an explicit custom mechanism can be added,
219  	 * select_cpu() serves as the default way to wake up idle CPUs.
220  	 *
221  	 * @p may be dispatched directly by calling scx_bpf_dispatch(). If @p
222  	 * is dispatched, the ops.enqueue() callback will be skipped. Finally,
223  	 * if @p is dispatched to SCX_DSQ_LOCAL, it will be dispatched to the
224  	 * local DSQ of whatever CPU is returned by this callback.
225  	 */
226  	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
227  
228  	/**
229  	 * enqueue - Enqueue a task on the BPF scheduler
230  	 * @p: task being enqueued
231  	 * @enq_flags: %SCX_ENQ_*
232  	 *
233  	 * @p is ready to run. Dispatch directly by calling scx_bpf_dispatch()
234  	 * or enqueue on the BPF scheduler. If not directly dispatched, the BPF
235  	 * scheduler owns @p and if it fails to dispatch @p, the task will
236  	 * stall.
237  	 *
238  	 * If @p was dispatched from ops.select_cpu(), this callback is
239  	 * skipped.
240  	 */
241  	void (*enqueue)(struct task_struct *p, u64 enq_flags);
242  
243  	/**
244  	 * dequeue - Remove a task from the BPF scheduler
245  	 * @p: task being dequeued
246  	 * @deq_flags: %SCX_DEQ_*
247  	 *
248  	 * Remove @p from the BPF scheduler. This is usually called to isolate
249  	 * the task while updating its scheduling properties (e.g. priority).
250  	 *
251  	 * The ext core keeps track of whether the BPF side owns a given task or
252  	 * not and can gracefully ignore spurious dispatches from the BPF side,
253  	 * which makes it safe to not implement this method. However, depending
254  	 * on the scheduling logic, this can lead to confusing behaviors - e.g.
255  	 * scheduling position not being updated across a priority change.
256  	 */
257  	void (*dequeue)(struct task_struct *p, u64 deq_flags);
258  
259  	/**
260  	 * dispatch - Dispatch tasks from the BPF scheduler and/or consume DSQs
261  	 * @cpu: CPU to dispatch tasks for
262  	 * @prev: previous task being switched out
263  	 *
264  	 * Called when a CPU's local dsq is empty. The operation should dispatch
265  	 * one or more tasks from the BPF scheduler into the DSQs using
266  	 * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using
267  	 * scx_bpf_consume().
268  	 *
269  	 * The maximum number of times scx_bpf_dispatch() can be called without
270  	 * an intervening scx_bpf_consume() is specified by
271  	 * ops.dispatch_max_batch. See the comments on top of the two functions
272  	 * for more details.
273  	 *
274  	 * When not %NULL, @prev is an SCX task with its slice depleted. If
275  	 * @prev is still runnable, as indicated by %SCX_TASK_QUEUED being set in
276  	 * @prev->scx.flags, it is not enqueued yet and will be enqueued after
277  	 * ops.dispatch() returns. To keep executing @prev, return without
278  	 * dispatching or consuming any tasks. Also see %SCX_OPS_ENQ_LAST.
279  	 */
280  	void (*dispatch)(s32 cpu, struct task_struct *prev);
281  
282  	/**
283  	 * tick - Periodic tick
284  	 * @p: task running currently
285  	 *
286  	 * This operation is called every 1/HZ seconds on CPUs which are
287  	 * executing an SCX task. Setting @p->scx.slice to 0 will trigger an
288  	 * immediate dispatch cycle on the CPU.
289  	 */
290  	void (*tick)(struct task_struct *p);
291  
292  	/**
293  	 * runnable - A task is becoming runnable on its associated CPU
294  	 * @p: task becoming runnable
295  	 * @enq_flags: %SCX_ENQ_*
296  	 *
297  	 * This and the following three functions can be used to track a task's
298  	 * execution state transitions. A task becomes ->runnable() on a CPU,
299  	 * and then goes through one or more ->running() and ->stopping() pairs
300  	 * as it runs on the CPU, and eventually becomes ->quiescent() when it's
301  	 * done running on the CPU.
302  	 *
303  	 * @p is becoming runnable on the CPU because it's
304  	 *
305  	 * - waking up (%SCX_ENQ_WAKEUP)
306  	 * - being moved from another CPU
307  	 * - being restored after being temporarily taken off the queue for an
308  	 *   attribute change.
309  	 *
310  	 * This and ->enqueue() are related but not coupled. This operation
311  	 * notifies @p's state transition and may not be followed by ->enqueue()
312  	 * e.g. when @p is being dispatched to a remote CPU, or when @p is
313  	 * being enqueued on a CPU experiencing a hotplug event. Likewise, a
314  	 * task may be ->enqueue()'d without being preceded by this operation
315  	 * e.g. after exhausting its slice.
316  	 */
317  	void (*runnable)(struct task_struct *p, u64 enq_flags);
318  
319  	/**
320  	 * running - A task is starting to run on its associated CPU
321  	 * @p: task starting to run
322  	 *
323  	 * See ->runnable() for an explanation of the task state notifiers.
324  	 */
325  	void (*running)(struct task_struct *p);
326  
327  	/**
328  	 * stopping - A task is stopping execution
329  	 * @p: task that is stopping
330  	 * @runnable: is task @p still runnable?
331  	 *
332  	 * See ->runnable() for an explanation of the task state notifiers. If
333  	 * !@runnable, ->quiescent() will be invoked after this operation
334  	 * returns.
335  	 */
336  	void (*stopping)(struct task_struct *p, bool runnable);
337  
338  	/**
339  	 * quiescent - A task is becoming not runnable on its associated CPU
340  	 * @p: task becoming not runnable
341  	 * @deq_flags: %SCX_DEQ_*
342  	 *
343  	 * See ->runnable() for an explanation of the task state notifiers.
344  	 *
345  	 * @p is becoming quiescent on the CPU because it's
346  	 *
347  	 * - sleeping (%SCX_DEQ_SLEEP)
348  	 * - being moved to another CPU
349  	 * - being temporarily taken off the queue for an attribute change
350  	 *   (%SCX_DEQ_SAVE)
351  	 *
352  	 * This and ->dequeue() are related but not coupled. This operation
353  	 * notifies @p's state transition and may not be preceded by ->dequeue()
354  	 * e.g. when @p is being dispatched to a remote CPU.
355  	 */
356  	void (*quiescent)(struct task_struct *p, u64 deq_flags);
357  
358  	/**
359  	 * yield - Yield CPU
360  	 * @from: yielding task
361  	 * @to: optional yield target task
362  	 *
363  	 * If @to is NULL, @from is yielding the CPU to other runnable tasks.
364  	 * The BPF scheduler should ensure that other available tasks are
365  	 * dispatched before the yielding task. Return value is ignored in this
366  	 * case.
367  	 *
368  	 * If @to is not NULL, @from wants to yield the CPU to @to. If the BPF
369  	 * scheduler can implement the request, return %true; otherwise, %false.
370  	 */
371  	bool (*yield)(struct task_struct *from, struct task_struct *to);
372  
373  	/**
374  	 * core_sched_before - Task ordering for core-sched
375  	 * @a: task A
376  	 * @b: task B
377  	 *
378  	 * Used by core-sched to determine the ordering between two tasks. See
379  	 * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on
380  	 * core-sched.
381  	 *
382  	 * Both @a and @b are runnable and may or may not currently be queued on
383  	 * the BPF scheduler. Should return %true if @a should run before @b, and
384  	 * %false if there's no required ordering or @b should run before @a.
385  	 *
386  	 * If not specified, the default is ordering them according to when they
387  	 * became runnable.
388  	 */
389  	bool (*core_sched_before)(struct task_struct *a, struct task_struct *b);
390  
391  	/**
392  	 * set_weight - Set task weight
393  	 * @p: task to set weight for
394  	 * @weight: new weight [1..10000]
395  	 *
396  	 * Update @p's weight to @weight.
397  	 */
398  	void (*set_weight)(struct task_struct *p, u32 weight);
399  
400  	/**
401  	 * set_cpumask - Set CPU affinity
402  	 * @p: task to set CPU affinity for
403  	 * @cpumask: cpumask of cpus that @p can run on
404  	 *
405  	 * Update @p's CPU affinity to @cpumask.
406  	 */
407  	void (*set_cpumask)(struct task_struct *p,
408  			    const struct cpumask *cpumask);
409  
410  	/**
411  	 * update_idle - Update the idle state of a CPU
412  	 * @cpu: CPU to update the idle state for
413  	 * @idle: whether entering or exiting the idle state
414  	 *
415  	 * This operation is called when @cpu enters or leaves the idle
416  	 * state. By default, implementing this operation disables the built-in
417  	 * idle CPU tracking and the following helpers become unavailable:
418  	 *
419  	 * - scx_bpf_select_cpu_dfl()
420  	 * - scx_bpf_test_and_clear_cpu_idle()
421  	 * - scx_bpf_pick_idle_cpu()
422  	 *
423  	 * The user must also implement ops.select_cpu() as the default
424  	 * implementation relies on scx_bpf_select_cpu_dfl().
425  	 *
426  	 * Specify the %SCX_OPS_KEEP_BUILTIN_IDLE flag to keep the built-in idle
427  	 * tracking.
428  	 */
429  	void (*update_idle)(s32 cpu, bool idle);
430  
431  	/**
432  	 * cpu_acquire - A CPU is becoming available to the BPF scheduler
433  	 * @cpu: The CPU being acquired by the BPF scheduler.
434  	 * @args: Acquire arguments, see the struct definition.
435  	 *
436  	 * A CPU that was previously released from the BPF scheduler is now once
437  	 * again under its control.
438  	 */
439  	void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args);
440  
441  	/**
442  	 * cpu_release - A CPU is taken away from the BPF scheduler
443  	 * @cpu: The CPU being released from the BPF scheduler.
444  	 * @args: Release arguments, see the struct definition.
445  	 *
446  	 * The specified CPU is no longer under the control of the BPF
447  	 * scheduler. This could be because it was preempted by a higher
448  	 * priority sched_class, though there may be other reasons as well. The
449  	 * caller should consult @args->reason to determine the cause.
450  	 */
451  	void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args);
452  
453  	/**
454  	 * init_task - Initialize a task to run in a BPF scheduler
455  	 * @p: task to initialize for BPF scheduling
456  	 * @args: init arguments, see the struct definition
457  	 *
458  	 * Either we're loading a BPF scheduler or a new task is being forked.
459  	 * Initialize @p for BPF scheduling. This operation may block and can
460  	 * be used for allocations, and is called exactly once for a task.
461  	 *
462  	 * Return 0 for success, -errno for failure. An error return while
463  	 * loading will abort loading of the BPF scheduler. During a fork, it
464  	 * will abort that specific fork.
465  	 */
466  	s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
467  
468  	/**
469  	 * exit_task - Exit a previously-running task from the system
470  	 * @p: task to exit
	 * @args: exit arguments, see the struct definition
471  	 *
472  	 * @p is exiting or the BPF scheduler is being unloaded. Perform any
473  	 * necessary cleanup for @p.
474  	 */
475  	void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
476  
477  	/**
478  	 * enable - Enable BPF scheduling for a task
479  	 * @p: task to enable BPF scheduling for
480  	 *
481  	 * Enable @p for BPF scheduling. enable() is called on @p any time it
482  	 * enters SCX, and is always paired with a matching disable().
483  	 */
484  	void (*enable)(struct task_struct *p);
485  
486  	/**
487  	 * disable - Disable BPF scheduling for a task
488  	 * @p: task to disable BPF scheduling for
489  	 *
490  	 * @p is exiting, leaving SCX or the BPF scheduler is being unloaded.
491  	 * Disable BPF scheduling for @p. A disable() call is always matched
492  	 * with a prior enable() call.
493  	 */
494  	void (*disable)(struct task_struct *p);
495  
496  	/**
497  	 * dump - Dump BPF scheduler state on error
498  	 * @ctx: debug dump context
499  	 *
500  	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump.
501  	 */
502  	void (*dump)(struct scx_dump_ctx *ctx);
503  
504  	/**
505  	 * dump_cpu - Dump BPF scheduler state for a CPU on error
506  	 * @ctx: debug dump context
507  	 * @cpu: CPU to generate debug dump for
508  	 * @idle: @cpu is currently idle without any runnable tasks
509  	 *
510  	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
511  	 * @cpu. If @idle is %true and this operation doesn't produce any
512  	 * output, @cpu is skipped for dump.
513  	 */
514  	void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle);
515  
516  	/**
517  	 * dump_task - Dump BPF scheduler state for a runnable task on error
518  	 * @ctx: debug dump context
519  	 * @p: runnable task to generate debug dump for
520  	 *
521  	 * Use scx_bpf_dump() to generate BPF scheduler specific debug dump for
522  	 * @p.
523  	 */
524  	void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
525  
526  #ifdef CONFIG_EXT_GROUP_SCHED
527  	/**
528  	 * cgroup_init - Initialize a cgroup
529  	 * @cgrp: cgroup being initialized
530  	 * @args: init arguments, see the struct definition
531  	 *
532  	 * Either the BPF scheduler is being loaded or @cgrp is being created;
533  	 * initialize @cgrp for sched_ext. This operation may block.
534  	 *
535  	 * Return 0 for success, -errno for failure. An error return while
536  	 * loading will abort loading of the BPF scheduler. During cgroup
537  	 * creation, it will abort the specific cgroup creation.
538  	 */
539  	s32 (*cgroup_init)(struct cgroup *cgrp,
540  			   struct scx_cgroup_init_args *args);
541  
542  	/**
543  	 * cgroup_exit - Exit a cgroup
544  	 * @cgrp: cgroup being exited
545  	 *
546  	 * Either the BPF scheduler is being unloaded or @cgrp is being
547  	 * destroyed; exit @cgrp for sched_ext. This operation may block.
548  	 */
549  	void (*cgroup_exit)(struct cgroup *cgrp);
550  
551  	/**
552  	 * cgroup_prep_move - Prepare a task to be moved to a different cgroup
553  	 * @p: task being moved
554  	 * @from: cgroup @p is being moved from
555  	 * @to: cgroup @p is being moved to
556  	 *
557  	 * Prepare @p for move from cgroup @from to @to. This operation may
558  	 * block and can be used for allocations.
559  	 *
560  	 * Return 0 for success, -errno for failure. An error return aborts the
561  	 * migration.
562  	 */
563  	s32 (*cgroup_prep_move)(struct task_struct *p,
564  				struct cgroup *from, struct cgroup *to);
565  
566  	/**
567  	 * cgroup_move - Commit cgroup move
568  	 * @p: task being moved
569  	 * @from: cgroup @p is being moved from
570  	 * @to: cgroup @p is being moved to
571  	 *
572  	 * Commit the move. @p is dequeued during this operation.
573  	 */
574  	void (*cgroup_move)(struct task_struct *p,
575  			    struct cgroup *from, struct cgroup *to);
576  
577  	/**
578  	 * cgroup_cancel_move - Cancel cgroup move
579  	 * @p: task whose cgroup move is being canceled
580  	 * @from: cgroup @p was being moved from
581  	 * @to: cgroup @p was being moved to
582  	 *
583  	 * @p was cgroup_prep_move()'d but failed before reaching cgroup_move().
584  	 * Undo the preparation.
585  	 */
586  	void (*cgroup_cancel_move)(struct task_struct *p,
587  				   struct cgroup *from, struct cgroup *to);
588  
589  	/**
590  	 * cgroup_set_weight - A cgroup's weight is being changed
591  	 * @cgrp: cgroup whose weight is being updated
592  	 * @weight: new weight [1..10000]
593  	 *
594  	 * Update @cgrp's weight to @weight.
595  	 */
596  	void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight);
597  #endif	/* CONFIG_EXT_GROUP_SCHED */
598  
599  	/*
600  	 * All online ops must come before ops.cpu_online().
601  	 */
602  
603  	/**
604  	 * cpu_online - A CPU became online
605  	 * @cpu: CPU which just came up
606  	 *
607  	 * @cpu just came online. @cpu will not call ops.enqueue() or
608  	 * ops.dispatch(), nor run tasks associated with other CPUs beforehand.
609  	 */
610  	void (*cpu_online)(s32 cpu);
611  
612  	/**
613  	 * cpu_offline - A CPU is going offline
614  	 * @cpu: CPU which is going offline
615  	 *
616  	 * @cpu is going offline. @cpu will not call ops.enqueue() or
617  	 * ops.dispatch(), nor run tasks associated with other CPUs afterwards.
618  	 */
619  	void (*cpu_offline)(s32 cpu);
620  
621  	/*
622  	 * All CPU hotplug ops must come before ops.init().
623  	 */
624  
625  	/**
626  	 * init - Initialize the BPF scheduler
627  	 */
628  	s32 (*init)(void);
629  
630  	/**
631  	 * exit - Clean up after the BPF scheduler
632  	 * @info: Exit info
633  	 *
634  	 * ops.exit() is also called on ops.init() failure, which is a bit
635  	 * unusual. This is to allow rich reporting through @info on how
636  	 * ops.init() failed.
637  	 */
638  	void (*exit)(struct scx_exit_info *info);
639  
640  	/**
641  	 * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch
642  	 */
643  	u32 dispatch_max_batch;
644  
645  	/**
646  	 * flags - %SCX_OPS_* flags
647  	 */
648  	u64 flags;
649  
650  	/**
651  	 * timeout_ms - The maximum amount of time, in milliseconds, that a
652  	 * runnable task should be able to wait before being scheduled. The
653  	 * maximum timeout may not exceed the default timeout of 30 seconds.
654  	 *
655  	 * Defaults to the maximum allowed timeout value of 30 seconds.
656  	 */
657  	u32 timeout_ms;
658  
659  	/**
660  	 * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default
661  	 * value of 32768 is used.
662  	 */
663  	u32 exit_dump_len;
664  
665  	/**
666  	 * hotplug_seq - A sequence number that may be set by the scheduler to
667  	 * detect when a hotplug event has occurred during the loading process.
668  	 * If 0, no detection occurs. Otherwise, the scheduler will fail to
669  	 * load if the sequence number does not match @scx_hotplug_seq on the
670  	 * enable path.
671  	 */
672  	u64 hotplug_seq;
673  
674  	/**
675  	 * name - BPF scheduler's name
676  	 *
677  	 * Must be a non-zero valid BPF object name including only isalnum(),
678  	 * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the
679  	 * BPF scheduler is enabled.
680  	 */
681  	char name[SCX_OPS_NAME_LEN];
682  };
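
/*
 * Example (illustrative BPF-side sketch, assuming the helpers from the
 * sched_ext BPF headers such as BPF_STRUCT_OPS and scx_bpf_dispatch()): a
 * minimal global-FIFO scheduler only needs to fill in a few callbacks:
 *
 *	void BPF_STRUCT_OPS(fifo_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops fifo_ops = {
 *		.enqueue	= (void *)fifo_enqueue,
 *		.name		= "fifo",
 *	};
 *
 * Callbacks left NULL fall back to the default behaviors documented above.
 */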
683  
684  enum scx_opi {
685  	SCX_OPI_BEGIN			= 0,
686  	SCX_OPI_NORMAL_BEGIN		= 0,
687  	SCX_OPI_NORMAL_END		= SCX_OP_IDX(cpu_online),
688  	SCX_OPI_CPU_HOTPLUG_BEGIN	= SCX_OP_IDX(cpu_online),
689  	SCX_OPI_CPU_HOTPLUG_END		= SCX_OP_IDX(init),
690  	SCX_OPI_END			= SCX_OP_IDX(init),
691  };
692  
693  enum scx_wake_flags {
694  	/* expose select WF_* flags as enums */
695  	SCX_WAKE_FORK		= WF_FORK,
696  	SCX_WAKE_TTWU		= WF_TTWU,
697  	SCX_WAKE_SYNC		= WF_SYNC,
698  };
699  
700  enum scx_enq_flags {
701  	/* expose select ENQUEUE_* flags as enums */
702  	SCX_ENQ_WAKEUP		= ENQUEUE_WAKEUP,
703  	SCX_ENQ_HEAD		= ENQUEUE_HEAD,
704  	SCX_ENQ_CPU_SELECTED	= ENQUEUE_RQ_SELECTED,
705  
706  	/* high 32bits are SCX specific */
707  
708  	/*
709  	 * Set the following to trigger preemption when calling
710  	 * scx_bpf_dispatch() with a local dsq as the target. The slice of the
711  	 * current task is cleared to zero and the CPU is kicked into the
712  	 * scheduling path. Implies %SCX_ENQ_HEAD.
713  	 */
714  	SCX_ENQ_PREEMPT		= 1LLU << 32,
715  
716  	/*
717  	 * The task being enqueued was previously enqueued on the current CPU's
718  	 * %SCX_DSQ_LOCAL, but was removed from it in a call to the
719  	 * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was
720  	 * invoked in a ->cpu_release() callback, and the task is again
721  	 * dispatched back to %SCX_DSQ_LOCAL by this current ->enqueue(), the
722  	 * task will not be scheduled on the CPU until at least the next invocation
723  	 * of the ->cpu_acquire() callback.
724  	 */
725  	SCX_ENQ_REENQ		= 1LLU << 40,
726  
727  	/*
728  	 * The task being enqueued is the only task available for the CPU. By
729  	 * default, the ext core keeps executing such tasks but when
730  	 * %SCX_OPS_ENQ_LAST is specified, they're ops.enqueue()'d with the
731  	 * %SCX_ENQ_LAST flag set.
732  	 *
733  	 * The BPF scheduler is responsible for triggering a follow-up
734  	 * scheduling event. Otherwise, execution may stall.
735  	 */
736  	SCX_ENQ_LAST		= 1LLU << 41,
737  
738  	/* high 8 bits are internal */
739  	__SCX_ENQ_INTERNAL_MASK	= 0xffLLU << 56,
740  
741  	SCX_ENQ_CLEAR_OPSS	= 1LLU << 56,
742  	SCX_ENQ_DSQ_PRIQ	= 1LLU << 57,
743  };
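
/*
 * Example (illustrative BPF-side sketch): a scheduler dispatching a
 * latency-critical task directly to the local DSQ can request preemption of
 * the currently running task with %SCX_ENQ_PREEMPT:
 *
 *	scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
 *			 enq_flags | SCX_ENQ_PREEMPT);
 */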
744  
745  enum scx_deq_flags {
746  	/* expose select DEQUEUE_* flags as enums */
747  	SCX_DEQ_SLEEP		= DEQUEUE_SLEEP,
748  
749  	/* high 32bits are SCX specific */
750  
751  	/*
752  	 * The generic core-sched layer decided to execute the task even though
753  	 * it hasn't been dispatched yet. Dequeue from the BPF side.
754  	 */
755  	SCX_DEQ_CORE_SCHED_EXEC	= 1LLU << 32,
756  };
757  
758  enum scx_pick_idle_cpu_flags {
759  	SCX_PICK_IDLE_CORE	= 1LLU << 0,	/* pick a CPU whose SMT siblings are also idle */
760  };
761  
762  enum scx_kick_flags {
763  	/*
764  	 * Kick the target CPU if idle. Guarantees that the target CPU goes
765  	 * through at least one full scheduling cycle before going idle. If the
766  	 * target CPU can be determined to be currently not idle and going to go
767  	 * through a scheduling cycle before going idle, this kick is a noop.
768  	 */
769  	SCX_KICK_IDLE		= 1LLU << 0,
770  
771  	/*
772  	 * Preempt the current task and execute the dispatch path. If the
773  	 * current task of the target CPU is an SCX task, its ->scx.slice is
774  	 * cleared to zero before the scheduling path is invoked so that the
775  	 * task expires and the dispatch path is invoked.
776  	 */
777  	SCX_KICK_PREEMPT	= 1LLU << 1,
778  
779  	/*
780  	 * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will
781  	 * return after the target CPU finishes picking the next task.
782  	 */
783  	SCX_KICK_WAIT		= 1LLU << 2,
784  };
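
/*
 * Example (illustrative BPF-side sketch): after queueing a task on a shared
 * DSQ, a scheduler might wake an idle CPU so that the task is picked up soon:
 *
 *	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *	if (cpu >= 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */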
785  
786  enum scx_tg_flags {
787  	SCX_TG_ONLINE		= 1U << 0,
788  	SCX_TG_INITED		= 1U << 1,
789  };
790  
791  enum scx_ops_enable_state {
792  	SCX_OPS_ENABLING,
793  	SCX_OPS_ENABLED,
794  	SCX_OPS_DISABLING,
795  	SCX_OPS_DISABLED,
796  };
797  
798  static const char *scx_ops_enable_state_str[] = {
799  	[SCX_OPS_ENABLING]	= "enabling",
800  	[SCX_OPS_ENABLED]	= "enabled",
801  	[SCX_OPS_DISABLING]	= "disabling",
802  	[SCX_OPS_DISABLED]	= "disabled",
803  };
804  
805  /*
806   * sched_ext_entity->ops_state
807   *
808   * Used to track the task ownership between the SCX core and the BPF scheduler.
809   * State transitions look as follows:
810   *
811   * NONE -> QUEUEING -> QUEUED -> DISPATCHING
812   *   ^              |                 |
813   *   |              v                 v
814   *   \-------------------------------/
815   *
816   * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
817   * sites for explanations on the conditions being waited upon and why they are
818   * safe. Transitions out of them into NONE or QUEUED must store_release and the
819   * waiters should load_acquire.
820   *
821   * Tracking scx_ops_state enables sched_ext core to reliably determine whether
822   * any given task can be dispatched by the BPF scheduler at all times and thus
823   * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
824   * to try to dispatch any task anytime regardless of its state as the SCX core
825   * can safely reject invalid dispatches.
826   */
827  enum scx_ops_state {
828  	SCX_OPSS_NONE,		/* owned by the SCX core */
829  	SCX_OPSS_QUEUEING,	/* in transit to the BPF scheduler */
830  	SCX_OPSS_QUEUED,	/* owned by the BPF scheduler */
831  	SCX_OPSS_DISPATCHING,	/* in transit back to the SCX core */
832  
833  	/*
834  	 * QSEQ brands each QUEUED instance so that, when dispatch races
835  	 * dequeue/requeue, the dispatcher can tell whether it still has a claim
836  	 * on the task being dispatched.
837  	 *
838  	 * As some 32bit archs can't do 64bit store_release/load_acquire,
839  	 * p->scx.ops_state is atomic_long_t which leaves 30 bits for QSEQ on
840  	 * 32bit machines. The dispatch race window QSEQ protects is very narrow
841  	 * and runs with IRQ disabled. 30 bits should be sufficient.
842  	 */
843  	SCX_OPSS_QSEQ_SHIFT	= 2,
844  };
845  
846  /* Use macros to ensure that the type is unsigned long for the masks */
847  #define SCX_OPSS_STATE_MASK	((1LU << SCX_OPSS_QSEQ_SHIFT) - 1)
848  #define SCX_OPSS_QSEQ_MASK	(~SCX_OPSS_STATE_MASK)
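
/*
 * For example, a value read from p->scx.ops_state splits into the current
 * state and the queue sequence number as follows:
 *
 *	unsigned long opss = atomic_long_read(&p->scx.ops_state);
 *	enum scx_ops_state state = opss & SCX_OPSS_STATE_MASK;
 *	unsigned long qseq = opss & SCX_OPSS_QSEQ_MASK;
 */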
849  
850  /*
851   * During exit, a task may schedule after losing its PIDs. When disabling the
852   * BPF scheduler, we need to be able to iterate tasks in every state to
853   * guarantee system safety. Maintain a dedicated task list which contains every
854   * task between its fork and eventual free.
855   */
856  static DEFINE_SPINLOCK(scx_tasks_lock);
857  static LIST_HEAD(scx_tasks);
858  
859  /* ops enable/disable */
860  static struct kthread_worker *scx_ops_helper;
861  static DEFINE_MUTEX(scx_ops_enable_mutex);
862  DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
863  DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
864  static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
865  static int scx_ops_bypass_depth;
866  static DEFINE_RAW_SPINLOCK(__scx_ops_bypass_lock);
867  static bool scx_ops_init_task_enabled;
868  static bool scx_switching_all;
869  DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
870  
871  static struct sched_ext_ops scx_ops;
872  static bool scx_warned_zero_slice;
873  
874  static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
875  static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
876  static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
877  static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
878  
879  static struct static_key_false scx_has_op[SCX_OPI_END] =
880  	{ [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
881  
882  static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
883  static struct scx_exit_info *scx_exit_info;
884  
885  static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
886  static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
887  
888  /*
889   * A monotonically increasing sequence number that is incremented every time a
890   * scheduler is enabled. This can be used to check if any custom sched_ext
891   * scheduler has ever been used in the system.
892   */
893  static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
894  
895  /*
896   * The maximum amount of time in jiffies that a task may be runnable without
897   * being scheduled on a CPU. If this timeout is exceeded, it will trigger
898   * scx_ops_error().
899   */
900  static unsigned long scx_watchdog_timeout;
901  
902  /*
903   * The last time the delayed work was run. This delayed work relies on
904   * ksoftirqd being able to run to service timer interrupts, so it's possible
905   * that this work itself could get wedged. To account for this, we check that
906   * it's not stalled in the timer tick, and trigger an error if it is.
907   */
908  static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
909  
910  static struct delayed_work scx_watchdog_work;
911  
912  /* idle tracking */
913  #ifdef CONFIG_SMP
914  #ifdef CONFIG_CPUMASK_OFFSTACK
915  #define CL_ALIGNED_IF_ONSTACK
916  #else
917  #define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
918  #endif
919  
920  static struct {
921  	cpumask_var_t cpu;
922  	cpumask_var_t smt;
923  } idle_masks CL_ALIGNED_IF_ONSTACK;
924  
925  #endif	/* CONFIG_SMP */
926  
927  /* for %SCX_KICK_WAIT */
928  static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
929  
930  /*
931   * Direct dispatch marker.
932   *
933   * Non-NULL values are used for direct dispatch from enqueue path. A valid
934   * pointer points to the task currently being enqueued. An ERR_PTR value is used
935   * to indicate that direct dispatch has already happened.
936   */
937  static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
938  
939  /*
940   * Dispatch queues.
941   *
942   * The global DSQ (%SCX_DSQ_GLOBAL) is split per-node for scalability. This is
943   * to avoid live-locking in bypass mode where all tasks are dispatched to
944   * %SCX_DSQ_GLOBAL and all CPUs consume from it. If per-node split isn't
945   * sufficient, it can be further split.
946   */
947  static struct scx_dispatch_q **global_dsqs;
948  
949  static const struct rhashtable_params dsq_hash_params = {
950  	.key_len		= 8,
951  	.key_offset		= offsetof(struct scx_dispatch_q, id),
952  	.head_offset		= offsetof(struct scx_dispatch_q, hash_node),
953  };
954  
955  static struct rhashtable dsq_hash;
956  static LLIST_HEAD(dsqs_to_free);
957  
958  /* dispatch buf */
959  struct scx_dsp_buf_ent {
960  	struct task_struct	*task;
961  	unsigned long		qseq;
962  	u64			dsq_id;
963  	u64			enq_flags;
964  };
965  
966  static u32 scx_dsp_max_batch;
967  
968  struct scx_dsp_ctx {
969  	struct rq		*rq;
970  	u32			cursor;
971  	u32			nr_tasks;
972  	struct scx_dsp_buf_ent	buf[];
973  };
974  
975  static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
976  
977  /* string formatting from BPF */
978  struct scx_bstr_buf {
979  	u64			data[MAX_BPRINTF_VARARGS];
980  	char			line[SCX_EXIT_MSG_LEN];
981  };
982  
983  static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
984  static struct scx_bstr_buf scx_exit_bstr_buf;
985  
986  /* ops debug dump */
987  struct scx_dump_data {
988  	s32			cpu;
989  	bool			first;
990  	s32			cursor;
991  	struct seq_buf		*s;
992  	const char		*prefix;
993  	struct scx_bstr_buf	buf;
994  };
995  
996  static struct scx_dump_data scx_dump_data = {
997  	.cpu			= -1,
998  };
999  
1000  /* /sys/kernel/sched_ext interface */
1001  static struct kset *scx_kset;
1002  static struct kobject *scx_root_kobj;
1003  
1004  #define CREATE_TRACE_POINTS
1005  #include <trace/events/sched_ext.h>
1006  
1007  static void process_ddsp_deferred_locals(struct rq *rq);
1008  static void scx_bpf_kick_cpu(s32 cpu, u64 flags);
1009  static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
1010  					     s64 exit_code,
1011  					     const char *fmt, ...);
1012  
1013  #define scx_ops_error_kind(err, fmt, args...)					\
1014  	scx_ops_exit_kind((err), 0, fmt, ##args)
1015  
1016  #define scx_ops_exit(code, fmt, args...)					\
1017  	scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)
1018  
1019  #define scx_ops_error(fmt, args...)						\
1020  	scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
1021  
1022  #define SCX_HAS_OP(op)	static_branch_likely(&scx_has_op[SCX_OP_IDX(op)])
1023  
1024  static long jiffies_delta_msecs(unsigned long at, unsigned long now)
1025  {
1026  	if (time_after(at, now))
1027  		return jiffies_to_msecs(at - now);
1028  	else
1029  		return -(long)jiffies_to_msecs(now - at);
1030  }
1031  
1032  /* if the highest set bit is N, return a mask with bits [N+1, 31] set */
1033  static u32 higher_bits(u32 flags)
1034  {
1035  	return ~((1 << fls(flags)) - 1);
1036  }
1037  
1038  /* return the mask with only the highest bit set */
1039  static u32 highest_bit(u32 flags)
1040  {
1041  	int bit = fls(flags);
1042  	return ((u64)1 << bit) >> 1;
1043  }
1044  
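/*
 * Wraparound-safe "a is before b" test for u32 sequence numbers: the u32
 * subtraction wraps and the result is interpreted as s32, so e.g.
 * u32_before(0xfffffffe, 1) is true even though 0xfffffffe > 1 numerically.
 */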
1045  static bool u32_before(u32 a, u32 b)
1046  {
1047  	return (s32)(a - b) < 0;
1048  }
1049  
1050  static struct scx_dispatch_q *find_global_dsq(struct task_struct *p)
1051  {
1052  	return global_dsqs[cpu_to_node(task_cpu(p))];
1053  }
1054  
1055  static struct scx_dispatch_q *find_user_dsq(u64 dsq_id)
1056  {
1057  	return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
1058  }
1059  
1060  /*
1061   * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
1062   * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
1063   * the allowed kfuncs and those kfuncs should use scx_kf_allowed() to check
1064   * whether it's running from an allowed context.
1065   *
1066   * @mask is constant, always inline to cull the mask calculations.
1067   */
1068  static __always_inline void scx_kf_allow(u32 mask)
1069  {
1070  	/* nesting is allowed only in increasing scx_kf_mask order */
1071  	WARN_ONCE((mask | higher_bits(mask)) & current->scx.kf_mask,
1072  		  "invalid nesting current->scx.kf_mask=0x%x mask=0x%x\n",
1073  		  current->scx.kf_mask, mask);
1074  	current->scx.kf_mask |= mask;
1075  	barrier();
1076  }
1077  
1078  static void scx_kf_disallow(u32 mask)
1079  {
1080  	barrier();
1081  	current->scx.kf_mask &= ~mask;
1082  }
1083  
1084  #define SCX_CALL_OP(mask, op, args...)						\
1085  do {										\
1086  	if (mask) {								\
1087  		scx_kf_allow(mask);						\
1088  		scx_ops.op(args);						\
1089  		scx_kf_disallow(mask);						\
1090  	} else {								\
1091  		scx_ops.op(args);						\
1092  	}									\
1093  } while (0)
1094  
1095  #define SCX_CALL_OP_RET(mask, op, args...)					\
1096  ({										\
1097  	__typeof__(scx_ops.op(args)) __ret;					\
1098  	if (mask) {								\
1099  		scx_kf_allow(mask);						\
1100  		__ret = scx_ops.op(args);					\
1101  		scx_kf_disallow(mask);						\
1102  	} else {								\
1103  		__ret = scx_ops.op(args);					\
1104  	}									\
1105  	__ret;									\
1106  })
1107  
1108  /*
1109   * Some kfuncs are allowed only on the tasks that are subjects of the
1110   * in-progress scx_ops operation for, e.g., locking guarantees. To enforce such
1111   * restrictions, the following SCX_CALL_OP_*() variants should be used when
1112   * invoking scx_ops operations that take task arguments. These can only be used
1113   * for non-nesting operations due to the way the tasks are tracked.
1114   *
1115   * kfuncs which can only operate on such tasks can in turn use
1116   * scx_kf_allowed_on_arg_tasks() to test whether the invocation is allowed on
1117   * the specific task.
1118   */
1119  #define SCX_CALL_OP_TASK(mask, op, task, args...)				\
1120  do {										\
1121  	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1122  	current->scx.kf_tasks[0] = task;					\
1123  	SCX_CALL_OP(mask, op, task, ##args);					\
1124  	current->scx.kf_tasks[0] = NULL;					\
1125  } while (0)
1126  
1127  #define SCX_CALL_OP_TASK_RET(mask, op, task, args...)				\
1128  ({										\
1129  	__typeof__(scx_ops.op(task, ##args)) __ret;				\
1130  	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1131  	current->scx.kf_tasks[0] = task;					\
1132  	__ret = SCX_CALL_OP_RET(mask, op, task, ##args);			\
1133  	current->scx.kf_tasks[0] = NULL;					\
1134  	__ret;									\
1135  })
1136  
1137  #define SCX_CALL_OP_2TASKS_RET(mask, op, task0, task1, args...)			\
1138  ({										\
1139  	__typeof__(scx_ops.op(task0, task1, ##args)) __ret;			\
1140  	BUILD_BUG_ON((mask) & ~__SCX_KF_TERMINAL);				\
1141  	current->scx.kf_tasks[0] = task0;					\
1142  	current->scx.kf_tasks[1] = task1;					\
1143  	__ret = SCX_CALL_OP_RET(mask, op, task0, task1, ##args);		\
1144  	current->scx.kf_tasks[0] = NULL;					\
1145  	current->scx.kf_tasks[1] = NULL;					\
1146  	__ret;									\
1147  })
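
/*
 * Example (illustrative): the wakeup path invokes ops.select_cpu() roughly as
 *
 *	cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
 *				   select_cpu, p, prev_cpu, wake_flags);
 *
 * which allows the enqueue/select_cpu kfuncs for the duration of the call and
 * marks @p as the task the operation is allowed to act on.
 */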
1148  
1149  /* @mask is constant, always inline to cull unnecessary branches */
1150  static __always_inline bool scx_kf_allowed(u32 mask)
1151  {
1152  	if (unlikely(!(current->scx.kf_mask & mask))) {
1153  		scx_ops_error("kfunc with mask 0x%x called from an operation only allowing 0x%x",
1154  			      mask, current->scx.kf_mask);
1155  		return false;
1156  	}
1157  
1158  	/*
1159  	 * Enforce nesting boundaries. e.g. A kfunc which can be called from
1160  	 * DISPATCH must not be called if we're running DEQUEUE which is nested
1161  	 * inside ops.dispatch(). We don't need to check boundaries for any
1162  	 * blocking kfuncs as the verifier ensures they're only called from
1163  	 * sleepable progs.
1164  	 */
1165  	if (unlikely(highest_bit(mask) == SCX_KF_CPU_RELEASE &&
1166  		     (current->scx.kf_mask & higher_bits(SCX_KF_CPU_RELEASE)))) {
1167  		scx_ops_error("cpu_release kfunc called from a nested operation");
1168  		return false;
1169  	}
1170  
1171  	if (unlikely(highest_bit(mask) == SCX_KF_DISPATCH &&
1172  		     (current->scx.kf_mask & higher_bits(SCX_KF_DISPATCH)))) {
1173  		scx_ops_error("dispatch kfunc called from a nested operation");
1174  		return false;
1175  	}
1176  
1177  	return true;
1178  }
1179  
1180  /* see SCX_CALL_OP_TASK() */
1181  static __always_inline bool scx_kf_allowed_on_arg_tasks(u32 mask,
1182  							struct task_struct *p)
1183  {
1184  	if (!scx_kf_allowed(mask))
1185  		return false;
1186  
1187  	if (unlikely((p != current->scx.kf_tasks[0] &&
1188  		      p != current->scx.kf_tasks[1]))) {
1189  		scx_ops_error("called on a task not being operated on");
1190  		return false;
1191  	}
1192  
1193  	return true;
1194  }
1195  
1196  static bool scx_kf_allowed_if_unlocked(void)
1197  {
1198  	return !current->scx.kf_mask;
1199  }
1200  
1201  /**
1202   * nldsq_next_task - Iterate to the next task in a non-local DSQ
1203   * @dsq: non-local DSQ being iterated
1204   * @cur: current position, %NULL to start iteration
1205   * @rev: walk backwards
1206   *
1207   * Returns %NULL when iteration is finished.
1208   */
1209  static struct task_struct *nldsq_next_task(struct scx_dispatch_q *dsq,
1210  					   struct task_struct *cur, bool rev)
1211  {
1212  	struct list_head *list_node;
1213  	struct scx_dsq_list_node *dsq_lnode;
1214  
1215  	lockdep_assert_held(&dsq->lock);
1216  
1217  	if (cur)
1218  		list_node = &cur->scx.dsq_list.node;
1219  	else
1220  		list_node = &dsq->list;
1221  
1222  	/* find the next task, need to skip BPF iteration cursors */
1223  	do {
1224  		if (rev)
1225  			list_node = list_node->prev;
1226  		else
1227  			list_node = list_node->next;
1228  
1229  		if (list_node == &dsq->list)
1230  			return NULL;
1231  
1232  		dsq_lnode = container_of(list_node, struct scx_dsq_list_node,
1233  					 node);
1234  	} while (dsq_lnode->flags & SCX_DSQ_LNODE_ITER_CURSOR);
1235  
1236  	return container_of(dsq_lnode, struct task_struct, scx.dsq_list);
1237  }
1238  
1239  #define nldsq_for_each_task(p, dsq)						\
1240  	for ((p) = nldsq_next_task((dsq), NULL, false); (p);			\
1241  	     (p) = nldsq_next_task((dsq), (p), false))
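
/*
 * Example (illustrative): counting the tasks queued on a non-local DSQ while
 * holding dsq->lock:
 *
 *	struct task_struct *p;
 *	u32 nr = 0;
 *
 *	nldsq_for_each_task(p, dsq)
 *		nr++;
 */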
1242  
1243  
1244  /*
1245   * BPF DSQ iterator. Tasks in a non-local DSQ can be iterated in [reverse]
1246   * dispatch order. BPF-visible iterator is opaque and larger to allow future
1247   * changes without breaking backward compatibility. Can be used with
1248   * bpf_for_each(). See bpf_iter_scx_dsq_*().
1249   */
1250  enum scx_dsq_iter_flags {
1251  	/* iterate in the reverse dispatch order */
1252  	SCX_DSQ_ITER_REV		= 1U << 16,
1253  
1254  	__SCX_DSQ_ITER_HAS_SLICE	= 1U << 30,
1255  	__SCX_DSQ_ITER_HAS_VTIME	= 1U << 31,
1256  
1257  	__SCX_DSQ_ITER_USER_FLAGS	= SCX_DSQ_ITER_REV,
1258  	__SCX_DSQ_ITER_ALL_FLAGS	= __SCX_DSQ_ITER_USER_FLAGS |
1259  					  __SCX_DSQ_ITER_HAS_SLICE |
1260  					  __SCX_DSQ_ITER_HAS_VTIME,
1261  };
1262  
1263  struct bpf_iter_scx_dsq_kern {
1264  	struct scx_dsq_list_node	cursor;
1265  	struct scx_dispatch_q		*dsq;
1266  	u64				slice;
1267  	u64				vtime;
1268  } __attribute__((aligned(8)));
1269  
1270  struct bpf_iter_scx_dsq {
1271  	u64				__opaque[6];
1272  } __attribute__((aligned(8)));
1273  
1274  
1275  /*
1276   * SCX task iterator.
1277   */
1278  struct scx_task_iter {
1279  	struct sched_ext_entity		cursor;
1280  	struct task_struct		*locked;
1281  	struct rq			*rq;
1282  	struct rq_flags			rf;
1283  	u32				cnt;
1284  };
1285  
1286  /**
1287   * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
1288   * @iter: iterator to init
1289   *
1290   * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
1291   * must eventually be stopped with scx_task_iter_stop().
1292   *
1293   * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
1294   * between this and the first next() call or between any two next() calls. If
1295   * the locks are released between two next() calls, the caller is responsible
1296   * for ensuring that the task being iterated remains accessible either through
1297   * RCU read lock or obtaining a reference count.
1298   *
1299   * All tasks which existed when the iteration started are guaranteed to be
1300   * visited as long as they still exist.
1301   */
1302  static void scx_task_iter_start(struct scx_task_iter *iter)
1303  {
1304  	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
1305  		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
1306  
1307  	spin_lock_irq(&scx_tasks_lock);
1308  
1309  	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
1310  	list_add(&iter->cursor.tasks_node, &scx_tasks);
1311  	iter->locked = NULL;
1312  	iter->cnt = 0;
1313  }
1314  
1315  static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
1316  {
1317  	if (iter->locked) {
1318  		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
1319  		iter->locked = NULL;
1320  	}
1321  }
1322  
1323  /**
1324   * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
1325   * @iter: iterator to unlock
1326   *
1327   * If @iter is in the middle of a locked iteration, it may be locking the rq of
1328   * the task currently being visited in addition to scx_tasks_lock. Unlock both.
1329   * This function can be safely called anytime during an iteration.
1330   */
1331  static void scx_task_iter_unlock(struct scx_task_iter *iter)
1332  {
1333  	__scx_task_iter_rq_unlock(iter);
1334  	spin_unlock_irq(&scx_tasks_lock);
1335  }
1336  
1337  /**
1338   * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
1339   * @iter: iterator to re-lock
1340   *
1341   * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
1342   * doesn't re-lock the rq lock. Must be called before other iterator operations.
1343   */
1344  static void scx_task_iter_relock(struct scx_task_iter *iter)
1345  {
1346  	spin_lock_irq(&scx_tasks_lock);
1347  }
1348  
1349  /**
1350   * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
1351   * @iter: iterator to exit
1352   *
1353   * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
1354   * which is released on return. If the iterator holds a task's rq lock, that rq
1355   * lock is also released. See scx_task_iter_start() for details.
1356   */
1357  static void scx_task_iter_stop(struct scx_task_iter *iter)
1358  {
1359  	list_del_init(&iter->cursor.tasks_node);
1360  	scx_task_iter_unlock(iter);
1361  }
1362  
1363  /**
1364   * scx_task_iter_next - Next task
1365   * @iter: iterator to walk
1366   *
1367   * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
1368   * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing
1369   * stalls by holding scx_tasks_lock for too long.
1370   */
1371  static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
1372  {
1373  	struct list_head *cursor = &iter->cursor.tasks_node;
1374  	struct sched_ext_entity *pos;
1375  
1376  	if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) {
1377  		scx_task_iter_unlock(iter);
1378  		cond_resched();
1379  		scx_task_iter_relock(iter);
1380  	}
1381  
1382  	list_for_each_entry(pos, cursor, tasks_node) {
1383  		if (&pos->tasks_node == &scx_tasks)
1384  			return NULL;
1385  		if (!(pos->flags & SCX_TASK_CURSOR)) {
1386  			list_move(cursor, &pos->tasks_node);
1387  			return container_of(pos, struct task_struct, scx);
1388  		}
1389  	}
1390  
1391  	/* can't happen, should always terminate at scx_tasks above */
1392  	BUG();
1393  }
1394  
1395  /**
1396   * scx_task_iter_next_locked - Next non-idle task with its rq locked
1397   * @iter: iterator to walk
1398   *
1399   * Visit the next non-idle task with its rq lock held. See
1400   * scx_task_iter_start() for details.
1403   */
1404  static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
1405  {
1406  	struct task_struct *p;
1407  
1408  	__scx_task_iter_rq_unlock(iter);
1409  
1410  	while ((p = scx_task_iter_next(iter))) {
1411  		/*
1412  		 * scx_task_iter is used to prepare and move tasks into SCX
1413  		 * while loading the BPF scheduler and vice-versa while
1414  		 * unloading. The init_tasks ("swappers") should be excluded
1415  		 * from the iteration because:
1416  		 *
1417  		 * - It's unsafe to use __setscheduler_prio() on an init_task to
1418  		 *   determine the sched_class to use as it won't preserve its
1419  		 *   idle_sched_class.
1420  		 *
1421  		 * - ops.init/exit_task() can easily be confused if called with
1422  		 *   init_tasks as they, e.g., share PID 0.
1423  		 *
1424  		 * As init_tasks are never scheduled through SCX, they can be
1425  		 * skipped safely. Note that is_idle_task() which tests %PF_IDLE
1426  		 * doesn't work here:
1427  		 *
1428  		 * - %PF_IDLE may not be set for an init_task whose CPU hasn't
1429  		 *   yet been onlined.
1430  		 *
1431  		 * - %PF_IDLE can be set on tasks that are not init_tasks. See
1432  		 *   play_idle_precise() used by CONFIG_IDLE_INJECT.
1433  		 *
1434  		 * Test for idle_sched_class as only init_tasks are on it.
1435  		 */
1436  		if (p->sched_class != &idle_sched_class)
1437  			break;
1438  	}
1439  	if (!p)
1440  		return NULL;
1441  
1442  	iter->rq = task_rq_lock(p, &iter->rf);
1443  	iter->locked = p;
1444  
1445  	return p;
1446  }
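
/*
 * Example (illustrative): the enable/disable paths walk every non-idle task
 * with its rq locked using a pattern like the following, where update_task()
 * stands in for whatever per-task work is needed (done with both the task's
 * rq lock and scx_tasks_lock held):
 *
 *	struct scx_task_iter sti;
 *	struct task_struct *p;
 *
 *	scx_task_iter_start(&sti);
 *	while ((p = scx_task_iter_next_locked(&sti)))
 *		update_task(p);
 *	scx_task_iter_stop(&sti);
 */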
1447  
1448  static enum scx_ops_enable_state scx_ops_enable_state(void)
1449  {
1450  	return atomic_read(&scx_ops_enable_state_var);
1451  }
1452  
1453  static enum scx_ops_enable_state
1454  scx_ops_set_enable_state(enum scx_ops_enable_state to)
1455  {
1456  	return atomic_xchg(&scx_ops_enable_state_var, to);
1457  }
1458  
1459  static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
1460  					enum scx_ops_enable_state from)
1461  {
1462  	int from_v = from;
1463  
1464  	return atomic_try_cmpxchg(&scx_ops_enable_state_var, &from_v, to);
1465  }
1466  
1467  static bool scx_rq_bypassing(struct rq *rq)
1468  {
1469  	return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
1470  }
1471  
1472  /**
1473   * wait_ops_state - Busy-wait the specified ops state to end
1474   * @p: target task
1475   * @opss: state to wait the end of
1476   *
1477   * Busy-wait for @p to transition out of @opss. This can only be used when the
1478   * state part of @opss is %SCX_OPSS_QUEUEING or %SCX_OPSS_DISPATCHING. This
1479   * function also has load_acquire semantics to ensure that the caller can see
1480   * the updates made in the enqueueing and dispatching paths.
1481   */
1482  static void wait_ops_state(struct task_struct *p, unsigned long opss)
1483  {
1484  	do {
1485  		cpu_relax();
1486  	} while (atomic_long_read_acquire(&p->scx.ops_state) == opss);
1487  }
1488  
1489  /**
1490   * ops_cpu_valid - Verify a cpu number
1491   * @cpu: cpu number which came from a BPF ops
1492   * @where: extra information reported on error
1493   *
1494   * @cpu is a cpu number which came from the BPF scheduler and can be any value.
1495   * Verify that it is in range and one of the possible cpus. If invalid, trigger
1496   * an ops error.
1497   */
1498  static bool ops_cpu_valid(s32 cpu, const char *where)
1499  {
1500  	if (likely(cpu >= 0 && cpu < nr_cpu_ids && cpu_possible(cpu))) {
1501  		return true;
1502  	} else {
1503  		scx_ops_error("invalid CPU %d%s%s", cpu,
1504  			      where ? " " : "", where ?: "");
1505  		return false;
1506  	}
1507  }
1508  
1509  /**
1510   * ops_sanitize_err - Sanitize a -errno value
1511   * @ops_name: operation to blame on failure
1512   * @err: -errno value to sanitize
1513   *
1514   * Verify @err is a valid -errno. If not, trigger scx_ops_error() and return
1515   * -%EPROTO. This is necessary because returning a rogue -errno up the chain can
1516   * cause misbehaviors. For example, a large negative return from
1517   * ops.init_task() triggers an oops when passed up the call chain because the
1518   * value fails IS_ERR() test after being encoded with ERR_PTR() and then is
1519   * handled as a pointer.
1520   */
1521  static int ops_sanitize_err(const char *ops_name, s32 err)
1522  {
1523  	if (err < 0 && err >= -MAX_ERRNO)
1524  		return err;
1525  
1526  	scx_ops_error("ops.%s() returned an invalid errno %d", ops_name, err);
1527  	return -EPROTO;
1528  }
1529  
1530  static void run_deferred(struct rq *rq)
1531  {
1532  	process_ddsp_deferred_locals(rq);
1533  }
1534  
1535  #ifdef CONFIG_SMP
1536  static void deferred_bal_cb_workfn(struct rq *rq)
1537  {
1538  	run_deferred(rq);
1539  }
1540  #endif
1541  
1542  static void deferred_irq_workfn(struct irq_work *irq_work)
1543  {
1544  	struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
1545  
1546  	raw_spin_rq_lock(rq);
1547  	run_deferred(rq);
1548  	raw_spin_rq_unlock(rq);
1549  }
1550  
1551  /**
1552   * schedule_deferred - Schedule execution of deferred actions on an rq
1553   * @rq: target rq
1554   *
1555   * Schedule execution of deferred actions on @rq. Must be called with @rq
1556   * locked. Deferred actions are executed with @rq locked but unpinned, and thus
1557   * can unlock @rq to e.g. migrate tasks to other rqs.
1558   */
schedule_deferred(struct rq * rq)1559  static void schedule_deferred(struct rq *rq)
1560  {
1561  	lockdep_assert_rq_held(rq);
1562  
1563  #ifdef CONFIG_SMP
1564  	/*
1565  	 * If in the middle of waking up a task, task_woken_scx() will be called
1566  	 * afterwards which will then run the deferred actions, no need to
1567  	 * schedule anything.
1568  	 */
1569  	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
1570  		return;
1571  
1572  	/*
1573  	 * If in balance, the balance callbacks will be called before rq lock is
1574  	 * released. Schedule one.
1575  	 */
1576  	if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
1577  		queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
1578  				       deferred_bal_cb_workfn);
1579  		return;
1580  	}
1581  #endif
1582  	/*
	 * No scheduler hooks available. Queue an irq work. It is executed on
	 * IRQ re-enable, which may take a bit longer than the scheduler hooks.
	 * The above WAKEUP and BALANCE paths should cover most of the cases and
	 * the time to IRQ re-enable shouldn't be long.
	 */
	irq_work_queue(&rq->scx.deferred_irq_work);
}

/**
 * touch_core_sched - Update timestamp used for core-sched task ordering
 * @rq: rq to read clock from, must be locked
 * @p: task to update the timestamp for
 *
 * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to
 * implement global or local-DSQ FIFO ordering for core-sched. Should be called
 * when a task becomes runnable and its turn on the CPU ends (e.g. slice
 * exhaustion).
 */
static void touch_core_sched(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_rq_held(rq);

#ifdef CONFIG_SCHED_CORE
	/*
	 * It's okay to update the timestamp spuriously. Use
	 * sched_core_disabled() which is cheaper than enabled().
	 *
	 * As this is used to determine ordering between tasks of sibling CPUs,
	 * it may be better to use per-core dispatch sequence instead.
	 */
	if (!sched_core_disabled())
		p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
#endif
}

/**
 * touch_core_sched_dispatch - Update core-sched timestamp on dispatch
 * @rq: rq to read clock from, must be locked
 * @p: task being dispatched
 *
 * If the BPF scheduler implements custom core-sched ordering via
 * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO
 * ordering within each local DSQ. This function is called from dispatch paths
 * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect.
 */
static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_rq_held(rq);

#ifdef CONFIG_SCHED_CORE
	if (SCX_HAS_OP(core_sched_before))
		touch_core_sched(rq, p);
#endif
}

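/*
 * Charge the time @rq->curr has run since the last update against its slice.
 * When the slice is exhausted, refresh the core-sched timestamp so that the
 * task no longer looks old, and thus high priority, to scx_prio_less().
 */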
static void update_curr_scx(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	s64 delta_exec;

	delta_exec = update_curr_common(rq);
	if (unlikely(delta_exec <= 0))
		return;

	if (curr->scx.slice != SCX_SLICE_INF) {
		curr->scx.slice -= min_t(u64, curr->scx.slice, delta_exec);
		if (!curr->scx.slice)
			touch_core_sched(rq, curr);
	}
}

static bool scx_dsq_priq_less(struct rb_node *node_a,
			      const struct rb_node *node_b)
{
	const struct task_struct *a =
		container_of(node_a, struct task_struct, scx.dsq_priq);
	const struct task_struct *b =
		container_of(node_b, struct task_struct, scx.dsq_priq);

	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
}

static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
{
	/* scx_bpf_dsq_nr_queued() reads ->nr without locking, use WRITE_ONCE() */
	WRITE_ONCE(dsq->nr, dsq->nr + delta);
}

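/*
 * Queue @p on @dsq, in FIFO order or, with %SCX_ENQ_DSQ_PRIQ, in vtime order.
 * Non-local DSQs are protected by @dsq->lock which is acquired and released
 * here. The local DSQ is protected by the owning rq's lock which the caller
 * must hold; enqueueing there may trigger preemption or a resched of the
 * current task.
 */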
static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
			     u64 enq_flags)
{
	bool is_local = dsq->id == SCX_DSQ_LOCAL;

	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
		     !RB_EMPTY_NODE(&p->scx.dsq_priq));

	if (!is_local) {
		raw_spin_lock(&dsq->lock);
		if (unlikely(dsq->id == SCX_DSQ_INVALID)) {
			scx_ops_error("attempting to dispatch to a destroyed dsq");
			/* fall back to the global dsq */
			raw_spin_unlock(&dsq->lock);
			dsq = find_global_dsq(p);
			raw_spin_lock(&dsq->lock);
		}
	}

	if (unlikely((dsq->id & SCX_DSQ_FLAG_BUILTIN) &&
		     (enq_flags & SCX_ENQ_DSQ_PRIQ))) {
		/*
		 * SCX_DSQ_LOCAL and SCX_DSQ_GLOBAL DSQs always consume from
		 * their FIFO queues. To avoid confusion and accidentally
		 * starving vtime-dispatched tasks by FIFO-dispatched tasks, we
		 * disallow any internal DSQ from doing vtime ordering of
		 * tasks.
		 */
		scx_ops_error("cannot use vtime ordering for built-in DSQs");
		enq_flags &= ~SCX_ENQ_DSQ_PRIQ;
	}

	if (enq_flags & SCX_ENQ_DSQ_PRIQ) {
		struct rb_node *rbp;

		/*
		 * A PRIQ DSQ shouldn't be using FIFO enqueueing. As tasks are
		 * linked to both the rbtree and list on PRIQs, this can only be
		 * tested easily when adding the first task.
		 */
		if (unlikely(RB_EMPTY_ROOT(&dsq->priq) &&
			     nldsq_next_task(dsq, NULL, false)))
			scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
				      dsq->id);

		p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
		rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);

		/*
		 * Find the previous task and insert after it on the list so
		 * that @dsq->list is vtime ordered.
		 */
		rbp = rb_prev(&p->scx.dsq_priq);
		if (rbp) {
			struct task_struct *prev =
				container_of(rbp, struct task_struct,
					     scx.dsq_priq);
			list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
		} else {
			list_add(&p->scx.dsq_list.node, &dsq->list);
		}
	} else {
		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
		if (unlikely(!RB_EMPTY_ROOT(&dsq->priq)))
			scx_ops_error("DSQ ID 0x%016llx already had PRIQ-enqueued tasks",
				      dsq->id);

		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
			list_add(&p->scx.dsq_list.node, &dsq->list);
		else
			list_add_tail(&p->scx.dsq_list.node, &dsq->list);
	}

	/* seq records the order tasks are queued, used by BPF DSQ iterator */
	dsq->seq++;
	p->scx.dsq_seq = dsq->seq;

	dsq_mod_nr(dsq, 1);
	p->scx.dsq = dsq;

	/*
	 * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
	 * direct dispatch path, but we clear them here because the direct
	 * dispatch verdict may be overridden on the enqueue path during e.g.
	 * bypass.
	 */
	p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
	p->scx.ddsp_enq_flags = 0;

	/*
	 * We're transitioning out of QUEUEING or DISPATCHING. store_release to
	 * match waiters' load_acquire.
	 */
	if (enq_flags & SCX_ENQ_CLEAR_OPSS)
		atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);

	if (is_local) {
		struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
		bool preempt = false;

		if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
		    rq->curr->sched_class == &ext_sched_class) {
			rq->curr->scx.slice = 0;
			preempt = true;
		}

		if (preempt || sched_class_above(&ext_sched_class,
						 rq->curr->sched_class))
			resched_curr(rq);
	} else {
		raw_spin_unlock(&dsq->lock);
	}
}

static void task_unlink_from_dsq(struct task_struct *p,
				 struct scx_dispatch_q *dsq)
{
	WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));

	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
		rb_erase(&p->scx.dsq_priq, &dsq->priq);
		RB_CLEAR_NODE(&p->scx.dsq_priq);
		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
	}

	list_del_init(&p->scx.dsq_list.node);
	dsq_mod_nr(dsq, -1);
}

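/*
 * Undo dispatch_enqueue() - take @p off whatever DSQ it's on. If @p is being
 * transferred by dispatch_to_local_dsq(), clear @p->scx.holding_cpu instead
 * to tell the transfer side that it lost the race. A pending deferred local
 * dispatch is cancelled by unlinking. Must be called with @rq locked.
 */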
static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
{
	struct scx_dispatch_q *dsq = p->scx.dsq;
	bool is_local = dsq == &rq->scx.local_dsq;

	if (!dsq) {
		/*
		 * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals.
		 * Unlinking is all that's needed to cancel.
		 */
		if (unlikely(!list_empty(&p->scx.dsq_list.node)))
			list_del_init(&p->scx.dsq_list.node);

		/*
		 * When dispatching directly from the BPF scheduler to a local
		 * DSQ, the task isn't associated with any DSQ but
		 * @p->scx.holding_cpu may be set under the protection of
		 * %SCX_OPSS_DISPATCHING.
		 */
		if (p->scx.holding_cpu >= 0)
			p->scx.holding_cpu = -1;

		return;
	}

	if (!is_local)
		raw_spin_lock(&dsq->lock);

	/*
	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
	 * change underneath us.
	 */
	if (p->scx.holding_cpu < 0) {
		/* @p must still be on @dsq, dequeue */
		task_unlink_from_dsq(p, dsq);
	} else {
		/*
		 * We're racing against dispatch_to_local_dsq() which already
		 * removed @p from @dsq and set @p->scx.holding_cpu. Clear the
		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
		 * the race.
		 */
		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
		p->scx.holding_cpu = -1;
	}
	p->scx.dsq = NULL;

	if (!is_local)
		raw_spin_unlock(&dsq->lock);
}

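/*
 * Resolve a dispatch verdict's @dsq_id to a DSQ. %SCX_DSQ_LOCAL and
 * %SCX_DSQ_LOCAL_ON map to local DSQs and %SCX_DSQ_GLOBAL to @p's per-node
 * global DSQ. Invalid IDs trigger an ops error and fall back to the global
 * DSQ so that dispatching always makes forward progress.
 */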
static struct scx_dispatch_q *find_dsq_for_dispatch(struct rq *rq, u64 dsq_id,
						    struct task_struct *p)
{
	struct scx_dispatch_q *dsq;

	if (dsq_id == SCX_DSQ_LOCAL)
		return &rq->scx.local_dsq;

	if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;

		if (!ops_cpu_valid(cpu, "in SCX_DSQ_LOCAL_ON dispatch verdict"))
			return find_global_dsq(p);

		return &cpu_rq(cpu)->scx.local_dsq;
	}

	if (dsq_id == SCX_DSQ_GLOBAL)
		dsq = find_global_dsq(p);
	else
		dsq = find_user_dsq(dsq_id);

	if (unlikely(!dsq)) {
		scx_ops_error("non-existent DSQ 0x%llx for %s[%d]",
			      dsq_id, p->comm, p->pid);
		return find_global_dsq(p);
	}

	return dsq;
}

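/*
 * Record a direct dispatch verdict from ops.select_cpu() or ops.enqueue() in
 * @p->scx.ddsp_dsq_id and @p->scx.ddsp_enq_flags. The verdict is acted upon
 * later by direct_dispatch() on the enqueue path.
 */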
static void mark_direct_dispatch(struct task_struct *ddsp_task,
				 struct task_struct *p, u64 dsq_id,
				 u64 enq_flags)
{
	/*
	 * Mark that dispatch already happened from ops.select_cpu() or
	 * ops.enqueue() by spoiling direct_dispatch_task with a non-NULL value
	 * which can never match a valid task pointer.
	 */
	__this_cpu_write(direct_dispatch_task, ERR_PTR(-ESRCH));

	/* @p must match the task on the enqueue path */
	if (unlikely(p != ddsp_task)) {
		if (IS_ERR(ddsp_task))
			scx_ops_error("%s[%d] already direct-dispatched",
				      p->comm, p->pid);
		else
			scx_ops_error("scheduling for %s[%d] but trying to direct-dispatch %s[%d]",
				      ddsp_task->comm, ddsp_task->pid,
				      p->comm, p->pid);
		return;
	}

	WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID);
	WARN_ON_ONCE(p->scx.ddsp_enq_flags);

	p->scx.ddsp_dsq_id = dsq_id;
	p->scx.ddsp_enq_flags = enq_flags;
}

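/*
 * Execute the direct dispatch verdict recorded by mark_direct_dispatch().
 * Dispatches targeting the local DSQ of a remote CPU can't be performed with
 * the current rq locked and pinned, and are deferred to
 * process_ddsp_deferred_locals() through schedule_deferred().
 */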
static void direct_dispatch(struct task_struct *p, u64 enq_flags)
{
	struct rq *rq = task_rq(p);
	struct scx_dispatch_q *dsq =
		find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);

	touch_core_sched_dispatch(rq, p);

	p->scx.ddsp_enq_flags |= enq_flags;

	/*
	 * We are in the enqueue path with @rq locked and pinned, and thus can't
	 * double lock a remote rq and enqueue to its local DSQ. For
	 * DSQ_LOCAL_ON verdicts targeting the local DSQ of a remote CPU, defer
	 * the enqueue so that it's executed when @rq can be unlocked.
	 */
	if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
		unsigned long opss;

		opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK;

		switch (opss & SCX_OPSS_STATE_MASK) {
		case SCX_OPSS_NONE:
			break;
		case SCX_OPSS_QUEUEING:
			/*
			 * As @p was never passed to the BPF side, _release is
			 * not strictly necessary. Still do it for consistency.
			 */
			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
			break;
		default:
			WARN_ONCE(true, "sched_ext: %s[%d] has invalid ops state 0x%lx in direct_dispatch()",
				  p->comm, p->pid, opss);
			atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
			break;
		}

		WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
		list_add_tail(&p->scx.dsq_list.node,
			      &rq->scx.ddsp_deferred_locals);
		schedule_deferred(rq);
		return;
	}

	dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
}

static bool scx_rq_online(struct rq *rq)
{
	/*
	 * Test both cpu_active() and %SCX_RQ_ONLINE. %SCX_RQ_ONLINE indicates
	 * the online state as seen from the BPF scheduler. cpu_active() test
	 * guarantees that, if this function returns %true, %SCX_RQ_ONLINE will
	 * stay set until the current scheduling operation is complete even if
	 * we aren't locking @rq.
	 */
	return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
}

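/*
 * Route @p to a DSQ. Depending on the situation, @p goes straight to the
 * local DSQ (rq migration, offline rq, exiting task), the global DSQ (bypass
 * or missing ops.enqueue()) or to ops.enqueue(), which in turn may
 * direct-dispatch it.
 */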
static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
			    int sticky_cpu)
{
	struct task_struct **ddsp_taskp;
	unsigned long qseq;

	WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));

	/* rq migration */
	if (sticky_cpu == cpu_of(rq))
		goto local_norefill;

	/*
	 * If !scx_rq_online(), we already told the BPF scheduler that the CPU
	 * is offline and are just running the hotplug path. Don't bother the
	 * BPF scheduler.
	 */
	if (!scx_rq_online(rq))
		goto local;

	if (scx_rq_bypassing(rq))
		goto global;

	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
		goto direct;

	/* see %SCX_OPS_ENQ_EXITING */
	if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
	    unlikely(p->flags & PF_EXITING))
		goto local;

	if (!SCX_HAS_OP(enqueue))
		goto global;

	/* DSQ bypass didn't trigger, enqueue on the BPF scheduler */
	qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;

	WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
	atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq);

	ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
	WARN_ON_ONCE(*ddsp_taskp);
	*ddsp_taskp = p;

	SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags);

	*ddsp_taskp = NULL;
	if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
		goto direct;

	/*
	 * If not directly dispatched, QUEUEING isn't clear yet and dispatch or
	 * dequeue may be waiting. The store_release matches their load_acquire.
	 */
	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq);
	return;

direct:
	direct_dispatch(p, enq_flags);
	return;

local:
	/*
	 * For task-ordering, slice refill must be treated as implying the end
	 * of the current slice. Otherwise, the longer @p stays on the CPU, the
	 * higher priority it becomes from scx_prio_less()'s POV.
	 */
	touch_core_sched(rq, p);
	p->scx.slice = SCX_SLICE_DFL;
local_norefill:
	dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
	return;

global:
	touch_core_sched(rq, p);	/* see the comment in local: */
	p->scx.slice = SCX_SLICE_DFL;
	dispatch_enqueue(find_global_dsq(p), p, enq_flags);
}

static bool task_runnable(const struct task_struct *p)
{
	return !list_empty(&p->scx.runnable_node);
}

static void set_task_runnable(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_rq_held(rq);

	if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
		p->scx.runnable_at = jiffies;
		p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
	}

	/*
	 * list_add_tail() must be used. scx_ops_bypass() depends on tasks being
	 * appended to the runnable_list.
	 */
	list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
}

static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
{
	list_del_init(&p->scx.runnable_node);
	if (reset_runnable_at)
		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
}

static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
{
	int sticky_cpu = p->scx.sticky_cpu;

	if (enq_flags & ENQUEUE_WAKEUP)
		rq->scx.flags |= SCX_RQ_IN_WAKEUP;

	enq_flags |= rq->scx.extra_enq_flags;

	if (sticky_cpu >= 0)
		p->scx.sticky_cpu = -1;

	/*
	 * Restoring a running task will be immediately followed by
	 * set_next_task_scx() which expects the task to not be on the BPF
	 * scheduler as tasks can only start running through local DSQs. Force
	 * direct-dispatch into the local DSQ by setting the sticky_cpu.
	 */
	if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
		sticky_cpu = cpu_of(rq);

	if (p->scx.flags & SCX_TASK_QUEUED) {
		WARN_ON_ONCE(!task_runnable(p));
		goto out;
	}

	set_task_runnable(rq, p);
	p->scx.flags |= SCX_TASK_QUEUED;
	rq->scx.nr_running++;
	add_nr_running(rq, 1);

	if (SCX_HAS_OP(runnable) && !task_on_rq_migrating(p))
		SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags);

	if (enq_flags & SCX_ENQ_WAKEUP)
		touch_core_sched(rq, p);

	do_enqueue_task(rq, p, enq_flags, sticky_cpu);
out:
	rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
}

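/*
 * Take @p back from the BPF scheduler. Depending on where @p is in the
 * ops_state machine, this may invoke ops.dequeue() or busy-wait for an
 * in-flight dispatch to finish. Called with @p's rq locked.
 */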
static void ops_dequeue(struct task_struct *p, u64 deq_flags)
{
	unsigned long opss;

	/* dequeue is always temporary, don't reset runnable_at */
	clr_task_runnable(p, false);

	/* acquire ensures that we see the preceding updates on QUEUED */
	opss = atomic_long_read_acquire(&p->scx.ops_state);

	switch (opss & SCX_OPSS_STATE_MASK) {
	case SCX_OPSS_NONE:
		break;
	case SCX_OPSS_QUEUEING:
		/*
		 * QUEUEING is started and finished while holding @p's rq lock.
		 * As we're holding the rq lock now, we shouldn't see QUEUEING.
		 */
		BUG();
	case SCX_OPSS_QUEUED:
		if (SCX_HAS_OP(dequeue))
			SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags);

		if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
					    SCX_OPSS_NONE))
			break;
		fallthrough;
	case SCX_OPSS_DISPATCHING:
		/*
		 * If @p is being dispatched from the BPF scheduler to a DSQ,
		 * wait for the transfer to complete so that @p doesn't get
		 * added to its DSQ after dequeueing is complete.
		 *
		 * As we're waiting on DISPATCHING with the rq locked, the
		 * dispatching side shouldn't try to lock the rq while
		 * DISPATCHING is set. See dispatch_to_local_dsq().
		 *
		 * DISPATCHING shouldn't have qseq set and control can reach
		 * here with NONE @opss from the above QUEUED case block.
		 * Explicitly wait on %SCX_OPSS_DISPATCHING instead of @opss.
		 */
		wait_ops_state(p, SCX_OPSS_DISPATCHING);
		BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE);
		break;
	}
}

static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
{
	if (!(p->scx.flags & SCX_TASK_QUEUED)) {
		WARN_ON_ONCE(task_runnable(p));
		return true;
	}

	ops_dequeue(p, deq_flags);

	/*
	 * A currently running task which is going off @rq first gets dequeued
	 * and then stops running. As we want running <-> stopping transitions
	 * to be contained within runnable <-> quiescent transitions, trigger
	 * ->stopping() early here instead of in put_prev_task_scx().
	 *
	 * @p may go through multiple stopping <-> running transitions between
	 * here and put_prev_task_scx() if task attribute changes occur while
	 * balance_scx() leaves @rq unlocked. However, they don't contain any
	 * information meaningful to the BPF scheduler and can be suppressed by
	 * skipping the callbacks if the task is !QUEUED.
	 */
	if (SCX_HAS_OP(stopping) && task_current(rq, p)) {
		update_curr_scx(rq);
		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false);
	}

	if (SCX_HAS_OP(quiescent) && !task_on_rq_migrating(p))
		SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags);

	if (deq_flags & SCX_DEQ_SLEEP)
		p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP;
	else
		p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP;

	p->scx.flags &= ~SCX_TASK_QUEUED;
	rq->scx.nr_running--;
	sub_nr_running(rq, 1);

	dispatch_dequeue(rq, p);
	return true;
}

static void yield_task_scx(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	if (SCX_HAS_OP(yield))
		SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL);
	else
		p->scx.slice = 0;
}

static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
{
	struct task_struct *from = rq->curr;

	if (SCX_HAS_OP(yield))
		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, from, to);
	else
		return false;
}

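/*
 * Move @p, which has just been unlinked from the locked @src_dsq and is
 * already on @dst_rq, onto @dst_rq's local DSQ. As no CPU change is involved,
 * no rq lock dancing is necessary.
 */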
static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
					 struct scx_dispatch_q *src_dsq,
					 struct rq *dst_rq)
{
	struct scx_dispatch_q *dst_dsq = &dst_rq->scx.local_dsq;

	/* @dsq is locked and @p is on @dst_rq */
	lockdep_assert_held(&src_dsq->lock);
	lockdep_assert_rq_held(dst_rq);

	WARN_ON_ONCE(p->scx.holding_cpu >= 0);

	if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
		list_add(&p->scx.dsq_list.node, &dst_dsq->list);
	else
		list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list);

	dsq_mod_nr(dst_dsq, 1);
	p->scx.dsq = dst_dsq;
}

#ifdef CONFIG_SMP
/**
 * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
 * @p: task to move
 * @enq_flags: %SCX_ENQ_*
 * @src_rq: rq to move the task from, locked on entry, released on return
 * @dst_rq: rq to move the task into, locked on return
 *
 * Move @p which is currently on @src_rq to @dst_rq's local DSQ.
 */
static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
					  struct rq *src_rq, struct rq *dst_rq)
{
	lockdep_assert_rq_held(src_rq);

	/* the following marks @p MIGRATING which excludes dequeue */
	deactivate_task(src_rq, p, 0);
	set_task_cpu(p, cpu_of(dst_rq));
	p->scx.sticky_cpu = cpu_of(dst_rq);

	raw_spin_rq_unlock(src_rq);
	raw_spin_rq_lock(dst_rq);

	/*
	 * We want to pass scx-specific enq_flags but activate_task() will
	 * truncate the upper 32 bit. As we own @rq, we can pass them through
	 * @rq->scx.extra_enq_flags instead.
	 */
	WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));
	WARN_ON_ONCE(dst_rq->scx.extra_enq_flags);
	dst_rq->scx.extra_enq_flags = enq_flags;
	activate_task(dst_rq, p, 0);
	dst_rq->scx.extra_enq_flags = 0;
}

/*
 * Similar to kernel/sched/core.c::is_cpu_allowed(). However, there are two
 * differences:
 *
 * - is_cpu_allowed() asks "Can this task run on this CPU?" while
 *   task_can_run_on_remote_rq() asks "Can the BPF scheduler migrate the task to
 *   this CPU?".
 *
 *   While migration is disabled, is_cpu_allowed() has to say "yes" as the task
 *   must be allowed to finish on the CPU that it's currently on regardless of
 *   the CPU state. However, task_can_run_on_remote_rq() must say "no" as the
 *   BPF scheduler shouldn't attempt to migrate a task which has migration
 *   disabled.
 *
 * - The BPF scheduler is bypassed while the rq is offline and we can always say
 *   no to the BPF scheduler initiated migrations while offline.
 */
static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
				      bool trigger_error)
{
	int cpu = cpu_of(rq);

	/*
	 * We don't require the BPF scheduler to avoid dispatching to offline
	 * CPUs mostly for convenience but also because CPUs can go offline
	 * between scx_bpf_dispatch() calls and here. Trigger error iff the
	 * picked CPU is outside the allowed mask.
	 */
	if (!task_allowed_on_cpu(p, cpu)) {
		if (trigger_error)
			scx_ops_error("SCX_DSQ_LOCAL[_ON] verdict target cpu %d not allowed for %s[%d]",
				      cpu_of(rq), p->comm, p->pid);
		return false;
	}

	if (unlikely(is_migration_disabled(p)))
		return false;

	if (!scx_rq_online(rq))
		return false;

	return true;
}

/**
 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
 * @p: target task
 * @dsq: locked DSQ @p is currently on
 * @src_rq: rq @p is currently on, stable with @dsq locked
 *
 * Called with @dsq locked but no rq's locked. We want to move @p to a different
 * DSQ, including any local DSQ, but are not locking @src_rq. Locking @src_rq is
 * required when transferring into a local DSQ. Even when transferring into a
 * non-local DSQ, it's better to use the same mechanism to protect against
 * dequeues and maintain the invariant that @p->scx.dsq can only change while
 * @src_rq is locked, which e.g. scx_dump_task() depends on.
 *
 * We want to grab @src_rq but that can deadlock if we try while locking @dsq,
 * so we want to unlink @p from @dsq, drop its lock and then lock @src_rq. As
 * this may race with dequeue, which can't drop the rq lock or fail, do a little
 * dancing from our side.
 *
 * @p->scx.holding_cpu is set to this CPU before @dsq is unlocked. If @p gets
 * dequeued after we unlock @dsq but before locking @src_rq, the holding_cpu
 * would be cleared to -1. While other cpus may have updated it to different
 * values afterwards, as this operation can't be preempted or recurse, the
 * holding_cpu can never become this CPU again before we're done. Thus, we can
 * tell whether we lost to dequeue by testing whether the holding_cpu still
 * points to this CPU. See dispatch_dequeue() for the counterpart.
 *
 * On return, @dsq is unlocked and @src_rq is locked. Returns %true if @p is
 * still valid. %false if lost to dequeue.
 */
static bool unlink_dsq_and_lock_src_rq(struct task_struct *p,
				       struct scx_dispatch_q *dsq,
				       struct rq *src_rq)
{
	s32 cpu = raw_smp_processor_id();

	lockdep_assert_held(&dsq->lock);

	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
	task_unlink_from_dsq(p, dsq);
	p->scx.holding_cpu = cpu;

	raw_spin_unlock(&dsq->lock);
	raw_spin_rq_lock(src_rq);

	/* task_rq couldn't have changed if we're still the holding cpu */
	return likely(p->scx.holding_cpu == cpu) &&
		!WARN_ON_ONCE(src_rq != task_rq(p));
}

static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
				struct scx_dispatch_q *dsq, struct rq *src_rq)
{
	raw_spin_rq_unlock(this_rq);

	if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {
		move_remote_task_to_local_dsq(p, 0, src_rq, this_rq);
		return true;
	} else {
		raw_spin_rq_unlock(src_rq);
		raw_spin_rq_lock(this_rq);
		return false;
	}
}
#else	/* CONFIG_SMP */
static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
#endif	/* CONFIG_SMP */

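/*
 * Consume a task from @dsq into @rq's local DSQ. Tasks whose task_rq is @rq
 * are moved directly; others are pulled over with consume_remote_task(),
 * which may drop and reacquire locks, hence the retry. Returns %true if a
 * task was consumed.
 */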
static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
{
	struct task_struct *p;
retry:
	/*
	 * The caller can't expect to successfully consume a task if the task's
	 * addition to @dsq isn't guaranteed to be visible somehow. Test
	 * @dsq->list without locking and skip if it seems empty.
	 */
	if (list_empty(&dsq->list))
		return false;

	raw_spin_lock(&dsq->lock);

	nldsq_for_each_task(p, dsq) {
		struct rq *task_rq = task_rq(p);

		if (rq == task_rq) {
			task_unlink_from_dsq(p, dsq);
			move_local_task_to_local_dsq(p, 0, dsq, rq);
			raw_spin_unlock(&dsq->lock);
			return true;
		}

		if (task_can_run_on_remote_rq(p, rq, false)) {
			if (likely(consume_remote_task(rq, p, dsq, task_rq)))
				return true;
			goto retry;
		}
	}

	raw_spin_unlock(&dsq->lock);
	return false;
}

static bool consume_global_dsq(struct rq *rq)
{
	int node = cpu_to_node(cpu_of(rq));

	return consume_dispatch_q(rq, global_dsqs[node]);
}

/**
 * dispatch_to_local_dsq - Dispatch a task to a local dsq
 * @rq: current rq which is locked
 * @dst_dsq: destination DSQ
 * @p: task to dispatch
 * @enq_flags: %SCX_ENQ_*
 *
 * We're holding @rq lock and want to dispatch @p to @dst_dsq which is a local
 * DSQ. This function performs all the synchronization dancing needed because
 * local DSQs are protected with rq locks.
 *
 * The caller must have exclusive ownership of @p (e.g. through
 * %SCX_OPSS_DISPATCHING).
 */
static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
				  struct task_struct *p, u64 enq_flags)
{
	struct rq *src_rq = task_rq(p);
	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);

	/*
	 * We're synchronized against dequeue through DISPATCHING. As @p can't
	 * be dequeued, its task_rq and cpus_allowed are stable too.
	 *
	 * If dispatching to @rq that @p is already on, no lock dancing needed.
	 */
	if (rq == src_rq && rq == dst_rq) {
		dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
		return;
	}

#ifdef CONFIG_SMP
	if (unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) {
		dispatch_enqueue(find_global_dsq(p), p,
				 enq_flags | SCX_ENQ_CLEAR_OPSS);
		return;
	}

	/*
	 * @p is on a possibly remote @src_rq which we need to lock to move the
	 * task. If dequeue is in progress, it'd be locking @src_rq and waiting
	 * on DISPATCHING, so we can't grab @src_rq lock while holding
	 * DISPATCHING.
	 *
	 * As DISPATCHING guarantees that @p is wholly ours, we can pretend that
	 * we're moving from a DSQ and use the same mechanism - mark the task
	 * under transfer with holding_cpu, release DISPATCHING and then follow
	 * the same protocol. See unlink_dsq_and_lock_src_rq().
	 */
	p->scx.holding_cpu = raw_smp_processor_id();

	/* store_release ensures that dequeue sees the above */
	atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);

	/* switch to @src_rq lock */
	if (rq != src_rq) {
		raw_spin_rq_unlock(rq);
		raw_spin_rq_lock(src_rq);
	}

	/* task_rq couldn't have changed if we're still the holding cpu */
	if (likely(p->scx.holding_cpu == raw_smp_processor_id()) &&
	    !WARN_ON_ONCE(src_rq != task_rq(p))) {
		/*
		 * If @p is staying on the same rq, there's no need to go
		 * through the full deactivate/activate cycle. Optimize by
		 * abbreviating move_remote_task_to_local_dsq().
		 */
		if (src_rq == dst_rq) {
			p->scx.holding_cpu = -1;
			dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags);
		} else {
			move_remote_task_to_local_dsq(p, enq_flags,
						      src_rq, dst_rq);
		}

		/* if the destination CPU is idle, wake it up */
		if (sched_class_above(p->sched_class, dst_rq->curr->sched_class))
			resched_curr(dst_rq);
	}

	/* switch back to @rq lock */
	if (rq != dst_rq) {
		raw_spin_rq_unlock(dst_rq);
		raw_spin_rq_lock(rq);
	}
#else	/* CONFIG_SMP */
	BUG();	/* control can not reach here on UP */
#endif	/* CONFIG_SMP */
}

/**
 * finish_dispatch - Asynchronously finish dispatching a task
 * @rq: current rq which is locked
 * @p: task to finish dispatching
 * @qseq_at_dispatch: qseq when @p started getting dispatched
 * @dsq_id: destination DSQ ID
 * @enq_flags: %SCX_ENQ_*
 *
 * Dispatching to local DSQs may need to wait for queueing to complete or
 * require rq lock dancing. As we don't want to do either while inside
 * ops.dispatch() to avoid locking order inversion, we split dispatching into
 * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the
 * task and its qseq. Once ops.dispatch() returns, this function is called to
 * finish up.
 *
 * There is no guarantee that @p is still valid for dispatching or even that it
 * was valid in the first place. Make sure that the task is still owned by the
 * BPF scheduler and claim the ownership before dispatching.
 */
static void finish_dispatch(struct rq *rq, struct task_struct *p,
			    unsigned long qseq_at_dispatch,
			    u64 dsq_id, u64 enq_flags)
{
	struct scx_dispatch_q *dsq;
	unsigned long opss;

	touch_core_sched_dispatch(rq, p);
retry:
	/*
	 * No need for _acquire here. @p is accessed only after a successful
	 * try_cmpxchg to DISPATCHING.
	 */
	opss = atomic_long_read(&p->scx.ops_state);

	switch (opss & SCX_OPSS_STATE_MASK) {
	case SCX_OPSS_DISPATCHING:
	case SCX_OPSS_NONE:
		/* someone else already got to it */
		return;
	case SCX_OPSS_QUEUED:
		/*
		 * If qseq doesn't match, @p has gone through at least one
		 * dispatch/dequeue and re-enqueue cycle between
		 * scx_bpf_dispatch() and here and we have no claim on it.
		 */
		if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
			return;

		/*
		 * While we know @p is accessible, we don't yet have a claim on
		 * it - the BPF scheduler is allowed to dispatch tasks
		 * spuriously and there can be a racing dequeue attempt. Let's
		 * claim @p by atomically transitioning it from QUEUED to
		 * DISPATCHING.
		 */
		if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss,
						   SCX_OPSS_DISPATCHING)))
			break;
		goto retry;
	case SCX_OPSS_QUEUEING:
		/*
		 * do_enqueue_task() is in the process of transferring the task
		 * to the BPF scheduler while holding @p's rq lock. As we aren't
		 * holding any kernel or BPF resource that the enqueue path may
		 * depend upon, it's safe to wait.
		 */
		wait_ops_state(p, opss);
		goto retry;
	}

	BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED));

	dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p);

	if (dsq->id == SCX_DSQ_LOCAL)
		dispatch_to_local_dsq(rq, dsq, p, enq_flags);
	else
		dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
}

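/*
 * Finish all dispatches buffered on this CPU by scx_bpf_dispatch() and
 * account them in @dspc->nr_tasks so that balance_one() can tell whether
 * ops.dispatch() produced any tasks.
 */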
static void flush_dispatch_buf(struct rq *rq)
{
	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
	u32 u;

	for (u = 0; u < dspc->cursor; u++) {
		struct scx_dsp_buf_ent *ent = &dspc->buf[u];

		finish_dispatch(rq, ent->task, ent->qseq, ent->dsq_id,
				ent->enq_flags);
	}

	dspc->nr_tasks += dspc->cursor;
	dspc->cursor = 0;
}

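/*
 * The dispatch half of balancing @rq. Keep running @prev if it's runnable
 * with slice left. Otherwise, try to fill the local DSQ from the global DSQ
 * and then from ops.dispatch(), looping until tasks are found or the BPF
 * scheduler stops producing them. Returns %true if @rq has tasks to run.
 */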
static int balance_one(struct rq *rq, struct task_struct *prev)
{
	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
	bool prev_on_scx = prev->sched_class == &ext_sched_class;
	int nr_loops = SCX_DSP_MAX_LOOPS;

	lockdep_assert_rq_held(rq);
	rq->scx.flags |= SCX_RQ_IN_BALANCE;
	rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP);

	if (static_branch_unlikely(&scx_ops_cpu_preempt) &&
	    unlikely(rq->scx.cpu_released)) {
		/*
		 * If the previous sched_class for the current CPU was not SCX,
		 * notify the BPF scheduler that it again has control of the
		 * core. This callback complements ->cpu_release(), which is
		 * emitted in switch_class().
		 */
		if (SCX_HAS_OP(cpu_acquire))
			SCX_CALL_OP(SCX_KF_REST, cpu_acquire, cpu_of(rq), NULL);
		rq->scx.cpu_released = false;
	}

	if (prev_on_scx) {
		update_curr_scx(rq);

		/*
		 * If @prev is runnable & has slice left, it has priority and
		 * fetching more just increases latency for the fetched tasks.
		 * Tell pick_task_scx() to keep running @prev. If the BPF
		 * scheduler wants to handle this explicitly, it should
		 * implement ->cpu_release().
		 *
		 * See scx_ops_disable_workfn() for the explanation on the
		 * bypassing test.
		 */
		if ((prev->scx.flags & SCX_TASK_QUEUED) &&
		    prev->scx.slice && !scx_rq_bypassing(rq)) {
			rq->scx.flags |= SCX_RQ_BAL_KEEP;
			goto has_tasks;
		}
	}

	/* if there already are tasks to run, nothing to do */
	if (rq->scx.local_dsq.nr)
		goto has_tasks;

	if (consume_global_dsq(rq))
		goto has_tasks;

	if (!SCX_HAS_OP(dispatch) || scx_rq_bypassing(rq) || !scx_rq_online(rq))
		goto no_tasks;

	dspc->rq = rq;

	/*
	 * The dispatch loop. Because flush_dispatch_buf() may drop the rq lock,
	 * the local DSQ might still end up empty after a successful
	 * ops.dispatch(). If the local DSQ is empty even after ops.dispatch()
	 * produced some tasks, retry. The BPF scheduler may depend on this
	 * looping behavior to simplify its implementation.
	 */
	do {
		dspc->nr_tasks = 0;

		SCX_CALL_OP(SCX_KF_DISPATCH, dispatch, cpu_of(rq),
			    prev_on_scx ? prev : NULL);

		flush_dispatch_buf(rq);

		if (rq->scx.local_dsq.nr)
			goto has_tasks;
		if (consume_global_dsq(rq))
			goto has_tasks;

		/*
		 * ops.dispatch() can trap us in this loop by repeatedly
		 * dispatching ineligible tasks. Break out once in a while to
		 * allow the watchdog to run. As IRQ can't be enabled in
		 * balance(), we want to complete this scheduling cycle and then
		 * start a new one. IOW, we want to call resched_curr() on the
		 * next, most likely idle, task, not the current one. Use
		 * scx_bpf_kick_cpu() for deferred kicking.
		 */
		if (unlikely(!--nr_loops)) {
			scx_bpf_kick_cpu(cpu_of(rq), 0);
			break;
		}
	} while (dspc->nr_tasks);

no_tasks:
	/*
	 * Didn't find another task to run. Keep running @prev unless
	 * %SCX_OPS_ENQ_LAST is in effect.
	 */
	if ((prev->scx.flags & SCX_TASK_QUEUED) &&
	    (!static_branch_unlikely(&scx_ops_enq_last) ||
	     scx_rq_bypassing(rq))) {
		rq->scx.flags |= SCX_RQ_BAL_KEEP;
		goto has_tasks;
	}
	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
	return false;

has_tasks:
	rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
	return true;
}

static int balance_scx(struct rq *rq, struct task_struct *prev,
		       struct rq_flags *rf)
{
	int ret;

	rq_unpin_lock(rq, rf);

	ret = balance_one(rq, prev);

#ifdef CONFIG_SCHED_SMT
	/*
	 * When core-sched is enabled, this ops.balance() call will be followed
	 * by pick_task_scx() on this CPU and the SMT siblings. Balance the
	 * siblings too.
	 */
	if (sched_core_enabled(rq)) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
		int scpu;

		for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) {
			struct rq *srq = cpu_rq(scpu);
			struct task_struct *sprev = srq->curr;

			WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq));
			update_rq_clock(srq);
			balance_one(srq, sprev);
		}
	}
#endif
	rq_repin_lock(rq, rf);

	return ret;
}

static void process_ddsp_deferred_locals(struct rq *rq)
{
	struct task_struct *p;

	lockdep_assert_rq_held(rq);

	/*
	 * Now that @rq can be unlocked, execute the deferred enqueueing of
	 * tasks directly dispatched to the local DSQs of other CPUs. See
	 * direct_dispatch(). Keep popping from the head instead of using
	 * list_for_each_entry_safe() as dispatch_to_local_dsq() may unlock @rq
	 * temporarily.
	 */
	while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
				struct task_struct, scx.dsq_list.node))) {
		struct scx_dispatch_q *dsq;

		list_del_init(&p->scx.dsq_list.node);

		dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p);
		if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
			dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags);
	}
}

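/*
 * @p is about to start running on @rq. If @p is still QUEUED, e.g. because
 * core-sched picked it before it was dispatched, take it off its DSQ first.
 * Then notify the BPF scheduler through ops.running() and update whether the
 * tick can be stopped.
 */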
static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
{
	if (p->scx.flags & SCX_TASK_QUEUED) {
		/*
		 * Core-sched might decide to execute @p before it is
		 * dispatched. Call ops_dequeue() to notify the BPF scheduler.
		 */
		ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC);
		dispatch_dequeue(rq, p);
	}

	p->se.exec_start = rq_clock_task(rq);

	/* see dequeue_task_scx() on why we skip when !QUEUED */
	if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED))
		SCX_CALL_OP_TASK(SCX_KF_REST, running, p);

	clr_task_runnable(p, true);

	/*
	 * @p is getting newly scheduled or got kicked after someone updated its
	 * slice. Refresh whether tick can be stopped. See scx_can_stop_tick().
	 */
	if ((p->scx.slice == SCX_SLICE_INF) !=
	    (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
		if (p->scx.slice == SCX_SLICE_INF)
			rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
		else
			rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;

		sched_update_tick_dependency(rq);

		/*
		 * For now, let's refresh the load_avgs just when transitioning
		 * in and out of nohz. In the future, we might want to add a
		 * mechanism which calls the following periodically on
		 * tick-stopped CPUs.
		 */
		update_other_load_avgs(rq);
	}
}

static enum scx_cpu_preempt_reason
preempt_reason_from_class(const struct sched_class *class)
{
#ifdef CONFIG_SMP
	if (class == &stop_sched_class)
		return SCX_CPU_PREEMPT_STOP;
#endif
	if (class == &dl_sched_class)
		return SCX_CPU_PREEMPT_DL;
	if (class == &rt_sched_class)
		return SCX_CPU_PREEMPT_RT;
	return SCX_CPU_PREEMPT_UNKNOWN;
}

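/*
 * @rq is switching from SCX to @next's sched_class. Advance @rq->scx.pnt_seq
 * for kick_cpus_irq_workfn() and, if a higher priority sched_class preempted
 * SCX, emit ops.cpu_release() once until SCX regains the CPU.
 */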
static void switch_class(struct rq *rq, struct task_struct *next)
{
	const struct sched_class *next_class = next->sched_class;

#ifdef CONFIG_SMP
	/*
	 * Pairs with the smp_load_acquire() issued by a CPU in
	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
	 * resched.
	 */
	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
#endif
	if (!static_branch_unlikely(&scx_ops_cpu_preempt))
		return;

	/*
	 * The callback is conceptually meant to convey that the CPU is no
	 * longer under the control of SCX. Therefore, don't invoke the callback
	 * if the next class is below SCX (in which case the BPF scheduler has
	 * actively decided not to schedule any tasks on the CPU).
	 */
	if (sched_class_above(&ext_sched_class, next_class))
		return;

	/*
	 * At this point we know that SCX was preempted by a higher priority
	 * sched_class, so invoke the ->cpu_release() callback if we have not
	 * done so already. We only send the callback once between SCX being
	 * preempted, and it regaining control of the CPU.
	 *
	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
	 * next time that balance_scx() is invoked.
	 */
	if (!rq->scx.cpu_released) {
		if (SCX_HAS_OP(cpu_release)) {
			struct scx_cpu_release_args args = {
				.reason = preempt_reason_from_class(next_class),
				.task = next,
			};

			SCX_CALL_OP(SCX_KF_CPU_RELEASE,
				    cpu_release, cpu_of(rq), &args);
		}
		rq->scx.cpu_released = true;
	}
}

static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
			      struct task_struct *next)
{
	update_curr_scx(rq);

	/* see dequeue_task_scx() on why we skip when !QUEUED */
	if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED))
		SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true);

	if (p->scx.flags & SCX_TASK_QUEUED) {
		set_task_runnable(rq, p);

		/*
		 * If @p has slice left and is being put, @p is getting
		 * preempted by a higher priority scheduler class or core-sched
		 * forcing a different task. Leave it at the head of the local
		 * DSQ.
		 */
		if (p->scx.slice && !scx_rq_bypassing(rq)) {
			dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
			return;
		}

		/*
		 * If @p is runnable but we're about to enter a lower
		 * sched_class, %SCX_OPS_ENQ_LAST must be set. Tell
		 * ops.enqueue() that @p is the only one available for this cpu,
		 * which should trigger an explicit follow-up scheduling event.
		 */
		if (sched_class_above(&ext_sched_class, next->sched_class)) {
			WARN_ON_ONCE(!static_branch_unlikely(&scx_ops_enq_last));
			do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
		} else {
			do_enqueue_task(rq, p, 0, -1);
		}
	}

	if (next && next->sched_class != &ext_sched_class)
		switch_class(rq, next);
}

static struct task_struct *first_local_task(struct rq *rq)
{
	return list_first_entry_or_null(&rq->scx.local_dsq.list,
					struct task_struct, scx.dsq_list.node);
}

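/*
 * Pick the task for @rq to run next. balance_one() indicates through
 * %SCX_RQ_BAL_KEEP that @prev should keep running; otherwise, the first task
 * on the local DSQ is picked. A zero slice is replenished to %SCX_SLICE_DFL
 * either way.
 */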
static struct task_struct *pick_task_scx(struct rq *rq)
{
	struct task_struct *prev = rq->curr;
	struct task_struct *p;
	bool prev_on_scx = prev->sched_class == &ext_sched_class;
	bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
	bool kick_idle = false;

	/*
	 * WORKAROUND:
	 *
	 * %SCX_RQ_BAL_KEEP should be set iff @prev is on SCX as it must just
	 * have gone through balance_scx(). Unfortunately, there currently is a
	 * bug where fair could say yes on balance() but no on pick_task(),
	 * which then ends up calling pick_task_scx() without preceding
	 * balance_scx().
	 *
	 * Keep running @prev if possible and avoid stalling from entering idle
	 * without balancing.
	 *
	 * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE()
	 * if pick_task_scx() is called without preceding balance_scx().
	 */
	if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
		if (prev_on_scx) {
			keep_prev = true;
		} else {
			keep_prev = false;
			kick_idle = true;
		}
	} else if (unlikely(keep_prev && !prev_on_scx)) {
		/* only allowed during transitions */
		WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
		keep_prev = false;
	}

	/*
	 * If balance_scx() is telling us to keep running @prev, replenish slice
	 * if necessary and keep running @prev. Otherwise, pop the first one
	 * from the local DSQ.
	 */
	if (keep_prev) {
		p = prev;
		if (!p->scx.slice)
			p->scx.slice = SCX_SLICE_DFL;
	} else {
		p = first_local_task(rq);
		if (!p) {
			if (kick_idle)
				scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE);
			return NULL;
		}

		if (unlikely(!p->scx.slice)) {
			if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) {
				printk_deferred(KERN_WARNING "sched_ext: %s[%d] has zero slice in %s()\n",
						p->comm, p->pid, __func__);
				scx_warned_zero_slice = true;
			}
			p->scx.slice = SCX_SLICE_DFL;
		}
	}

	return p;
}

#ifdef CONFIG_SCHED_CORE
/**
 * scx_prio_less - Task ordering for core-sched
 * @a: task A
 * @b: task B
 *
 * Core-sched is implemented as an additional scheduling layer on top of the
 * usual sched_class'es and needs to find out the expected task ordering. For
 * SCX, core-sched calls this function to interrogate the task ordering.
 *
 * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used
 * to implement the default task ordering. The older the timestamp, the higher
 * priority the task - the global FIFO ordering matching the default scheduling
3026   * behavior.
3027   *
3028   * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to
3029   * implement FIFO ordering within each local DSQ. See pick_task_scx().
3030   */
scx_prio_less(const struct task_struct * a,const struct task_struct * b,bool in_fi)3031  bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
3032  		   bool in_fi)
3033  {
3034  	/*
3035  	 * The const qualifiers are dropped from task_struct pointers when
3036  	 * calling ops.core_sched_before(). Accesses are controlled by the
3037  	 * verifier.
3038  	 */
3039  	if (SCX_HAS_OP(core_sched_before) && !scx_rq_bypassing(task_rq(a)))
3040  		return SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, core_sched_before,
3041  					      (struct task_struct *)a,
3042  					      (struct task_struct *)b);
3043  	else
3044  		return time_after64(a->scx.core_sched_at, b->scx.core_sched_at);
3045  }
3046  #endif	/* CONFIG_SCHED_CORE */
3047  
3048  #ifdef CONFIG_SMP
3049  
test_and_clear_cpu_idle(int cpu)3050  static bool test_and_clear_cpu_idle(int cpu)
3051  {
3052  #ifdef CONFIG_SCHED_SMT
3053  	/*
3054  	 * SMT mask should be cleared whether we can claim @cpu or not. The SMT
3055  	 * cluster is not wholly idle either way. This also prevents
3056  	 * scx_pick_idle_cpu() from getting caught in an infinite loop.
3057  	 */
3058  	if (sched_smt_active()) {
3059  		const struct cpumask *smt = cpu_smt_mask(cpu);
3060  
3061  		/*
3062  		 * If offline, @cpu is not its own sibling and
3063  		 * scx_pick_idle_cpu() can get caught in an infinite loop as
3064  		 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
3065  		 * is eventually cleared.
3066  		 */
3067  		if (cpumask_intersects(smt, idle_masks.smt))
3068  			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3069  		else if (cpumask_test_cpu(cpu, idle_masks.smt))
3070  			__cpumask_clear_cpu(cpu, idle_masks.smt);
3071  	}
3072  #endif
3073  	return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
3074  }
3075  
3076  static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
3077  {
3078  	int cpu;
3079  
3080  retry:
3081  	if (sched_smt_active()) {
3082  		cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
3083  		if (cpu < nr_cpu_ids)
3084  			goto found;
3085  
3086  		if (flags & SCX_PICK_IDLE_CORE)
3087  			return -EBUSY;
3088  	}
3089  
3090  	cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
3091  	if (cpu >= nr_cpu_ids)
3092  		return -EBUSY;
3093  
3094  found:
3095  	if (test_and_clear_cpu_idle(cpu))
3096  		return cpu;
3097  	else
3098  		goto retry;
3099  }
3100  
3101  static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
3102  			      u64 wake_flags, bool *found)
3103  {
3104  	s32 cpu;
3105  
3106  	*found = false;
3107  
3108  	/*
3109  	 * If WAKE_SYNC, the waker's local DSQ is empty, and the system is
3110  	 * underutilized, wake up @p to the local DSQ of the waker. Checking
3111  	 * only for an empty local DSQ is insufficient as it could give the
3112  	 * wakee an unfair advantage when the system is oversaturated.
3113  	 * Checking only for the presence of idle CPUs is also insufficient as
3114  	 * the local DSQ of the waker could have tasks piled up on it even if
3115  	 * there is an idle core elsewhere on the system.
3116  	 */
3117  	cpu = smp_processor_id();
3118  	if ((wake_flags & SCX_WAKE_SYNC) &&
3119  	    !cpumask_empty(idle_masks.cpu) && !(current->flags & PF_EXITING) &&
3120  	    cpu_rq(cpu)->scx.local_dsq.nr == 0) {
3121  		if (cpumask_test_cpu(cpu, p->cpus_ptr))
3122  			goto cpu_found;
3123  	}
3124  
3125  	/*
3126  	 * If CPU has SMT, any wholly idle CPU is likely a better pick than
3127  	 * partially idle @prev_cpu.
3128  	 */
3129  	if (sched_smt_active()) {
3130  		if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
3131  		    test_and_clear_cpu_idle(prev_cpu)) {
3132  			cpu = prev_cpu;
3133  			goto cpu_found;
3134  		}
3135  
3136  		cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
3137  		if (cpu >= 0)
3138  			goto cpu_found;
3139  	}
3140  
3141  	if (test_and_clear_cpu_idle(prev_cpu)) {
3142  		cpu = prev_cpu;
3143  		goto cpu_found;
3144  	}
3145  
3146  	cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
3147  	if (cpu >= 0)
3148  		goto cpu_found;
3149  
3150  	return prev_cpu;
3151  
3152  cpu_found:
3153  	*found = true;
3154  	return cpu;
3155  }
3156  
3157  static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
3158  {
3159  	/*
3160  	 * sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
3161  	 * can be a good migration opportunity with low cache and memory
3162  	 * footprint. Returning a CPU different than @prev_cpu triggers
3163  	 * immediate rq migration. However, for SCX, as the current rq
3164  	 * association doesn't dictate where the task is going to run, this
3165  	 * doesn't fit well. If necessary, we can later add a dedicated method
3166  	 * which can decide to preempt self to force it through the regular
3167  	 * scheduling path.
3168  	 */
3169  	if (unlikely(wake_flags & WF_EXEC))
3170  		return prev_cpu;
3171  
3172  	if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) {
3173  		s32 cpu;
3174  		struct task_struct **ddsp_taskp;
3175  
3176  		ddsp_taskp = this_cpu_ptr(&direct_dispatch_task);
3177  		WARN_ON_ONCE(*ddsp_taskp);
3178  		*ddsp_taskp = p;
3179  
3180  		cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
3181  					   select_cpu, p, prev_cpu, wake_flags);
3182  		*ddsp_taskp = NULL;
3183  		if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
3184  			return cpu;
3185  		else
3186  			return prev_cpu;
3187  	} else {
3188  		bool found;
3189  		s32 cpu;
3190  
3191  		cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
3192  		if (found) {
3193  			p->scx.slice = SCX_SLICE_DFL;
3194  			p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
3195  		}
3196  		return cpu;
3197  	}
3198  }
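
/*
 * Example (illustrative sketch, not part of this file): a minimal BPF
 * ops.select_cpu() can reuse the default idle-CPU selection through the
 * scx_bpf_select_cpu_dfl() kfunc and, when an idle CPU is found, dispatch
 * directly to the local DSQ - the path that *ddsp_taskp above enables:
 *
 *	s32 BPF_STRUCT_OPS(sched_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool found;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
 *		if (found)
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */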
3199  
3200  static void task_woken_scx(struct rq *rq, struct task_struct *p)
3201  {
3202  	run_deferred(rq);
3203  }
3204  
3205  static void set_cpus_allowed_scx(struct task_struct *p,
3206  				 struct affinity_context *ac)
3207  {
3208  	set_cpus_allowed_common(p, ac);
3209  
3210  	/*
3211  	 * The effective cpumask is stored in @p->cpus_ptr which may temporarily
3212  	 * differ from the configured one in @p->cpus_mask. Always tell the bpf
3213  	 * scheduler the effective one.
3214  	 *
3215  	 * Fine-grained memory write control is enforced by BPF making the const
3216  	 * designation pointless. Cast it away when calling the operation.
3217  	 */
3218  	if (SCX_HAS_OP(set_cpumask))
3219  		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3220  				 (struct cpumask *)p->cpus_ptr);
3221  }
3222  
3223  static void reset_idle_masks(void)
3224  {
3225  	/*
3226  	 * Consider all online cpus idle. Should converge to the actual state
3227  	 * quickly.
3228  	 */
3229  	cpumask_copy(idle_masks.cpu, cpu_online_mask);
3230  	cpumask_copy(idle_masks.smt, cpu_online_mask);
3231  }
3232  
3233  void __scx_update_idle(struct rq *rq, bool idle)
3234  {
3235  	int cpu = cpu_of(rq);
3236  
3237  	if (SCX_HAS_OP(update_idle) && !scx_rq_bypassing(rq)) {
3238  		SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
3239  		if (!static_branch_unlikely(&scx_builtin_idle_enabled))
3240  			return;
3241  	}
3242  
3243  	if (idle)
3244  		cpumask_set_cpu(cpu, idle_masks.cpu);
3245  	else
3246  		cpumask_clear_cpu(cpu, idle_masks.cpu);
3247  
3248  #ifdef CONFIG_SCHED_SMT
3249  	if (sched_smt_active()) {
3250  		const struct cpumask *smt = cpu_smt_mask(cpu);
3251  
3252  		if (idle) {
3253  			/*
3254  			 * idle_masks.smt handling is racy but that's fine as
3255  			 * it's only for optimization and self-correcting.
3256  			 */
3257  			for_each_cpu(cpu, smt) {
3258  				if (!cpumask_test_cpu(cpu, idle_masks.cpu))
3259  					return;
3260  			}
3261  			cpumask_or(idle_masks.smt, idle_masks.smt, smt);
3262  		} else {
3263  			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
3264  		}
3265  	}
3266  #endif
3267  }
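
/*
 * Example (illustrative sketch, not part of this file): ops.update_idle()
 * lets a BPF scheduler observe the idle transitions above; unless
 * %SCX_OPS_KEEP_BUILTIN_IDLE is also set, implementing it takes over idle
 * tracking from the built-in masks. A minimal tracker, assuming a
 * hypothetical idle_cpus bpf_cpumask maintained by the scheduler:
 *
 *	void BPF_STRUCT_OPS(sched_update_idle, s32 cpu, bool idle)
 *	{
 *		if (idle)
 *			bpf_cpumask_set_cpu(cpu, idle_cpus);
 *		else
 *			bpf_cpumask_clear_cpu(cpu, idle_cpus);
 *	}
 */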
3268  
3269  static void handle_hotplug(struct rq *rq, bool online)
3270  {
3271  	int cpu = cpu_of(rq);
3272  
3273  	atomic_long_inc(&scx_hotplug_seq);
3274  
3275  	if (online && SCX_HAS_OP(cpu_online))
3276  		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
3277  	else if (!online && SCX_HAS_OP(cpu_offline))
3278  		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
3279  	else
3280  		scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
3281  			     "cpu %d going %s, exiting scheduler", cpu,
3282  			     online ? "online" : "offline");
3283  }
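
/*
 * Example (illustrative sketch, not part of this file): a user-space loader
 * receiving the %SCX_ECODE_ACT_RESTART | %SCX_ECODE_RSN_HOTPLUG exit above
 * can test the action bits of ops.exit()'s exit_code and re-register
 * instead of giving up, assuming the SCX_ECODE_* values are mirrored in its
 * headers:
 *
 *	static bool should_restart(long long exit_code)
 *	{
 *		return exit_code & SCX_ECODE_ACT_RESTART;
 *	}
 */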
3284  
3285  void scx_rq_activate(struct rq *rq)
3286  {
3287  	handle_hotplug(rq, true);
3288  }
3289  
3290  void scx_rq_deactivate(struct rq *rq)
3291  {
3292  	handle_hotplug(rq, false);
3293  }
3294  
3295  static void rq_online_scx(struct rq *rq)
3296  {
3297  	rq->scx.flags |= SCX_RQ_ONLINE;
3298  }
3299  
3300  static void rq_offline_scx(struct rq *rq)
3301  {
3302  	rq->scx.flags &= ~SCX_RQ_ONLINE;
3303  }
3304  
3305  #else	/* CONFIG_SMP */
3306  
3307  static bool test_and_clear_cpu_idle(int cpu) { return false; }
3308  static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
3309  static void reset_idle_masks(void) {}
3310  
3311  #endif	/* CONFIG_SMP */
3312  
3313  static bool check_rq_for_timeouts(struct rq *rq)
3314  {
3315  	struct task_struct *p;
3316  	struct rq_flags rf;
3317  	bool timed_out = false;
3318  
3319  	rq_lock_irqsave(rq, &rf);
3320  	list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
3321  		unsigned long last_runnable = p->scx.runnable_at;
3322  
3323  		if (unlikely(time_after(jiffies,
3324  					last_runnable + scx_watchdog_timeout))) {
3325  			u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable);
3326  
3327  			scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3328  					   "%s[%d] failed to run for %u.%03us",
3329  					   p->comm, p->pid,
3330  					   dur_ms / 1000, dur_ms % 1000);
3331  			timed_out = true;
3332  			break;
3333  		}
3334  	}
3335  	rq_unlock_irqrestore(rq, &rf);
3336  
3337  	return timed_out;
3338  }
3339  
3340  static void scx_watchdog_workfn(struct work_struct *work)
3341  {
3342  	int cpu;
3343  
3344  	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
3345  
3346  	for_each_online_cpu(cpu) {
3347  		if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))
3348  			break;
3349  
3350  		cond_resched();
3351  	}
3352  	queue_delayed_work(system_unbound_wq, to_delayed_work(work),
3353  			   scx_watchdog_timeout / 2);
3354  }
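
/*
 * Example (illustrative sketch, not part of this file): the interval checked
 * above derives from sched_ext_ops.timeout_ms, which a scheduler may lower
 * from the 30s maximum when registering. A libbpf-style skeleton might
 * declare (sched_select_cpu is a hypothetical op from the same scheduler):
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.select_cpu	= (void *)sched_select_cpu,
 *		.timeout_ms	= 5000U,		// 5s watchdog budget
 *		.name		= "example",
 *	};
 */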
3355  
3356  void scx_tick(struct rq *rq)
3357  {
3358  	unsigned long last_check;
3359  
3360  	if (!scx_enabled())
3361  		return;
3362  
3363  	last_check = READ_ONCE(scx_watchdog_timestamp);
3364  	if (unlikely(time_after(jiffies,
3365  				last_check + READ_ONCE(scx_watchdog_timeout)))) {
3366  		u32 dur_ms = jiffies_to_msecs(jiffies - last_check);
3367  
3368  		scx_ops_error_kind(SCX_EXIT_ERROR_STALL,
3369  				   "watchdog failed to check in for %u.%03us",
3370  				   dur_ms / 1000, dur_ms % 1000);
3371  	}
3372  
3373  	update_other_load_avgs(rq);
3374  }
3375  
3376  static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
3377  {
3378  	update_curr_scx(rq);
3379  
3380  	/*
3381  	 * While disabling, always resched and refresh core-sched timestamp as
3382  	 * we can't trust the slice management or ops.core_sched_before().
3383  	 */
3384  	if (scx_rq_bypassing(rq)) {
3385  		curr->scx.slice = 0;
3386  		touch_core_sched(rq, curr);
3387  	} else if (SCX_HAS_OP(tick)) {
3388  		SCX_CALL_OP(SCX_KF_REST, tick, curr);
3389  	}
3390  
3391  	if (!curr->scx.slice)
3392  		resched_curr(rq);
3393  }
3394  
3395  #ifdef CONFIG_EXT_GROUP_SCHED
3396  static struct cgroup *tg_cgrp(struct task_group *tg)
3397  {
3398  	/*
3399  	 * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup,
3400  	 * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the
3401  	 * root cgroup.
3402  	 */
3403  	if (tg && tg->css.cgroup)
3404  		return tg->css.cgroup;
3405  	else
3406  		return &cgrp_dfl_root.cgrp;
3407  }
3408  
3409  #define SCX_INIT_TASK_ARGS_CGROUP(tg)		.cgroup = tg_cgrp(tg),
3410  
3411  #else	/* CONFIG_EXT_GROUP_SCHED */
3412  
3413  #define SCX_INIT_TASK_ARGS_CGROUP(tg)
3414  
3415  #endif	/* CONFIG_EXT_GROUP_SCHED */
3416  
3417  static enum scx_task_state scx_get_task_state(const struct task_struct *p)
3418  {
3419  	return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
3420  }
3421  
3422  static void scx_set_task_state(struct task_struct *p, enum scx_task_state state)
3423  {
3424  	enum scx_task_state prev_state = scx_get_task_state(p);
3425  	bool warn = false;
3426  
3427  	BUILD_BUG_ON(SCX_TASK_NR_STATES > (1 << SCX_TASK_STATE_BITS));
3428  
3429  	switch (state) {
3430  	case SCX_TASK_NONE:
3431  		break;
3432  	case SCX_TASK_INIT:
3433  		warn = prev_state != SCX_TASK_NONE;
3434  		break;
3435  	case SCX_TASK_READY:
3436  		warn = prev_state == SCX_TASK_NONE;
3437  		break;
3438  	case SCX_TASK_ENABLED:
3439  		warn = prev_state != SCX_TASK_READY;
3440  		break;
3441  	default:
3442  		warn = true;
3443  		return;
3444  	}
3445  
3446  	WARN_ONCE(warn, "sched_ext: Invalid task state transition %d -> %d for %s[%d]",
3447  		  prev_state, state, p->comm, p->pid);
3448  
3449  	p->scx.flags &= ~SCX_TASK_STATE_MASK;
3450  	p->scx.flags |= state << SCX_TASK_STATE_SHIFT;
3451  }
3452  
3453  static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork)
3454  {
3455  	int ret;
3456  
3457  	p->scx.disallow = false;
3458  
3459  	if (SCX_HAS_OP(init_task)) {
3460  		struct scx_init_task_args args = {
3461  			SCX_INIT_TASK_ARGS_CGROUP(tg)
3462  			.fork = fork,
3463  		};
3464  
3465  		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args);
3466  		if (unlikely(ret)) {
3467  			ret = ops_sanitize_err("init_task", ret);
3468  			return ret;
3469  		}
3470  	}
3471  
3472  	scx_set_task_state(p, SCX_TASK_INIT);
3473  
3474  	if (p->scx.disallow) {
3475  		if (!fork) {
3476  			struct rq *rq;
3477  			struct rq_flags rf;
3478  
3479  			rq = task_rq_lock(p, &rf);
3480  
3481  			/*
3482  			 * We're in the load path and @p->policy will be applied
3483  			 * right after. Reverting @p->policy here and rejecting
3484  			 * %SCHED_EXT transitions from scx_check_setscheduler()
3485  			 * guarantees that if ops.init_task() sets @p->disallow,
3486  			 * @p can never be in SCX.
3487  			 */
3488  			if (p->policy == SCHED_EXT) {
3489  				p->policy = SCHED_NORMAL;
3490  				atomic_long_inc(&scx_nr_rejected);
3491  			}
3492  
3493  			task_rq_unlock(rq, p, &rf);
3494  		} else if (p->policy == SCHED_EXT) {
3495  			scx_ops_error("ops.init_task() set task->scx.disallow for %s[%d] during fork",
3496  				      p->comm, p->pid);
3497  		}
3498  	}
3499  
3500  	p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
3501  	return 0;
3502  }
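
/*
 * Example (illustrative sketch, not part of this file): ops.init_task() can
 * set @p->scx.disallow to keep a task from ever entering SCX, which the
 * logic above then enforces; as warned above, setting it from the fork path
 * for a SCHED_EXT task is an error. A hypothetical filter:
 *
 *	s32 BPF_STRUCT_OPS(sched_init_task, struct task_struct *p,
 *			   struct scx_init_task_args *args)
 *	{
 *		if (!args->fork && p->tgid == excluded_tgid)	// hypothetical knob
 *			p->scx.disallow = true;
 *		return 0;
 *	}
 */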
3503  
3504  static void scx_ops_enable_task(struct task_struct *p)
3505  {
3506  	u32 weight;
3507  
3508  	lockdep_assert_rq_held(task_rq(p));
3509  
3510  	/*
3511  	 * Set the weight before calling ops.enable() so that the scheduler
3512  	 * doesn't see a stale value if they inspect the task struct.
3513  	 */
3514  	if (task_has_idle_policy(p))
3515  		weight = WEIGHT_IDLEPRIO;
3516  	else
3517  		weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
3518  
3519  	p->scx.weight = sched_weight_to_cgroup(weight);
3520  
3521  	if (SCX_HAS_OP(enable))
3522  		SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
3523  	scx_set_task_state(p, SCX_TASK_ENABLED);
3524  
3525  	if (SCX_HAS_OP(set_weight))
3526  		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
3527  }
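
/*
 * For example (informational, assuming the usual defaults): a nice-0 task
 * has sched_prio_to_weight[] weight 1024, which sched_weight_to_cgroup()
 * maps to the cgroup-scale default of 100, so ops.enable() and
 * ops.set_weight() observe @p->scx.weight == 100 for such a task.
 */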
3528  
3529  static void scx_ops_disable_task(struct task_struct *p)
3530  {
3531  	lockdep_assert_rq_held(task_rq(p));
3532  	WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
3533  
3534  	if (SCX_HAS_OP(disable))
3535  		SCX_CALL_OP(SCX_KF_REST, disable, p);
3536  	scx_set_task_state(p, SCX_TASK_READY);
3537  }
3538  
3539  static void scx_ops_exit_task(struct task_struct *p)
3540  {
3541  	struct scx_exit_task_args args = {
3542  		.cancelled = false,
3543  	};
3544  
3545  	lockdep_assert_rq_held(task_rq(p));
3546  
3547  	switch (scx_get_task_state(p)) {
3548  	case SCX_TASK_NONE:
3549  		return;
3550  	case SCX_TASK_INIT:
3551  		args.cancelled = true;
3552  		break;
3553  	case SCX_TASK_READY:
3554  		break;
3555  	case SCX_TASK_ENABLED:
3556  		scx_ops_disable_task(p);
3557  		break;
3558  	default:
3559  		WARN_ON_ONCE(true);
3560  		return;
3561  	}
3562  
3563  	if (SCX_HAS_OP(exit_task))
3564  		SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
3565  	scx_set_task_state(p, SCX_TASK_NONE);
3566  }
3567  
3568  void init_scx_entity(struct sched_ext_entity *scx)
3569  {
3570  	/*
3571  	 * init_idle() calls this function again after the fork sequence is
3572  	 * complete. Don't touch ->tasks_node as it's already linked.
3573  	 */
3574  	memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node));
3575  
3576  	INIT_LIST_HEAD(&scx->dsq_list.node);
3577  	RB_CLEAR_NODE(&scx->dsq_priq);
3578  	scx->sticky_cpu = -1;
3579  	scx->holding_cpu = -1;
3580  	INIT_LIST_HEAD(&scx->runnable_node);
3581  	scx->runnable_at = jiffies;
3582  	scx->ddsp_dsq_id = SCX_DSQ_INVALID;
3583  	scx->slice = SCX_SLICE_DFL;
3584  }
3585  
3586  void scx_pre_fork(struct task_struct *p)
3587  {
3588  	/*
3589  	 * BPF scheduler enable/disable paths want to be able to iterate and
3590  	 * update all tasks which can become complex when racing forks. As
3591  	 * enable/disable are very cold paths, let's use a percpu_rwsem to
3592  	 * exclude forks.
3593  	 */
3594  	percpu_down_read(&scx_fork_rwsem);
3595  }
3596  
3597  int scx_fork(struct task_struct *p)
3598  {
3599  	percpu_rwsem_assert_held(&scx_fork_rwsem);
3600  
3601  	if (scx_ops_init_task_enabled)
3602  		return scx_ops_init_task(p, task_group(p), true);
3603  	else
3604  		return 0;
3605  }
3606  
3607  void scx_post_fork(struct task_struct *p)
3608  {
3609  	if (scx_ops_init_task_enabled) {
3610  		scx_set_task_state(p, SCX_TASK_READY);
3611  
3612  		/*
3613  		 * Enable the task immediately if it's running on sched_ext.
3614  		 * Otherwise, it'll be enabled in switching_to_scx() if and
3615  		 * when it's ever configured to run with a SCHED_EXT policy.
3616  		 */
3617  		if (p->sched_class == &ext_sched_class) {
3618  			struct rq_flags rf;
3619  			struct rq *rq;
3620  
3621  			rq = task_rq_lock(p, &rf);
3622  			scx_ops_enable_task(p);
3623  			task_rq_unlock(rq, p, &rf);
3624  		}
3625  	}
3626  
3627  	spin_lock_irq(&scx_tasks_lock);
3628  	list_add_tail(&p->scx.tasks_node, &scx_tasks);
3629  	spin_unlock_irq(&scx_tasks_lock);
3630  
3631  	percpu_up_read(&scx_fork_rwsem);
3632  }
3633  
3634  void scx_cancel_fork(struct task_struct *p)
3635  {
3636  	if (scx_enabled()) {
3637  		struct rq *rq;
3638  		struct rq_flags rf;
3639  
3640  		rq = task_rq_lock(p, &rf);
3641  		WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY);
3642  		scx_ops_exit_task(p);
3643  		task_rq_unlock(rq, p, &rf);
3644  	}
3645  
3646  	percpu_up_read(&scx_fork_rwsem);
3647  }
3648  
3649  void sched_ext_free(struct task_struct *p)
3650  {
3651  	unsigned long flags;
3652  
3653  	spin_lock_irqsave(&scx_tasks_lock, flags);
3654  	list_del_init(&p->scx.tasks_node);
3655  	spin_unlock_irqrestore(&scx_tasks_lock, flags);
3656  
3657  	/*
3658  	 * @p is off scx_tasks and wholly ours. scx_ops_enable()'s READY ->
3659  	 * ENABLED transitions can't race us. Disable ops for @p.
3660  	 */
3661  	if (scx_get_task_state(p) != SCX_TASK_NONE) {
3662  		struct rq_flags rf;
3663  		struct rq *rq;
3664  
3665  		rq = task_rq_lock(p, &rf);
3666  		scx_ops_exit_task(p);
3667  		task_rq_unlock(rq, p, &rf);
3668  	}
3669  }
3670  
3671  static void reweight_task_scx(struct rq *rq, struct task_struct *p,
3672  			      const struct load_weight *lw)
3673  {
3674  	lockdep_assert_rq_held(task_rq(p));
3675  
3676  	p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
3677  	if (SCX_HAS_OP(set_weight))
3678  		SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
3679  }
3680  
3681  static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio)
3682  {
3683  }
3684  
3685  static void switching_to_scx(struct rq *rq, struct task_struct *p)
3686  {
3687  	scx_ops_enable_task(p);
3688  
3689  	/*
3690  	 * set_cpus_allowed_scx() is not called while @p is associated with a
3691  	 * different scheduler class. Keep the BPF scheduler up-to-date.
3692  	 */
3693  	if (SCX_HAS_OP(set_cpumask))
3694  		SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p,
3695  				 (struct cpumask *)p->cpus_ptr);
3696  }
3697  
3698  static void switched_from_scx(struct rq *rq, struct task_struct *p)
3699  {
3700  	scx_ops_disable_task(p);
3701  }
3702  
3703  static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
3704  static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
3705  
3706  int scx_check_setscheduler(struct task_struct *p, int policy)
3707  {
3708  	lockdep_assert_rq_held(task_rq(p));
3709  
3710  	/* if disallow, reject transitioning into SCX */
3711  	if (scx_enabled() && READ_ONCE(p->scx.disallow) &&
3712  	    p->policy != policy && policy == SCHED_EXT)
3713  		return -EACCES;
3714  
3715  	return 0;
3716  }
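
/*
 * Example (illustrative sketch, not part of this file): user space opts a
 * task into SCX with the usual scheduler API; this is the path that lands
 * in scx_check_setscheduler() above. SCHED_EXT is policy 7 in the UAPI
 * headers:
 *
 *	#include <sched.h>
 *
 *	#ifndef SCHED_EXT
 *	#define SCHED_EXT 7
 *	#endif
 *
 *	static int switch_to_scx(pid_t pid)
 *	{
 *		const struct sched_param param = { .sched_priority = 0 };
 *
 *		return sched_setscheduler(pid, SCHED_EXT, &param);
 *	}
 */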
3717  
3718  #ifdef CONFIG_NO_HZ_FULL
3719  bool scx_can_stop_tick(struct rq *rq)
3720  {
3721  	struct task_struct *p = rq->curr;
3722  
3723  	if (scx_rq_bypassing(rq))
3724  		return false;
3725  
3726  	if (p->sched_class != &ext_sched_class)
3727  		return true;
3728  
3729  	/*
3730  	 * @rq can dispatch from different DSQs, so we can't tell whether it
3731  	 * needs the tick or not by looking at nr_running. Allow stopping ticks
3732  	 * iff the BPF scheduler indicated so. See set_next_task_scx().
3733  	 */
3734  	return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
3735  }
3736  #endif
3737  
3738  #ifdef CONFIG_EXT_GROUP_SCHED
3739  
3740  DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
3741  static bool scx_cgroup_enabled;
3742  static bool cgroup_warned_missing_weight;
3743  static bool cgroup_warned_missing_idle;
3744  
3745  static void scx_cgroup_warn_missing_weight(struct task_group *tg)
3746  {
3747  	if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
3748  	    cgroup_warned_missing_weight)
3749  		return;
3750  
3751  	if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
3752  		return;
3753  
3754  	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
3755  		scx_ops.name);
3756  	cgroup_warned_missing_weight = true;
3757  }
3758  
3759  static void scx_cgroup_warn_missing_idle(struct task_group *tg)
3760  {
3761  	if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
3762  		return;
3763  
3764  	if (!tg->idle)
3765  		return;
3766  
3767  	pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
3768  		scx_ops.name);
3769  	cgroup_warned_missing_idle = true;
3770  }
3771  
3772  int scx_tg_online(struct task_group *tg)
3773  {
3774  	int ret = 0;
3775  
3776  	WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED));
3777  
3778  	percpu_down_read(&scx_cgroup_rwsem);
3779  
3780  	scx_cgroup_warn_missing_weight(tg);
3781  
3782  	if (scx_cgroup_enabled) {
3783  		if (SCX_HAS_OP(cgroup_init)) {
3784  			struct scx_cgroup_init_args args =
3785  				{ .weight = tg->scx_weight };
3786  
3787  			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
3788  					      tg->css.cgroup, &args);
3789  			if (ret)
3790  				ret = ops_sanitize_err("cgroup_init", ret);
3791  		}
3792  		if (ret == 0)
3793  			tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
3794  	} else {
3795  		tg->scx_flags |= SCX_TG_ONLINE;
3796  	}
3797  
3798  	percpu_up_read(&scx_cgroup_rwsem);
3799  	return ret;
3800  }
3801  
3802  void scx_tg_offline(struct task_group *tg)
3803  {
3804  	WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE));
3805  
3806  	percpu_down_read(&scx_cgroup_rwsem);
3807  
3808  	if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED))
3809  		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, tg->css.cgroup);
3810  	tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED);
3811  
3812  	percpu_up_read(&scx_cgroup_rwsem);
3813  }
3814  
3815  int scx_cgroup_can_attach(struct cgroup_taskset *tset)
3816  {
3817  	struct cgroup_subsys_state *css;
3818  	struct task_struct *p;
3819  	int ret;
3820  
3821  	/* released in scx_finish/cancel_attach() */
3822  	percpu_down_read(&scx_cgroup_rwsem);
3823  
3824  	if (!scx_cgroup_enabled)
3825  		return 0;
3826  
3827  	cgroup_taskset_for_each(p, css, tset) {
3828  		struct cgroup *from = tg_cgrp(task_group(p));
3829  		struct cgroup *to = tg_cgrp(css_tg(css));
3830  
3831  		WARN_ON_ONCE(p->scx.cgrp_moving_from);
3832  
3833  		/*
3834  		 * sched_move_task() omits identity migrations. Let's match the
3835  		 * behavior so that ops.cgroup_prep_move() and ops.cgroup_move()
3836  		 * always match one-to-one.
3837  		 */
3838  		if (from == to)
3839  			continue;
3840  
3841  		if (SCX_HAS_OP(cgroup_prep_move)) {
3842  			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_prep_move,
3843  					      p, from, css->cgroup);
3844  			if (ret)
3845  				goto err;
3846  		}
3847  
3848  		p->scx.cgrp_moving_from = from;
3849  	}
3850  
3851  	return 0;
3852  
3853  err:
3854  	cgroup_taskset_for_each(p, css, tset) {
3855  		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
3856  			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
3857  				    p->scx.cgrp_moving_from, css->cgroup);
3858  		p->scx.cgrp_moving_from = NULL;
3859  	}
3860  
3861  	percpu_up_read(&scx_cgroup_rwsem);
3862  	return ops_sanitize_err("cgroup_prep_move", ret);
3863  }
3864  
3865  void scx_move_task(struct task_struct *p)
3866  {
3867  	if (!scx_cgroup_enabled)
3868  		return;
3869  
3870  	/*
3871  	 * We're called from sched_move_task() which handles both cgroup and
3872  	 * autogroup moves. Ignore the latter.
3873  	 *
3874  	 * Also ignore exiting tasks, because in the exit path tasks transition
3875  	 * from the autogroup to the root group, so task_group_is_autogroup()
3876  	 * alone isn't able to catch exiting autogroup tasks. This is safe for
3877  	 * cgroup_move(), because cgroup migrations never happen for PF_EXITING
3878  	 * tasks.
3879  	 */
3880  	if (task_group_is_autogroup(task_group(p)) || (p->flags & PF_EXITING))
3881  		return;
3882  
3883  	/*
3884  	 * @p must have ops.cgroup_prep_move() called on it and thus
3885  	 * cgrp_moving_from set.
3886  	 */
3887  	if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from))
3888  		SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p,
3889  			p->scx.cgrp_moving_from, tg_cgrp(task_group(p)));
3890  	p->scx.cgrp_moving_from = NULL;
3891  }
3892  
3893  void scx_cgroup_finish_attach(void)
3894  {
3895  	percpu_up_read(&scx_cgroup_rwsem);
3896  }
3897  
3898  void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
3899  {
3900  	struct cgroup_subsys_state *css;
3901  	struct task_struct *p;
3902  
3903  	if (!scx_cgroup_enabled)
3904  		goto out_unlock;
3905  
3906  	cgroup_taskset_for_each(p, css, tset) {
3907  		if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from)
3908  			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p,
3909  				    p->scx.cgrp_moving_from, css->cgroup);
3910  		p->scx.cgrp_moving_from = NULL;
3911  	}
3912  out_unlock:
3913  	percpu_up_read(&scx_cgroup_rwsem);
3914  }
3915  
3916  void scx_group_set_weight(struct task_group *tg, unsigned long weight)
3917  {
3918  	percpu_down_read(&scx_cgroup_rwsem);
3919  
3920  	if (scx_cgroup_enabled && tg->scx_weight != weight) {
3921  		if (SCX_HAS_OP(cgroup_set_weight))
3922  			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
3923  				    tg_cgrp(tg), weight);
3924  		tg->scx_weight = weight;
3925  	}
3926  
3927  	percpu_up_read(&scx_cgroup_rwsem);
3928  }
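
/*
 * Example (illustrative sketch, not part of this file): a cgroup-aware BPF
 * scheduler receives these updates via ops.cgroup_set_weight() and might
 * record them in a hypothetical map of per-cgroup contexts keyed by cgroup
 * ID:
 *
 *	void BPF_STRUCT_OPS(sched_cgroup_set_weight, struct cgroup *cgrp,
 *			    u32 weight)
 *	{
 *		u64 cgid = cgrp->kn->id;
 *		struct cgrp_ctx *cgc;	// hypothetical per-cgroup context
 *
 *		cgc = bpf_map_lookup_elem(&cgrp_ctxs, &cgid);
 *		if (cgc)
 *			cgc->weight = weight;
 *	}
 */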
3929  
3930  void scx_group_set_idle(struct task_group *tg, bool idle)
3931  {
3932  	percpu_down_read(&scx_cgroup_rwsem);
3933  	scx_cgroup_warn_missing_idle(tg);
3934  	percpu_up_read(&scx_cgroup_rwsem);
3935  }
3936  
3937  static void scx_cgroup_lock(void)
3938  {
3939  	percpu_down_write(&scx_cgroup_rwsem);
3940  }
3941  
3942  static void scx_cgroup_unlock(void)
3943  {
3944  	percpu_up_write(&scx_cgroup_rwsem);
3945  }
3946  
3947  #else	/* CONFIG_EXT_GROUP_SCHED */
3948  
3949  static inline void scx_cgroup_lock(void) {}
3950  static inline void scx_cgroup_unlock(void) {}
3951  
3952  #endif	/* CONFIG_EXT_GROUP_SCHED */
3953  
3954  /*
3955   * Omitted operations:
3956   *
3957   * - wakeup_preempt: NOOP as it isn't useful in the wakeup path because the task
3958   *   isn't tied to the CPU at that point. Preemption is implemented by resetting
3959   *   the victim task's slice to 0 and triggering reschedule on the target CPU.
3960   *
3961   * - migrate_task_rq: Unnecessary as task to cpu mapping is transient.
3962   *
3963   * - task_fork/dead: We need fork/dead notifications for all tasks regardless of
3964   *   their current sched_class. Call them directly from sched core instead.
3965   */
3966  DEFINE_SCHED_CLASS(ext) = {
3967  	.enqueue_task		= enqueue_task_scx,
3968  	.dequeue_task		= dequeue_task_scx,
3969  	.yield_task		= yield_task_scx,
3970  	.yield_to_task		= yield_to_task_scx,
3971  
3972  	.wakeup_preempt		= wakeup_preempt_scx,
3973  
3974  	.balance		= balance_scx,
3975  	.pick_task		= pick_task_scx,
3976  
3977  	.put_prev_task		= put_prev_task_scx,
3978  	.set_next_task		= set_next_task_scx,
3979  
3980  #ifdef CONFIG_SMP
3981  	.select_task_rq		= select_task_rq_scx,
3982  	.task_woken		= task_woken_scx,
3983  	.set_cpus_allowed	= set_cpus_allowed_scx,
3984  
3985  	.rq_online		= rq_online_scx,
3986  	.rq_offline		= rq_offline_scx,
3987  #endif
3988  
3989  	.task_tick		= task_tick_scx,
3990  
3991  	.switching_to		= switching_to_scx,
3992  	.switched_from		= switched_from_scx,
3993  	.switched_to		= switched_to_scx,
3994  	.reweight_task		= reweight_task_scx,
3995  	.prio_changed		= prio_changed_scx,
3996  
3997  	.update_curr		= update_curr_scx,
3998  
3999  #ifdef CONFIG_UCLAMP_TASK
4000  	.uclamp_enabled		= 1,
4001  #endif
4002  };
4003  
4004  static void init_dsq(struct scx_dispatch_q *dsq, u64 dsq_id)
4005  {
4006  	memset(dsq, 0, sizeof(*dsq));
4007  
4008  	raw_spin_lock_init(&dsq->lock);
4009  	INIT_LIST_HEAD(&dsq->list);
4010  	dsq->id = dsq_id;
4011  }
4012  
4013  static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
4014  {
4015  	struct scx_dispatch_q *dsq;
4016  	int ret;
4017  
4018  	if (dsq_id & SCX_DSQ_FLAG_BUILTIN)
4019  		return ERR_PTR(-EINVAL);
4020  
4021  	dsq = kmalloc_node(sizeof(*dsq), GFP_KERNEL, node);
4022  	if (!dsq)
4023  		return ERR_PTR(-ENOMEM);
4024  
4025  	init_dsq(dsq, dsq_id);
4026  
4027  	ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
4028  				     dsq_hash_params);
4029  	if (ret) {
4030  		kfree(dsq);
4031  		return ERR_PTR(ret);
4032  	}
4033  	return dsq;
4034  }
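
/*
 * Example (illustrative sketch, not part of this file): BPF schedulers
 * reach create_dsq() through the scx_bpf_create_dsq() kfunc, typically from
 * a sleepable ops.init():
 *
 *	#define MY_DSQ_ID	0	// hypothetical scheduler-defined ID
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(sched_init)
 *	{
 *		return scx_bpf_create_dsq(MY_DSQ_ID, -1);	// -1: any NUMA node
 *	}
 */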
4035  
4036  static void free_dsq_irq_workfn(struct irq_work *irq_work)
4037  {
4038  	struct llist_node *to_free = llist_del_all(&dsqs_to_free);
4039  	struct scx_dispatch_q *dsq, *tmp_dsq;
4040  
4041  	llist_for_each_entry_safe(dsq, tmp_dsq, to_free, free_node)
4042  		kfree_rcu(dsq, rcu);
4043  }
4044  
4045  static DEFINE_IRQ_WORK(free_dsq_irq_work, free_dsq_irq_workfn);
4046  
4047  static void destroy_dsq(u64 dsq_id)
4048  {
4049  	struct scx_dispatch_q *dsq;
4050  	unsigned long flags;
4051  
4052  	rcu_read_lock();
4053  
4054  	dsq = find_user_dsq(dsq_id);
4055  	if (!dsq)
4056  		goto out_unlock_rcu;
4057  
4058  	raw_spin_lock_irqsave(&dsq->lock, flags);
4059  
4060  	if (dsq->nr) {
4061  		scx_ops_error("attempting to destroy in-use dsq 0x%016llx (nr=%u)",
4062  			      dsq->id, dsq->nr);
4063  		goto out_unlock_dsq;
4064  	}
4065  
4066  	if (rhashtable_remove_fast(&dsq_hash, &dsq->hash_node, dsq_hash_params))
4067  		goto out_unlock_dsq;
4068  
4069  	/*
4070  	 * Mark dead by invalidating ->id to prevent dispatch_enqueue() from
4071  	 * queueing more tasks. As this function can be called from anywhere,
4072  	 * freeing is bounced through an irq work to avoid nesting RCU
4073  	 * operations inside scheduler locks.
4074  	 */
4075  	dsq->id = SCX_DSQ_INVALID;
4076  	llist_add(&dsq->free_node, &dsqs_to_free);
4077  	irq_work_queue(&free_dsq_irq_work);
4078  
4079  out_unlock_dsq:
4080  	raw_spin_unlock_irqrestore(&dsq->lock, flags);
4081  out_unlock_rcu:
4082  	rcu_read_unlock();
4083  }
4084  
4085  #ifdef CONFIG_EXT_GROUP_SCHED
4086  static void scx_cgroup_exit(void)
4087  {
4088  	struct cgroup_subsys_state *css;
4089  
4090  	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4091  
4092  	scx_cgroup_enabled = false;
4093  
4094  	/*
4095  	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4096  	 * cgroups and exit all the inited ones, all online cgroups are exited.
4097  	 */
4098  	rcu_read_lock();
4099  	css_for_each_descendant_post(css, &root_task_group.css) {
4100  		struct task_group *tg = css_tg(css);
4101  
4102  		if (!(tg->scx_flags & SCX_TG_INITED))
4103  			continue;
4104  		tg->scx_flags &= ~SCX_TG_INITED;
4105  
4106  		if (!scx_ops.cgroup_exit)
4107  			continue;
4108  
4109  		if (WARN_ON_ONCE(!css_tryget(css)))
4110  			continue;
4111  		rcu_read_unlock();
4112  
4113  		SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_exit, css->cgroup);
4114  
4115  		rcu_read_lock();
4116  		css_put(css);
4117  	}
4118  	rcu_read_unlock();
4119  }
4120  
4121  static int scx_cgroup_init(void)
4122  {
4123  	struct cgroup_subsys_state *css;
4124  	int ret;
4125  
4126  	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
4127  
4128  	cgroup_warned_missing_weight = false;
4129  	cgroup_warned_missing_idle = false;
4130  
4131  	/*
4132  	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
4133  	 * cgroups and init each one, all online cgroups are initialized.
4134  	 */
4135  	rcu_read_lock();
4136  	css_for_each_descendant_pre(css, &root_task_group.css) {
4137  		struct task_group *tg = css_tg(css);
4138  		struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
4139  
4140  		scx_cgroup_warn_missing_weight(tg);
4141  		scx_cgroup_warn_missing_idle(tg);
4142  
4143  		if ((tg->scx_flags &
4144  		     (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
4145  			continue;
4146  
4147  		if (!scx_ops.cgroup_init) {
4148  			tg->scx_flags |= SCX_TG_INITED;
4149  			continue;
4150  		}
4151  
4152  		if (WARN_ON_ONCE(!css_tryget(css)))
4153  			continue;
4154  		rcu_read_unlock();
4155  
4156  		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
4157  				      css->cgroup, &args);
4158  		if (ret) {
4159  			css_put(css);
4160  			scx_ops_error("ops.cgroup_init() failed (%d)", ret);
4161  			return ret;
4162  		}
4163  		tg->scx_flags |= SCX_TG_INITED;
4164  
4165  		rcu_read_lock();
4166  		css_put(css);
4167  	}
4168  	rcu_read_unlock();
4169  
4170  	WARN_ON_ONCE(scx_cgroup_enabled);
4171  	scx_cgroup_enabled = true;
4172  
4173  	return 0;
4174  }
4175  
4176  #else
4177  static void scx_cgroup_exit(void) {}
4178  static int scx_cgroup_init(void) { return 0; }
4179  #endif
4180  
4181  
4182  /********************************************************************************
4183   * Sysfs interface and ops enable/disable.
4184   */
4185  
4186  #define SCX_ATTR(_name)								\
4187  	static struct kobj_attribute scx_attr_##_name = {			\
4188  		.attr = { .name = __stringify(_name), .mode = 0444 },		\
4189  		.show = scx_attr_##_name##_show,				\
4190  	}
4191  
4192  static ssize_t scx_attr_state_show(struct kobject *kobj,
4193  				   struct kobj_attribute *ka, char *buf)
4194  {
4195  	return sysfs_emit(buf, "%s\n",
4196  			  scx_ops_enable_state_str[scx_ops_enable_state()]);
4197  }
4198  SCX_ATTR(state);
4199  
4200  static ssize_t scx_attr_switch_all_show(struct kobject *kobj,
4201  					struct kobj_attribute *ka, char *buf)
4202  {
4203  	return sysfs_emit(buf, "%d\n", READ_ONCE(scx_switching_all));
4204  }
4205  SCX_ATTR(switch_all);
4206  
4207  static ssize_t scx_attr_nr_rejected_show(struct kobject *kobj,
4208  					 struct kobj_attribute *ka, char *buf)
4209  {
4210  	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_nr_rejected));
4211  }
4212  SCX_ATTR(nr_rejected);
4213  
4214  static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4215  					 struct kobj_attribute *ka, char *buf)
4216  {
4217  	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4218  }
4219  SCX_ATTR(hotplug_seq);
4220  
4221  static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4222  					struct kobj_attribute *ka, char *buf)
4223  {
4224  	return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4225  }
4226  SCX_ATTR(enable_seq);
4227  
4228  static struct attribute *scx_global_attrs[] = {
4229  	&scx_attr_state.attr,
4230  	&scx_attr_switch_all.attr,
4231  	&scx_attr_nr_rejected.attr,
4232  	&scx_attr_hotplug_seq.attr,
4233  	&scx_attr_enable_seq.attr,
4234  	NULL,
4235  };
4236  
4237  static const struct attribute_group scx_global_attr_group = {
4238  	.attrs = scx_global_attrs,
4239  };
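
/*
 * Example (illustrative sketch, not part of this file): these attributes
 * surface under /sys/kernel/sched_ext/. A user-space monitor might poll the
 * enable state like this (path assumed from the sysfs registration, error
 * handling minimal):
 *
 *	#include <stdio.h>
 *
 *	static int read_scx_state(char *buf, int len)
 *	{
 *		FILE *f = fopen("/sys/kernel/sched_ext/state", "r");
 *
 *		if (!f)
 *			return -1;
 *		if (!fgets(buf, len, f))
 *			buf[0] = '\0';
 *		fclose(f);
 *		return 0;
 *	}
 */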
4240  
4241  static void scx_kobj_release(struct kobject *kobj)
4242  {
4243  	kfree(kobj);
4244  }
4245  
4246  static ssize_t scx_attr_ops_show(struct kobject *kobj,
4247  				 struct kobj_attribute *ka, char *buf)
4248  {
4249  	return sysfs_emit(buf, "%s\n", scx_ops.name);
4250  }
4251  SCX_ATTR(ops);
4252  
4253  static struct attribute *scx_sched_attrs[] = {
4254  	&scx_attr_ops.attr,
4255  	NULL,
4256  };
4257  ATTRIBUTE_GROUPS(scx_sched);
4258  
4259  static const struct kobj_type scx_ktype = {
4260  	.release = scx_kobj_release,
4261  	.sysfs_ops = &kobj_sysfs_ops,
4262  	.default_groups = scx_sched_groups,
4263  };
4264  
4265  static int scx_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
4266  {
4267  	return add_uevent_var(env, "SCXOPS=%s", scx_ops.name);
4268  }
4269  
4270  static const struct kset_uevent_ops scx_uevent_ops = {
4271  	.uevent = scx_uevent,
4272  };
4273  
4274  /*
4275   * Used by sched_fork() and __setscheduler_prio() to pick the matching
4276   * sched_class. dl/rt are already handled.
4277   */
4278  bool task_should_scx(int policy)
4279  {
4280  	if (!scx_enabled() ||
4281  	    unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
4282  		return false;
4283  	if (READ_ONCE(scx_switching_all))
4284  		return true;
4285  	return policy == SCHED_EXT;
4286  }
4287  
4288  /**
4289   * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress
4290   *
4291   * Bypassing guarantees that all runnable tasks make forward progress without
4292   * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might
4293   * be held by tasks that the BPF scheduler is forgetting to run, which
4294   * unfortunately also excludes toggling the static branches.
4295   *
4296   * Let's work around by overriding a couple of ops and modifying behaviors based on
4297   * the DISABLING state and then cycling the queued tasks through dequeue/enqueue
4298   * to force global FIFO scheduling.
4299   *
4300   * - ops.select_cpu() is ignored and the default select_cpu() is used.
4301   *
4302   * - ops.enqueue() is ignored and tasks are queued in simple global FIFO order.
4303   *   %SCX_OPS_ENQ_LAST is also ignored.
4304   *
4305   * - ops.dispatch() is ignored.
4306   *
4307   * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
4308   *   can't be trusted. Whenever a tick triggers, the running task is rotated to
4309   *   the tail of the queue with core_sched_at touched.
4310   *
4311   * - pick_next_task() suppresses zero slice warning.
4312   *
4313   * - scx_bpf_kick_cpu() is disabled to avoid irq_work malfunction during PM
4314   *   operations.
4315   *
4316   * - scx_prio_less() reverts to the default core_sched_at order.
4317   */
4318  static void scx_ops_bypass(bool bypass)
4319  {
4320  	int cpu;
4321  	unsigned long flags;
4322  
4323  	raw_spin_lock_irqsave(&__scx_ops_bypass_lock, flags);
4324  	if (bypass) {
4325  		scx_ops_bypass_depth++;
4326  		WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
4327  		if (scx_ops_bypass_depth != 1)
4328  			goto unlock;
4329  	} else {
4330  		scx_ops_bypass_depth--;
4331  		WARN_ON_ONCE(scx_ops_bypass_depth < 0);
4332  		if (scx_ops_bypass_depth != 0)
4333  			goto unlock;
4334  	}
4335  
4336  	/*
4337  	 * No task property is changing. We just need to make sure all currently
4338  	 * queued tasks are re-queued according to the new scx_rq_bypassing()
4339  	 * state. As an optimization, walk each rq's runnable_list instead of
4340  	 * the scx_tasks list.
4341  	 *
4342  	 * This function can't trust the scheduler and thus can't use
4343  	 * cpus_read_lock(). Walk all possible CPUs instead of online.
4344  	 */
4345  	for_each_possible_cpu(cpu) {
4346  		struct rq *rq = cpu_rq(cpu);
4347  		struct rq_flags rf;
4348  		struct task_struct *p, *n;
4349  
4350  		rq_lock(rq, &rf);
4351  
4352  		if (bypass) {
4353  			WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
4354  			rq->scx.flags |= SCX_RQ_BYPASSING;
4355  		} else {
4356  			WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
4357  			rq->scx.flags &= ~SCX_RQ_BYPASSING;
4358  		}
4359  
4360  		/*
4361  		 * We need to guarantee that no tasks are on the BPF scheduler
4362  		 * while bypassing. Either we see enabled or the enable path
4363  		 * sees scx_rq_bypassing() before moving tasks to SCX.
4364  		 */
4365  		if (!scx_enabled()) {
4366  			rq_unlock_irqrestore(rq, &rf);
4367  			continue;
4368  		}
4369  
4370  		/*
4371  		 * The use of list_for_each_entry_safe_reverse() is required
4372  		 * because each task is going to be removed from and added back
4373  		 * to the runnable_list during iteration. Because they're added
4374  		 * to the tail of the list, safe reverse iteration can still
4375  		 * visit all nodes.
4376  		 */
4377  		list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
4378  						 scx.runnable_node) {
4379  			struct sched_enq_and_set_ctx ctx;
4380  
4381  			/* cycling deq/enq is enough, see the function comment */
4382  			sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4383  			sched_enq_and_set_task(&ctx);
4384  		}
4385  
4386  		rq_unlock(rq, &rf);
4387  
4388  		/* resched to restore ticks and idle state */
4389  		resched_cpu(cpu);
4390  	}
4391  unlock:
4392  	raw_spin_unlock_irqrestore(&__scx_ops_bypass_lock, flags);
4393  }
4394  
4395  static void free_exit_info(struct scx_exit_info *ei)
4396  {
4397  	kfree(ei->dump);
4398  	kfree(ei->msg);
4399  	kfree(ei->bt);
4400  	kfree(ei);
4401  }
4402  
4403  static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
4404  {
4405  	struct scx_exit_info *ei;
4406  
4407  	ei = kzalloc(sizeof(*ei), GFP_KERNEL);
4408  	if (!ei)
4409  		return NULL;
4410  
4411  	ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
4412  	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
4413  	ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
4414  
4415  	if (!ei->bt || !ei->msg || !ei->dump) {
4416  		free_exit_info(ei);
4417  		return NULL;
4418  	}
4419  
4420  	return ei;
4421  }
4422  
4423  static const char *scx_exit_reason(enum scx_exit_kind kind)
4424  {
4425  	switch (kind) {
4426  	case SCX_EXIT_UNREG:
4427  		return "unregistered from user space";
4428  	case SCX_EXIT_UNREG_BPF:
4429  		return "unregistered from BPF";
4430  	case SCX_EXIT_UNREG_KERN:
4431  		return "unregistered from the main kernel";
4432  	case SCX_EXIT_SYSRQ:
4433  		return "disabled by sysrq-S";
4434  	case SCX_EXIT_ERROR:
4435  		return "runtime error";
4436  	case SCX_EXIT_ERROR_BPF:
4437  		return "scx_bpf_error";
4438  	case SCX_EXIT_ERROR_STALL:
4439  		return "runnable task stall";
4440  	default:
4441  		return "<UNKNOWN>";
4442  	}
4443  }
4444  
4445  static void scx_ops_disable_workfn(struct kthread_work *work)
4446  {
4447  	struct scx_exit_info *ei = scx_exit_info;
4448  	struct scx_task_iter sti;
4449  	struct task_struct *p;
4450  	struct rhashtable_iter rht_iter;
4451  	struct scx_dispatch_q *dsq;
4452  	int i, kind;
4453  
4454  	kind = atomic_read(&scx_exit_kind);
4455  	while (true) {
4456  		/*
4457  		 * NONE indicates that a new scx_ops has been registered since
4458  		 * disable was scheduled - don't kill the new ops. DONE
4459  		 * indicates that the ops has already been disabled.
4460  		 */
4461  		if (kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)
4462  			return;
4463  		if (atomic_try_cmpxchg(&scx_exit_kind, &kind, SCX_EXIT_DONE))
4464  			break;
4465  	}
4466  	ei->kind = kind;
4467  	ei->reason = scx_exit_reason(ei->kind);
4468  
4469  	/* guarantee forward progress by bypassing scx_ops */
4470  	scx_ops_bypass(true);
4471  
4472  	switch (scx_ops_set_enable_state(SCX_OPS_DISABLING)) {
4473  	case SCX_OPS_DISABLING:
4474  		WARN_ONCE(true, "sched_ext: duplicate disabling instance?");
4475  		break;
4476  	case SCX_OPS_DISABLED:
4477  		pr_warn("sched_ext: ops error detected without ops (%s)\n",
4478  			scx_exit_info->msg);
4479  		WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
4480  			     SCX_OPS_DISABLING);
4481  		goto done;
4482  	default:
4483  		break;
4484  	}
4485  
4486  	/*
4487  	 * Here, every runnable task is guaranteed to make forward progress and
4488  	 * we can safely use blocking synchronization constructs. Actually
4489  	 * disable ops.
4490  	 */
4491  	mutex_lock(&scx_ops_enable_mutex);
4492  
4493  	static_branch_disable(&__scx_switched_all);
4494  	WRITE_ONCE(scx_switching_all, false);
4495  
4496  	/*
4497  	 * Shut down cgroup support before tasks so that the cgroup attach path
4498  	 * doesn't race against scx_ops_exit_task().
4499  	 */
4500  	scx_cgroup_lock();
4501  	scx_cgroup_exit();
4502  	scx_cgroup_unlock();
4503  
4504  	/*
4505  	 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
4506  	 * must be switched out and exited synchronously.
4507  	 */
4508  	percpu_down_write(&scx_fork_rwsem);
4509  
4510  	scx_ops_init_task_enabled = false;
4511  
4512  	scx_task_iter_start(&sti);
4513  	while ((p = scx_task_iter_next_locked(&sti))) {
4514  		const struct sched_class *old_class = p->sched_class;
4515  		const struct sched_class *new_class =
4516  			__setscheduler_class(p->policy, p->prio);
4517  		struct sched_enq_and_set_ctx ctx;
4518  
4519  		if (old_class != new_class && p->se.sched_delayed)
4520  			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
4521  
4522  		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
4523  
4524  		p->sched_class = new_class;
4525  		check_class_changing(task_rq(p), p, old_class);
4526  
4527  		sched_enq_and_set_task(&ctx);
4528  
4529  		check_class_changed(task_rq(p), p, old_class, p->prio);
4530  		scx_ops_exit_task(p);
4531  	}
4532  	scx_task_iter_stop(&sti);
4533  	percpu_up_write(&scx_fork_rwsem);
4534  
4535  	/* no task is on scx, turn off all the switches and flush in-progress calls */
4536  	static_branch_disable(&__scx_ops_enabled);
4537  	for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
4538  		static_branch_disable(&scx_has_op[i]);
4539  	static_branch_disable(&scx_ops_enq_last);
4540  	static_branch_disable(&scx_ops_enq_exiting);
4541  	static_branch_disable(&scx_ops_cpu_preempt);
4542  	static_branch_disable(&scx_builtin_idle_enabled);
4543  	synchronize_rcu();
4544  
4545  	if (ei->kind >= SCX_EXIT_ERROR) {
4546  		pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4547  		       scx_ops.name, ei->reason);
4548  
4549  		if (ei->msg[0] != '\0')
4550  			pr_err("sched_ext: %s: %s\n", scx_ops.name, ei->msg);
4551  #ifdef CONFIG_STACKTRACE
4552  		stack_trace_print(ei->bt, ei->bt_len, 2);
4553  #endif
4554  	} else {
4555  		pr_info("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
4556  			scx_ops.name, ei->reason);
4557  	}
4558  
4559  	if (scx_ops.exit)
4560  		SCX_CALL_OP(SCX_KF_UNLOCKED, exit, ei);
4561  
4562  	cancel_delayed_work_sync(&scx_watchdog_work);
4563  
4564  	/*
4565  	 * Delete the kobject from the hierarchy eagerly in addition to just
4566  	 * dropping a reference. Otherwise, if the object is deleted
4567  	 * asynchronously, sysfs could observe an object of the same name still
4568  	 * in the hierarchy when another scheduler is loaded.
4569  	 */
4570  	kobject_del(scx_root_kobj);
4571  	kobject_put(scx_root_kobj);
4572  	scx_root_kobj = NULL;
4573  
4574  	memset(&scx_ops, 0, sizeof(scx_ops));
4575  
4576  	rhashtable_walk_enter(&dsq_hash, &rht_iter);
4577  	do {
4578  		rhashtable_walk_start(&rht_iter);
4579  
4580  		while ((dsq = rhashtable_walk_next(&rht_iter)) && !IS_ERR(dsq))
4581  			destroy_dsq(dsq->id);
4582  
4583  		rhashtable_walk_stop(&rht_iter);
4584  	} while (dsq == ERR_PTR(-EAGAIN));
4585  	rhashtable_walk_exit(&rht_iter);
4586  
4587  	free_percpu(scx_dsp_ctx);
4588  	scx_dsp_ctx = NULL;
4589  	scx_dsp_max_batch = 0;
4590  
4591  	free_exit_info(scx_exit_info);
4592  	scx_exit_info = NULL;
4593  
4594  	mutex_unlock(&scx_ops_enable_mutex);
4595  
4596  	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) !=
4597  		     SCX_OPS_DISABLING);
4598  done:
4599  	scx_ops_bypass(false);
4600  }
4601  
4602  static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn);
4603  
4604  static void schedule_scx_ops_disable_work(void)
4605  {
4606  	struct kthread_worker *helper = READ_ONCE(scx_ops_helper);
4607  
4608  	/*
4609  	 * We may be called spuriously before the first bpf_sched_ext_reg(). If
4610  	 * scx_ops_helper isn't set up yet, there's nothing to do.
4611  	 */
4612  	if (helper)
4613  		kthread_queue_work(helper, &scx_ops_disable_work);
4614  }
4615  
4616  static void scx_ops_disable(enum scx_exit_kind kind)
4617  {
4618  	int none = SCX_EXIT_NONE;
4619  
4620  	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
4621  		kind = SCX_EXIT_ERROR;
4622  
4623  	atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
4624  
4625  	schedule_scx_ops_disable_work();
4626  }
4627  
4628  static void dump_newline(struct seq_buf *s)
4629  {
4630  	trace_sched_ext_dump("");
4631  
4632  	/* @s may be zero sized and seq_buf triggers WARN if so */
4633  	if (s->size)
4634  		seq_buf_putc(s, '\n');
4635  }
4636  
4637  static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
4638  {
4639  	va_list args;
4640  
4641  #ifdef CONFIG_TRACEPOINTS
4642  	if (trace_sched_ext_dump_enabled()) {
4643  		/* protected by scx_dump_state()::dump_lock */
4644  		static char line_buf[SCX_EXIT_MSG_LEN];
4645  
4646  		va_start(args, fmt);
4647  		vscnprintf(line_buf, sizeof(line_buf), fmt, args);
4648  		va_end(args);
4649  
4650  		trace_sched_ext_dump(line_buf);
4651  	}
4652  #endif
4653  	/* @s may be zero sized and seq_buf triggers WARN if so */
4654  	if (s->size) {
4655  		va_start(args, fmt);
4656  		seq_buf_vprintf(s, fmt, args);
4657  		va_end(args);
4658  
4659  		seq_buf_putc(s, '\n');
4660  	}
4661  }
4662  
4663  static void dump_stack_trace(struct seq_buf *s, const char *prefix,
4664  			     const unsigned long *bt, unsigned int len)
4665  {
4666  	unsigned int i;
4667  
4668  	for (i = 0; i < len; i++)
4669  		dump_line(s, "%s%pS", prefix, (void *)bt[i]);
4670  }
4671  
4672  static void ops_dump_init(struct seq_buf *s, const char *prefix)
4673  {
4674  	struct scx_dump_data *dd = &scx_dump_data;
4675  
4676  	lockdep_assert_irqs_disabled();
4677  
4678  	dd->cpu = smp_processor_id();		/* allow scx_bpf_dump() */
4679  	dd->first = true;
4680  	dd->cursor = 0;
4681  	dd->s = s;
4682  	dd->prefix = prefix;
4683  }
4684  
4685  static void ops_dump_flush(void)
4686  {
4687  	struct scx_dump_data *dd = &scx_dump_data;
4688  	char *line = dd->buf.line;
4689  
4690  	if (!dd->cursor)
4691  		return;
4692  
4693  	/*
4694  	 * There's something to flush and this is the first line. Insert a blank
4695  	 * line to distinguish ops dump.
4696  	 */
4697  	if (dd->first) {
4698  		dump_newline(dd->s);
4699  		dd->first = false;
4700  	}
4701  
4702  	/*
4703  	 * There may be multiple lines in $line. Scan and emit each line
4704  	 * separately.
4705  	 */
4706  	while (true) {
4707  		char *end = line;
4708  		char c;
4709  
4710  		while (*end != '\n' && *end != '\0')
4711  			end++;
4712  
4713  		/*
4714  		 * If $line overflowed, it may not have a newline at the end.
4715  		 * Always emit with a newline.
4716  		 */
4717  		c = *end;
4718  		*end = '\0';
4719  		dump_line(dd->s, "%s%s", dd->prefix, line);
4720  		if (c == '\0')
4721  			break;
4722  
4723  		/* move to the next line */
4724  		end++;
4725  		if (*end == '\0')
4726  			break;
4727  		line = end;
4728  	}
4729  
4730  	dd->cursor = 0;
4731  }
4732  
4733  static void ops_dump_exit(void)
4734  {
4735  	ops_dump_flush();
4736  	scx_dump_data.cpu = -1;
4737  }
4738  
4739  static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
4740  			  struct task_struct *p, char marker)
4741  {
4742  	static unsigned long bt[SCX_EXIT_BT_LEN];
4743  	char dsq_id_buf[19] = "(n/a)";
4744  	unsigned long ops_state = atomic_long_read(&p->scx.ops_state);
4745  	unsigned int bt_len = 0;
4746  
4747  	if (p->scx.dsq)
4748  		scnprintf(dsq_id_buf, sizeof(dsq_id_buf), "0x%llx",
4749  			  (unsigned long long)p->scx.dsq->id);
4750  
4751  	dump_newline(s);
4752  	dump_line(s, " %c%c %s[%d] %+ldms",
4753  		  marker, task_state_to_char(p), p->comm, p->pid,
4754  		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
4755  	dump_line(s, "      scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
4756  		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
4757  		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
4758  		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
4759  	dump_line(s, "      sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
4760  		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
4761  		  p->scx.dsq_vtime);
4762  	dump_line(s, "      cpus=%*pb", cpumask_pr_args(p->cpus_ptr));
4763  
4764  	if (SCX_HAS_OP(dump_task)) {
4765  		ops_dump_init(s, "    ");
4766  		SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p);
4767  		ops_dump_exit();
4768  	}
4769  
4770  #ifdef CONFIG_STACKTRACE
4771  	bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1);
4772  #endif
4773  	if (bt_len) {
4774  		dump_newline(s);
4775  		dump_stack_trace(s, "    ", bt, bt_len);
4776  	}
4777  }
4778  
4779  static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
4780  {
4781  	static DEFINE_SPINLOCK(dump_lock);
4782  	static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
4783  	struct scx_dump_ctx dctx = {
4784  		.kind = ei->kind,
4785  		.exit_code = ei->exit_code,
4786  		.reason = ei->reason,
4787  		.at_ns = ktime_get_ns(),
4788  		.at_jiffies = jiffies,
4789  	};
4790  	struct seq_buf s;
4791  	unsigned long flags;
4792  	char *buf;
4793  	int cpu;
4794  
4795  	spin_lock_irqsave(&dump_lock, flags);
4796  
4797  	seq_buf_init(&s, ei->dump, dump_len);
4798  
4799  	if (ei->kind == SCX_EXIT_NONE) {
4800  		dump_line(&s, "Debug dump triggered by %s", ei->reason);
4801  	} else {
4802  		dump_line(&s, "%s[%d] triggered exit kind %d:",
4803  			  current->comm, current->pid, ei->kind);
4804  		dump_line(&s, "  %s (%s)", ei->reason, ei->msg);
4805  		dump_newline(&s);
4806  		dump_line(&s, "Backtrace:");
4807  		dump_stack_trace(&s, "  ", ei->bt, ei->bt_len);
4808  	}
4809  
4810  	if (SCX_HAS_OP(dump)) {
4811  		ops_dump_init(&s, "");
4812  		SCX_CALL_OP(SCX_KF_UNLOCKED, dump, &dctx);
4813  		ops_dump_exit();
4814  	}
4815  
4816  	dump_newline(&s);
4817  	dump_line(&s, "CPU states");
4818  	dump_line(&s, "----------");
4819  
4820  	for_each_possible_cpu(cpu) {
4821  		struct rq *rq = cpu_rq(cpu);
4822  		struct rq_flags rf;
4823  		struct task_struct *p;
4824  		struct seq_buf ns;
4825  		size_t avail, used;
4826  		bool idle;
4827  
4828  		rq_lock(rq, &rf);
4829  
4830  		idle = list_empty(&rq->scx.runnable_list) &&
4831  			rq->curr->sched_class == &idle_sched_class;
4832  
4833  		if (idle && !SCX_HAS_OP(dump_cpu))
4834  			goto next;
4835  
4836  		/*
4837  		 * We don't yet know whether ops.dump_cpu() will produce output
4838  		 * and we may want to skip the default CPU dump if it doesn't.
4839  		 * Use a nested seq_buf to generate the standard dump so that we
4840  		 * can decide whether to commit later.
4841  		 */
4842  		avail = seq_buf_get_buf(&s, &buf);
4843  		seq_buf_init(&ns, buf, avail);
4844  
4845  		dump_newline(&ns);
4846  		dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu",
4847  			  cpu, rq->scx.nr_running, rq->scx.flags,
4848  			  rq->scx.cpu_released, rq->scx.ops_qseq,
4849  			  rq->scx.pnt_seq);
4850  		dump_line(&ns, "          curr=%s[%d] class=%ps",
4851  			  rq->curr->comm, rq->curr->pid,
4852  			  rq->curr->sched_class);
4853  		if (!cpumask_empty(rq->scx.cpus_to_kick))
4854  			dump_line(&ns, "  cpus_to_kick   : %*pb",
4855  				  cpumask_pr_args(rq->scx.cpus_to_kick));
4856  		if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
4857  			dump_line(&ns, "  idle_to_kick   : %*pb",
4858  				  cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
4859  		if (!cpumask_empty(rq->scx.cpus_to_preempt))
4860  			dump_line(&ns, "  cpus_to_preempt: %*pb",
4861  				  cpumask_pr_args(rq->scx.cpus_to_preempt));
4862  		if (!cpumask_empty(rq->scx.cpus_to_wait))
4863  			dump_line(&ns, "  cpus_to_wait   : %*pb",
4864  				  cpumask_pr_args(rq->scx.cpus_to_wait));
4865  
4866  		used = seq_buf_used(&ns);
4867  		if (SCX_HAS_OP(dump_cpu)) {
4868  			ops_dump_init(&ns, "  ");
4869  			SCX_CALL_OP(SCX_KF_REST, dump_cpu, &dctx, cpu, idle);
4870  			ops_dump_exit();
4871  		}
4872  
4873  		/*
4874  		 * If idle && nothing generated by ops.dump_cpu(), there's
4875  		 * nothing interesting. Skip.
4876  		 */
4877  		if (idle && used == seq_buf_used(&ns))
4878  			goto next;
4879  
4880  		/*
4881  		 * $s may already have overflowed when $ns was created. If so,
4882  		 * calling commit on it will trigger BUG.
4883  		 */
4884  		if (avail) {
4885  			seq_buf_commit(&s, seq_buf_used(&ns));
4886  			if (seq_buf_has_overflowed(&ns))
4887  				seq_buf_set_overflow(&s);
4888  		}
4889  
4890  		if (rq->curr->sched_class == &ext_sched_class)
4891  			scx_dump_task(&s, &dctx, rq->curr, '*');
4892  
4893  		list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
4894  			scx_dump_task(&s, &dctx, p, ' ');
4895  	next:
4896  		rq_unlock(rq, &rf);
4897  	}
4898  
4899  	if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
4900  		memcpy(ei->dump + dump_len - sizeof(trunc_marker),
4901  		       trunc_marker, sizeof(trunc_marker));
4902  
4903  	spin_unlock_irqrestore(&dump_lock, flags);
4904  }
4905  
4906  static void scx_ops_error_irq_workfn(struct irq_work *irq_work)
4907  {
4908  	struct scx_exit_info *ei = scx_exit_info;
4909  
4910  	if (ei->kind >= SCX_EXIT_ERROR)
4911  		scx_dump_state(ei, scx_ops.exit_dump_len);
4912  
4913  	schedule_scx_ops_disable_work();
4914  }
4915  
4916  static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn);
4917  
4918  static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
4919  					     s64 exit_code,
4920  					     const char *fmt, ...)
4921  {
4922  	struct scx_exit_info *ei = scx_exit_info;
4923  	int none = SCX_EXIT_NONE;
4924  	va_list args;
4925  
4926  	if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
4927  		return;
4928  
4929  	ei->exit_code = exit_code;
4930  #ifdef CONFIG_STACKTRACE
4931  	if (kind >= SCX_EXIT_ERROR)
4932  		ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);
4933  #endif
4934  	va_start(args, fmt);
4935  	vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
4936  	va_end(args);
4937  
4938  	/*
4939  	 * Set ei->kind and ->reason for scx_dump_state(). They'll be set again
4940  	 * in scx_ops_disable_workfn().
4941  	 */
4942  	ei->kind = kind;
4943  	ei->reason = scx_exit_reason(ei->kind);
4944  
4945  	irq_work_queue(&scx_ops_error_irq_work);
4946  }
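
/*
 * Callers normally reach scx_ops_exit_kind() through the scx_ops_exit() and
 * scx_ops_error() wrappers defined earlier in this file, which pass
 * %SCX_EXIT_UNREG_KERN and %SCX_EXIT_ERROR respectively (see the exit code
 * comment above enum scx_exit_code).
 */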
4947  
4948  static struct kthread_worker *scx_create_rt_helper(const char *name)
4949  {
4950  	struct kthread_worker *helper;
4951  
4952  	helper = kthread_create_worker(0, name);
4953  	if (helper)
4954  		sched_set_fifo(helper->task);
4955  	return helper;
4956  }
4957  
4958  static void check_hotplug_seq(const struct sched_ext_ops *ops)
4959  {
4960  	unsigned long long global_hotplug_seq;
4961  
4962  	/*
4963  	 * If a hotplug event has occurred between when a scheduler was
4964  	 * initialized and when we were able to attach, exit and notify user
4965  	 * space about it.
4966  	 */
4967  	if (ops->hotplug_seq) {
4968  		global_hotplug_seq = atomic_long_read(&scx_hotplug_seq);
4969  		if (ops->hotplug_seq != global_hotplug_seq) {
4970  			scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
4971  				     "expected hotplug seq %llu did not match actual %llu",
4972  				     ops->hotplug_seq, global_hotplug_seq);
4973  		}
4974  	}
4975  }
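
/*
 * Illustrative sketch of the user-space side (names are hypothetical): a
 * loader can close the race described above by sampling the global sequence
 * before load and passing it in through ops->hotplug_seq:
 *
 *	// e.g. read from /sys/kernel/sched_ext/hotplug_seq
 *	skel->struct_ops.my_ops->hotplug_seq = read_hotplug_seq();
 *	link = bpf_map__attach_struct_ops(skel->maps.my_ops);
 *
 * If a hotplug event slips in between, the scheduler exits with
 * %SCX_ECODE_ACT_RESTART | %SCX_ECODE_RSN_HOTPLUG and the loader can retry.
 */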
4976  
4977  static int validate_ops(const struct sched_ext_ops *ops)
4978  {
4979  	/*
4980  	 * It doesn't make sense to specify the SCX_OPS_ENQ_LAST flag if the
4981  	 * ops.enqueue() callback isn't implemented.
4982  	 */
4983  	if ((ops->flags & SCX_OPS_ENQ_LAST) && !ops->enqueue) {
4984  		scx_ops_error("SCX_OPS_ENQ_LAST requires ops.enqueue() to be implemented");
4985  		return -EINVAL;
4986  	}
4987  
4988  	return 0;
4989  }
4990  
4991  static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
4992  {
4993  	struct scx_task_iter sti;
4994  	struct task_struct *p;
4995  	unsigned long timeout;
4996  	int i, cpu, node, ret;
4997  
4998  	if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
4999  			   cpu_possible_mask)) {
5000  		pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
5001  		return -EINVAL;
5002  	}
5003  
5004  	mutex_lock(&scx_ops_enable_mutex);
5005  
5006  	if (!scx_ops_helper) {
5007  		WRITE_ONCE(scx_ops_helper,
5008  			   scx_create_rt_helper("sched_ext_ops_helper"));
5009  		if (!scx_ops_helper) {
5010  			ret = -ENOMEM;
5011  			goto err_unlock;
5012  		}
5013  	}
5014  
5015  	if (!global_dsqs) {
5016  		struct scx_dispatch_q **dsqs;
5017  
5018  		dsqs = kcalloc(nr_node_ids, sizeof(dsqs[0]), GFP_KERNEL);
5019  		if (!dsqs) {
5020  			ret = -ENOMEM;
5021  			goto err_unlock;
5022  		}
5023  
5024  		for_each_node_state(node, N_POSSIBLE) {
5025  			struct scx_dispatch_q *dsq;
5026  
5027  			dsq = kzalloc_node(sizeof(*dsq), GFP_KERNEL, node);
5028  			if (!dsq) {
5029  				for_each_node_state(node, N_POSSIBLE)
5030  					kfree(dsqs[node]);
5031  				kfree(dsqs);
5032  				ret = -ENOMEM;
5033  				goto err_unlock;
5034  			}
5035  
5036  			init_dsq(dsq, SCX_DSQ_GLOBAL);
5037  			dsqs[node] = dsq;
5038  		}
5039  
5040  		global_dsqs = dsqs;
5041  	}
5042  
5043  	if (scx_ops_enable_state() != SCX_OPS_DISABLED) {
5044  		ret = -EBUSY;
5045  		goto err_unlock;
5046  	}
5047  
5048  	scx_root_kobj = kzalloc(sizeof(*scx_root_kobj), GFP_KERNEL);
5049  	if (!scx_root_kobj) {
5050  		ret = -ENOMEM;
5051  		goto err_unlock;
5052  	}
5053  
5054  	scx_root_kobj->kset = scx_kset;
5055  	ret = kobject_init_and_add(scx_root_kobj, &scx_ktype, NULL, "root");
5056  	if (ret < 0)
5057  		goto err;
5058  
5059  	scx_exit_info = alloc_exit_info(ops->exit_dump_len);
5060  	if (!scx_exit_info) {
5061  		ret = -ENOMEM;
5062  		goto err_del;
5063  	}
5064  
5065  	/*
5066  	 * Set scx_ops, transition to ENABLING and clear exit info to arm the
5067  	 * disable path. Failure triggers full disabling from here on.
5068  	 */
5069  	scx_ops = *ops;
5070  
5071  	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
5072  		     SCX_OPS_DISABLED);
5073  
5074  	atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
5075  	scx_warned_zero_slice = false;
5076  
5077  	atomic_long_set(&scx_nr_rejected, 0);
5078  
5079  	for_each_possible_cpu(cpu)
5080  		cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;
5081  
5082  	/*
5083  	 * Keep CPUs stable during enable so that the BPF scheduler can track
5084  	 * online CPUs by watching ->on/offline_cpu() after ->init().
5085  	 */
5086  	cpus_read_lock();
5087  
5088  	if (scx_ops.init) {
5089  		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
5090  		if (ret) {
5091  			ret = ops_sanitize_err("init", ret);
5092  			cpus_read_unlock();
5093  			scx_ops_error("ops.init() failed (%d)", ret);
5094  			goto err_disable;
5095  		}
5096  	}
5097  
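	/*
	 * Descriptive note: struct sched_ext_ops starts with the ops function
	 * pointers, so @ops can be viewed as an array of function pointers
	 * indexed by SCX_OP_IDX(). Enable the static branch for each hotplug
	 * op that the BPF scheduler actually implements.
	 */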
5098  	for (i = SCX_OPI_CPU_HOTPLUG_BEGIN; i < SCX_OPI_CPU_HOTPLUG_END; i++)
5099  		if (((void (**)(void))ops)[i])
5100  			static_branch_enable_cpuslocked(&scx_has_op[i]);
5101  
5102  	check_hotplug_seq(ops);
5103  	cpus_read_unlock();
5104  
5105  	ret = validate_ops(ops);
5106  	if (ret)
5107  		goto err_disable;
5108  
5109  	WARN_ON_ONCE(scx_dsp_ctx);
5110  	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
5111  	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
5112  						   scx_dsp_max_batch),
5113  				     __alignof__(struct scx_dsp_ctx));
5114  	if (!scx_dsp_ctx) {
5115  		ret = -ENOMEM;
5116  		goto err_disable;
5117  	}
5118  
5119  	if (ops->timeout_ms)
5120  		timeout = msecs_to_jiffies(ops->timeout_ms);
5121  	else
5122  		timeout = SCX_WATCHDOG_MAX_TIMEOUT;
5123  
5124  	WRITE_ONCE(scx_watchdog_timeout, timeout);
5125  	WRITE_ONCE(scx_watchdog_timestamp, jiffies);
5126  	queue_delayed_work(system_unbound_wq, &scx_watchdog_work,
5127  			   scx_watchdog_timeout / 2);
5128  
5129  	/*
5130  	 * Once __scx_ops_enabled is set, %current can be switched to SCX
5131  	 * anytime. This can lead to stalls as some BPF schedulers (e.g.
5132  	 * userspace scheduling) may not function correctly before all tasks are
5133  	 * switched. Init in bypass mode to guarantee forward progress.
5134  	 */
5135  	scx_ops_bypass(true);
5136  
5137  	for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
5138  		if (((void (**)(void))ops)[i])
5139  			static_branch_enable(&scx_has_op[i]);
5140  
5141  	if (ops->flags & SCX_OPS_ENQ_LAST)
5142  		static_branch_enable(&scx_ops_enq_last);
5143  
5144  	if (ops->flags & SCX_OPS_ENQ_EXITING)
5145  		static_branch_enable(&scx_ops_enq_exiting);
5146  	if (scx_ops.cpu_acquire || scx_ops.cpu_release)
5147  		static_branch_enable(&scx_ops_cpu_preempt);
5148  
5149  	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
5150  		reset_idle_masks();
5151  		static_branch_enable(&scx_builtin_idle_enabled);
5152  	} else {
5153  		static_branch_disable(&scx_builtin_idle_enabled);
5154  	}
5155  
5156  	/*
5157  	 * Lock out forks, cgroup on/offlining and moves before opening the
5158  	 * floodgate so that they don't wander into the operations prematurely.
5159  	 */
5160  	percpu_down_write(&scx_fork_rwsem);
5161  
5162  	WARN_ON_ONCE(scx_ops_init_task_enabled);
5163  	scx_ops_init_task_enabled = true;
5164  
5165  	/*
5166  	 * Enable ops for every task. Fork is excluded by scx_fork_rwsem
5167  	 * preventing new tasks from being added. No need to exclude tasks
5168  	 * leaving as sched_ext_free() can handle both prepped and enabled
5169  	 * tasks. Prep all tasks first and then enable them with preemption
5170  	 * disabled.
5171  	 *
5172  	 * All cgroups should be initialized before scx_ops_init_task() so that
5173  	 * the BPF scheduler can reliably track each task's cgroup membership
5174  	 * from scx_ops_init_task(). Lock out cgroup on/offlining and task
5175  	 * migrations while tasks are being initialized so that
5176  	 * scx_cgroup_can_attach() never sees uninitialized tasks.
5177  	 */
5178  	scx_cgroup_lock();
5179  	ret = scx_cgroup_init();
5180  	if (ret)
5181  		goto err_disable_unlock_all;
5182  
5183  	scx_task_iter_start(&sti);
5184  	while ((p = scx_task_iter_next_locked(&sti))) {
5185  		/*
5186  		 * @p may already be dead, have lost all its usage counts and
5187  		 * be waiting for RCU grace period before being freed. @p can't
5188  		 * be initialized for SCX in such cases and should be ignored.
5189  		 */
5190  		if (!tryget_task_struct(p))
5191  			continue;
5192  
5193  		scx_task_iter_unlock(&sti);
5194  
5195  		ret = scx_ops_init_task(p, task_group(p), false);
5196  		if (ret) {
5197  			put_task_struct(p);
5198  			scx_task_iter_relock(&sti);
5199  			scx_task_iter_stop(&sti);
5200  			scx_ops_error("ops.init_task() failed (%d) for %s[%d]",
5201  				      ret, p->comm, p->pid);
5202  			goto err_disable_unlock_all;
5203  		}
5204  
5205  		scx_set_task_state(p, SCX_TASK_READY);
5206  
5207  		put_task_struct(p);
5208  		scx_task_iter_relock(&sti);
5209  	}
5210  	scx_task_iter_stop(&sti);
5211  	scx_cgroup_unlock();
5212  	percpu_up_write(&scx_fork_rwsem);
5213  
5214  	/*
5215  	 * All tasks are READY. It's safe to turn on scx_enabled() and switch
5216  	 * all eligible tasks.
5217  	 */
5218  	WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
5219  	static_branch_enable(&__scx_ops_enabled);
5220  
5221  	/*
5222  	 * We're fully committed and can't fail. The task READY -> ENABLED
5223  	 * transitions here are synchronized against sched_ext_free() through
5224  	 * scx_tasks_lock.
5225  	 */
5226  	percpu_down_write(&scx_fork_rwsem);
5227  	scx_task_iter_start(&sti);
5228  	while ((p = scx_task_iter_next_locked(&sti))) {
5229  		const struct sched_class *old_class = p->sched_class;
5230  		const struct sched_class *new_class =
5231  			__setscheduler_class(p->policy, p->prio);
5232  		struct sched_enq_and_set_ctx ctx;
5233  
5234  		if (old_class != new_class && p->se.sched_delayed)
5235  			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5236  
5237  		sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
5238  
5239  		p->scx.slice = SCX_SLICE_DFL;
5240  		p->sched_class = new_class;
5241  		check_class_changing(task_rq(p), p, old_class);
5242  
5243  		sched_enq_and_set_task(&ctx);
5244  
5245  		check_class_changed(task_rq(p), p, old_class, p->prio);
5246  	}
5247  	scx_task_iter_stop(&sti);
5248  	percpu_up_write(&scx_fork_rwsem);
5249  
5250  	scx_ops_bypass(false);
5251  
5252  	if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
5253  		WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
5254  		goto err_disable;
5255  	}
5256  
5257  	if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5258  		static_branch_enable(&__scx_switched_all);
5259  
5260  	pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5261  		scx_ops.name, scx_switched_all() ? "" : " (partial)");
5262  	kobject_uevent(scx_root_kobj, KOBJ_ADD);
5263  	mutex_unlock(&scx_ops_enable_mutex);
5264  
5265  	atomic_long_inc(&scx_enable_seq);
5266  
5267  	return 0;
5268  
5269  err_del:
5270  	kobject_del(scx_root_kobj);
5271  err:
5272  	kobject_put(scx_root_kobj);
5273  	scx_root_kobj = NULL;
5274  	if (scx_exit_info) {
5275  		free_exit_info(scx_exit_info);
5276  		scx_exit_info = NULL;
5277  	}
5278  err_unlock:
5279  	mutex_unlock(&scx_ops_enable_mutex);
5280  	return ret;
5281  
5282  err_disable_unlock_all:
5283  	scx_cgroup_unlock();
5284  	percpu_up_write(&scx_fork_rwsem);
5285  	scx_ops_bypass(false);
5286  err_disable:
5287  	mutex_unlock(&scx_ops_enable_mutex);
5288  	/*
5289  	 * Returning an error code here would not pass all the error information
5290  	 * to userspace. Record errno using scx_ops_error() for cases where
5291  	 * scx_ops_error() wasn't already invoked and exit indicating success so
5292  	 * that the error is notified through ops.exit() with all the details.
5293  	 *
5294  	 * Flush scx_ops_disable_work to ensure that error is reported before
5295  	 * init completion.
5296  	 */
5297  	scx_ops_error("scx_ops_enable() failed (%d)", ret);
5298  	kthread_flush_work(&scx_ops_disable_work);
5299  	return 0;
5300  }
5301  
5302  
5303  /********************************************************************************
5304   * bpf_struct_ops plumbing.
5305   */
5306  #include <linux/bpf_verifier.h>
5307  #include <linux/bpf.h>
5308  #include <linux/btf.h>
5309  
5310  extern struct btf *btf_vmlinux;
5311  static const struct btf_type *task_struct_type;
5312  static u32 task_struct_type_id;
5313  
5314  static bool set_arg_maybe_null(const char *op, int arg_n, int off, int size,
5315  			       enum bpf_access_type type,
5316  			       const struct bpf_prog *prog,
5317  			       struct bpf_insn_access_aux *info)
5318  {
5319  	struct btf *btf = bpf_get_btf_vmlinux();
5320  	const struct bpf_struct_ops_desc *st_ops_desc;
5321  	const struct btf_member *member;
5322  	const struct btf_type *t;
5323  	u32 btf_id, member_idx;
5324  	const char *mname;
5325  
5326  	/* struct_ops op args are all sequential, 64-bit numbers */
5327  	if (off != arg_n * sizeof(__u64))
5328  		return false;
5329  
5330  	/* btf_id should be the type id of struct sched_ext_ops */
5331  	btf_id = prog->aux->attach_btf_id;
5332  	st_ops_desc = bpf_struct_ops_find(btf, btf_id);
5333  	if (!st_ops_desc)
5334  		return false;
5335  
5336  	/* BTF type of struct sched_ext_ops */
5337  	t = st_ops_desc->type;
5338  
5339  	member_idx = prog->expected_attach_type;
5340  	if (member_idx >= btf_type_vlen(t))
5341  		return false;
5342  
5343  	/*
5344  	 * Get the member name of this struct_ops program, which corresponds to
5345  	 * a field in struct sched_ext_ops. For example, the member name of the
5346  	 * dispatch struct_ops program (callback) is "dispatch".
5347  	 */
5348  	member = &btf_type_member(t)[member_idx];
5349  	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
5350  
5351  	if (!strcmp(mname, op)) {
5352  		/*
5353  		 * The value is a pointer to a type (struct task_struct) given
5354  		 * by a BTF ID (PTR_TO_BTF_ID). It is trusted (PTR_TRUSTED);
5355  		 * however, it can be NULL (PTR_MAYBE_NULL). The BPF program
5356  		 * should check the pointer to make sure it is not NULL before
5357  		 * using it, or the verifier will reject the program.
5358  		 *
5359  		 * Longer term, this is something that should be addressed by
5360  		 * BTF, and be fully contained within the verifier.
5361  		 */
5362  		info->reg_type = PTR_MAYBE_NULL | PTR_TO_BTF_ID | PTR_TRUSTED;
5363  		info->btf = btf_vmlinux;
5364  		info->btf_id = task_struct_type_id;
5365  
5366  		return true;
5367  	}
5368  
5369  	return false;
5370  }
5371  
5372  static bool bpf_scx_is_valid_access(int off, int size,
5373  				    enum bpf_access_type type,
5374  				    const struct bpf_prog *prog,
5375  				    struct bpf_insn_access_aux *info)
5376  {
5377  	if (type != BPF_READ)
5378  		return false;
5379  	if (set_arg_maybe_null("dispatch", 1, off, size, type, prog, info) ||
5380  	    set_arg_maybe_null("yield", 1, off, size, type, prog, info))
5381  		return true;
5382  	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
5383  		return false;
5384  	if (off % size != 0)
5385  		return false;
5386  
5387  	return btf_ctx_access(off, size, type, prog, info);
5388  }
5389  
5390  static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log,
5391  				     const struct bpf_reg_state *reg, int off,
5392  				     int size)
5393  {
5394  	const struct btf_type *t;
5395  
5396  	t = btf_type_by_id(reg->btf, reg->btf_id);
5397  	if (t == task_struct_type) {
5398  		if (off >= offsetof(struct task_struct, scx.slice) &&
5399  		    off + size <= offsetofend(struct task_struct, scx.slice))
5400  			return SCALAR_VALUE;
5401  		if (off >= offsetof(struct task_struct, scx.dsq_vtime) &&
5402  		    off + size <= offsetofend(struct task_struct, scx.dsq_vtime))
5403  			return SCALAR_VALUE;
5404  		if (off >= offsetof(struct task_struct, scx.disallow) &&
5405  		    off + size <= offsetofend(struct task_struct, scx.disallow))
5406  			return SCALAR_VALUE;
5407  	}
5408  
5409  	return -EACCES;
5410  }
5411  
5412  static const struct bpf_func_proto *
5413  bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5414  {
5415  	switch (func_id) {
5416  	case BPF_FUNC_task_storage_get:
5417  		return &bpf_task_storage_get_proto;
5418  	case BPF_FUNC_task_storage_delete:
5419  		return &bpf_task_storage_delete_proto;
5420  	default:
5421  		return bpf_base_func_proto(func_id, prog);
5422  	}
5423  }
5424  
5425  static const struct bpf_verifier_ops bpf_scx_verifier_ops = {
5426  	.get_func_proto = bpf_scx_get_func_proto,
5427  	.is_valid_access = bpf_scx_is_valid_access,
5428  	.btf_struct_access = bpf_scx_btf_struct_access,
5429  };
5430  
5431  static int bpf_scx_init_member(const struct btf_type *t,
5432  			       const struct btf_member *member,
5433  			       void *kdata, const void *udata)
5434  {
5435  	const struct sched_ext_ops *uops = udata;
5436  	struct sched_ext_ops *ops = kdata;
5437  	u32 moff = __btf_member_bit_offset(t, member) / 8;
5438  	int ret;
5439  
5440  	switch (moff) {
5441  	case offsetof(struct sched_ext_ops, dispatch_max_batch):
5442  		if (*(u32 *)(udata + moff) > INT_MAX)
5443  			return -E2BIG;
5444  		ops->dispatch_max_batch = *(u32 *)(udata + moff);
5445  		return 1;
5446  	case offsetof(struct sched_ext_ops, flags):
5447  		if (*(u64 *)(udata + moff) & ~SCX_OPS_ALL_FLAGS)
5448  			return -EINVAL;
5449  		ops->flags = *(u64 *)(udata + moff);
5450  		return 1;
5451  	case offsetof(struct sched_ext_ops, name):
5452  		ret = bpf_obj_name_cpy(ops->name, uops->name,
5453  				       sizeof(ops->name));
5454  		if (ret < 0)
5455  			return ret;
5456  		if (ret == 0)
5457  			return -EINVAL;
5458  		return 1;
5459  	case offsetof(struct sched_ext_ops, timeout_ms):
5460  		if (msecs_to_jiffies(*(u32 *)(udata + moff)) >
5461  		    SCX_WATCHDOG_MAX_TIMEOUT)
5462  			return -E2BIG;
5463  		ops->timeout_ms = *(u32 *)(udata + moff);
5464  		return 1;
5465  	case offsetof(struct sched_ext_ops, exit_dump_len):
5466  		ops->exit_dump_len =
5467  			*(u32 *)(udata + moff) ?: SCX_EXIT_DUMP_DFL_LEN;
5468  		return 1;
5469  	case offsetof(struct sched_ext_ops, hotplug_seq):
5470  		ops->hotplug_seq = *(u64 *)(udata + moff);
5471  		return 1;
5472  	}
5473  
5474  	return 0;
5475  }
5476  
5477  static int bpf_scx_check_member(const struct btf_type *t,
5478  				const struct btf_member *member,
5479  				const struct bpf_prog *prog)
5480  {
5481  	u32 moff = __btf_member_bit_offset(t, member) / 8;
5482  
5483  	switch (moff) {
5484  	case offsetof(struct sched_ext_ops, init_task):
5485  #ifdef CONFIG_EXT_GROUP_SCHED
5486  	case offsetof(struct sched_ext_ops, cgroup_init):
5487  	case offsetof(struct sched_ext_ops, cgroup_exit):
5488  	case offsetof(struct sched_ext_ops, cgroup_prep_move):
5489  #endif
5490  	case offsetof(struct sched_ext_ops, cpu_online):
5491  	case offsetof(struct sched_ext_ops, cpu_offline):
5492  	case offsetof(struct sched_ext_ops, init):
5493  	case offsetof(struct sched_ext_ops, exit):
5494  		break;
5495  	default:
5496  		if (prog->sleepable)
5497  			return -EINVAL;
5498  	}
5499  
5500  	return 0;
5501  }
5502  
5503  static int bpf_scx_reg(void *kdata, struct bpf_link *link)
5504  {
5505  	return scx_ops_enable(kdata, link);
5506  }
5507  
5508  static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
5509  {
5510  	scx_ops_disable(SCX_EXIT_UNREG);
5511  	kthread_flush_work(&scx_ops_disable_work);
5512  }
5513  
5514  static int bpf_scx_init(struct btf *btf)
5515  {
5516  	s32 type_id;
5517  
5518  	type_id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
5519  	if (type_id < 0)
5520  		return -EINVAL;
5521  	task_struct_type = btf_type_by_id(btf, type_id);
5522  	task_struct_type_id = type_id;
5523  
5524  	return 0;
5525  }
5526  
5527  static int bpf_scx_update(void *kdata, void *old_kdata, struct bpf_link *link)
5528  {
5529  	/*
5530  	 * sched_ext does not support updating the actively-loaded BPF
5531  	 * scheduler, as registering a BPF scheduler can always fail if the
5532  	 * scheduler returns an error code for e.g. ops.init(), ops.init_task(),
5533  	 * etc. Similarly, we can always race with unregistration happening
5534  	 * elsewhere, such as with sysrq.
5535  	 */
5536  	return -EOPNOTSUPP;
5537  }
5538  
5539  static int bpf_scx_validate(void *kdata)
5540  {
5541  	return 0;
5542  }
5543  
5544  static s32 select_cpu_stub(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
5545  static void enqueue_stub(struct task_struct *p, u64 enq_flags) {}
5546  static void dequeue_stub(struct task_struct *p, u64 enq_flags) {}
5547  static void dispatch_stub(s32 prev_cpu, struct task_struct *p) {}
5548  static void tick_stub(struct task_struct *p) {}
5549  static void runnable_stub(struct task_struct *p, u64 enq_flags) {}
5550  static void running_stub(struct task_struct *p) {}
5551  static void stopping_stub(struct task_struct *p, bool runnable) {}
5552  static void quiescent_stub(struct task_struct *p, u64 deq_flags) {}
5553  static bool yield_stub(struct task_struct *from, struct task_struct *to) { return false; }
5554  static bool core_sched_before_stub(struct task_struct *a, struct task_struct *b) { return false; }
5555  static void set_weight_stub(struct task_struct *p, u32 weight) {}
5556  static void set_cpumask_stub(struct task_struct *p, const struct cpumask *mask) {}
5557  static void update_idle_stub(s32 cpu, bool idle) {}
5558  static void cpu_acquire_stub(s32 cpu, struct scx_cpu_acquire_args *args) {}
5559  static void cpu_release_stub(s32 cpu, struct scx_cpu_release_args *args) {}
5560  static s32 init_task_stub(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
5561  static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {}
5562  static void enable_stub(struct task_struct *p) {}
5563  static void disable_stub(struct task_struct *p) {}
5564  #ifdef CONFIG_EXT_GROUP_SCHED
5565  static s32 cgroup_init_stub(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
5566  static void cgroup_exit_stub(struct cgroup *cgrp) {}
5567  static s32 cgroup_prep_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
5568  static void cgroup_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5569  static void cgroup_cancel_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
5570  static void cgroup_set_weight_stub(struct cgroup *cgrp, u32 weight) {}
5571  #endif
5572  static void cpu_online_stub(s32 cpu) {}
5573  static void cpu_offline_stub(s32 cpu) {}
5574  static s32 init_stub(void) { return -EINVAL; }
5575  static void exit_stub(struct scx_exit_info *info) {}
5576  static void dump_stub(struct scx_dump_ctx *ctx) {}
5577  static void dump_cpu_stub(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
5578  static void dump_task_stub(struct scx_dump_ctx *ctx, struct task_struct *p) {}
5579  
5580  static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
5581  	.select_cpu = select_cpu_stub,
5582  	.enqueue = enqueue_stub,
5583  	.dequeue = dequeue_stub,
5584  	.dispatch = dispatch_stub,
5585  	.tick = tick_stub,
5586  	.runnable = runnable_stub,
5587  	.running = running_stub,
5588  	.stopping = stopping_stub,
5589  	.quiescent = quiescent_stub,
5590  	.yield = yield_stub,
5591  	.core_sched_before = core_sched_before_stub,
5592  	.set_weight = set_weight_stub,
5593  	.set_cpumask = set_cpumask_stub,
5594  	.update_idle = update_idle_stub,
5595  	.cpu_acquire = cpu_acquire_stub,
5596  	.cpu_release = cpu_release_stub,
5597  	.init_task = init_task_stub,
5598  	.exit_task = exit_task_stub,
5599  	.enable = enable_stub,
5600  	.disable = disable_stub,
5601  #ifdef CONFIG_EXT_GROUP_SCHED
5602  	.cgroup_init = cgroup_init_stub,
5603  	.cgroup_exit = cgroup_exit_stub,
5604  	.cgroup_prep_move = cgroup_prep_move_stub,
5605  	.cgroup_move = cgroup_move_stub,
5606  	.cgroup_cancel_move = cgroup_cancel_move_stub,
5607  	.cgroup_set_weight = cgroup_set_weight_stub,
5608  #endif
5609  	.cpu_online = cpu_online_stub,
5610  	.cpu_offline = cpu_offline_stub,
5611  	.init = init_stub,
5612  	.exit = exit_stub,
5613  	.dump = dump_stub,
5614  	.dump_cpu = dump_cpu_stub,
5615  	.dump_task = dump_task_stub,
5616  };
5617  
5618  static struct bpf_struct_ops bpf_sched_ext_ops = {
5619  	.verifier_ops = &bpf_scx_verifier_ops,
5620  	.reg = bpf_scx_reg,
5621  	.unreg = bpf_scx_unreg,
5622  	.check_member = bpf_scx_check_member,
5623  	.init_member = bpf_scx_init_member,
5624  	.init = bpf_scx_init,
5625  	.update = bpf_scx_update,
5626  	.validate = bpf_scx_validate,
5627  	.name = "sched_ext_ops",
5628  	.owner = THIS_MODULE,
5629  	.cfi_stubs = &__bpf_ops_sched_ext_ops
5630  };
5631  
5632  
5633  /********************************************************************************
5634   * System integration and init.
5635   */
5636  
5637  static void sysrq_handle_sched_ext_reset(u8 key)
5638  {
5639  	if (scx_ops_helper)
5640  		scx_ops_disable(SCX_EXIT_SYSRQ);
5641  	else
5642  		pr_info("sched_ext: BPF scheduler not yet used\n");
5643  }
5644  
5645  static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
5646  	.handler	= sysrq_handle_sched_ext_reset,
5647  	.help_msg	= "reset-sched-ext(S)",
5648  	.action_msg	= "Disable sched_ext and revert all tasks to CFS",
5649  	.enable_mask	= SYSRQ_ENABLE_RTNICE,
5650  };
5651  
5652  static void sysrq_handle_sched_ext_dump(u8 key)
5653  {
5654  	struct scx_exit_info ei = { .kind = SCX_EXIT_NONE, .reason = "SysRq-D" };
5655  
5656  	if (scx_enabled())
5657  		scx_dump_state(&ei, 0);
5658  }
5659  
5660  static const struct sysrq_key_op sysrq_sched_ext_dump_op = {
5661  	.handler	= sysrq_handle_sched_ext_dump,
5662  	.help_msg	= "dump-sched-ext(D)",
5663  	.action_msg	= "Trigger sched_ext debug dump",
5664  	.enable_mask	= SYSRQ_ENABLE_RTNICE,
5665  };
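
/*
 * For example, "echo D > /proc/sysrq-trigger" invokes the dump handler above
 * once the key is registered in init_sched_ext_class() below.
 */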
5666  
5667  static bool can_skip_idle_kick(struct rq *rq)
5668  {
5669  	lockdep_assert_rq_held(rq);
5670  
5671  	/*
5672  	 * We can skip idle kicking if @rq is going to go through at least one
5673  	 * full SCX scheduling cycle before going idle. Just checking whether
5674  	 * curr is not idle is insufficient because we could be racing
5675  	 * balance_one() trying to pull the next task from a remote rq, which
5676  	 * may fail, and @rq may become idle afterwards.
5677  	 *
5678  	 * The race window is small and we don't and can't guarantee that @rq is
5679  	 * only kicked while idle anyway. Skip only when sure.
5680  	 */
5681  	return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
5682  }
5683  
5684  static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs)
5685  {
5686  	struct rq *rq = cpu_rq(cpu);
5687  	struct scx_rq *this_scx = &this_rq->scx;
5688  	bool should_wait = false;
5689  	unsigned long flags;
5690  
5691  	raw_spin_rq_lock_irqsave(rq, flags);
5692  
5693  	/*
5694  	 * During CPU hotplug, a CPU may depend on kicking itself to make
5695  	 * forward progress. Allow kicking self regardless of online state.
5696  	 */
5697  	if (cpu_online(cpu) || cpu == cpu_of(this_rq)) {
5698  		if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) {
5699  			if (rq->curr->sched_class == &ext_sched_class)
5700  				rq->curr->scx.slice = 0;
5701  			cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5702  		}
5703  
5704  		if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) {
5705  			pseqs[cpu] = rq->scx.pnt_seq;
5706  			should_wait = true;
5707  		}
5708  
5709  		resched_curr(rq);
5710  	} else {
5711  		cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt);
5712  		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5713  	}
5714  
5715  	raw_spin_rq_unlock_irqrestore(rq, flags);
5716  
5717  	return should_wait;
5718  }
5719  
5720  static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
5721  {
5722  	struct rq *rq = cpu_rq(cpu);
5723  	unsigned long flags;
5724  
5725  	raw_spin_rq_lock_irqsave(rq, flags);
5726  
5727  	if (!can_skip_idle_kick(rq) &&
5728  	    (cpu_online(cpu) || cpu == cpu_of(this_rq)))
5729  		resched_curr(rq);
5730  
5731  	raw_spin_rq_unlock_irqrestore(rq, flags);
5732  }
5733  
5734  static void kick_cpus_irq_workfn(struct irq_work *irq_work)
5735  {
5736  	struct rq *this_rq = this_rq();
5737  	struct scx_rq *this_scx = &this_rq->scx;
5738  	unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
5739  	bool should_wait = false;
5740  	s32 cpu;
5741  
5742  	for_each_cpu(cpu, this_scx->cpus_to_kick) {
5743  		should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
5744  		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
5745  		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5746  	}
5747  
5748  	for_each_cpu(cpu, this_scx->cpus_to_kick_if_idle) {
5749  		kick_one_cpu_if_idle(cpu, this_rq);
5750  		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle);
5751  	}
5752  
5753  	if (!should_wait)
5754  		return;
5755  
5756  	for_each_cpu(cpu, this_scx->cpus_to_wait) {
5757  		unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq;
5758  
5759  		if (cpu != cpu_of(this_rq)) {
5760  			/*
5761  			 * Pairs with smp_store_release() issued by this CPU in
5762  			 * scx_next_task_picked() on the resched path.
5763  			 *
5764  			 * We busy-wait here to guarantee that no other task can
5765  			 * be scheduled on our core before the target CPU has
5766  			 * entered the resched path.
5767  			 */
5768  			while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu])
5769  				cpu_relax();
5770  		}
5771  
5772  		cpumask_clear_cpu(cpu, this_scx->cpus_to_wait);
5773  	}
5774  }
5775  
5776  /**
5777   * print_scx_info - print out sched_ext scheduler state
5778   * @log_lvl: the log level to use when printing
5779   * @p: target task
5780   *
5781   * If a sched_ext scheduler is enabled, print the name and state of the
5782   * scheduler. If @p is on sched_ext, print further information about the task.
5783   *
5784   * This function can be safely called on any task as long as the task_struct
5785   * itself is accessible. While safe, this function isn't synchronized and may
5786   * print out mixed-up or garbage output of limited length.
5787   */
5788  void print_scx_info(const char *log_lvl, struct task_struct *p)
5789  {
5790  	enum scx_ops_enable_state state = scx_ops_enable_state();
5791  	const char *all = READ_ONCE(scx_switching_all) ? "+all" : "";
5792  	char runnable_at_buf[22] = "?";
5793  	struct sched_class *class;
5794  	unsigned long runnable_at;
5795  
5796  	if (state == SCX_OPS_DISABLED)
5797  		return;
5798  
5799  	/*
5800  	 * Carefully check if the task was running on sched_ext, and then
5801  	 * carefully copy the time it's been runnable, and its state.
5802  	 */
5803  	if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) ||
5804  	    class != &ext_sched_class) {
5805  		printk("%sSched_ext: %s (%s%s)", log_lvl, scx_ops.name,
5806  		       scx_ops_enable_state_str[state], all);
5807  		return;
5808  	}
5809  
5810  	if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at,
5811  				      sizeof(runnable_at)))
5812  		scnprintf(runnable_at_buf, sizeof(runnable_at_buf), "%+ldms",
5813  			  jiffies_delta_msecs(runnable_at, jiffies));
5814  
5815  	/* print everything onto one line to conserve console space */
5816  	printk("%sSched_ext: %s (%s%s), task: runnable_at=%s",
5817  	       log_lvl, scx_ops.name, scx_ops_enable_state_str[state], all,
5818  	       runnable_at_buf);
5819  }
5820  
5821  static int scx_pm_handler(struct notifier_block *nb, unsigned long event, void *ptr)
5822  {
5823  	/*
5824  	 * SCX schedulers often have userspace components which are sometimes
5825  	 * involved in critical scheduling paths. PM operations involve freezing
5826  	 * userspace which can lead to scheduling misbehaviors including stalls.
5827  	 * Let's bypass while PM operations are in progress.
5828  	 */
5829  	switch (event) {
5830  	case PM_HIBERNATION_PREPARE:
5831  	case PM_SUSPEND_PREPARE:
5832  	case PM_RESTORE_PREPARE:
5833  		scx_ops_bypass(true);
5834  		break;
5835  	case PM_POST_HIBERNATION:
5836  	case PM_POST_SUSPEND:
5837  	case PM_POST_RESTORE:
5838  		scx_ops_bypass(false);
5839  		break;
5840  	}
5841  
5842  	return NOTIFY_OK;
5843  }
5844  
5845  static struct notifier_block scx_pm_notifier = {
5846  	.notifier_call = scx_pm_handler,
5847  };
5848  
5849  void __init init_sched_ext_class(void)
5850  {
5851  	s32 cpu, v;
5852  
5853  	/*
5854  	 * The following is to prevent the compiler from optimizing out the enum
5855  	 * definitions so that BPF scheduler implementations can use them
5856  	 * through the generated vmlinux.h.
5857  	 */
5858  	WRITE_ONCE(v, SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | SCX_KICK_PREEMPT |
5859  		   SCX_TG_ONLINE);
5860  
5861  	BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
5862  #ifdef CONFIG_SMP
5863  	BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
5864  	BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
5865  #endif
5866  	scx_kick_cpus_pnt_seqs =
5867  		__alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
5868  			       __alignof__(scx_kick_cpus_pnt_seqs[0]));
5869  	BUG_ON(!scx_kick_cpus_pnt_seqs);
5870  
5871  	for_each_possible_cpu(cpu) {
5872  		struct rq *rq = cpu_rq(cpu);
5873  
5874  		init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
5875  		INIT_LIST_HEAD(&rq->scx.runnable_list);
5876  		INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
5877  
5878  		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
5879  		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
5880  		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
5881  		BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
5882  		init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
5883  		init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
5884  
5885  		if (cpu_online(cpu))
5886  			cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
5887  	}
5888  
5889  	register_sysrq_key('S', &sysrq_sched_ext_reset_op);
5890  	register_sysrq_key('D', &sysrq_sched_ext_dump_op);
5891  	INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
5892  }
5893  
5894  
5895  /********************************************************************************
5896   * Helpers that can be called from the BPF scheduler.
5897   */
5898  #include <linux/btf_ids.h>
5899  
5900  __bpf_kfunc_start_defs();
5901  
5902  /**
5903   * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
5904   * @p: task_struct to select a CPU for
5905   * @prev_cpu: CPU @p was on previously
5906   * @wake_flags: %SCX_WAKE_* flags
5907   * @is_idle: out parameter indicating whether the returned CPU is idle
5908   *
5909   * Can only be called from ops.select_cpu() if the built-in CPU selection is
5910   * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
5911   * @p, @prev_cpu and @wake_flags match ops.select_cpu().
5912   *
5913   * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
5914   * currently idle and thus a good candidate for direct dispatching.
5915   */
5916  __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
5917  				       u64 wake_flags, bool *is_idle)
5918  {
5919  	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
5920  		scx_ops_error("built-in idle tracking is disabled");
5921  		goto prev_cpu;
5922  	}
5923  
5924  	if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
5925  		goto prev_cpu;
5926  
5927  #ifdef CONFIG_SMP
5928  	return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
5929  #endif
5930  
5931  prev_cpu:
5932  	*is_idle = false;
5933  	return prev_cpu;
5934  }
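
/*
 * Illustrative BPF-side usage sketch (BPF_STRUCT_OPS is assumed to come from
 * the scx BPF headers; the pattern mirrors the example schedulers): use the
 * default CPU selection and direct-dispatch when an idle CPU was found.
 *
 *	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		bool is_idle = false;
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *		if (is_idle)
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 */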
5935  
5936  __bpf_kfunc_end_defs();
5937  
5938  BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
5939  BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
5940  BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
5941  
5942  static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
5943  	.owner			= THIS_MODULE,
5944  	.set			= &scx_kfunc_ids_select_cpu,
5945  };
5946  
5947  static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
5948  {
5949  	if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
5950  		return false;
5951  
5952  	lockdep_assert_irqs_disabled();
5953  
5954  	if (unlikely(!p)) {
5955  		scx_ops_error("called with NULL task");
5956  		return false;
5957  	}
5958  
5959  	if (unlikely(enq_flags & __SCX_ENQ_INTERNAL_MASK)) {
5960  		scx_ops_error("invalid enq_flags 0x%llx", enq_flags);
5961  		return false;
5962  	}
5963  
5964  	return true;
5965  }
5966  
5967  static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags)
5968  {
5969  	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
5970  	struct task_struct *ddsp_task;
5971  
5972  	ddsp_task = __this_cpu_read(direct_dispatch_task);
5973  	if (ddsp_task) {
5974  		mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags);
5975  		return;
5976  	}
5977  
5978  	if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
5979  		scx_ops_error("dispatch buffer overflow");
5980  		return;
5981  	}
5982  
5983  	dspc->buf[dspc->cursor++] = (struct scx_dsp_buf_ent){
5984  		.task = p,
5985  		.qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK,
5986  		.dsq_id = dsq_id,
5987  		.enq_flags = enq_flags,
5988  	};
5989  }
5990  
5991  __bpf_kfunc_start_defs();
5992  
5993  /**
5994   * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ
5995   * @p: task_struct to dispatch
5996   * @dsq_id: DSQ to dispatch to
5997   * @slice: duration @p can run for in nsecs, 0 to keep the current value
5998   * @enq_flags: SCX_ENQ_*
5999   *
6000   * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe
6001   * to call this function spuriously. Can be called from ops.enqueue(),
6002   * ops.select_cpu(), and ops.dispatch().
6003   *
6004   * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
6005   * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be
6006   * used to target the local DSQ of a CPU other than the enqueueing one. Use
6007   * ops.select_cpu() to be on the target CPU in the first place.
6008   *
6009   * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
6010   * will be directly dispatched to the corresponding dispatch queue after
6011   * ops.select_cpu() returns. If @p is dispatched to SCX_DSQ_LOCAL, it will be
6012   * dispatched to the local DSQ of the CPU returned by ops.select_cpu().
6013   * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
6014   * task is dispatched.
6015   *
6016   * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
6017   * and this function can be called up to ops.dispatch_max_batch times to dispatch
6018   * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of
6019   * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
6020   *
6021   * This function doesn't have any locking restrictions and may be called under
6022   * BPF locks (in the future when BPF introduces more flexible locking).
6023   *
6024   * @p is allowed to run for @slice. The scheduling path is triggered on slice
6025   * exhaustion. If zero, the current residual slice is maintained. If
6026   * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
6027   * scx_bpf_kick_cpu() to trigger scheduling.
6028   */
6029  __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
6030  				  u64 enq_flags)
6031  {
6032  	if (!scx_dispatch_preamble(p, enq_flags))
6033  		return;
6034  
6035  	if (slice)
6036  		p->scx.slice = slice;
6037  	else
6038  		p->scx.slice = p->scx.slice ?: 1;
6039  
6040  	scx_dispatch_commit(p, dsq_id, enq_flags);
6041  }
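
/*
 * Illustrative usage sketch: a minimal ops.enqueue() which queues every task
 * on one shared FIFO DSQ. SHARED_DSQ is assumed to have been created with
 * scx_bpf_create_dsq() from ops.init().
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 */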
6042  
6043  /**
6044   * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ
6045   * @p: task_struct to dispatch
6046   * @dsq_id: DSQ to dispatch to
6047   * @slice: duration @p can run for in nsecs, 0 to keep the current value
6048   * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
6049   * @enq_flags: SCX_ENQ_*
6050   *
6051   * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id.
6052   * Tasks queued into the priority queue are ordered by @vtime and always
6053   * consumed after the tasks in the FIFO queue. All other aspects are identical
6054   * to scx_bpf_dispatch().
6055   *
6056   * @vtime ordering is according to time_before64() which considers wrapping. A
6057   * numerically larger vtime may indicate an earlier position in the ordering and
6058   * vice-versa.
6059   */
6060  __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
6061  					u64 slice, u64 vtime, u64 enq_flags)
6062  {
6063  	if (!scx_dispatch_preamble(p, enq_flags))
6064  		return;
6065  
6066  	if (slice)
6067  		p->scx.slice = slice;
6068  	else
6069  		p->scx.slice = p->scx.slice ?: 1;
6070  
6071  	p->scx.dsq_vtime = vtime;
6072  
6073  	scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6074  }
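
/*
 * Illustrative usage sketch: the vtime-ordered counterpart of the FIFO
 * enqueue example above. p->scx.dsq_vtime is assumed to be advanced by the
 * scheduler's ops.running()/ops.stopping() callbacks.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dispatch_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
 *				       p->scx.dsq_vtime, enq_flags);
 *	}
 */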
6075  
6076  __bpf_kfunc_end_defs();
6077  
6078  BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
6079  BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
6080  BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
6081  BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
6082  
6083  static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
6084  	.owner			= THIS_MODULE,
6085  	.set			= &scx_kfunc_ids_enqueue_dispatch,
6086  };
6087  
6088  static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
6089  				  struct task_struct *p, u64 dsq_id,
6090  				  u64 enq_flags)
6091  {
6092  	struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
6093  	struct rq *this_rq, *src_rq, *dst_rq, *locked_rq;
6094  	bool dispatched = false;
6095  	bool in_balance;
6096  	unsigned long flags;
6097  
6098  	if (!scx_kf_allowed_if_unlocked() && !scx_kf_allowed(SCX_KF_DISPATCH))
6099  		return false;
6100  
6101  	/*
6102  	 * Can be called from either ops.dispatch() locking this_rq() or any
6103  	 * context where no rq lock is held. If the latter, lock @p's task_rq which
6104  	 * we'll likely need anyway.
6105  	 */
6106  	src_rq = task_rq(p);
6107  
6108  	local_irq_save(flags);
6109  	this_rq = this_rq();
6110  	in_balance = this_rq->scx.flags & SCX_RQ_IN_BALANCE;
6111  
6112  	if (in_balance) {
6113  		if (this_rq != src_rq) {
6114  			raw_spin_rq_unlock(this_rq);
6115  			raw_spin_rq_lock(src_rq);
6116  		}
6117  	} else {
6118  		raw_spin_rq_lock(src_rq);
6119  	}
6120  
6121  	locked_rq = src_rq;
6122  	raw_spin_lock(&src_dsq->lock);
6123  
6124  	/*
6125  	 * Did someone else get to it? @p could have already left $src_dsq, got
6126  	 * re-enqueued, or be in the process of being consumed by someone else.
6127  	 */
6128  	if (unlikely(p->scx.dsq != src_dsq ||
6129  		     u32_before(kit->cursor.priv, p->scx.dsq_seq) ||
6130  		     p->scx.holding_cpu >= 0) ||
6131  	    WARN_ON_ONCE(src_rq != task_rq(p))) {
6132  		raw_spin_unlock(&src_dsq->lock);
6133  		goto out;
6134  	}
6135  
6136  	/* @p is still on $src_dsq and stable, determine the destination */
6137  	dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
6138  
6139  	if (dst_dsq->id == SCX_DSQ_LOCAL) {
6140  		dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
6141  		if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
6142  			dst_dsq = find_global_dsq(p);
6143  			dst_rq = src_rq;
6144  		}
6145  	} else {
6146  		/* no need to migrate if destination is a non-local DSQ */
6147  		dst_rq = src_rq;
6148  	}
6149  
6150  	/*
6151  	 * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
6152  	 * CPU, @p will be migrated.
6153  	 */
6154  	if (dst_dsq->id == SCX_DSQ_LOCAL) {
6155  		/* @p is going from a non-local DSQ to a local DSQ */
6156  		if (src_rq == dst_rq) {
6157  			task_unlink_from_dsq(p, src_dsq);
6158  			move_local_task_to_local_dsq(p, enq_flags,
6159  						     src_dsq, dst_rq);
6160  			raw_spin_unlock(&src_dsq->lock);
6161  		} else {
6162  			raw_spin_unlock(&src_dsq->lock);
6163  			move_remote_task_to_local_dsq(p, enq_flags,
6164  						      src_rq, dst_rq);
6165  			locked_rq = dst_rq;
6166  		}
6167  	} else {
6168  		/*
6169  		 * @p is going from a non-local DSQ to a non-local DSQ. As
6170  		 * $src_dsq is already locked, do an abbreviated dequeue.
6171  		 */
6172  		task_unlink_from_dsq(p, src_dsq);
6173  		p->scx.dsq = NULL;
6174  		raw_spin_unlock(&src_dsq->lock);
6175  
6176  		if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
6177  			p->scx.dsq_vtime = kit->vtime;
6178  		dispatch_enqueue(dst_dsq, p, enq_flags);
6179  	}
6180  
6181  	if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
6182  		p->scx.slice = kit->slice;
6183  
6184  	dispatched = true;
6185  out:
6186  	if (in_balance) {
6187  		if (this_rq != locked_rq) {
6188  			raw_spin_rq_unlock(locked_rq);
6189  			raw_spin_rq_lock(this_rq);
6190  		}
6191  	} else {
6192  		raw_spin_rq_unlock_irqrestore(locked_rq, flags);
6193  	}
6194  
6195  	kit->cursor.flags &= ~(__SCX_DSQ_ITER_HAS_SLICE |
6196  			       __SCX_DSQ_ITER_HAS_VTIME);
6197  	return dispatched;
6198  }
6199  
6200  __bpf_kfunc_start_defs();
6201  
6202  /**
6203   * scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
6204   *
6205   * Can only be called from ops.dispatch().
6206   */
6207  __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(void)
6208  {
6209  	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6210  		return 0;
6211  
6212  	return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
6213  }
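
/*
 * Illustrative sketch: ops.dispatch() draining a scheduler-internal queue
 * without overrunning the dispatch buffer. pick_next() is a hypothetical
 * helper returning the next queued task or NULL.
 *
 *	while (scx_bpf_dispatch_nr_slots()) {
 *		struct task_struct *p = pick_next();
 *
 *		if (!p)
 *			break;
 *		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *	}
 */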
6214  
6215  /**
6216   * scx_bpf_dispatch_cancel - Cancel the latest dispatch
6217   *
6218   * Cancel the latest dispatch. Can be called multiple times to cancel further
6219   * dispatches. Can only be called from ops.dispatch().
6220   */
6221  __bpf_kfunc void scx_bpf_dispatch_cancel(void)
6222  {
6223  	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6224  
6225  	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6226  		return;
6227  
6228  	if (dspc->cursor > 0)
6229  		dspc->cursor--;
6230  	else
6231  		scx_ops_error("dispatch buffer underflow");
6232  }
6233  
6234  /**
6235   * scx_bpf_consume - Transfer a task from a DSQ to the current CPU's local DSQ
6236   * @dsq_id: DSQ to consume
6237   *
6238   * Consume a task from the non-local DSQ identified by @dsq_id and transfer it
6239   * to the current CPU's local DSQ for execution. Can only be called from
6240   * ops.dispatch().
6241   *
6242   * This function flushes the in-flight dispatches from scx_bpf_dispatch() before
6243   * trying to consume the specified DSQ. It may also grab rq locks and thus can't
6244   * be called under any BPF locks.
6245   *
6246   * Returns %true if a task has been consumed, %false if there isn't any task to
6247   * consume.
6248   */
6249  __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
6250  {
6251  	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
6252  	struct scx_dispatch_q *dsq;
6253  
6254  	if (!scx_kf_allowed(SCX_KF_DISPATCH))
6255  		return false;
6256  
6257  	flush_dispatch_buf(dspc->rq);
6258  
6259  	dsq = find_user_dsq(dsq_id);
6260  	if (unlikely(!dsq)) {
6261  		scx_ops_error("invalid DSQ ID 0x%016llx", dsq_id);
6262  		return false;
6263  	}
6264  
6265  	if (consume_dispatch_q(dspc->rq, dsq)) {
6266  		/*
6267  		 * A successfully consumed task can be dequeued before it starts
6268  		 * running while the CPU is trying to migrate other dispatched
6269  		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
6270  		 * local DSQ.
6271  		 */
6272  		dspc->nr_tasks++;
6273  		return true;
6274  	} else {
6275  		return false;
6276  	}
6277  }
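/*
 * Example (illustrative sketch): the simplest dispatch path fills the local
 * DSQ from one shared user DSQ, the pattern used by minimal schedulers such
 * as scx_simple. SHARED_DSQ is an assumed DSQ ID created at init time.
 *
 *	void BPF_STRUCT_OPS(sched_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_consume(SHARED_DSQ);
 *	}
 */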
6278  
6279  /**
6280   * scx_bpf_dispatch_from_dsq_set_slice - Override slice when dispatching from DSQ
6281   * @it__iter: DSQ iterator in progress
6282   * @slice: duration the dispatched task can run for in nsecs
6283   *
6284   * Override the slice of the next task that will be dispatched from @it__iter
6285   * using scx_bpf_dispatch_from_dsq() or scx_bpf_dispatch_vtime_from_dsq(). If
6286   * this function is not called, the previous slice duration is kept.
6287   */
6288  __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6289  				struct bpf_iter_scx_dsq *it__iter, u64 slice)
6290  {
6291  	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6292  
6293  	kit->slice = slice;
6294  	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
6295  }
6296  
6297  /**
6298   * scx_bpf_dispatch_from_dsq_set_vtime - Override vtime when dispatching from DSQ
6299   * @it__iter: DSQ iterator in progress
6300   * @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
6301   *
6302   * Override the vtime of the next task that will be dispatched from @it__iter
6303   * using scx_bpf_dispatch_vtime_from_dsq(). If this function is not called,
6304   * the previous vtime is kept. If scx_bpf_dispatch_from_dsq() is used to
6305   * dispatch the next task, the override is ignored and cleared.
6306   */
6307  __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6308  				struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6309  {
6310  	struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
6311  
6312  	kit->vtime = vtime;
6313  	kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
6314  }
6315  
6316  /**
6317   * scx_bpf_dispatch_from_dsq - Move a task from DSQ iteration to a DSQ
6318   * @it__iter: DSQ iterator in progress
6319   * @p: task to transfer
6320   * @dsq_id: DSQ to move @p to
6321   * @enq_flags: SCX_ENQ_*
6322   *
6323   * Transfer @p which is on the DSQ currently iterated by @it__iter to the DSQ
6324   * specified by @dsq_id. All DSQs - local DSQs, global DSQ and user DSQs - can
6325   * be the destination.
6326   *
6327   * For the transfer to be successful, @p must still be on the DSQ and have been
6328   * queued before the DSQ iteration started; whether @p was obtained from the
6329   * iteration itself doesn't matter.
6331   *
6332   * @p's slice is kept by default. Use scx_bpf_dispatch_from_dsq_set_slice() to
6333   * update.
6334   *
6335   * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
6336   * lock (e.g. BPF timers or SYSCALL programs).
6337   *
6338   * Returns %true if @p has been consumed, %false if @p had already been consumed
6339   * or dequeued.
6340   */
6341  __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6342  					   struct task_struct *p, u64 dsq_id,
6343  					   u64 enq_flags)
6344  {
6345  	return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
6346  				     p, dsq_id, enq_flags);
6347  }
6348  
6349  /**
6350   * scx_bpf_dispatch_vtime_from_dsq - Move a task from DSQ iteration to a PRIQ DSQ
6351   * @it__iter: DSQ iterator in progress
6352   * @p: task to transfer
6353   * @dsq_id: DSQ to move @p to
6354   * @enq_flags: SCX_ENQ_*
6355   *
6356   * Transfer @p which is on the DSQ currently iterated by @it__iter to the
6357   * priority queue of the DSQ specified by @dsq_id. The destination must be a
6358   * user DSQ as only user DSQs support priority queue.
6359   *
6360   * @p's slice and vtime are kept by default. Use
6361   * scx_bpf_dispatch_from_dsq_set_slice() and
6362   * scx_bpf_dispatch_from_dsq_set_vtime() to update.
6363   *
6364   * All other aspects are identical to scx_bpf_dispatch_from_dsq(). See
6365   * scx_bpf_dispatch_vtime() for more information on @vtime.
6366   */
6367  __bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
6368  						 struct task_struct *p, u64 dsq_id,
6369  						 u64 enq_flags)
6370  {
6371  	return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
6372  				     p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6373  }
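/*
 * Example (illustrative sketch): combining the DSQ iterator with the kfuncs
 * above to pull one matching task to a chosen CPU, refreshing its slice on
 * the way. BPF_FOR_EACH_ITER and bpf_for_each() come from the SCX BPF
 * headers; MY_DSQ, is_urgent() and target_cpu are assumptions.
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ, 0) {
 *		if (!is_urgent(p))
 *			continue;
 *		scx_bpf_dispatch_from_dsq_set_slice(BPF_FOR_EACH_ITER,
 *						    5 * 1000 * 1000);
 *		if (scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
 *					      SCX_DSQ_LOCAL_ON | target_cpu,
 *					      SCX_ENQ_PREEMPT))
 *			break;
 *	}
 */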
6374  
6375  __bpf_kfunc_end_defs();
6376  
6377  BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
6378  BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
6379  BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
6380  BTF_ID_FLAGS(func, scx_bpf_consume)
6381  BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
6382  BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
6383  BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6384  BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6385  BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
6386  
6387  static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
6388  	.owner			= THIS_MODULE,
6389  	.set			= &scx_kfunc_ids_dispatch,
6390  };
6391  
6392  __bpf_kfunc_start_defs();
6393  
6394  /**
6395   * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
6396   *
6397   * Iterate over all of the tasks currently enqueued on the local DSQ of the
6398   * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of
6399   * processed tasks. Can only be called from ops.cpu_release().
6400   */
6401  __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
6402  {
6403  	LIST_HEAD(tasks);
6404  	u32 nr_enqueued = 0;
6405  	struct rq *rq;
6406  	struct task_struct *p, *n;
6407  
6408  	if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
6409  		return 0;
6410  
6411  	rq = cpu_rq(smp_processor_id());
6412  	lockdep_assert_rq_held(rq);
6413  
6414  	/*
6415  	 * The BPF scheduler may choose to dispatch tasks back to
6416  	 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
6417  	 * first to avoid processing the same tasks repeatedly.
6418  	 */
6419  	list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
6420  				 scx.dsq_list.node) {
6421  		/*
6422  		 * If @p is being migrated, @p's current CPU may not agree with
6423  		 * its allowed CPUs and the migration_cpu_stop is about to
6424  		 * deactivate and re-activate @p anyway. Skip re-enqueueing.
6425  		 *
6426  		 * While racing sched property changes may also dequeue and
6427  		 * re-enqueue a migrating task while its current CPU and allowed
6428  		 * CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
6429  		 * the current local DSQ for running tasks and thus are not
6430  		 * visible to the BPF scheduler.
6431  		 */
6432  		if (p->migration_pending)
6433  			continue;
6434  
6435  		dispatch_dequeue(rq, p);
6436  		list_add_tail(&p->scx.dsq_list.node, &tasks);
6437  	}
6438  
6439  	list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
6440  		list_del_init(&p->scx.dsq_list.node);
6441  		do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
6442  		nr_enqueued++;
6443  	}
6444  
6445  	return nr_enqueued;
6446  }
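/*
 * Example (illustrative sketch): a typical ops.cpu_release() hands tasks that
 * were already sitting on the released CPU's local DSQ back to the BPF
 * scheduler so that they can be placed elsewhere:
 *
 *	void BPF_STRUCT_OPS(sched_cpu_release, s32 cpu,
 *			    struct scx_cpu_release_args *args)
 *	{
 *		scx_bpf_reenqueue_local();
 *	}
 */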
6447  
6448  __bpf_kfunc_end_defs();
6449  
6450  BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
6451  BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
6452  BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
6453  
6454  static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
6455  	.owner			= THIS_MODULE,
6456  	.set			= &scx_kfunc_ids_cpu_release,
6457  };
6458  
6459  __bpf_kfunc_start_defs();
6460  
6461  /**
6462   * scx_bpf_create_dsq - Create a custom DSQ
6463   * @dsq_id: DSQ to create
6464   * @node: NUMA node to allocate from
6465   *
6466   * Create a custom DSQ identified by @dsq_id. Can be called from any sleepable
6467   * scx callback, and any BPF_PROG_TYPE_SYSCALL prog.
6468   */
6469  __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
6470  {
6471  	if (unlikely(node >= (int)nr_node_ids ||
6472  		     (node < 0 && node != NUMA_NO_NODE)))
6473  		return -EINVAL;
6474  	return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
6475  }
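/*
 * Example (illustrative sketch): DSQs are commonly created from the sleepable
 * ops.init() callback; a failure here aborts scheduler load. SHARED_DSQ is an
 * assumed ID and -1 means NUMA_NO_NODE.
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(sched_init)
 *	{
 *		return scx_bpf_create_dsq(SHARED_DSQ, -1);
 *	}
 */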
6476  
6477  __bpf_kfunc_end_defs();
6478  
6479  BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
6480  BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6481  BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
6482  BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime_from_dsq, KF_RCU)
6483  BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
6484  
6485  static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = {
6486  	.owner			= THIS_MODULE,
6487  	.set			= &scx_kfunc_ids_unlocked,
6488  };
6489  
6490  __bpf_kfunc_start_defs();
6491  
6492  /**
6493   * scx_bpf_kick_cpu - Trigger reschedule on a CPU
6494   * @cpu: cpu to kick
6495   * @flags: %SCX_KICK_* flags
6496   *
6497   * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or
6498   * trigger rescheduling on a busy CPU. This can be called from any online
6499   * scx_ops operation and the actual kicking is performed asynchronously through
6500   * an irq work.
6501   */
6502  __bpf_kfunc void scx_bpf_kick_cpu(s32 cpu, u64 flags)
6503  {
6504  	struct rq *this_rq;
6505  	unsigned long irq_flags;
6506  
6507  	if (!ops_cpu_valid(cpu, NULL))
6508  		return;
6509  
6510  	local_irq_save(irq_flags);
6511  
6512  	this_rq = this_rq();
6513  
6514  	/*
6515  	 * While bypassing for PM ops, IRQ handling may not be online which can
6516  	 * lead to irq_work_queue() malfunction such as infinite busy wait for
6517  	 * IRQ status update. Suppress kicking.
6518  	 */
6519  	if (scx_rq_bypassing(this_rq))
6520  		goto out;
6521  
6522  	/*
6523  	 * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting
6524  	 * rq locks. We can probably be smarter and avoid bouncing if called
6525  	 * from ops which don't hold a rq lock.
6526  	 */
6527  	if (flags & SCX_KICK_IDLE) {
6528  		struct rq *target_rq = cpu_rq(cpu);
6529  
6530  		if (unlikely(flags & (SCX_KICK_PREEMPT | SCX_KICK_WAIT)))
6531  			scx_ops_error("PREEMPT/WAIT cannot be used with SCX_KICK_IDLE");
6532  
6533  		if (raw_spin_rq_trylock(target_rq)) {
6534  			if (can_skip_idle_kick(target_rq)) {
6535  				raw_spin_rq_unlock(target_rq);
6536  				goto out;
6537  			}
6538  			raw_spin_rq_unlock(target_rq);
6539  		}
6540  		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick_if_idle);
6541  	} else {
6542  		cpumask_set_cpu(cpu, this_rq->scx.cpus_to_kick);
6543  
6544  		if (flags & SCX_KICK_PREEMPT)
6545  			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_preempt);
6546  		if (flags & SCX_KICK_WAIT)
6547  			cpumask_set_cpu(cpu, this_rq->scx.cpus_to_wait);
6548  	}
6549  
6550  	irq_work_queue(&this_rq->scx.kick_cpus_irq_work);
6551  out:
6552  	local_irq_restore(irq_flags);
6553  }
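/*
 * Example (illustrative sketch): after queueing @p on a shared DSQ from
 * ops.enqueue(), an idle CPU can be nudged so the new work is picked up
 * promptly. SHARED_DSQ is an assumption.
 *
 *	s32 cpu;
 *
 *	scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *	if (cpu >= 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
 */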
6554  
6555  /**
6556   * scx_bpf_dsq_nr_queued - Return the number of queued tasks
6557   * @dsq_id: id of the DSQ
6558   *
6559   * Return the number of tasks in the DSQ matching @dsq_id. If not found,
6560   * -%ENOENT is returned.
6561   */
6562  __bpf_kfunc s32 scx_bpf_dsq_nr_queued(u64 dsq_id)
6563  {
6564  	struct scx_dispatch_q *dsq;
6565  	s32 ret;
6566  
6567  	preempt_disable();
6568  
6569  	if (dsq_id == SCX_DSQ_LOCAL) {
6570  		ret = READ_ONCE(this_rq()->scx.local_dsq.nr);
6571  		goto out;
6572  	} else if ((dsq_id & SCX_DSQ_LOCAL_ON) == SCX_DSQ_LOCAL_ON) {
6573  		s32 cpu = dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
6574  
6575  		if (ops_cpu_valid(cpu, NULL)) {
6576  			ret = READ_ONCE(cpu_rq(cpu)->scx.local_dsq.nr);
6577  			goto out;
6578  		}
6579  	} else {
6580  		dsq = find_user_dsq(dsq_id);
6581  		if (dsq) {
6582  			ret = READ_ONCE(dsq->nr);
6583  			goto out;
6584  		}
6585  	}
6586  	ret = -ENOENT;
6587  out:
6588  	preempt_enable();
6589  	return ret;
6590  }
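/*
 * Example (illustrative sketch): queue depth can drive simple backpressure
 * heuristics, e.g. preempting the local CPU when a shared DSQ backs up. The
 * threshold and SHARED_DSQ are assumptions.
 *
 *	if (scx_bpf_dsq_nr_queued(SHARED_DSQ) > 16)
 *		scx_bpf_kick_cpu(bpf_get_smp_processor_id(), SCX_KICK_PREEMPT);
 */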
6591  
6592  /**
6593   * scx_bpf_destroy_dsq - Destroy a custom DSQ
6594   * @dsq_id: DSQ to destroy
6595   *
6596   * Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
6597   * scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
6598   * empty and no further tasks are dispatched to it. Ignored if called on a DSQ
6599   * which doesn't exist. Can be called from any online scx_ops operations.
6600   */
6601  __bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
6602  {
6603  	destroy_dsq(dsq_id);
6604  }
6605  
6606  /**
6607   * bpf_iter_scx_dsq_new - Create a DSQ iterator
6608   * @it: iterator to initialize
6609   * @dsq_id: DSQ to iterate
6610   * @flags: %SCX_DSQ_ITER_*
6611   *
6612   * Initialize BPF iterator @it which can be used with bpf_for_each() to walk
6613   * tasks in the DSQ specified by @dsq_id. Iteration using @it only includes
6614   * tasks which are already queued when this function is invoked.
6615   */
6616  __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
6617  				     u64 flags)
6618  {
6619  	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6620  
6621  	BUILD_BUG_ON(sizeof(struct bpf_iter_scx_dsq_kern) >
6622  		     sizeof(struct bpf_iter_scx_dsq));
6623  	BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
6624  		     __alignof__(struct bpf_iter_scx_dsq));
6625  
6626  	if (flags & ~__SCX_DSQ_ITER_USER_FLAGS)
6627  		return -EINVAL;
6628  
6629  	kit->dsq = find_user_dsq(dsq_id);
6630  	if (!kit->dsq)
6631  		return -ENOENT;
6632  
6633  	INIT_LIST_HEAD(&kit->cursor.node);
6634  	kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags;
6635  	kit->cursor.priv = READ_ONCE(kit->dsq->seq);
6636  
6637  	return 0;
6638  }
6639  
6640  /**
6641   * bpf_iter_scx_dsq_next - Progress a DSQ iterator
6642   * @it: iterator to progress
6643   *
6644   * Return the next task. See bpf_iter_scx_dsq_new().
6645   */
6646  __bpf_kfunc struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it)
6647  {
6648  	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6649  	bool rev = kit->cursor.flags & SCX_DSQ_ITER_REV;
6650  	struct task_struct *p;
6651  	unsigned long flags;
6652  
6653  	if (!kit->dsq)
6654  		return NULL;
6655  
6656  	raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6657  
6658  	if (list_empty(&kit->cursor.node))
6659  		p = NULL;
6660  	else
6661  		p = container_of(&kit->cursor, struct task_struct, scx.dsq_list);
6662  
6663  	/*
6664  	 * Only tasks which were queued before the iteration started are
6665  	 * visible. This bounds BPF iterations and guarantees that vtime never
6666  	 * jumps in the other direction while iterating.
6667  	 */
6668  	do {
6669  		p = nldsq_next_task(kit->dsq, p, rev);
6670  	} while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq)));
6671  
6672  	if (p) {
6673  		if (rev)
6674  			list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node);
6675  		else
6676  			list_move(&kit->cursor.node, &p->scx.dsq_list.node);
6677  	} else {
6678  		list_del_init(&kit->cursor.node);
6679  	}
6680  
6681  	raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6682  
6683  	return p;
6684  }
6685  
6686  /**
6687   * bpf_iter_scx_dsq_destroy - Destroy a DSQ iterator
6688   * @it: iterator to destroy
6689   *
6690   * Undo bpf_iter_scx_dsq_new().
6691   */
6692  __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it)
6693  {
6694  	struct bpf_iter_scx_dsq_kern *kit = (void *)it;
6695  
6696  	if (!kit->dsq)
6697  		return;
6698  
6699  	if (!list_empty(&kit->cursor.node)) {
6700  		unsigned long flags;
6701  
6702  		raw_spin_lock_irqsave(&kit->dsq->lock, flags);
6703  		list_del_init(&kit->cursor.node);
6704  		raw_spin_unlock_irqrestore(&kit->dsq->lock, flags);
6705  	}
6706  	kit->dsq = NULL;
6707  }
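/*
 * Example (illustrative sketch): BPF programs usually drive this iterator
 * through the generic bpf_for_each() macro, which pairs the _new(), _next()
 * and _destroy() calls automatically. MY_DSQ is an assumption.
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ, 0)
 *		bpf_printk("queued: %s[%d]", p->comm, p->pid);
 */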
6708  
6709  __bpf_kfunc_end_defs();
6710  
6711  static s32 __bstr_format(u64 *data_buf, char *line_buf, size_t line_size,
6712  			 char *fmt, unsigned long long *data, u32 data__sz)
6713  {
6714  	struct bpf_bprintf_data bprintf_data = { .get_bin_args = true };
6715  	s32 ret;
6716  
6717  	if (data__sz % 8 || data__sz > MAX_BPRINTF_VARARGS * 8 ||
6718  	    (data__sz && !data)) {
6719  		scx_ops_error("invalid data=%p and data__sz=%u",
6720  			      (void *)data, data__sz);
6721  		return -EINVAL;
6722  	}
6723  
6724  	ret = copy_from_kernel_nofault(data_buf, data, data__sz);
6725  	if (ret < 0) {
6726  		scx_ops_error("failed to read data fields (%d)", ret);
6727  		return ret;
6728  	}
6729  
6730  	ret = bpf_bprintf_prepare(fmt, UINT_MAX, data_buf, data__sz / 8,
6731  				  &bprintf_data);
6732  	if (ret < 0) {
6733  		scx_ops_error("format preparation failed (%d)", ret);
6734  		return ret;
6735  	}
6736  
6737  	ret = bstr_printf(line_buf, line_size, fmt,
6738  			  bprintf_data.bin_args);
6739  	bpf_bprintf_cleanup(&bprintf_data);
6740  	if (ret < 0) {
6741  		scx_ops_error("(\"%s\", %p, %u) failed to format",
6742  			      fmt, data, data__sz);
6743  		return ret;
6744  	}
6745  
6746  	return ret;
6747  }
6748  
6749  static s32 bstr_format(struct scx_bstr_buf *buf,
6750  		       char *fmt, unsigned long long *data, u32 data__sz)
6751  {
6752  	return __bstr_format(buf->data, buf->line, sizeof(buf->line),
6753  			     fmt, data, data__sz);
6754  }
6755  
6756  __bpf_kfunc_start_defs();
6757  
6758  /**
6759   * scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
6760   * @exit_code: Exit value to pass to user space via struct scx_exit_info.
6761   * @fmt: error message format string
6762   * @data: format string parameters packaged using ___bpf_fill() macro
6763   * @data__sz: @data len, must end in '__sz' for the verifier
6764   *
6765   * Indicate that the BPF scheduler wants to exit gracefully, and initiate ops
6766   * disabling.
6767   */
6768  __bpf_kfunc void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
6769  				   unsigned long long *data, u32 data__sz)
6770  {
6771  	unsigned long flags;
6772  
6773  	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6774  	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6775  		scx_ops_exit_kind(SCX_EXIT_UNREG_BPF, exit_code, "%s",
6776  				  scx_exit_bstr_buf.line);
6777  	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6778  }
6779  
6780  /**
6781   * scx_bpf_error_bstr - Indicate fatal error
6782   * @fmt: error message format string
6783   * @data: format string parameters packaged using ___bpf_fill() macro
6784   * @data__sz: @data len, must end in '__sz' for the verifier
6785   *
6786   * Indicate that the BPF scheduler encountered a fatal error and initiate ops
6787   * disabling.
6788   */
6789  __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
6790  				    u32 data__sz)
6791  {
6792  	unsigned long flags;
6793  
6794  	raw_spin_lock_irqsave(&scx_exit_bstr_buf_lock, flags);
6795  	if (bstr_format(&scx_exit_bstr_buf, fmt, data, data__sz) >= 0)
6796  		scx_ops_exit_kind(SCX_EXIT_ERROR_BPF, 0, "%s",
6797  				  scx_exit_bstr_buf.line);
6798  	raw_spin_unlock_irqrestore(&scx_exit_bstr_buf_lock, flags);
6799  }
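/*
 * Example (illustrative sketch): BPF schedulers normally reach the two kfuncs
 * above through the scx_bpf_exit() and scx_bpf_error() convenience macros in
 * the SCX BPF headers, which pack the varargs into @data/@data__sz. ECODE_DONE
 * and nr_bad are assumptions.
 *
 *	if (nr_bad)
 *		scx_bpf_error("%u tasks in an unexpected state", nr_bad);
 *	else
 *		scx_bpf_exit(ECODE_DONE, "all done");
 */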
6800  
6801  /**
6802   * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler
6803   * @fmt: format string
6804   * @data: format string parameters packaged using ___bpf_fill() macro
6805   * @data__sz: @data len, must end in '__sz' for the verifier
6806   *
6807   * To be called through the scx_bpf_dump() helper from ops.dump(), dump_cpu() and
6808   * dump_task() to generate extra debug dump specific to the BPF scheduler.
6809   *
6810   * The extra dump may be multiple lines. A single line may be split over
6811   * multiple calls. The last line is automatically terminated.
6812   */
6813  __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data,
6814  				   u32 data__sz)
6815  {
6816  	struct scx_dump_data *dd = &scx_dump_data;
6817  	struct scx_bstr_buf *buf = &dd->buf;
6818  	s32 ret;
6819  
6820  	if (raw_smp_processor_id() != dd->cpu) {
6821  		scx_ops_error("scx_bpf_dump() must only be called from ops.dump() and friends");
6822  		return;
6823  	}
6824  
6825  	/* append the formatted string to the line buf */
6826  	ret = __bstr_format(buf->data, buf->line + dd->cursor,
6827  			    sizeof(buf->line) - dd->cursor, fmt, data, data__sz);
6828  	if (ret < 0) {
6829  		dump_line(dd->s, "%s[!] (\"%s\", %p, %u) failed to format (%d)",
6830  			  dd->prefix, fmt, data, data__sz, ret);
6831  		return;
6832  	}
6833  
6834  	dd->cursor += ret;
6835  	dd->cursor = min_t(s32, dd->cursor, sizeof(buf->line));
6836  
6837  	if (!dd->cursor)
6838  		return;
6839  
6840  	/*
6841  	 * If the line buf overflowed or ends in a newline, flush it into the
6842  	 * dump. This is to allow the caller to generate a single line over
6843  	 * multiple calls. As ops_dump_flush() can also handle multiple lines in
6844  	 * the line buf, the only case which can lead to an unexpected
6845  	 * truncation is when the caller repeatedly generates newlines in the
6846  	 * middle of a line rather than at its end. Don't do that.
6847  	 */
6848  	if (dd->cursor >= sizeof(buf->line) || buf->line[dd->cursor - 1] == '\n')
6849  		ops_dump_flush();
6850  }
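/*
 * Example (illustrative sketch): ops.dump() and friends call this through the
 * scx_bpf_dump() macro; output accumulates until a trailing newline flushes
 * the line. nr_dispatches is an assumed per-scheduler counter.
 *
 *	void BPF_STRUCT_OPS(sched_dump, struct scx_dump_ctx *dctx)
 *	{
 *		scx_bpf_dump("nr_dispatches=%llu\n", nr_dispatches);
 *	}
 */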
6851  
6852  /**
6853   * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU
6854   * @cpu: CPU of interest
6855   *
6856   * Return the maximum relative capacity of @cpu in relation to the most
6857   * performant CPU in the system. The return value is in the range [1,
6858   * %SCX_CPUPERF_ONE]. See scx_bpf_cpuperf_cur().
6859   */
6860  __bpf_kfunc u32 scx_bpf_cpuperf_cap(s32 cpu)
6861  {
6862  	if (ops_cpu_valid(cpu, NULL))
6863  		return arch_scale_cpu_capacity(cpu);
6864  	else
6865  		return SCX_CPUPERF_ONE;
6866  }
6867  
6868  /**
6869   * scx_bpf_cpuperf_cur - Query the current relative performance of a CPU
6870   * @cpu: CPU of interest
6871   *
6872   * Return the current relative performance of @cpu in relation to its maximum.
6873   * The return value is in the range [1, %SCX_CPUPERF_ONE].
6874   *
6875   * The current performance level of a CPU in relation to the maximum performance
6876   * available in the system can be calculated as follows:
6877   *
6878   *   scx_bpf_cpuperf_cap() * scx_bpf_cpuperf_cur() / %SCX_CPUPERF_ONE
6879   *
6880   * The result is in the range [1, %SCX_CPUPERF_ONE].
6881   */
6882  __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu)
6883  {
6884  	if (ops_cpu_valid(cpu, NULL))
6885  		return arch_scale_freq_capacity(cpu);
6886  	else
6887  		return SCX_CPUPERF_ONE;
6888  }
6889  
6890  /**
6891   * scx_bpf_cpuperf_set - Set the relative performance target of a CPU
6892   * @cpu: CPU of interest
6893   * @perf: target performance level [0, %SCX_CPUPERF_ONE]
6895   *
6896   * Set the target performance level of @cpu to @perf. @perf is in linear
6897   * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the
6898   * schedutil cpufreq governor chooses the target frequency.
6899   *
6900   * The actual performance level chosen, CPU grouping, and the overhead and
6901   * latency of the operations are dependent on the hardware and cpufreq driver in
6902   * use. Consult hardware and cpufreq documentation for more information. The
6903   * current performance level can be monitored using scx_bpf_cpuperf_cur().
6904   */
6905  __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
6906  {
6907  	if (unlikely(perf > SCX_CPUPERF_ONE)) {
6908  		scx_ops_error("Invalid cpuperf target %u for CPU %d", perf, cpu);
6909  		return;
6910  	}
6911  
6912  	if (ops_cpu_valid(cpu, NULL)) {
6913  		struct rq *rq = cpu_rq(cpu);
6914  
6915  		rq->scx.cpuperf_target = perf;
6916  
6917  		rcu_read_lock_sched_notrace();
6918  		cpufreq_update_util(cpu_rq(cpu), 0);
6919  		rcu_read_unlock_sched_notrace();
6920  	}
6921  }
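/*
 * Example (illustrative sketch): a scheduler implementing its own cpufreq
 * policy might run CPUs it considers busy at full performance and let the
 * rest idle along at half. The 50% policy and cpu_is_busy() are assumptions.
 *
 *	u32 perf = cpu_is_busy(cpu) ? SCX_CPUPERF_ONE : SCX_CPUPERF_ONE / 2;
 *
 *	scx_bpf_cpuperf_set(cpu, perf);
 */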
6922  
6923  /**
6924   * scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
6925   *
6926   * All valid CPU IDs in the system are smaller than the returned value.
6927   */
6928  __bpf_kfunc u32 scx_bpf_nr_cpu_ids(void)
6929  {
6930  	return nr_cpu_ids;
6931  }
6932  
6933  /**
6934   * scx_bpf_get_possible_cpumask - Get a referenced kptr to cpu_possible_mask
6935   */
6936  __bpf_kfunc const struct cpumask *scx_bpf_get_possible_cpumask(void)
6937  {
6938  	return cpu_possible_mask;
6939  }
6940  
6941  /**
6942   * scx_bpf_get_online_cpumask - Get a referenced kptr to cpu_online_mask
6943   */
6944  __bpf_kfunc const struct cpumask *scx_bpf_get_online_cpumask(void)
6945  {
6946  	return cpu_online_mask;
6947  }
6948  
6949  /**
6950   * scx_bpf_put_cpumask - Release a possible/online cpumask
6951   * @cpumask: cpumask to release
6952   */
6953  __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
6954  {
6955  	/*
6956  	 * Empty function body because we aren't actually acquiring or releasing
6957  	 * a reference to a global cpumask, which is read-only in the caller and
6958  	 * is never released. The acquire / release semantics here are just used
6959  	 * to make the cpumask a trusted pointer in the caller.
6960  	 */
6961  }
6962  
6963  /**
6964   * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
6965   * per-CPU cpumask.
6966   *
6967   * Returns an empty cpumask if idle tracking is disabled or on a UP kernel.
6968   */
6969  __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
6970  {
6971  	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
6972  		scx_ops_error("built-in idle tracking is disabled");
6973  		return cpu_none_mask;
6974  	}
6975  
6976  #ifdef CONFIG_SMP
6977  	return idle_masks.cpu;
6978  #else
6979  	return cpu_none_mask;
6980  #endif
6981  }
6982  
6983  /**
6984   * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
6985   * per-physical-core cpumask. Can be used to determine if an entire physical
6986   * core is free.
6987   *
6988   * Returns an empty cpumask if idle tracking is disabled or on a UP kernel.
6989   */
6990  __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
6991  {
6992  	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
6993  		scx_ops_error("built-in idle tracking is disabled");
6994  		return cpu_none_mask;
6995  	}
6996  
6997  #ifdef CONFIG_SMP
6998  	if (sched_smt_active())
6999  		return idle_masks.smt;
7000  	else
7001  		return idle_masks.cpu;
7002  #else
7003  	return cpu_none_mask;
7004  #endif
7005  }
7006  
7007  /**
7008   * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
7009   * either the percpu, or SMT idle-tracking cpumask.
7010   */
7011  __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
7012  {
7013  	/*
7014  	 * Empty function body because we aren't actually acquiring or releasing
7015  	 * a reference to a global idle cpumask, which is read-only in the
7016  	 * caller and is never released. The acquire / release semantics here
7017  	 * are just used to make the cpumask a trusted pointer in the caller.
7018  	 */
7019  }
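/*
 * Example (illustrative sketch): the acquire/release pair brackets any use of
 * the idle masks, e.g. preferring @prev_cpu only when its whole physical core
 * is idle:
 *
 *	const struct cpumask *idle = scx_bpf_get_idle_smtmask();
 *	bool core_idle = bpf_cpumask_test_cpu(prev_cpu, idle);
 *
 *	scx_bpf_put_idle_cpumask(idle);
 */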
7020  
7021  /**
7022   * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
7023   * @cpu: cpu to test and clear idle for
7024   *
7025   * Returns %true if @cpu was idle and its idle state was successfully cleared.
7026   * %false otherwise.
7027   *
7028   * Unavailable if ops.update_idle() is implemented and
7029   * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7030   */
7031  __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
7032  {
7033  	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7034  		scx_ops_error("built-in idle tracking is disabled");
7035  		return false;
7036  	}
7037  
7038  	if (ops_cpu_valid(cpu, NULL))
7039  		return test_and_clear_cpu_idle(cpu);
7040  	else
7041  		return false;
7042  }
7043  
7044  /**
7045   * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
7046   * @cpus_allowed: Allowed cpumask
7047   * @flags: %SCX_PICK_IDLE_CPU_* flags
7048   *
7049   * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
7050   * number on success. -%EBUSY if no matching cpu was found.
7051   *
7052   * Idle CPU tracking may race against CPU scheduling state transitions. For
7053   * example, this function may return -%EBUSY as CPUs are transitioning into the
7054   * idle state. If the caller then assumes that there will be dispatch events on
7055   * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
7056   * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
7057   * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
7058   * event in the near future.
7059   *
7060   * Unavailable if ops.update_idle() is implemented and
7061   * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
7062   */
7063  __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
7064  				      u64 flags)
7065  {
7066  	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
7067  		scx_ops_error("built-in idle tracking is disabled");
7068  		return -EBUSY;
7069  	}
7070  
7071  	return scx_pick_idle_cpu(cpus_allowed, flags);
7072  }
7073  
7074  /**
7075   * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
7076   * @cpus_allowed: Allowed cpumask
7077   * @flags: %SCX_PICK_IDLE_CPU_* flags
7078   *
7079   * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
7080   * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
7081   * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
7082   * empty.
7083   *
7084   * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
7085   * set, this function can't tell which CPUs are idle and will always pick any
7086   * CPU.
7087   */
7088  __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
7089  				     u64 flags)
7090  {
7091  	s32 cpu;
7092  
7093  	if (static_branch_likely(&scx_builtin_idle_enabled)) {
7094  		cpu = scx_pick_idle_cpu(cpus_allowed, flags);
7095  		if (cpu >= 0)
7096  			return cpu;
7097  	}
7098  
7099  	cpu = cpumask_any_distribute(cpus_allowed);
7100  	if (cpu < nr_cpu_ids)
7101  		return cpu;
7102  	else
7103  		return -EBUSY;
7104  }
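/*
 * Example (illustrative sketch): a minimal ops.select_cpu() built on the
 * picker above; when an idle CPU is claimed, dispatching directly to the
 * local DSQ skips the ops.enqueue() round trip.
 *
 *	s32 BPF_STRUCT_OPS(sched_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *		if (cpu >= 0) {
 *			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			return cpu;
 *		}
 *		return prev_cpu;
 *	}
 */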
7105  
7106  /**
7107   * scx_bpf_task_running - Is task currently running?
7108   * @p: task of interest
7109   */
7110  __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p)
7111  {
7112  	return task_rq(p)->curr == p;
7113  }
7114  
7115  /**
7116   * scx_bpf_task_cpu - CPU a task is currently associated with
7117   * @p: task of interest
7118   */
7119  __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p)
7120  {
7121  	return task_cpu(p);
7122  }
7123  
7124  /**
7125   * scx_bpf_cpu_rq - Fetch the rq of a CPU
7126   * @cpu: CPU of the rq
7127   */
7128  __bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
7129  {
7130  	if (!ops_cpu_valid(cpu, NULL))
7131  		return NULL;
7132  
7133  	return cpu_rq(cpu);
7134  }
7135  
7136  /**
7137   * scx_bpf_task_cgroup - Return the sched cgroup of a task
7138   * @p: task of interest
7139   *
7140   * @p->sched_task_group->css.cgroup represents the cgroup @p is associated with
7141   * from the scheduler's POV. SCX operations should use this function to
7142   * determine @p's current cgroup as, unlike following @p->cgroups,
7143   * @p->sched_task_group is protected by @p's rq lock and thus atomic w.r.t. all
7144   * rq-locked operations. Can be called on the parameter tasks of rq-locked
7145   * operations. The restriction guarantees that @p's rq is locked by the caller.
7146   */
7147  #ifdef CONFIG_CGROUP_SCHED
7148  __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
7149  {
7150  	struct task_group *tg = p->sched_task_group;
7151  	struct cgroup *cgrp = &cgrp_dfl_root.cgrp;
7152  
7153  	if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p))
7154  		goto out;
7155  
7156  	/*
7157  	 * A task_group may either be a cgroup or an autogroup. In the latter
7158  	 * case, @tg->css.cgroup is %NULL. A task_group can't become the other
7159  	 * kind once created.
7160  	 */
7161  	if (tg && tg->css.cgroup)
7162  		cgrp = tg->css.cgroup;
7163  	else
7164  		cgrp = &cgrp_dfl_root.cgrp;
7165  out:
7166  	cgroup_get(cgrp);
7167  	return cgrp;
7168  }
7169  #endif
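/*
 * Example (illustrative sketch): the returned cgroup is acquired and must be
 * dropped with bpf_cgroup_release(), e.g. when keying per-cgroup state by the
 * cgroup ID:
 *
 *	struct cgroup *cgrp = scx_bpf_task_cgroup(p);
 *	u64 cgid = cgrp->kn->id;
 *
 *	bpf_cgroup_release(cgrp);
 */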
7170  
7171  __bpf_kfunc_end_defs();
7172  
7173  BTF_KFUNCS_START(scx_kfunc_ids_any)
7174  BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
7175  BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
7176  BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
7177  BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED)
7178  BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL)
7179  BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY)
7180  BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
7181  BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
7182  BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
7183  BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
7184  BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
7185  BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
7186  BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
7187  BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
7188  BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
7189  BTF_ID_FLAGS(func, scx_bpf_put_cpumask, KF_RELEASE)
7190  BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
7191  BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
7192  BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
7193  BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
7194  BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
7195  BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
7196  BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
7197  BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
7198  BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
7199  #ifdef CONFIG_CGROUP_SCHED
7200  BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
7201  #endif
7202  BTF_KFUNCS_END(scx_kfunc_ids_any)
7203  
7204  static const struct btf_kfunc_id_set scx_kfunc_set_any = {
7205  	.owner			= THIS_MODULE,
7206  	.set			= &scx_kfunc_ids_any,
7207  };
7208  
7209  static int __init scx_init(void)
7210  {
7211  	int ret;
7212  
7213  	/*
7214  	 * kfunc registration can't be done from init_sched_ext_class() as
7215  	 * register_btf_kfunc_id_set() needs most of the system to be up.
7216  	 *
7217  	 * Some kfuncs are context-sensitive and can only be called from
7218  	 * specific SCX ops. They are grouped into BTF sets accordingly.
7219  	 * Unfortunately, BPF currently doesn't have a way of enforcing such
7220  	 * restrictions. Eventually, the verifier should be able to enforce
7221  	 * them. For now, register them the same and make each kfunc explicitly
7222  	 * check using scx_kf_allowed().
7223  	 */
7224  	if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7225  					     &scx_kfunc_set_select_cpu)) ||
7226  	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7227  					     &scx_kfunc_set_enqueue_dispatch)) ||
7228  	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7229  					     &scx_kfunc_set_dispatch)) ||
7230  	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7231  					     &scx_kfunc_set_cpu_release)) ||
7232  	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7233  					     &scx_kfunc_set_unlocked)) ||
7234  	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7235  					     &scx_kfunc_set_unlocked)) ||
7236  	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
7237  					     &scx_kfunc_set_any)) ||
7238  	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
7239  					     &scx_kfunc_set_any)) ||
7240  	    (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
7241  					     &scx_kfunc_set_any))) {
7242  		pr_err("sched_ext: Failed to register kfunc sets (%d)\n", ret);
7243  		return ret;
7244  	}
7245  
7246  	ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
7247  	if (ret) {
7248  		pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
7249  		return ret;
7250  	}
7251  
7252  	ret = register_pm_notifier(&scx_pm_notifier);
7253  	if (ret) {
7254  		pr_err("sched_ext: Failed to register PM notifier (%d)\n", ret);
7255  		return ret;
7256  	}
7257  
7258  	scx_kset = kset_create_and_add("sched_ext", &scx_uevent_ops, kernel_kobj);
7259  	if (!scx_kset) {
7260  		pr_err("sched_ext: Failed to create /sys/kernel/sched_ext\n");
7261  		return -ENOMEM;
7262  	}
7263  
7264  	ret = sysfs_create_group(&scx_kset->kobj, &scx_global_attr_group);
7265  	if (ret < 0) {
7266  		pr_err("sched_ext: Failed to add global attributes\n");
7267  		return ret;
7268  	}
7269  
7270  	return 0;
7271  }
7272  __initcall(scx_init);
7273