/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/cleanup.h>
#include <linux/types.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00f00000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	4

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
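
/*
 * Worked example (illustrative only, using a hypothetical value): decoding
 * a preempt_count() of 0x00010102 with the masks above:
 *
 *	(0x00010102 & PREEMPT_MASK) >> PREEMPT_SHIFT == 2	preempt_disable() depth
 *	(0x00010102 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT == 1	softirq count
 *	(0x00010102 & HARDIRQ_MASK) >> HARDIRQ_SHIFT == 1	hardirq nesting
 *	(0x00010102 & NMI_MASK)     >> NMI_SHIFT     == 0	not in NMI
 *
 * Note that local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
 * while serving a softirq adds SOFTIRQ_OFFSET, so the low bit of the softirq
 * field means "currently serving a softirq" and the remaining bits count
 * BH-disable nesting.
 */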

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
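
/*
 * Illustrative arithmetic (ignoring any architecture-specific
 * PREEMPT_NEED_RESCHED bit folded into PREEMPT_ENABLED): with
 * CONFIG_PREEMPT_COUNT=y, PREEMPT_DISABLE_OFFSET == 1, so
 * FORK_PREEMPT_COUNT == 2*1 + PREEMPT_ENABLED, i.e. the two implicit
 * preemption-disable levels held across the switch; on !PREEMPT_COUNT
 * kernels the offset is 0 and only PREEMPT_ENABLED remains.
 */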

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

/**
 * interrupt_context_level - return interrupt context level
 *
 * Returns the current interrupt context level.
 *  0 - normal context
 *  1 - softirq context
 *  2 - hardirq context
 *  3 - NMI context
 */
static __always_inline unsigned char interrupt_context_level(void)
{
	unsigned long pc = preempt_count();
	unsigned char level = 0;

	level += !!(pc & (NMI_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	return level;
}
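
/*
 * Hypothetical usage sketch (all foo_* names are made up): because the
 * return value is a strict nesting level rather than a bitmask, it can be
 * used to index per-context storage so that task, softirq, hardirq and NMI
 * context each get their own slot:
 *
 *	struct foo_ctx {
 *		char buf[4][64];
 *	};
 *
 *	static void foo_record(struct foo_ctx *ctx, const char *msg)
 *	{
 *		strscpy(ctx->buf[interrupt_context_level()], msg,
 *			sizeof(ctx->buf[0]));
 *	}
 */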

/*
 * These macro definitions avoid redundant invocations of preempt_count()
 * because such invocations would result in redundant loads given that
 * preempt_count() is commonly implemented with READ_ONCE().
 */

#define nmi_count()	(preempt_count() & NMI_MASK)
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
# define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
# define irq_count()		((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
#else
# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
# define irq_count()		(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
#endif

/*
 * Macros to retrieve the current execution context:
 *
 * in_nmi()		- We're in NMI context
 * in_hardirq()		- We're in hard IRQ context
 * in_serving_softirq()	- We're in softirq context
 * in_task()		- We're in task context
 */
#define in_nmi()		(nmi_count())
#define in_hardirq()		(hardirq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#ifdef CONFIG_PREEMPT_RT
# define in_task()		(!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
#else
# define in_task()		(!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
#endif
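
/*
 * Illustrative sketch of typical usage (all foo_* names are made up): pick
 * a non-sleeping path when not running in plain task context:
 *
 *	static void foo_queue_event(struct foo_dev *fd, const struct foo_evt *evt)
 *	{
 *		gfp_t gfp = in_task() ? GFP_KERNEL : GFP_ATOMIC;
 *		struct foo_evt *copy = kmemdup(evt, sizeof(*evt), gfp);
 *		...
 *	}
 *
 * Note that in_task() alone does not make sleeping legal; the caller must
 * also not hold spinlocks, have interrupts disabled, etc.
 */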

/*
 * The following macros are deprecated and should not be used in new code:
 * in_irq()       - Obsolete version of in_hardirq()
 * in_softirq()   - We have BH disabled, or are processing softirqs
 * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
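
/*
 * Conversion sketch for the deprecated helpers above (illustrative):
 *
 *	if (in_irq())		becomes		if (in_hardirq())
 *	if (in_interrupt())	often becomes	if (!in_task())
 *
 * though in_interrupt() also covers "BH disabled", so each caller should be
 * checked for which context it actually cares about.
 */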

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#if !defined(CONFIG_PREEMPT_RT)
#define PREEMPT_LOCK_OFFSET		PREEMPT_DISABLE_OFFSET
#else
/* Locks on RT do not disable preemption */
#define PREEMPT_LOCK_OFFSET		0
#endif

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
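
/*
 * Worked example (!PREEMPT_RT, CONFIG_PREEMPT_COUNT=y):
 *
 *	SOFTIRQ_LOCK_OFFSET == SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET
 *	                    == 0x200 + 0x1 == 0x201
 *
 * which is what spin_lock_bh() adds to preempt_count() in one go, and what
 * the spin_unlock() + local_bh_enable() sequence above removes again in two
 * steps (0x1, then 0x200).
 */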

/*
 * Are we running in atomic context?  WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels.  Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)
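
/*
 * Illustration of the warning above (sketch; foo_lock and foo_sleeping_op()
 * are made up):
 *
 *	spin_lock(&foo_lock);
 *	if (!in_atomic())
 *		foo_sleeping_op();
 *	spin_unlock(&foo_lock);
 *
 * On a kernel without CONFIG_PREEMPT_COUNT, spin_lock() does not touch
 * preempt_count(), so in_atomic() still reads 0 inside the critical section
 * and the check provides no protection against sleeping under the lock.
 */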

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPTION */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */
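
/*
 * Typical usage sketch (foo_stat_count is a made-up per-CPU variable): stay
 * on the current CPU and non-preemptible while touching per-CPU state that
 * is only accessed from task context:
 *
 *	preempt_disable();
 *	__this_cpu_inc(foo_stat_count);
 *	preempt_enable();
 *
 * The pair must be balanced and the section must not sleep.  On
 * !CONFIG_PREEMPT_COUNT kernels both calls degrade to compiler barriers,
 * as noted above.
 */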

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	/* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */
	notifier->link.next = NULL;
	notifier->link.pprev = NULL;
	notifier->ops = ops;
}
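
/*
 * Illustrative sketch of the container_of() pattern referred to above
 * (all foo_* names are made up):
 *
 *	struct foo_vcpu {
 *		struct preempt_notifier pn;
 *		...
 *	};
 *
 *	static void foo_sched_out(struct preempt_notifier *pn,
 *				  struct task_struct *next)
 *	{
 *		struct foo_vcpu *vcpu = container_of(pn, struct foo_vcpu, pn);
 *
 *		foo_save_cpu_state(vcpu);
 *	}
 *
 *	static struct preempt_ops foo_preempt_ops = {
 *		.sched_in	= foo_sched_in,
 *		.sched_out	= foo_sched_out,
 *	};
 *
 * Setup is then preempt_notifier_init(&vcpu->pn, &foo_preempt_ops) followed
 * by preempt_notifier_register(&vcpu->pn) from the task that wants the
 * callbacks, with preempt_notifier_inc()/_dec() gating the machinery
 * globally.
 */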

#endif

#ifdef CONFIG_SMP

/*
 * Migrate-Disable and why it is undesired.
 *
 * When a preempted task becomes eligible to run under the ideal model (IOW it
 * becomes one of the M highest priority tasks), it might still have to wait
 * for the preemptee's migrate_disable() section to complete. Thereby suffering
 * a reduction in bandwidth in the exact duration of the migrate_disable()
 * section.
 *
 * Per this argument, the change from preempt_disable() to migrate_disable()
 * gets us:
 *
 * - a higher priority task gains reduced wake-up latency; with preempt_disable()
 *   it would have had to wait for the lower priority task.
 *
 * - a lower priority task, which under preempt_disable() could've instantly
 *   migrated away when another CPU becomes available, is now constrained
 *   by the ability to push the higher priority task away, which might itself be
 *   in a migrate_disable() section, reducing its available bandwidth.
 *
 * IOW it trades latency / moves the interference term, but it stays in the
 * system, and as long as it remains unbounded, the system is not fully
 * deterministic.
 *
 *
 * The reason we have it anyway.
 *
 * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
 * number of primitives into becoming preemptible, they would also allow
 * migration. This turns out to break a bunch of per-cpu usage. To this end,
 * all these primitives employ migrate_disable() to restore this implicit
 * assumption.
 *
 * This is a 'temporary' work-around at best. The correct solution is getting
 * rid of the above assumptions and reworking the code to employ explicit
 * per-cpu locking or short preempt-disable regions.
 *
 * The end goal must be to get rid of migrate_disable(), alternatively we need
 * a schedulability theory that does not depend on arbitrary migration.
 *
 *
 * Notes on the implementation.
 *
 * The implementation is particularly tricky since existing code patterns
 * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
 * This means that it cannot use cpus_read_lock() to serialize against hotplug,
 * nor can it easily migrate itself into a pending affinity mask change on
 * migrate_enable().
 *
 *
 * Note: even non-work-conserving schedulers like semi-partitioned depend on
 *       migration, so migrate_disable() is not only a problem for
 *       work-conserving schedulers.
 *
 */
extern void migrate_disable(void);
extern void migrate_enable(void);

#else

static inline void migrate_disable(void) { }
static inline void migrate_enable(void) { }

#endif /* CONFIG_SMP */
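
/*
 * Illustrative sketch (foo_flush_this_cpu() is made up): migrate_disable()
 * keeps smp_processor_id() stable across a section that may be preempted
 * (and, on PREEMPT_RT, may block on a spinlock_t), without disabling
 * preemption:
 *
 *	migrate_disable();
 *	foo_flush_this_cpu(smp_processor_id());
 *	migrate_enable();
 *
 * This only pins the task to its CPU; it does not serialize against other
 * tasks or interrupts on that CPU, so shared per-CPU data still needs a
 * local lock or this_cpu/atomic operations.
 */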

/**
 * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
 *
 * Use for code which requires preemption protection inside a critical
 * section which has preemption disabled implicitly on non-PREEMPT_RT
 * enabled kernels, by e.g.:
 *  - holding a spinlock/rwlock
 *  - soft interrupt context
 *  - regular interrupt handlers
 *
 * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
 * interrupt context and regular interrupt handlers are preemptible and
 * only prevent migration. preempt_disable_nested() ensures that preemption
 * is disabled for cases which require CPU local serialization even on
 * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
 *
 * The use cases are code sequences which are not serialized by a
 * particular lock instance, e.g.:
 *  - seqcount write side critical sections where the seqcount is not
 *    associated to a particular lock and therefore the automatic
 *    protection mechanism does not work. This prevents a live lock
 *    against a preempting high priority reader.
 *  - RMW per CPU variable updates like vmstat.
 */
/* Macro to avoid header recursion hell vs. lockdep */
#define preempt_disable_nested()				\
do {								\
	if (IS_ENABLED(CONFIG_PREEMPT_RT))			\
		preempt_disable();				\
	else							\
		lockdep_assert_preemption_disabled();		\
} while (0)

/**
 * preempt_enable_nested - Undo the effect of preempt_disable_nested()
 */
static __always_inline void preempt_enable_nested(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
}
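
/*
 * Illustrative sketch of the RMW-per-CPU case listed above (foo_events is a
 * made-up per-CPU counter), called from a section that already holds a
 * spinlock and is therefore non-preemptible on !PREEMPT_RT but preemptible
 * on PREEMPT_RT:
 *
 *	preempt_disable_nested();
 *	raw_cpu_inc(foo_events);
 *	preempt_enable_nested();
 *
 * On !PREEMPT_RT this merely asserts via lockdep that preemption is already
 * disabled; on PREEMPT_RT it disables and re-enables preemption around the
 * update.
 */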

DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
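
/*
 * These guards allow the scope-based forms from <linux/cleanup.h> to replace
 * explicit disable/enable pairs; illustrative sketch (foo_* names are made
 * up):
 *
 *	static unsigned long foo_bump(void)
 *	{
 *		guard(preempt)();
 *		return __this_cpu_inc_return(foo_counter);
 *	}
 *
 *	scoped_guard(migrate)
 *		foo_touch_this_cpu();
 *
 * Preemption (or migration) is re-enabled automatically when the scope ends,
 * including on early returns.
 */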

#ifdef CONFIG_PREEMPT_DYNAMIC

extern bool preempt_model_none(void);
extern bool preempt_model_voluntary(void);
extern bool preempt_model_full(void);

#else

static inline bool preempt_model_none(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_NONE);
}
static inline bool preempt_model_voluntary(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
}
static inline bool preempt_model_full(void)
{
	return IS_ENABLED(CONFIG_PREEMPT);
}

#endif

static inline bool preempt_model_rt(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_RT);
}

/*
 * Does the preemption model allow non-cooperative preemption?
 *
 * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
 * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
 * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
 * PREEMPT_NONE model.
 */
static inline bool preempt_model_preemptible(void)
{
	return preempt_model_full() || preempt_model_rt();
}
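
/*
 * Illustrative sketch (foo_process() is made up): long loops sometimes use
 * these helpers to decide whether an explicit reschedule point is needed:
 *
 *	for (i = 0; i < nr; i++) {
 *		foo_process(&items[i]);
 *		if (!preempt_model_preemptible())
 *			cond_resched();
 *	}
 *
 * With a full or RT preemption model the kernel can preempt the loop on its
 * own; with the none/voluntary models the explicit cond_resched() provides
 * the latency bound.
 */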

#endif /* __LINUX_PREEMPT_H */