/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask_types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum work_bits {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT,		/* data points to pwq */
	WORK_STRUCT_LINKED_BIT,		/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT,		/* static initializer (debugobjects) */
#endif
	WORK_STRUCT_FLAG_BITS,

	/* color for workqueue flushing */
	WORK_STRUCT_COLOR_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_STRUCT_COLOR_BITS	= 4,

	/*
	 * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
	 * debugobjects turned off. This makes pwqs aligned to 256 bytes (512
	 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
	 *
	 * MSB
	 * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
	 *                     4 bits        4 or 5 bits
	 */
	WORK_STRUCT_PWQ_SHIFT	= WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,

	/*
	 * data contains off-queue information when !WORK_STRUCT_PWQ.
	 *
	 * MSB
	 * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
	 *                  16 bits          1 bit        4 or 5 bits
	 */
	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_OFFQ_BH_BIT	= WORK_OFFQ_FLAG_SHIFT,
	WORK_OFFQ_FLAG_END,
	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,

	WORK_OFFQ_DISABLE_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_DISABLE_BITS	= 16,

	/*
	 * When a work item is off queue, the high bits encode off-queue flags
	 * and the last pool it was on. Cap pool ID to 31 bits and use the
	 * highest number to indicate that no pool is associated.
	 */
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};

enum work_flags {
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
};

enum wq_misc_consts {
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 32,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_BH		(1ul << WORK_OFFQ_BH_BIT)
#define WORK_OFFQ_FLAG_MASK	(((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
#define WORK_OFFQ_DISABLE_MASK	(((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK	(~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

enum wq_affn_scope {
	WQ_AFFN_DFL,			/* use system default */
	WQ_AFFN_CPU,			/* one pod per CPU */
	WQ_AFFN_SMT,			/* one pod per SMT */
	WQ_AFFN_CACHE,			/* one pod per LLC */
	WQ_AFFN_NUMA,			/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,			/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 *
	 * Work items in this workqueue are affine to these CPUs and not allowed
	 * to execute on other CPUs. A pool serving a workqueue must have the
	 * same @cpumask.
	 */
	cpumask_var_t cpumask;

	/**
	 * @__pod_cpumask: internal attribute used to create per-pod pools
	 *
	 * Internal use only.
	 *
	 * Per-pod unbound worker pools are used to improve locality. Always a
	 * subset of ->cpumask. A workqueue can be associated with multiple
	 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
	 * of a pool's @__pod_cpumask is strict depends on @affn_strict.
	 */
	cpumask_var_t __pod_cpumask;

	/**
	 * @affn_strict: affinity scope is strict
	 *
	 * If clear, workqueue will make a best-effort attempt at starting the
	 * worker inside @__pod_cpumask but the scheduler is free to migrate it
	 * outside.
	 *
	 * If set, workers are only allowed to run inside @__pod_cpumask.
	 */
	bool affn_strict;

	/*
	 * Below fields aren't properties of a worker_pool. They only modify how
	 * :c:func:`apply_workqueue_attrs` selects pools and thus don't
	 * participate in pool hash calculations or equality comparisons.
	 *
	 * If @affn_strict is set, @cpumask isn't a property of a worker_pool
	 * either.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items. There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type. CPUs
	 * that belong to the same pod share the worker pool. For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
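
/*
 * Illustrative sketch, not part of the kernel API: statically declare a work
 * item and kick it from an event path.  frob_work and frob_workfn are
 * hypothetical names.
 *
 *	static void frob_workfn(struct work_struct *work)
 *	{
 *		pr_info("frobbing in process context\n");
 *	}
 *	static DECLARE_WORK(frob_work, frob_workfn);
 *
 * and later, e.g. from an interrupt handler:
 *
 *	schedule_work(&frob_work);
 */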

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static __maybe_unused struct lock_class_key __key;	\
									\
		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
	} while (0)

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
	__INIT_WORK_KEY((_work), (_func), 1, _key)
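
/*
 * Illustrative sketch, not part of the kernel API: initialize a work item
 * embedded in a driver structure at runtime.  struct frob_dev, frob_dev_probe
 * and frob_irq_workfn are hypothetical.
 *
 *	struct frob_dev {
 *		struct work_struct irq_work;
 *		...
 *	};
 *
 *	static void frob_irq_workfn(struct work_struct *work)
 *	{
 *		struct frob_dev *fdev = container_of(work, struct frob_dev,
 *						     irq_work);
 *		...
 *	}
 *
 * and, in frob_dev_probe():
 *
 *	INIT_WORK(&fdev->irq_work, frob_irq_workfn);
 */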

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
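
/*
 * Illustrative sketch, not part of the kernel API: a periodic poller built on
 * a delayed work item.  struct frob_dev, frob_poll_workfn, frob_poll_hw and
 * the 100ms period are hypothetical.
 *
 *	static void frob_poll_workfn(struct work_struct *work)
 *	{
 *		struct frob_dev *fdev = container_of(to_delayed_work(work),
 *						     struct frob_dev, poll_dwork);
 *
 *		frob_poll_hw(fdev);
 *		queue_delayed_work(system_wq, &fdev->poll_dwork,
 *				   msecs_to_jiffies(100));
 *	}
 *
 *	INIT_DELAYED_WORK(&fdev->poll_dwork, frob_poll_workfn);
 *	queue_delayed_work(system_wq, &fdev->poll_dwork, msecs_to_jiffies(100));
 */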

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
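
/*
 * Illustrative sketch, not part of the kernel API: free an object from process
 * context after an RCU grace period using an rcu_work.  struct frob_obj and
 * frob_free_workfn are hypothetical.
 *
 *	static void frob_free_workfn(struct work_struct *work)
 *	{
 *		struct frob_obj *obj = container_of(to_rcu_work(work),
 *						    struct frob_obj, free_rwork);
 *
 *		kvfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->free_rwork, frob_free_workfn);
 *	queue_rcu_work(system_wq, &obj->free_rwork);
 */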

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum wq_flags {
	WQ_BH			= 1 << 0, /* execute in bottom half (softirq) context */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if workqueue.power_efficient kernel param is
	 * specified.  Per-cpu workqueues which contribute significantly to
	 * power consumption are marked with this flag, and enabling the
	 * power_efficient mode leads to noticeable power savings at the
	 * cost of a small performance penalty.  (See the illustrative
	 * example following this enum.)
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */

	/* BH wq only allows the following flags */
	__WQ_BH_ALLOWS		= WQ_BH | WQ_HIGHPRI,
};
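
/*
 * Illustrative sketch, not part of the kernel API: a per-cpu workqueue for
 * background statistics collection which is allowed to become unbound when
 * workqueue.power_efficient is set.  "frob_stats" is a hypothetical name.
 *
 *	wq = alloc_workqueue("frob_stats", WQ_POWER_EFFICIENT | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 */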

enum wq_consts {
	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,

	/*
	 * Per-node default cap on min_active. Unless explicitly set, min_active
	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
	 * workqueue_struct->min_active definition.
	 */
	WQ_DFL_MIN_ACTIVE	= 8,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 *
 * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work items
 * are executed in the queueing CPU's BH context in the queueing order.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;

void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @...: args for @fmt
 *
 * For a per-cpu workqueue, @max_active limits the number of in-flight work
 * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
 * executing at most one work item for the workqueue.
 *
 * For unbound workqueues, @max_active limits the number of in-flight work items
 * for the whole system. e.g. @max_active of 16 indicates that there can be
 * at most 16 work items executing for the workqueue in the whole system.
 *
 * As sharing the same active counter for an unbound workqueue across multiple
 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
 * according to the proportion of the number of online CPUs and enforced
 * independently.
 *
 * Depending on online CPU distribution, a node may end up with per-node
 * max_active which is significantly lower than @max_active, which can lead to
 * deadlocks if the per-node concurrency limit is lower than the maximum number
 * of interdependent work items for the workqueue.
 *
 * To guarantee forward progress regardless of online CPU distribution, the
 * concurrency limit on every node is guaranteed to be equal to or greater than
 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
 * that the sum of per-node max_active's may be larger than @max_active.
 *
 * For detailed information on %WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
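
/*
 * Illustrative sketch, not part of the kernel API: a driver-private unbound
 * workqueue usable for memory reclaim with up to 16 concurrent work items.
 * frob_wq and fdev are hypothetical.
 *
 *	frob_wq = alloc_workqueue("frob", WQ_UNBOUND | WQ_MEM_RECLAIM, 16);
 *	if (!frob_wq)
 *		return -ENOMEM;
 *
 *	queue_work(frob_wq, &fdev->irq_work);
 *	...
 *	destroy_workqueue(frob_wq);
 */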

#ifdef CONFIG_LOCKDEP
/**
 * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @lockdep_map: user-defined lockdep_map
 * @...: args for @fmt
 *
 * Same as alloc_workqueue() but with a user-defined lockdep_map. Useful for
 * workqueues created with the same purpose and to avoid leaking a lockdep_map
 * on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 5) struct workqueue_struct *
alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
			    struct lockdep_map *lockdep_map, ...);

/**
 * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
 * user-defined lockdep_map
 *
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @lockdep_map: user-defined lockdep_map
 * @args: args for @fmt
 *
 * Same as alloc_ordered_workqueue() but with a user-defined lockdep_map.
 * Useful for workqueues created with the same purpose and to avoid leaking a
 * lockdep_map on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
	alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),	\
				    1, lockdep_map, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
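
/*
 * Illustrative sketch, not part of the kernel API: an ordered workqueue for
 * strictly serialized, queueing-order execution of interdependent work items.
 * "frob_ordered" is a hypothetical name.
 *
 *	ordered_wq = alloc_ordered_workqueue("frob_ordered", WQ_MEM_RECLAIM);
 *	if (!ordered_wq)
 *		return -ENOMEM;
 */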

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

#define from_work(var, callback_work, work_fieldname)	\
	container_of(callback_work, typeof(*var), work_fieldname)
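
/*
 * Illustrative sketch, not part of the kernel API: from_work() is a
 * container_of() shorthand for work handlers.  struct frob_dev and its
 * irq_work member are hypothetical.
 *
 *	static void frob_irq_workfn(struct work_struct *work)
 *	{
 *		struct frob_dev *fdev = from_work(fdev, work, irq_work);
 *
 *		...
 *	}
 */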

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);
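
/*
 * Illustrative sketch, not part of the kernel API: change the attributes of an
 * unbound workqueue, e.g. raise its priority and restrict it to CPU 2.
 * frob_wq is hypothetical; apply_workqueue_attrs() only operates on unbound
 * workqueues.
 *
 *	struct workqueue_attrs *attrs;
 *	int ret;
 *
 *	attrs = alloc_workqueue_attrs();
 *	if (!attrs)
 *		return -ENOMEM;
 *	attrs->nice = -10;
 *	cpumask_copy(attrs->cpumask, cpumask_of(2));
 *	ret = apply_workqueue_attrs(frob_wq, attrs);
 *	free_workqueue_attrs(attrs);
 */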

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
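
/*
 * Illustrative sketch, not part of the kernel API: typical teardown order.
 * Cancel (and wait for) pending or running work items before freeing the
 * memory that embeds them or destroying the workqueue they were queued on.
 * fdev and frob_wq are hypothetical.
 *
 *	cancel_delayed_work_sync(&fdev->poll_dwork);
 *	cancel_work_sync(&fdev->irq_work);
 *	destroy_workqueue(frob_wq);
 */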

extern bool disable_work(struct work_struct *work);
extern bool disable_work_sync(struct work_struct *work);
extern bool enable_work(struct work_struct *work);

extern bool disable_delayed_work(struct delayed_work *dwork);
extern bool disable_delayed_work_sync(struct delayed_work *dwork);
extern bool enable_delayed_work(struct delayed_work *dwork);
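
/*
 * Illustrative sketch, not part of the kernel API: temporarily keep a work
 * item from running.  disable/enable calls nest via a per-work disable depth.
 * fdev is hypothetical.
 *
 *	disable_work_sync(&fdev->irq_work);
 *	... reconfigure hardware; irq_work can neither run nor be queued ...
 *	enable_work(&fdev->irq_work);
 */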

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern void workqueue_set_min_active(struct workqueue_struct *wq,
				     int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
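
/*
 * Illustrative sketch, not part of the kernel API: (re)arm an inactivity
 * timeout with mod_delayed_work().  If the delayed work is already queued,
 * its pending timer is simply pushed back; otherwise it gets queued.  fdev
 * and the 500ms timeout are hypothetical.
 *
 *	mod_delayed_work(system_wq, &fdev->idle_dwork, msecs_to_jiffies(500));
 */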

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * enable_and_queue_work - Enable and queue a work item on a specific workqueue
 * @wq: The target workqueue
 * @work: The work item to be enabled and queued
 *
 * This function combines the operations of enable_work() and queue_work(),
 * providing a convenient way to enable and queue a work item in a single call.
 * It invokes enable_work() on @work and then queues it if the disable depth
 * reached 0. Returns %true if the disable depth reached 0 and @work is queued,
 * and %false otherwise.
 *
 * Note that @work is always queued when disable depth reaches zero. If the
 * desired behavior is queueing only if certain events took place while @work is
 * disabled, the user should implement the necessary state tracking and perform
 * explicit conditional queueing after enable_work().
 */
static inline bool enable_and_queue_work(struct workqueue_struct *wq,
					 struct work_struct *work)
{
	if (enable_work(work)) {
		queue_work(wq, work);
		return true;
	}
	return false;
}

/*
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible.  Warn about attempts to flush system-wide workqueues at runtime.
 *
 * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function, for it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_wq);					\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key);
/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
			  void *arg, struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu_safe(_cpu, _fn, _arg)		\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
})
#endif /* CONFIG_SMP */
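
/*
 * Illustrative sketch, not part of the kernel API: run a function
 * synchronously on a specific CPU and collect its return value.
 * frob_read_counter_fn, frob_read_local_counter and the CPU number are
 * hypothetical; work_on_cpu_safe() additionally protects against the target
 * CPU going away while the function runs.
 *
 *	static long frob_read_counter_fn(void *arg)
 *	{
 *		struct frob_dev *fdev = arg;
 *
 *		return frob_read_local_counter(fdev);
 *	}
 *
 *	ret = work_on_cpu_safe(2, frob_read_counter_fn, fdev);
 */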

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);

#endif