Lines Matching +full:locality +full:- +full:specific
1 /* SPDX-License-Identifier: GPL-2.0 */
3 * workqueue.h --- work queue handling for Linux.
23 #define work_data_bits(work) ((unsigned long *)(&(work)->data))
51 * data contains off-queue information when !WORK_STRUCT_PWQ.
60 WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,
66 * When a work item is off queue, the high bits encode off-queue flags
71 WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
101 /* Convenience constants - of type 'unsigned long', not 'enum'! */
103 #define WORK_OFFQ_FLAG_MASK (((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
104 #define WORK_OFFQ_DISABLE_MASK (((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
105 #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1)
107 #define WORK_STRUCT_PWQ_MASK (~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))
117 /* target workqueue and CPU ->timer uses to queue ->work */
126 /* target workqueue ->rcu uses to queue ->work */
142 * struct workqueue_attrs - A struct for workqueue attributes.
162 * @__pod_cpumask: internal attribute used to create per-pod pools
166 * Per-pod unbound worker pools are used to improve locality. Always a
167 * subset of ->cpumask. A workqueue can be associated with multiple
176 * If clear, workqueue will make a best-effort attempt at starting the
196 * CPU pods are used to improve execution locality of unbound work
285 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
286 lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
287 INIT_LIST_HEAD(&(_work)->entry); \
288 (_work)->func = (_func); \
294 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
295 INIT_LIST_HEAD(&(_work)->entry); \
296 (_work)->func = (_func); \
318 INIT_WORK(&(_work)->work, (_func)); \
319 __init_timer(&(_work)->timer, \
326 INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
327 __init_timer_on_stack(&(_work)->timer, \
345 INIT_WORK(&(_work)->work, (_func))
348 INIT_WORK_ONSTACK(&(_work)->work, (_func))
351 * work_pending - Find out whether a work item is currently pending
358 * delayed_work_pending - Find out whether a delayable work item is currently
363 work_pending(&(w)->work)
367 * Documentation/core-api/workqueue.rst.
379 * Per-cpu workqueues are generally preferred because they tend to
380 * show better performance thanks to cache locality. Per-cpu
387 * however, for example, a per-cpu work item scheduled from an
390 * turn may lead to more scheduling choices which are sub-optimal
393 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
395 * specified. Per-cpu workqueues which are identified to
396 * contribute significantly to power-consumption are identified and
420 * Per-node default cap on min_active. Unless explicitly set, min_active
422 * workqueue_struct->min_active definition.
428 * System-wide workqueues which are always present.
431 * Multi-CPU multi-threaded. There are users which expect relatively
442 * any specific CPU, not concurrency managed, and all queued works are
451 * they are the same as their non-power-efficient counterparts - e.g.
472 * alloc_workqueue - allocate a workqueue
475 * @max_active: max in-flight work items, 0 for default
478 * For a per-cpu workqueue, @max_active limits the number of in-flight work
482 * For unbound workqueues, @max_active limits the number of in-flight work items
491 * Depending on online CPU distribution, a node may end up with per-node
493 * deadlocks if the per-node concurrency limit is lower than the maximum number
499 * that the sum of per-node max_active's may be larger than @max_active.
502 * Documentation/core-api/workqueue.rst.
512 * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
515 * @max_active: max in-flight work items, 0 for default
516 * @lockdep_map: user-defined lockdep_map
519 * Same as alloc_workqueue but with a user-defined lockdep_map. Useful for
531 * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
532 * user-defined lockdep_map
536 * @lockdep_map: user-defined lockdep_map
539 * Same as alloc_ordered_workqueue but with a user-defined lockdep_map.
552 * alloc_ordered_workqueue - allocate an ordered workqueue
637 * queue_work - queue work on a workqueue
646 * Memory-ordering properties: If it returns %true, guarantees that all stores
666 * queue_delayed_work - queue work on a workqueue after delay
681 * mod_delayed_work - modify delay of or queue a delayed work
696 * schedule_work_on - put work task on a specific cpu
700 * This puts a job on a specific cpu
708 * schedule_work - put work task in global workqueue
711 * Returns %false if @work was already on the kernel-global workqueue and
714 * This puts a job in the kernel-global workqueue if it was not already
715 * queued and leaves it in the same position on the kernel-global
718 * Shares the same memory-ordering properties of queue_work(), cf. the
727 * enable_and_queue_work - Enable and queue a work item on a specific workqueue
753 * Detect attempt to flush system-wide workqueues at compile time when possible.
754 * Warn attempt to flush system-wide workqueues at runtime.
756 * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
757 * for reasons and steps for converting system-wide workqueues into local workqueues.
760 __compiletime_warning("Please avoid flushing system-wide workqueues.");
792 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
797 * After waiting for a given time this puts a job in the kernel-global
807 * schedule_delayed_work - put work task in global workqueue after delay
811 * After waiting for a given time this puts a job in the kernel-global