
Searched full:work (Results 1 – 25 of 5258) sorted by relevance


/linux-6.12.1/fs/smb/server/
ksmbd_work.c:21 struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL); in ksmbd_alloc_work_struct() local
23 if (work) { in ksmbd_alloc_work_struct()
24 work->compound_fid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
25 work->compound_pfid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
26 INIT_LIST_HEAD(&work->request_entry); in ksmbd_alloc_work_struct()
27 INIT_LIST_HEAD(&work->async_request_entry); in ksmbd_alloc_work_struct()
28 INIT_LIST_HEAD(&work->fp_entry); in ksmbd_alloc_work_struct()
29 INIT_LIST_HEAD(&work->interim_entry); in ksmbd_alloc_work_struct()
30 INIT_LIST_HEAD(&work->aux_read_list); in ksmbd_alloc_work_struct()
31 work->iov_alloc_cnt = 4; in ksmbd_alloc_work_struct()
[all …]
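
For context: a minimal sketch of the slab-cache pattern shown above, with invented names rather than the ksmbd API — a dedicated kmem_cache is created once, each object is allocated zeroed, and its embedded list heads are initialized before use.

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>

struct my_item {
        struct list_head entry;
};

static struct kmem_cache *my_item_cache;

static int my_cache_init(void)
{
        /* one slab cache per object type, created at init time */
        my_item_cache = kmem_cache_create("my_item", sizeof(struct my_item),
                                          0, 0, NULL);
        return my_item_cache ? 0 : -ENOMEM;
}

static struct my_item *my_item_alloc(void)
{
        /* zeroed allocation, then initialize the embedded list head */
        struct my_item *item = kmem_cache_zalloc(my_item_cache, GFP_KERNEL);

        if (item)
                INIT_LIST_HEAD(&item->entry);
        return item;
}
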
server.c:88 * @work: smb work containing server thread information
92 static inline int check_conn_state(struct ksmbd_work *work) in check_conn_state() argument
96 if (ksmbd_conn_exiting(work->conn) || in check_conn_state()
97 ksmbd_conn_need_reconnect(work->conn)) { in check_conn_state()
98 rsp_hdr = work->response_buf; in check_conn_state()
108 static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn, in __process_request() argument
115 if (check_conn_state(work)) in __process_request()
118 if (ksmbd_verify_smb_message(work)) { in __process_request()
119 conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER); in __process_request()
123 command = conn->ops->get_cmd_val(work); in __process_request()
[all …]
smb2pdu.h:437 bool is_smb2_neg_cmd(struct ksmbd_work *work);
438 bool is_smb2_rsp(struct ksmbd_work *work);
440 u16 get_smb2_cmd_val(struct ksmbd_work *work);
441 void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
442 int init_smb2_rsp_hdr(struct ksmbd_work *work);
443 int smb2_allocate_rsp_buf(struct ksmbd_work *work);
444 bool is_chained_smb2_message(struct ksmbd_work *work);
445 int init_smb2_neg_rsp(struct ksmbd_work *work);
446 void smb2_set_err_rsp(struct ksmbd_work *work);
447 int smb2_check_user_session(struct ksmbd_work *work);
[all …]
smb2pdu.c:42 static void __wbuf(struct ksmbd_work *work, void **req, void **rsp) in __wbuf() argument
44 if (work->next_smb2_rcv_hdr_off) { in __wbuf()
45 *req = ksmbd_req_buf_next(work); in __wbuf()
46 *rsp = ksmbd_resp_buf_next(work); in __wbuf()
48 *req = smb2_get_msg(work->request_buf); in __wbuf()
49 *rsp = smb2_get_msg(work->response_buf); in __wbuf()
83 * @work: smb work
88 int smb2_get_ksmbd_tcon(struct ksmbd_work *work) in smb2_get_ksmbd_tcon() argument
90 struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work); in smb2_get_ksmbd_tcon()
101 if (xa_empty(&work->sess->tree_conns)) { in smb2_get_ksmbd_tcon()
[all …]
ksmbd_work.h:86 struct work_struct work; member
97 * @work: smb work containing response buffer
99 static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work) in ksmbd_resp_buf_next() argument
101 return work->response_buf + work->next_smb2_rsp_hdr_off + 4; in ksmbd_resp_buf_next()
106 * @work: smb work containing response buffer
108 static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work) in ksmbd_resp_buf_curr() argument
110 return work->response_buf + work->curr_smb2_rsp_hdr_off + 4; in ksmbd_resp_buf_curr()
115 * @work: smb work containing response buffer
117 static inline void *ksmbd_req_buf_next(struct ksmbd_work *work) in ksmbd_req_buf_next() argument
119 return work->request_buf + work->next_smb2_rcv_hdr_off + 4; in ksmbd_req_buf_next()
[all …]
smb_common.c:129 * @work: smb work
135 int ksmbd_verify_smb_message(struct ksmbd_work *work) in ksmbd_verify_smb_message() argument
137 struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work); in ksmbd_verify_smb_message()
141 return ksmbd_smb2_check_message(work); in ksmbd_verify_smb_message()
143 hdr = work->request_buf; in ksmbd_verify_smb_message()
146 work->conn->outstanding_credits++; in ksmbd_verify_smb_message()
306 * @work: smb work containing smb header
310 static u16 get_smb1_cmd_val(struct ksmbd_work *work) in get_smb1_cmd_val() argument
317 * @work: smb work containing smb request
321 static int init_smb1_rsp_hdr(struct ksmbd_work *work) in init_smb1_rsp_hdr() argument
[all …]
/linux-6.12.1/drivers/gpu/drm/
drm_flip_work.c:46 static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task) in drm_flip_work_queue_task() argument
50 spin_lock_irqsave(&work->lock, flags); in drm_flip_work_queue_task()
51 list_add_tail(&task->node, &work->queued); in drm_flip_work_queue_task()
52 spin_unlock_irqrestore(&work->lock, flags); in drm_flip_work_queue_task()
56 * drm_flip_work_queue - queue work
57 * @work: the flip-work
60 * Queues work, that will later be run (passed back to drm_flip_func_t
61 * func) on a work queue after drm_flip_work_commit() is called.
63 void drm_flip_work_queue(struct drm_flip_work *work, void *val) in drm_flip_work_queue() argument
70 drm_flip_work_queue_task(work, task); in drm_flip_work_queue()
[all …]
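
The queueing side above is the classic append-under-spinlock pattern: take the lock with interrupts saved so the list can be touched from any context. A hedged, generic sketch (invented names, not the DRM API):

#include <linux/spinlock.h>
#include <linux/list.h>

struct simple_queue {
        spinlock_t lock;
        struct list_head queued;
};

struct simple_task {
        struct list_head node;
};

static void simple_queue_add(struct simple_queue *q, struct simple_task *t)
{
        unsigned long flags;

        /* irqsave variant: safe even if callers run in interrupt context */
        spin_lock_irqsave(&q->lock, flags);
        list_add_tail(&t->node, &q->queued);
        spin_unlock_irqrestore(&q->lock, flags);
}
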
drm_vblank_work.c:38 * generic delayed work implementation which delays work execution until a
39 * particular vblank has passed, and then executes the work at realtime
43 * re-arming work items can be easily implemented.
48 struct drm_vblank_work *work, *next; in drm_handle_vblank_works() local
54 list_for_each_entry_safe(work, next, &vblank->pending_work, node) { in drm_handle_vblank_works()
55 if (!drm_vblank_passed(count, work->count)) in drm_handle_vblank_works()
58 list_del_init(&work->node); in drm_handle_vblank_works()
60 kthread_queue_work(vblank->worker, &work->base); in drm_handle_vblank_works()
67 /* Handle cancelling any pending vblank work items and drop respective vblank
72 struct drm_vblank_work *work, *next; in drm_vblank_cancel_pending_works() local
[all …]
/linux-6.12.1/include/trace/events/
workqueue.h:14 * workqueue_queue_work - called when a work gets queued
17 * @work: pointer to struct work_struct
19 * This event occurs when a work is queued immediately or once a
20 * delayed work is actually queued on a workqueue (ie: once the delay
26 struct work_struct *work),
28 TP_ARGS(req_cpu, pwq, work),
31 __field( void *, work )
39 __entry->work = work;
40 __entry->function = work->func;
46 TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
[all …]
/linux-6.12.1/virt/kvm/
async_pf.c:45 static void async_pf_execute(struct work_struct *work) in async_pf_execute() argument
48 container_of(work, struct kvm_async_pf, work); in async_pf_execute()
63 * work item is fully processed. in async_pf_execute()
99 static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work) in kvm_flush_and_free_async_pf_work() argument
102 * The async #PF is "done", but KVM must wait for the work item itself, in kvm_flush_and_free_async_pf_work()
105 * after the last call to module_put(). Note, flushing the work item in kvm_flush_and_free_async_pf_work()
111 * need to be flushed (but sanity check that the work wasn't queued). in kvm_flush_and_free_async_pf_work()
113 if (work->wakeup_all) in kvm_flush_and_free_async_pf_work()
114 WARN_ON_ONCE(work->work.func); in kvm_flush_and_free_async_pf_work()
116 flush_work(&work->work); in kvm_flush_and_free_async_pf_work()
[all …]
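
The comment above is an instance of a general rule: before freeing a work item (or unloading the module that owns its function), flush it so no CPU is still executing it. A minimal sketch of that flush-before-free discipline (invented names, not the KVM code):

#include <linux/workqueue.h>

static void my_async_fn(struct work_struct *work)
{
        /* may still be running after the queuer has moved on */
}

static DECLARE_WORK(my_async_work, my_async_fn);

static void my_teardown(void)
{
        /*
         * flush_work() returns only once any queued or currently
         * running instance of my_async_fn() has finished, making it
         * safe to release resources the function uses.
         */
        flush_work(&my_async_work);
}
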
/linux-6.12.1/fs/btrfs/
async-thread.c:29 /* List head pointing to ordered work list */
55 struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) in btrfs_work_owner() argument
57 return work->wq->fs_info; in btrfs_work_owner()
163 * Hook for threshold which will be called before executing the work,
214 struct btrfs_work *work; in run_ordered_work() local
223 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
225 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
231 * updates from ordinary work function. in run_ordered_work()
237 * we leave the work item on the list as a barrier so in run_ordered_work()
238 * that later work items that are done don't have their in run_ordered_work()
[all …]
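
The ordered-list logic quoted above boils down to: items may finish their parallel phase in any order, but the ordered phase only runs once an item reaches the head of the list and is marked done; an unfinished head item acts as a barrier. A rough, heavily simplified sketch of that idea (invented names, not the btrfs code):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct ordered_item {
        struct list_head node;
        bool done;                      /* parallel phase has finished */
        void (*ordered_fn)(struct ordered_item *it);
};

static void run_ordered(spinlock_t *lock, struct list_head *list)
{
        struct ordered_item *it;

        spin_lock(lock);
        while (!list_empty(list)) {
                it = list_first_entry(list, struct ordered_item, node);
                /* an unfinished head item is a barrier for those behind it */
                if (!it->done)
                        break;
                list_del_init(&it->node);
                spin_unlock(lock);
                it->ordered_fn(it);     /* runs strictly in queue order */
                spin_lock(lock);
        }
        spin_unlock(lock);
}
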
/linux-6.12.1/kernel/
task_work.c:19 * task_work_add - ask the @task to execute @work->func()
21 * @work: the callback to run
24 * Queue @work for task_work_run() below and notify the @task if @notify
35 * @TWA_RESUME work is run only when the task exits the kernel and returns to
40 * Fails if the @task is exiting/exited and thus it can't process this @work.
41 * Otherwise @work->func() will be called when the @task goes through one of
44 * If the targeted task is exiting, then an error is returned and the work item
54 int task_work_add(struct task_struct *task, struct callback_head *work, in task_work_add() argument
68 * Record the work call stack in order to print it in KASAN in task_work_add()
75 kasan_record_aux_stack_noalloc(work); in task_work_add()
[all …]
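
A usage sketch of the API documented above (invented names): queue a callback on the current task so it runs when the task next returns to user space.

#include <linux/task_work.h>
#include <linux/sched.h>

static void my_task_cb(struct callback_head *head)
{
        /* runs in the target task's context, e.g. on return to user mode */
}

static struct callback_head my_twork;

static int my_queue_on_current(void)
{
        init_task_work(&my_twork, my_task_cb);
        /* TWA_RESUME: run when the task next returns to user space */
        return task_work_add(current, &my_twork, TWA_RESUME);
}
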
irq_work.c:57 static bool irq_work_claim(struct irq_work *work) in irq_work_claim() argument
61 oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags); in irq_work_claim()
63 * If the work is already pending, no need to raise the IPI. in irq_work_claim()
79 static __always_inline void irq_work_raise(struct irq_work *work) in irq_work_raise() argument
82 trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func); in irq_work_raise()
87 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
88 static void __irq_work_queue_local(struct irq_work *work) in __irq_work_queue_local() argument
95 work_flags = atomic_read(&work->node.a_flags); in __irq_work_queue_local()
107 if (!llist_add(&work->node.llist, list)) in __irq_work_queue_local()
110 /* If the work is "lazy", handle it from next tick if any */ in __irq_work_queue_local()
[all …]
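
Claiming and raising are internal details; callers just initialize the item with its function and queue it. A hedged usage sketch (invented names):

#include <linux/irq_work.h>

static void my_irq_fn(struct irq_work *work)
{
        /* runs from the irq_work interrupt/IPI, so keep it short */
}

static struct irq_work my_irq_item = IRQ_WORK_INIT(my_irq_fn);

static void my_poke(void)
{
        /* returns false if the item was already claimed (pending) */
        irq_work_queue(&my_irq_item);
}
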
kthread.c:307 * functions which do some additional work in non-modular code such as
798 * when they finish. There is defined a safe point for freezing when one work
807 struct kthread_work *work; in kthread_worker_fn() local
830 work = NULL; in kthread_worker_fn()
833 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
835 list_del_init(&work->node); in kthread_worker_fn()
837 worker->current_work = work; in kthread_worker_fn()
840 if (work) { in kthread_worker_fn()
841 kthread_work_func_t func = work->func; in kthread_worker_fn()
843 trace_sched_kthread_work_execute_start(work); in kthread_worker_fn()
[all …]
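
The loop above is the consumer side of the kthread_worker API; from the producer side, typical use looks like this hedged sketch (invented names):

#include <linux/kthread.h>
#include <linux/err.h>

static void my_kwork_fn(struct kthread_work *work)
{
        /* executed by the dedicated worker thread, in queue order */
}

static int my_kworker_demo(void)
{
        struct kthread_worker *worker;
        struct kthread_work kwork;

        worker = kthread_create_worker(0, "my_worker");
        if (IS_ERR(worker))
                return PTR_ERR(worker);

        kthread_init_work(&kwork, my_kwork_fn);
        kthread_queue_work(worker, &kwork);
        kthread_flush_work(&kwork);     /* wait until it has run */
        kthread_destroy_worker(worker);
        return 0;
}
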
workqueue.c:18 * This is the generic async execution mechanism. Work items as are
21 * normal work items and the other for high priority ones) and some extra
238 PWQ_STAT_STARTED, /* work items started execution */
239 PWQ_STAT_COMPLETED, /* work items completed execution */
245 PWQ_STAT_RESCUED, /* linked work items executed by rescuer */
269 * When pwq->nr_active >= max_active, new work item is queued to
273 * All work items marked with WORK_STRUCT_INACTIVE do not participate in
274 * nr_active and all work items in pwq->inactive_works are marked with
275 * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
277 pool->worklist or worker->scheduled. Those work items are only struct in
[all …]
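
The nr_active/inactive machinery described above is what the max_active argument of alloc_workqueue() controls. A hedged sketch (invented names):

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *my_wq;

static void my_fn(struct work_struct *work) { }
static DECLARE_WORK(my_work, my_fn);

static int my_setup(void)
{
        /*
         * max_active = 1: at most one item from this workqueue runs at
         * a time; further queued items are marked WORK_STRUCT_INACTIVE
         * and wait on pwq->inactive_works.
         */
        my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 1);
        if (!my_wq)
                return -ENOMEM;
        queue_work(my_wq, &my_work);
        return 0;
}
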
/linux-6.12.1/LICENSES/dual/
copyleft-next-0.3.1:26 of, publicly perform and publicly display My Work.
40 Legal Notices contained in My Work (to the extent they remain
47 If You Distribute a Derived Work, You must license the entire Derived
48 Work as a whole under this License, with prominent notice of such
50 separate Distribution of portions of the Derived Work.
52 If the Derived Work includes material licensed under the GPL, You may
53 instead license the Derived Work under the GPL.
57 When Distributing a Covered Work, You may not impose further
58 restrictions on the exercise of rights in the Covered Work granted under
64 However, You may Distribute a Covered Work incorporating material
[all …]
Apache-2.0:49 "Work" shall mean the work of authorship, whether in Source or Object form,
51 is included in or attached to the work (an example is provided in the
54 "Derivative Works" shall mean any work, whether in Source or Object form,
55 that is based on (or derived from) the Work and for which the editorial
57 a whole, an original work of authorship. For the purposes of this License,
59 merely link (or bind by name) to the interfaces of, the Work and Derivative
62 "Contribution" shall mean any work of authorship, including the original
63 version of the Work and any modifications or additions to that Work or
65 inclusion in the Work by the copyright owner or by an individual or Legal
72 and improving the Work, but excluding communication that is conspicuously
[all …]
/linux-6.12.1/include/linux/
workqueue.h:3 * workqueue.h --- work queue handling for Linux.
20 * The first word is the work queue pointer and the flags rolled into
23 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) argument
26 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
27 WORK_STRUCT_INACTIVE_BIT, /* work item is inactive */
29 WORK_STRUCT_LINKED_BIT, /* next work is linked to this one */
66 * When a work item is off queue, the high bits encode off-queue flags
114 struct work_struct work; member
117 /* target workqueue and CPU ->timer uses to queue ->work */
123 struct work_struct work; member
[all …]
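
The delayed_work struct quoted above bundles a work_struct with the timer that later queues it. A hedged usage sketch (invented names):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void my_timeout_fn(struct work_struct *work)
{
        /* recover the containing delayed_work if its fields are needed */
        struct delayed_work *dwork = to_delayed_work(work);

        (void)dwork;
}

static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);

static void my_arm(void)
{
        /* the embedded ->timer queues ->work after roughly one second */
        schedule_delayed_work(&my_dwork, msecs_to_jiffies(1000));
}
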
completion.h:35 #define COMPLETION_INITIALIZER(work) \ argument
36 { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
38 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ argument
39 (*({ init_completion_map(&(work), &(map)); &(work); }))
41 #define COMPLETION_INITIALIZER_ONSTACK(work) \ argument
42 (*({ init_completion(&work); &work; }))
46 * @work: identifier for the completion structure
52 #define DECLARE_COMPLETION(work) \ argument
53 struct completion work = COMPLETION_INITIALIZER(work)
62 * @work: identifier for the completion structure
[all …]
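
Typical use of these initializers, as a hedged sketch (invented names): the on-stack variant pairs with a waiter that blocks until another context calls complete().

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

static int my_waker(void *data)
{
        struct completion *done = data;

        complete(done);                 /* wake the waiter below */
        return 0;
}

static void my_wait_demo(void)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *t;

        t = kthread_run(my_waker, &done, "my_waker");
        if (!IS_ERR(t))
                wait_for_completion(&done);     /* blocks until complete() */
}
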
/linux-6.12.1/tools/perf/
builtin-kwork.c:314 struct kwork_work *work; in work_search() local
318 work = container_of(node, struct kwork_work, node); in work_search()
319 cmp = work_cmp(sort_list, key, work); in work_search()
325 if (work->name == NULL) in work_search()
326 work->name = key->name; in work_search()
327 return work; in work_search()
361 struct kwork_work *work = zalloc(sizeof(*work)); in work_new() local
363 if (work == NULL) { in work_new()
364 pr_err("Failed to zalloc kwork work\n"); in work_new()
369 INIT_LIST_HEAD(&work->atom_list[i]); in work_new()
[all …]
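
work_search() above has the standard rbtree lookup shape: walk from the root, compare, descend left or right, and recover the containing struct with rb_entry(). A generic sketch of that shape (invented names, kernel rbtree API):

#include <linux/rbtree.h>

struct my_node {
        struct rb_node node;
        int key;
};

static struct my_node *my_search(struct rb_root *root, int key)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct my_node *cur = rb_entry(n, struct my_node, node);

                if (key < cur->key)
                        n = n->rb_left;
                else if (key > cur->key)
                        n = n->rb_right;
                else
                        return cur;     /* exact match */
        }
        return NULL;
}
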
/linux-6.12.1/drivers/staging/octeon/
ethernet-rx.c:59 * @work: Work queue entry pointing to the packet.
63 static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work) in cvm_oct_check_rcv_error() argument
68 port = work->word0.pip.cn68xx.pknd; in cvm_oct_check_rcv_error()
70 port = work->word1.cn38xx.ipprt; in cvm_oct_check_rcv_error()
72 if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) in cvm_oct_check_rcv_error()
81 if (work->word2.snoip.err_code == 5 || in cvm_oct_check_rcv_error()
82 work->word2.snoip.err_code == 7) { in cvm_oct_check_rcv_error()
99 cvmx_phys_to_ptr(work->packet_ptr.s.addr); in cvm_oct_check_rcv_error()
102 while (i < work->word1.len - 1) { in cvm_oct_check_rcv_error()
111 work->packet_ptr.s.addr += i + 1; in cvm_oct_check_rcv_error()
[all …]
/linux-6.12.1/rust/kernel/
workqueue.rs:3 //! Work queues.
5 //! This file has two components: The raw work item API, and the safe work item API.
15 //! The raw API consists of the [`RawWorkItem`] trait, where the work item needs to provide an
16 //! arbitrary function that knows how to enqueue the work item. It should usually not be used
21 //! The safe API is used via the [`Work`] struct and [`WorkItem`] traits. Furthermore, it also
24 //! * The [`Work`] struct is the Rust wrapper for the C `work_struct` type.
37 //! use kernel::workqueue::{self, impl_has_work, new_work, Work, WorkItem};
43 //! work: Work<MyStruct>,
47 //! impl HasWork<Self> for MyStruct { self.work }
54 //! work <- new_work!("MyStruct::work"),
[all …]
/linux-6.12.1/net/wireless/
debugfs.c:115 struct wiphy_work work; member
131 struct wiphy_work *work) in wiphy_locked_debugfs_read_work() argument
133 struct debugfs_read_work *w = container_of(work, typeof(*w), work); in wiphy_locked_debugfs_read_work()
144 wiphy_work_cancel(w->wiphy, &w->work); in wiphy_locked_debugfs_read_cancel()
159 struct debugfs_read_work work = { in wiphy_locked_debugfs_read() local
167 .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion), in wiphy_locked_debugfs_read()
171 .cancel_data = &work, in wiphy_locked_debugfs_read()
177 wiphy_work_init(&work.work, wiphy_locked_debugfs_read_work); in wiphy_locked_debugfs_read()
178 wiphy_work_queue(wiphy, &work.work); in wiphy_locked_debugfs_read()
181 wait_for_completion(&work.completion); in wiphy_locked_debugfs_read()
[all …]
/linux-6.12.1/Documentation/core-api/
workqueue.rst:17 When such an asynchronous execution context is needed, a work item
22 While there are work items on the workqueue the worker executes the
23 functions associated with the work items one after the other. When
24 there is no work item left on the workqueue the worker becomes idle.
25 When a new work item gets queued, the worker begins executing again.
43 while an ST wq one for the whole system. Work items had to compete for
72 abstraction, the work item, is introduced.
74 A work item is a simple struct that holds a pointer to the function
76 wants a function to be executed asynchronously it has to set up a work
77 item pointing to that function and queue that work item on a
[all …]
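
In code, the work item this document describes is just a work_struct plus a function; a minimal hedged example (invented names):

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        /* executed later, in a worker kthread's process context */
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_trigger(void)
{
        /* no-op (returns false) if the item is already pending */
        schedule_work(&my_work);
}
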
/linux-6.12.1/io_uring/
io-wq.c:33 IO_WORKER_F_BOUND = 3, /* is doing bounded work */
67 struct work_struct work; member
163 struct io_wq_work *work) in io_work_get_acct() argument
165 return io_get_acct(wq, !(atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND)); in io_work_get_acct()
254 * If there's work to do, returns true with acct->lock acquired. If not,
293 * starting work or finishing work. In either case, if it does in io_wq_activate_free_worker()
294 * to go sleep, we'll kick off a new task for this work anyway. in io_wq_activate_free_worker()
311 * Most likely an attempt to queue unbounded work on an io_wq that in io_wq_create_worker()
393 * work item after we canceled in io_wq_exit_workers(). in io_queue_worker_create()
430 * Worker will start processing some work. Move it to the busy list, if
[all …]
