
Searched refs: issue_flags (Results 1 – 25 of 56), sorted by relevance


/linux-6.12.1/io_uring/
net.h
24 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
28 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
30 int io_send(struct io_kiocb *req, unsigned int issue_flags);
33 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
34 int io_recv(struct io_kiocb *req, unsigned int issue_flags);
39 int io_accept(struct io_kiocb *req, unsigned int issue_flags);
42 int io_socket(struct io_kiocb *req, unsigned int issue_flags);
45 int io_connect(struct io_kiocb *req, unsigned int issue_flags);
47 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
48 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
[all …]
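
Every hit in these headers has the same shape: each opcode handler takes the request plus an issue_flags word describing the issuing context, so a dispatch table can invoke any of them uniformly. A stand-in sketch of that calling convention (the opaque io_kiocb and the table below are illustrative, not the kernel's io_issue_defs[]):

struct io_kiocb;        /* opaque stand-in for the kernel's request type */

typedef int (*issue_fn)(struct io_kiocb *req, unsigned int issue_flags);

static int demo_issue(struct io_kiocb *req, unsigned int issue_flags)
{
        (void)req;
        (void)issue_flags;      /* a real handler inspects the flag bits */
        return 0;
}

static issue_fn issue_table[] = {
        demo_issue,             /* one slot per opcode in the real table */
};
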
uring_cmd.c
35 static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags) in io_req_uring_cleanup() argument
40 if (issue_flags & IO_URING_F_UNLOCKED) in io_req_uring_cleanup()
81 unsigned int issue_flags) in io_uring_cmd_del_cancelable() argument
90 io_ring_submit_lock(ctx, issue_flags); in io_uring_cmd_del_cancelable()
92 io_ring_submit_unlock(ctx, issue_flags); in io_uring_cmd_del_cancelable()
105 unsigned int issue_flags) in io_uring_cmd_mark_cancelable() argument
112 io_ring_submit_lock(ctx, issue_flags); in io_uring_cmd_mark_cancelable()
114 io_ring_submit_unlock(ctx, issue_flags); in io_uring_cmd_mark_cancelable()
151 unsigned issue_flags) in io_uring_cmd_done() argument
155 io_uring_cmd_del_cancelable(ioucmd, issue_flags); in io_uring_cmd_done()
[all …]
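
The uring_cmd.c hits show the pattern that dominates this result set: io_ring_submit_lock() and io_ring_submit_unlock() take or skip the ring mutex depending on whether IO_URING_F_UNLOCKED is set in issue_flags, i.e. on whether the issuer already holds ctx->uring_lock. A minimal userspace model of that convention (ring_ctx, RING_F_UNLOCKED and the helpers below are stand-ins, not the kernel's types):

#include <pthread.h>

#define RING_F_UNLOCKED (1U << 0)       /* issuer does NOT hold the ring lock */

struct ring_ctx {
        pthread_mutex_t uring_lock;
};

static void ring_submit_lock(struct ring_ctx *ctx, unsigned int issue_flags)
{
        /* Take the lock only if the issuing context didn't bring it along. */
        if (issue_flags & RING_F_UNLOCKED)
                pthread_mutex_lock(&ctx->uring_lock);
}

static void ring_submit_unlock(struct ring_ctx *ctx, unsigned int issue_flags)
{
        /* Drop it only if we were the ones who took it. */
        if (issue_flags & RING_F_UNLOCKED)
                pthread_mutex_unlock(&ctx->uring_lock);
}
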
kbuf.h
67 unsigned int issue_flags);
69 unsigned int issue_flags);
74 int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);
77 int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
83 void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);
85 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
116 static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) in io_kbuf_recycle() argument
121 return io_kbuf_recycle_legacy(req, issue_flags); in io_kbuf_recycle()
195 int nbufs, unsigned issue_flags) in __io_put_kbufs() argument
207 __io_put_kbuf(req, len, issue_flags); in __io_put_kbufs()
[all …]
openclose.c
114 int io_openat2(struct io_kiocb *req, unsigned int issue_flags) in io_openat2() argument
128 if (issue_flags & IO_URING_F_NONBLOCK) { in io_openat2()
153 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK))) in io_openat2()
158 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set) in io_openat2()
164 ret = io_fixed_fd_install(req, issue_flags, file, in io_openat2()
175 int io_openat(struct io_kiocb *req, unsigned int issue_flags) in io_openat() argument
177 return io_openat2(req, issue_flags); in io_openat()
188 int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags, in __io_close_fixed() argument
193 io_ring_submit_lock(ctx, issue_flags); in __io_close_fixed()
195 io_ring_submit_unlock(ctx, issue_flags); in __io_close_fixed()
[all …]
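
The io_openat2() hits illustrate the second major use of issue_flags: IO_URING_F_NONBLOCK marks the optimistic first issue from the submission path, and a handler that would have to sleep returns -EAGAIN so the core re-issues the request from an io-wq worker without the flag. A hedged sketch of that retry convention (REQ_F_NONBLOCK and do_open_attempt() are made-up stand-ins):

#include <errno.h>
#include <stdbool.h>

#define REQ_F_NONBLOCK (1U << 1)        /* stand-in for IO_URING_F_NONBLOCK */

/* Pretend open that only succeeds when it is allowed to block. */
static int do_open_attempt(bool may_block)
{
        return may_block ? 0 : -EAGAIN;
}

static int handle_open(unsigned int issue_flags)
{
        bool nonblock = issue_flags & REQ_F_NONBLOCK;

        /* First pass must not sleep; fail fast instead. */
        int ret = do_open_attempt(!nonblock);

        if (ret == -EAGAIN && nonblock)
                return -EAGAIN; /* core retries from a worker, flag cleared */
        return ret;
}
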
msg_ring.c
42 unsigned int issue_flags) in io_double_lock_ctx() argument
49 if (!(issue_flags & IO_URING_F_UNLOCKED)) { in io_double_lock_ctx()
137 static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags) in io_msg_ring_data() argument
159 if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) in io_msg_ring_data()
169 static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) in io_msg_grab_file() argument
176 io_ring_submit_lock(ctx, issue_flags); in io_msg_grab_file()
183 io_ring_submit_unlock(ctx, issue_flags); in io_msg_grab_file()
187 static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags) in io_msg_install_complete() argument
194 if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) in io_msg_install_complete()
248 static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) in io_msg_send_fd() argument
[all …]
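
io_double_lock_ctx() must lock a second ring's mutex while the issuer may already hold its own, so (per the IO_URING_F_UNLOCKED test at line 49) the target can only be trylocked in that case; blocking there could deadlock two rings that message each other. A self-contained model of that idea (names are stand-ins):

#include <errno.h>
#include <pthread.h>

#define RING_F_UNLOCKED (1U << 0)

static int double_lock_target(pthread_mutex_t *target_lock,
                              unsigned int issue_flags)
{
        if (!(issue_flags & RING_F_UNLOCKED)) {
                /* Own ring lock held: never block on the target, punt. */
                if (pthread_mutex_trylock(target_lock) != 0)
                        return -EAGAIN;
                return 0;
        }
        /* No lock held: an ordinary blocking lock is safe. */
        pthread_mutex_lock(target_lock);
        return 0;
}
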
openclose.h
3 int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
7 int io_openat(struct io_kiocb *req, unsigned int issue_flags);
11 int io_openat2(struct io_kiocb *req, unsigned int issue_flags);
14 int io_close(struct io_kiocb *req, unsigned int issue_flags);
17 int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags);
net.c
105 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) in io_shutdown() argument
111 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_shutdown()
138 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags) in io_netmsg_recycle() argument
144 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) { in io_netmsg_recycle()
446 unsigned int issue_flags) in io_req_msg_cleanup() argument
449 io_netmsg_recycle(req, issue_flags); in io_req_msg_cleanup()
493 unsigned issue_flags) in io_send_finish() argument
500 cflags = io_put_kbuf(req, *ret, issue_flags); in io_send_finish()
504 cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags); in io_send_finish()
525 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) in io_sendmsg() argument
[all …]
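
Note the WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK) in io_shutdown(); the same assertion recurs in xattr.c, sync.c, fs.c, splice.c and advise.c below. Handlers like these are expected to run only from a context that may block (typically forced async at prep time), so reaching them on the nonblocking submission path would be a bug, and the WARN asserts that invariant. A userspace analogue (assert() standing in for WARN_ON_ONCE, flag value made up):

#include <assert.h>

#define REQ_F_NONBLOCK (1U << 1)

static int handle_shutdown(unsigned int issue_flags)
{
        /* Force-async op: must never be reached on the nonblocking path. */
        assert(!(issue_flags & REQ_F_NONBLOCK));
        /* ... blocking work ... */
        return 0;
}
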
fs.h
4 int io_renameat(struct io_kiocb *req, unsigned int issue_flags);
8 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags);
12 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags);
16 int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags);
19 int io_linkat(struct io_kiocb *req, unsigned int issue_flags);
futex.h
7 int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags);
8 int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags);
9 int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags);
13 unsigned int issue_flags);
21 unsigned int issue_flags) in io_futex_cancel() argument
xattr.c
108 int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags) in io_fgetxattr() argument
113 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fgetxattr()
123 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags) in io_getxattr() argument
130 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_getxattr()
206 static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags, in __io_setxattr() argument
221 int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags) in io_fsetxattr() argument
225 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fsetxattr()
227 ret = __io_setxattr(req, issue_flags, &req->file->f_path); in io_fsetxattr()
232 int io_setxattr(struct io_kiocb *req, unsigned int issue_flags) in io_setxattr() argument
239 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_setxattr()
[all …]
xattr.h
6 int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags);
9 int io_setxattr(struct io_kiocb *req, unsigned int issue_flags);
12 int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags);
15 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags);
futex.c
117 unsigned int issue_flags) in io_futex_cancel() argument
126 io_ring_submit_lock(ctx, issue_flags); in io_futex_cancel()
136 io_ring_submit_unlock(ctx, issue_flags); in io_futex_cancel()
265 int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags) in io_futexv_wait() argument
272 io_ring_submit_lock(ctx, issue_flags); in io_futexv_wait()
280 io_ring_submit_unlock(ctx, issue_flags); in io_futexv_wait()
316 io_ring_submit_unlock(ctx, issue_flags); in io_futexv_wait()
320 int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags) in io_futex_wait() argument
333 io_ring_submit_lock(ctx, issue_flags); in io_futex_wait()
350 io_ring_submit_unlock(ctx, issue_flags); in io_futex_wait()
[all …]
cancel.c
104 unsigned issue_flags) in io_try_cancel() argument
119 ret = io_poll_cancel(ctx, cd, issue_flags); in io_try_cancel()
123 ret = io_waitid_cancel(ctx, cd, issue_flags); in io_try_cancel()
127 ret = io_futex_cancel(ctx, cd, issue_flags); in io_try_cancel()
167 unsigned int issue_flags) in __io_async_cancel() argument
175 ret = io_try_cancel(tctx, cd, issue_flags); in __io_async_cancel()
184 io_ring_submit_lock(ctx, issue_flags); in __io_async_cancel()
194 io_ring_submit_unlock(ctx, issue_flags); in __io_async_cancel()
198 int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) in io_async_cancel() argument
216 issue_flags); in io_async_cancel()
[all …]
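
io_try_cancel() probes one cancellation domain after another (poll, waitid, futex, ...), forwarding issue_flags so that each helper's conditional ring locking nests correctly. A sketch of that dispatch shape (the typedef and loop are illustrative):

#include <errno.h>

typedef int (*cancel_fn)(void *cd, unsigned int issue_flags);

static int try_cancel(cancel_fn *cancellers, int n, void *cd,
                      unsigned int issue_flags)
{
        for (int i = 0; i < n; i++) {
                int ret = cancellers[i](cd, issue_flags);

                if (ret != -ENOENT)     /* found it (or failed hard): stop */
                        return ret;
        }
        return -ENOENT;                 /* no domain knew the request */
}
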
sync.c
40 int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) in io_sync_file_range() argument
46 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_sync_file_range()
70 int io_fsync(struct io_kiocb *req, unsigned int issue_flags) in io_fsync() argument
77 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fsync()
99 int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) in io_fallocate() argument
105 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fallocate()
poll.h
34 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
37 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags);
41 unsigned issue_flags);
42 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
rw.c
91 unsigned int issue_flags) in __io_import_iovec() argument
105 buf = io_buffer_select(req, &sqe_len, issue_flags); in __io_import_iovec()
137 unsigned int issue_flags) in io_import_iovec() argument
141 ret = __io_import_iovec(rw, req, io, issue_flags); in io_import_iovec()
158 static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags) in io_rw_recycle() argument
163 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) { in io_rw_recycle()
176 static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags) in io_req_rw_cleanup() argument
207 io_rw_recycle(req, issue_flags); in io_req_rw_cleanup()
590 unsigned int issue_flags) in kiocb_done() argument
605 io_put_kbuf(req, ret, issue_flags)); in kiocb_done()
[all …]
fs.c
81 int io_renameat(struct io_kiocb *req, unsigned int issue_flags) in io_renameat() argument
86 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_renameat()
130 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_unlinkat() argument
135 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_unlinkat()
177 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags) in io_mkdirat() argument
182 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_mkdirat()
227 int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_symlinkat() argument
232 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_symlinkat()
272 int io_linkat(struct io_kiocb *req, unsigned int issue_flags) in io_linkat() argument
277 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_linkat()
splice.c
48 int io_tee(struct io_kiocb *req, unsigned int issue_flags) in io_tee() argument
56 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_tee()
59 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags); in io_tee()
88 int io_splice(struct io_kiocb *req, unsigned int issue_flags) in io_splice() argument
97 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_splice()
100 in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags); in io_splice()
kbuf.c
55 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags) in io_kbuf_recycle_legacy() argument
61 io_ring_submit_lock(ctx, issue_flags); in io_kbuf_recycle_legacy()
69 io_ring_submit_unlock(ctx, issue_flags); in io_kbuf_recycle_legacy()
73 void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags) in __io_put_kbuf() argument
87 if (issue_flags & IO_URING_F_UNLOCKED) { in __io_put_kbuf()
137 unsigned int issue_flags) in io_ring_buffer_select() argument
157 if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) { in io_ring_buffer_select()
175 unsigned int issue_flags) in io_buffer_select() argument
181 io_ring_submit_lock(req->ctx, issue_flags); in io_buffer_select()
186 ret = io_ring_buffer_select(req, len, bl, issue_flags); in io_buffer_select()
[all …]
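
The kbuf.c hits tie the two conventions together: io_buffer_select() hands out a provided buffer under io_ring_submit_lock(), and a buffer given to a request that then bails with -EAGAIN is recycled rather than consumed. A toy model of that select/recycle flow (struct buf_pool and friends are stand-ins; real selection also handles ring-mapped buffers):

#include <stddef.h>

struct buf_pool {
        void *bufs[16];
        int   top;              /* number of free buffers */
};

static void *buf_select(struct buf_pool *p)
{
        /* Called with the pool's lock held, as io_buffer_select() is. */
        return p->top ? p->bufs[--p->top] : NULL;
}

static void buf_recycle(struct buf_pool *p, void *buf)
{
        /* Request didn't complete: return the buffer for the retry. */
        p->bufs[p->top++] = buf;
}
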
sync.h
4 int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags);
7 int io_fsync(struct io_kiocb *req, unsigned int issue_flags);
9 int io_fallocate(struct io_kiocb *req, unsigned int issue_flags);
poll.c
583 unsigned issue_flags) in __io_arm_poll_handler() argument
605 ipt->owning = issue_flags & IO_URING_F_UNLOCKED; in __io_arm_poll_handler()
609 if (issue_flags & IO_URING_F_UNLOCKED) in __io_arm_poll_handler()
688 unsigned issue_flags) in io_req_alloc_apoll() argument
696 } else if (!(issue_flags & IO_URING_F_UNLOCKED)) { in io_req_alloc_apoll()
715 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) in io_arm_poll_handler() argument
748 apoll = io_req_alloc_apoll(req, issue_flags); in io_arm_poll_handler()
755 io_kbuf_recycle(req, issue_flags); in io_arm_poll_handler()
757 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags); in io_arm_poll_handler()
889 unsigned issue_flags) in io_poll_cancel() argument
[all …]
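
io_req_alloc_apoll() adds a nuance: the per-ring apoll cache may be used only when the ring lock is held (issue_flags without IO_URING_F_UNLOCKED); otherwise the code falls back to a plain allocation. A stand-in model of that lock-gated cache reuse:

#include <stdlib.h>

#define RING_F_UNLOCKED (1U << 0)

struct apoll { struct apoll *next; };

struct ring_cache { struct apoll *free_list; };

static struct apoll *alloc_apoll(struct ring_cache *c, unsigned int issue_flags)
{
        if (!(issue_flags & RING_F_UNLOCKED) && c->free_list) {
                /* Lock held: safe to pop the uring_lock-protected cache. */
                struct apoll *a = c->free_list;

                c->free_list = a->next;
                return a;
        }
        /* No lock held: cannot touch the cache, allocate fresh. */
        return malloc(sizeof(struct apoll));
}
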
advise.c
51 int io_madvise(struct io_kiocb *req, unsigned int issue_flags) in io_madvise() argument
57 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_madvise()
96 int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) in io_fadvise() argument
101 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK && io_fadvise_force_async(fa)); in io_fadvise()
rw.h
21 int io_read(struct io_kiocb *req, unsigned int issue_flags);
22 int io_write(struct io_kiocb *req, unsigned int issue_flags);
27 int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags);
/linux-6.12.1/include/linux/io_uring/
cmd.h
47 unsigned issue_flags);
58 unsigned int issue_flags);
70 ssize_t ret2, unsigned issue_flags) in io_uring_cmd_done() argument
79 unsigned int issue_flags) in io_uring_cmd_mark_cancelable() argument
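
cmd.h is the driver-facing side of the same contract: a ->uring_cmd() implementation receives issue_flags and must pass the identical value back into io_uring_cmd_done() (and the cancelable helpers), so the core knows whether the ring lock is held at completion time. A hedged kernel-style sketch; not buildable standalone, and demo_uring_cmd() is not a real driver:

#include <linux/errno.h>
#include <linux/io_uring/cmd.h>

static int demo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
        ssize_t ret;

        if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;         /* illustrative: punt work that may sleep */

        ret = 0;                        /* ... perform the command ... */

        /* Complete inline, forwarding the very flags we were issued with. */
        io_uring_cmd_done(ioucmd, ret, 0, issue_flags);
        return -EIOCBQUEUED;            /* completion already posted */
}
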
/linux-6.12.1/drivers/nvme/host/
ioctl.c
409 unsigned issue_flags) in nvme_uring_task_cb() argument
415 io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags); in nvme_uring_task_cb()
453 struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec) in nvme_uring_cmd_io() argument
495 if (issue_flags & IO_URING_F_NONBLOCK) { in nvme_uring_cmd_io()
499 if (issue_flags & IO_URING_F_IOPOLL) in nvme_uring_cmd_io()
622 static int nvme_uring_cmd_checks(unsigned int issue_flags) in nvme_uring_cmd_checks() argument
626 if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) != in nvme_uring_cmd_checks()
633 unsigned int issue_flags) in nvme_ns_uring_cmd() argument
640 ret = nvme_uring_cmd_checks(issue_flags); in nvme_ns_uring_cmd()
646 ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false); in nvme_ns_uring_cmd()
[all …]
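
The nvme hits show a driver consuming the remaining issue_flags bits: nvme_uring_cmd_io() punts on IO_URING_F_NONBLOCK and rejects IO_URING_F_IOPOLL where unsupported, while nvme_uring_cmd_checks() (line 626) requires both IO_URING_F_SQE128 and IO_URING_F_CQE32 by comparing the masked flags against the full required mask. A self-contained model of that capability check (flag values are made up):

#include <errno.h>

#define F_SQE128 (1U << 2)      /* ring created with 128-byte SQEs */
#define F_CQE32  (1U << 3)      /* ring created with 32-byte CQEs  */

static int cmd_checks(unsigned int issue_flags)
{
        const unsigned int need = F_SQE128 | F_CQE32;

        /* Both features must be present, not just one of them. */
        if ((issue_flags & need) != need)
                return -EOPNOTSUPP;
        return 0;
}
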
