Lines matching full:req (identifier search hits; the functions shown are io_uring's read/write handlers in io_uring/rw.c). A hedged userspace usage sketch follows the listing.

34 static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)  in io_file_supports_nowait()  argument
37 if (req->flags & REQ_F_SUPPORT_NOWAIT) in io_file_supports_nowait()
40 if (io_file_can_poll(req)) { in io_file_supports_nowait()
43 return vfs_poll(req->file, &pt) & mask; in io_file_supports_nowait()
68 static int io_iov_buffer_select_prep(struct io_kiocb *req) in io_iov_buffer_select_prep() argument
72 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_iov_buffer_select_prep()
78 if (req->ctx->compat) in io_iov_buffer_select_prep()
89 static int __io_import_iovec(int ddir, struct io_kiocb *req, in __io_import_iovec() argument
93 const struct io_issue_def *def = &io_issue_defs[req->opcode]; in __io_import_iovec()
94 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in __io_import_iovec()
103 if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) { in __io_import_iovec()
104 if (io_do_buffer_select(req)) { in __io_import_iovec()
105 buf = io_buffer_select(req, &sqe_len, issue_flags); in __io_import_iovec()
123 req->ctx->compat); in __io_import_iovec()
127 req->flags |= REQ_F_NEED_CLEANUP; in __io_import_iovec()
135 static inline int io_import_iovec(int rw, struct io_kiocb *req, in io_import_iovec() argument
141 ret = __io_import_iovec(rw, req, io, issue_flags); in io_import_iovec()
158 static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags) in io_rw_recycle() argument
160 struct io_async_rw *rw = req->async_data; in io_rw_recycle()
168 if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) { in io_rw_recycle()
171 req->async_data = NULL; in io_rw_recycle()
172 req->flags &= ~REQ_F_ASYNC_DATA; in io_rw_recycle()
176 static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags) in io_req_rw_cleanup() argument
205 if (!(req->flags & REQ_F_REFCOUNT)) { in io_req_rw_cleanup()
206 req->flags &= ~REQ_F_NEED_CLEANUP; in io_req_rw_cleanup()
207 io_rw_recycle(req, issue_flags); in io_req_rw_cleanup()
211 static int io_rw_alloc_async(struct io_kiocb *req) in io_rw_alloc_async() argument
213 struct io_ring_ctx *ctx = req->ctx; in io_rw_alloc_async()
221 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_alloc_async()
223 req->flags |= REQ_F_ASYNC_DATA; in io_rw_alloc_async()
224 req->async_data = rw; in io_rw_alloc_async()
228 if (!io_alloc_async_data(req)) { in io_rw_alloc_async()
229 rw = req->async_data; in io_rw_alloc_async()
240 static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import) in io_prep_rw_setup() argument
245 if (io_rw_alloc_async(req)) in io_prep_rw_setup()
248 if (!do_import || io_do_buffer_select(req)) in io_prep_rw_setup()
251 rw = req->async_data; in io_prep_rw_setup()
252 ret = io_import_iovec(ddir, req, rw, 0); in io_prep_rw_setup()
260 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rw() argument
263 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_prep_rw()
269 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
286 return io_prep_rw_setup(req, ddir, do_import); in io_prep_rw()
289 int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_read() argument
291 return io_prep_rw(req, sqe, ITER_DEST, true); in io_prep_read()
294 int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_write() argument
296 return io_prep_rw(req, sqe, ITER_SOURCE, true); in io_prep_write()
299 static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rwv() argument
302 const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT); in io_prep_rwv()
305 ret = io_prep_rw(req, sqe, ddir, do_import); in io_prep_rwv()
315 return io_iov_buffer_select_prep(req); in io_prep_rwv()
318 int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_readv() argument
320 return io_prep_rwv(req, sqe, ITER_DEST); in io_prep_readv()
323 int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_writev() argument
325 return io_prep_rwv(req, sqe, ITER_SOURCE); in io_prep_writev()
328 static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rw_fixed() argument
331 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_prep_rw_fixed()
332 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw_fixed()
337 ret = io_prep_rw(req, sqe, ddir, false); in io_prep_rw_fixed()
341 if (unlikely(req->buf_index >= ctx->nr_user_bufs)) in io_prep_rw_fixed()
343 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); in io_prep_rw_fixed()
344 req->imu = ctx->user_bufs[index]; in io_prep_rw_fixed()
345 io_req_set_rsrc_node(req, ctx, 0); in io_prep_rw_fixed()
347 io = req->async_data; in io_prep_rw_fixed()
348 ret = io_import_fixed(ddir, &io->iter, req->imu, rw->addr, rw->len); in io_prep_rw_fixed()
353 int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_read_fixed() argument
355 return io_prep_rw_fixed(req, sqe, ITER_DEST); in io_prep_read_fixed()
358 int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_write_fixed() argument
360 return io_prep_rw_fixed(req, sqe, ITER_SOURCE); in io_prep_write_fixed()
367 int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_read_mshot_prep() argument
369 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_read_mshot_prep()
373 if (!(req->flags & REQ_F_BUFFER_SELECT)) in io_read_mshot_prep()
376 ret = io_prep_rw(req, sqe, ITER_DEST, false); in io_read_mshot_prep()
383 req->flags |= REQ_F_APOLL_MULTISHOT; in io_read_mshot_prep()
387 void io_readv_writev_cleanup(struct io_kiocb *req) in io_readv_writev_cleanup() argument
389 io_rw_iovec_free(req->async_data); in io_readv_writev_cleanup()
392 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) in io_kiocb_update_pos() argument
394 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_kiocb_update_pos()
399 if (!(req->file->f_mode & FMODE_STREAM)) { in io_kiocb_update_pos()
400 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
401 rw->kiocb.ki_pos = req->file->f_pos; in io_kiocb_update_pos()
410 static void io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
412 struct io_async_rw *io = req->async_data; in io_resubmit_prep()
417 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
419 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
420 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
424 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
438 if (!same_thread_group(req->task, current) || !in_task()) in io_rw_should_reissue()
443 static void io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
446 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
452 static void io_req_end_write(struct io_kiocb *req) in io_req_end_write() argument
454 if (req->flags & REQ_F_ISREG) { in io_req_end_write()
455 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_end_write()
465 static void io_req_io_end(struct io_kiocb *req) in io_req_io_end() argument
467 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_io_end()
470 io_req_end_write(req); in io_req_io_end()
471 fsnotify_modify(req->file); in io_req_io_end()
473 fsnotify_access(req->file); in io_req_io_end()
477 static bool __io_complete_rw_common(struct io_kiocb *req, long res) in __io_complete_rw_common() argument
479 if (unlikely(res != req->cqe.res)) { in __io_complete_rw_common()
480 if (res == -EAGAIN && io_rw_should_reissue(req)) { in __io_complete_rw_common()
485 io_req_io_end(req); in __io_complete_rw_common()
486 req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; in __io_complete_rw_common()
489 req_set_fail(req); in __io_complete_rw_common()
490 req->cqe.res = res; in __io_complete_rw_common()
495 static inline int io_fixup_rw_res(struct io_kiocb *req, long res) in io_fixup_rw_res() argument
497 struct io_async_rw *io = req->async_data; in io_fixup_rw_res()
500 if (req_has_async_data(req) && io->bytes_done > 0) { in io_fixup_rw_res()
509 void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts) in io_req_rw_complete() argument
511 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_rw_complete()
517 io_req_set_res(req, io_fixup_rw_res(req, res), 0); in io_req_rw_complete()
520 io_req_io_end(req); in io_req_rw_complete()
522 if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) in io_req_rw_complete()
523 req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0); in io_req_rw_complete()
525 io_req_rw_cleanup(req, 0); in io_req_rw_complete()
526 io_req_task_complete(req, ts); in io_req_rw_complete()
532 struct io_kiocb *req = cmd_to_io_kiocb(rw); in io_complete_rw() local
535 if (__io_complete_rw_common(req, res)) in io_complete_rw()
537 io_req_set_res(req, io_fixup_rw_res(req, res), 0); in io_complete_rw()
539 req->io_task_work.func = io_req_rw_complete; in io_complete_rw()
540 __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE); in io_complete_rw()
546 struct io_kiocb *req = cmd_to_io_kiocb(rw); in io_complete_rw_iopoll() local
549 io_req_end_write(req); in io_complete_rw_iopoll()
550 if (unlikely(res != req->cqe.res)) { in io_complete_rw_iopoll()
551 if (res == -EAGAIN && io_rw_should_reissue(req)) { in io_complete_rw_iopoll()
552 req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; in io_complete_rw_iopoll()
555 req->cqe.res = res; in io_complete_rw_iopoll()
559 smp_store_release(&req->iopoll_completed, 1); in io_complete_rw_iopoll()
589 static int kiocb_done(struct io_kiocb *req, ssize_t ret, in kiocb_done() argument
592 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in kiocb_done()
593 unsigned final_ret = io_fixup_rw_res(req, ret); in kiocb_done()
595 if (ret >= 0 && req->flags & REQ_F_CUR_POS) in kiocb_done()
596 req->file->f_pos = rw->kiocb.ki_pos; in kiocb_done()
598 if (!__io_complete_rw_common(req, ret)) { in kiocb_done()
603 io_req_io_end(req); in kiocb_done()
604 io_req_set_res(req, final_ret, in kiocb_done()
605 io_put_kbuf(req, ret, issue_flags)); in kiocb_done()
606 io_req_rw_cleanup(req, issue_flags); in kiocb_done()
613 if (req->flags & REQ_F_REISSUE) { in kiocb_done()
614 req->flags &= ~REQ_F_REISSUE; in kiocb_done()
615 io_resubmit_prep(req); in kiocb_done()
706 struct io_kiocb *req = wait->private; in io_async_buf_func() local
707 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_async_buf_func()
717 io_req_task_queue(req); in io_async_buf_func()
733 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
735 struct io_async_rw *io = req->async_data; in io_rw_should_retry()
737 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_should_retry()
741 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
752 if (io_file_can_poll(req) || in io_rw_should_retry()
753 !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC)) in io_rw_should_retry()
757 wait->wait.private = req; in io_rw_should_retry()
778 static bool need_complete_io(struct io_kiocb *req) in need_complete_io() argument
780 return req->flags & REQ_F_ISREG || in need_complete_io()
781 S_ISBLK(file_inode(req->file)->i_mode); in need_complete_io()
784 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type) in io_rw_init_file() argument
786 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_init_file()
788 struct io_ring_ctx *ctx = req->ctx; in io_rw_init_file()
789 struct file *file = req->file; in io_rw_init_file()
795 if (!(req->flags & REQ_F_FIXED_FILE)) in io_rw_init_file()
796 req->flags |= io_file_get_flags(file); in io_rw_init_file()
810 ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT)))) in io_rw_init_file()
811 req->flags |= REQ_F_NOWAIT; in io_rw_init_file()
820 req->iopoll_completed = 0; in io_rw_init_file()
830 static int __io_read(struct io_kiocb *req, unsigned int issue_flags) in __io_read() argument
833 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in __io_read()
834 struct io_async_rw *io = req->async_data; in __io_read()
839 if (io_do_buffer_select(req)) { in __io_read()
840 ret = io_import_iovec(ITER_DEST, req, io, issue_flags); in __io_read()
844 ret = io_rw_init_file(req, FMODE_READ, READ); in __io_read()
847 req->cqe.res = iov_iter_count(&io->iter); in __io_read()
851 if (unlikely(!io_file_supports_nowait(req, EPOLLIN))) in __io_read()
859 ppos = io_kiocb_update_pos(req); in __io_read()
861 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); in __io_read()
875 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { in __io_read()
876 req->flags &= ~REQ_F_REISSUE; in __io_read()
878 if (io_file_can_poll(req)) in __io_read()
881 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_read()
884 if (req->flags & REQ_F_NOWAIT) in __io_read()
889 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || in __io_read()
890 (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) { in __io_read()
915 if (!io_rw_should_retry(req)) { in __io_read()
920 req->cqe.res = iov_iter_count(&io->iter); in __io_read()
939 int io_read(struct io_kiocb *req, unsigned int issue_flags) in io_read() argument
943 ret = __io_read(req, issue_flags); in io_read()
945 return kiocb_done(req, ret, issue_flags); in io_read()
950 int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags) in io_read_mshot() argument
952 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_read_mshot()
959 if (!io_file_can_poll(req)) in io_read_mshot()
962 ret = __io_read(req, issue_flags); in io_read_mshot()
973 if (io_kbuf_recycle(req, issue_flags)) in io_read_mshot()
979 io_kbuf_recycle(req, issue_flags); in io_read_mshot()
981 req_set_fail(req); in io_read_mshot()
989 cflags = io_put_kbuf(req, ret, issue_flags); in io_read_mshot()
992 if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) { in io_read_mshot()
999 io_poll_multishot_retry(req); in io_read_mshot()
1010 io_req_set_res(req, ret, cflags); in io_read_mshot()
1011 io_req_rw_cleanup(req, issue_flags); in io_read_mshot()
1017 static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb) in io_kiocb_start_write() argument
1022 if (!(req->flags & REQ_F_ISREG)) in io_kiocb_start_write()
1036 int io_write(struct io_kiocb *req, unsigned int issue_flags) in io_write() argument
1039 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_write()
1040 struct io_async_rw *io = req->async_data; in io_write()
1045 ret = io_rw_init_file(req, FMODE_WRITE, WRITE); in io_write()
1048 req->cqe.res = iov_iter_count(&io->iter); in io_write()
1052 if (unlikely(!io_file_supports_nowait(req, EPOLLOUT))) in io_write()
1057 !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) && in io_write()
1058 (req->flags & REQ_F_ISREG)) in io_write()
1067 ppos = io_kiocb_update_pos(req); in io_write()
1069 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); in io_write()
1073 if (unlikely(!io_kiocb_start_write(req, kiocb))) in io_write()
1077 if (likely(req->file->f_op->write_iter)) in io_write()
1078 ret2 = req->file->f_op->write_iter(kiocb, &io->iter); in io_write()
1079 else if (req->file->f_op->write) in io_write()
1084 if (req->flags & REQ_F_REISSUE) { in io_write()
1085 req->flags &= ~REQ_F_REISSUE; in io_write()
1096 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
1100 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) in io_write()
1103 if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { in io_write()
1104 trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2, in io_write()
1105 req->cqe.res, ret2); in io_write()
1116 io_req_end_write(req); in io_write()
1120 return kiocb_done(req, ret2, issue_flags); in io_write()
1125 io_req_end_write(req); in io_write()
1130 void io_rw_fail(struct io_kiocb *req) in io_rw_fail() argument
1134 res = io_fixup_rw_res(req, req->cqe.res); in io_rw_fail()
1135 io_req_set_res(req, res, req->cqe.flags); in io_rw_fail()
1153 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); in io_do_iopoll() local
1154 struct file *file = req->file; in io_do_iopoll()
1162 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1165 if (req->opcode == IORING_OP_URING_CMD) { in io_do_iopoll()
1168 ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); in io_do_iopoll()
1172 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_do_iopoll()
1181 /* iopoll may have completed current req */ in io_do_iopoll()
1183 READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1194 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); in io_do_iopoll() local
1197 if (!smp_load_acquire(&req->iopoll_completed)) in io_do_iopoll()
1200 req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0); in io_do_iopoll()
1201 if (req->opcode != IORING_OP_URING_CMD) in io_do_iopoll()
1202 io_req_rw_cleanup(req, 0); in io_do_iopoll()
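
The kernel-side handlers listed above service read/write SQEs submitted from userspace. As a hedged illustration only (not taken from the file above), the following self-contained sketch uses liburing to drive the fixed-buffer read path that io_prep_read_fixed()/io_read() handle in the kernel. It assumes liburing is installed; the file path and buffer size are arbitrary placeholders.

/* Minimal liburing sketch: one IORING_OP_READ_FIXED against a pre-registered
 * buffer. Assumes liburing headers/library are available; error handling is
 * abbreviated. Build with: cc example.c -luring */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd, ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Register one fixed buffer; the kernel pins it once and later resolves
	 * it via req->buf_index in io_prep_rw_fixed(). */
	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base || io_uring_register_buffers(&ring, &iov, 1) < 0)
		return 1;

	fd = open("/etc/hostname", O_RDONLY);	/* placeholder file */
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* buf_index 0 selects the buffer registered above */
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret && cqe->res >= 0)
		printf("read %d bytes\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}

Registering the buffer up front is what lets the kernel resolve req->buf_index against ctx->user_bufs in io_prep_rw_fixed() rather than pinning pages on every request.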