Lines Matching +full:supports +full:- +full:cqe (all hits below are from io_uring/rw.c in the Linux kernel source)

1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/blk-mq.h>
37 if (req->flags & REQ_F_SUPPORT_NOWAIT) in io_file_supports_nowait()
43 return vfs_poll(req->file, &pt) & mask; in io_file_supports_nowait()
55 uiov = u64_to_user_ptr(rw->addr); in io_iov_compat_buffer_select_prep()
57 return -EFAULT; in io_iov_compat_buffer_select_prep()
58 if (__get_user(clen, &uiov->iov_len)) in io_iov_compat_buffer_select_prep()
59 return -EFAULT; in io_iov_compat_buffer_select_prep()
61 return -EINVAL; in io_iov_compat_buffer_select_prep()
63 rw->len = clen; in io_iov_compat_buffer_select_prep()
74 if (rw->len != 1) in io_iov_buffer_select_prep()
75 return -EINVAL; in io_iov_buffer_select_prep()
78 if (req->ctx->compat) in io_iov_buffer_select_prep()
82 uiov = u64_to_user_ptr(rw->addr); in io_iov_buffer_select_prep()
84 return -EFAULT; in io_iov_buffer_select_prep()
85 rw->len = iov.iov_len; in io_iov_buffer_select_prep()
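
The two *_buffer_select_prep() hits above fetch exactly one struct iovec from user memory, using the 32-bit compat layout when the ring was created by a compat task, and keep only the length. A standalone sketch of that idea (the struct and function names are illustrative, and memcpy() stands in for the kernel's copy_from_user()/__get_user()):

#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/uio.h>

/* 32-bit iovec layout used by compat (32-bit) tasks; name is illustrative. */
struct compat_iovec32 {
	uint32_t iov_base;
	uint32_t iov_len;
};

/*
 * Fetch exactly one iovec and keep only its length, mirroring the
 * buffer-select prep above.  memcpy() stands in for copy_from_user();
 * in the kernel the source is untrusted user memory, and the compat
 * length check rejects values that would be negative as a compat_ssize_t.
 */
static int fetch_single_iovec_len(const void *uptr, bool compat, size_t *len_out)
{
	if (compat) {
		struct compat_iovec32 civ;

		memcpy(&civ, uptr, sizeof(civ));
		if (civ.iov_len > INT_MAX)
			return -EINVAL;
		*len_out = civ.iov_len;
		return 0;
	} else {
		struct iovec iv;

		memcpy(&iv, uptr, sizeof(iv));
		*len_out = iv.iov_len;
		return 0;
	}
}
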
93 const struct io_issue_def *def = &io_issue_defs[req->opcode]; in __io_import_iovec()
100 buf = u64_to_user_ptr(rw->addr); in __io_import_iovec()
101 sqe_len = rw->len; in __io_import_iovec()
103 if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) { in __io_import_iovec()
107 return -ENOBUFS; in __io_import_iovec()
108 rw->addr = (unsigned long) buf; in __io_import_iovec()
109 rw->len = sqe_len; in __io_import_iovec()
112 return import_ubuf(ddir, buf, sqe_len, &io->iter); in __io_import_iovec()
115 if (io->free_iovec) { in __io_import_iovec()
116 nr_segs = io->free_iov_nr; in __io_import_iovec()
117 iov = io->free_iovec; in __io_import_iovec()
119 iov = &io->fast_iov; in __io_import_iovec()
122 ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter, in __io_import_iovec()
123 req->ctx->compat); in __io_import_iovec()
127 req->flags |= REQ_F_NEED_CLEANUP; in __io_import_iovec()
128 io->free_iov_nr = io->iter.nr_segs; in __io_import_iovec()
129 kfree(io->free_iovec); in __io_import_iovec()
130 io->free_iovec = iov; in __io_import_iovec()
145 iov_iter_save_state(&io->iter, &io->iter_state); in io_import_iovec()
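
__io_import_iovec() either maps a single user buffer straight into the iterator (import_ubuf()) or imports a full iovec array, reusing the allocation cached in free_iovec/free_iov_nr when it is large enough. A rough userspace analogue of the reuse part, with an invented iov_cache struct:

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Illustrative cache of a previously imported iovec array. */
struct iov_cache {
	struct iovec *vec;
	unsigned int nr;	/* capacity of vec[] */
};

/*
 * Copy nr_src iovecs into a reusable array, growing it only when the
 * cached allocation is too small -- the same reuse idea as free_iovec /
 * free_iov_nr above.  The kernel also handles the single-buffer and
 * buffer-select fast paths before ever touching the array.
 */
static int import_vec_cached(struct iov_cache *c,
			     const struct iovec *src, unsigned int nr_src)
{
	if (nr_src > c->nr) {
		struct iovec *nv = realloc(c->vec, nr_src * sizeof(*nv));

		if (!nv)
			return -1;
		c->vec = nv;
		c->nr = nr_src;
	}
	memcpy(c->vec, src, nr_src * sizeof(*src));
	return 0;
}
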
151 if (rw->free_iovec) { in io_rw_iovec_free()
152 kfree(rw->free_iovec); in io_rw_iovec_free()
153 rw->free_iov_nr = 0; in io_rw_iovec_free()
154 rw->free_iovec = NULL; in io_rw_iovec_free()
160 struct io_async_rw *rw = req->async_data; in io_rw_recycle()
167 iov = rw->free_iovec; in io_rw_recycle()
168 if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) { in io_rw_recycle()
171 req->async_data = NULL; in io_rw_recycle()
172 req->flags &= ~REQ_F_ASYNC_DATA; in io_rw_recycle()
179 * Disable quick recycling for anything that's gone through io-wq. in io_req_rw_cleanup()
184 * task io-wq in io_req_rw_cleanup()
186 * punt to io-wq in io_req_rw_cleanup()
189 * ->ki_complete() in io_req_rw_cleanup()
194 * iov_iter_count() <- look at iov_iter again in io_req_rw_cleanup()
196 * which can lead to a UAF. This is only possible for io-wq offload in io_req_rw_cleanup()
197 * as the cleanup can run in parallel. As io-wq is not the fast path, in io_req_rw_cleanup()
201 * path should assume that a successful (or -EIOCBQUEUED) return can in io_req_rw_cleanup()
205 if (!(req->flags & REQ_F_REFCOUNT)) { in io_req_rw_cleanup()
206 req->flags &= ~REQ_F_NEED_CLEANUP; in io_req_rw_cleanup()
213 struct io_ring_ctx *ctx = req->ctx; in io_rw_alloc_async()
216 rw = io_alloc_cache_get(&ctx->rw_cache); in io_rw_alloc_async()
218 if (rw->free_iovec) { in io_rw_alloc_async()
219 kasan_mempool_unpoison_object(rw->free_iovec, in io_rw_alloc_async()
220 rw->free_iov_nr * sizeof(struct iovec)); in io_rw_alloc_async()
221 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_alloc_async()
223 req->flags |= REQ_F_ASYNC_DATA; in io_rw_alloc_async()
224 req->async_data = rw; in io_rw_alloc_async()
229 rw = req->async_data; in io_rw_alloc_async()
230 rw->free_iovec = NULL; in io_rw_alloc_async()
231 rw->free_iov_nr = 0; in io_rw_alloc_async()
233 rw->bytes_done = 0; in io_rw_alloc_async()
237 return -ENOMEM; in io_rw_alloc_async()
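
io_rw_alloc_async() tries the per-ring cache first and only allocates a fresh io_async_rw on a miss; io_rw_recycle() is the matching put side, and the io_req_rw_cleanup() comment above explains why the quick put is skipped for requests that went through io-wq. A generic get-or-allocate/put sketch of that pattern, with made-up names and none of the KASAN poisoning:

#include <stdlib.h>
#include <stdbool.h>

#define CACHE_SLOTS 32

/* Trivial LIFO cache of recyclable objects; purely illustrative. */
struct obj_cache {
	void *slot[CACHE_SLOTS];
	unsigned int nr;
};

static void *cache_get_or_alloc(struct obj_cache *c, size_t size)
{
	if (c->nr)
		return c->slot[--c->nr];	/* hit: reuse a recycled object */
	return calloc(1, size);			/* miss: allocate fresh */
}

/*
 * Put side: recycle into the cache unless the request was offloaded to
 * a worker that may still look at the object (the io-wq case in the
 * comment above).  The kernel defers that case to the request's normal
 * cleanup path; here the sketch simply frees it.
 */
static void cache_put(struct obj_cache *c, void *obj, bool offloaded)
{
	if (!offloaded && c->nr < CACHE_SLOTS) {
		c->slot[c->nr++] = obj;
		return;
	}
	free(obj);
}
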
246 return -ENOMEM; in io_prep_rw_setup()
251 rw = req->async_data; in io_prep_rw_setup()
256 iov_iter_save_state(&rw->iter, &rw->iter_state); in io_prep_rw_setup()
267 rw->kiocb.ki_pos = READ_ONCE(sqe->off); in io_prep_rw()
268 /* used for fixed read/write too - just read unconditionally */ in io_prep_rw()
269 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
271 ioprio = READ_ONCE(sqe->ioprio); in io_prep_rw()
277 rw->kiocb.ki_ioprio = ioprio; in io_prep_rw()
279 rw->kiocb.ki_ioprio = get_current_ioprio(); in io_prep_rw()
281 rw->kiocb.dio_complete = NULL; in io_prep_rw()
283 rw->addr = READ_ONCE(sqe->addr); in io_prep_rw()
284 rw->len = READ_ONCE(sqe->len); in io_prep_rw()
285 rw->flags = READ_ONCE(sqe->rw_flags); in io_prep_rw()
302 const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT); in io_prep_rwv()
312 * Have to do this validation here, as this is in io_read() rw->len in io_prep_rwv()
332 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw_fixed()
341 if (unlikely(req->buf_index >= ctx->nr_user_bufs)) in io_prep_rw_fixed()
342 return -EFAULT; in io_prep_rw_fixed()
343 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); in io_prep_rw_fixed()
344 req->imu = ctx->user_bufs[index]; in io_prep_rw_fixed()
347 io = req->async_data; in io_prep_rw_fixed()
348 ret = io_import_fixed(ddir, &io->iter, req->imu, rw->addr, rw->len); in io_prep_rw_fixed()
349 iov_iter_save_state(&io->iter, &io->iter_state); in io_prep_rw_fixed()
373 if (!(req->flags & REQ_F_BUFFER_SELECT)) in io_read_mshot_prep()
374 return -EINVAL; in io_read_mshot_prep()
380 if (rw->addr || rw->len) in io_read_mshot_prep()
381 return -EINVAL; in io_read_mshot_prep()
383 req->flags |= REQ_F_APOLL_MULTISHOT; in io_read_mshot_prep()
389 io_rw_iovec_free(req->async_data); in io_readv_writev_cleanup()
396 if (rw->kiocb.ki_pos != -1) in io_kiocb_update_pos()
397 return &rw->kiocb.ki_pos; in io_kiocb_update_pos()
399 if (!(req->file->f_mode & FMODE_STREAM)) { in io_kiocb_update_pos()
400 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
401 rw->kiocb.ki_pos = req->file->f_pos; in io_kiocb_update_pos()
402 return &rw->kiocb.ki_pos; in io_kiocb_update_pos()
405 rw->kiocb.ki_pos = 0; in io_kiocb_update_pos()
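
io_kiocb_update_pos() decides where the I/O happens: an explicit offset if the SQE supplied one (ki_pos != -1), the file's current position for regular files, and no position at all for streams. A userspace analogue, where an offset of -1 means "use and update the current position":

#include <unistd.h>
#include <sys/types.h>

/*
 * Read using an explicit offset when one is supplied, otherwise fall
 * back to the file's current position -- roughly what the ki_pos
 * handling above does (streams such as pipes never have a position).
 */
static ssize_t read_at(int fd, void *buf, size_t len, off_t offset)
{
	if (offset != (off_t)-1)
		return pread(fd, buf, len, offset);	/* explicit offset */
	return read(fd, buf, len);			/* current position */
}
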
412 struct io_async_rw *io = req->async_data; in io_resubmit_prep()
414 iov_iter_restore(&io->iter, &io->iter_state); in io_resubmit_prep()
419 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
420 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
424 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
425 !(ctx->flags & IORING_SETUP_IOPOLL))) in io_rw_should_reissue()
430 * -EAGAIN. in io_rw_should_reissue()
432 if (percpu_ref_is_dying(&ctx->refs)) in io_rw_should_reissue()
435 * Play it safe and assume not safe to re-import and reissue if we're in io_rw_should_reissue()
438 if (!same_thread_group(req->task, current) || !in_task()) in io_rw_should_reissue()
454 if (req->flags & REQ_F_ISREG) { in io_req_end_write()
457 kiocb_end_write(&rw->kiocb); in io_req_end_write()
469 if (rw->kiocb.ki_flags & IOCB_WRITE) { in io_req_io_end()
471 fsnotify_modify(req->file); in io_req_io_end()
473 fsnotify_access(req->file); in io_req_io_end()
479 if (unlikely(res != req->cqe.res)) { in __io_complete_rw_common()
480 if (res == -EAGAIN && io_rw_should_reissue(req)) { in __io_complete_rw_common()
486 req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; in __io_complete_rw_common()
490 req->cqe.res = res; in __io_complete_rw_common()
497 struct io_async_rw *io = req->async_data; in io_fixup_rw_res()
500 if (req_has_async_data(req) && io->bytes_done > 0) { in io_fixup_rw_res()
502 res = io->bytes_done; in io_fixup_rw_res()
504 res += io->bytes_done; in io_fixup_rw_res()
512 struct kiocb *kiocb = &rw->kiocb; in io_req_rw_complete()
514 if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) { in io_req_rw_complete()
515 long res = kiocb->dio_complete(rw->kiocb.private); in io_req_rw_complete()
522 if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) in io_req_rw_complete()
523 req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0); in io_req_rw_complete()
534 if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) { in io_complete_rw()
539 req->io_task_work.func = io_req_rw_complete; in io_complete_rw()
548 if (kiocb->ki_flags & IOCB_WRITE) in io_complete_rw_iopoll()
550 if (unlikely(res != req->cqe.res)) { in io_complete_rw_iopoll()
551 if (res == -EAGAIN && io_rw_should_reissue(req)) { in io_complete_rw_iopoll()
552 req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; in io_complete_rw_iopoll()
555 req->cqe.res = res; in io_complete_rw_iopoll()
558 /* order with io_iopoll_complete() checking ->iopoll_completed */ in io_complete_rw_iopoll()
559 smp_store_release(&req->iopoll_completed, 1); in io_complete_rw_iopoll()
565 if (ret == -EIOCBQUEUED) in io_rw_done()
571 case -ERESTARTSYS: in io_rw_done()
572 case -ERESTARTNOINTR: in io_rw_done()
573 case -ERESTARTNOHAND: in io_rw_done()
574 case -ERESTART_RESTARTBLOCK: in io_rw_done()
580 ret = -EINTR; in io_rw_done()
585 INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll, in io_rw_done()
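
io_rw_done() folds the -ERESTART* family into -EINTR, since a ring submission has no syscall frame that could be restarted. A minimal standalone version of that mapping (the numeric values are the kernel-internal ones from include/linux/errno.h):

#include <errno.h>

/* Kernel-internal restart codes (values from include/linux/errno.h). */
#define ERESTARTSYS		512
#define ERESTARTNOINTR		513
#define ERESTARTNOHAND		514
#define ERESTART_RESTARTBLOCK	516

/*
 * There is no user syscall to restart on behalf of a ring request, so
 * every restart code becomes -EINTR before it reaches the CQE.
 */
static long map_restart_to_eintr(long ret)
{
	switch (ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		return -EINTR;
	default:
		return ret;
	}
}
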
595 if (ret >= 0 && req->flags & REQ_F_CUR_POS) in kiocb_done()
596 req->file->f_pos = rw->kiocb.ki_pos; in kiocb_done()
597 if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) { in kiocb_done()
610 io_rw_done(&rw->kiocb, ret); in kiocb_done()
613 if (req->flags & REQ_F_REISSUE) { in kiocb_done()
614 req->flags &= ~REQ_F_REISSUE; in kiocb_done()
616 return -EAGAIN; in kiocb_done()
623 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos; in io_kiocb_ppos()
627 * For files that don't have ->read_iter() and ->write_iter(), handle them
628 * by looping over ->read() or ->write() manually.
632 struct kiocb *kiocb = &rw->kiocb; in loop_rw_iter()
633 struct file *file = kiocb->ki_filp; in loop_rw_iter()
639 * support non-blocking either. For the latter, this just causes in loop_rw_iter()
642 if (kiocb->ki_flags & IOCB_HIPRI) in loop_rw_iter()
643 return -EOPNOTSUPP; in loop_rw_iter()
644 if ((kiocb->ki_flags & IOCB_NOWAIT) && in loop_rw_iter()
645 !(kiocb->ki_filp->f_flags & O_NONBLOCK)) in loop_rw_iter()
646 return -EAGAIN; in loop_rw_iter()
656 addr = iter->ubuf + iter->iov_offset; in loop_rw_iter()
662 addr = u64_to_user_ptr(rw->addr); in loop_rw_iter()
663 len = rw->len; in loop_rw_iter()
667 nr = file->f_op->read(file, addr, len, ppos); in loop_rw_iter()
669 nr = file->f_op->write(file, addr, len, ppos); in loop_rw_iter()
680 rw->addr += nr; in loop_rw_iter()
681 rw->len -= nr; in loop_rw_iter()
682 if (!rw->len) in loop_rw_iter()
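
The comment above loop_rw_iter() explains the fallback for files that lack ->read_iter()/->write_iter(): loop over plain ->read()/->write(). A userspace sketch of the same fallback, walking an iovec array with read(2) and coping with short reads:

#include <unistd.h>
#include <errno.h>
#include <sys/uio.h>

/*
 * Emulate a vectored read for a file that only supports plain read():
 * consume each iovec in turn and tolerate short reads, roughly what
 * loop_rw_iter() does with the request's iterator.
 */
static ssize_t readv_fallback(int fd, const struct iovec *iov, int iovcnt)
{
	ssize_t total = 0;

	for (int i = 0; i < iovcnt; i++) {
		char *p = iov[i].iov_base;
		size_t left = iov[i].iov_len;

		while (left) {
			ssize_t nr = read(fd, p, left);

			if (nr < 0)
				return total ? total : -errno;
			if (nr == 0)
				return total;	/* EOF */
			p += nr;
			left -= nr;
			total += nr;
		}
	}
	return total;
}
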
706 struct io_kiocb *req = wait->private; in io_async_buf_func()
715 rw->kiocb.ki_flags &= ~IOCB_WAITQ; in io_async_buf_func()
716 list_del_init(&wait->entry); in io_async_buf_func()
735 struct io_async_rw *io = req->async_data; in io_rw_should_retry()
736 struct wait_page_queue *wait = &io->wpq; in io_rw_should_retry()
738 struct kiocb *kiocb = &rw->kiocb; in io_rw_should_retry()
740 /* never retry for NOWAIT, we just complete with -EAGAIN */ in io_rw_should_retry()
741 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
745 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI)) in io_rw_should_retry()
753 !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC)) in io_rw_should_retry()
756 wait->wait.func = io_async_buf_func; in io_rw_should_retry()
757 wait->wait.private = req; in io_rw_should_retry()
758 wait->wait.flags = 0; in io_rw_should_retry()
759 INIT_LIST_HEAD(&wait->wait.entry); in io_rw_should_retry()
760 kiocb->ki_flags |= IOCB_WAITQ; in io_rw_should_retry()
761 kiocb->ki_flags &= ~IOCB_NOWAIT; in io_rw_should_retry()
762 kiocb->ki_waitq = wait; in io_rw_should_retry()
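
io_rw_should_retry() arms a page-cache waitqueue entry (IOCB_WAITQ) so a buffered read that would block is retried from task context once the data is ready, instead of failing the request with -EAGAIN. A loose userspace analogue of "fail fast, wait for readiness, retry" using poll(2) on a pipe or socket fd:

#include <unistd.h>
#include <errno.h>
#include <poll.h>

/*
 * Try a non-blocking read first; if it would block, wait for the fd to
 * become readable and retry once.  The kernel-side version waits on the
 * page cache (wait_page_queue) rather than on the fd, but the shape of
 * the retry is the same: arm a waiter, drop NOWAIT, try again.
 */
static ssize_t read_retry_on_ready(int fd, void *buf, size_t len)
{
	ssize_t nr = read(fd, buf, len);

	if (nr >= 0 || errno != EAGAIN)
		return nr;

	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) < 0)
		return -1;
	return read(fd, buf, len);
}
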
768 struct file *file = rw->kiocb.ki_filp; in io_iter_do_read()
770 if (likely(file->f_op->read_iter)) in io_iter_do_read()
771 return file->f_op->read_iter(&rw->kiocb, iter); in io_iter_do_read()
772 else if (file->f_op->read) in io_iter_do_read()
775 return -EINVAL; in io_iter_do_read()
780 return req->flags & REQ_F_ISREG || in need_complete_io()
781 S_ISBLK(file_inode(req->file)->i_mode); in need_complete_io()
787 struct kiocb *kiocb = &rw->kiocb; in io_rw_init_file()
788 struct io_ring_ctx *ctx = req->ctx; in io_rw_init_file()
789 struct file *file = req->file; in io_rw_init_file()
792 if (unlikely(!(file->f_mode & mode))) in io_rw_init_file()
793 return -EBADF; in io_rw_init_file()
795 if (!(req->flags & REQ_F_FIXED_FILE)) in io_rw_init_file()
796 req->flags |= io_file_get_flags(file); in io_rw_init_file()
798 kiocb->ki_flags = file->f_iocb_flags; in io_rw_init_file()
799 ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type); in io_rw_init_file()
802 kiocb->ki_flags |= IOCB_ALLOC_CACHE; in io_rw_init_file()
806 * supports async. Otherwise it's impossible to use O_NONBLOCK files in io_rw_init_file()
809 if (kiocb->ki_flags & IOCB_NOWAIT || in io_rw_init_file()
810 ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT)))) in io_rw_init_file()
811 req->flags |= REQ_F_NOWAIT; in io_rw_init_file()
813 if (ctx->flags & IORING_SETUP_IOPOLL) { in io_rw_init_file()
814 if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) in io_rw_init_file()
815 return -EOPNOTSUPP; in io_rw_init_file()
817 kiocb->private = NULL; in io_rw_init_file()
818 kiocb->ki_flags |= IOCB_HIPRI; in io_rw_init_file()
819 kiocb->ki_complete = io_complete_rw_iopoll; in io_rw_init_file()
820 req->iopoll_completed = 0; in io_rw_init_file()
822 if (kiocb->ki_flags & IOCB_HIPRI) in io_rw_init_file()
823 return -EINVAL; in io_rw_init_file()
824 kiocb->ki_complete = io_complete_rw; in io_rw_init_file()
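
io_rw_init_file() only accepts a request on an IORING_SETUP_IOPOLL ring when the I/O is IOCB_DIRECT and the file implements ->iopoll; everything else gets -EOPNOTSUPP. Seen from userspace, that means an IOPOLL ring wants files opened with O_DIRECT. A minimal liburing sketch (error handling trimmed; treat the exact liburing calls as my recollection rather than a reference):

#define _GNU_SOURCE		/* O_DIRECT */
#include <fcntl.h>
#include <unistd.h>
#include <liburing.h>

/*
 * IOPOLL rings poll for completions instead of relying on interrupts,
 * which only works for O_DIRECT I/O on files whose driver implements
 * ->iopoll.  Buffered files on such a ring fail with -EOPNOTSUPP.
 */
int submit_iopoll_read(const char *path, void *buf, unsigned len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
	fd = open(path, O_RDONLY | O_DIRECT);	/* buf and len must be aligned */

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, len, 0);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);	/* busy-polls on IOPOLL rings */
	if (!ret) {
		ret = cqe->res;
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	close(fd);
	return ret;
}
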
834 struct io_async_rw *io = req->async_data; in __io_read()
835 struct kiocb *kiocb = &rw->kiocb; in __io_read()
847 req->cqe.res = iov_iter_count(&io->iter); in __io_read()
852 return -EAGAIN; in __io_read()
853 kiocb->ki_flags |= IOCB_NOWAIT; in __io_read()
855 /* Ensure we clear previously set non-block flag */ in __io_read()
856 kiocb->ki_flags &= ~IOCB_NOWAIT; in __io_read()
861 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); in __io_read()
865 ret = io_iter_do_read(rw, &io->iter); in __io_read()
868 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT in __io_read()
869 * issue, even though they should be returning -EAGAIN. To be safe, in __io_read()
872 if (ret == -EOPNOTSUPP && force_nonblock) in __io_read()
873 ret = -EAGAIN; in __io_read()
875 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { in __io_read()
876 req->flags &= ~REQ_F_REISSUE; in __io_read()
879 return -EAGAIN; in __io_read()
880 /* IOPOLL retry should happen for io-wq threads */ in __io_read()
881 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_read()
884 if (req->flags & REQ_F_NOWAIT) in __io_read()
887 } else if (ret == -EIOCBQUEUED) { in __io_read()
889 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || in __io_read()
890 (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) { in __io_read()
900 iov_iter_restore(&io->iter, &io->iter_state); in __io_read()
908 iov_iter_advance(&io->iter, ret); in __io_read()
909 if (!iov_iter_count(&io->iter)) in __io_read()
911 io->bytes_done += ret; in __io_read()
912 iov_iter_save_state(&io->iter, &io->iter_state); in __io_read()
916 kiocb->ki_flags &= ~IOCB_WAITQ; in __io_read()
917 return -EAGAIN; in __io_read()
920 req->cqe.res = iov_iter_count(&io->iter); in __io_read()
923 * we get -EIOCBQUEUED, then we'll get a notification when the in __io_read()
927 ret = io_iter_do_read(rw, &io->iter); in __io_read()
928 if (ret == -EIOCBQUEUED) in __io_read()
931 kiocb->ki_flags &= ~IOCB_WAITQ; in __io_read()
932 iov_iter_restore(&io->iter, &io->iter_state); in __io_read()
960 return -EBADFD; in io_read_mshot()
965 * If we get -EAGAIN, recycle our buffer and just let normal poll in io_read_mshot()
968 if (ret == -EAGAIN) { in io_read_mshot()
970 * Reset rw->len to 0 again to avoid clamping future mshot in io_read_mshot()
974 rw->len = 0; in io_read_mshot()
977 return -EAGAIN; in io_read_mshot()
985 * armed, if it's still set. Put our buffer and post a CQE. If in io_read_mshot()
986 * we fail to post a CQE, or multishot is no longer set, then in io_read_mshot()
990 rw->len = 0; /* similarly to above, reset len to 0 */ in io_read_mshot()
1002 return -EAGAIN; in io_read_mshot()
1007 * Either an error, or we've hit overflow posting the CQE. For any in io_read_mshot()
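
io_read_mshot() keeps a multishot read armed: on -EAGAIN it recycles the selected buffer and lets poll re-trigger the read, on success it posts a CQE carrying IORING_CQE_F_MORE, and it only stops on an error or when posting the CQE hits overflow. On the userspace side the contract is simply: keep reaping CQEs until one arrives without IORING_CQE_F_MORE (arming uses a provided-buffer group and, in recent liburing, io_uring_prep_read_multishot(); treat that helper as an assumption). A sketch of the reaping loop:

#include <liburing.h>

/*
 * Drain completions from an armed multishot read.  Each CQE carries
 * IORING_CQE_F_MORE while the request stays armed; a CQE without it
 * (or with a negative res) means the multishot terminated and must be
 * re-submitted if more data is wanted.
 */
static int drain_multishot(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	for (;;) {
		int ret = io_uring_wait_cqe(ring, &cqe);

		if (ret < 0)
			return ret;
		if (cqe->res < 0 || !(cqe->flags & IORING_CQE_F_MORE)) {
			int res = cqe->res;

			io_uring_cqe_seen(ring, cqe);
			return res;	/* multishot terminated */
		}
		/* cqe->res bytes landed in a ring-provided buffer here. */
		io_uring_cqe_seen(ring, cqe);
	}
}
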
1022 if (!(req->flags & REQ_F_ISREG)) in io_kiocb_start_write()
1024 if (!(kiocb->ki_flags & IOCB_NOWAIT)) { in io_kiocb_start_write()
1029 inode = file_inode(kiocb->ki_filp); in io_kiocb_start_write()
1030 ret = sb_start_write_trylock(inode->i_sb); in io_kiocb_start_write()
1032 __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE); in io_kiocb_start_write()
1040 struct io_async_rw *io = req->async_data; in io_write()
1041 struct kiocb *kiocb = &rw->kiocb; in io_write()
1048 req->cqe.res = iov_iter_count(&io->iter); in io_write()
1056 if (!(kiocb->ki_flags & IOCB_DIRECT) && in io_write()
1057 !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) && in io_write()
1058 (req->flags & REQ_F_ISREG)) in io_write()
1061 kiocb->ki_flags |= IOCB_NOWAIT; in io_write()
1063 /* Ensure we clear previously set non-block flag */ in io_write()
1064 kiocb->ki_flags &= ~IOCB_NOWAIT; in io_write()
1069 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); in io_write()
1074 return -EAGAIN; in io_write()
1075 kiocb->ki_flags |= IOCB_WRITE; in io_write()
1077 if (likely(req->file->f_op->write_iter)) in io_write()
1078 ret2 = req->file->f_op->write_iter(kiocb, &io->iter); in io_write()
1079 else if (req->file->f_op->write) in io_write()
1080 ret2 = loop_rw_iter(WRITE, rw, &io->iter); in io_write()
1082 ret2 = -EINVAL; in io_write()
1084 if (req->flags & REQ_F_REISSUE) { in io_write()
1085 req->flags &= ~REQ_F_REISSUE; in io_write()
1086 ret2 = -EAGAIN; in io_write()
1090 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just in io_write()
1093 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) in io_write()
1094 ret2 = -EAGAIN; in io_write()
1096 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
1098 if (!force_nonblock || ret2 != -EAGAIN) { in io_write()
1099 /* IOPOLL retry should happen for io-wq threads */ in io_write()
1100 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) in io_write()
1103 if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { in io_write()
1104 trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2, in io_write()
1105 req->cqe.res, ret2); in io_write()
1112 iov_iter_save_state(&io->iter, &io->iter_state); in io_write()
1113 io->bytes_done += ret2; in io_write()
1115 if (kiocb->ki_flags & IOCB_WRITE) in io_write()
1117 return -EAGAIN; in io_write()
1123 iov_iter_restore(&io->iter, &io->iter_state); in io_write()
1124 if (kiocb->ki_flags & IOCB_WRITE) in io_write()
1126 return -EAGAIN; in io_write()
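
io_write() treats a short write to a regular file or block device as progress: it advances the iterator past what completed, accumulates it in bytes_done, traces the short write, and retries the remainder asynchronously, while -EOPNOTSUPP from a nowait raw block-device write is mapped back to -EAGAIN. The userspace shape of the same idea is a write-all loop:

#include <unistd.h>
#include <errno.h>

/*
 * Keep writing until everything submitted has landed, accumulating the
 * bytes already written (like io->bytes_done above) and retrying only
 * the remainder.
 */
static ssize_t write_all(int fd, const char *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t nr = write(fd, buf + done, len - done);

		if (nr < 0) {
			if (errno == EINTR || errno == EAGAIN)
				continue;
			return done ? (ssize_t)done : -errno;
		}
		done += nr;
	}
	return done;
}
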
1134 res = io_fixup_rw_res(req, req->cqe.res); in io_rw_fail()
1135 io_req_set_res(req, res, req->cqe.flags); in io_rw_fail()
1149 if (ctx->poll_multi_queue || force_nonspin) in io_do_iopoll()
1152 wq_list_for_each(pos, start, &ctx->iopoll_list) { in io_do_iopoll()
1154 struct file *file = req->file; in io_do_iopoll()
1162 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1165 if (req->opcode == IORING_OP_URING_CMD) { in io_do_iopoll()
1169 ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob, in io_do_iopoll()
1174 ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags); in io_do_iopoll()
1183 READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1196 /* order with io_complete_rw_iopoll(), e.g. ->result updates */ in io_do_iopoll()
1197 if (!smp_load_acquire(&req->iopoll_completed)) in io_do_iopoll()
1200 req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0); in io_do_iopoll()
1201 if (req->opcode != IORING_OP_URING_CMD) in io_do_iopoll()
1207 pos = start ? start->next : ctx->iopoll_list.first; in io_do_iopoll()
1208 wq_list_cut(&ctx->iopoll_list, prev, start); in io_do_iopoll()
1210 if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs))) in io_do_iopoll()
1212 ctx->submit_state.compl_reqs.first = pos; in io_do_iopoll()
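
The two "order with ..." comments above pair up: io_complete_rw_iopoll() publishes the result and then sets iopoll_completed with smp_store_release(), and io_do_iopoll() checks the flag with smp_load_acquire() before trusting the result. A standalone C11 illustration of that release/acquire pairing:

#include <stdatomic.h>

/* Completion record shared between the issuing and the polling side. */
struct poll_completion {
	long result;		/* plain data, published by the flag */
	atomic_int completed;	/* 0 = in flight, 1 = done */
};

/* Completion side: write the result, then release-publish the flag
 * (smp_store_release() in the fragment above). */
static void complete_iopoll(struct poll_completion *c, long res)
{
	c->result = res;
	atomic_store_explicit(&c->completed, 1, memory_order_release);
}

/* Polling side: acquire-load the flag before reading ->result
 * (smp_load_acquire() in io_do_iopoll()).  Returns 0 if not done yet. */
static int reap_iopoll(struct poll_completion *c, long *res)
{
	if (!atomic_load_explicit(&c->completed, memory_order_acquire))
		return 0;
	*res = c->result;
	return 1;
}
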
1221 if (rw->free_iovec) { in io_rw_cache_free()
1222 kasan_mempool_unpoison_object(rw->free_iovec, in io_rw_cache_free()
1223 rw->free_iov_nr * sizeof(struct iovec)); in io_rw_cache_free()