Lines matching refs:req

38 static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)  in io_file_supports_nowait()  argument
41 if (req->flags & REQ_F_SUPPORT_NOWAIT) in io_file_supports_nowait()
44 if (io_file_can_poll(req)) { in io_file_supports_nowait()
47 return vfs_poll(req->file, &pt) & mask; in io_file_supports_nowait()
64 static int io_iov_buffer_select_prep(struct io_kiocb *req) in io_iov_buffer_select_prep() argument
68 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_iov_buffer_select_prep()
73 if (io_is_compat(req->ctx)) in io_iov_buffer_select_prep()
83 static int io_import_vec(int ddir, struct io_kiocb *req, in io_import_vec() argument
100 io_is_compat(req->ctx)); in io_import_vec()
104 req->flags |= REQ_F_NEED_CLEANUP; in io_import_vec()
110 static int __io_import_rw_buffer(int ddir, struct io_kiocb *req, in __io_import_rw_buffer() argument
114 const struct io_issue_def *def = &io_issue_defs[req->opcode]; in __io_import_rw_buffer()
115 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in __io_import_rw_buffer()
119 if (def->vectored && !(req->flags & REQ_F_BUFFER_SELECT)) in __io_import_rw_buffer()
120 return io_import_vec(ddir, req, io, sel->addr, sqe_len); in __io_import_rw_buffer()
122 if (io_do_buffer_select(req)) { in __io_import_rw_buffer()
123 *sel = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags); in __io_import_rw_buffer()
132 static inline int io_import_rw_buffer(int rw, struct io_kiocb *req, in io_import_rw_buffer() argument
139 ret = __io_import_rw_buffer(rw, req, io, sel, issue_flags); in io_import_rw_buffer()
147 static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags) in io_rw_recycle() argument
149 struct io_async_rw *rw = req->async_data; in io_rw_recycle()
158 if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) in io_rw_recycle()
159 io_req_async_data_clear(req, 0); in io_rw_recycle()
162 static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags) in io_req_rw_cleanup() argument
191 if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) { in io_req_rw_cleanup()
192 req->flags &= ~REQ_F_NEED_CLEANUP; in io_req_rw_cleanup()
193 io_rw_recycle(req, issue_flags); in io_req_rw_cleanup()
197 static int io_rw_alloc_async(struct io_kiocb *req) in io_rw_alloc_async() argument
199 struct io_ring_ctx *ctx = req->ctx; in io_rw_alloc_async()
202 rw = io_uring_alloc_async_data(&ctx->rw_cache, req); in io_rw_alloc_async()
206 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_alloc_async()
225 static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir, in io_prep_rw_pi() argument
239 io = req->async_data; in io_prep_rw_pi()
247 req->flags |= REQ_F_HAS_METADATA; in io_prep_rw_pi()
252 static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, in __io_prep_rw() argument
255 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in __io_prep_rw()
261 if (io_rw_alloc_async(req)) in __io_prep_rw()
263 io = req->async_data; in __io_prep_rw()
267 req->buf_index = READ_ONCE(sqe->buf_index); in __io_prep_rw()
268 io->buf_group = req->buf_index; in __io_prep_rw()
284 if (req->ctx->flags & IORING_SETUP_IOPOLL) in __io_prep_rw()
302 return io_prep_rw_pi(req, rw, ddir, attr_ptr, attr_type_mask); in __io_prep_rw()
307 static int io_rw_do_import(struct io_kiocb *req, int ddir) in io_rw_do_import() argument
311 if (io_do_buffer_select(req)) in io_rw_do_import()
314 return io_import_rw_buffer(ddir, req, req->async_data, &sel, 0); in io_rw_do_import()
317 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rw() argument
322 ret = __io_prep_rw(req, sqe, ddir); in io_prep_rw()
326 return io_rw_do_import(req, ddir); in io_prep_rw()
329 int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_read() argument
331 return io_prep_rw(req, sqe, ITER_DEST); in io_prep_read()
334 int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_write() argument
336 return io_prep_rw(req, sqe, ITER_SOURCE); in io_prep_write()
339 static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rwv() argument
344 ret = io_prep_rw(req, sqe, ddir); in io_prep_rwv()
347 if (!(req->flags & REQ_F_BUFFER_SELECT)) in io_prep_rwv()
354 return io_iov_buffer_select_prep(req); in io_prep_rwv()
357 int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_readv() argument
359 return io_prep_rwv(req, sqe, ITER_DEST); in io_prep_readv()
362 int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_writev() argument
364 return io_prep_rwv(req, sqe, ITER_SOURCE); in io_prep_writev()
367 static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags, in io_init_rw_fixed() argument
370 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_init_rw_fixed()
371 struct io_async_rw *io = req->async_data; in io_init_rw_fixed()
377 ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir, in io_init_rw_fixed()
383 int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_read_fixed() argument
385 return __io_prep_rw(req, sqe, ITER_DEST); in io_prep_read_fixed()
388 int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_write_fixed() argument
390 return __io_prep_rw(req, sqe, ITER_SOURCE); in io_prep_write_fixed()
393 static int io_rw_import_reg_vec(struct io_kiocb *req, in io_rw_import_reg_vec() argument
397 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_import_reg_vec()
401 ret = io_import_reg_vec(ddir, &io->iter, req, &io->vec, in io_rw_import_reg_vec()
406 req->flags &= ~REQ_F_IMPORT_BUFFER; in io_rw_import_reg_vec()
410 static int io_rw_prep_reg_vec(struct io_kiocb *req) in io_rw_prep_reg_vec() argument
412 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_prep_reg_vec()
413 struct io_async_rw *io = req->async_data; in io_rw_prep_reg_vec()
417 return io_prep_reg_iovec(req, &io->vec, uvec, rw->len); in io_rw_prep_reg_vec()
420 int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_readv_fixed() argument
424 ret = __io_prep_rw(req, sqe, ITER_DEST); in io_prep_readv_fixed()
427 return io_rw_prep_reg_vec(req); in io_prep_readv_fixed()
430 int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_writev_fixed() argument
434 ret = __io_prep_rw(req, sqe, ITER_SOURCE); in io_prep_writev_fixed()
437 return io_rw_prep_reg_vec(req); in io_prep_writev_fixed()
444 int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_read_mshot_prep() argument
446 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_read_mshot_prep()
450 if (!(req->flags & REQ_F_BUFFER_SELECT)) in io_read_mshot_prep()
453 ret = __io_prep_rw(req, sqe, ITER_DEST); in io_read_mshot_prep()
460 req->flags |= REQ_F_APOLL_MULTISHOT; in io_read_mshot_prep()
464 void io_readv_writev_cleanup(struct io_kiocb *req) in io_readv_writev_cleanup() argument
466 lockdep_assert_held(&req->ctx->uring_lock); in io_readv_writev_cleanup()
467 io_rw_recycle(req, 0); in io_readv_writev_cleanup()
470 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) in io_kiocb_update_pos() argument
472 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_kiocb_update_pos()
477 if (!(req->file->f_mode & FMODE_STREAM)) { in io_kiocb_update_pos()
478 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
479 rw->kiocb.ki_pos = req->file->f_pos; in io_kiocb_update_pos()
487 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
490 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_should_reissue()
491 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
492 struct io_async_rw *io = req->async_data; in io_rw_should_reissue()
493 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
497 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
516 static void io_req_end_write(struct io_kiocb *req) in io_req_end_write() argument
518 if (req->flags & REQ_F_ISREG) { in io_req_end_write()
519 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_end_write()
529 static void io_req_io_end(struct io_kiocb *req) in io_req_io_end() argument
531 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_io_end()
534 io_req_end_write(req); in io_req_io_end()
535 fsnotify_modify(req->file); in io_req_io_end()
537 fsnotify_access(req->file); in io_req_io_end()
541 static void __io_complete_rw_common(struct io_kiocb *req, long res) in __io_complete_rw_common() argument
543 if (res == req->cqe.res) in __io_complete_rw_common()
545 if ((res == -EOPNOTSUPP || res == -EAGAIN) && io_rw_should_reissue(req)) { in __io_complete_rw_common()
546 req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; in __io_complete_rw_common()
548 req_set_fail(req); in __io_complete_rw_common()
549 req->cqe.res = res; in __io_complete_rw_common()
553 static inline int io_fixup_rw_res(struct io_kiocb *req, long res) in io_fixup_rw_res() argument
555 struct io_async_rw *io = req->async_data; in io_fixup_rw_res()
558 if (req_has_async_data(req) && io->bytes_done > 0) { in io_fixup_rw_res()
567 void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw) in io_req_rw_complete() argument
569 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_rw_complete()
575 io_req_set_res(req, io_fixup_rw_res(req, res), 0); in io_req_rw_complete()
578 io_req_io_end(req); in io_req_rw_complete()
580 if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) in io_req_rw_complete()
581 req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL); in io_req_rw_complete()
583 io_req_rw_cleanup(req, 0); in io_req_rw_complete()
584 io_req_task_complete(req, tw); in io_req_rw_complete()
590 struct io_kiocb *req = cmd_to_io_kiocb(rw); in io_complete_rw() local
593 __io_complete_rw_common(req, res); in io_complete_rw()
594 io_req_set_res(req, io_fixup_rw_res(req, res), 0); in io_complete_rw()
596 req->io_task_work.func = io_req_rw_complete; in io_complete_rw()
597 __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE); in io_complete_rw()
603 struct io_kiocb *req = cmd_to_io_kiocb(rw); in io_complete_rw_iopoll() local
606 io_req_end_write(req); in io_complete_rw_iopoll()
607 if (unlikely(res != req->cqe.res)) { in io_complete_rw_iopoll()
608 if (res == -EAGAIN && io_rw_should_reissue(req)) in io_complete_rw_iopoll()
609 req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; in io_complete_rw_iopoll()
611 req->cqe.res = res; in io_complete_rw_iopoll()
615 smp_store_release(&req->iopoll_completed, 1); in io_complete_rw_iopoll()
618 static inline void io_rw_done(struct io_kiocb *req, ssize_t ret) in io_rw_done() argument
620 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_done()
643 if (req->ctx->flags & IORING_SETUP_IOPOLL) in io_rw_done()
649 static int kiocb_done(struct io_kiocb *req, ssize_t ret, in kiocb_done() argument
652 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in kiocb_done()
653 unsigned final_ret = io_fixup_rw_res(req, ret); in kiocb_done()
655 if (ret >= 0 && req->flags & REQ_F_CUR_POS) in kiocb_done()
656 req->file->f_pos = rw->kiocb.ki_pos; in kiocb_done()
657 if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) { in kiocb_done()
660 __io_complete_rw_common(req, ret); in kiocb_done()
665 io_req_io_end(req); in kiocb_done()
667 cflags = io_put_kbuf(req, ret, sel->buf_list); in kiocb_done()
668 io_req_set_res(req, final_ret, cflags); in kiocb_done()
669 io_req_rw_cleanup(req, issue_flags); in kiocb_done()
672 io_rw_done(req, ret); in kiocb_done()
689 struct io_kiocb *req = cmd_to_io_kiocb(rw); in loop_rw_iter() local
705 if ((req->flags & REQ_F_BUF_NODE) && req->buf_node->buf->is_kbuf) in loop_rw_iter()
766 struct io_kiocb *req = wait->private; in io_async_buf_func() local
767 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_async_buf_func()
777 io_req_task_queue(req); in io_async_buf_func()
793 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
795 struct io_async_rw *io = req->async_data; in io_rw_should_retry()
797 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_should_retry()
804 if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA)) in io_rw_should_retry()
815 if (io_file_can_poll(req) || in io_rw_should_retry()
816 !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC)) in io_rw_should_retry()
820 wait->wait.private = req; in io_rw_should_retry()
841 static bool need_complete_io(struct io_kiocb *req) in need_complete_io() argument
843 return req->flags & REQ_F_ISREG || in need_complete_io()
844 S_ISBLK(file_inode(req->file)->i_mode); in need_complete_io()
847 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type) in io_rw_init_file() argument
849 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_init_file()
851 struct io_ring_ctx *ctx = req->ctx; in io_rw_init_file()
852 struct file *file = req->file; in io_rw_init_file()
858 if (!(req->flags & REQ_F_FIXED_FILE)) in io_rw_init_file()
859 req->flags |= io_file_get_flags(file); in io_rw_init_file()
873 ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT)))) in io_rw_init_file()
874 req->flags |= REQ_F_NOWAIT; in io_rw_init_file()
881 req->iopoll_completed = 0; in io_rw_init_file()
884 req->flags &= ~REQ_F_IOPOLL_STATE; in io_rw_init_file()
885 req->iopoll_start = ktime_get_ns(); in io_rw_init_file()
892 if (req->flags & REQ_F_HAS_METADATA) { in io_rw_init_file()
893 struct io_async_rw *io = req->async_data; in io_rw_init_file()
902 if (!(req->file->f_flags & O_DIRECT)) in io_rw_init_file()
911 static int __io_read(struct io_kiocb *req, struct io_br_sel *sel, in __io_read() argument
915 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in __io_read()
916 struct io_async_rw *io = req->async_data; in __io_read()
921 if (req->flags & REQ_F_IMPORT_BUFFER) { in __io_read()
922 ret = io_rw_import_reg_vec(req, io, ITER_DEST, issue_flags); in __io_read()
925 } else if (io_do_buffer_select(req)) { in __io_read()
926 ret = io_import_rw_buffer(ITER_DEST, req, io, sel, issue_flags); in __io_read()
930 ret = io_rw_init_file(req, FMODE_READ, READ); in __io_read()
933 req->cqe.res = iov_iter_count(&io->iter); in __io_read()
937 if (unlikely(!io_file_supports_nowait(req, EPOLLIN))) in __io_read()
945 ppos = io_kiocb_update_pos(req); in __io_read()
947 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); in __io_read()
963 if (io_file_can_poll(req)) in __io_read()
966 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_read()
969 if (req->flags & REQ_F_NOWAIT) in __io_read()
974 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || in __io_read()
975 (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) || in __io_read()
1002 if (!io_rw_should_retry(req)) { in __io_read()
1007 req->cqe.res = iov_iter_count(&io->iter); in __io_read()
1026 int io_read(struct io_kiocb *req, unsigned int issue_flags) in io_read() argument
1031 ret = __io_read(req, &sel, issue_flags); in io_read()
1033 return kiocb_done(req, ret, &sel, issue_flags); in io_read()
1035 if (req->flags & REQ_F_BUFFERS_COMMIT) in io_read()
1036 io_kbuf_recycle(req, sel.buf_list, issue_flags); in io_read()
1040 int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags) in io_read_mshot() argument
1042 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_read_mshot()
1050 if (!io_file_can_poll(req)) in io_read_mshot()
1055 ret = __io_read(req, &sel, issue_flags); in io_read_mshot()
1066 if (io_kbuf_recycle(req, sel.buf_list, issue_flags)) in io_read_mshot()
1070 io_kbuf_recycle(req, sel.buf_list, issue_flags); in io_read_mshot()
1072 req_set_fail(req); in io_read_mshot()
1073 } else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { in io_read_mshot()
1074 cflags = io_put_kbuf(req, ret, sel.buf_list); in io_read_mshot()
1082 cflags = io_put_kbuf(req, ret, sel.buf_list); in io_read_mshot()
1085 if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) { in io_read_mshot()
1092 io_poll_multishot_retry(req); in io_read_mshot()
1102 io_req_set_res(req, ret, cflags); in io_read_mshot()
1103 io_req_rw_cleanup(req, issue_flags); in io_read_mshot()
1107 static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb) in io_kiocb_start_write() argument
1112 if (!(req->flags & REQ_F_ISREG)) in io_kiocb_start_write()
1126 int io_write(struct io_kiocb *req, unsigned int issue_flags) in io_write() argument
1129 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_write()
1130 struct io_async_rw *io = req->async_data; in io_write()
1135 if (req->flags & REQ_F_IMPORT_BUFFER) { in io_write()
1136 ret = io_rw_import_reg_vec(req, io, ITER_SOURCE, issue_flags); in io_write()
1141 ret = io_rw_init_file(req, FMODE_WRITE, WRITE); in io_write()
1144 req->cqe.res = iov_iter_count(&io->iter); in io_write()
1148 if (unlikely(!io_file_supports_nowait(req, EPOLLOUT))) in io_write()
1153 !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) && in io_write()
1154 (req->flags & REQ_F_ISREG)) in io_write()
1163 ppos = io_kiocb_update_pos(req); in io_write()
1165 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); in io_write()
1169 if (unlikely(!io_kiocb_start_write(req, kiocb))) in io_write()
1173 if (likely(req->file->f_op->write_iter)) in io_write()
1174 ret2 = req->file->f_op->write_iter(kiocb, &io->iter); in io_write()
1175 else if (req->file->f_op->write) in io_write()
1187 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
1191 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) in io_write()
1194 if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { in io_write()
1195 trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2, in io_write()
1196 req->cqe.res, ret2); in io_write()
1207 io_req_end_write(req); in io_write()
1211 return kiocb_done(req, ret2, NULL, issue_flags); in io_write()
1217 io_req_end_write(req); in io_write()
1222 int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags) in io_read_fixed() argument
1226 ret = io_init_rw_fixed(req, issue_flags, ITER_DEST); in io_read_fixed()
1230 return io_read(req, issue_flags); in io_read_fixed()
1233 int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags) in io_write_fixed() argument
1237 ret = io_init_rw_fixed(req, issue_flags, ITER_SOURCE); in io_write_fixed()
1241 return io_write(req, issue_flags); in io_write_fixed()
1244 void io_rw_fail(struct io_kiocb *req) in io_rw_fail() argument
1248 res = io_fixup_rw_res(req, req->cqe.res); in io_rw_fail()
1249 io_req_set_res(req, res, req->cqe.flags); in io_rw_fail()
1252 static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob, in io_uring_classic_poll() argument
1255 struct file *file = req->file; in io_uring_classic_poll()
1257 if (req->opcode == IORING_OP_URING_CMD) { in io_uring_classic_poll()
1260 ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); in io_uring_classic_poll()
1263 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_uring_classic_poll()
1269 static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req) in io_hybrid_iopoll_delay() argument
1276 if (req->flags & REQ_F_IOPOLL_STATE) in io_hybrid_iopoll_delay()
1286 req->flags |= REQ_F_IOPOLL_STATE; in io_hybrid_iopoll_delay()
1303 static int io_uring_hybrid_poll(struct io_kiocb *req, in io_uring_hybrid_poll() argument
1306 struct io_ring_ctx *ctx = req->ctx; in io_uring_hybrid_poll()
1310 sleep_time = io_hybrid_iopoll_delay(ctx, req); in io_uring_hybrid_poll()
1311 ret = io_uring_classic_poll(req, iob, poll_flags); in io_uring_hybrid_poll()
1312 runtime = ktime_get_ns() - req->iopoll_start - sleep_time; in io_uring_hybrid_poll()
1339 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); in io_do_iopoll() local
1347 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1351 ret = io_uring_hybrid_poll(req, &iob, poll_flags); in io_do_iopoll()
1353 ret = io_uring_classic_poll(req, &iob, poll_flags); in io_do_iopoll()
1362 READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1373 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); in io_do_iopoll() local
1376 if (!smp_load_acquire(&req->iopoll_completed)) in io_do_iopoll()
1379 req->cqe.flags = io_put_kbuf(req, req->cqe.res, NULL); in io_do_iopoll()
1380 if (req->opcode != IORING_OP_URING_CMD) in io_do_iopoll()
1381 io_req_rw_cleanup(req, 0); in io_do_iopoll()
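
The read path indexed above (io_prep_read() through __io_read() and kiocb_done()) is driven from userspace by an IORING_OP_READ submission. Below is a minimal sketch of such a submission, assuming liburing is available; the file path, buffer size, and queue depth are illustrative and not taken from the listing.

/*
 * Minimal userspace sketch (assumes liburing): submit one IORING_OP_READ
 * and reap its completion. Kernel-side, this request is prepared by
 * io_prep_read() and issued via io_read()/__io_read() shown above.
 * The path, buffer size, and queue depth below are illustrative only.
 */
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	int fd;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	fd = open("/etc/hostname", O_RDONLY);	/* illustrative file */
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("read returned %d\n", cqe->res);	/* CQE res is set from req->cqe.res at completion */
		io_uring_cqe_seen(&ring, cqe);
	}

	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}

A fixed-buffer variant would use io_uring_register_buffers() and io_uring_prep_read_fixed(), entering the kernel through io_prep_read_fixed() and io_read_fixed() from the same listing.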