Lines matching defs:req — declarations and inline helpers that define or use the identifier req, apparently from io_uring's internal header (io_uring/io_uring.h). The number leading each entry is the line number in that file; lines that do not mention req are omitted, so some entries are fragments of larger functions.
80 void io_req_defer_failed(struct io_kiocb *req, s32 res);
83 bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
84 bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe src_cqe[2]);
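io_req_post_cqe() posts an auxiliary CQE for a request without completing it, which is how multishot-style operations emit one CQE per event; io_req_post_cqe32() is the 32-byte CQE variant. A minimal sketch of the multishot pattern, assuming kernel context; demo_post_event() is illustrative:

    /* Post one CQE per event; the request itself stays armed. */
    static int demo_post_event(struct io_kiocb *req, s32 res)
    {
        if (!io_req_post_cqe(req, res, IORING_CQE_F_MORE))
            return -ECANCELED;  /* CQE posting failed: end the multishot */
        return 0;
    }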
87 void io_req_track_inflight(struct io_kiocb *req);
88 struct file *io_file_get_normal(struct io_kiocb *req, int fd);
89 struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
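io_file_get_fixed() resolves an fd against the ring's registered file table, while io_file_get_normal() takes a reference on an ordinary task file. A sketch of issue-time file assignment, assuming REQ_F_FIXED_FILE selects between the two; demo_assign_file() is illustrative:

    static bool demo_assign_file(struct io_kiocb *req, int fd,
                                 unsigned int issue_flags)
    {
        if (req->flags & REQ_F_FIXED_FILE)
            req->file = io_file_get_fixed(req, fd, issue_flags);
        else
            req->file = io_file_get_normal(req, fd);
        return req->file != NULL;
    }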
92 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
93 void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
94 void io_req_task_queue(struct io_kiocb *req);
95 void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw);
96 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
97 void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw);
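io_req_task_queue_fail() is the usual way to terminate a request from a context that cannot post its CQE directly, e.g. a cancelation path:

    /* sketch: the CQE carrying -ECANCELED is posted from task work */
    io_req_task_queue_fail(req, -ECANCELED);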
105 void io_req_queue_iowq(struct io_kiocb *req);
107 int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw);
115 void io_free_req(struct io_kiocb *req);
116 void io_queue_next(struct io_kiocb *req);
155 static inline void io_req_task_work_add(struct io_kiocb *req)
157 __io_req_task_work_add(req, 0);
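The wrapper above queues the request's io_task_work callback with no extra flags. A sketch of the full punt pattern; demo_tw_cb() and demo_punt() are illustrative, and the callback signature mirrors io_req_task_complete() above:

    static void demo_tw_cb(struct io_kiocb *req, io_tw_token_t tw)
    {
        /* runs later in the submitting task's context */
        io_req_set_res(req, 0, 0);
        io_req_task_complete(req, tw);
    }

    static void demo_punt(struct io_kiocb *req)
    {
        req->io_task_work.func = demo_tw_cb;
        io_req_task_work_add(req);
    }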
203 struct io_kiocb *req)
216 memcpy(cqe, &req->cqe, sizeof(*cqe));
218 memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
219 memset(&req->big_cqe, 0, sizeof(req->big_cqe));
223 trace_io_uring_complete(req->ctx, req, cqe);
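Lines 203-223 are fragments of the inline helper that copies a request's staged CQE into the ring. A reconstructed sketch, with the control flow the listing omits (the io_get_cqe() failure path and the IORING_SETUP_CQE32 branch) filled in as assumptions:

    static inline bool demo_fill_cqe_req(struct io_ring_ctx *ctx,
                                         struct io_kiocb *req)
    {
        struct io_uring_cqe *cqe;

        if (unlikely(!io_get_cqe(ctx, &cqe)))
            return false;   /* CQ ring full: caller takes the overflow path */

        memcpy(cqe, &req->cqe, sizeof(*cqe));
        if (ctx->flags & IORING_SETUP_CQE32) {
            /* upper 16 bytes of a big CQE, then clear the staged copy */
            memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
            memset(&req->big_cqe, 0, sizeof(req->big_cqe));
        }
        trace_io_uring_complete(req->ctx, req, cqe);
        return true;
    }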
227 static inline void req_set_fail(struct io_kiocb *req)
229 req->flags |= REQ_F_FAIL;
230 if (req->flags & REQ_F_CQE_SKIP) {
231 req->flags &= ~REQ_F_CQE_SKIP;
232 req->flags |= REQ_F_SKIP_LINK_CQES;
236 static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
238 req->cqe.res = res;
239 req->cqe.flags = cflags;
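req_set_fail() and io_req_set_res() together form the standard epilogue of an opcode handler: stage the result in req->cqe, and on error mark the request failed so the rest of a link chain is canceled. A sketch; demo_issue() is illustrative, and IOU_OK is the core's "completion staged" return code:

    static int demo_issue(struct io_kiocb *req, unsigned int issue_flags)
    {
        int ret = 0;    /* stands in for the op's real work */

        if (ret < 0)
            req_set_fail(req);  /* also breaks the link chain */
        io_req_set_res(req, ret, 0);
        return IOU_OK;
    }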
243 struct io_kiocb *req)
246 req->async_data = io_cache_alloc(cache, GFP_KERNEL);
248 const struct io_issue_def *def = &io_issue_defs[req->opcode];
251 req->async_data = kmalloc(def->async_size, GFP_KERNEL);
253 if (req->async_data)
254 req->flags |= REQ_F_ASYNC_DATA;
255 return req->async_data;
258 static inline bool req_has_async_data(struct io_kiocb *req)
260 return req->flags & REQ_F_ASYNC_DATA;
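Lines 243-255 are the body of the async-data allocator (io_uring_alloc_async_data() in recent trees; the name itself is not among the matched lines): given a cache it allocates from it, otherwise it falls back to kmalloc() of the opcode's async_size, and on success it sets REQ_F_ASYNC_DATA so req_has_async_data() reflects the allocation. A sketch of an op keeping state across a punt; struct demo_state and demo_setup_async() are illustrative:

    struct demo_state {
        int progress;
    };

    static int demo_setup_async(struct io_kiocb *req)
    {
        struct demo_state *ds;

        if (req_has_async_data(req))
            return 0;   /* already allocated on an earlier pass */

        /* NULL cache: assumed to fall back to kmalloc(def->async_size) */
        ds = io_uring_alloc_async_data(NULL, req);
        if (!ds)
            return -ENOMEM;
        ds->progress = 0;
        return 0;
    }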
263 static inline void io_put_file(struct io_kiocb *req)
265 if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
266 fput(req->file);
408 static inline void io_req_complete_defer(struct io_kiocb *req)
409 __must_hold(&req->ctx->uring_lock)
411 struct io_submit_state *state = &req->ctx->submit_state;
413 lockdep_assert_held(&req->ctx->uring_lock);
415 wq_list_add_tail(&req->comp_list, &state->compl_reqs);
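io_req_complete_defer() batches the completion onto submit_state.compl_reqs instead of posting a CQE immediately; the submit path flushes that list before dropping uring_lock. A sketch of completing inline while the lock is held; demo_complete_locked() is illustrative:

    static void demo_complete_locked(struct io_kiocb *req, int ret)
        __must_hold(&req->ctx->uring_lock)
    {
        io_req_set_res(req, ret, 0);
        io_req_complete_defer(req); /* CQE posted when the batch is flushed */
    }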
443 struct io_kiocb *req;
445 req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
447 return req;
450 static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
456 *req = io_extract_req(ctx);
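io_extract_req() (lines 443-447) pops a cached request off submit_state.free_list; io_alloc_req() wraps it with a refill when the list is empty. A sketch of submission-side usage:

    struct io_kiocb *req;

    if (unlikely(!io_alloc_req(ctx, &req)))
        return -EAGAIN; /* refill failed, likely out of memory */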
484 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
486 io_req_set_res(req, res, 0);
487 req->io_task_work.func = io_req_task_complete;
488 io_req_task_work_add(req);
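io_req_queue_tw_complete() packages the punt pattern sketched earlier — stage the result, point io_task_work.func at io_req_task_complete(), queue it — so callers that cannot post a CQE directly can use a one-liner:

    io_req_queue_tw_complete(req, -ECANCELED); /* e.g. from a cancel path */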
502 static inline bool io_file_can_poll(struct io_kiocb *req)
504 if (req->flags & REQ_F_CAN_POLL)
506 if (req->file && file_can_poll(req->file)) {
507 req->flags |= REQ_F_CAN_POLL;
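io_file_can_poll() caches file_can_poll() in REQ_F_CAN_POLL so repeated checks avoid dereferencing the file (the listing cuts off before the function's return statements). It typically gates -EAGAIN handling: only a pollable file is worth arming poll on for a retry. A sketch; demo_rw() is illustrative:

    static int demo_rw(struct io_kiocb *req, unsigned int issue_flags)
    {
        int ret = -EAGAIN;  /* stands in for a nonblocking I/O attempt */

        if (ret == -EAGAIN && io_file_can_poll(req))
            return -EAGAIN; /* core arms poll and retries on readiness */
        io_req_set_res(req, ret, 0);
        return IOU_OK;
    }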