#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "alloc_cache.h"
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"
#include "opdef.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,	/* deprecated, use IOU_COMPLETE */
	IOU_COMPLETE		= 0,

	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * The request has more work to do and should be retried. io_uring will
	 * attempt to wait on the file for eligible opcodes, but otherwise
	 * it'll be handed to iowq for blocking execution. It works for normal
	 * requests as well as for the multishot mode.
	 */
	IOU_RETRY		= -EAGAIN,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important; it should just not be an otherwise
	 * valid error code, while staying within the -MAX_ERRNO range so it
	 * remains valid internally.
	 */
	IOU_REQUEUE		= -3072,
};

struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned cq_min_tail;
	unsigned nr_timeouts;
	int hit_timeout;
	ktime_t min_timeout;
	ktime_t timeout;
	struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
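/*
 * Note: 'dist' above is a signed difference, so the comparison stays
 * correct even when the 32-bit CQ tail wraps around. The second clause
 * forces a wakeup whenever a timeout has fired since the waiter sampled
 * nr_timeouts, regardless of how many completions have arrived.
 */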
#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
			 unsigned int cq_entries, size_t *sq_offset);
int io_uring_fill_params(unsigned entries, struct io_uring_params *p);
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
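/*
 * Illustrative sketch (not part of this header): posting an
 * out-of-band CQE that isn't tied to a submitted SQE, e.g. for a
 * multishot-style notification:
 *
 *	if (!io_post_aux_cqe(ctx, user_data, res, cflags))
 *		... the CQE could not be queued for userspace ...
 */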
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);
void io_req_queue_iowq(struct io_kiocb *req);

int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
			bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		lockdep_assert_held(&ctx->uring_lock);

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been set up with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (!percpu_ref_is_dying(&ctx->refs))
			lockdep_assert(current == ctx->submitter_task);
	}
#endif
}

static inline bool io_is_compat(struct io_ring_ctx *ctx)
{
	return IS_ENABLED(CONFIG_COMPAT) && unlikely(ctx->compat);
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cq_flush)
		__io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}
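/*
 * Note: the CQE cache is the window [cqe_cached, cqe_sentinel) into the
 * CQ ring, replenished by io_cqe_cache_refill() when exhausted. On
 * IORING_SETUP_CQE32 rings each completion occupies two slots, hence
 * the second cqe_cached increment above.
 */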
static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static inline bool io_defer_get_uncommited_cqe(struct io_ring_ctx *ctx,
					       struct io_uring_cqe **cqe_ret)
{
	io_lockdep_assert_cq_locked(ctx);

	ctx->cq_extra++;
	ctx->submit_state.cq_flush = true;
	return io_get_cqe(ctx, cqe_ret);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(req->big_cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, cqe);
	return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
					      struct io_kiocb *req)
{
	if (cache) {
		req->async_data = io_cache_alloc(cache, GFP_KERNEL);
	} else {
		const struct io_issue_def *def = &io_issue_defs[req->opcode];

		WARN_ON_ONCE(!def->async_size);
		req->async_data = kmalloc(def->async_size, GFP_KERNEL);
	}
	if (req->async_data)
		req->flags |= REQ_F_ASYNC_DATA;
	return req->async_data;
}
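/*
 * Note: callers of io_uring_alloc_async_data() must check for a NULL
 * return. On success REQ_F_ASYNC_DATA is already set, and the memory
 * comes either from the per-ctx alloc cache or from a plain kmalloc()
 * sized by the opcode's async_size.
 */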
static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock in that case.
	 */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
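/*
 * Illustrative usage (sketch): an opcode handler that touches ctx state
 * requiring the uring_lock brackets the access with the pair above, so
 * the same code works both inline and from an io-wq worker:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... access ctx state protected by the uring_lock ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */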
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks; io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	/*
	 * SQPOLL must use the actual sqring head, as using the cached_sq_head
	 * is race prone if the SQPOLL thread has grabbed entries but not yet
	 * committed them to the ring. For !SQPOLL, this doesn't matter, but
	 * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
	 * just read the actual sqring head unconditionally.
	 */
	return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
	bool ret = false;

	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER) {
		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			__set_current_state(TASK_RUNNING);
			resume_user_mode_work(NULL);
		}
		if (current->io_uring) {
			unsigned int count = 0;

			__set_current_state(TASK_RUNNING);
			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
			if (count)
				ret = true;
		}
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		ret = true;
	}

	return ret;
}
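/*
 * Note: io_run_task_work() returns whether any task_work actually ran,
 * so callers waiting on completions can tell whether new CQEs may have
 * been generated and re-check their wake condition before sleeping.
 */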
static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
	return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || io_local_work_pending(ctx);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
	lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
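/*
 * Note: requests queued via io_req_complete_defer() sit on
 * submit_state.compl_reqs until io_submit_flush_completions() reaps
 * them and posts their CQEs in a batch.
 */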
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}
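/*
 * Illustrative allocation pattern (sketch): submission paths pull
 * requests from the per-ctx free list and back off if it can't be
 * refilled:
 *
 *	struct io_kiocb *req;
 *
 *	if (unlikely(!io_alloc_req(ctx, &req)))
 *		... e.g. stop submitting further SQEs ...
 */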
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

/*
 * Terminate the request if either of these conditions is true:
 *
 * 1) It's being executed by the original task, but that task is marked
 *    with PF_EXITING as it's exiting.
 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
 *    our fallback task_work.
 */
static inline bool io_should_terminate_tw(void)
{
	return current->flags & (PF_KTHREAD | PF_EXITING);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}

static inline bool io_file_can_poll(struct io_kiocb *req)
{
	if (req->flags & REQ_F_CAN_POLL)
		return true;
	if (req->file && file_can_poll(req->file)) {
		req->flags |= REQ_F_CAN_POLL;
		return true;
	}
	return false;
}

static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
	if (ctx->clockid == CLOCK_MONOTONIC)
		return ktime_get();

	return ktime_get_with_offset(ctx->clock_offset);
}

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
	       io_local_work_pending(ctx);
}
#endif /* IOU_CORE_H */