xref: /linux/io_uring/io_uring.h (revision 2946f08ae9ed650b94e0ffebcdfdda8de76bd926)
#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important, it should just not be an otherwise
	 * valid error code, yet less than -MAX_ERRNO and valid internally.
	 */
	IOU_REQUEUE		= -3072,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
	 * the poll runner that multishot should be removed and the result
	 * set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

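/*
 * Per-waiter state for a task sleeping in CQ wait: the waitqueue entry
 * hooked into ctx->cq_wait, the CQ tail the waiter wants to reach, and the
 * (min) timeout bookkeeping used while waiting.
 */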
struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned cq_min_tail;
	unsigned nr_timeouts;
	int hit_timeout;
	ktime_t min_timeout;
	ktime_t timeout;
	struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
				 unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);
void io_req_queue_iowq(struct io_kiocb *req);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been setup with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (ctx->submitter_task->flags & PF_EXITING)
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
#endif
}

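/* Queue task_work for the request, with no extra notification flags. */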
static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

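/*
 * Flush deferred completions, but only if any are batched up or an
 * explicit CQ flush has been requested.
 */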
static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cq_flush)
		__io_submit_flush_completions(ctx);
}

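/*
 * Walk a request and every request linked behind it, e.g.:
 *
 *	io_for_each_link(req, head)
 *		nr_reqs++;
 */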
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

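/*
 * Hand out the next free CQE from the cached range, refilling the cache
 * from the CQ ring (potentially going down the overflow path) when it runs
 * dry. CQE32 rings consume two slots per completion.
 */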
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		/* copy the extra CQE32 payload, then clear it in the req */
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(req->big_cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, cqe);
	return true;
}

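/*
 * Mark the request as failed. A failed request always posts a CQE, so if
 * CQE skip was requested, transfer the skip to the CQEs of the requests
 * linked behind it instead.
 */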
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

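/*
 * Drop the uring_lock again if the request was issued unlocked (i.e. from
 * an async worker) and io_ring_submit_lock() had to take it.
 */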
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

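/* Wake up anyone poll()'ing on the ring fd itself. */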
static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	/*
	 * SQPOLL must use the actual sqring head, as using the cached_sq_head
	 * is race prone if the SQPOLL thread has grabbed entries but not yet
	 * committed them to the ring. For !SQPOLL, this doesn't matter, but
	 * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
	 * just read the actual sqring head unconditionally.
	 */
	return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
	bool ret = false;

	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER) {
		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			__set_current_state(TASK_RUNNING);
			resume_user_mode_work(NULL);
		}
		if (current->io_uring) {
			unsigned int count = 0;

			__set_current_state(TASK_RUNNING);
			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
			if (count)
				ret = true;
		}
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		ret = true;
	}

	return ret;
}

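/* True if there's generic or deferred (DEFER_TASKRUN) task_work to run. */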
static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

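/* tw handlers run with ctx->uring_lock already held, just assert that. */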
static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

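/*
 * Charge @nr request refs against the task's cached batch, refilling the
 * cache only when it runs out.
 */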
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

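/*
 * Pop a request off the cache. The caller must ensure the cache isn't
 * empty, e.g. by going through io_alloc_req() below.
 */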
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

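/*
 * With IORING_SETUP_DEFER_TASKRUN, only the task that submitted the ring
 * may run its deferred task_work.
 */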
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

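/* Set the result and punt the completion to task_work. */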
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}

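/*
 * Cached variant of file_can_poll(): the result is latched in
 * REQ_F_CAN_POLL on first check, avoiding re-deriving it on every issue.
 */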
static inline bool io_file_can_poll(struct io_kiocb *req)
{
	if (req->flags & REQ_F_CAN_POLL)
		return true;
	if (req->file && file_can_poll(req->file)) {
		req->flags |= REQ_F_CAN_POLL;
		return true;
	}
	return false;
}

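/*
 * Read the current time using the ring's configured clock source
 * (CLOCK_MONOTONIC unless changed via ring registration).
 */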
static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
	if (ctx->clockid == CLOCK_MONOTONIC)
		return ktime_get();

	return ktime_get_with_offset(ctx->clock_offset);
}

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
	       !llist_empty(&ctx->work_llist);
}
#endif