// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"
#include "poll.h"

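/*
 * Free routine for entries in the per-ctx cmd allocation cache: release
 * the iovec backing the async command data, then the entry itself.
 */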
void io_cmd_cache_free(const void *entry)
{
        struct io_async_cmd *ac = (struct io_async_cmd *)entry;

        io_vec_free(&ac->vec);
        kfree(ac);
}

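/*
 * Try to recycle the async command data into the per-ctx cmd_cache.
 * Caching is only safe while the ring is locked, so bail if this issue
 * was unlocked. On a successful cache put, ownership of the entry moves
 * to the cache and the request drops its async_data and cleanup flags.
 */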
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        struct io_async_cmd *ac = req->async_data;

        if (issue_flags & IO_URING_F_UNLOCKED)
                return;

        io_alloc_cache_vec_kasan(&ac->vec);
        if (ac->vec.nr > IO_VEC_CACHE_SOFT_CAP)
                io_vec_free(&ac->vec);

        if (io_alloc_cache_put(&req->ctx->cmd_cache, ac)) {
                ioucmd->sqe = NULL;
                req->async_data = NULL;
                req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
        }
}

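/*
 * Opcode cleanup hook. Passing issue_flags of 0 (not IO_URING_F_UNLOCKED)
 * lets io_req_uring_cleanup() take the cache recycling path above.
 */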
void io_uring_cmd_cleanup(struct io_kiocb *req)
{
        io_req_uring_cleanup(req, 0);
}

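/*
 * Walk the cancelable uring_cmd list and ask the owning driver to cancel
 * each matching in-flight command by issuing ->uring_cmd() with
 * IO_URING_F_CANCEL set. Returns true if cancellation was attempted for
 * at least one command.
 */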
bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
                                   struct io_uring_task *tctx, bool cancel_all)
{
        struct hlist_node *tmp;
        struct io_kiocb *req;
        bool ret = false;

        lockdep_assert_held(&ctx->uring_lock);

        hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
                                  hash_node) {
                struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
                                struct io_uring_cmd);
                struct file *file = req->file;

                if (!cancel_all && req->tctx != tctx)
                        continue;

                if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
                        file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
                                                   IO_URING_F_COMPLETE_DEFER);
                        ret = true;
                }
        }
        io_submit_flush_completions(ctx);
        return ret;
}

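/*
 * Drop the command from the ctx cancelable list, if it was marked. The
 * submit lock protects the list against a concurrent cancellation walk.
 */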
static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
                                        unsigned int issue_flags)
{
        struct io_kiocb *req = cmd_to_io_kiocb(cmd);
        struct io_ring_ctx *ctx = req->ctx;

        if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
                return;

        cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
        io_ring_submit_lock(ctx, issue_flags);
        hlist_del(&req->hash_node);
        io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
                                  unsigned int issue_flags)
{
        struct io_kiocb *req = cmd_to_io_kiocb(cmd);
        struct io_ring_ctx *ctx = req->ctx;

        if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
                cmd->flags |= IORING_URING_CMD_CANCELABLE;
                io_ring_submit_lock(ctx, issue_flags);
                hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
                io_ring_submit_unlock(ctx, issue_flags);
        }
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);

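/*
 * task_work shim: run the driver's registered callback, flagging a
 * terminating task via IO_URING_F_TASK_DEAD so the driver knows the task
 * is going away and should only clean up.
 */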
static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
{
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        unsigned int flags = IO_URING_F_COMPLETE_DEFER;

        if (io_should_terminate_tw())
                flags |= IO_URING_F_TASK_DEAD;

        /* task_work executor checks the deferred list completion */
        ioucmd->task_work_cb(ioucmd, flags);
}

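/*
 * Queue @task_work_cb for execution from task context on behalf of this
 * command. A minimal driver-side sketch (callback name illustrative
 * only), going through the io_uring_cmd_do_in_task_lazy() wrapper:
 *
 *      static void my_cmd_tw_cb(struct io_uring_cmd *cmd, unsigned int flags)
 *      {
 *              io_uring_cmd_done(cmd, 0, 0, flags);
 *      }
 *
 *      io_uring_cmd_do_in_task_lazy(cmd, my_cmd_tw_cb);
 */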
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
                               void (*task_work_cb)(struct io_uring_cmd *, unsigned),
                               unsigned flags)
{
        struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

        if (WARN_ON_ONCE(req->flags & REQ_F_APOLL_MULTISHOT))
                return;

        ioucmd->task_work_cb = task_work_cb;
        req->io_task_work.func = io_uring_cmd_work;
        __io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

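/* Stash the two extra result words carried by a CQE32 completion. */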
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
                                          u64 extra1, u64 extra2)
{
        req->big_cqe.extra1 = extra1;
        req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
                       unsigned issue_flags)
{
        struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

        if (WARN_ON_ONCE(req->flags & REQ_F_APOLL_MULTISHOT))
                return;

        io_uring_cmd_del_cancelable(ioucmd, issue_flags);

        if (ret < 0)
                req_set_fail(req);

        io_req_set_res(req, ret, 0);
        if (req->ctx->flags & IORING_SETUP_CQE32)
                io_req_set_cqe32_extra(req, res2, 0);
        io_req_uring_cleanup(req, issue_flags);
        if (req->ctx->flags & IORING_SETUP_IOPOLL) {
                /* order with io_iopoll_req_issued() checking ->iopoll_completed */
                smp_store_release(&req->iopoll_completed, 1);
        } else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
                if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
                        return;
                io_req_complete_defer(req);
        } else {
                req->io_task_work.func = io_req_task_complete;
                io_req_task_work_add(req);
        }
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

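/*
 * Prep for IORING_OP_URING_CMD: validate the command flags, record cmd_op
 * and (for IORING_URING_CMD_FIXED) the registered buffer index, and attach
 * async data from the cmd_cache. Only a reference to the SQE is taken
 * here; io_uring_cmd_sqe_copy() makes a stable copy if one is needed.
 */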
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        struct io_async_cmd *ac;

        if (sqe->__pad1)
                return -EINVAL;

        ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
        if (ioucmd->flags & ~IORING_URING_CMD_MASK)
                return -EINVAL;

        if (ioucmd->flags & IORING_URING_CMD_FIXED)
                req->buf_index = READ_ONCE(sqe->buf_index);

        ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

        ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
        if (!ac)
                return -ENOMEM;
        ioucmd->sqe = sqe;
        return 0;
}

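/*
 * Copy the SQE into request-owned storage, so the driver can keep
 * dereferencing ioucmd->sqe after the SQ ring slot has been recycled.
 */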
void io_uring_cmd_sqe_copy(struct io_kiocb *req)
{
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        struct io_async_cmd *ac = req->async_data;

        /* Should not happen, as REQ_F_SQE_COPIED covers this */
        if (WARN_ON_ONCE(ioucmd->sqe == ac->sqes))
                return;
        memcpy(ac->sqes, ioucmd->sqe, uring_sqe_size(req->ctx));
        ioucmd->sqe = ac->sqes;
}

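/*
 * Issue path: after the LSM check, translate ring setup flags (SQE128,
 * CQE32, compat, IOPOLL) into issue_flags and call the file's
 * ->uring_cmd() handler. -EIOCBQUEUED and -EAGAIN are propagated for the
 * core to handle; any other return completes the request here.
 */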
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        struct io_ring_ctx *ctx = req->ctx;
        struct file *file = req->file;
        int ret;

        if (!file->f_op->uring_cmd)
                return -EOPNOTSUPP;

        ret = security_uring_cmd(ioucmd);
        if (ret)
                return ret;

        if (ctx->flags & IORING_SETUP_SQE128)
                issue_flags |= IO_URING_F_SQE128;
        if (ctx->flags & IORING_SETUP_CQE32)
                issue_flags |= IO_URING_F_CQE32;
        if (io_is_compat(ctx))
                issue_flags |= IO_URING_F_COMPAT;
        if (ctx->flags & IORING_SETUP_IOPOLL) {
                if (!file->f_op->uring_cmd_iopoll)
                        return -EOPNOTSUPP;
                issue_flags |= IO_URING_F_IOPOLL;
                req->iopoll_completed = 0;
                if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
                        /* make sure every req only blocks once */
                        req->flags &= ~REQ_F_IOPOLL_STATE;
                        req->iopoll_start = ktime_get_ns();
                }
        }

        ret = file->f_op->uring_cmd(ioucmd, issue_flags);
        if (ret == -EAGAIN) {
                ioucmd->flags |= IORING_URING_CMD_REISSUE;
                return ret;
        }
        if (ret == -EIOCBQUEUED)
                return ret;
        if (ret < 0)
                req_set_fail(req);
        io_req_uring_cleanup(req, issue_flags);
        io_req_set_res(req, ret, 0);
        return IOU_COMPLETE;
}

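/*
 * Import a registered (fixed) buffer into @iter for a command issued with
 * IORING_URING_CMD_FIXED, using the buffer index recorded at prep time.
 */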
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
                              struct iov_iter *iter,
                              struct io_uring_cmd *ioucmd,
                              unsigned int issue_flags)
{
        struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

        if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
                return -EINVAL;

        return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);

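/*
 * Vectored counterpart of io_uring_cmd_import_fixed(): copy in the user
 * iovec array, then import it against the registered buffer.
 */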
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
                                  const struct iovec __user *uvec,
                                  size_t uvec_segs,
                                  int ddir, struct iov_iter *iter,
                                  unsigned issue_flags)
{
        struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
        struct io_async_cmd *ac = req->async_data;
        int ret;

        if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
                return -EINVAL;

        ret = io_prep_reg_iovec(req, &ac->vec, uvec, uvec_segs);
        if (ret)
                return ret;

        return io_import_reg_vec(ddir, iter, req, &ac->vec, uvec_segs,
                                 issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed_vec);

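/* Punt the command to io-wq, where the issue may block. */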
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
        struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

        io_req_queue_iowq(req);
}

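/*
 * Arm multishot poll for a command: the first call arms async poll for
 * @mask and returns -EIOCBQUEUED (or -ECANCELED if arming failed); later
 * calls find REQ_F_APOLL_MULTISHOT already set and return 0.
 */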
int io_cmd_poll_multishot(struct io_uring_cmd *cmd,
                          unsigned int issue_flags, __poll_t mask)
{
        struct io_kiocb *req = cmd_to_io_kiocb(cmd);
        int ret;

        if (likely(req->flags & REQ_F_APOLL_MULTISHOT))
                return 0;

        req->flags |= REQ_F_APOLL_MULTISHOT;
        mask &= ~EPOLLONESHOT;

        ret = io_arm_apoll(req, issue_flags, mask);
        return ret == IO_APOLL_OK ? -EIOCBQUEUED : -ECANCELED;
}

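/*
 * Post a CQE32 completion for a multishot command without completing the
 * request itself; only valid from a multishot issue context. Returns
 * whether the CQE was posted.
 */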
bool io_uring_cmd_post_mshot_cqe32(struct io_uring_cmd *cmd,
                                   unsigned int issue_flags,
                                   struct io_uring_cqe cqe[2])
{
        struct io_kiocb *req = cmd_to_io_kiocb(cmd);

        if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_MULTISHOT)))
                return false;
        return io_req_post_cqe32(req, cqe);
}