// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/io_uring/net.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

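/*
 * Free callback for the ctx cmd_cache: release any cached iovec before
 * freeing the io_async_cmd entry itself.
 */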
void io_cmd_cache_free(const void *entry)
{
	struct io_async_cmd *ac = (struct io_async_cmd *)entry;

	io_vec_free(&ac->vec);
	kfree(ac);
}

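/*
 * Release the per-request async data. op_data is always freed; the
 * io_async_cmd itself is recycled into the ctx cmd_cache only when the
 * ring lock is held (i.e. not IO_URING_F_UNLOCKED).
 */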
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_async_cmd *ac = req->async_data;
	struct io_uring_cmd_data *cache = &ac->data;

	if (cache->op_data) {
		kfree(cache->op_data);
		cache->op_data = NULL;
	}

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;

	io_alloc_cache_vec_kasan(&ac->vec);
	if (ac->vec.nr > IO_VEC_CACHE_SOFT_CAP)
		io_vec_free(&ac->vec);

	if (io_alloc_cache_put(&req->ctx->cmd_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
	}
}

void io_uring_cmd_cleanup(struct io_kiocb *req)
{
	io_req_uring_cleanup(req, 0);
}

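/*
 * Walk the cancelable uring_cmd list and ask each matching driver to
 * cancel via ->uring_cmd(IO_URING_F_CANCEL). Returns true if at least
 * one cancelation attempt was issued.
 */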
bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct io_uring_task *tctx, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
			hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->tctx != tctx)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

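/*
 * Called on completion: drop the command from the cancelable list, if
 * it was marked. Takes the submit lock to unhash the request.
 */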
static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);

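/*
 * Typical driver usage (a minimal sketch; the handler and helper names
 * below are hypothetical, not part of this API):
 *
 *	int my_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_CANCEL)
 *			return my_drv_cancel(cmd, issue_flags);
 *
 *		my_drv_queue(cmd);
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		return -EIOCBQUEUED;
 *	}
 */
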
static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned int flags = IO_URING_F_COMPLETE_DEFER;

	if (io_should_terminate_tw())
		flags |= IO_URING_F_TASK_DEAD;

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, flags);
}

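/*
 * Run @task_work_cb from the task_work context of the submitting task.
 * Drivers use this (via the io_uring_cmd_complete_in_task() and
 * io_uring_cmd_do_in_task_lazy() wrappers) to finish commands from IRQ
 * context, where io_uring_cmd_done() cannot be called directly. A
 * minimal sketch, with a hypothetical driver callback:
 *
 *	static void my_cmd_tw(struct io_uring_cmd *cmd, unsigned issue_flags)
 *	{
 *		io_uring_cmd_done(cmd, 0, 0, issue_flags);
 *	}
 *
 *	// from the driver's completion interrupt:
 *	io_uring_cmd_do_in_task_lazy(cmd, my_cmd_tw);
 */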
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd to complete the command, if they
 * originally returned -EIOCBQUEUED upon receiving it.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

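/*
 * Allocate (or recycle from the ctx cmd_cache) the async data for this
 * command and stash a copy of the SQE in it, so the SQE stays valid if
 * the request goes async.
 */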
static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_async_cmd *ac;

	/* see io_uring_cmd_get_async_data() */
	BUILD_BUG_ON(offsetof(struct io_async_cmd, data) != 0);

	ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
	if (!ac)
		return -ENOMEM;
	ac->data.op_data = NULL;

	/*
	 * Unconditionally cache the SQE for now - this is only needed for
	 * requests that go async, but prep handlers must ensure that any
	 * SQE data is stable beyond prep. Since uring_cmd is special in
	 * that it doesn't read in per-op data, play it safe and copy the
	 * whole SQE here. This can later get relaxed.
	 */
	memcpy(ac->sqes, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = ac->sqes;
	return 0;
}

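/*
 * Prep handler for IORING_OP_URING_CMD: validate uring_cmd_flags, read
 * the driver-private cmd_op, and cache the SQE for async execution.
 */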
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED)
		req->buf_index = READ_ONCE(sqe->buf_index);

	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}

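/*
 * Issue handler: translate ring setup flags into issue_flags and call
 * the file's ->uring_cmd(). -EIOCBQUEUED means the driver owns the
 * completion and will call io_uring_cmd_done() later; -EAGAIN requeues
 * the request for a blocking retry from io-wq.
 */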
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (io_is_compat(ctx))
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
		return ret;
	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

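/*
 * Import a registered (fixed) buffer for a command. @ubuf/@len must fall
 * within the registered buffer selected by the request's buf_index
 * (IORING_URING_CMD_FIXED).
 */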
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);

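/*
 * Vectored variant: copy in the user iovec (cached in the command's
 * async data) and import it against the request's registered buffer.
 */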
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
	struct io_async_cmd *ac = req->async_data;
	int ret;

	ret = io_prep_reg_iovec(req, &ac->vec, uvec, uvec_segs);
	if (ret)
		return ret;

	return io_import_reg_vec(ddir, iter, req, &ac->vec, uvec_segs,
				 issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed_vec);

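/*
 * Punt this command to io-wq, so the driver can issue it from a context
 * that is allowed to block.
 */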
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}

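/* SOCKET_URING_OP_GETSOCKOPT: only the SOL_SOCKET level is supported. */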
static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	const struct io_uring_sqe *sqe = cmd->sqe;
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
	optname = READ_ONCE(sqe->optname);
	optlen = READ_ONCE(sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	const struct io_uring_sqe *sqe = cmd->sqe;
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
	optname = READ_ONCE(sqe->optname);
	optlen = READ_ONCE(sqe->optlen);
	level = READ_ONCE(sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}

#if defined(CONFIG_NET)
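/*
 * Generic handler for socket-level uring_cmds: implements the
 * SOCKET_URING_OP_* commands (SIOCINQ/SIOCOUTQ queries and
 * {get,set}sockopt) on behalf of socket file implementations.
 */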
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif