// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "msg_ring.h"

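/*
 * IORING_OP_MSG_RING lets one io_uring instance signal another:
 * IORING_MSG_DATA posts a CQE with a caller-chosen user_data/len into the
 * target ring, and IORING_MSG_SEND_FD installs one of the sender's fixed
 * files into the target's fixed file table.
 */
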
/* All valid masks for MSG_RING */
#define IORING_MSG_RING_MASK		(IORING_MSG_RING_CQE_SKIP | \
					 IORING_MSG_RING_FLAGS_PASS)

struct io_msg {
	struct file			*file;
	struct file			*src_file;
	struct callback_head		tw;
	u64 user_data;
	u32 len;
	u32 cmd;
	u32 src_fd;
	union {
		u32 dst_fd;
		u32 cqe_flags;
	};
	u32 flags;
};

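/*
 * Drop the target ring's uring_lock after a cross-ring operation; the
 * source ring's lock, if held, remains the caller's responsibility.
 */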
static void io_double_unlock_ctx(struct io_ring_ctx *octx)
{
	mutex_unlock(&octx->uring_lock);
}

static int io_lock_external_ctx(struct io_ring_ctx *octx,
				unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}
	mutex_lock(&octx->uring_lock);
	return 0;
}

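/*
 * Request teardown: drop the file reference taken by io_msg_grab_file()
 * if the descriptor was never installed into the target ring.
 */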
void io_msg_ring_cleanup(struct io_kiocb *req)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (WARN_ON_ONCE(!msg->src_file))
		return;

	fput(msg->src_file);
	msg->src_file = NULL;
}

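/*
 * Rings whose completions are strictly driven by the submitter task (e.g.
 * IORING_SETUP_DEFER_TASKRUN) can't have CQEs posted into them directly by
 * another task; the message must be routed via task_work instead.
 */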
static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
{
	return target_ctx->task_complete;
}

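/*
 * Runs as task_work on the target ring: post the carried CQE, free the
 * request under RCU and drop the ctx reference taken in io_msg_remote_post().
 */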
static void io_msg_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_kiocb *req = tw_req.req;
	struct io_ring_ctx *ctx = req->ctx;

	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
	kfree_rcu(req, rcu_head);
	percpu_ref_put(&ctx->refs);
}

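/*
 * Disguise the request as a NOP owned by the remote ring and queue it as
 * task_work there, so the CQE is posted in the target's completion context.
 */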
static void io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
			       int res, u32 cflags, u64 user_data)
{
	req->opcode = IORING_OP_NOP;
	req->cqe.user_data = user_data;
	io_req_set_res(req, res, cflags);
	percpu_ref_get(&ctx->refs);
	req->ctx = ctx;
	req->tctx = NULL;
	req->io_task_work.func = io_msg_tw_complete;
	io_req_task_work_add_remote(req, IOU_F_TWQ_LAZY_WAKE);
}

static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
			      struct io_msg *msg)
{
	struct io_kiocb *target;
	u32 flags = 0;

	target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (unlikely(!target))
		return -ENOMEM;

	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
		flags = msg->cqe_flags;

	io_msg_remote_post(target_ctx, target, msg->len, flags, msg->user_data);
	return 0;
}

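/*
 * Post a CQE carrying msg->user_data and msg->len into the target ring.
 * With IORING_MSG_RING_FLAGS_PASS, dst_fd is reinterpreted as the CQE
 * flags to forward.
 */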
static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
			      struct io_msg *msg, unsigned int issue_flags)
{
	u32 flags = 0;
	int ret;

	if (msg->src_fd || msg->flags & ~IORING_MSG_RING_FLAGS_PASS)
		return -EINVAL;
	if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd)
		return -EINVAL;
	/*
	 * Keep the IORING_SETUP_R_DISABLED check before the submitter_task
	 * load in io_msg_data_remote() -> io_req_task_work_add_remote().
	 */
	if (smp_load_acquire(&target_ctx->flags) & IORING_SETUP_R_DISABLED)
		return -EBADFD;

	if (io_msg_need_remote(target_ctx))
		return io_msg_data_remote(target_ctx, msg);

	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
		flags = msg->cqe_flags;

	ret = -EOVERFLOW;
	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
		if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
			return -EAGAIN;
	}
	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
		ret = 0;
	if (target_ctx->flags & IORING_SETUP_IOPOLL)
		io_double_unlock_ctx(target_ctx);
	return ret;
}

static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	return __io_msg_ring_data(target_ctx, msg, issue_flags);
}

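/*
 * Resolve src_fd in the source ring's fixed file table and pin the file so
 * it stays alive until it's installed in the target (or cleaned up).
 */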
static int io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *node;
	int ret = -EBADF;

	io_ring_submit_lock(ctx, issue_flags);
	node = io_rsrc_node_lookup(&ctx->file_table.data, msg->src_fd);
	if (node) {
		msg->src_file = io_slot_file(node);
		if (msg->src_file)
			get_file(msg->src_file);
		req->flags |= REQ_F_NEED_CLEANUP;
		ret = 0;
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

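/*
 * Lock the target ring, install the pinned file into its fixed file table
 * and, unless IORING_MSG_RING_CQE_SKIP was set, post a notification CQE
 * there.
 */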
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct file *src_file = msg->src_file;
	int ret;

	if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
		return -EAGAIN;

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0)
		goto out_unlock;

	msg->src_file = NULL;
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;
	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(target_ctx);
	return ret;
}

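/*
 * Runs as task_work on the target ring's submitter task; if that task is
 * already exiting, fail the request with -EOWNERDEAD.
 */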
static void io_msg_tw_fd_complete(struct callback_head *head)
{
	struct io_msg *msg = container_of(head, struct io_msg, tw);
	struct io_kiocb *req = cmd_to_io_kiocb(msg);
	int ret = -EOWNERDEAD;

	if (!(current->flags & PF_EXITING))
		ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED);
	if (ret < 0)
		req_set_fail(req);
	io_req_queue_tw_complete(req, ret);
}

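/*
 * Queue the fd installation as task_work on the target's submitter task;
 * the request completes later from io_msg_tw_fd_complete().
 */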
static int io_msg_fd_remote(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct task_struct *task = ctx->submitter_task;

	init_task_work(&msg->tw, io_msg_tw_fd_complete);
	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
		return -EOWNERDEAD;

	return IOU_ISSUE_SKIP_COMPLETE;
}

static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;

	if (msg->len)
		return -EINVAL;
	if (target_ctx == ctx)
		return -EINVAL;
	/*
	 * Keep the IORING_SETUP_R_DISABLED check before the submitter_task
	 * load in io_msg_fd_remote().
	 */
	if (smp_load_acquire(&target_ctx->flags) & IORING_SETUP_R_DISABLED)
		return -EBADFD;
	if (!msg->src_file) {
		int ret = io_msg_grab_file(req, issue_flags);

		if (unlikely(ret))
			return ret;
	}

	if (io_msg_need_remote(target_ctx))
		return io_msg_fd_remote(req);
	return io_msg_install_complete(req, issue_flags);
}

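/*
 * Decode the MSG_RING SQE: off carries the target CQE's user_data, len the
 * payload, addr the sub-command, addr3 the source fd, and file_index the
 * destination slot (or, with FLAGS_PASS, the CQE flags to forward).
 */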
static int __io_msg_ring_prep(struct io_msg *msg, const struct io_uring_sqe *sqe)
{
	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->src_file = NULL;
	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_MASK)
		return -EINVAL;

	return 0;
}

int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_msg_ring_prep(io_kiocb_to_cmd(req, struct io_msg), sqe);
}

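/* Issue entry point: dispatch on the MSG_RING sub-command. */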
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req, issue_flags);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0) {
		if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE)
			return ret;
		req_set_fail(req);
	}
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

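/*
 * Synchronous variant used outside of SQE submission: the caller passes the
 * SQE directly and there is no source ring, so only IORING_MSG_DATA is
 * accepted.
 */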
int io_uring_sync_msg_ring(struct io_uring_sqe *sqe)
{
	struct io_msg io_msg = { };
	int ret;

	ret = __io_msg_ring_prep(&io_msg, sqe);
	if (unlikely(ret))
		return ret;

	/*
	 * Only data sending supported, not IORING_MSG_SEND_FD as that one
	 * doesn't make sense without a source ring to send files from.
	 */
	if (io_msg.cmd != IORING_MSG_DATA)
		return -EINVAL;

	CLASS(fd, f)(sqe->fd);
	if (fd_empty(f))
		return -EBADF;
	if (!io_is_uring_fops(fd_file(f)))
		return -EBADFD;
	return __io_msg_ring_data(fd_file(f)->private_data,
				  &io_msg, IO_URING_F_UNLOCKED);
}