xref: /linux/io_uring/kbuf.c (revision c612261bedd6bbab7109f798715e449c9d20ff2f)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/namei.h>
9 #include <linux/poll.h>
10 #include <linux/vmalloc.h>
11 #include <linux/io_uring.h>
12 
13 #include <uapi/linux/io_uring.h>
14 
15 #include "io_uring.h"
16 #include "opdef.h"
17 #include "kbuf.h"
18 #include "memmap.h"
19 
20 /* BIDs are addressed by a 16-bit field in a CQE */
21 #define MAX_BIDS_PER_BGID (1 << 16)
22 
23 /* Mapped buffer ring, return io_uring_buf from head */
24 #define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]
25 
26 struct io_provide_buf {
27 	struct file			*file;
28 	__u64				addr;
29 	__u32				len;
30 	__u32				bgid;
31 	__u32				nbufs;
32 	__u16				bid;
33 };
34 
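/*
 * Commit 'len' bytes against an incremental (IOBL_INC) buffer ring. Fully
 * consumed buffers are retired by advancing the ring head; a partially used
 * buffer stays at the head with its addr/len adjusted to the unused part.
 * Returns true only if consumption ended exactly on a buffer boundary.
 */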
35 static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
36 {
37 	/* No data consumed, return false early to avoid consuming the buffer */
38 	if (!len)
39 		return false;
40 
41 	while (len) {
42 		struct io_uring_buf *buf;
43 		u32 buf_len, this_len;
44 
45 		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
46 		buf_len = READ_ONCE(buf->len);
47 		this_len = min_t(u32, len, buf_len);
48 		buf_len -= this_len;
49 		/* Stop if the buffer still has room left, or on an invalid length of 0 */
50 		if (buf_len || !this_len) {
51 			WRITE_ONCE(buf->addr, READ_ONCE(buf->addr) + this_len);
52 			WRITE_ONCE(buf->len, buf_len);
53 			return false;
54 		}
55 		WRITE_ONCE(buf->len, 0);
56 		bl->head++;
57 		len -= this_len;
58 	}
59 	return true;
60 }
61 
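/*
 * Commit ring buffers consumed by a request. This is a no-op unless
 * REQ_F_BUFFERS_COMMIT is set, and that flag is cleared here. Incremental
 * rings go through io_kbuf_inc_commit(); otherwise the ring head is simply
 * advanced by 'nr' buffers. Returns false only if an incremental buffer was
 * left partially filled.
 */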
62 bool io_kbuf_commit(struct io_kiocb *req,
63 		    struct io_buffer_list *bl, int len, int nr)
64 {
65 	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
66 		return true;
67 
68 	req->flags &= ~REQ_F_BUFFERS_COMMIT;
69 
70 	if (unlikely(len < 0))
71 		return true;
72 	if (bl->flags & IOBL_INC)
73 		return io_kbuf_inc_commit(bl, len);
74 	bl->head += nr;
75 	return true;
76 }
77 
78 static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
79 							unsigned int bgid)
80 {
81 	lockdep_assert_held(&ctx->uring_lock);
82 
83 	return xa_load(&ctx->io_bl_xa, bgid);
84 }
85 
86 static int io_buffer_add_list(struct io_ring_ctx *ctx,
87 			      struct io_buffer_list *bl, unsigned int bgid)
88 {
89 	/*
90 	 * Store buffer group ID and finally mark the list as visible.
91 	 * The normal lookup doesn't care about the visibility as we're
92 	 * always under the ->uring_lock, but lookups from mmap do.
93 	 */
94 	bl->bgid = bgid;
95 	guard(mutex)(&ctx->mmap_lock);
96 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
97 }
98 
99 void io_kbuf_drop_legacy(struct io_kiocb *req)
100 {
101 	if (WARN_ON_ONCE(!(req->flags & REQ_F_BUFFER_SELECTED)))
102 		return;
103 	req->flags &= ~REQ_F_BUFFER_SELECTED;
104 	kfree(req->kbuf);
105 	req->kbuf = NULL;
106 }
107 
108 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
109 {
110 	struct io_ring_ctx *ctx = req->ctx;
111 	struct io_buffer_list *bl;
112 	struct io_buffer *buf;
113 
114 	io_ring_submit_lock(ctx, issue_flags);
115 
116 	buf = req->kbuf;
117 	bl = io_buffer_get_list(ctx, buf->bgid);
118 	/*
119 	 * If the buffer list was upgraded to a ring-based one, or removed,
120 	 * while the request was in-flight in io-wq, drop it.
121 	 */
122 	if (bl && !(bl->flags & IOBL_BUF_RING)) {
123 		list_add(&buf->list, &bl->buf_list);
124 		bl->nbufs++;
125 	} else {
126 		kfree(buf);
127 	}
128 	req->flags &= ~REQ_F_BUFFER_SELECTED;
129 	req->kbuf = NULL;
130 
131 	io_ring_submit_unlock(ctx, issue_flags);
132 	return true;
133 }
134 
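/*
 * Pick the first buffer off a legacy provided-buffer list. Clamps *len to
 * the buffer size, flags the request as having a selected buffer, and
 * returns the user address of the buffer, or NULL if the list is empty.
 */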
135 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
136 					      struct io_buffer_list *bl)
137 {
138 	if (!list_empty(&bl->buf_list)) {
139 		struct io_buffer *kbuf;
140 
141 		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
142 		list_del(&kbuf->list);
143 		bl->nbufs--;
144 		if (*len == 0 || *len > kbuf->len)
145 			*len = kbuf->len;
146 		if (list_empty(&bl->buf_list))
147 			req->flags |= REQ_F_BL_EMPTY;
148 		req->flags |= REQ_F_BUFFER_SELECTED;
149 		req->kbuf = kbuf;
150 		req->buf_index = kbuf->bid;
151 		return u64_to_user_ptr(kbuf->addr);
152 	}
153 	return NULL;
154 }
155 
156 static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
157 				      struct io_buffer_list *bl,
158 				      struct iovec *iov)
159 {
160 	void __user *buf;
161 
162 	buf = io_provided_buffer_select(req, len, bl);
163 	if (unlikely(!buf))
164 		return -ENOBUFS;
165 
166 	iov[0].iov_base = buf;
167 	iov[0].iov_len = *len;
168 	return 1;
169 }
170 
171 static bool io_should_commit(struct io_kiocb *req, unsigned int issue_flags)
172 {
173 	/*
174 	 * If we came in unlocked, we have no choice but to consume the
175 	 * buffer here, otherwise nothing ensures that the buffer won't
176 	 * get used by others. This does mean it'll be pinned until the
177 	 * IO completes, coming in unlocked means we're being called from
178 	 * io-wq context and there may be further retries in async hybrid
179 	 * mode. For the locked case, the caller must call commit when
180 	 * the transfer completes (or if we get -EAGAIN and must poll or
181 	 * retry).
182 	 */
183 	if (issue_flags & IO_URING_F_UNLOCKED)
184 		return true;
185 
186 	/* uring_cmd commits kbuf upfront, no need to auto-commit */
187 	if (!io_file_can_poll(req) && !io_is_uring_cmd(req))
188 		return true;
189 	return false;
190 }
191 
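/*
 * Select the buffer at the head of a ring mapped buffer group. *len is
 * capped to the buffer length and the buffer ID is stored in the request.
 * If the buffer must be consumed up front (see io_should_commit()), it is
 * committed here and sel.buf_list is left NULL; otherwise the list is
 * returned in sel.buf_list so the caller can commit once the actual
 * transfer size is known.
 */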
192 static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
193 					      struct io_buffer_list *bl,
194 					      unsigned int issue_flags)
195 {
196 	struct io_uring_buf_ring *br = bl->buf_ring;
197 	__u16 tail, head = bl->head;
198 	struct io_br_sel sel = { };
199 	struct io_uring_buf *buf;
200 	u32 buf_len;
201 
202 	tail = smp_load_acquire(&br->tail);
203 	if (unlikely(tail == head))
204 		return sel;
205 
206 	if (head + 1 == tail)
207 		req->flags |= REQ_F_BL_EMPTY;
208 
209 	buf = io_ring_head_to_buf(br, head, bl->mask);
210 	buf_len = READ_ONCE(buf->len);
211 	if (*len == 0 || *len > buf_len)
212 		*len = buf_len;
213 	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
214 	req->buf_index = READ_ONCE(buf->bid);
215 	sel.buf_list = bl;
216 	sel.addr = u64_to_user_ptr(READ_ONCE(buf->addr));
217 
218 	if (io_should_commit(req, issue_flags)) {
219 		if (!io_kbuf_commit(req, sel.buf_list, *len, 1))
220 			req->flags |= REQ_F_BUF_MORE;
221 		sel.buf_list = NULL;
222 	}
223 	return sel;
224 }
225 
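/*
 * Select a single buffer for a request from group 'buf_group', taking the
 * ring submit lock if called unlocked. Dispatches to the ring mapped or
 * legacy provided-buffer path depending on the group type.
 */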
226 struct io_br_sel io_buffer_select(struct io_kiocb *req, size_t *len,
227 				  unsigned buf_group, unsigned int issue_flags)
228 {
229 	struct io_ring_ctx *ctx = req->ctx;
230 	struct io_br_sel sel = { };
231 	struct io_buffer_list *bl;
232 
233 	io_ring_submit_lock(req->ctx, issue_flags);
234 
235 	bl = io_buffer_get_list(ctx, buf_group);
236 	if (likely(bl)) {
237 		if (bl->flags & IOBL_BUF_RING)
238 			sel = io_ring_buffer_select(req, len, bl, issue_flags);
239 		else
240 			sel.addr = io_provided_buffer_select(req, len, bl);
241 	}
242 	io_ring_submit_unlock(req->ctx, issue_flags);
243 	return sel;
244 }
245 
246 /* cap it at a reasonable 256; the iovec array then fits in one 4K page */
247 #define PEEK_MAX_IMPORT		256
248 
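/*
 * Peek up to 'nr_iovs' buffers from a buffer ring and map them into
 * arg->iovs without committing them. If arg->max_len is set, only enough
 * buffers to satisfy it are mapped (bounded by PEEK_MAX_IMPORT), and with
 * KBUF_MODE_EXPAND a larger iovec array may be allocated. Returns the
 * number of iovecs filled, or -ENOBUFS / -ENOMEM on failure.
 */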
249 static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
250 				struct io_buffer_list *bl)
251 {
252 	struct io_uring_buf_ring *br = bl->buf_ring;
253 	struct iovec *iov = arg->iovs;
254 	int nr_iovs = arg->nr_iovs;
255 	__u16 nr_avail, tail, head;
256 	struct io_uring_buf *buf;
257 
258 	tail = smp_load_acquire(&br->tail);
259 	head = bl->head;
260 	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
261 	if (unlikely(!nr_avail))
262 		return -ENOBUFS;
263 
264 	buf = io_ring_head_to_buf(br, head, bl->mask);
265 	if (arg->max_len) {
266 		u32 len = READ_ONCE(buf->len);
267 		size_t needed;
268 
269 		if (unlikely(!len))
270 			return -ENOBUFS;
271 		needed = (arg->max_len + len - 1) / len;
272 		needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
273 		if (nr_avail > needed)
274 			nr_avail = needed;
275 	}
276 
277 	/*
278 	 * only alloc a bigger array if we know we have data to map, e.g. not
279 	 * a speculative peek operation.
280 	 */
281 	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
282 		iov = kmalloc_objs(struct iovec, nr_avail);
283 		if (unlikely(!iov))
284 			return -ENOMEM;
285 		if (arg->mode & KBUF_MODE_FREE)
286 			kfree(arg->iovs);
287 		arg->iovs = iov;
288 		nr_iovs = nr_avail;
289 	} else if (nr_avail < nr_iovs) {
290 		nr_iovs = nr_avail;
291 	}
292 
293 	/* set it to max, if not set, so we can use it unconditionally */
294 	if (!arg->max_len)
295 		arg->max_len = INT_MAX;
296 
297 	req->buf_index = READ_ONCE(buf->bid);
298 	do {
299 		u32 len = READ_ONCE(buf->len);
300 
301 		/* truncate end piece, if needed, for non-partial buffers */
302 		if (len > arg->max_len) {
303 			len = arg->max_len;
304 			if (!(bl->flags & IOBL_INC)) {
305 				arg->partial_map = 1;
306 				if (iov != arg->iovs)
307 					break;
308 				WRITE_ONCE(buf->len, len);
309 			}
310 		}
311 
312 		iov->iov_base = u64_to_user_ptr(READ_ONCE(buf->addr));
313 		iov->iov_len = len;
314 		iov++;
315 
316 		arg->out_len += len;
317 		arg->max_len -= len;
318 		if (!arg->max_len)
319 			break;
320 
321 		buf = io_ring_head_to_buf(br, ++head, bl->mask);
322 	} while (--nr_iovs);
323 
324 	if (head == tail)
325 		req->flags |= REQ_F_BL_EMPTY;
326 
327 	req->flags |= REQ_F_BUFFER_RING;
328 	return iov - arg->iovs;
329 }
330 
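/*
 * Select one or more buffers for a request. Ring mapped buffers are
 * committed right away and flagged REQ_F_BL_NO_RECYCLE; sel->buf_list is
 * cleared again if the ring was entered unlocked. Returns the number of
 * iovecs mapped, or a negative error.
 */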
331 int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
332 		      struct io_br_sel *sel, unsigned int issue_flags)
333 {
334 	struct io_ring_ctx *ctx = req->ctx;
335 	int ret = -ENOENT;
336 
337 	io_ring_submit_lock(ctx, issue_flags);
338 	sel->buf_list = io_buffer_get_list(ctx, arg->buf_group);
339 	if (unlikely(!sel->buf_list))
340 		goto out_unlock;
341 
342 	if (sel->buf_list->flags & IOBL_BUF_RING) {
343 		ret = io_ring_buffers_peek(req, arg, sel->buf_list);
344 		/*
345 		 * Don't recycle these buffers if we need to go through poll.
346 		 * Nobody else can use them anyway, and holding on to provided
347 		 * buffers for a send/write operation would happen on the app
348 		 * side anyway with normal buffers. Besides, we have already
349 		 * committed them; they cannot be put back in the queue.
350 		 */
351 		if (ret > 0) {
352 			req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
353 			if (!io_kbuf_commit(req, sel->buf_list, arg->out_len, ret))
354 				req->flags |= REQ_F_BUF_MORE;
355 		}
356 	} else {
357 		ret = io_provided_buffers_select(req, &arg->out_len, sel->buf_list, arg->iovs);
358 	}
359 out_unlock:
360 	if (issue_flags & IO_URING_F_UNLOCKED) {
361 		sel->buf_list = NULL;
362 		mutex_unlock(&ctx->uring_lock);
363 	}
364 	return ret;
365 }
366 
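/*
 * Like io_buffers_select(), but called with ->uring_lock already held and
 * without consuming ring buffers up front: they are only flagged for
 * commit, so they are not committed here and remain recyclable.
 */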
367 int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
368 		    struct io_br_sel *sel)
369 {
370 	struct io_ring_ctx *ctx = req->ctx;
371 	struct io_buffer_list *bl;
372 	int ret;
373 
374 	lockdep_assert_held(&ctx->uring_lock);
375 
376 	bl = io_buffer_get_list(ctx, arg->buf_group);
377 	if (unlikely(!bl))
378 		return -ENOENT;
379 
380 	if (bl->flags & IOBL_BUF_RING) {
381 		ret = io_ring_buffers_peek(req, arg, bl);
382 		if (ret > 0)
383 			req->flags |= REQ_F_BUFFERS_COMMIT;
384 		sel->buf_list = bl;
385 		return ret;
386 	}
387 
388 	/* don't support multiple buffer selections for legacy */
389 	sel->buf_list = NULL;
390 	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
391 }
392 
393 static inline bool __io_put_kbuf_ring(struct io_kiocb *req,
394 				      struct io_buffer_list *bl, int len, int nr)
395 {
396 	bool ret = true;
397 
398 	if (bl)
399 		ret = io_kbuf_commit(req, bl, len, nr);
400 	if (ret && (req->flags & REQ_F_BUF_MORE))
401 		ret = false;
402 
403 	req->flags &= ~(REQ_F_BUFFER_RING | REQ_F_BUF_MORE);
404 	return ret;
405 }
406 
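/*
 * Release the buffer(s) held by a request at completion time and return the
 * CQE flags: IORING_CQE_F_BUFFER plus the buffer ID in the upper bits, with
 * IORING_CQE_F_BUF_MORE set if an incremental buffer still has space left.
 */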
407 unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
408 			    int len, int nbufs)
409 {
410 	unsigned int ret;
411 
412 	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
413 
414 	if (unlikely(!(req->flags & REQ_F_BUFFER_RING))) {
415 		io_kbuf_drop_legacy(req);
416 		return ret;
417 	}
418 
419 	if (!__io_put_kbuf_ring(req, bl, len, nbufs))
420 		ret |= IORING_CQE_F_BUF_MORE;
421 	return ret;
422 }
423 
424 static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
425 				    struct io_buffer_list *bl,
426 				    unsigned long nbufs)
427 {
428 	unsigned long i = 0;
429 	struct io_buffer *nxt;
430 
431 	/* protects io_buffers_cache */
432 	lockdep_assert_held(&ctx->uring_lock);
433 	WARN_ON_ONCE(bl->flags & IOBL_BUF_RING);
434 
435 	for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) {
436 		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
437 		list_del(&nxt->list);
438 		bl->nbufs--;
439 		kfree(nxt);
440 		cond_resched();
441 	}
442 	return i;
443 }
444 
445 static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
446 {
447 	if (bl->flags & IOBL_BUF_RING)
448 		io_free_region(ctx->user, &bl->region);
449 	else
450 		io_remove_buffers_legacy(ctx, bl, -1U);
451 
452 	kfree(bl);
453 }
454 
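/*
 * Free every buffer group when the ring goes away. ->mmap_lock is only held
 * around the xarray erase, so io_put_bl() can free buffers and regions
 * outside of it.
 */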
455 void io_destroy_buffers(struct io_ring_ctx *ctx)
456 {
457 	struct io_buffer_list *bl;
458 
459 	while (1) {
460 		unsigned long index = 0;
461 
462 		scoped_guard(mutex, &ctx->mmap_lock) {
463 			bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT);
464 			if (bl)
465 				xa_erase(&ctx->io_bl_xa, bl->bgid);
466 		}
467 		if (!bl)
468 			break;
469 		io_put_bl(ctx, bl);
470 	}
471 }
472 
473 static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
474 {
475 	scoped_guard(mutex, &ctx->mmap_lock)
476 		WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl);
477 	io_put_bl(ctx, bl);
478 }
479 
480 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
481 {
482 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
483 	u64 tmp;
484 
485 	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
486 	    sqe->splice_fd_in)
487 		return -EINVAL;
488 
489 	tmp = READ_ONCE(sqe->fd);
490 	if (!tmp || tmp > MAX_BIDS_PER_BGID)
491 		return -EINVAL;
492 
493 	memset(p, 0, sizeof(*p));
494 	p->nbufs = tmp;
495 	p->bgid = READ_ONCE(sqe->buf_group);
496 	return 0;
497 }
498 
499 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
500 {
501 	unsigned long size, tmp_check;
502 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
503 	u64 tmp;
504 
505 	if (sqe->rw_flags || sqe->splice_fd_in)
506 		return -EINVAL;
507 
508 	tmp = READ_ONCE(sqe->fd);
509 	if (!tmp || tmp > MAX_BIDS_PER_BGID)
510 		return -E2BIG;
511 	p->nbufs = tmp;
512 	p->addr = READ_ONCE(sqe->addr);
513 	p->len = READ_ONCE(sqe->len);
514 	if (!p->len)
515 		return -EINVAL;
516 
517 	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
518 				&size))
519 		return -EOVERFLOW;
520 	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
521 		return -EOVERFLOW;
522 	if (!access_ok(u64_to_user_ptr(p->addr), size))
523 		return -EFAULT;
524 
525 	p->bgid = READ_ONCE(sqe->buf_group);
526 	tmp = READ_ONCE(sqe->off);
527 	if (tmp > USHRT_MAX)
528 		return -E2BIG;
529 	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
530 		return -EINVAL;
531 	p->bid = tmp;
532 	return 0;
533 }
534 
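/*
 * Add 'nbufs' legacy buffers to a list, one allocation per buffer. Stops
 * early on allocation failure or once the list holds USHRT_MAX buffers;
 * returns 0 if at least one buffer was added, otherwise the error.
 */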
535 static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
536 			  struct io_buffer_list *bl)
537 {
538 	struct io_buffer *buf;
539 	u64 addr = pbuf->addr;
540 	int ret = -ENOMEM, i, bid = pbuf->bid;
541 
542 	for (i = 0; i < pbuf->nbufs; i++) {
543 		/*
544 		 * Nonsensical to have more buffers in a list than a 16-bit bid
545 		 * can address, as the application then has no way of knowing
546 		 * which duplicate bid refers to what buffer.
547 		 */
548 		if (bl->nbufs == USHRT_MAX) {
549 			ret = -EOVERFLOW;
550 			break;
551 		}
552 		buf = kmalloc_obj(*buf, GFP_KERNEL_ACCOUNT);
553 		if (!buf)
554 			break;
555 
556 		list_add_tail(&buf->list, &bl->buf_list);
557 		bl->nbufs++;
558 		buf->addr = addr;
559 		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
560 		buf->bid = bid;
561 		buf->bgid = pbuf->bgid;
562 		addr += pbuf->len;
563 		bid++;
564 		cond_resched();
565 	}
566 
567 	return i ? 0 : ret;
568 }
569 
570 static int __io_manage_buffers_legacy(struct io_kiocb *req,
571 					struct io_buffer_list *bl)
572 {
573 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
574 	int ret;
575 
576 	if (!bl) {
577 		if (req->opcode != IORING_OP_PROVIDE_BUFFERS)
578 			return -ENOENT;
579 		bl = kzalloc_obj(*bl, GFP_KERNEL_ACCOUNT);
580 		if (!bl)
581 			return -ENOMEM;
582 
583 		INIT_LIST_HEAD(&bl->buf_list);
584 		ret = io_buffer_add_list(req->ctx, bl, p->bgid);
585 		if (ret) {
586 			kfree(bl);
587 			return ret;
588 		}
589 	}
590 	/* can't use provide/remove buffers command on mapped buffers */
591 	if (bl->flags & IOBL_BUF_RING)
592 		return -EINVAL;
593 	if (req->opcode == IORING_OP_PROVIDE_BUFFERS)
594 		return io_add_buffers(req->ctx, p, bl);
595 	return io_remove_buffers_legacy(req->ctx, bl, p->nbufs);
596 }
597 
598 int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags)
599 {
600 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
601 	struct io_ring_ctx *ctx = req->ctx;
602 	struct io_buffer_list *bl;
603 	int ret;
604 
605 	io_ring_submit_lock(ctx, issue_flags);
606 	bl = io_buffer_get_list(ctx, p->bgid);
607 	ret = __io_manage_buffers_legacy(req, bl);
608 	io_ring_submit_unlock(ctx, issue_flags);
609 
610 	if (ret < 0)
611 		req_set_fail(req);
612 	io_req_set_res(req, ret, 0);
613 	return IOU_COMPLETE;
614 }
615 
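/*
 * Register a ring mapped provided buffer group (IORING_REGISTER_PBUF_RING).
 * Validates the registration, maps the user-supplied ring or allocates a
 * kernel region for it (IOU_PBUF_RING_MMAP), and publishes the new list
 * under the given buffer group ID.
 */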
616 int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
617 {
618 	struct io_uring_buf_reg reg;
619 	struct io_buffer_list *bl;
620 	struct io_uring_region_desc rd;
621 	struct io_uring_buf_ring *br;
622 	unsigned long mmap_offset;
623 	unsigned long ring_size;
624 	int ret;
625 
626 	lockdep_assert_held(&ctx->uring_lock);
627 
628 	if (copy_from_user(&reg, arg, sizeof(reg)))
629 		return -EFAULT;
630 	if (!mem_is_zero(reg.resv, sizeof(reg.resv)))
631 		return -EINVAL;
632 	if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
633 		return -EINVAL;
634 	if (!is_power_of_2(reg.ring_entries))
635 		return -EINVAL;
636 	/* cannot disambiguate full vs empty due to head/tail size */
637 	if (reg.ring_entries >= 65536)
638 		return -EINVAL;
639 
640 	bl = io_buffer_get_list(ctx, reg.bgid);
641 	if (bl) {
642 		/* if mapped buffer ring OR classic exists, don't allow */
643 		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
644 			return -EEXIST;
645 		io_destroy_bl(ctx, bl);
646 	}
647 
648 	bl = kzalloc_obj(*bl, GFP_KERNEL_ACCOUNT);
649 	if (!bl)
650 		return -ENOMEM;
651 
652 	mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT;
653 	ring_size = flex_array_size(br, bufs, reg.ring_entries);
654 
655 	memset(&rd, 0, sizeof(rd));
656 	rd.size = PAGE_ALIGN(ring_size);
657 	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
658 		rd.user_addr = reg.ring_addr;
659 		rd.flags |= IORING_MEM_REGION_TYPE_USER;
660 	}
661 	ret = io_create_region(ctx, &bl->region, &rd, mmap_offset);
662 	if (ret)
663 		goto fail;
664 	br = io_region_get_ptr(&bl->region);
665 
666 #ifdef SHM_COLOUR
667 	/*
668 	 * On platforms that have specific aliasing requirements, SHM_COLOUR
669 	 * is set and we must guarantee that the kernel and user side align
670 	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
671 	 * the application mmap's the provided ring buffer. Fail the request
672 	 * if we, by chance, don't end up with aligned addresses. The app
673 	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
674 	 * this transparently.
675 	 */
676 	if (!(reg.flags & IOU_PBUF_RING_MMAP) &&
677 	    ((reg.ring_addr | (unsigned long)br) & (SHM_COLOUR - 1))) {
678 		ret = -EINVAL;
679 		goto fail;
680 	}
681 #endif
682 
683 	bl->nr_entries = reg.ring_entries;
684 	bl->mask = reg.ring_entries - 1;
685 	bl->flags |= IOBL_BUF_RING;
686 	bl->buf_ring = br;
687 	if (reg.flags & IOU_PBUF_RING_INC)
688 		bl->flags |= IOBL_INC;
689 	ret = io_buffer_add_list(ctx, bl, reg.bgid);
690 	if (!ret)
691 		return 0;
692 fail:
693 	io_free_region(ctx->user, &bl->region);
694 	kfree(bl);
695 	return ret;
696 }
697 
698 int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
699 {
700 	struct io_uring_buf_reg reg;
701 	struct io_buffer_list *bl;
702 
703 	lockdep_assert_held(&ctx->uring_lock);
704 
705 	if (copy_from_user(&reg, arg, sizeof(reg)))
706 		return -EFAULT;
707 	if (!mem_is_zero(reg.resv, sizeof(reg.resv)) || reg.flags)
708 		return -EINVAL;
709 
710 	bl = io_buffer_get_list(ctx, reg.bgid);
711 	if (!bl)
712 		return -ENOENT;
713 	if (!(bl->flags & IOBL_BUF_RING))
714 		return -EINVAL;
715 
716 	scoped_guard(mutex, &ctx->mmap_lock)
717 		xa_erase(&ctx->io_bl_xa, bl->bgid);
718 
719 	io_put_bl(ctx, bl);
720 	return 0;
721 }
722 
723 int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
724 {
725 	struct io_uring_buf_status buf_status;
726 	struct io_buffer_list *bl;
727 
728 	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
729 		return -EFAULT;
730 	if (!mem_is_zero(buf_status.resv, sizeof(buf_status.resv)))
731 		return -EINVAL;
732 
733 	bl = io_buffer_get_list(ctx, buf_status.buf_group);
734 	if (!bl)
735 		return -ENOENT;
736 	if (!(bl->flags & IOBL_BUF_RING))
737 		return -EINVAL;
738 
739 	buf_status.head = bl->head;
740 	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
741 		return -EFAULT;
742 
743 	return 0;
744 }
745 
746 struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
747 					    unsigned int bgid)
748 {
749 	struct io_buffer_list *bl;
750 
751 	lockdep_assert_held(&ctx->mmap_lock);
752 
753 	bl = xa_load(&ctx->io_bl_xa, bgid);
754 	if (!bl || !(bl->flags & IOBL_BUF_RING))
755 		return NULL;
756 	return &bl->region;
757 }
758