// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING	= 1,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC	= 2,
};
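
/*
 * Illustrative only, not part of this header: a userspace sketch of how a
 * ring-mapped provided buffer group (what the kernel tracks as an
 * IOBL_BUF_RING io_buffer_list) might be set up with liburing. BGID,
 * NR_BUFS, BUF_LEN and the bufs[] array are assumptions for the example;
 * the liburing helpers shown are assumed to be available (liburing 2.4+).
 * Incremental consumption (IOBL_INC) would be requested via the
 * IOU_PBUF_RING_INC registration flag.
 *
 *	struct io_uring_buf_ring *br;
 *	int i, err;
 *
 *	br = io_uring_setup_buf_ring(&ring, NR_BUFS, BGID, 0, &err);
 *	if (!br)
 *		return err;
 *	for (i = 0; i < NR_BUFS; i++)
 *		io_uring_buf_ring_add(br, bufs[i], BUF_LEN, i,
 *				      io_uring_buf_ring_mask(NR_BUFS), i);
 *	io_uring_buf_ring_advance(br, NR_BUFS);
 */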

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_ring is used. If not, then
	 * these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct io_uring_buf_ring *buf_ring;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	__u16 flags;

	struct io_mapped_region region;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};
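
/*
 * Illustrative only, not part of this header: classic (non-ring) provided
 * buffers, tracked per-buffer by struct io_buffer above, are handed to the
 * kernel with IORING_OP_PROVIDE_BUFFERS. A userspace sketch with liburing,
 * where BGID, NR_BUFS, BUF_LEN and buf_base are assumptions for the example:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_provide_buffers(sqe, buf_base, BUF_LEN, NR_BUFS,
 *				      BGID, 0);
 *	io_uring_submit(&ring);
 */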

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND	= 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE		= 2,
};

struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	unsigned short nr_iovs;
	unsigned short mode;
};
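
/*
 * Illustrative only, not part of this header: a sketch of how an opcode
 * handler might fill struct buf_sel_arg to pull one or more provided
 * buffers into an iovec array; it roughly mirrors how the networking
 * opcodes use io_buffers_select(). The local names (iovs, ret) are
 * assumptions for the example.
 *
 *	struct iovec iovs[8];
 *	struct buf_sel_arg arg = {
 *		.iovs		= iovs,
 *		.nr_iovs	= ARRAY_SIZE(iovs),
 *		.max_len	= INT_MAX,
 *		.mode		= KBUF_MODE_EXPAND | KBUF_MODE_FREE,
 *	};
 *	int ret;
 *
 *	ret = io_buffers_select(req, &arg, issue_flags);
 *	if (unlikely(ret < 0))
 *		return ret;
 *
 * On success, ret holds the number of iovecs filled, arg.out_len the total
 * byte length selected, and arg.iovs may point at a newly allocated, larger
 * array than the one passed in.
 */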

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_drop_legacy(struct io_kiocb *req);

unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs);
bool io_kbuf_commit(struct io_kiocb *req,
		    struct io_buffer_list *bl, int len, int nr);

struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid);
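
/*
 * Illustrative only, not part of this header: a sketch of the
 * single-buffer selection path used by opcodes such as recv. The local
 * names (len, buf) are assumptions for the example.
 *
 *	size_t len = max_transfer_len;
 *	void __user *buf;
 *
 *	buf = io_buffer_select(req, &len, issue_flags);
 *	if (!buf)
 *		return -ENOBUFS;
 */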

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING; we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial I/O, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

/*
 * Returns true if the request asked for buffer selection
 * (REQ_F_BUFFER_SELECT) but no buffer has been picked for it yet.
 */
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
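
/*
 * Illustrative only, not part of this header: a sketch of the common
 * pattern of returning a selected buffer when a request must be retried,
 * so the buffer can be handed out again. do_some_io() and the surrounding
 * code are hypothetical.
 *
 *	ret = do_some_io(req, issue_flags);
 *	if (ret == -EAGAIN) {
 *		io_kbuf_recycle(req, issue_flags);
 *		return -EAGAIN;
 *	}
 */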

static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, len, 1);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					int nbufs, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, len, nbufs);
}
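
/*
 * Illustrative only, not part of this header: io_put_kbuf()/io_put_kbufs()
 * return CQE flags that encode the consumed buffer ID (IORING_CQE_F_BUFFER
 * plus the bid shifted by IORING_CQE_BUFFER_SHIFT), so a handler typically
 * feeds the result straight into the completion, roughly:
 *
 *	io_req_set_res(req, ret, io_put_kbuf(req, ret, issue_flags));
 */
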
#endif