1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef IOU_RSRC_H
3 #define IOU_RSRC_H
4
5 #include <linux/io_uring_types.h>
6 #include <linux/lockdep.h>
7
8 #define IO_VEC_CACHE_SOFT_CAP 256
9
/* Kinds of registered resource an io_rsrc_node can refer to */
enum {
	IORING_RSRC_FILE	= 0,
	IORING_RSRC_BUFFER	= 1,
};
14
15 struct io_rsrc_node {
16 unsigned char type;
17 int refs;
18
19 u64 tag;
20 union {
21 unsigned long file_ptr;
22 struct io_mapped_ubuf *buf;
23 };
24 };
25
26 enum {
27 IO_IMU_DEST = 1 << ITER_DEST,
28 IO_IMU_SOURCE = 1 << ITER_SOURCE,
29 };
30
31 struct io_mapped_ubuf {
32 u64 ubuf;
33 unsigned int len;
34 unsigned int nr_bvecs;
35 unsigned int folio_shift;
36 refcount_t refs;
37 unsigned long acct_pages;
38 void (*release)(void *);
39 void *priv;
40 bool is_kbuf;
41 u8 dir;
42 struct bio_vec bvec[] __counted_by(nr_bvecs);
43 };
44
/* Describes how a buffer's pages group into folios, for coalescing */
struct io_imu_folio_data {
	/* Head folio can be partially included in the fixed buf */
	unsigned int	nr_pages_head;
	/* For non-head/tail folios, has to be fully included */
	unsigned int	nr_pages_mid;
	unsigned int	folio_shift;	/* log2 folio size of the mid folios */
	unsigned int	nr_folios;
};
53
54 bool io_rsrc_cache_init(struct io_ring_ctx *ctx);
55 void io_rsrc_cache_free(struct io_ring_ctx *ctx);
56 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type);
57 void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
58 void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data);
59 int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);
60
61 struct io_rsrc_node *io_find_buf_node(struct io_kiocb *req,
62 unsigned issue_flags);
63 int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
64 u64 buf_addr, size_t len, int ddir,
65 unsigned issue_flags);
66 int io_import_reg_vec(int ddir, struct iov_iter *iter,
67 struct io_kiocb *req, struct iou_vec *vec,
68 unsigned nr_iovs, unsigned issue_flags);
69 int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
70 const struct iovec __user *uvec, size_t uvec_segs);
71
72 int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
73 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
74 int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
75 unsigned int nr_args, u64 __user *tags);
76 int io_sqe_files_unregister(struct io_ring_ctx *ctx);
77 int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
78 unsigned nr_args, u64 __user *tags);
79
80 int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
81 unsigned nr_args);
82 int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
83 unsigned size, unsigned type);
84 int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
85 unsigned int size, unsigned int type);
86 int io_buffer_validate(struct iovec *iov);
87
88 bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
89 struct io_imu_folio_data *data);
90
io_rsrc_node_lookup(struct io_rsrc_data * data,int index)91 static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
92 int index)
93 {
94 if (index < data->nr)
95 return data->nodes[array_index_nospec(index, data->nr)];
96 return NULL;
97 }
98
io_put_rsrc_node(struct io_ring_ctx * ctx,struct io_rsrc_node * node)99 static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
100 {
101 lockdep_assert_held(&ctx->uring_lock);
102 if (!--node->refs)
103 io_free_rsrc_node(ctx, node);
104 }
105
io_reset_rsrc_node(struct io_ring_ctx * ctx,struct io_rsrc_data * data,int index)106 static inline bool io_reset_rsrc_node(struct io_ring_ctx *ctx,
107 struct io_rsrc_data *data, int index)
108 {
109 struct io_rsrc_node *node = data->nodes[index];
110
111 if (!node)
112 return false;
113 io_put_rsrc_node(ctx, node);
114 data->nodes[index] = NULL;
115 return true;
116 }
117
io_req_put_rsrc_nodes(struct io_kiocb * req)118 static inline void io_req_put_rsrc_nodes(struct io_kiocb *req)
119 {
120 if (req->file_node) {
121 io_put_rsrc_node(req->ctx, req->file_node);
122 req->file_node = NULL;
123 }
124 if (req->flags & REQ_F_BUF_NODE) {
125 io_put_rsrc_node(req->ctx, req->buf_node);
126 req->buf_node = NULL;
127 }
128 }
129
io_req_assign_rsrc_node(struct io_rsrc_node ** dst_node,struct io_rsrc_node * node)130 static inline void io_req_assign_rsrc_node(struct io_rsrc_node **dst_node,
131 struct io_rsrc_node *node)
132 {
133 node->refs++;
134 *dst_node = node;
135 }
136
io_req_assign_buf_node(struct io_kiocb * req,struct io_rsrc_node * node)137 static inline void io_req_assign_buf_node(struct io_kiocb *req,
138 struct io_rsrc_node *node)
139 {
140 io_req_assign_rsrc_node(&req->buf_node, node);
141 req->flags |= REQ_F_BUF_NODE;
142 }
143
/* IORING_OP_FILES_UPDATE request handlers */
int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

/* Charge @nr_pages against the user's locked-memory limit */
int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
148
__io_unaccount_mem(struct user_struct * user,unsigned long nr_pages)149 static inline void __io_unaccount_mem(struct user_struct *user,
150 unsigned long nr_pages)
151 {
152 atomic_long_sub(nr_pages, &user->locked_vm);
153 }
154
/* iou_vec helpers: free the backing iovec array / grow it to nr_entries */
void io_vec_free(struct iou_vec *iv);
int io_vec_realloc(struct iou_vec *iv, unsigned nr_entries);
157
io_vec_reset_iovec(struct iou_vec * iv,struct iovec * iovec,unsigned nr)158 static inline void io_vec_reset_iovec(struct iou_vec *iv,
159 struct iovec *iovec, unsigned nr)
160 {
161 io_vec_free(iv);
162 iv->iovec = iovec;
163 iv->nr = nr;
164 }
165
io_alloc_cache_vec_kasan(struct iou_vec * iv)166 static inline void io_alloc_cache_vec_kasan(struct iou_vec *iv)
167 {
168 if (IS_ENABLED(CONFIG_KASAN))
169 io_vec_free(iv);
170 }
171
172 #endif
173