1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/namei.h>
9 #include <linux/io_uring.h>
10
11 #include <uapi/linux/io_uring.h>
12
13 #include "../fs/internal.h"
14
15 #include "io_uring.h"
16 #include "fs.h"
17
/*
 * Per-request state for IORING_OP_RENAMEAT.
 *
 * Both path strings are captured at prep time as delayed filenames
 * (see delayed_getname()) and completed at issue time, so the actual
 * rename runs with resolved filenames off the submission path.
 */
struct io_rename {
	struct file			*file;
	int				old_dfd;	/* dirfd for the source path */
	int				new_dfd;	/* dirfd for the destination path */
	struct delayed_filename		oldpath;	/* source, from sqe->addr */
	struct delayed_filename		newpath;	/* destination, from sqe->addr2 */
	int				flags;		/* RENAME_* flags from sqe->rename_flags */
};
26
/*
 * Per-request state for IORING_OP_UNLINKAT.
 *
 * The filename is captured at prep time as a delayed filename and
 * completed at issue time.
 */
struct io_unlink {
	struct file			*file;
	int				dfd;		/* dirfd the path is relative to */
	int				flags;		/* only AT_REMOVEDIR is accepted */
	struct delayed_filename		filename;	/* path, from sqe->addr */
};
33
/*
 * Per-request state for IORING_OP_MKDIRAT.
 *
 * The filename is captured at prep time as a delayed filename and
 * completed at issue time.
 */
struct io_mkdir {
	struct file			*file;
	int				dfd;		/* dirfd the path is relative to */
	umode_t				mode;		/* mode for the new directory, from sqe->len */
	struct delayed_filename		filename;	/* path, from sqe->addr */
};
40
/*
 * Per-request state shared by IORING_OP_SYMLINKAT and IORING_OP_LINKAT.
 *
 * SYMLINKAT only uses new_dfd/oldpath/newpath; LINKAT additionally uses
 * old_dfd and flags. Both paths are captured at prep time as delayed
 * filenames and completed at issue time.
 */
struct io_link {
	struct file			*file;
	int				old_dfd;	/* linkat only: dirfd for the existing path */
	int				new_dfd;	/* dirfd for the new link */
	struct delayed_filename		oldpath;	/* target/source, from sqe->addr */
	struct delayed_filename		newpath;	/* new link path, from sqe->addr2 */
	int				flags;		/* linkat only: AT_* flags from sqe->hardlink_flags */
};
49
io_renameat_prep(struct io_kiocb * req,const struct io_uring_sqe * sqe)50 int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
51 {
52 struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
53 const char __user *oldf, *newf;
54 int err;
55
56 if (sqe->buf_index || sqe->splice_fd_in)
57 return -EINVAL;
58 if (unlikely(req->flags & REQ_F_FIXED_FILE))
59 return -EBADF;
60
61 ren->old_dfd = READ_ONCE(sqe->fd);
62 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
63 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
64 ren->new_dfd = READ_ONCE(sqe->len);
65 ren->flags = READ_ONCE(sqe->rename_flags);
66
67 err = delayed_getname(&ren->oldpath, oldf);
68 if (unlikely(err))
69 return err;
70
71 err = delayed_getname(&ren->newpath, newf);
72 if (unlikely(err)) {
73 dismiss_delayed_filename(&ren->oldpath);
74 return err;
75 }
76
77 req->flags |= REQ_F_NEED_CLEANUP;
78 req->flags |= REQ_F_FORCE_ASYNC;
79 return 0;
80 }
81
io_renameat(struct io_kiocb * req,unsigned int issue_flags)82 int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
83 {
84 struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
85 CLASS(filename_complete_delayed, old)(&ren->oldpath);
86 CLASS(filename_complete_delayed, new)(&ren->newpath);
87 int ret;
88
89 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
90
91 ret = filename_renameat2(ren->old_dfd, old,
92 ren->new_dfd, new, ren->flags);
93
94 req->flags &= ~REQ_F_NEED_CLEANUP;
95 io_req_set_res(req, ret, 0);
96 return IOU_COMPLETE;
97 }
98
io_renameat_cleanup(struct io_kiocb * req)99 void io_renameat_cleanup(struct io_kiocb *req)
100 {
101 struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
102
103 dismiss_delayed_filename(&ren->oldpath);
104 dismiss_delayed_filename(&ren->newpath);
105 }
106
io_unlinkat_prep(struct io_kiocb * req,const struct io_uring_sqe * sqe)107 int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
108 {
109 struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
110 const char __user *fname;
111 int err;
112
113 if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)
114 return -EINVAL;
115 if (unlikely(req->flags & REQ_F_FIXED_FILE))
116 return -EBADF;
117
118 un->dfd = READ_ONCE(sqe->fd);
119
120 un->flags = READ_ONCE(sqe->unlink_flags);
121 if (un->flags & ~AT_REMOVEDIR)
122 return -EINVAL;
123
124 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
125 err = delayed_getname(&un->filename, fname);
126 if (unlikely(err))
127 return err;
128
129 req->flags |= REQ_F_NEED_CLEANUP;
130 req->flags |= REQ_F_FORCE_ASYNC;
131 return 0;
132 }
133
io_unlinkat(struct io_kiocb * req,unsigned int issue_flags)134 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
135 {
136 struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
137 CLASS(filename_complete_delayed, name)(&un->filename);
138 int ret;
139
140 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
141
142 if (un->flags & AT_REMOVEDIR)
143 ret = filename_rmdir(un->dfd, name);
144 else
145 ret = filename_unlinkat(un->dfd, name);
146
147 req->flags &= ~REQ_F_NEED_CLEANUP;
148 io_req_set_res(req, ret, 0);
149 return IOU_COMPLETE;
150 }
151
io_unlinkat_cleanup(struct io_kiocb * req)152 void io_unlinkat_cleanup(struct io_kiocb *req)
153 {
154 struct io_unlink *ul = io_kiocb_to_cmd(req, struct io_unlink);
155
156 dismiss_delayed_filename(&ul->filename);
157 }
158
io_mkdirat_prep(struct io_kiocb * req,const struct io_uring_sqe * sqe)159 int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
160 {
161 struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
162 const char __user *fname;
163 int err;
164
165 if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
166 return -EINVAL;
167 if (unlikely(req->flags & REQ_F_FIXED_FILE))
168 return -EBADF;
169
170 mkd->dfd = READ_ONCE(sqe->fd);
171 mkd->mode = READ_ONCE(sqe->len);
172
173 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
174 err = delayed_getname(&mkd->filename, fname);
175 if (unlikely(err))
176 return err;
177
178 req->flags |= REQ_F_NEED_CLEANUP;
179 req->flags |= REQ_F_FORCE_ASYNC;
180 return 0;
181 }
182
io_mkdirat(struct io_kiocb * req,unsigned int issue_flags)183 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
184 {
185 struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
186 CLASS(filename_complete_delayed, name)(&mkd->filename);
187 int ret;
188
189 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
190
191 ret = filename_mkdirat(mkd->dfd, name, mkd->mode);
192
193 req->flags &= ~REQ_F_NEED_CLEANUP;
194 io_req_set_res(req, ret, 0);
195 return IOU_COMPLETE;
196 }
197
io_mkdirat_cleanup(struct io_kiocb * req)198 void io_mkdirat_cleanup(struct io_kiocb *req)
199 {
200 struct io_mkdir *md = io_kiocb_to_cmd(req, struct io_mkdir);
201
202 dismiss_delayed_filename(&md->filename);
203 }
204
io_symlinkat_prep(struct io_kiocb * req,const struct io_uring_sqe * sqe)205 int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
206 {
207 struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
208 const char __user *oldpath, *newpath;
209 int err;
210
211 if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
212 return -EINVAL;
213 if (unlikely(req->flags & REQ_F_FIXED_FILE))
214 return -EBADF;
215
216 sl->new_dfd = READ_ONCE(sqe->fd);
217 oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
218 newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
219
220 err = delayed_getname(&sl->oldpath, oldpath);
221 if (unlikely(err))
222 return err;
223
224 err = delayed_getname(&sl->newpath, newpath);
225 if (unlikely(err)) {
226 dismiss_delayed_filename(&sl->oldpath);
227 return err;
228 }
229
230 req->flags |= REQ_F_NEED_CLEANUP;
231 req->flags |= REQ_F_FORCE_ASYNC;
232 return 0;
233 }
234
io_symlinkat(struct io_kiocb * req,unsigned int issue_flags)235 int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
236 {
237 struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
238 CLASS(filename_complete_delayed, old)(&sl->oldpath);
239 CLASS(filename_complete_delayed, new)(&sl->newpath);
240 int ret;
241
242 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
243
244 ret = filename_symlinkat(old, sl->new_dfd, new);
245
246 req->flags &= ~REQ_F_NEED_CLEANUP;
247 io_req_set_res(req, ret, 0);
248 return IOU_COMPLETE;
249 }
250
io_linkat_prep(struct io_kiocb * req,const struct io_uring_sqe * sqe)251 int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
252 {
253 struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
254 const char __user *oldf, *newf;
255 int err;
256
257 if (sqe->buf_index || sqe->splice_fd_in)
258 return -EINVAL;
259 if (unlikely(req->flags & REQ_F_FIXED_FILE))
260 return -EBADF;
261
262 lnk->old_dfd = READ_ONCE(sqe->fd);
263 lnk->new_dfd = READ_ONCE(sqe->len);
264 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
265 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
266 lnk->flags = READ_ONCE(sqe->hardlink_flags);
267
268 err = delayed_getname_uflags(&lnk->oldpath, oldf, lnk->flags);
269 if (unlikely(err))
270 return err;
271
272 err = delayed_getname(&lnk->newpath, newf);
273 if (unlikely(err)) {
274 dismiss_delayed_filename(&lnk->oldpath);
275 return err;
276 }
277
278 req->flags |= REQ_F_NEED_CLEANUP;
279 req->flags |= REQ_F_FORCE_ASYNC;
280 return 0;
281 }
282
io_linkat(struct io_kiocb * req,unsigned int issue_flags)283 int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
284 {
285 struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
286 CLASS(filename_complete_delayed, old)(&lnk->oldpath);
287 CLASS(filename_complete_delayed, new)(&lnk->newpath);
288 int ret;
289
290 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
291
292 ret = filename_linkat(lnk->old_dfd, old, lnk->new_dfd, new, lnk->flags);
293
294 req->flags &= ~REQ_F_NEED_CLEANUP;
295 io_req_set_res(req, ret, 0);
296 return IOU_COMPLETE;
297 }
298
io_link_cleanup(struct io_kiocb * req)299 void io_link_cleanup(struct io_kiocb *req)
300 {
301 struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
302
303 dismiss_delayed_filename(&sl->oldpath);
304 dismiss_delayed_filename(&sl->newpath);
305 }
306