1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/pagemap.h>
12 #include <linux/slab.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/sched/signal.h>
16 #include <linux/module.h>
17 #include <linux/swap.h>
18 #include <linux/falloc.h>
19 #include <linux/uio.h>
20 #include <linux/fs.h>
21 #include <linux/filelock.h>
22 #include <linux/splice.h>
23 #include <linux/task_io_accounting_ops.h>
24 #include <linux/iomap.h>
25
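/*
 * Send FUSE_OPEN or FUSE_OPENDIR for @nodeid and collect the server's
 * reply in @outargp.  O_CREAT, O_EXCL and O_NOCTTY are already handled
 * by the VFS and are masked out; O_TRUNC is forwarded only when the
 * server negotiated atomic_o_trunc.
 */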
26 static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
27 unsigned int open_flags, int opcode,
28 struct fuse_open_out *outargp)
29 {
30 struct fuse_open_in inarg;
31 FUSE_ARGS(args);
32
33 memset(&inarg, 0, sizeof(inarg));
34 inarg.flags = open_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
35 if (!fm->fc->atomic_o_trunc)
36 inarg.flags &= ~O_TRUNC;
37
38 if (fm->fc->handle_killpriv_v2 &&
39 (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) {
40 inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
41 }
42
43 args.opcode = opcode;
44 args.nodeid = nodeid;
45 args.in_numargs = 1;
46 args.in_args[0].size = sizeof(inarg);
47 args.in_args[0].value = &inarg;
48 args.out_numargs = 1;
49 args.out_args[0].size = sizeof(*outargp);
50 args.out_args[0].value = outargp;
51
52 return fuse_simple_request(fm, &args);
53 }
54
55 struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release)
56 {
57 struct fuse_file *ff;
58
59 ff = kzalloc_obj(struct fuse_file, GFP_KERNEL_ACCOUNT);
60 if (unlikely(!ff))
61 return NULL;
62
63 ff->fm = fm;
64 if (release) {
65 ff->args = kzalloc_obj(*ff->args, GFP_KERNEL_ACCOUNT);
66 if (!ff->args) {
67 kfree(ff);
68 return NULL;
69 }
70 }
71
72 INIT_LIST_HEAD(&ff->write_entry);
73 refcount_set(&ff->count, 1);
74 RB_CLEAR_NODE(&ff->polled_node);
75 init_waitqueue_head(&ff->poll_wait);
76
77 ff->kh = atomic64_inc_return(&fm->fc->khctr);
78
79 return ff;
80 }
81
82 void fuse_file_free(struct fuse_file *ff)
83 {
84 kfree(ff->args);
85 kfree(ff);
86 }
87
88 static struct fuse_file *fuse_file_get(struct fuse_file *ff)
89 {
90 refcount_inc(&ff->count);
91 return ff;
92 }
93
94 static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
95 int error)
96 {
97 struct fuse_release_args *ra = container_of(args, typeof(*ra), args);
98
99 iput(ra->inode);
100 kfree(ra);
101 }
102
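/*
 * Drop a reference to a fuse_file.  The last put sends the RELEASE /
 * RELEASEDIR request prepared by fuse_prepare_release(): inline for
 * @sync callers, otherwise in the background with __GFP_NOFAIL so the
 * server-side file handle is never leaked.  With fc->no_open the
 * request is completed locally instead of being sent.
 */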
103 static void fuse_file_put(struct fuse_file *ff, bool sync)
104 {
105 if (refcount_dec_and_test(&ff->count)) {
106 struct fuse_release_args *ra = &ff->args->release_args;
107 struct fuse_args *args = (ra ? &ra->args : NULL);
108
109 if (ra && ra->inode)
110 fuse_file_io_release(ff, ra->inode);
111
112 if (!args) {
113 /* Do nothing when server does not implement 'opendir' */
114 } else if (args->opcode == FUSE_RELEASE && ff->fm->fc->no_open) {
115 fuse_release_end(ff->fm, args, 0);
116 } else if (sync) {
117 fuse_simple_request(ff->fm, args);
118 fuse_release_end(ff->fm, args, 0);
119 } else {
120 args->end = fuse_release_end;
121 if (fuse_simple_background(ff->fm, args,
122 GFP_KERNEL | __GFP_NOFAIL))
123 fuse_release_end(ff->fm, args, -ENOTCONN);
124 }
125 kfree(ff);
126 }
127 }
128
129 struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
130 unsigned int open_flags, bool isdir)
131 {
132 struct fuse_conn *fc = fm->fc;
133 struct fuse_file *ff;
134 int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
135 bool open = isdir ? !fc->no_opendir : !fc->no_open;
136 bool release = !isdir || open;
137
138 /*
139 * ff->args->release_args still needs to be allocated (so we can hold an
140 * inode reference while there are pending inflight file operations when
141 * ->release() is called, see fuse_prepare_release()) even if
142 * fc->no_open is set; otherwise it becomes possible for reclaim to deadlock
143 * if, while servicing a readahead request, the server triggers reclaim
144 * and reclaim evicts the inode of the file being read ahead.
145 */
146 ff = fuse_file_alloc(fm, release);
147 if (!ff)
148 return ERR_PTR(-ENOMEM);
149
150 ff->fh = 0;
151 /* Default for no-open */
152 ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
153 if (open) {
154 /* Store outarg for fuse_finish_open() */
155 struct fuse_open_out *outargp = &ff->args->open_outarg;
156 int err;
157
158 err = fuse_send_open(fm, nodeid, open_flags, opcode, outargp);
159 if (!err) {
160 ff->fh = outargp->fh;
161 ff->open_flags = outargp->open_flags;
162 } else if (err != -ENOSYS) {
163 fuse_file_free(ff);
164 return ERR_PTR(err);
165 } else {
166 if (isdir) {
167 /* No release needed */
168 kfree(ff->args);
169 ff->args = NULL;
170 fc->no_opendir = 1;
171 } else {
172 fc->no_open = 1;
173 }
174 }
175 }
176
177 if (isdir)
178 ff->open_flags &= ~FOPEN_DIRECT_IO;
179
180 ff->nodeid = nodeid;
181
182 return ff;
183 }
184
185 int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
186 bool isdir)
187 {
188 struct fuse_file *ff = fuse_file_open(fm, nodeid, file->f_flags, isdir);
189
190 if (!IS_ERR(ff))
191 file->private_data = ff;
192
193 return PTR_ERR_OR_ZERO(ff);
194 }
195 EXPORT_SYMBOL_GPL(fuse_do_open);
196
197 static void fuse_link_write_file(struct file *file)
198 {
199 struct inode *inode = file_inode(file);
200 struct fuse_inode *fi = get_fuse_inode(inode);
201 struct fuse_file *ff = file->private_data;
202 /*
203 * file may be written through mmap, so chain it onto the
204 * inode's write_files list
205 */
206 spin_lock(&fi->lock);
207 if (list_empty(&ff->write_entry))
208 list_add(&ff->write_entry, &fi->write_files);
209 spin_unlock(&fi->lock);
210 }
211
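/*
 * Finish opening @file using the server's open reply: set up the io
 * mode, honour FOPEN_STREAM / FOPEN_NONSEEKABLE, and with writeback
 * cache chain writable files onto the inode's write_files list for
 * mmap writeback.
 */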
212 int fuse_finish_open(struct inode *inode, struct file *file)
213 {
214 struct fuse_file *ff = file->private_data;
215 struct fuse_conn *fc = get_fuse_conn(inode);
216 int err;
217
218 err = fuse_file_io_open(file, inode);
219 if (err)
220 return err;
221
222 if (ff->open_flags & FOPEN_STREAM)
223 stream_open(inode, file);
224 else if (ff->open_flags & FOPEN_NONSEEKABLE)
225 nonseekable_open(inode, file);
226
227 if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
228 fuse_link_write_file(file);
229
230 return 0;
231 }
232
233 static void fuse_truncate_update_attr(struct inode *inode, struct file *file)
234 {
235 struct fuse_conn *fc = get_fuse_conn(inode);
236 struct fuse_inode *fi = get_fuse_inode(inode);
237
238 spin_lock(&fi->lock);
239 fi->attr_version = atomic64_inc_return(&fc->attr_version);
240 i_size_write(inode, 0);
241 spin_unlock(&fi->lock);
242 file_update_time(file);
243 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
244 }
245
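/*
 * Open a regular file.  With atomic_o_trunc the server performs the
 * O_TRUNC as part of FUSE_OPEN, so for writeback-cache and DAX mounts
 * the inode lock (and the invalidate lock for DAX) is taken and writes
 * are blocked until the truncated size is reflected locally.
 */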
246 static int fuse_open(struct inode *inode, struct file *file)
247 {
248 struct fuse_mount *fm = get_fuse_mount(inode);
249 struct fuse_inode *fi = get_fuse_inode(inode);
250 struct fuse_conn *fc = fm->fc;
251 struct fuse_file *ff;
252 int err;
253 bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc;
254 bool is_wb_truncate = is_truncate && fc->writeback_cache;
255 bool dax_truncate = is_truncate && FUSE_IS_DAX(inode);
256
257 if (fuse_is_bad(inode))
258 return -EIO;
259
260 err = generic_file_open(inode, file);
261 if (err)
262 return err;
263
264 if (is_wb_truncate || dax_truncate)
265 inode_lock(inode);
266
267 if (dax_truncate) {
268 filemap_invalidate_lock(inode->i_mapping);
269 err = fuse_dax_break_layouts(inode, 0, -1);
270 if (err)
271 goto out_inode_unlock;
272 }
273
274 if (is_wb_truncate || dax_truncate)
275 fuse_set_nowrite(inode);
276
277 err = fuse_do_open(fm, get_node_id(inode), file, false);
278 if (!err) {
279 ff = file->private_data;
280 err = fuse_finish_open(inode, file);
281 if (err)
282 fuse_sync_release(fi, ff, file->f_flags);
283 else if (is_truncate)
284 fuse_truncate_update_attr(inode, file);
285 }
286
287 if (is_wb_truncate || dax_truncate)
288 fuse_release_nowrite(inode);
289 if (!err) {
290 if (is_truncate)
291 truncate_pagecache(inode, 0);
292 else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
293 invalidate_inode_pages2(inode->i_mapping);
294 }
295 if (dax_truncate)
296 filemap_invalidate_unlock(inode->i_mapping);
297 out_inode_unlock:
298 if (is_wb_truncate || dax_truncate)
299 inode_unlock(inode);
300
301 return err;
302 }
303
304 static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
305 unsigned int flags, int opcode, bool sync)
306 {
307 struct fuse_conn *fc = ff->fm->fc;
308 struct fuse_release_args *ra = &ff->args->release_args;
309
310 if (fuse_file_passthrough(ff))
311 fuse_passthrough_release(ff, fuse_inode_backing(fi));
312
313 /* Inode is NULL on error path of fuse_create_open() */
314 if (likely(fi)) {
315 spin_lock(&fi->lock);
316 list_del(&ff->write_entry);
317 spin_unlock(&fi->lock);
318 }
319 spin_lock(&fc->lock);
320 if (!RB_EMPTY_NODE(&ff->polled_node))
321 rb_erase(&ff->polled_node, &fc->polled_files);
322 spin_unlock(&fc->lock);
323
324 wake_up_interruptible_all(&ff->poll_wait);
325
326 if (!ra)
327 return;
328
329 /* ff->args was used for open outarg */
330 memset(ff->args, 0, sizeof(*ff->args));
331 ra->inarg.fh = ff->fh;
332 ra->inarg.flags = flags;
333 ra->args.in_numargs = 1;
334 ra->args.in_args[0].size = sizeof(struct fuse_release_in);
335 ra->args.in_args[0].value = &ra->inarg;
336 ra->args.opcode = opcode;
337 ra->args.nodeid = ff->nodeid;
338 ra->args.force = true;
339 ra->args.nocreds = true;
340
341 /*
342 * Hold inode until release is finished.
343 * From fuse_sync_release() the refcount is 1 and everything's
344 * synchronous, so we are fine with not doing igrab() here.
345 */
346 ra->inode = sync ? NULL : igrab(&fi->inode);
347 }
348
349 void fuse_file_release(struct inode *inode, struct fuse_file *ff,
350 unsigned int open_flags, fl_owner_t id, bool isdir)
351 {
352 struct fuse_inode *fi = get_fuse_inode(inode);
353 struct fuse_release_args *ra = &ff->args->release_args;
354 int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
355
356 fuse_prepare_release(fi, ff, open_flags, opcode, false);
357
358 if (ra && ff->flock) {
359 ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
360 ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id);
361 }
362
363 /*
364 * Normally this will send the RELEASE request, however if
365 * some asynchronous READ or WRITE requests are outstanding,
366 * the sending will be delayed.
367 *
372 * Always use the asynchronous file put because the current thread
373 * might be the fuse server. This can happen if a process starts some
374 * aio and closes the fd before the aio completes. Since aio takes its
375 * own ref to the file, the IO completion has to drop the ref, which is
376 * how the fuse server can end up closing its clients' files.
377 */
378 fuse_file_put(ff, false);
379 }
380
381 void fuse_release_common(struct file *file, bool isdir)
382 {
383 fuse_file_release(file_inode(file), file->private_data, file->f_flags,
384 (fl_owner_t) file, isdir);
385 }
386
387 static int fuse_release(struct inode *inode, struct file *file)
388 {
389 struct fuse_conn *fc = get_fuse_conn(inode);
390
391 /*
392 * Dirty pages might remain despite write_inode_now() call from
393 * fuse_flush() due to writes racing with the close.
394 */
395 if (fc->writeback_cache)
396 write_inode_now(inode, 1);
397
398 fuse_release_common(file, false);
399
400 /* return value is ignored by VFS */
401 return 0;
402 }
403
404 void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
405 unsigned int flags)
406 {
407 WARN_ON(refcount_read(&ff->count) > 1);
408 fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true);
409 fuse_file_put(ff, true);
410 }
411 EXPORT_SYMBOL_GPL(fuse_sync_release);
412
413 /*
414 * Scramble the ID space with XTEA, so that the value of the files_struct
415 * pointer is not exposed to userspace.
416 */
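/*
 * This is a standard 32-round XTEA encipherment of the 64-bit owner id
 * with the 128-bit per-connection scramble_key; 0x9E3779B9 is the TEA
 * key schedule constant (2^32 / golden ratio).
 */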
417 u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
418 {
419 u32 *k = fc->scramble_key;
420 u64 v = (unsigned long) id;
421 u32 v0 = v;
422 u32 v1 = v >> 32;
423 u32 sum = 0;
424 int i;
425
426 for (i = 0; i < 32; i++) {
427 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
428 sum += 0x9E3779B9;
429 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
430 }
431
432 return (u64) v0 + ((u64) v1 << 32);
433 }
434
435 struct fuse_writepage_args {
436 struct fuse_io_args ia;
437 struct list_head queue_entry;
438 struct inode *inode;
439 struct fuse_sync_bucket *bucket;
440 };
441
442 /*
443 * Wait for all pending writepages on the inode to finish.
444 *
445 * This is currently done by blocking further writes with FUSE_NOWRITE
446 * and waiting for all sent writes to complete.
447 *
448 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
449 * could conflict with truncation.
450 */
451 static void fuse_sync_writes(struct inode *inode)
452 {
453 fuse_set_nowrite(inode);
454 fuse_release_nowrite(inode);
455 }
456
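/*
 * ->flush() runs at every close() of a file descriptor, not only the
 * last one.  Cached data is written back first so that FUSE_FLUSH
 * (which carries the POSIX lock owner) operates on a clean mapping;
 * servers that return -ENOSYS are remembered and not asked again.
 */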
457 static int fuse_flush(struct file *file, fl_owner_t id)
458 {
459 struct inode *inode = file_inode(file);
460 struct fuse_mount *fm = get_fuse_mount(inode);
461 struct fuse_file *ff = file->private_data;
462 struct fuse_flush_in inarg;
463 FUSE_ARGS(args);
464 int err;
465
466 if (fuse_is_bad(inode))
467 return -EIO;
468
469 if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
470 return 0;
471
472 err = write_inode_now(inode, 1);
473 if (err)
474 return err;
475
476 err = filemap_check_errors(file->f_mapping);
477 if (err)
478 return err;
479
480 err = 0;
481 if (fm->fc->no_flush)
482 goto inval_attr_out;
483
484 memset(&inarg, 0, sizeof(inarg));
485 inarg.fh = ff->fh;
486 inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
487 args.opcode = FUSE_FLUSH;
488 args.nodeid = get_node_id(inode);
489 args.in_numargs = 1;
490 args.in_args[0].size = sizeof(inarg);
491 args.in_args[0].value = &inarg;
492 args.force = true;
493
494 err = fuse_simple_request(fm, &args);
495 if (err == -ENOSYS) {
496 fm->fc->no_flush = 1;
497 err = 0;
498 }
499
500 inval_attr_out:
501 /*
502 * In-memory i_blocks is not maintained by fuse; if writeback cache is
503 * enabled, i_blocks from the cached attr may not be accurate.
504 */
505 if (!err && fm->fc->writeback_cache)
506 fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
507 return err;
508 }
509
510 int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
511 int datasync, int opcode)
512 {
513 struct inode *inode = file->f_mapping->host;
514 struct fuse_mount *fm = get_fuse_mount(inode);
515 struct fuse_file *ff = file->private_data;
516 FUSE_ARGS(args);
517 struct fuse_fsync_in inarg;
518
519 memset(&inarg, 0, sizeof(inarg));
520 inarg.fh = ff->fh;
521 inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
522 args.opcode = opcode;
523 args.nodeid = get_node_id(inode);
524 args.in_numargs = 1;
525 args.in_args[0].size = sizeof(inarg);
526 args.in_args[0].value = &inarg;
527 return fuse_simple_request(fm, &args);
528 }
529
530 static int fuse_fsync(struct file *file, loff_t start, loff_t end,
531 int datasync)
532 {
533 struct inode *inode = file->f_mapping->host;
534 struct fuse_conn *fc = get_fuse_conn(inode);
535 int err;
536
537 if (fuse_is_bad(inode))
538 return -EIO;
539
540 inode_lock(inode);
541
542 /*
543 * Start writeback against all dirty pages of the inode, then
544 * wait for all outstanding writes, before sending the FSYNC
545 * request.
546 */
547 err = file_write_and_wait_range(file, start, end);
548 if (err)
549 goto out;
550
551 fuse_sync_writes(inode);
552
553 /*
554 * Due to the implementation of fuse writeback,
555 * file_write_and_wait_range() does not catch errors.
556 * We have to do this directly after fuse_sync_writes().
557 */
558 err = file_check_and_advance_wb_err(file);
559 if (err)
560 goto out;
561
562 err = sync_inode_metadata(inode, 1);
563 if (err)
564 goto out;
565
566 if (fc->no_fsync)
567 goto out;
568
569 err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
570 if (err == -ENOSYS) {
571 fc->no_fsync = 1;
572 err = 0;
573 }
574 out:
575 inode_unlock(inode);
576
577 return err;
578 }
579
580 void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
581 size_t count, int opcode)
582 {
583 struct fuse_file *ff = file->private_data;
584 struct fuse_args *args = &ia->ap.args;
585
586 ia->read.in.fh = ff->fh;
587 ia->read.in.offset = pos;
588 ia->read.in.size = count;
589 ia->read.in.flags = file->f_flags;
590 args->opcode = opcode;
591 args->nodeid = ff->nodeid;
592 args->in_numargs = 1;
593 args->in_args[0].size = sizeof(ia->read.in);
594 args->in_args[0].value = &ia->read.in;
595 args->out_argvar = true;
596 args->out_numargs = 1;
597 args->out_args[0].size = count;
598 }
599
600 static void fuse_release_user_pages(struct fuse_args_pages *ap, ssize_t nres,
601 bool should_dirty)
602 {
603 unsigned int i;
604
605 for (i = 0; i < ap->num_folios; i++) {
606 if (should_dirty)
607 folio_mark_dirty_lock(ap->folios[i]);
608 if (ap->args.is_pinned)
609 unpin_folio(ap->folios[i]);
610 }
611
612 if (nres > 0 && ap->args.invalidate_vmap)
613 invalidate_kernel_vmap_range(ap->args.vmap_base, nres);
614 }
615
616 static void fuse_io_release(struct kref *kref)
617 {
618 kfree(container_of(kref, struct fuse_io_priv, refcnt));
619 }
620
621 static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
622 {
623 if (io->err)
624 return io->err;
625
626 if (io->bytes >= 0 && io->write)
627 return -EIO;
628
629 return io->bytes < 0 ? io->size : io->bytes;
630 }
631
632 /*
633 * In case of short read, the caller sets 'pos' to the position of
634 * actual end of fuse request in IO request. Otherwise, if bytes_requested
635 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
636 *
637 * An example:
638 * User requested DIO read of 64K. It was split into two 32K fuse requests,
639 * both submitted asynchronously. The first of them was ACKed by userspace as
640 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
641 * second request was ACKed as short, e.g. only 1K was read, resulting in
642 * pos == 33K.
643 *
644 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
645 * will be equal to the length of the longest contiguous fragment of
646 * transferred data starting from the beginning of IO request.
647 */
648 static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
649 {
650 int left;
651
652 spin_lock(&io->lock);
653 if (err)
654 io->err = io->err ? : err;
655 else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
656 io->bytes = pos;
657
658 left = --io->reqs;
659 if (!left && io->blocking)
660 complete(io->done);
661 spin_unlock(&io->lock);
662
663 if (!left && !io->blocking) {
664 ssize_t res = fuse_get_res_by_io(io);
665
666 if (res >= 0) {
667 struct inode *inode = file_inode(io->iocb->ki_filp);
668 struct fuse_conn *fc = get_fuse_conn(inode);
669 struct fuse_inode *fi = get_fuse_inode(inode);
670
671 spin_lock(&fi->lock);
672 fi->attr_version = atomic64_inc_return(&fc->attr_version);
673 spin_unlock(&fi->lock);
674 }
675
676 io->iocb->ki_complete(io->iocb, res);
677 }
678
679 kref_put(&io->refcnt, fuse_io_release);
680 }
681
682 static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
683 unsigned int nfolios)
684 {
685 struct fuse_io_args *ia;
686
687 ia = kzalloc_obj(*ia, GFP_KERNEL);
688 if (ia) {
689 ia->io = io;
690 ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL,
691 &ia->ap.descs);
692 if (!ia->ap.folios) {
693 kfree(ia);
694 ia = NULL;
695 }
696 }
697 return ia;
698 }
699
700 static void fuse_io_free(struct fuse_io_args *ia)
701 {
702 kfree(ia->ap.folios);
703 kfree(ia);
704 }
705
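/*
 * Completion handler for one async chunk of a direct IO request.  A
 * short transfer is converted to the absolute position reached within
 * the overall IO and fed to fuse_aio_complete(); see the comment above
 * fuse_aio_complete() for how short chunks are reconciled.
 */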
706 static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
707 int err)
708 {
709 struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
710 struct fuse_io_priv *io = ia->io;
711 ssize_t pos = -1;
712 size_t nres;
713
714 if (err) {
715 /* Nothing */
716 } else if (io->write) {
717 if (ia->write.out.size > ia->write.in.size) {
718 err = -EIO;
719 } else {
720 nres = ia->write.out.size;
721 if (ia->write.in.size != ia->write.out.size)
722 pos = ia->write.in.offset - io->offset +
723 ia->write.out.size;
724 }
725 } else {
726 u32 outsize = args->out_args[0].size;
727
728 nres = outsize;
729 if (ia->read.in.size != outsize)
730 pos = ia->read.in.offset - io->offset + outsize;
731 }
732
733 fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty);
734
735 fuse_aio_complete(io, err, pos);
736 fuse_io_free(ia);
737 }
738
739 static ssize_t fuse_async_req_send(struct fuse_mount *fm,
740 struct fuse_io_args *ia, size_t num_bytes)
741 {
742 ssize_t err;
743 struct fuse_io_priv *io = ia->io;
744
745 spin_lock(&io->lock);
746 kref_get(&io->refcnt);
747 io->size += num_bytes;
748 io->reqs++;
749 spin_unlock(&io->lock);
750
751 ia->ap.args.end = fuse_aio_complete_req;
752 ia->ap.args.may_block = io->should_dirty;
753 err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
754 if (err)
755 fuse_aio_complete_req(fm, &ia->ap.args, err);
756
757 return num_bytes;
758 }
759
760 static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
761 fl_owner_t owner)
762 {
763 struct file *file = ia->io->iocb->ki_filp;
764 struct fuse_file *ff = file->private_data;
765 struct fuse_mount *fm = ff->fm;
766
767 fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
768 if (owner != NULL) {
769 ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
770 ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
771 }
772
773 if (ia->io->async)
774 return fuse_async_req_send(fm, ia, count);
775
776 return fuse_simple_request(fm, &ia->ap.args);
777 }
778
779 static void fuse_read_update_size(struct inode *inode, loff_t size,
780 u64 attr_ver)
781 {
782 struct fuse_conn *fc = get_fuse_conn(inode);
783 struct fuse_inode *fi = get_fuse_inode(inode);
784
785 spin_lock(&fi->lock);
786 if (attr_ver >= fi->attr_version && size < inode->i_size &&
787 !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
788 fi->attr_version = atomic64_inc_return(&fc->attr_version);
789 i_size_write(inode, size);
790 }
791 spin_unlock(&fi->lock);
792 }
793
794 static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
795 struct fuse_args_pages *ap)
796 {
797 struct fuse_conn *fc = get_fuse_conn(inode);
798
799 /*
800 * If writeback_cache is enabled, a short read means there's a hole in
801 * the file. Some data after the hole is in page cache, but has not
802 * reached the client fs yet. So the hole is not present there.
803 */
804 if (!fc->writeback_cache) {
805 loff_t pos = folio_pos(ap->folios[0]) + num_read;
806 fuse_read_update_size(inode, pos, attr_ver);
807 }
808 }
809
810 static int fuse_do_readfolio(struct file *file, struct folio *folio,
811 size_t off, size_t len)
812 {
813 struct inode *inode = folio->mapping->host;
814 struct fuse_mount *fm = get_fuse_mount(inode);
815 loff_t pos = folio_pos(folio) + off;
816 struct fuse_folio_desc desc = {
817 .offset = off,
818 .length = len,
819 };
820 struct fuse_io_args ia = {
821 .ap.args.page_zeroing = true,
822 .ap.args.out_pages = true,
823 .ap.num_folios = 1,
824 .ap.folios = &folio,
825 .ap.descs = &desc,
826 };
827 ssize_t res;
828 u64 attr_ver;
829
830 attr_ver = fuse_get_attr_version(fm->fc);
831
832 /* Don't overflow end offset */
833 if (pos + (desc.length - 1) == LLONG_MAX)
834 desc.length--;
835
836 fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
837 res = fuse_simple_request(fm, &ia.ap.args);
838 if (res < 0)
839 return res;
840 /*
841 * Short read means EOF. If file size is larger, truncate it
842 */
843 if (res < desc.length)
844 fuse_short_read(inode, attr_ver, res, &ia.ap);
845
846 return 0;
847 }
848
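/*
 * FUSE has no notion of block mapping, so every range is reported as
 * trivially mapped.  iomap is used purely for its per-folio state
 * tracking (granular uptodate and dirty bits on large folios), not for
 * block allocation.
 */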
849 static int fuse_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
850 unsigned int flags, struct iomap *iomap,
851 struct iomap *srcmap)
852 {
853 iomap->type = IOMAP_MAPPED;
854 iomap->length = length;
855 iomap->offset = offset;
856 return 0;
857 }
858
859 static const struct iomap_ops fuse_iomap_ops = {
860 .iomap_begin = fuse_iomap_begin,
861 };
862
863 struct fuse_fill_read_data {
864 struct file *file;
865
866 /* Fields below are used if sending the read request asynchronously */
867 struct fuse_conn *fc;
868 struct fuse_io_args *ia;
869 unsigned int nr_bytes;
870 };
871
872 /* forward declarations */
873 static bool fuse_folios_need_send(struct fuse_conn *fc, loff_t pos,
874 unsigned len, struct fuse_args_pages *ap,
875 unsigned cur_bytes, bool write);
876 static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file,
877 unsigned int count, bool async);
878
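/*
 * Accumulate one readahead folio into the READ request being built in
 * @data.  When the folio cannot be merged with the pending request
 * (see fuse_folios_need_send()), the pending request is sent first and
 * a new one is started.  Returns -EAGAIN to cut readahead short when
 * the connection is congested and only speculative pages remain.
 */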
879 static int fuse_handle_readahead(struct folio *folio,
880 struct readahead_control *rac,
881 struct fuse_fill_read_data *data, loff_t pos,
882 size_t len)
883 {
884 struct fuse_io_args *ia = data->ia;
885 size_t off = offset_in_folio(folio, pos);
886 struct fuse_conn *fc = data->fc;
887 struct fuse_args_pages *ap;
888 unsigned int nr_pages;
889
890 if (ia && fuse_folios_need_send(fc, pos, len, &ia->ap, data->nr_bytes,
891 false)) {
892 fuse_send_readpages(ia, data->file, data->nr_bytes,
893 fc->async_read);
894 data->nr_bytes = 0;
895 data->ia = NULL;
896 ia = NULL;
897 }
898 if (!ia) {
899 if (fc->num_background >= fc->congestion_threshold &&
900 rac->ra->async_size >= readahead_count(rac))
901 /*
902 * Congested and only async pages left, so skip the
903 * rest.
904 */
905 return -EAGAIN;
906
907 nr_pages = min(fc->max_pages, readahead_count(rac));
908 data->ia = fuse_io_alloc(NULL, nr_pages);
909 if (!data->ia)
910 return -ENOMEM;
911 ia = data->ia;
912 }
913 folio_get(folio);
914 ap = &ia->ap;
915 ap->folios[ap->num_folios] = folio;
916 ap->descs[ap->num_folios].offset = off;
917 ap->descs[ap->num_folios].length = len;
918 data->nr_bytes += len;
919 ap->num_folios++;
920
921 return 0;
922 }
923
924 static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
925 struct iomap_read_folio_ctx *ctx,
926 size_t len)
927 {
928 struct fuse_fill_read_data *data = ctx->read_ctx;
929 struct folio *folio = ctx->cur_folio;
930 loff_t pos = iter->pos;
931 size_t off = offset_in_folio(folio, pos);
932 struct file *file = data->file;
933 int ret;
934
935 if (ctx->rac) {
936 ret = fuse_handle_readahead(folio, ctx->rac, data, pos, len);
937 } else {
938 /*
939 * for non-readahead read requests, do reads synchronously
940 * since it's not guaranteed that the server can handle
941 * out-of-order reads
942 */
943 ret = fuse_do_readfolio(file, folio, off, len);
944 if (!ret)
945 iomap_finish_folio_read(folio, off, len, ret);
946 }
947 return ret;
948 }
949
950 static void fuse_iomap_submit_read(const struct iomap_iter *iter,
951 struct iomap_read_folio_ctx *ctx)
952 {
953 struct fuse_fill_read_data *data = ctx->read_ctx;
954
955 if (data->ia)
956 fuse_send_readpages(data->ia, data->file, data->nr_bytes,
957 data->fc->async_read);
958 }
959
960 static const struct iomap_read_ops fuse_iomap_read_ops = {
961 .read_folio_range = fuse_iomap_read_folio_range_async,
962 .submit_read = fuse_iomap_submit_read,
963 };
964
965 static int fuse_read_folio(struct file *file, struct folio *folio)
966 {
967 struct inode *inode = folio->mapping->host;
968 struct fuse_fill_read_data data = {
969 .file = file,
970 };
971 struct iomap_read_folio_ctx ctx = {
972 .cur_folio = folio,
973 .ops = &fuse_iomap_read_ops,
974 .read_ctx = &data,
976 };
977
978 if (fuse_is_bad(inode)) {
979 folio_unlock(folio);
980 return -EIO;
981 }
982
983 iomap_read_folio(&fuse_iomap_ops, &ctx, NULL);
984 fuse_invalidate_atime(inode);
985 return 0;
986 }
987
988 static int fuse_iomap_read_folio_range(const struct iomap_iter *iter,
989 struct folio *folio, loff_t pos,
990 size_t len)
991 {
992 struct file *file = iter->private;
993 size_t off = offset_in_folio(folio, pos);
994
995 return fuse_do_readfolio(file, folio, off, len);
996 }
997
998 static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
999 int err)
1000 {
1001 int i;
1002 struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
1003 struct fuse_args_pages *ap = &ia->ap;
1004 size_t count = ia->read.in.size;
1005 size_t num_read = args->out_args[0].size;
1006 struct address_space *mapping;
1007 struct inode *inode;
1008
1009 WARN_ON_ONCE(!ap->num_folios);
1010 mapping = ap->folios[0]->mapping;
1011 inode = mapping->host;
1012
1013 /*
1014 * Short read means EOF. If file size is larger, truncate it
1015 */
1016 if (!err && num_read < count)
1017 fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
1018
1019 fuse_invalidate_atime(inode);
1020
1021 for (i = 0; i < ap->num_folios; i++) {
1022 iomap_finish_folio_read(ap->folios[i], ap->descs[i].offset,
1023 ap->descs[i].length, err);
1024 folio_put(ap->folios[i]);
1025 }
1026 if (ia->ff)
1027 fuse_file_put(ia->ff, false);
1028
1029 fuse_io_free(ia);
1030 }
1031
1032 static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file,
1033 unsigned int count, bool async)
1034 {
1035 struct fuse_file *ff = file->private_data;
1036 struct fuse_mount *fm = ff->fm;
1037 struct fuse_args_pages *ap = &ia->ap;
1038 loff_t pos = folio_pos(ap->folios[0]);
1039 ssize_t res;
1040 int err;
1041
1042 ap->args.out_pages = true;
1043 ap->args.page_zeroing = true;
1044 ap->args.page_replace = true;
1045
1046 /* Don't overflow end offset */
1047 if (pos + (count - 1) == LLONG_MAX) {
1048 count--;
1049 ap->descs[ap->num_folios - 1].length--;
1050 }
1051 WARN_ON((loff_t) (pos + count) < 0);
1052
1053 fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
1054 ia->read.attr_ver = fuse_get_attr_version(fm->fc);
1055 if (async) {
1056 ia->ff = fuse_file_get(ff);
1057 ap->args.end = fuse_readpages_end;
1058 err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
1059 if (!err)
1060 return;
1061 } else {
1062 res = fuse_simple_request(fm, &ap->args);
1063 err = res < 0 ? res : 0;
1064 }
1065 fuse_readpages_end(fm, &ap->args, err);
1066 }
1067
1068 static void fuse_readahead(struct readahead_control *rac)
1069 {
1070 struct inode *inode = rac->mapping->host;
1071 struct fuse_conn *fc = get_fuse_conn(inode);
1072 struct fuse_fill_read_data data = {
1073 .file = rac->file,
1074 .fc = fc,
1075 };
1076 struct iomap_read_folio_ctx ctx = {
1077 .ops = &fuse_iomap_read_ops,
1078 .rac = rac,
1079 .read_ctx = &data
1080 };
1081
1082 if (fuse_is_bad(inode))
1083 return;
1084
1085 iomap_readahead(&fuse_iomap_ops, &ctx, NULL);
1086 }
1087
1088 static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
1089 {
1090 struct inode *inode = iocb->ki_filp->f_mapping->host;
1091 struct fuse_conn *fc = get_fuse_conn(inode);
1092
1093 /*
1094 * In auto invalidate mode, always update attributes on read.
1095 * Otherwise, only update if we attempt to read past EOF (to ensure
1096 * i_size is up to date).
1097 */
1098 if (fc->auto_inval_data ||
1099 (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
1100 int err;
1101 err = fuse_update_attributes(inode, iocb->ki_filp, STATX_SIZE);
1102 if (err)
1103 return err;
1104 }
1105
1106 return generic_file_read_iter(iocb, to);
1107 }
1108
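/*
 * Fill in a FUSE_WRITE request.  Servers speaking protocol minor < 9
 * expect the old, shorter fuse_write_in layout, hence the
 * FUSE_COMPAT_WRITE_IN_SIZE special case below.
 */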
1109 static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
1110 loff_t pos, size_t count)
1111 {
1112 struct fuse_args *args = &ia->ap.args;
1113
1114 ia->write.in.fh = ff->fh;
1115 ia->write.in.offset = pos;
1116 ia->write.in.size = count;
1117 args->opcode = FUSE_WRITE;
1118 args->nodeid = ff->nodeid;
1119 args->in_numargs = 2;
1120 if (ff->fm->fc->minor < 9)
1121 args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
1122 else
1123 args->in_args[0].size = sizeof(ia->write.in);
1124 args->in_args[0].value = &ia->write.in;
1125 args->in_args[1].size = count;
1126 args->out_numargs = 1;
1127 args->out_args[0].size = sizeof(ia->write.out);
1128 args->out_args[0].value = &ia->write.out;
1129 }
1130
1131 static unsigned int fuse_write_flags(struct kiocb *iocb)
1132 {
1133 unsigned int flags = iocb->ki_filp->f_flags;
1134
1135 if (iocb_is_dsync(iocb))
1136 flags |= O_DSYNC;
1137 if (iocb->ki_flags & IOCB_SYNC)
1138 flags |= O_SYNC;
1139
1140 return flags;
1141 }
1142
1143 static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
1144 size_t count, fl_owner_t owner)
1145 {
1146 struct kiocb *iocb = ia->io->iocb;
1147 struct file *file = iocb->ki_filp;
1148 struct fuse_file *ff = file->private_data;
1149 struct fuse_mount *fm = ff->fm;
1150 struct fuse_write_in *inarg = &ia->write.in;
1151 ssize_t err;
1152
1153 fuse_write_args_fill(ia, ff, pos, count);
1154 inarg->flags = fuse_write_flags(iocb);
1155 if (owner != NULL) {
1156 inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
1157 inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
1158 }
1159
1160 if (ia->io->async)
1161 return fuse_async_req_send(fm, ia, count);
1162
1163 err = fuse_simple_request(fm, &ia->ap.args);
1164 if (!err && ia->write.out.size > count)
1165 err = -EIO;
1166
1167 return err ?: ia->write.out.size;
1168 }
1169
1170 bool fuse_write_update_attr(struct inode *inode, loff_t pos, ssize_t written)
1171 {
1172 struct fuse_conn *fc = get_fuse_conn(inode);
1173 struct fuse_inode *fi = get_fuse_inode(inode);
1174 bool ret = false;
1175
1176 spin_lock(&fi->lock);
1177 fi->attr_version = atomic64_inc_return(&fc->attr_version);
1178 if (written > 0 && pos > inode->i_size) {
1179 i_size_write(inode, pos);
1180 ret = true;
1181 }
1182 spin_unlock(&fi->lock);
1183
1184 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
1185
1186 return ret;
1187 }
1188
1189 static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
1190 struct kiocb *iocb, struct inode *inode,
1191 loff_t pos, size_t count)
1192 {
1193 struct fuse_args_pages *ap = &ia->ap;
1194 struct file *file = iocb->ki_filp;
1195 struct fuse_file *ff = file->private_data;
1196 struct fuse_mount *fm = ff->fm;
1197 unsigned int offset, i;
1198 bool short_write;
1199 int err;
1200
1201 for (i = 0; i < ap->num_folios; i++)
1202 folio_wait_writeback(ap->folios[i]);
1203
1204 fuse_write_args_fill(ia, ff, pos, count);
1205 ia->write.in.flags = fuse_write_flags(iocb);
1206 if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID))
1207 ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;
1208
1209 err = fuse_simple_request(fm, &ap->args);
1210 if (!err && ia->write.out.size > count)
1211 err = -EIO;
1212
1213 short_write = ia->write.out.size < count;
1214 offset = ap->descs[0].offset;
1215 count = ia->write.out.size;
1216 for (i = 0; i < ap->num_folios; i++) {
1217 struct folio *folio = ap->folios[i];
1218
1219 if (err) {
1220 folio_clear_uptodate(folio);
1221 } else {
1222 if (count >= folio_size(folio) - offset)
1223 count -= folio_size(folio) - offset;
1224 else {
1225 if (short_write)
1226 folio_clear_uptodate(folio);
1227 count = 0;
1228 }
1229 offset = 0;
1230 }
1231 if (ia->write.folio_locked && (i == ap->num_folios - 1))
1232 folio_unlock(folio);
1233 folio_put(folio);
1234 }
1235
1236 return err;
1237 }
1238
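/*
 * Copy data for a buffered write from @ii into page cache folios and
 * queue them in @ia.  A partially-filled folio that is not uptodate is
 * left locked (ia->write.folio_locked) until fuse_send_write_pages()
 * has sent it, so readers cannot observe the not-yet-uptodate contents.
 */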
1239 static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
1240 struct address_space *mapping,
1241 struct iov_iter *ii, loff_t pos,
1242 unsigned int max_folios)
1243 {
1244 struct fuse_args_pages *ap = &ia->ap;
1245 struct fuse_conn *fc = get_fuse_conn(mapping->host);
1246 unsigned offset = pos & (PAGE_SIZE - 1);
1247 size_t count = 0;
1248 unsigned int num;
1249 int err = 0;
1250
1251 num = min(iov_iter_count(ii), fc->max_write);
1252
1253 ap->args.in_pages = true;
1254
1255 while (num && ap->num_folios < max_folios) {
1256 size_t tmp;
1257 struct folio *folio;
1258 pgoff_t index = pos >> PAGE_SHIFT;
1259 unsigned int bytes;
1260 unsigned int folio_offset;
1261
1262 again:
1263 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
1264 mapping_gfp_mask(mapping));
1265 if (IS_ERR(folio)) {
1266 err = PTR_ERR(folio);
1267 break;
1268 }
1269
1270 if (mapping_writably_mapped(mapping))
1271 flush_dcache_folio(folio);
1272
1273 folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
1274 bytes = min(folio_size(folio) - folio_offset, num);
1275
1276 tmp = copy_folio_from_iter_atomic(folio, folio_offset, bytes, ii);
1277 flush_dcache_folio(folio);
1278
1279 if (!tmp) {
1280 folio_unlock(folio);
1281 folio_put(folio);
1282
1283 /*
1284 * Ensure forward progress by faulting in
1285 * while not holding the folio lock:
1286 */
1287 if (fault_in_iov_iter_readable(ii, bytes)) {
1288 err = -EFAULT;
1289 break;
1290 }
1291
1292 goto again;
1293 }
1294
1295 ap->folios[ap->num_folios] = folio;
1296 ap->descs[ap->num_folios].offset = folio_offset;
1297 ap->descs[ap->num_folios].length = tmp;
1298 ap->num_folios++;
1299
1300 count += tmp;
1301 pos += tmp;
1302 num -= tmp;
1303 offset += tmp;
1304 if (offset == folio_size(folio))
1305 offset = 0;
1306
1307 /* If we copied full folio, mark it uptodate */
1308 if (tmp == folio_size(folio))
1309 folio_mark_uptodate(folio);
1310
1311 if (folio_test_uptodate(folio)) {
1312 folio_unlock(folio);
1313 } else {
1314 ia->write.folio_locked = true;
1315 break;
1316 }
1317 if (!fc->big_writes || offset != 0)
1318 break;
1319 }
1320
1321 return count > 0 ? count : err;
1322 }
1323
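/*
 * Number of pages touched by a write at @pos of @len bytes, capped at
 * @max_pages.  E.g. with 4K pages, pos = 4090 and len = 10 spans bytes
 * 4090..4099 and thus two pages: (4099 >> 12) - (4090 >> 12) + 1 = 2.
 */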
1324 static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
1325 unsigned int max_pages)
1326 {
1327 unsigned int pages = ((pos + len - 1) >> PAGE_SHIFT) -
1328 (pos >> PAGE_SHIFT) + 1;
1329
1330 return min(pages, max_pages);
1331 }
1332
1333 static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii)
1334 {
1335 struct address_space *mapping = iocb->ki_filp->f_mapping;
1336 struct inode *inode = mapping->host;
1337 struct fuse_conn *fc = get_fuse_conn(inode);
1338 struct fuse_inode *fi = get_fuse_inode(inode);
1339 loff_t pos = iocb->ki_pos;
1340 int err = 0;
1341 ssize_t res = 0;
1342
1343 if (inode->i_size < pos + iov_iter_count(ii))
1344 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1345
1346 do {
1347 ssize_t count;
1348 struct fuse_io_args ia = {};
1349 struct fuse_args_pages *ap = &ia.ap;
1350 unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
1351 fc->max_pages);
1352
1353 ap->folios = fuse_folios_alloc(nr_pages, GFP_KERNEL, &ap->descs);
1354 if (!ap->folios) {
1355 err = -ENOMEM;
1356 break;
1357 }
1358
1359 count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
1360 if (count <= 0) {
1361 err = count;
1362 } else {
1363 err = fuse_send_write_pages(&ia, iocb, inode,
1364 pos, count);
1365 if (!err) {
1366 size_t num_written = ia.write.out.size;
1367
1368 res += num_written;
1369 pos += num_written;
1370
1371 /* break out of the loop on short write */
1372 if (num_written != count)
1373 err = -EIO;
1374 }
1375 }
1376 kfree(ap->folios);
1377 } while (!err && iov_iter_count(ii));
1378
1379 fuse_write_update_attr(inode, pos, res);
1380 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
1381
1382 if (!res)
1383 return err;
1384 iocb->ki_pos += res;
1385 return res;
1386 }
1387
1388 static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter)
1389 {
1390 struct inode *inode = file_inode(iocb->ki_filp);
1391
1392 return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
1393 }
1394
1395 /*
1396 * @return true if an exclusive lock for direct IO writes is needed
1397 */
1398 static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from)
1399 {
1400 struct file *file = iocb->ki_filp;
1401 struct fuse_file *ff = file->private_data;
1402 struct inode *inode = file_inode(iocb->ki_filp);
1403 struct fuse_inode *fi = get_fuse_inode(inode);
1404
1405 /* Server side has to advise that it supports parallel dio writes. */
1406 if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
1407 return true;
1408
1409 /*
1410 * Append will need to know the eventual EOF - always needs an
1411 * exclusive lock.
1412 */
1413 if (iocb->ki_flags & IOCB_APPEND)
1414 return true;
1415
1416 /* shared locks are not allowed with parallel page cache IO */
1417 if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
1418 return true;
1419
1420 /* Parallel dio beyond EOF is not supported, at least for now. */
1421 if (fuse_io_past_eof(iocb, from))
1422 return true;
1423
1424 return false;
1425 }
1426
1427 static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
1428 bool *exclusive)
1429 {
1430 struct inode *inode = file_inode(iocb->ki_filp);
1431 struct fuse_inode *fi = get_fuse_inode(inode);
1432
1433 *exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
1434 if (*exclusive) {
1435 inode_lock(inode);
1436 } else {
1437 inode_lock_shared(inode);
1438 /*
1439 * New parallel dio is allowed only if the inode is not in
1440 * caching mode and denies new opens in caching mode. This
1441 * check must be performed only after taking the shared inode
1442 * lock. The earlier past-EOF check ran without the inode lock
1443 * and might have raced, so check it again.
1444 */
1445 if (fuse_io_past_eof(iocb, from) ||
1446 fuse_inode_uncached_io_start(fi, NULL) != 0) {
1447 inode_unlock_shared(inode);
1448 inode_lock(inode);
1449 *exclusive = true;
1450 }
1451 }
1452 }
1453
1454 static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
1455 {
1456 struct inode *inode = file_inode(iocb->ki_filp);
1457 struct fuse_inode *fi = get_fuse_inode(inode);
1458
1459 if (exclusive) {
1460 inode_unlock(inode);
1461 } else {
1462 /* Allow opens in caching mode after last parallel dio end */
1463 fuse_inode_uncached_io_end(fi);
1464 inode_unlock_shared(inode);
1465 }
1466 }
1467
1468 static const struct iomap_write_ops fuse_iomap_write_ops = {
1469 .read_folio_range = fuse_iomap_read_folio_range,
1470 };
1471
1472 static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
1473 {
1474 struct file *file = iocb->ki_filp;
1475 struct mnt_idmap *idmap = file_mnt_idmap(file);
1476 struct address_space *mapping = file->f_mapping;
1477 ssize_t written = 0;
1478 struct inode *inode = mapping->host;
1479 ssize_t err, count;
1480 struct fuse_conn *fc = get_fuse_conn(inode);
1481 bool writeback = false;
1482
1483 if (fc->writeback_cache) {
1484 /* Update size (EOF optimization) and mode (SUID clearing) */
1485 err = fuse_update_attributes(mapping->host, file,
1486 STATX_SIZE | STATX_MODE);
1487 if (err)
1488 return err;
1489
1490 if (!fc->handle_killpriv_v2 ||
1491 !setattr_should_drop_suidgid(idmap, file_inode(file)))
1492 writeback = true;
1493 }
1494
1495 inode_lock(inode);
1496
1497 err = count = generic_write_checks(iocb, from);
1498 if (err <= 0)
1499 goto out;
1500
1501 task_io_account_write(count);
1502
1503 err = kiocb_modified(iocb);
1504 if (err)
1505 goto out;
1506
1507 if (iocb->ki_flags & IOCB_DIRECT) {
1508 written = generic_file_direct_write(iocb, from);
1509 if (written < 0 || !iov_iter_count(from))
1510 goto out;
1511 written = direct_write_fallback(iocb, from, written,
1512 fuse_perform_write(iocb, from));
1513 } else if (writeback) {
1514 /*
1515 * Use iomap so that we can do granular uptodate reads
1516 * and granular dirty tracking for large folios.
1517 */
1518 written = iomap_file_buffered_write(iocb, from,
1519 &fuse_iomap_ops,
1520 &fuse_iomap_write_ops,
1521 file);
1522 } else {
1523 written = fuse_perform_write(iocb, from);
1524 }
1525 out:
1526 inode_unlock(inode);
1527 if (written > 0)
1528 written = generic_write_sync(iocb, written);
1529
1530 return written ? written : err;
1531 }
1532
1533 static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
1534 {
1535 return (unsigned long)iter_iov(ii)->iov_base + ii->iov_offset;
1536 }
1537
1538 static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
1539 size_t max_size)
1540 {
1541 return min(iov_iter_single_seg_count(ii), max_size);
1542 }
1543
1544 static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
1545 size_t *nbytesp, int write,
1546 unsigned int max_pages,
1547 bool use_pages_for_kvec_io)
1548 {
1549 bool flush_or_invalidate = false;
1550 unsigned int nr_pages = 0;
1551 size_t nbytes = 0; /* # bytes already packed in req */
1552 ssize_t ret = 0;
1553
1554 /* Special case for kernel I/O: can copy directly into the buffer.
1555 * However, if the fuse_conn implementation requires pages instead of
1556 * a pointer (e.g., virtio-fs), use iov_iter_extract_pages() instead.
1557 */
1558 if (iov_iter_is_kvec(ii)) {
1559 void *user_addr = (void *)fuse_get_user_addr(ii);
1560
1561 if (!use_pages_for_kvec_io) {
1562 size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
1563
1564 if (write)
1565 ap->args.in_args[1].value = user_addr;
1566 else
1567 ap->args.out_args[0].value = user_addr;
1568
1569 iov_iter_advance(ii, frag_size);
1570 *nbytesp = frag_size;
1571 return 0;
1572 }
1573
1574 if (is_vmalloc_addr(user_addr)) {
1575 ap->args.vmap_base = user_addr;
1576 flush_or_invalidate = true;
1577 }
1578 }
1579
1580 /*
1581 * Until there is support for iov_iter_extract_folios(), we have to
1582 * manually extract pages using iov_iter_extract_pages() and then
1583 * copy that to a folios array.
1584 */
1585 struct page **pages = kzalloc(max_pages * sizeof(struct page *),
1586 GFP_KERNEL);
1587 if (!pages) {
1588 ret = -ENOMEM;
1589 goto out;
1590 }
1591
1592 while (nbytes < *nbytesp && nr_pages < max_pages) {
1593 unsigned nfolios, i;
1594 size_t start;
1595
1596 ret = iov_iter_extract_pages(ii, &pages,
1597 *nbytesp - nbytes,
1598 max_pages - nr_pages,
1599 0, &start);
1600 if (ret < 0)
1601 break;
1602
1603 nbytes += ret;
1604
1605 nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE);
1606
1607 for (i = 0; i < nfolios; i++) {
1608 struct folio *folio = page_folio(pages[i]);
1609 unsigned int offset = start +
1610 (folio_page_idx(folio, pages[i]) << PAGE_SHIFT);
1611 unsigned int len = umin(ret, PAGE_SIZE - start);
1612
1613 ap->descs[ap->num_folios].offset = offset;
1614 ap->descs[ap->num_folios].length = len;
1615 ap->folios[ap->num_folios] = folio;
1616 start = 0;
1617 ret -= len;
1618 ap->num_folios++;
1619 }
1620
1621 nr_pages += nfolios;
1622 }
1623 kfree(pages);
1624
1625 if (write && flush_or_invalidate)
1626 flush_kernel_vmap_range(ap->args.vmap_base, nbytes);
1627
1628 ap->args.invalidate_vmap = !write && flush_or_invalidate;
1629 ap->args.is_pinned = iov_iter_extract_will_pin(ii);
1630 ap->args.user_pages = true;
1631 if (write)
1632 ap->args.in_pages = true;
1633 else
1634 ap->args.out_pages = true;
1635
1636 out:
1637 *nbytesp = nbytes;
1638
1639 return ret < 0 ? ret : 0;
1640 }
1641
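/*
 * Common engine for FOPEN_DIRECT_IO reads and writes: the iov_iter is
 * cut into chunks of at most fc->max_write/max_read bytes, each sent
 * as its own FUSE request, either synchronously or (when io->async) in
 * the background.  Overlapping dirty page cache is flushed beforehand
 * so the server observes the data in order.
 */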
1642 ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1643 loff_t *ppos, int flags)
1644 {
1645 int write = flags & FUSE_DIO_WRITE;
1646 int cuse = flags & FUSE_DIO_CUSE;
1647 struct file *file = io->iocb->ki_filp;
1648 struct address_space *mapping = file->f_mapping;
1649 struct inode *inode = mapping->host;
1650 struct fuse_file *ff = file->private_data;
1651 struct fuse_conn *fc = ff->fm->fc;
1652 size_t nmax = write ? fc->max_write : fc->max_read;
1653 loff_t pos = *ppos;
1654 size_t count = iov_iter_count(iter);
1655 pgoff_t idx_from = pos >> PAGE_SHIFT;
1656 pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
1657 ssize_t res = 0;
1658 int err = 0;
1659 struct fuse_io_args *ia;
1660 unsigned int max_pages;
1661 bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;
1662
1663 max_pages = iov_iter_npages(iter, fc->max_pages);
1664 ia = fuse_io_alloc(io, max_pages);
1665 if (!ia)
1666 return -ENOMEM;
1667
1668 if (fopen_direct_io) {
1669 res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
1670 if (res) {
1671 fuse_io_free(ia);
1672 return res;
1673 }
1674 }
1675 if (!cuse && filemap_range_has_writeback(mapping, pos, (pos + count - 1))) {
1676 if (!write)
1677 inode_lock(inode);
1678 fuse_sync_writes(inode);
1679 if (!write)
1680 inode_unlock(inode);
1681 }
1682
1683 if (fopen_direct_io && write) {
1684 res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
1685 if (res) {
1686 fuse_io_free(ia);
1687 return res;
1688 }
1689 }
1690
1691 io->should_dirty = !write && user_backed_iter(iter);
1692 while (count) {
1693 ssize_t nres;
1694 fl_owner_t owner = current->files;
1695 size_t nbytes = min(count, nmax);
1696
1697 err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
1698 max_pages, fc->use_pages_for_kvec_io);
1699 if (err && !nbytes)
1700 break;
1701
1702 if (write) {
1703 if (!capable(CAP_FSETID))
1704 ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID;
1705
1706 nres = fuse_send_write(ia, pos, nbytes, owner);
1707 } else {
1708 nres = fuse_send_read(ia, pos, nbytes, owner);
1709 }
1710
1711 if (!io->async || nres < 0) {
1712 fuse_release_user_pages(&ia->ap, nres, io->should_dirty);
1713 fuse_io_free(ia);
1714 }
1715 ia = NULL;
1716 if (nres < 0) {
1717 iov_iter_revert(iter, nbytes);
1718 err = nres;
1719 break;
1720 }
1721 WARN_ON(nres > nbytes);
1722
1723 count -= nres;
1724 res += nres;
1725 pos += nres;
1726 if (nres != nbytes) {
1727 iov_iter_revert(iter, nbytes - nres);
1728 break;
1729 }
1730 if (count) {
1731 max_pages = iov_iter_npages(iter, fc->max_pages);
1732 ia = fuse_io_alloc(io, max_pages);
1733 if (!ia)
1734 break;
1735 }
1736 }
1737 if (ia)
1738 fuse_io_free(ia);
1739 if (res > 0)
1740 *ppos = pos;
1741
1742 if (res > 0 && write && fopen_direct_io) {
1743 /*
1744 * As in generic_file_direct_write(), invalidate after the
1745 * write, to invalidate read-ahead cache that may have competed
1746 * with the write.
1747 */
1748 invalidate_inode_pages2_range(mapping, idx_from, idx_to);
1749 }
1750
1751 return res > 0 ? res : err;
1752 }
1753 EXPORT_SYMBOL_GPL(fuse_direct_io);
1754
1755 static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
1756 struct iov_iter *iter,
1757 loff_t *ppos)
1758 {
1759 ssize_t res;
1760 struct inode *inode = file_inode(io->iocb->ki_filp);
1761
1762 res = fuse_direct_io(io, iter, ppos, 0);
1763
1764 fuse_invalidate_atime(inode);
1765
1766 return res;
1767 }
1768
1769 static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
1770
1771 static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
1772 {
1773 ssize_t res;
1774
1775 if (!is_sync_kiocb(iocb)) {
1776 res = fuse_direct_IO(iocb, to);
1777 } else {
1778 struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
1779
1780 res = __fuse_direct_read(&io, to, &iocb->ki_pos);
1781 }
1782
1783 return res;
1784 }
1785
1786 static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
1787 {
1788 struct inode *inode = file_inode(iocb->ki_filp);
1789 ssize_t res;
1790 bool exclusive;
1791
1792 fuse_dio_lock(iocb, from, &exclusive);
1793 res = generic_write_checks(iocb, from);
1794 if (res > 0) {
1795 task_io_account_write(res);
1796 if (!is_sync_kiocb(iocb)) {
1797 res = fuse_direct_IO(iocb, from);
1798 } else {
1799 struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
1800
1801 res = fuse_direct_io(&io, from, &iocb->ki_pos,
1802 FUSE_DIO_WRITE);
1803 fuse_write_update_attr(inode, iocb->ki_pos, res);
1804 }
1805 }
1806 fuse_dio_unlock(iocb, exclusive);
1807
1808 return res;
1809 }
1810
1811 static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1812 {
1813 struct file *file = iocb->ki_filp;
1814 struct fuse_file *ff = file->private_data;
1815 struct inode *inode = file_inode(file);
1816
1817 if (fuse_is_bad(inode))
1818 return -EIO;
1819
1820 if (FUSE_IS_DAX(inode))
1821 return fuse_dax_read_iter(iocb, to);
1822
1823 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1824 if (ff->open_flags & FOPEN_DIRECT_IO)
1825 return fuse_direct_read_iter(iocb, to);
1826 else if (fuse_file_passthrough(ff))
1827 return fuse_passthrough_read_iter(iocb, to);
1828 else
1829 return fuse_cache_read_iter(iocb, to);
1830 }
1831
1832 static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1833 {
1834 struct file *file = iocb->ki_filp;
1835 struct fuse_file *ff = file->private_data;
1836 struct inode *inode = file_inode(file);
1837
1838 if (fuse_is_bad(inode))
1839 return -EIO;
1840
1841 if (FUSE_IS_DAX(inode))
1842 return fuse_dax_write_iter(iocb, from);
1843
1844 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1845 if (ff->open_flags & FOPEN_DIRECT_IO)
1846 return fuse_direct_write_iter(iocb, from);
1847 else if (fuse_file_passthrough(ff))
1848 return fuse_passthrough_write_iter(iocb, from);
1849 else
1850 return fuse_cache_write_iter(iocb, from);
1851 }
1852
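/*
 * Which of the branches above a file takes is decided by the server in its
 * reply to FUSE_OPEN.  A minimal libfuse low-level sketch (illustrative; the
 * handler name is hypothetical, and FOPEN_PASSTHROUGH setup via backing ids
 * is omitted):
 *
 *	static void hypothetical_open(fuse_req_t req, fuse_ino_t ino,
 *				      struct fuse_file_info *fi)
 *	{
 *		fi->direct_io = 1;	// sets FOPEN_DIRECT_IO in the reply,
 *					// routing I/O to fuse_direct_*_iter()
 *		fuse_reply_open(req, fi);
 *	}
 */
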
1853 static ssize_t fuse_splice_read(struct file *in, loff_t *ppos,
1854 struct pipe_inode_info *pipe, size_t len,
1855 unsigned int flags)
1856 {
1857 struct fuse_file *ff = in->private_data;
1858
1859 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1860 if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
1861 return fuse_passthrough_splice_read(in, ppos, pipe, len, flags);
1862 else
1863 return filemap_splice_read(in, ppos, pipe, len, flags);
1864 }
1865
1866 static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out,
1867 loff_t *ppos, size_t len, unsigned int flags)
1868 {
1869 struct fuse_file *ff = out->private_data;
1870
1871 /* FOPEN_DIRECT_IO overrides FOPEN_PASSTHROUGH */
1872 if (fuse_file_passthrough(ff) && !(ff->open_flags & FOPEN_DIRECT_IO))
1873 return fuse_passthrough_splice_write(pipe, out, ppos, len, flags);
1874 else
1875 return iter_file_splice_write(pipe, out, ppos, len, flags);
1876 }
1877
1878 static void fuse_writepage_free(struct fuse_writepage_args *wpa)
1879 {
1880 struct fuse_args_pages *ap = &wpa->ia.ap;
1881
1882 if (wpa->bucket)
1883 fuse_sync_bucket_dec(wpa->bucket);
1884
1885 fuse_file_put(wpa->ia.ff, false);
1886
1887 kfree(ap->folios);
1888 kfree(wpa);
1889 }
1890
1891 static void fuse_writepage_finish(struct fuse_writepage_args *wpa)
1892 {
1893 struct fuse_args_pages *ap = &wpa->ia.ap;
1894 struct inode *inode = wpa->inode;
1895 struct fuse_inode *fi = get_fuse_inode(inode);
1896 int i;
1897
1898 for (i = 0; i < ap->num_folios; i++)
1899 /*
1900 * Benchmarks showed that ending writeback within the
1901 * scope of the fi->lock alleviates xarray lock
1902 * contention and noticeably improves performance.
1903 */
1904 iomap_finish_folio_write(inode, ap->folios[i],
1905 ap->descs[i].length);
1906
1907 wake_up(&fi->page_waitq);
1908 }
1909
1910 /* Called under fi->lock, may release and reacquire it */
1911 static void fuse_send_writepage(struct fuse_mount *fm,
1912 struct fuse_writepage_args *wpa, loff_t size)
1913 __releases(fi->lock)
1914 __acquires(fi->lock)
1915 {
1916 struct fuse_inode *fi = get_fuse_inode(wpa->inode);
1917 struct fuse_args_pages *ap = &wpa->ia.ap;
1918 struct fuse_write_in *inarg = &wpa->ia.write.in;
1919 struct fuse_args *args = &ap->args;
1920 __u64 data_size = 0;
1921 int err, i;
1922
1923 for (i = 0; i < ap->num_folios; i++)
1924 data_size += ap->descs[i].length;
1925
1926 fi->writectr++;
1927 if (inarg->offset + data_size <= size) {
1928 inarg->size = data_size;
1929 } else if (inarg->offset < size) {
1930 inarg->size = size - inarg->offset;
1931 } else {
1932 /* Got truncated off completely */
1933 goto out_free;
1934 }
1935
1936 args->in_args[1].size = inarg->size;
1937 args->force = true;
1938 args->nocreds = true;
1939
1940 err = fuse_simple_background(fm, args, GFP_ATOMIC);
1941 if (err == -ENOMEM) {
1942 spin_unlock(&fi->lock);
1943 err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
1944 spin_lock(&fi->lock);
1945 }
1946
1947 /* Fails on broken connection only */
1948 if (unlikely(err))
1949 goto out_free;
1950
1951 return;
1952
1953 out_free:
1954 fi->writectr--;
1955 fuse_writepage_finish(wpa);
1956 spin_unlock(&fi->lock);
1957 fuse_writepage_free(wpa);
1958 spin_lock(&fi->lock);
1959 }
1960
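/*
 * Worked example of the cropping above: for a request with inarg->offset at
 * 1 MiB and data_size of 64 KiB, a crop size of 2 MiB sends all 64 KiB, a
 * size of 1 MiB + 16 KiB sends only the first 16 KiB, and a size of 512 KiB
 * means the whole request was truncated away and is freed via out_free.
 */
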
1961 /*
1962 * If fi->writectr is non-negative (no truncate or fsync going on) send
1963 * all queued writepage requests.
1964 *
1965 * Called with fi->lock
1966 */
1967 void fuse_flush_writepages(struct inode *inode)
1968 __releases(fi->lock)
1969 __acquires(fi->lock)
1970 {
1971 struct fuse_mount *fm = get_fuse_mount(inode);
1972 struct fuse_inode *fi = get_fuse_inode(inode);
1973 loff_t crop = i_size_read(inode);
1974 struct fuse_writepage_args *wpa;
1975
1976 while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
1977 wpa = list_entry(fi->queued_writes.next,
1978 struct fuse_writepage_args, queue_entry);
1979 list_del_init(&wpa->queue_entry);
1980 fuse_send_writepage(fm, wpa, crop);
1981 }
1982 }
1983
1984 static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
1985 int error)
1986 {
1987 struct fuse_writepage_args *wpa =
1988 container_of(args, typeof(*wpa), ia.ap.args);
1989 struct inode *inode = wpa->inode;
1990 struct fuse_inode *fi = get_fuse_inode(inode);
1991 struct fuse_conn *fc = get_fuse_conn(inode);
1992
1993 mapping_set_error(inode->i_mapping, error);
1994 /*
1995 * A writeback finished and this might have updated mtime/ctime on
1996 * server making local mtime/ctime stale. Hence invalidate attrs.
1997 * Do this only if writeback_cache is not enabled. If writeback_cache
1998 * is enabled, we trust local ctime/mtime.
1999 */
2000 if (!fc->writeback_cache)
2001 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY);
2002 spin_lock(&fi->lock);
2003 fi->writectr--;
2004 fuse_writepage_finish(wpa);
2005 spin_unlock(&fi->lock);
2006 fuse_writepage_free(wpa);
2007 }
2008
2009 static struct fuse_file *__fuse_write_file_get(struct fuse_inode *fi)
2010 {
2011 struct fuse_file *ff;
2012
2013 spin_lock(&fi->lock);
2014 ff = list_first_entry_or_null(&fi->write_files, struct fuse_file,
2015 write_entry);
2016 if (ff)
2017 fuse_file_get(ff);
2018 spin_unlock(&fi->lock);
2019
2020 return ff;
2021 }
2022
2023 static struct fuse_file *fuse_write_file_get(struct fuse_inode *fi)
2024 {
2025 struct fuse_file *ff = __fuse_write_file_get(fi);
2026 WARN_ON(!ff);
2027 return ff;
2028 }
2029
2030 int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
2031 {
2032 struct fuse_inode *fi = get_fuse_inode(inode);
2033 struct fuse_file *ff;
2034 int err;
2035
2036 ff = __fuse_write_file_get(fi);
2037 err = fuse_flush_times(inode, ff);
2038 if (ff)
2039 fuse_file_put(ff, false);
2040
2041 return err;
2042 }
2043
2044 static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
2045 {
2046 struct fuse_writepage_args *wpa;
2047 struct fuse_args_pages *ap;
2048
2049 wpa = kzalloc_obj(*wpa, GFP_NOFS);
2050 if (wpa) {
2051 ap = &wpa->ia.ap;
2052 ap->num_folios = 0;
2053 ap->folios = fuse_folios_alloc(1, GFP_NOFS, &ap->descs);
2054 if (!ap->folios) {
2055 kfree(wpa);
2056 wpa = NULL;
2057 }
2058 }
2059 return wpa;
2060
2061 }
2062
2063 static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
2064 struct fuse_writepage_args *wpa)
2065 {
2066 if (!fc->sync_fs)
2067 return;
2068
2069 rcu_read_lock();
2070 /* Prevent resurrection of dead bucket in unlikely race with syncfs */
2071 do {
2072 wpa->bucket = rcu_dereference(fc->curr_bucket);
2073 } while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
2074 rcu_read_unlock();
2075 }
2076
2077 static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio,
2078 uint32_t folio_index, loff_t offset, unsigned len)
2079 {
2080 struct fuse_args_pages *ap = &wpa->ia.ap;
2081
2082 ap->folios[folio_index] = folio;
2083 ap->descs[folio_index].offset = offset;
2084 ap->descs[folio_index].length = len;
2085 }
2086
2087 static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio,
2088 size_t offset,
2089 struct fuse_file *ff)
2090 {
2091 struct inode *inode = folio->mapping->host;
2092 struct fuse_conn *fc = get_fuse_conn(inode);
2093 struct fuse_writepage_args *wpa;
2094 struct fuse_args_pages *ap;
2095
2096 wpa = fuse_writepage_args_alloc();
2097 if (!wpa)
2098 return NULL;
2099
2100 fuse_writepage_add_to_bucket(fc, wpa);
2101 fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio) + offset, 0);
2102 wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
2103 wpa->inode = inode;
2104 wpa->ia.ff = ff;
2105
2106 ap = &wpa->ia.ap;
2107 ap->args.in_pages = true;
2108 ap->args.end = fuse_writepage_end;
2109
2110 return wpa;
2111 }
2112
2113 struct fuse_fill_wb_data {
2114 struct fuse_writepage_args *wpa;
2115 struct fuse_file *ff;
2116 unsigned int max_folios;
2117 /*
2118 * nr_bytes won't overflow since fuse_folios_need_send() caps
2119 * wb requests to never exceed fc->max_pages (which has an upper bound
2120 * of U16_MAX).
2121 */
2122 unsigned int nr_bytes;
2123 };
2124
2125 static bool fuse_pages_realloc(struct fuse_fill_wb_data *data,
2126 unsigned int max_pages)
2127 {
2128 struct fuse_args_pages *ap = &data->wpa->ia.ap;
2129 struct folio **folios;
2130 struct fuse_folio_desc *descs;
2131 unsigned int nfolios = min_t(unsigned int,
2132 max_t(unsigned int, data->max_folios * 2,
2133 FUSE_DEFAULT_MAX_PAGES_PER_REQ),
2134 max_pages);
2135 WARN_ON(nfolios <= data->max_folios);
2136
2137 folios = fuse_folios_alloc(nfolios, GFP_NOFS, &descs);
2138 if (!folios)
2139 return false;
2140
2141 memcpy(folios, ap->folios, sizeof(struct folio *) * ap->num_folios);
2142 memcpy(descs, ap->descs, sizeof(struct fuse_folio_desc) * ap->num_folios);
2143 kfree(ap->folios);
2144 ap->folios = folios;
2145 ap->descs = descs;
2146 data->max_folios = nfolios;
2147
2148 return true;
2149 }
2150
2151 static void fuse_writepages_send(struct inode *inode,
2152 struct fuse_fill_wb_data *data)
2153 {
2154 struct fuse_writepage_args *wpa = data->wpa;
2155 struct fuse_inode *fi = get_fuse_inode(inode);
2156
2157 spin_lock(&fi->lock);
2158 list_add_tail(&wpa->queue_entry, &fi->queued_writes);
2159 fuse_flush_writepages(inode);
2160 spin_unlock(&fi->lock);
2161 }
2162
2163 static bool fuse_folios_need_send(struct fuse_conn *fc, loff_t pos,
2164 unsigned len, struct fuse_args_pages *ap,
2165 unsigned cur_bytes, bool write)
2166 {
2167 struct folio *prev_folio;
2168 struct fuse_folio_desc prev_desc;
2169 unsigned bytes = cur_bytes + len;
2170 loff_t prev_pos;
2171 size_t max_bytes = write ? fc->max_write : fc->max_read;
2172
2173 WARN_ON(!ap->num_folios);
2174
2175 /* Reached max pages */
2176 if ((bytes + PAGE_SIZE - 1) >> PAGE_SHIFT > fc->max_pages)
2177 return true;
2178
2179 if (bytes > max_bytes)
2180 return true;
2181
2182 /* Discontinuity */
2183 prev_folio = ap->folios[ap->num_folios - 1];
2184 prev_desc = ap->descs[ap->num_folios - 1];
2185 prev_pos = folio_pos(prev_folio) + prev_desc.offset + prev_desc.length;
2186 if (prev_pos != pos)
2187 return true;
2188
2189 return false;
2190 }
2191
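/*
 * Worked example: with 4 KiB pages and fc->max_pages of 256, a request is
 * flushed once it would exceed 1 MiB (or fc->max_write for writes).  The
 * discontinuity check compares byte positions: if the previous folio covers
 * pos 0 with offset 0 and length 4096, prev_pos is 4096, so a range starting
 * at 8192 cannot be appended and forces a new request.
 */
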
2192 static ssize_t fuse_iomap_writeback_range(struct iomap_writepage_ctx *wpc,
2193 struct folio *folio, u64 pos,
2194 unsigned len, u64 end_pos)
2195 {
2196 struct fuse_fill_wb_data *data = wpc->wb_ctx;
2197 struct fuse_writepage_args *wpa = data->wpa;
2198 struct fuse_args_pages *ap = &wpa->ia.ap;
2199 struct inode *inode = wpc->inode;
2200 struct fuse_inode *fi = get_fuse_inode(inode);
2201 struct fuse_conn *fc = get_fuse_conn(inode);
2202 loff_t offset = offset_in_folio(folio, pos);
2203
2204 WARN_ON_ONCE(!data);
2205
2206 if (!data->ff) {
2207 data->ff = fuse_write_file_get(fi);
2208 if (!data->ff)
2209 return -EIO;
2210 }
2211
2212 if (wpa) {
2213 bool send = fuse_folios_need_send(fc, pos, len, ap,
2214 data->nr_bytes, true);
2215
2216 if (!send) {
2217 /*
2218 * Need to grow the pages array? If so, did the
2219 * expansion fail?
2220 */
2221 send = (ap->num_folios == data->max_folios) &&
2222 !fuse_pages_realloc(data, fc->max_pages);
2223 }
2224
2225 if (send) {
2226 fuse_writepages_send(inode, data);
2227 data->wpa = NULL;
2228 data->nr_bytes = 0;
2229 }
2230 }
2231
2232 if (data->wpa == NULL) {
2233 wpa = fuse_writepage_args_setup(folio, offset, data->ff);
2234 if (!wpa)
2235 return -ENOMEM;
2236 fuse_file_get(wpa->ia.ff);
2237 data->max_folios = 1;
2238 ap = &wpa->ia.ap;
2239 }
2240
2241 fuse_writepage_args_page_fill(wpa, folio, ap->num_folios,
2242 offset, len);
2243 data->nr_bytes += len;
2244
2245 ap->num_folios++;
2246 if (!data->wpa)
2247 data->wpa = wpa;
2248
2249 return len;
2250 }
2251
2252 static int fuse_iomap_writeback_submit(struct iomap_writepage_ctx *wpc,
2253 int error)
2254 {
2255 struct fuse_fill_wb_data *data = wpc->wb_ctx;
2256
2257 WARN_ON_ONCE(!data);
2258
2259 if (data->wpa) {
2260 WARN_ON(!data->wpa->ia.ap.num_folios);
2261 fuse_writepages_send(wpc->inode, data);
2262 }
2263
2264 if (data->ff)
2265 fuse_file_put(data->ff, false);
2266
2267 return error;
2268 }
2269
2270 static const struct iomap_writeback_ops fuse_writeback_ops = {
2271 .writeback_range = fuse_iomap_writeback_range,
2272 .writeback_submit = fuse_iomap_writeback_submit,
2273 };
2274
2275 static int fuse_writepages(struct address_space *mapping,
2276 struct writeback_control *wbc)
2277 {
2278 struct inode *inode = mapping->host;
2279 struct fuse_conn *fc = get_fuse_conn(inode);
2280 struct fuse_fill_wb_data data = {};
2281 struct iomap_writepage_ctx wpc = {
2282 .inode = inode,
2283 .iomap.type = IOMAP_MAPPED,
2284 .wbc = wbc,
2285 .ops = &fuse_writeback_ops,
2286 .wb_ctx = &data,
2287 };
2288
2289 if (fuse_is_bad(inode))
2290 return -EIO;
2291
2292 if (wbc->sync_mode == WB_SYNC_NONE &&
2293 fc->num_background >= fc->congestion_threshold)
2294 return 0;
2295
2296 return iomap_writepages(&wpc);
2297 }
2298
2299 static int fuse_launder_folio(struct folio *folio)
2300 {
2301 int err = 0;
2302 struct fuse_fill_wb_data data = {};
2303 struct iomap_writepage_ctx wpc = {
2304 .inode = folio->mapping->host,
2305 .iomap.type = IOMAP_MAPPED,
2306 .ops = &fuse_writeback_ops,
2307 .wb_ctx = &data,
2308 };
2309
2310 if (folio_clear_dirty_for_io(folio)) {
2311 err = iomap_writeback_folio(&wpc, folio);
2312 err = fuse_iomap_writeback_submit(&wpc, err);
2313 if (!err)
2314 folio_wait_writeback(folio);
2315 }
2316 return err;
2317 }
2318
2319 /*
2320 * Write back dirty data/metadata now (there may not be any suitable
2321 * open files later for data)
2322 */
2323 static void fuse_vma_close(struct vm_area_struct *vma)
2324 {
2325 int err;
2326
2327 err = write_inode_now(vma->vm_file->f_mapping->host, 1);
2328 mapping_set_error(vma->vm_file->f_mapping, err);
2329 }
2330
2331 /*
2332 * Wait for writeback against this page to complete before allowing it
2333 * to be marked dirty again, and hence written back again, possibly
2334 * before the previous writepage completed.
2335 *
2336 * Block here, instead of in ->writepage(), so that the userspace fs
2337 * can only block processes actually operating on the filesystem.
2338 *
2339 * Otherwise unprivileged userspace fs would be able to block
2340 * unrelated:
2341 *
2342 * - page migration
2343 * - sync(2)
2344 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
2345 */
2346 static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
2347 {
2348 struct folio *folio = page_folio(vmf->page);
2349 struct inode *inode = file_inode(vmf->vma->vm_file);
2350
2351 file_update_time(vmf->vma->vm_file);
2352 folio_lock(folio);
2353 if (folio->mapping != inode->i_mapping) {
2354 folio_unlock(folio);
2355 return VM_FAULT_NOPAGE;
2356 }
2357
2358 folio_wait_writeback(folio);
2359 return VM_FAULT_LOCKED;
2360 }
2361
2362 static const struct vm_operations_struct fuse_file_vm_ops = {
2363 .close = fuse_vma_close,
2364 .fault = filemap_fault,
2365 .map_pages = filemap_map_pages,
2366 .page_mkwrite = fuse_page_mkwrite,
2367 };
2368
2369 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
2370 {
2371 struct fuse_file *ff = file->private_data;
2372 struct fuse_conn *fc = ff->fm->fc;
2373 struct inode *inode = file_inode(file);
2374 int rc;
2375
2376 /* DAX mmap is superior to direct_io mmap */
2377 if (FUSE_IS_DAX(inode))
2378 return fuse_dax_mmap(file, vma);
2379
2380 /*
2381 * If the inode is in passthrough io mode because it has some file open
2382 * in passthrough mode, either mmap to the backing file or fail the mmap,
2383 * since mixing cached mmap and passthrough io mode is not allowed.
2384 */
2385 if (fuse_file_passthrough(ff))
2386 return fuse_passthrough_mmap(file, vma);
2387 else if (fuse_inode_backing(get_fuse_inode(inode)))
2388 return -ENODEV;
2389
2390 /*
2391 * FOPEN_DIRECT_IO handling is special compared to O_DIRECT,
2392 * as it does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP.
2393 */
2394 if (ff->open_flags & FOPEN_DIRECT_IO) {
2395 /*
2396 * Can't provide the coherency needed for MAP_SHARED
2397 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
2398 */
2399 if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
2400 return -ENODEV;
2401
2402 invalidate_inode_pages2(file->f_mapping);
2403
2404 if (!(vma->vm_flags & VM_MAYSHARE)) {
2405 /* MAP_PRIVATE */
2406 return generic_file_mmap(file, vma);
2407 }
2408
2409 /*
2410 * First mmap of direct_io file enters caching inode io mode.
2411 * Also waits for parallel dio writers to go into serial mode
2412 * (exclusive instead of shared lock).
2413 * After first mmap, the inode stays in caching io mode until
2414 * the direct_io file release.
2415 */
2416 rc = fuse_file_cached_io_open(inode, ff);
2417 if (rc)
2418 return rc;
2419 }
2420
2421 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
2422 fuse_link_write_file(file);
2423
2424 file_accessed(file);
2425 vma->vm_ops = &fuse_file_vm_ops;
2426 return 0;
2427 }
2428
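/*
 * The FOPEN_DIRECT_IO restriction above is visible to applications.  A
 * userspace sketch (illustrative; the path is hypothetical):
 *
 *	int fd = open("/mnt/fuse/file", O_RDWR);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	if (p == MAP_FAILED && errno == ENODEV)
 *		;	// server set FOPEN_DIRECT_IO and the connection
 *			// lacks FUSE_DIRECT_IO_ALLOW_MMAP
 */
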
2429 static int convert_fuse_file_lock(struct fuse_conn *fc,
2430 const struct fuse_file_lock *ffl,
2431 struct file_lock *fl)
2432 {
2433 switch (ffl->type) {
2434 case F_UNLCK:
2435 break;
2436
2437 case F_RDLCK:
2438 case F_WRLCK:
2439 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
2440 ffl->end < ffl->start)
2441 return -EIO;
2442
2443 fl->fl_start = ffl->start;
2444 fl->fl_end = ffl->end;
2445
2446 /*
2447 * Convert pid into init's pid namespace. The locks API will
2448 * translate it into the caller's pid namespace.
2449 */
2450 rcu_read_lock();
2451 fl->c.flc_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
2452 rcu_read_unlock();
2453 break;
2454
2455 default:
2456 return -EIO;
2457 }
2458 fl->c.flc_type = ffl->type;
2459 return 0;
2460 }
2461
2462 static void fuse_lk_fill(struct fuse_args *args, struct file *file,
2463 const struct file_lock *fl, int opcode, pid_t pid,
2464 int flock, struct fuse_lk_in *inarg)
2465 {
2466 struct inode *inode = file_inode(file);
2467 struct fuse_conn *fc = get_fuse_conn(inode);
2468 struct fuse_file *ff = file->private_data;
2469
2470 memset(inarg, 0, sizeof(*inarg));
2471 inarg->fh = ff->fh;
2472 inarg->owner = fuse_lock_owner_id(fc, fl->c.flc_owner);
2473 inarg->lk.start = fl->fl_start;
2474 inarg->lk.end = fl->fl_end;
2475 inarg->lk.type = fl->c.flc_type;
2476 inarg->lk.pid = pid;
2477 if (flock)
2478 inarg->lk_flags |= FUSE_LK_FLOCK;
2479 args->opcode = opcode;
2480 args->nodeid = get_node_id(inode);
2481 args->in_numargs = 1;
2482 args->in_args[0].size = sizeof(*inarg);
2483 args->in_args[0].value = inarg;
2484 }
2485
2486 static int fuse_getlk(struct file *file, struct file_lock *fl)
2487 {
2488 struct inode *inode = file_inode(file);
2489 struct fuse_mount *fm = get_fuse_mount(inode);
2490 FUSE_ARGS(args);
2491 struct fuse_lk_in inarg;
2492 struct fuse_lk_out outarg;
2493 int err;
2494
2495 fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
2496 args.out_numargs = 1;
2497 args.out_args[0].size = sizeof(outarg);
2498 args.out_args[0].value = &outarg;
2499 err = fuse_simple_request(fm, &args);
2500 if (!err)
2501 err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);
2502
2503 return err;
2504 }
2505
2506 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
2507 {
2508 struct inode *inode = file_inode(file);
2509 struct fuse_mount *fm = get_fuse_mount(inode);
2510 FUSE_ARGS(args);
2511 struct fuse_lk_in inarg;
2512 int opcode = (fl->c.flc_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
2513 struct pid *pid = fl->c.flc_type != F_UNLCK ? task_tgid(current) : NULL;
2514 pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
2515 int err;
2516
2517 if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
2518 /* NLM needs asynchronous locks, which we don't support yet */
2519 return -ENOLCK;
2520 }
2521
2522 fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
2523 err = fuse_simple_request(fm, &args);
2524
2525 /* locking is restartable */
2526 if (err == -EINTR)
2527 err = -ERESTARTSYS;
2528
2529 return err;
2530 }
2531
2532 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
2533 {
2534 struct inode *inode = file_inode(file);
2535 struct fuse_conn *fc = get_fuse_conn(inode);
2536 int err;
2537
2538 if (cmd == F_CANCELLK) {
2539 err = 0;
2540 } else if (cmd == F_GETLK) {
2541 if (fc->no_lock) {
2542 posix_test_lock(file, fl);
2543 err = 0;
2544 } else
2545 err = fuse_getlk(file, fl);
2546 } else {
2547 if (fc->no_lock)
2548 err = posix_lock_file(file, fl, NULL);
2549 else
2550 err = fuse_setlk(file, fl, 0);
2551 }
2552 return err;
2553 }
2554
2555 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
2556 {
2557 struct inode *inode = file_inode(file);
2558 struct fuse_conn *fc = get_fuse_conn(inode);
2559 int err;
2560
2561 if (fc->no_flock) {
2562 err = locks_lock_file_wait(file, fl);
2563 } else {
2564 struct fuse_file *ff = file->private_data;
2565
2566 /* emulate flock with POSIX locks */
2567 ff->flock = true;
2568 err = fuse_setlk(file, fl, 1);
2569 }
2570
2571 return err;
2572 }
2573
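/*
 * Both lock paths are driven by the usual userspace APIs.  A sketch
 * (illustrative; fd is assumed to be on a FUSE mount):
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,		// whole file
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	// -> fuse_setlk() with FUSE_SETLKW
 *	flock(fd, LOCK_EX);		// -> fuse_file_flock(); handled locally
 *					// if the server lacks flock support,
 *					// otherwise sent with FUSE_LK_FLOCK
 */
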
2574 static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
2575 {
2576 struct inode *inode = mapping->host;
2577 struct fuse_mount *fm = get_fuse_mount(inode);
2578 FUSE_ARGS(args);
2579 struct fuse_bmap_in inarg;
2580 struct fuse_bmap_out outarg;
2581 int err;
2582
2583 if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
2584 return 0;
2585
2586 memset(&inarg, 0, sizeof(inarg));
2587 inarg.block = block;
2588 inarg.blocksize = inode->i_sb->s_blocksize;
2589 args.opcode = FUSE_BMAP;
2590 args.nodeid = get_node_id(inode);
2591 args.in_numargs = 1;
2592 args.in_args[0].size = sizeof(inarg);
2593 args.in_args[0].value = &inarg;
2594 args.out_numargs = 1;
2595 args.out_args[0].size = sizeof(outarg);
2596 args.out_args[0].value = &outarg;
2597 err = fuse_simple_request(fm, &args);
2598 if (err == -ENOSYS)
2599 fm->fc->no_bmap = 1;
2600
2601 return err ? 0 : outarg.block;
2602 }
2603
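/*
 * FUSE_BMAP backs the legacy FIBMAP ioctl, which is only meaningful for
 * block-device-based (fuseblk) mounts, hence the s_bdev check above.
 * Userspace sketch (illustrative; FIBMAP requires CAP_SYS_RAWIO):
 *
 *	#include <linux/fs.h>
 *
 *	int blk = 0;		// logical block in, physical block out
 *	if (ioctl(fd, FIBMAP, &blk) == 0)
 *		printf("block 0 maps to %d\n", blk);
 */
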
2604 static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
2605 {
2606 struct inode *inode = file->f_mapping->host;
2607 struct fuse_mount *fm = get_fuse_mount(inode);
2608 struct fuse_file *ff = file->private_data;
2609 FUSE_ARGS(args);
2610 struct fuse_lseek_in inarg = {
2611 .fh = ff->fh,
2612 .offset = offset,
2613 .whence = whence
2614 };
2615 struct fuse_lseek_out outarg;
2616 int err;
2617
2618 if (fm->fc->no_lseek)
2619 goto fallback;
2620
2621 args.opcode = FUSE_LSEEK;
2622 args.nodeid = ff->nodeid;
2623 args.in_numargs = 1;
2624 args.in_args[0].size = sizeof(inarg);
2625 args.in_args[0].value = &inarg;
2626 args.out_numargs = 1;
2627 args.out_args[0].size = sizeof(outarg);
2628 args.out_args[0].value = &outarg;
2629 err = fuse_simple_request(fm, &args);
2630 if (err) {
2631 if (err == -ENOSYS) {
2632 fm->fc->no_lseek = 1;
2633 goto fallback;
2634 }
2635 return err;
2636 }
2637
2638 return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);
2639
2640 fallback:
2641 err = fuse_update_attributes(inode, file, STATX_SIZE);
2642 if (!err)
2643 return generic_file_llseek(file, offset, whence);
2644 else
2645 return err;
2646 }
2647
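/*
 * SEEK_HOLE/SEEK_DATA from userspace end up here; when the server returns
 * ENOSYS the fallback refreshes the size and uses generic_file_llseek(),
 * which treats the whole file as data.  Sketch (illustrative):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // first data at/after offset 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // first hole after that data
 */
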
2648 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
2649 {
2650 loff_t retval;
2651 struct inode *inode = file_inode(file);
2652
2653 switch (whence) {
2654 case SEEK_SET:
2655 case SEEK_CUR:
2656 /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
2657 retval = generic_file_llseek(file, offset, whence);
2658 break;
2659 case SEEK_END:
2660 inode_lock(inode);
2661 retval = fuse_update_attributes(inode, file, STATX_SIZE);
2662 if (!retval)
2663 retval = generic_file_llseek(file, offset, whence);
2664 inode_unlock(inode);
2665 break;
2666 case SEEK_HOLE:
2667 case SEEK_DATA:
2668 inode_lock(inode);
2669 retval = fuse_lseek(file, offset, whence);
2670 inode_unlock(inode);
2671 break;
2672 default:
2673 retval = -EINVAL;
2674 }
2675
2676 return retval;
2677 }
2678
2679 /*
2680 * All files which have been polled are linked to RB tree
2681 * fuse_conn->polled_files which is indexed by kh. Walk the tree and
2682 * find the matching one.
2683 */
2684 static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
2685 struct rb_node **parent_out)
2686 {
2687 struct rb_node **link = &fc->polled_files.rb_node;
2688 struct rb_node *last = NULL;
2689
2690 while (*link) {
2691 struct fuse_file *ff;
2692
2693 last = *link;
2694 ff = rb_entry(last, struct fuse_file, polled_node);
2695
2696 if (kh < ff->kh)
2697 link = &last->rb_left;
2698 else if (kh > ff->kh)
2699 link = &last->rb_right;
2700 else
2701 return link;
2702 }
2703
2704 if (parent_out)
2705 *parent_out = last;
2706 return link;
2707 }
2708
2709 /*
2710 * The file is about to be polled. Make sure it's on the polled_files
2711 * RB tree. Note that files once added to the polled_files tree are
2712 * not removed before the file is released. This is because a file
2713 * polled once is likely to be polled again.
2714 */
2715 static void fuse_register_polled_file(struct fuse_conn *fc,
2716 struct fuse_file *ff)
2717 {
2718 spin_lock(&fc->lock);
2719 if (RB_EMPTY_NODE(&ff->polled_node)) {
2720 struct rb_node **link, *parent;
2721
2722 link = fuse_find_polled_node(fc, ff->kh, &parent);
2723 BUG_ON(*link);
2724 rb_link_node(&ff->polled_node, parent, link);
2725 rb_insert_color(&ff->polled_node, &fc->polled_files);
2726 }
2727 spin_unlock(&fc->lock);
2728 }
2729
2730 __poll_t fuse_file_poll(struct file *file, poll_table *wait)
2731 {
2732 struct fuse_file *ff = file->private_data;
2733 struct fuse_mount *fm = ff->fm;
2734 struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
2735 struct fuse_poll_out outarg;
2736 FUSE_ARGS(args);
2737 int err;
2738
2739 if (fm->fc->no_poll)
2740 return DEFAULT_POLLMASK;
2741
2742 poll_wait(file, &ff->poll_wait, wait);
2743 inarg.events = mangle_poll(poll_requested_events(wait));
2744
2745 /*
2746 * Ask for notification iff there's someone waiting for it.
2747 * The client may ignore the flag and always notify.
2748 */
2749 if (waitqueue_active(&ff->poll_wait)) {
2750 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
2751 fuse_register_polled_file(fm->fc, ff);
2752 }
2753
2754 args.opcode = FUSE_POLL;
2755 args.nodeid = ff->nodeid;
2756 args.in_numargs = 1;
2757 args.in_args[0].size = sizeof(inarg);
2758 args.in_args[0].value = &inarg;
2759 args.out_numargs = 1;
2760 args.out_args[0].size = sizeof(outarg);
2761 args.out_args[0].value = &outarg;
2762 err = fuse_simple_request(fm, &args);
2763
2764 if (!err)
2765 return demangle_poll(outarg.revents);
2766 if (err == -ENOSYS) {
2767 fm->fc->no_poll = 1;
2768 return DEFAULT_POLLMASK;
2769 }
2770 return EPOLLERR;
2771 }
2772 EXPORT_SYMBOL_GPL(fuse_file_poll);
2773
2774 /*
2775 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
2776 * wakes up the poll waiters.
2777 */
2778 int fuse_notify_poll_wakeup(struct fuse_conn *fc,
2779 struct fuse_notify_poll_wakeup_out *outarg)
2780 {
2781 u64 kh = outarg->kh;
2782 struct rb_node **link;
2783
2784 spin_lock(&fc->lock);
2785
2786 link = fuse_find_polled_node(fc, kh, NULL);
2787 if (*link) {
2788 struct fuse_file *ff;
2789
2790 ff = rb_entry(*link, struct fuse_file, polled_node);
2791 wake_up_interruptible_sync(&ff->poll_wait);
2792 }
2793
2794 spin_unlock(&fc->lock);
2795 return 0;
2796 }
2797
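/*
 * Server-side counterpart, as a minimal libfuse low-level sketch
 * (illustrative; the handler name and single saved_ph slot are hypothetical
 * simplifications):
 *
 *	static struct fuse_pollhandle *saved_ph;
 *
 *	static void hypothetical_poll(fuse_req_t req, fuse_ino_t ino,
 *				      struct fuse_file_info *fi,
 *				      struct fuse_pollhandle *ph)
 *	{
 *		if (ph) {		// FUSE_POLL_SCHEDULE_NOTIFY was set
 *			if (saved_ph)
 *				fuse_pollhandle_destroy(saved_ph);
 *			saved_ph = ph;
 *		}
 *		fuse_reply_poll(req, POLLIN);
 *	}
 *
 *	// later, when the file becomes readable:
 *	fuse_lowlevel_notify_poll(saved_ph);	// -> fuse_notify_poll_wakeup()
 */
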
2798 static void fuse_do_truncate(struct file *file)
2799 {
2800 struct inode *inode = file->f_mapping->host;
2801 struct iattr attr;
2802
2803 attr.ia_valid = ATTR_SIZE;
2804 attr.ia_size = i_size_read(inode);
2805
2806 attr.ia_file = file;
2807 attr.ia_valid |= ATTR_FILE;
2808
2809 fuse_do_setattr(file_mnt_idmap(file), file_dentry(file), &attr, file);
2810 }
2811
2812 static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
2813 {
2814 return round_up(off, fc->max_pages << PAGE_SHIFT);
2815 }
2816
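/*
 * E.g. with 4 KiB pages and fc->max_pages of 32 this rounds up to 128 KiB
 * units, so the short-read truncation in fuse_direct_IO() below still issues
 * whole max-sized requests rather than an oddly sized trailing one.
 */
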
2817 static ssize_t
2818 fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
2819 {
2820 DECLARE_COMPLETION_ONSTACK(wait);
2821 ssize_t ret = 0;
2822 struct file *file = iocb->ki_filp;
2823 struct fuse_file *ff = file->private_data;
2824 loff_t pos = 0;
2825 struct inode *inode;
2826 loff_t i_size;
2827 size_t count = iov_iter_count(iter), shortened = 0;
2828 loff_t offset = iocb->ki_pos;
2829 struct fuse_io_priv *io;
2830
2831 pos = offset;
2832 inode = file->f_mapping->host;
2833 i_size = i_size_read(inode);
2834
2835 if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
2836 return 0;
2837
2838 io = kmalloc_obj(struct fuse_io_priv, GFP_KERNEL);
2839 if (!io)
2840 return -ENOMEM;
2841 spin_lock_init(&io->lock);
2842 kref_init(&io->refcnt);
2843 io->reqs = 1;
2844 io->bytes = -1;
2845 io->size = 0;
2846 io->offset = offset;
2847 io->write = (iov_iter_rw(iter) == WRITE);
2848 io->err = 0;
2849 /*
2850 * By default, we want to optimize all I/Os with async request
2851 * submission to the client filesystem if supported.
2852 */
2853 io->async = ff->fm->fc->async_dio;
2854 io->iocb = iocb;
2855 io->blocking = is_sync_kiocb(iocb);
2856
2857 /* optimization for short read */
2858 if (io->async && !io->write && offset + count > i_size) {
2859 iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
2860 shortened = count - iov_iter_count(iter);
2861 count -= shortened;
2862 }
2863
2864 /*
2865 * We cannot asynchronously extend the size of a file.
2866 * In that case the aio will behave exactly like sync io.
2867 */
2868 if ((offset + count > i_size) && io->write)
2869 io->blocking = true;
2870
2871 if (io->async && io->blocking) {
2872 /*
2873 * Additional reference to keep io around after
2874 * calling fuse_aio_complete()
2875 */
2876 kref_get(&io->refcnt);
2877 io->done = &wait;
2878 }
2879
2880 if (iov_iter_rw(iter) == WRITE) {
2881 ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
2882 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
2883 } else {
2884 ret = __fuse_direct_read(io, iter, &pos);
2885 }
2886 iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
2887
2888 if (io->async) {
2889 bool blocking = io->blocking;
2890
2891 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
2892
2893 /* we have a non-extending, async request, so return */
2894 if (!blocking)
2895 return -EIOCBQUEUED;
2896
2897 wait_for_completion(&wait);
2898 ret = fuse_get_res_by_io(io);
2899 }
2900
2901 kref_put(&io->refcnt, fuse_io_release);
2902
2903 if (iov_iter_rw(iter) == WRITE) {
2904 fuse_write_update_attr(inode, pos, ret);
2905 /* For extending writes we already hold exclusive lock */
2906 if (ret < 0 && offset + count > i_size)
2907 fuse_do_truncate(file);
2908 }
2909
2910 return ret;
2911 }
2912
/*
 * Note: writeback is deliberately started from @start all the way to EOF
 * (LLONG_MAX rather than @end), so that queued writes beyond @end are
 * flushed as well before the caller proceeds.
 */
2913 static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
2914 {
2915 int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);
2916
2917 if (!err)
2918 fuse_sync_writes(inode);
2919
2920 return err;
2921 }
2922
2923 static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2924 loff_t length)
2925 {
2926 struct fuse_file *ff = file->private_data;
2927 struct inode *inode = file_inode(file);
2928 struct fuse_inode *fi = get_fuse_inode(inode);
2929 struct fuse_mount *fm = ff->fm;
2930 FUSE_ARGS(args);
2931 struct fuse_fallocate_in inarg = {
2932 .fh = ff->fh,
2933 .offset = offset,
2934 .length = length,
2935 .mode = mode
2936 };
2937 int err;
2938 bool block_faults = FUSE_IS_DAX(inode) &&
2939 (!(mode & FALLOC_FL_KEEP_SIZE) ||
2940 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)));
2941
2942 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
2943 FALLOC_FL_ZERO_RANGE))
2944 return -EOPNOTSUPP;
2945
2946 if (fm->fc->no_fallocate)
2947 return -EOPNOTSUPP;
2948
2949 inode_lock(inode);
2950 if (block_faults) {
2951 filemap_invalidate_lock(inode->i_mapping);
2952 err = fuse_dax_break_layouts(inode, 0, -1);
2953 if (err)
2954 goto out;
2955 }
2956
2957 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) {
2958 loff_t endbyte = offset + length - 1;
2959
2960 err = fuse_writeback_range(inode, offset, endbyte);
2961 if (err)
2962 goto out;
2963 }
2964
2965 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2966 offset + length > i_size_read(inode)) {
2967 err = inode_newsize_ok(inode, offset + length);
2968 if (err)
2969 goto out;
2970 }
2971
2972 err = file_modified(file);
2973 if (err)
2974 goto out;
2975
2976 if (!(mode & FALLOC_FL_KEEP_SIZE))
2977 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2978
2979 args.opcode = FUSE_FALLOCATE;
2980 args.nodeid = ff->nodeid;
2981 args.in_numargs = 1;
2982 args.in_args[0].size = sizeof(inarg);
2983 args.in_args[0].value = &inarg;
2984 err = fuse_simple_request(fm, &args);
2985 if (err == -ENOSYS) {
2986 fm->fc->no_fallocate = 1;
2987 err = -EOPNOTSUPP;
2988 }
2989 if (err)
2990 goto out;
2991
2992 /* we could have extended the file */
2993 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
2994 if (fuse_write_update_attr(inode, offset + length, length))
2995 file_update_time(file);
2996 }
2997
2998 if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
2999 truncate_pagecache_range(inode, offset, offset + length - 1);
3000
3001 fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
3002
3003 out:
3004 if (!(mode & FALLOC_FL_KEEP_SIZE))
3005 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3006
3007 if (block_faults)
3008 filemap_invalidate_unlock(inode->i_mapping);
3009
3010 inode_unlock(inode);
3011
3012 fuse_flush_time_update(inode);
3013
3014 return err;
3015 }
3016
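/*
 * Userspace sketch of the supported modes (illustrative):
 *
 *	// punch a hole, keeping i_size (KEEP_SIZE is mandatory for PUNCH_HOLE)
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *
 *	// plain preallocation, may extend the file
 *	if (fallocate(fd, 0, off, len) == -1 && errno == EOPNOTSUPP)
 *		;	// server does not implement FUSE_FALLOCATE
 */
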
3017 static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
3018 struct file *file_out, loff_t pos_out,
3019 size_t len, unsigned int flags)
3020 {
3021 struct fuse_file *ff_in = file_in->private_data;
3022 struct fuse_file *ff_out = file_out->private_data;
3023 struct inode *inode_in = file_inode(file_in);
3024 struct inode *inode_out = file_inode(file_out);
3025 struct fuse_inode *fi_out = get_fuse_inode(inode_out);
3026 struct fuse_mount *fm = ff_in->fm;
3027 struct fuse_conn *fc = fm->fc;
3028 FUSE_ARGS(args);
3029 struct fuse_copy_file_range_in inarg = {
3030 .fh_in = ff_in->fh,
3031 .off_in = pos_in,
3032 .nodeid_out = ff_out->nodeid,
3033 .fh_out = ff_out->fh,
3034 .off_out = pos_out,
3035 .len = len,
3036 .flags = flags
3037 };
3038 struct fuse_write_out outarg;
3039 struct fuse_copy_file_range_out outarg_64;
3040 u64 bytes_copied;
3041 ssize_t err;
3042 /* mark unstable when write-back is not used, and file_out gets
3043 * extended */
3044 bool is_unstable = (!fc->writeback_cache) &&
3045 ((pos_out + len) > inode_out->i_size);
3046
3047 if (fc->no_copy_file_range)
3048 return -EOPNOTSUPP;
3049
3050 if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
3051 return -EXDEV;
3052
3053 inode_lock(inode_in);
3054 err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
3055 inode_unlock(inode_in);
3056 if (err)
3057 return err;
3058
3059 inode_lock(inode_out);
3060
3061 err = file_modified(file_out);
3062 if (err)
3063 goto out;
3064
3065 /*
3066 * Write out dirty pages in the destination file before sending the COPY
3067 * request to userspace. After the request is completed, truncate off
3068 * pages (including partial ones) from the cache that have been copied,
3069 * since these contain stale data at that point.
3070 *
3071 * This should be mostly correct, but if the COPY writes to partial
3072 * pages (at the start or end) and the parts not covered by the COPY are
3073 * written through a memory map after calling fuse_writeback_range(),
3074 * then these partial page modifications will be lost on truncation.
3075 *
3076 * It is unlikely that someone would rely on such mixed style
3077 * modifications. Yet this does give less guarantees than if the
3078 * copying was performed with write(2).
3079 *
3080 * To fix this a mapping->invalidate_lock could be used to prevent new
3081 * faults while the copy is ongoing.
3082 */
3083 err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
3084 if (err)
3085 goto out;
3086
3087 if (is_unstable)
3088 set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3089
3090 args.opcode = FUSE_COPY_FILE_RANGE_64;
3091 args.nodeid = ff_in->nodeid;
3092 args.in_numargs = 1;
3093 args.in_args[0].size = sizeof(inarg);
3094 args.in_args[0].value = &inarg;
3095 args.out_numargs = 1;
3096 args.out_args[0].size = sizeof(outarg_64);
3097 args.out_args[0].value = &outarg_64;
3098 if (fc->no_copy_file_range_64) {
3099 fallback:
3100 /* Fall back to old op that can't handle large copy length */
3101 args.opcode = FUSE_COPY_FILE_RANGE;
3102 args.out_args[0].size = sizeof(outarg);
3103 args.out_args[0].value = &outarg;
3104 inarg.len = len = min_t(size_t, len, UINT_MAX & PAGE_MASK);
3105 }
3106 err = fuse_simple_request(fm, &args);
3107 if (err == -ENOSYS) {
3108 if (fc->no_copy_file_range_64) {
3109 fc->no_copy_file_range = 1;
3110 err = -EOPNOTSUPP;
3111 } else {
3112 fc->no_copy_file_range_64 = 1;
3113 goto fallback;
3114 }
3115 }
3116 if (err)
3117 goto out;
3118
3119 bytes_copied = fc->no_copy_file_range_64 ?
3120 outarg.size : outarg_64.bytes_copied;
3121
3122 if (bytes_copied > len) {
3123 err = -EIO;
3124 goto out;
3125 }
3126
3127 truncate_inode_pages_range(inode_out->i_mapping,
3128 ALIGN_DOWN(pos_out, PAGE_SIZE),
3129 ALIGN(pos_out + bytes_copied, PAGE_SIZE) - 1);
3130
3131 file_update_time(file_out);
3132 fuse_write_update_attr(inode_out, pos_out + bytes_copied, bytes_copied);
3133
3134 err = bytes_copied;
3135 out:
3136 if (is_unstable)
3137 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
3138
3139 inode_unlock(inode_out);
3140 file_accessed(file_in);
3141
3142 fuse_flush_time_update(inode_out);
3143
3144 return err;
3145 }
3146
3147 static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
3148 struct file *dst_file, loff_t dst_off,
3149 size_t len, unsigned int flags)
3150 {
3151 ssize_t ret;
3152
3153 ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
3154 len, flags);
3155
3156 if (ret == -EOPNOTSUPP || ret == -EXDEV)
3157 ret = splice_copy_file_range(src_file, src_off, dst_file,
3158 dst_off, len);
3159 return ret;
3160 }
3161
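/*
 * Userspace sketch (illustrative): a server-side copy is attempted first,
 * and on EOPNOTSUPP/EXDEV the kernel transparently falls back to
 * splice_copy_file_range() through the page cache, as the code above shows.
 *
 *	ssize_t n = copy_file_range(fd_in, NULL, fd_out, NULL, len, 0);
 *	if (n == -1)
 *		perror("copy_file_range");
 */
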
3162 static const struct file_operations fuse_file_operations = {
3163 .llseek = fuse_file_llseek,
3164 .read_iter = fuse_file_read_iter,
3165 .write_iter = fuse_file_write_iter,
3166 .mmap = fuse_file_mmap,
3167 .open = fuse_open,
3168 .flush = fuse_flush,
3169 .release = fuse_release,
3170 .fsync = fuse_fsync,
3171 .lock = fuse_file_lock,
3172 .get_unmapped_area = thp_get_unmapped_area,
3173 .flock = fuse_file_flock,
3174 .splice_read = fuse_splice_read,
3175 .splice_write = fuse_splice_write,
3176 .unlocked_ioctl = fuse_file_ioctl,
3177 .compat_ioctl = fuse_file_compat_ioctl,
3178 .poll = fuse_file_poll,
3179 .fallocate = fuse_file_fallocate,
3180 .copy_file_range = fuse_copy_file_range,
3181 .setlease = generic_setlease,
3182 };
3183
3184 static const struct address_space_operations fuse_file_aops = {
3185 .read_folio = fuse_read_folio,
3186 .readahead = fuse_readahead,
3187 .writepages = fuse_writepages,
3188 .launder_folio = fuse_launder_folio,
3189 .dirty_folio = iomap_dirty_folio,
3190 .release_folio = iomap_release_folio,
3191 .invalidate_folio = iomap_invalidate_folio,
3192 .is_partially_uptodate = iomap_is_partially_uptodate,
3193 .migrate_folio = filemap_migrate_folio,
3194 .bmap = fuse_bmap,
3195 .direct_IO = fuse_direct_IO,
3196 };
3197
3198 void fuse_init_file_inode(struct inode *inode, unsigned int flags)
3199 {
3200 struct fuse_inode *fi = get_fuse_inode(inode);
3201 struct fuse_conn *fc = get_fuse_conn(inode);
3202
3203 inode->i_fop = &fuse_file_operations;
3204 inode->i_data.a_ops = &fuse_file_aops;
3205 if (fc->writeback_cache)
3206 mapping_set_writeback_may_deadlock_on_reclaim(&inode->i_data);
3207
3208 INIT_LIST_HEAD(&fi->write_files);
3209 INIT_LIST_HEAD(&fi->queued_writes);
3210 fi->writectr = 0;
3211 fi->iocachectr = 0;
3212 init_waitqueue_head(&fi->page_waitq);
3213 init_waitqueue_head(&fi->direct_io_waitq);
3214
3215 if (IS_ENABLED(CONFIG_FUSE_DAX))
3216 fuse_dax_inode_init(inode, flags);
3217 }
3218