1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/pagemap.h>
12 #include <linux/file.h>
13 #include <linux/fs_context.h>
14 #include <linux/moduleparam.h>
15 #include <linux/sched.h>
16 #include <linux/namei.h>
17 #include <linux/slab.h>
18 #include <linux/xattr.h>
19 #include <linux/iversion.h>
20 #include <linux/posix_acl.h>
21 #include <linux/security.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24
25 static bool __read_mostly allow_sys_admin_access;
26 module_param(allow_sys_admin_access, bool, 0644);
27 MODULE_PARM_DESC(allow_sys_admin_access,
28 "Allow users with CAP_SYS_ADMIN in initial userns to bypass allow_other access check");
29
/*
 * One bucket of the global dentry-expiry structure: an rb-tree of
 * fuse_dentry objects ordered by expiry time (fuse_dentry->time),
 * protected by a per-bucket spinlock.
 */
struct dentry_bucket {
	struct rb_root tree;
	spinlock_t lock;
};

#define FUSE_HASH_BITS 5
#define FUSE_HASH_SIZE (1 << FUSE_HASH_BITS)
/* Buckets are indexed by a hash of the dentry pointer */
static struct dentry_bucket dentry_hash[FUSE_HASH_SIZE];
/* Periodic scan that evicts expired dentries; armed while inval_wq != 0 */
struct delayed_work dentry_tree_work;

/* Minimum invalidation work queue frequency */
#define FUSE_DENTRY_INVAL_FREQ_MIN 5

/* Scan period in seconds; 0 disables the periodic scan */
unsigned __read_mostly inval_wq;
/*
 * Module-parameter setter for inval_wq.
 *
 * Accepts 0 (disable the periodic scan) or a period of at least
 * FUSE_DENTRY_INVAL_FREQ_MIN seconds, and arms/cancels the delayed work
 * when the value transitions between zero and non-zero.
 *
 * NOTE(review): the read of the old value and the store of the new one
 * are not serialized against concurrent writers; presumably acceptable
 * for a rarely-written tunable — confirm.
 */
static int inval_wq_set(const char *val, const struct kernel_param *kp)
{
	unsigned int num;
	unsigned int old = inval_wq;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtouint(val, 0, &num);
	if (ret)
		return ret;

	/* Non-zero periods below the minimum are rejected */
	if ((num < FUSE_DENTRY_INVAL_FREQ_MIN) && (num != 0))
		return -EINVAL;

	/* This should prevent overflow in secs_to_jiffies() */
	if (num > USHRT_MAX)
		return -EINVAL;

	*((unsigned int *)kp->arg) = num;

	/* Arm on 0 -> non-zero transition, cancel on non-zero -> 0 */
	if (num && !old)
		schedule_delayed_work(&dentry_tree_work,
				      secs_to_jiffies(num));
	else if (!num && old)
		cancel_delayed_work_sync(&dentry_tree_work);

	return 0;
}
/* Custom ops: validated setter above, plain uint getter */
static const struct kernel_param_ops inval_wq_ops = {
	.set = inval_wq_set,
	.get = param_get_uint,
};
module_param_cb(inval_wq, &inval_wq_ops, &inval_wq, 0644);
__MODULE_PARM_TYPE(inval_wq, "uint");
MODULE_PARM_DESC(inval_wq,
		 "Dentries invalidation work queue period in secs (>= "
		 __stringify(FUSE_DENTRY_INVAL_FREQ_MIN) ").");
83
get_dentry_bucket(struct dentry * dentry)84 static inline struct dentry_bucket *get_dentry_bucket(struct dentry *dentry)
85 {
86 int i = hash_ptr(dentry, FUSE_HASH_BITS);
87
88 return &dentry_hash[i];
89 }
90
fuse_advise_use_readdirplus(struct inode * dir)91 static void fuse_advise_use_readdirplus(struct inode *dir)
92 {
93 struct fuse_inode *fi = get_fuse_inode(dir);
94
95 set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state);
96 }
97
/*
 * Per-dentry private data, hung off dentry->d_fsdata.
 */
struct fuse_dentry {
	u64 time;		/* entry timeout, as a 64-bit jiffies value */
	union {
		struct rcu_head rcu;	/* for kfree_rcu() in d_release */
		struct rb_node node;	/* linkage in a dentry_bucket tree */
	};
	struct dentry *dentry;	/* back pointer to the owning dentry */
};
106
__fuse_dentry_tree_del_node(struct fuse_dentry * fd,struct dentry_bucket * bucket)107 static void __fuse_dentry_tree_del_node(struct fuse_dentry *fd,
108 struct dentry_bucket *bucket)
109 {
110 if (!RB_EMPTY_NODE(&fd->node)) {
111 rb_erase(&fd->node, &bucket->tree);
112 RB_CLEAR_NODE(&fd->node);
113 }
114 }
115
fuse_dentry_tree_del_node(struct dentry * dentry)116 static void fuse_dentry_tree_del_node(struct dentry *dentry)
117 {
118 struct fuse_dentry *fd = dentry->d_fsdata;
119 struct dentry_bucket *bucket = get_dentry_bucket(dentry);
120
121 spin_lock(&bucket->lock);
122 __fuse_dentry_tree_del_node(fd, bucket);
123 spin_unlock(&bucket->lock);
124 }
125
/*
 * (Re-)insert a dentry into its bucket's expiry tree, ordered by expiry
 * time.  No-op while the periodic invalidation scan is disabled.
 */
static void fuse_dentry_tree_add_node(struct dentry *dentry)
{
	struct fuse_dentry *fd = dentry->d_fsdata;
	struct dentry_bucket *bucket;
	struct fuse_dentry *cur;
	struct rb_node **p, *parent = NULL;

	if (!inval_wq)
		return;

	bucket = get_dentry_bucket(dentry);

	spin_lock(&bucket->lock);

	/* Drop any stale position before re-inserting with the new time */
	__fuse_dentry_tree_del_node(fd, bucket);

	/* Standard rb-tree insertion, keyed on fd->time */
	p = &bucket->tree.rb_node;
	while (*p) {
		parent = *p;
		cur = rb_entry(*p, struct fuse_dentry, node);
		if (fd->time < cur->time)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&fd->node, parent, p);
	rb_insert_color(&fd->node, &bucket->tree);
	spin_unlock(&bucket->lock);
}
155
/*
 * work queue which, when enabled, will periodically check for expired dentries
 * in the dentries tree.  Expired, unreferenced dentries are collected on a
 * dispose list and shrunk outside the bucket locks.
 */
static void fuse_dentry_tree_work(struct work_struct *work)
{
	LIST_HEAD(dispose);
	struct fuse_dentry *fd;
	struct rb_node *node;
	int i;

	for (i = 0; i < FUSE_HASH_SIZE; i++) {
		spin_lock(&dentry_hash[i].lock);
		/* Entries are time-ordered: always start from the oldest */
		node = rb_first(&dentry_hash[i].tree);
		while (node) {
			fd = rb_entry(node, struct fuse_dentry, node);
			/* Oldest entry not yet expired: bucket is done */
			if (!time_before64(fd->time, get_jiffies_64()))
				break;

			rb_erase(&fd->node, &dentry_hash[i].tree);
			RB_CLEAR_NODE(&fd->node);
			spin_lock(&fd->dentry->d_lock);
			/* If dentry is still referenced, let next dput release it */
			fd->dentry->d_flags |= DCACHE_OP_DELETE;
			spin_unlock(&fd->dentry->d_lock);
			d_dispose_if_unused(fd->dentry, &dispose);
			if (need_resched()) {
				/*
				 * Drop the bucket lock to allow scheduling;
				 * no tree position is kept across this —
				 * the scan restarts from rb_first() below.
				 */
				spin_unlock(&dentry_hash[i].lock);
				cond_resched();
				spin_lock(&dentry_hash[i].lock);
			}
			node = rb_first(&dentry_hash[i].tree);
		}
		spin_unlock(&dentry_hash[i].lock);
	}
	/* Free everything collected above, outside the bucket locks */
	shrink_dentry_list(&dispose);

	/* Re-arm unless the tunable was cleared meanwhile */
	if (inval_wq)
		schedule_delayed_work(&dentry_tree_work,
				      secs_to_jiffies(inval_wq));
}
197
fuse_epoch_work(struct work_struct * work)198 void fuse_epoch_work(struct work_struct *work)
199 {
200 struct fuse_conn *fc = container_of(work, struct fuse_conn,
201 epoch_work);
202 struct fuse_mount *fm;
203 struct inode *inode;
204
205 down_read(&fc->killsb);
206
207 inode = fuse_ilookup(fc, FUSE_ROOT_ID, &fm);
208 if (inode) {
209 iput(inode);
210 /* Remove all possible active references to cached inodes */
211 shrink_dcache_sb(fm->sb);
212 } else
213 pr_warn("Failed to get root inode");
214
215 up_read(&fc->killsb);
216 }
217
fuse_dentry_tree_init(void)218 void fuse_dentry_tree_init(void)
219 {
220 int i;
221
222 for (i = 0; i < FUSE_HASH_SIZE; i++) {
223 spin_lock_init(&dentry_hash[i].lock);
224 dentry_hash[i].tree = RB_ROOT;
225 }
226 INIT_DELAYED_WORK(&dentry_tree_work, fuse_dentry_tree_work);
227 }
228
fuse_dentry_tree_cleanup(void)229 void fuse_dentry_tree_cleanup(void)
230 {
231 int i;
232
233 inval_wq = 0;
234 cancel_delayed_work_sync(&dentry_tree_work);
235
236 for (i = 0; i < FUSE_HASH_SIZE; i++)
237 WARN_ON_ONCE(!RB_EMPTY_ROOT(&dentry_hash[i].tree));
238 }
239
/* Raw store of the entry timeout into the dentry's private data. */
static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time)
{
	struct fuse_dentry *fd = dentry->d_fsdata;

	fd->time = time;
}
244
fuse_dentry_time(const struct dentry * entry)245 static inline u64 fuse_dentry_time(const struct dentry *entry)
246 {
247 return ((struct fuse_dentry *) entry->d_fsdata)->time;
248 }
249
/*
 * Store a new entry timeout and reposition the dentry in the expiry
 * tree.  A zero @time marks the entry as immediately stale.
 */
static void fuse_dentry_settime(struct dentry *dentry, u64 time)
{
	struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
	bool delete = !time && fc->delete_stale;
	/*
	 * Mess with DCACHE_OP_DELETE because dput() will be faster without it.
	 * Don't care about races, either way it's just an optimization
	 */
	if ((!delete && (dentry->d_flags & DCACHE_OP_DELETE)) ||
	    (delete && !(dentry->d_flags & DCACHE_OP_DELETE))) {
		spin_lock(&dentry->d_lock);
		if (!delete)
			dentry->d_flags &= ~DCACHE_OP_DELETE;
		else
			dentry->d_flags |= DCACHE_OP_DELETE;
		spin_unlock(&dentry->d_lock);
	}

	__fuse_dentry_settime(dentry, time);
	fuse_dentry_tree_add_node(dentry);
}
271
272 /*
273 * FUSE caches dentries and attributes with separate timeout. The
274 * time in jiffies until the dentry/attributes are valid is stored in
275 * dentry->d_fsdata and fuse_inode->i_time respectively.
276 */
277
278 /*
279 * Calculate the time in jiffies until a dentry/attributes are valid
280 */
fuse_time_to_jiffies(u64 sec,u32 nsec)281 u64 fuse_time_to_jiffies(u64 sec, u32 nsec)
282 {
283 if (sec || nsec) {
284 struct timespec64 ts = {
285 sec,
286 min_t(u32, nsec, NSEC_PER_SEC - 1)
287 };
288
289 return get_jiffies_64() + timespec64_to_jiffies(&ts);
290 } else
291 return 0;
292 }
293
294 /*
295 * Set dentry and possibly attribute timeouts from the lookup/mk*
296 * replies
297 */
fuse_change_entry_timeout(struct dentry * entry,struct fuse_entry_out * o)298 void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
299 {
300 fuse_dentry_settime(entry,
301 fuse_time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
302 }
303
/* Mark the given STATX_* bits of @inode's cached attributes as stale. */
void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	set_mask_bits(&fi->inval_mask, 0, mask);
}
308
/*
 * Mark the attributes as stale, so that at the next call to
 * ->getattr() they will be fetched from userspace
 */
void fuse_invalidate_attr(struct inode *inode)
{
	/* Invalidate all basic stat attributes at once */
	fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS);
}
317
/*
 * A directory's contents changed (create/unlink/rename/...): drop its
 * cached attributes and bump its version counter.
 */
static void fuse_dir_changed(struct inode *dir)
{
	fuse_invalidate_attr(dir);
	inode_maybe_inc_iversion(dir, false);
}
323
324 /*
325 * Mark the attributes as stale due to an atime change. Avoid the invalidate if
326 * atime is not used.
327 */
fuse_invalidate_atime(struct inode * inode)328 void fuse_invalidate_atime(struct inode *inode)
329 {
330 if (!IS_RDONLY(inode))
331 fuse_invalidate_attr_mask(inode, STATX_ATIME);
332 }
333
/*
 * Just mark the entry as stale, so that a next attempt to look it up
 * will result in a new lookup call to userspace
 *
 * This is called when a dentry is about to become negative and the
 * timeout is unknown (unlink, rmdir, rename and in some cases
 * lookup)
 */
void fuse_invalidate_entry_cache(struct dentry *entry)
{
	/* Zero timeout means "expired now" */
	fuse_dentry_settime(entry, 0);
}
346
/*
 * Same as fuse_invalidate_entry_cache(), but also try to remove the
 * dentry from the hash
 */
static void fuse_invalidate_entry(struct dentry *entry)
{
	d_invalidate(entry);
	fuse_invalidate_entry_cache(entry);
}
356
/*
 * Fill in @args for a FUSE_LOOKUP of @name under @nodeid, pointing the
 * reply at @outarg (zeroed here).
 */
static void fuse_lookup_init(struct fuse_args *args, u64 nodeid,
			     const struct qstr *name,
			     struct fuse_entry_out *outarg)
{
	memset(outarg, 0, sizeof(struct fuse_entry_out));
	args->opcode = FUSE_LOOKUP;
	args->nodeid = nodeid;
	args->in_numargs = 3;
	fuse_set_zero_arg0(args);
	args->in_args[1].size = name->len;
	args->in_args[1].value = name->name;
	/* Trailing NUL for the name, sent as a separate one-byte arg */
	args->in_args[2].size = 1;
	args->in_args[2].value = "";
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(struct fuse_entry_out);
	args->out_args[0].value = outarg;
}
374
/*
 * Check whether the dentry is still valid
 *
 * If the entry validity timeout has expired and the dentry is
 * positive, try to redo the lookup. If the lookup results in a
 * different inode, then let the VFS invalidate the dentry and redo
 * the lookup once more. If the lookup results in the same inode,
 * then refresh the attributes, timeouts and mark the dentry valid.
 *
 * Returns 1 (valid), 0 (invalid) or a negative error (-ECHILD to drop
 * out of RCU walk, -ENOMEM on allocation failure).
 */
static int fuse_dentry_revalidate(struct inode *dir, const struct qstr *name,
				  struct dentry *entry, unsigned int flags)
{
	struct inode *inode;
	struct fuse_mount *fm;
	struct fuse_conn *fc;
	struct fuse_inode *fi;
	int ret;

	/* An epoch bump invalidates every dentry created before it */
	fc = get_fuse_conn_super(dir->i_sb);
	if (entry->d_time < atomic_read(&fc->epoch))
		goto invalid;

	inode = d_inode_rcu(entry);
	if (inode && fuse_is_bad(inode))
		goto invalid;
	else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
		 (flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) {
		struct fuse_entry_out outarg;
		FUSE_ARGS(args);
		struct fuse_forget_link *forget;
		u64 attr_version;

		/* For negative dentries, always do a fresh lookup */
		if (!inode)
			goto invalid;

		/* The re-lookup sleeps, so bail out of RCU walk */
		ret = -ECHILD;
		if (flags & LOOKUP_RCU)
			goto out;

		fm = get_fuse_mount(inode);

		forget = fuse_alloc_forget();
		ret = -ENOMEM;
		if (!forget)
			goto out;

		attr_version = fuse_get_attr_version(fm->fc);

		fuse_lookup_init(&args, get_node_id(dir), name, &outarg);
		ret = fuse_simple_request(fm, &args);
		/* Zero nodeid is same as -ENOENT */
		if (!ret && !outarg.nodeid)
			ret = -ENOENT;
		if (!ret) {
			fi = get_fuse_inode(inode);
			if (outarg.nodeid != get_node_id(inode) ||
			    (bool) IS_AUTOMOUNT(inode) != (bool) (outarg.attr.flags & FUSE_ATTR_SUBMOUNT)) {
				/* Different inode: return the lookup count */
				fuse_queue_forget(fm->fc, forget,
						  outarg.nodeid, 1);
				goto invalid;
			}
			/* Same inode: absorb the extra lookup count */
			spin_lock(&fi->lock);
			fi->nlookup++;
			spin_unlock(&fi->lock);
		}
		kfree(forget);
		if (ret == -ENOMEM || ret == -EINTR)
			goto out;
		if (ret || fuse_invalid_attr(&outarg.attr) ||
		    fuse_stale_inode(inode, outarg.generation, &outarg.attr))
			goto invalid;

		forget_all_cached_acls(inode);
		fuse_change_attributes(inode, &outarg.attr, NULL,
				       ATTR_TIMEOUT(&outarg),
				       attr_version);
		fuse_change_entry_timeout(entry, &outarg);
	} else if (inode) {
		fi = get_fuse_inode(inode);
		if (flags & LOOKUP_RCU) {
			if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
				return -ECHILD;
		} else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
			fuse_advise_use_readdirplus(dir);
		}
	}
	ret = 1;
out:
	return ret;

invalid:
	ret = 0;
	goto out;
}
470
/* d_init: allocate the per-dentry fuse_dentry and start it out invalid */
static int fuse_dentry_init(struct dentry *dentry)
{
	struct fuse_dentry *fd;

	fd = kzalloc_obj(struct fuse_dentry,
			 GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE);
	if (!fd)
		return -ENOMEM;

	fd->dentry = dentry;
	/* Not linked into any expiry tree yet */
	RB_CLEAR_NODE(&fd->node);
	dentry->d_fsdata = fd;
	/*
	 * Initialising d_time (epoch) to '0' ensures the dentry is invalid
	 * if compared to fc->epoch, which is initialized to '1'.
	 */
	dentry->d_time = 0;

	return 0;
}
491
/* d_release: unhook from the expiry tree and free via RCU */
static void fuse_dentry_release(struct dentry *dentry)
{
	struct fuse_dentry *fd = dentry->d_fsdata;

	/*
	 * The unlocked peek is only an optimization; the removal itself
	 * re-checks the node under the bucket lock.
	 */
	if (!RB_EMPTY_NODE(&fd->node))
		fuse_dentry_tree_del_node(dentry);
	kfree_rcu(fd, rcu);
}
500
fuse_dentry_delete(const struct dentry * dentry)501 static int fuse_dentry_delete(const struct dentry *dentry)
502 {
503 return time_before64(fuse_dentry_time(dentry), get_jiffies_64());
504 }
505
/*
 * Create a fuse_mount object with a new superblock (with path->dentry
 * as the root), and return that mount so it can be auto-mounted on
 * @path.
 */
static struct vfsmount *fuse_dentry_automount(struct path *path)
{
	struct fs_context *fsc;
	struct vfsmount *mnt;
	struct fuse_inode *mp_fi = get_fuse_inode(d_inode(path->dentry));

	fsc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
	if (IS_ERR(fsc))
		return ERR_CAST(fsc);

	/* Pass the FUSE inode of the mount for fuse_get_tree_submount() */
	fsc->fs_private = mp_fi;

	/* Create the submount */
	mnt = fc_mount(fsc);
	put_fs_context(fsc);
	return mnt;
}
529
/* Dentry callbacks shared by all fuse dentries */
const struct dentry_operations fuse_dentry_operations = {
	.d_revalidate	= fuse_dentry_revalidate,
	.d_delete	= fuse_dentry_delete,
	.d_init		= fuse_dentry_init,
	.d_release	= fuse_dentry_release,
	.d_automount	= fuse_dentry_automount,
};
537
fuse_valid_type(int m)538 int fuse_valid_type(int m)
539 {
540 return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
541 S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
542 }
543
fuse_valid_size(u64 size)544 static bool fuse_valid_size(u64 size)
545 {
546 return size <= LLONG_MAX;
547 }
548
fuse_invalid_attr(struct fuse_attr * attr)549 bool fuse_invalid_attr(struct fuse_attr *attr)
550 {
551 return !fuse_valid_type(attr->mode) || !fuse_valid_size(attr->size);
552 }
553
/*
 * Look up @name under @nodeid and instantiate the resulting inode.
 *
 * On success *inode holds a reference the caller must iput().  A zero
 * nodeid reply returns 0 with *inode == NULL (negative entry with a
 * valid timeout in @outarg); a server-side -ENOENT propagates as-is.
 * The lookup count is returned to the server via a queued forget if
 * the inode cannot be instantiated.
 */
int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
		     struct fuse_entry_out *outarg, struct inode **inode)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	FUSE_ARGS(args);
	struct fuse_forget_link *forget;
	u64 attr_version, evict_ctr;
	int err;

	*inode = NULL;
	err = -ENAMETOOLONG;
	if (name->len > fm->fc->name_max)
		goto out;


	forget = fuse_alloc_forget();
	err = -ENOMEM;
	if (!forget)
		goto out;

	/* Snapshot counters before the request for fuse_iget() below */
	attr_version = fuse_get_attr_version(fm->fc);
	evict_ctr = fuse_get_evict_ctr(fm->fc);

	fuse_lookup_init(&args, nodeid, name, outarg);
	err = fuse_simple_request(fm, &args);
	/* Zero nodeid is same as -ENOENT, but with valid timeout */
	if (err || !outarg->nodeid)
		goto out_put_forget;

	err = -EIO;
	if (fuse_invalid_attr(&outarg->attr))
		goto out_put_forget;
	if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) {
		pr_warn_once("root generation should be zero\n");
		outarg->generation = 0;
	}

	*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
			   &outarg->attr, ATTR_TIMEOUT(outarg),
			   attr_version, evict_ctr);
	err = -ENOMEM;
	if (!*inode) {
		/* Could not instantiate: give the lookup count back */
		fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1);
		goto out;
	}
	err = 0;

 out_put_forget:
	kfree(forget);
 out:
	return err;
}
606
/* ->lookup(): look up @entry in @dir via FUSE_LOOKUP */
static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
				  unsigned int flags)
{
	struct fuse_entry_out outarg;
	struct fuse_conn *fc;
	struct inode *inode;
	struct dentry *newent;
	int err, epoch;
	bool outarg_valid = true;
	bool locked;

	if (fuse_is_bad(dir))
		return ERR_PTR(-EIO);

	/* Snapshot the epoch before the request; stored in d_time below */
	fc = get_fuse_conn_super(dir->i_sb);
	epoch = atomic_read(&fc->epoch);

	locked = fuse_lock_inode(dir);
	err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
			       &outarg, &inode);
	fuse_unlock_inode(dir, locked);
	if (err == -ENOENT) {
		/* Negative entry without a usable timeout */
		outarg_valid = false;
		err = 0;
	}
	if (err)
		goto out_err;

	/* The root id must never appear as a lookup result */
	err = -EIO;
	if (inode && get_node_id(inode) == FUSE_ROOT_ID)
		goto out_iput;

	newent = d_splice_alias(inode, entry);
	err = PTR_ERR(newent);
	if (IS_ERR(newent))
		goto out_err;

	/* d_splice_alias() may have substituted an alias dentry */
	entry = newent ? newent : entry;
	entry->d_time = epoch;
	if (outarg_valid)
		fuse_change_entry_timeout(entry, &outarg);
	else
		fuse_invalidate_entry_cache(entry);

	if (inode)
		fuse_advise_use_readdirplus(dir);
	return newent;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}
660
/*
 * Build the security-context payload for a newly created object: a
 * fuse_secctx_header followed by at most one (name, LSM context) pair.
 * On success @ext owns a kzalloc'ed buffer the caller must free.
 */
static int get_security_context(struct dentry *entry, umode_t mode,
				struct fuse_in_arg *ext)
{
	struct fuse_secctx *fctx;
	struct fuse_secctx_header *header;
	struct lsm_context lsmctx = { };
	void *ptr;
	u32 total_len = sizeof(*header);
	int err, nr_ctx = 0;
	const char *name = NULL;
	size_t namesize;

	err = security_dentry_init_security(entry, mode, &entry->d_name,
					    &name, &lsmctx);

	/* If no LSM is supporting this security hook ignore error */
	if (err && err != -EOPNOTSUPP)
		goto out_err;

	if (lsmctx.len) {
		nr_ctx = 1;
		namesize = strlen(name) + 1;
		err = -EIO;
		/* Reject implausible name/context sizes from the LSM */
		if (WARN_ON(namesize > XATTR_NAME_MAX + 1 ||
			    lsmctx.len > S32_MAX))
			goto out_err;
		total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namesize +
					    lsmctx.len);
	}

	err = -ENOMEM;
	header = ptr = kzalloc(total_len, GFP_KERNEL);
	if (!ptr)
		goto out_err;

	/* Layout: header | fuse_secctx | name (NUL-terminated) | context */
	header->nr_secctx = nr_ctx;
	header->size = total_len;
	ptr += sizeof(*header);
	if (nr_ctx) {
		fctx = ptr;
		fctx->size = lsmctx.len;
		ptr += sizeof(*fctx);

		strscpy(ptr, name, namesize);
		ptr += namesize;

		memcpy(ptr, lsmctx.context, lsmctx.len);
	}
	ext->size = total_len;
	ext->value = header;
	err = 0;
out_err:
	if (nr_ctx)
		security_release_secctx(&lsmctx);
	return err;
}
717
/*
 * Grow @buf by @bytes zero-initialized bytes and return a pointer to the
 * newly appended region.  On allocation failure the old buffer is freed,
 * @buf is reset to empty and NULL is returned.
 */
static void *extend_arg(struct fuse_in_arg *buf, u32 bytes)
{
	u32 oldlen = buf->size;
	void *grown;

	grown = krealloc(buf->value, oldlen + bytes, GFP_KERNEL);
	if (!grown) {
		kfree(buf->value);
		buf->value = NULL;
		buf->size = 0;
		return NULL;
	}

	buf->value = grown;
	buf->size = oldlen + bytes;
	memset(grown + oldlen, 0, bytes);

	return grown + oldlen;
}
737
/* Size of one extension record: header plus @size payload, aligned */
static u32 fuse_ext_size(size_t size)
{
	return FUSE_REC_ALIGN(sizeof(struct fuse_ext_header) + size);
}
742
/*
 * This adds just a single supplementary group that matches the parent's group.
 */
static int get_create_supp_group(struct mnt_idmap *idmap,
				 struct inode *dir,
				 struct fuse_in_arg *ext)
{
	struct fuse_conn *fc = get_fuse_conn(dir);
	struct fuse_ext_header *xh;
	struct fuse_supp_groups *sg;
	kgid_t kgid = dir->i_gid;
	vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns, kgid);
	gid_t parent_gid = from_kgid(fc->user_ns, kgid);

	u32 sg_len = fuse_ext_size(sizeof(*sg) + sizeof(sg->groups[0]));

	/*
	 * Nothing to add when the gid is unmappable, already the caller's
	 * fsgid, or the caller is not a member of the parent's group.
	 */
	if (parent_gid == (gid_t) -1 || vfsgid_eq_kgid(vfsgid, current_fsgid()) ||
	    !vfsgid_in_group_p(vfsgid))
		return 0;

	xh = extend_arg(ext, sg_len);
	if (!xh)
		return -ENOMEM;

	xh->size = sg_len;
	xh->type = FUSE_EXT_GROUPS;

	/* The groups payload immediately follows the extension header */
	sg = (struct fuse_supp_groups *) &xh[1];
	sg->nr_groups = 1;
	sg->groups[0] = parent_gid;

	return 0;
}
776
/*
 * Collect optional create-time extensions (security context,
 * supplementary group) into a single extra in-arg appended to @args.
 * The payload must later be released with free_ext_value().
 */
static int get_create_ext(struct mnt_idmap *idmap,
			  struct fuse_args *args,
			  struct inode *dir, struct dentry *dentry,
			  umode_t mode)
{
	struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb);
	struct fuse_in_arg ext = { .size = 0, .value = NULL };
	int err = 0;

	if (fc->init_security)
		err = get_security_context(dentry, mode, &ext);
	if (!err && fc->create_supp_group)
		err = get_create_supp_group(idmap, dir, &ext);

	if (!err && ext.size) {
		WARN_ON(args->in_numargs >= ARRAY_SIZE(args->in_args));
		args->is_ext = true;
		args->ext_idx = args->in_numargs++;
		args->in_args[args->ext_idx] = ext;
	} else {
		/* Also drops a partially built buffer on error */
		kfree(ext.value);
	}

	return err;
}
802
free_ext_value(struct fuse_args * args)803 static void free_ext_value(struct fuse_args *args)
804 {
805 if (args->is_ext)
806 kfree(args->in_args[args->ext_idx].value);
807 }
808
/*
 * Atomic create+open operation
 *
 * If the filesystem doesn't support this, then fall back to separate
 * 'mknod' + 'open' requests.
 */
static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir,
			    struct dentry *entry, struct file *file,
			    unsigned int flags, umode_t mode, u32 opcode)
{
	struct inode *inode;
	struct fuse_mount *fm = get_fuse_mount(dir);
	FUSE_ARGS(args);
	struct fuse_forget_link *forget;
	struct fuse_create_in inarg;
	struct fuse_open_out *outopenp;
	struct fuse_entry_out outentry;
	struct fuse_inode *fi;
	struct fuse_file *ff;
	int epoch, err;
	bool trunc = flags & O_TRUNC;

	/* Userspace expects S_IFREG in create mode */
	BUG_ON((mode & S_IFMT) != S_IFREG);

	/* Snapshot the epoch before the request; stored in d_time below */
	epoch = atomic_read(&fm->fc->epoch);
	forget = fuse_alloc_forget();
	err = -ENOMEM;
	if (!forget)
		goto out_err;

	err = -ENOMEM;
	ff = fuse_file_alloc(fm, true);
	if (!ff)
		goto out_put_forget_req;

	if (!fm->fc->dont_mask)
		mode &= ~current_umask();

	flags &= ~O_NOCTTY;
	memset(&inarg, 0, sizeof(inarg));
	memset(&outentry, 0, sizeof(outentry));
	inarg.flags = flags;
	inarg.mode = mode;
	inarg.umask = current_umask();

	/* Truncation may need to drop suid/sgid on the server's side */
	if (fm->fc->handle_killpriv_v2 && trunc &&
	    !(flags & O_EXCL) && !capable(CAP_FSETID)) {
		inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID;
	}

	args.opcode = opcode;
	args.nodeid = get_node_id(dir);
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.in_args[1].size = entry->d_name.len + 1;
	args.in_args[1].value = entry->d_name.name;
	args.out_numargs = 2;
	args.out_args[0].size = sizeof(outentry);
	args.out_args[0].value = &outentry;
	/* Store outarg for fuse_finish_open() */
	outopenp = &ff->args->open_outarg;
	args.out_args[1].size = sizeof(*outopenp);
	args.out_args[1].value = outopenp;

	err = get_create_ext(idmap, &args, dir, entry, mode);
	if (err)
		goto out_free_ff;

	err = fuse_simple_idmap_request(idmap, fm, &args);
	free_ext_value(&args);
	if (err)
		goto out_free_ff;

	/* Sanity-check the reply: must be a regular file with a valid id */
	err = -EIO;
	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
	    fuse_invalid_attr(&outentry.attr))
		goto out_free_ff;

	ff->fh = outopenp->fh;
	ff->nodeid = outentry.nodeid;
	ff->open_flags = outopenp->open_flags;
	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
			  &outentry.attr, ATTR_TIMEOUT(&outentry), 0, 0);
	if (!inode) {
		/* Release the open file handle and the lookup count */
		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
		fuse_sync_release(NULL, ff, flags);
		fuse_queue_forget(fm->fc, forget, outentry.nodeid, 1);
		err = -ENOMEM;
		goto out_err;
	}
	kfree(forget);
	d_instantiate(entry, inode);
	entry->d_time = epoch;
	fuse_change_entry_timeout(entry, &outentry);
	fuse_dir_changed(dir);
	err = generic_file_open(inode, file);
	if (!err) {
		file->private_data = ff;
		err = finish_open(file, entry, fuse_finish_open);
	}
	if (err) {
		fi = get_fuse_inode(inode);
		fuse_sync_release(fi, ff, flags);
	} else {
		/* Bring the page cache in line with the open semantics */
		if (fm->fc->atomic_o_trunc && trunc)
			truncate_pagecache(inode, 0);
		else if (!(ff->open_flags & FOPEN_KEEP_CACHE))
			invalidate_inode_pages2(inode->i_mapping);
	}
	return err;

out_free_ff:
	fuse_file_free(ff);
out_put_forget_req:
	kfree(forget);
out_err:
	return err;
}
929
930 static int fuse_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
931 umode_t, dev_t);
/*
 * ->atomic_open(): try FUSE_CREATE first, falling back to separate
 * mknod + open when the server does not implement it.
 */
static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
			    struct file *file, unsigned flags,
			    umode_t mode)
{
	int err;
	struct mnt_idmap *idmap = file_mnt_idmap(file);
	struct fuse_conn *fc = get_fuse_conn(dir);

	if (fuse_is_bad(dir))
		return -EIO;

	/* Resolve the dentry first if the VFS hasn't looked it up yet */
	if (d_in_lookup(entry)) {
		struct dentry *res = fuse_lookup(dir, entry, 0);
		if (res || d_really_is_positive(entry))
			return finish_no_open(file, res);
	}

	/* Negative dentry and no O_CREAT: nothing to create here */
	if (!(flags & O_CREAT))
		return finish_no_open(file, NULL);

	/* Only creates */
	file->f_mode |= FMODE_CREATED;

	if (fc->no_create)
		goto mknod;

	err = fuse_create_open(idmap, dir, entry, file, flags, mode, FUSE_CREATE);
	if (err == -ENOSYS) {
		/* Server lacks FUSE_CREATE: remember that and fall back */
		fc->no_create = 1;
		goto mknod;
	} else if (err == -EEXIST)
		fuse_invalidate_entry(entry);
	return err;

mknod:
	err = fuse_mknod(idmap, dir, entry, mode, 0);
	if (err)
		return err;
	return finish_no_open(file, NULL);
}
972
/*
 * Code shared between mknod, mkdir, symlink and link
 */
static struct dentry *create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
				       struct fuse_args *args, struct inode *dir,
				       struct dentry *entry, umode_t mode)
{
	struct fuse_entry_out outarg;
	struct inode *inode;
	struct dentry *d;
	struct fuse_forget_link *forget;
	int epoch, err;

	if (fuse_is_bad(dir))
		return ERR_PTR(-EIO);

	/* Snapshot the epoch before the request; stored in d_time below */
	epoch = atomic_read(&fm->fc->epoch);

	forget = fuse_alloc_forget();
	if (!forget)
		return ERR_PTR(-ENOMEM);

	memset(&outarg, 0, sizeof(outarg));
	args->nodeid = get_node_id(dir);
	args->out_numargs = 1;
	args->out_args[0].size = sizeof(outarg);
	args->out_args[0].value = &outarg;

	/* FUSE_LINK carries no create-time extensions */
	if (args->opcode != FUSE_LINK) {
		err = get_create_ext(idmap, args, dir, entry, mode);
		if (err)
			goto out_put_forget_req;
	}

	err = fuse_simple_idmap_request(idmap, fm, args);
	free_ext_value(args);
	if (err)
		goto out_put_forget_req;

	err = -EIO;
	if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
		goto out_put_forget_req;

	/* The reply must carry the same file type we asked for */
	if ((outarg.attr.mode ^ mode) & S_IFMT)
		goto out_put_forget_req;

	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
			  &outarg.attr, ATTR_TIMEOUT(&outarg), 0, 0);
	if (!inode) {
		/* Could not instantiate: give the lookup count back */
		fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
		return ERR_PTR(-ENOMEM);
	}
	kfree(forget);

	d_drop(entry);
	d = d_splice_alias(inode, entry);
	if (IS_ERR(d))
		return d;

	/* d_splice_alias() may have picked an alias dentry instead */
	if (d) {
		d->d_time = epoch;
		fuse_change_entry_timeout(d, &outarg);
	} else {
		entry->d_time = epoch;
		fuse_change_entry_timeout(entry, &outarg);
	}
	fuse_dir_changed(dir);
	return d;

 out_put_forget_req:
	if (err == -EEXIST)
		fuse_invalidate_entry(entry);
	kfree(forget);
	return ERR_PTR(err);
}
1048
/*
 * Wrapper around create_new_entry() for non-directory objects.
 *
 * d_splice_alias() only ever returns an alternate dentry for
 * directories, so here create_new_entry() yields either NULL (success)
 * or an ERR_PTR, and PTR_ERR() maps both to the right int result.
 */
static int create_new_nondir(struct mnt_idmap *idmap, struct fuse_mount *fm,
			     struct fuse_args *args, struct inode *dir,
			     struct dentry *entry, umode_t mode)
{
	struct dentry *res;

	/* Directories must go through create_new_entry() directly */
	WARN_ON_ONCE(S_ISDIR(mode));

	res = create_new_entry(idmap, fm, args, dir, entry, mode);
	return PTR_ERR(res);
}
1064
/* ->mknod(): create a regular or special file via FUSE_MKNOD */
static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
		      struct dentry *entry, umode_t mode, dev_t rdev)
{
	struct fuse_mknod_in inarg;
	struct fuse_mount *fm = get_fuse_mount(dir);
	FUSE_ARGS(args);

	/* Apply the umask here unless the server wants raw modes */
	if (!fm->fc->dont_mask)
		mode &= ~current_umask();

	memset(&inarg, 0, sizeof(inarg));
	inarg.mode = mode;
	inarg.rdev = new_encode_dev(rdev);
	inarg.umask = current_umask();
	args.opcode = FUSE_MKNOD;
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	/* Name is sent NUL-terminated */
	args.in_args[1].size = entry->d_name.len + 1;
	args.in_args[1].value = entry->d_name.name;
	return create_new_nondir(idmap, fm, &args, dir, entry, mode);
}
1087
/*
 * ->create(): implemented as a plain FUSE_MKNOD with rdev == 0.
 * @excl is intentionally ignored here; presumably exclusive create is
 * handled on the open path (atomic open, not visible in this file
 * section) — verify against the callers before relying on that.
 */
static int fuse_create(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *entry, umode_t mode, bool excl)
{
	return fuse_mknod(idmap, dir, entry, mode, 0);
}
1093
/*
 * ->tmpfile(): create an unnamed temporary file via FUSE_TMPFILE.
 * Returns -EOPNOTSUPP if the server does not implement the opcode;
 * the first -ENOSYS reply is latched in fc->no_tmpfile so subsequent
 * calls fail fast without a round trip.
 */
static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
			struct file *file, umode_t mode)
{
	struct fuse_conn *fc = get_fuse_conn(dir);
	int err;

	if (fc->no_tmpfile)
		return -EOPNOTSUPP;

	err = fuse_create_open(idmap, dir, file->f_path.dentry, file,
			       file->f_flags, mode, FUSE_TMPFILE);
	if (err == -ENOSYS) {
		fc->no_tmpfile = 1;
		err = -EOPNOTSUPP;
	}
	return err;
}
1111
/*
 * ->mkdir(): send FUSE_MKDIR.  Unlike the non-directory creators this
 * returns the dentry from create_new_entry(), which may be an alternate
 * dentry produced by d_splice_alias() for directories.
 */
static struct dentry *fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				 struct dentry *entry, umode_t mode)
{
	struct fuse_mkdir_in inarg;
	struct fuse_mount *fm = get_fuse_mount(dir);
	FUSE_ARGS(args);

	/* Mask locally unless the server does its own umask handling */
	if (!fm->fc->dont_mask)
		mode &= ~current_umask();

	memset(&inarg, 0, sizeof(inarg));
	inarg.mode = mode;
	inarg.umask = current_umask();
	args.opcode = FUSE_MKDIR;
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	/* name is sent including its terminating NUL */
	args.in_args[1].size = entry->d_name.len + 1;
	args.in_args[1].value = entry->d_name.name;
	return create_new_entry(idmap, fm, &args, dir, entry, S_IFDIR);
}
1133
/*
 * ->symlink(): send FUSE_SYMLINK with an empty arg 0 (the opcode has
 * no fixed-size input struct; fuse_set_zero_arg0() fills that slot),
 * followed by the link name and the target path, both NUL-terminated.
 */
static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
			struct dentry *entry, const char *link)
{
	struct fuse_mount *fm = get_fuse_mount(dir);
	unsigned len = strlen(link) + 1;
	FUSE_ARGS(args);

	args.opcode = FUSE_SYMLINK;
	args.in_numargs = 3;
	fuse_set_zero_arg0(&args);
	args.in_args[1].size = entry->d_name.len + 1;
	args.in_args[1].value = entry->d_name.name;
	args.in_args[2].size = len;
	args.in_args[2].value = link;
	return create_new_nondir(idmap, fm, &args, dir, entry, S_IFLNK);
}
1150
fuse_flush_time_update(struct inode * inode)1151 void fuse_flush_time_update(struct inode *inode)
1152 {
1153 int err = sync_inode_metadata(inode, 1);
1154
1155 mapping_set_error(inode->i_mapping, err);
1156 }
1157
/*
 * Bump the cached ctime to "now" and schedule it for writeback, unless
 * the inode is flagged NOCMTIME (kernel does not manage c/mtime for it).
 */
static void fuse_update_ctime_in_cache(struct inode *inode)
{
	if (IS_NOCMTIME(inode))
		return;

	inode_set_ctime_current(inode);
	mark_inode_dirty_sync(inode);
	fuse_flush_time_update(inode);
}
1166
/*
 * ctime changed on the server side: invalidate the cached STATX_CTIME
 * attribute, then refresh the local cached copy (and write it back).
 */
void fuse_update_ctime(struct inode *inode)
{
	fuse_invalidate_attr_mask(inode, STATX_CTIME);
	fuse_update_ctime_in_cache(inode);
}
1172
/*
 * Update local state after a successful unlink/rmdir (or after rename
 * made @entry's target disappear): bump the attr version, drop the link
 * count, invalidate the dentry's lookup cache and refresh ctime.
 */
static void fuse_entry_unlinked(struct dentry *entry)
{
	struct inode *inode = d_inode(entry);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* fi->lock serializes attr_version and i_nlink updates */
	spin_lock(&fi->lock);
	fi->attr_version = atomic64_inc_return(&fc->attr_version);
	/*
	 * If i_nlink == 0 then unlink doesn't make sense, yet this can
	 * happen if userspace filesystem is careless.  It would be
	 * difficult to enforce correct nlink usage so just ignore this
	 * condition here
	 */
	if (S_ISDIR(inode->i_mode))
		clear_nlink(inode);
	else if (inode->i_nlink > 0)
		drop_nlink(inode);
	spin_unlock(&fi->lock);
	fuse_invalidate_entry_cache(entry);
	fuse_update_ctime(inode);
}
1195
/*
 * ->unlink(): send FUSE_UNLINK (empty arg 0 plus NUL-terminated name).
 * On success update the directory and victim inode state locally; on
 * -EINTR/-ENOENT the server-side outcome is unknown, so invalidate the
 * entry and let the next lookup resolve it.
 */
static int fuse_unlink(struct inode *dir, struct dentry *entry)
{
	int err;
	struct fuse_mount *fm = get_fuse_mount(dir);
	FUSE_ARGS(args);

	if (fuse_is_bad(dir))
		return -EIO;

	args.opcode = FUSE_UNLINK;
	args.nodeid = get_node_id(dir);
	args.in_numargs = 2;
	fuse_set_zero_arg0(&args);
	args.in_args[1].size = entry->d_name.len + 1;
	args.in_args[1].value = entry->d_name.name;
	err = fuse_simple_request(fm, &args);
	if (!err) {
		fuse_dir_changed(dir);
		fuse_entry_unlinked(entry);
	} else if (err == -EINTR || err == -ENOENT)
		fuse_invalidate_entry(entry);
	return err;
}
1219
/*
 * ->rmdir(): send FUSE_RMDIR.  Mirrors fuse_unlink(): on success update
 * local directory/inode state, on -EINTR/-ENOENT invalidate the entry
 * because the request may or may not have been executed by the server.
 */
static int fuse_rmdir(struct inode *dir, struct dentry *entry)
{
	int err;
	struct fuse_mount *fm = get_fuse_mount(dir);
	FUSE_ARGS(args);

	if (fuse_is_bad(dir))
		return -EIO;

	args.opcode = FUSE_RMDIR;
	args.nodeid = get_node_id(dir);
	args.in_numargs = 2;
	fuse_set_zero_arg0(&args);
	args.in_args[1].size = entry->d_name.len + 1;
	args.in_args[1].value = entry->d_name.name;
	err = fuse_simple_request(fm, &args);
	if (!err) {
		fuse_dir_changed(dir);
		fuse_entry_unlinked(entry);
	} else if (err == -EINTR || err == -ENOENT)
		fuse_invalidate_entry(entry);
	return err;
}
1243
/*
 * Common implementation for FUSE_RENAME and FUSE_RENAME2.
 *
 * @argsize selects how much of fuse_rename2_in is sent: FUSE_RENAME
 * uses the smaller fuse_rename_in layout (a prefix of fuse_rename2_in),
 * FUSE_RENAME2 sends the whole struct including flags.
 */
static int fuse_rename_common(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *oldent,
			      struct inode *newdir, struct dentry *newent,
			      unsigned int flags, int opcode, size_t argsize)
{
	int err;
	struct fuse_rename2_in inarg;
	struct fuse_mount *fm = get_fuse_mount(olddir);
	FUSE_ARGS(args);

	/* Only the first argsize bytes are transmitted (see in_args[0]) */
	memset(&inarg, 0, argsize);
	inarg.newdir = get_node_id(newdir);
	inarg.flags = flags;
	args.opcode = opcode;
	args.nodeid = get_node_id(olddir);
	args.in_numargs = 3;
	args.in_args[0].size = argsize;
	args.in_args[0].value = &inarg;
	args.in_args[1].size = oldent->d_name.len + 1;
	args.in_args[1].value = oldent->d_name.name;
	args.in_args[2].size = newent->d_name.len + 1;
	args.in_args[2].value = newent->d_name.name;
	err = fuse_simple_idmap_request(idmap, fm, &args);
	if (!err) {
		/* ctime changes */
		fuse_update_ctime(d_inode(oldent));

		if (flags & RENAME_EXCHANGE)
			fuse_update_ctime(d_inode(newent));

		fuse_dir_changed(olddir);
		if (olddir != newdir)
			fuse_dir_changed(newdir);

		/* newent will end up negative */
		if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent))
			fuse_entry_unlinked(newent);
	} else if (err == -EINTR || err == -ENOENT) {
		/* If request was interrupted, DEITY only knows if the
		   rename actually took place.  If the invalidation
		   fails (e.g. some process has CWD under the renamed
		   directory), then there can be inconsistency between
		   the dcache and the real filesystem.  Tough luck. */
		fuse_invalidate_entry(oldent);
		if (d_really_is_positive(newent))
			fuse_invalidate_entry(newent);
	}

	return err;
}
1293
/*
 * ->rename(): dispatch to FUSE_RENAME2 when flags are given (requires
 * protocol >= 7.23 and server support), otherwise plain FUSE_RENAME.
 * A FUSE_RENAME2 -ENOSYS reply is latched in fc->no_rename2 and
 * reported as -EINVAL, matching the "flags unsupported" contract.
 */
static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir,
			struct dentry *oldent, struct inode *newdir,
			struct dentry *newent, unsigned int flags)
{
	struct fuse_conn *fc = get_fuse_conn(olddir);
	int err;

	if (fuse_is_bad(olddir))
		return -EIO;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags) {
		if (fc->no_rename2 || fc->minor < 23)
			return -EINVAL;

		/*
		 * The idmap is only passed through for RENAME_WHITEOUT
		 * (which creates a new object); other flagged renames
		 * use invalid_mnt_idmap like plain rename.
		 */
		err = fuse_rename_common((flags & RENAME_WHITEOUT) ? idmap : &invalid_mnt_idmap,
					 olddir, oldent, newdir, newent, flags,
					 FUSE_RENAME2,
					 sizeof(struct fuse_rename2_in));
		if (err == -ENOSYS) {
			fc->no_rename2 = 1;
			err = -EINVAL;
		}
	} else {
		err = fuse_rename_common(&invalid_mnt_idmap, olddir, oldent, newdir, newent, 0,
					 FUSE_RENAME,
					 sizeof(struct fuse_rename_in));
	}

	return err;
}
1327
/*
 * ->link(): send FUSE_LINK to create a hard link to @entry's inode as
 * @newent in @newdir.  If the server answered -ENOSYS once, no_link is
 * latched and this (and all later calls, which skip the request) return
 * -EPERM.  On -EINTR the server-side link count is unknown, so the
 * cached attributes are invalidated.
 */
static int fuse_link(struct dentry *entry, struct inode *newdir,
		     struct dentry *newent)
{
	int err;
	struct fuse_link_in inarg;
	struct inode *inode = d_inode(entry);
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);

	/* err is never read on this path: the out label returns -EPERM */
	if (fm->fc->no_link)
		goto out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.oldnodeid = get_node_id(inode);
	args.opcode = FUSE_LINK;
	args.in_numargs = 2;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.in_args[1].size = newent->d_name.len + 1;
	args.in_args[1].value = newent->d_name.name;
	err = create_new_nondir(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
	if (!err)
		/* A successful link bumps ctime on the target inode */
		fuse_update_ctime_in_cache(inode);
	else if (err == -EINTR)
		fuse_invalidate_attr(inode);

	if (err == -ENOSYS)
		fm->fc->no_link = 1;
out:
	if (fm->fc->no_link)
		return -EPERM;

	return err;
}
1362
fuse_fillattr(struct mnt_idmap * idmap,struct inode * inode,struct fuse_attr * attr,struct kstat * stat)1363 static void fuse_fillattr(struct mnt_idmap *idmap, struct inode *inode,
1364 struct fuse_attr *attr, struct kstat *stat)
1365 {
1366 unsigned int blkbits;
1367 struct fuse_conn *fc = get_fuse_conn(inode);
1368 vfsuid_t vfsuid = make_vfsuid(idmap, fc->user_ns,
1369 make_kuid(fc->user_ns, attr->uid));
1370 vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns,
1371 make_kgid(fc->user_ns, attr->gid));
1372
1373 stat->dev = inode->i_sb->s_dev;
1374 stat->ino = attr->ino;
1375 stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
1376 stat->nlink = attr->nlink;
1377 stat->uid = vfsuid_into_kuid(vfsuid);
1378 stat->gid = vfsgid_into_kgid(vfsgid);
1379 stat->rdev = inode->i_rdev;
1380 stat->atime.tv_sec = attr->atime;
1381 stat->atime.tv_nsec = attr->atimensec;
1382 stat->mtime.tv_sec = attr->mtime;
1383 stat->mtime.tv_nsec = attr->mtimensec;
1384 stat->ctime.tv_sec = attr->ctime;
1385 stat->ctime.tv_nsec = attr->ctimensec;
1386 stat->size = attr->size;
1387 stat->blocks = attr->blocks;
1388
1389 if (attr->blksize != 0)
1390 blkbits = ilog2(attr->blksize);
1391 else
1392 blkbits = inode->i_sb->s_blocksize_bits;
1393
1394 stat->blksize = 1 << blkbits;
1395 }
1396
fuse_statx_to_attr(struct fuse_statx * sx,struct fuse_attr * attr)1397 static void fuse_statx_to_attr(struct fuse_statx *sx, struct fuse_attr *attr)
1398 {
1399 memset(attr, 0, sizeof(*attr));
1400 attr->ino = sx->ino;
1401 attr->size = sx->size;
1402 attr->blocks = sx->blocks;
1403 attr->atime = sx->atime.tv_sec;
1404 attr->mtime = sx->mtime.tv_sec;
1405 attr->ctime = sx->ctime.tv_sec;
1406 attr->atimensec = sx->atime.tv_nsec;
1407 attr->mtimensec = sx->mtime.tv_nsec;
1408 attr->ctimensec = sx->ctime.tv_nsec;
1409 attr->mode = sx->mode;
1410 attr->nlink = sx->nlink;
1411 attr->uid = sx->uid;
1412 attr->gid = sx->gid;
1413 attr->rdev = new_encode_dev(MKDEV(sx->rdev_major, sx->rdev_minor));
1414 attr->blksize = sx->blksize;
1415 }
1416
/*
 * Issue a FUSE_STATX request for @inode, validate the reply, refresh
 * the attribute cache and (if @stat is non-NULL) fill in the kstat.
 * Returns 0 or a negative errno; -ENOSYS means the server lacks statx
 * support (handled by the caller).
 */
static int fuse_do_statx(struct mnt_idmap *idmap, struct inode *inode,
			 struct file *file, struct kstat *stat)
{
	int err;
	struct fuse_attr attr;
	struct fuse_statx *sx;
	struct fuse_statx_in inarg;
	struct fuse_statx_out outarg;
	struct fuse_mount *fm = get_fuse_mount(inode);
	/* Sampled before the request to detect concurrent updates */
	u64 attr_version = fuse_get_attr_version(fm->fc);
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));
	/* Directories have separate file-handle space */
	if (file && S_ISREG(inode->i_mode)) {
		struct fuse_file *ff = file->private_data;

		inarg.getattr_flags |= FUSE_GETATTR_FH;
		inarg.fh = ff->fh;
	}
	/* For now leave sync hints as the default, request all stats. */
	inarg.sx_flags = 0;
	inarg.sx_mask = STATX_BASIC_STATS | STATX_BTIME;
	args.opcode = FUSE_STATX;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (err)
		return err;

	/*
	 * A reply whose size or file type is nonsensical marks the inode
	 * bad: the server can no longer be trusted for this node.
	 */
	sx = &outarg.stat;
	if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) ||
	    ((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) ||
					 inode_wrong_type(inode, sx->mode)))) {
		fuse_make_bad(inode);
		return -EIO;
	}

	/* Only cache the attributes if the server returned a full set */
	fuse_statx_to_attr(&outarg.stat, &attr);
	if ((sx->mask & STATX_BASIC_STATS) == STATX_BASIC_STATS) {
		fuse_change_attributes(inode, &attr, &outarg.stat,
				       ATTR_TIMEOUT(&outarg), attr_version);
	}

	if (stat) {
		stat->result_mask = sx->mask & (STATX_BASIC_STATS | STATX_BTIME);
		stat->btime.tv_sec = sx->btime.tv_sec;
		/* Clamp nsec so a misbehaving server can't exceed 1s */
		stat->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
		fuse_fillattr(idmap, inode, &attr, stat);
		stat->result_mask |= STATX_TYPE;
	}

	return 0;
}
1477
/*
 * Issue a classic FUSE_GETATTR request, refresh the attribute cache
 * and optionally fill @stat.  The legacy fallback for servers without
 * FUSE_STATX support.
 */
static int fuse_do_getattr(struct mnt_idmap *idmap, struct inode *inode,
			   struct kstat *stat, struct file *file)
{
	int err;
	struct fuse_getattr_in inarg;
	struct fuse_attr_out outarg;
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	u64 attr_version;

	/* Sampled before the request to detect concurrent updates */
	attr_version = fuse_get_attr_version(fm->fc);

	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));
	/* Directories have separate file-handle space */
	if (file && S_ISREG(inode->i_mode)) {
		struct fuse_file *ff = file->private_data;

		inarg.getattr_flags |= FUSE_GETATTR_FH;
		inarg.fh = ff->fh;
	}
	args.opcode = FUSE_GETATTR;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(outarg);
	args.out_args[0].value = &outarg;
	err = fuse_simple_request(fm, &args);
	if (!err) {
		/* A bogus reply poisons the inode rather than the cache */
		if (fuse_invalid_attr(&outarg.attr) ||
		    inode_wrong_type(inode, outarg.attr.mode)) {
			fuse_make_bad(inode);
			err = -EIO;
		} else {
			fuse_change_attributes(inode, &outarg.attr, NULL,
					       ATTR_TIMEOUT(&outarg),
					       attr_version);
			if (stat)
				fuse_fillattr(idmap, inode, &outarg.attr, stat);
		}
	}
	return err;
}
1523
/*
 * Decide whether a getattr round trip to the server is needed and
 * perform it, otherwise serve @stat from the cached inode attributes.
 * Used both for ->getattr() and for internal attribute refreshes
 * (@stat == NULL).
 */
static int fuse_update_get_attr(struct mnt_idmap *idmap, struct inode *inode,
				struct file *file, struct kstat *stat,
				u32 request_mask, unsigned int flags)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	bool sync;
	u32 inval_mask = READ_ONCE(fi->inval_mask);
	u32 cache_mask = fuse_get_cache_mask(inode);

	/* FUSE only supports basic stats and possibly btime */
	request_mask &= STATX_BASIC_STATS | STATX_BTIME;
retry:
	/* Without statx support btime can never be served */
	if (fc->no_statx)
		request_mask &= STATX_BASIC_STATS;

	/*
	 * Sync decision ladder: nothing requested -> no sync; explicit
	 * AT_STATX_* flag wins; otherwise sync if an invalidated,
	 * non-write-cached attribute is requested or the attr timeout
	 * has expired.
	 */
	if (!request_mask)
		sync = false;
	else if (flags & AT_STATX_FORCE_SYNC)
		sync = true;
	else if (flags & AT_STATX_DONT_SYNC)
		sync = false;
	else if (request_mask & inval_mask & ~cache_mask)
		sync = true;
	else
		sync = time_before64(fi->i_time, get_jiffies_64());

	if (sync) {
		forget_all_cached_acls(inode);
		/* Try statx if BTIME is requested */
		if (!fc->no_statx && (request_mask & ~STATX_BASIC_STATS)) {
			err = fuse_do_statx(idmap, inode, file, stat);
			if (err == -ENOSYS) {
				/* Latch lack of statx and redo via getattr */
				fc->no_statx = 1;
				err = 0;
				goto retry;
			}
		} else {
			err = fuse_do_getattr(idmap, inode, stat, file);
		}
	} else if (stat) {
		/* Serve from cache, restoring FUSE-private mode/ino */
		generic_fillattr(idmap, request_mask, inode, stat);
		stat->mode = fi->orig_i_mode;
		stat->ino = fi->orig_ino;
		stat->blksize = 1 << fi->cached_i_blkbits;
		if (test_bit(FUSE_I_BTIME, &fi->state)) {
			stat->btime = fi->i_btime;
			stat->result_mask |= STATX_BTIME;
		}
	}

	return err;
}
1579
/*
 * Refresh the attributes in @mask if stale; does not report them back
 * (stat == NULL) and uses the nop idmap since no uid/gid mapping is
 * required when nothing is returned to userspace.
 */
int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask)
{
	return fuse_update_get_attr(&nop_mnt_idmap, inode, file, NULL, mask, 0);
}
1584
/*
 * Server-initiated entry invalidation: drop the dentry @name in the
 * directory identified by @parent_nodeid.  With FUSE_EXPIRE_ONLY the
 * entry is merely expired (revalidated on next lookup) instead of
 * being invalidated outright.  A non-zero @child_nodeid additionally
 * deletes the entry like unlink/rmdir would, after verifying that it
 * still refers to that node.
 */
int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid,
			     u64 child_nodeid, struct qstr *name, u32 flags)
{
	int err = -ENOTDIR;
	struct inode *parent;
	struct dentry *dir = NULL;
	struct dentry *entry = NULL;

	parent = fuse_ilookup(fc, parent_nodeid, NULL);
	if (!parent)
		return -ENOENT;

	if (!S_ISDIR(parent->i_mode))
		goto put_parent;

	err = -ENOENT;
	dir = d_find_alias(parent);
	if (!dir)
		goto put_parent;
	/*
	 * Look the child up and lock it for removal.  Retry if it was
	 * renamed between the lookup and acquiring the removal lock.
	 */
	while (!entry) {
		struct dentry *child = try_lookup_noperm(name, dir);
		if (!child || IS_ERR(child))
			goto put_parent;
		entry = start_removing_dentry(dir, child);
		dput(child);
		if (IS_ERR(entry))
			goto put_parent;
		if (!d_same_name(entry, dir, name)) {
			end_removing(entry);
			entry = NULL;
		}
	}

	fuse_dir_changed(parent);
	if (!(flags & FUSE_EXPIRE_ONLY))
		d_invalidate(entry);
	fuse_invalidate_entry_cache(entry);

	if (child_nodeid != 0) {
		inode_lock(d_inode(entry));
		/* The dentry may have been re-pointed at another node */
		if (get_node_id(d_inode(entry)) != child_nodeid) {
			err = -ENOENT;
			goto badentry;
		}
		if (d_mountpoint(entry)) {
			err = -EBUSY;
			goto badentry;
		}
		if (d_is_dir(entry)) {
			shrink_dcache_parent(entry);
			/* Can't delete a directory that still has children */
			if (!simple_empty(entry)) {
				err = -ENOTEMPTY;
				goto badentry;
			}
			d_inode(entry)->i_flags |= S_DEAD;
		}
		dont_mount(entry);
		clear_nlink(d_inode(entry));
		err = 0;
 badentry:
		inode_unlock(d_inode(entry));
		if (!err)
			d_delete(entry);
	} else {
		err = 0;
	}

	end_removing(entry);
 put_parent:
	dput(dir);
	iput(parent);
	return err;
}
1658
fuse_permissible_uidgid(struct fuse_conn * fc)1659 static inline bool fuse_permissible_uidgid(struct fuse_conn *fc)
1660 {
1661 const struct cred *cred = current_cred();
1662
1663 return (uid_eq(cred->euid, fc->user_id) &&
1664 uid_eq(cred->suid, fc->user_id) &&
1665 uid_eq(cred->uid, fc->user_id) &&
1666 gid_eq(cred->egid, fc->group_id) &&
1667 gid_eq(cred->sgid, fc->group_id) &&
1668 gid_eq(cred->gid, fc->group_id));
1669 }
1670
1671 /*
1672 * Calling into a user-controlled filesystem gives the filesystem
1673 * daemon ptrace-like capabilities over the current process. This
1674 * means, that the filesystem daemon is able to record the exact
1675 * filesystem operations performed, and can also control the behavior
1676 * of the requester process in otherwise impossible ways. For example
1677 * it can delay the operation for arbitrary length of time allowing
1678 * DoS against the requester.
1679 *
1680 * For this reason only those processes can call into the filesystem,
1681 * for which the owner of the mount has ptrace privilege. This
1682 * excludes processes started by other users, suid or sgid processes.
1683 */
fuse_allow_current_process(struct fuse_conn * fc)1684 bool fuse_allow_current_process(struct fuse_conn *fc)
1685 {
1686 bool allow;
1687
1688 if (fc->allow_other)
1689 allow = current_in_userns(fc->user_ns);
1690 else
1691 allow = fuse_permissible_uidgid(fc);
1692
1693 if (!allow && allow_sys_admin_access && capable(CAP_SYS_ADMIN))
1694 allow = true;
1695
1696 return allow;
1697 }
1698
/*
 * Send FUSE_ACCESS so the server can decide an access(2)/chdir check.
 * A single -ENOSYS reply latches fc->no_access and all further checks
 * succeed locally without a round trip.
 */
static int fuse_access(struct inode *inode, int mask)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_access_in inarg;
	int err;

	/* Blocking request; RCU-walk callers must not get here */
	BUG_ON(mask & MAY_NOT_BLOCK);

	/*
	 * We should not send FUSE_ACCESS to the userspace
	 * when idmapped mounts are enabled as for this case
	 * we have fc->default_permissions = 1 and access
	 * permission checks are done on the kernel side.
	 */
	WARN_ON_ONCE(!(fm->sb->s_iflags & SB_I_NOIDMAP));

	if (fm->fc->no_access)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	/* Only the rwx bits are meaningful to the server */
	inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC);
	args.opcode = FUSE_ACCESS;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_access = 1;
		err = 0;
	}
	return err;
}
1733
/*
 * Refresh attributes for a permission check.  Refuses to run in
 * non-blocking (MAY_NOT_BLOCK) context, since a server round trip
 * would sleep; the caller retries in blocking mode on -ECHILD.
 */
static int fuse_perm_getattr(struct inode *inode, int mask)
{
	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	/* Mode/ownership may have changed, so cached ACLs are stale too */
	forget_all_cached_acls(inode);
	return fuse_do_getattr(&nop_mnt_idmap, inode, NULL, NULL);
}
1742
1743 /*
1744 * Check permission. The two basic access models of FUSE are:
1745 *
1746 * 1) Local access checking ('default_permissions' mount option) based
1747 * on file mode. This is the plain old disk filesystem permission
1748 * model.
1749 *
1750 * 2) "Remote" access checking, where server is responsible for
1751 * checking permission in each inode operation. An exception to this
1752 * is if ->permission() was invoked from sys_access() in which case an
1753 * access request is sent. Execute permission is still checked
1754 * locally based on file mode.
1755 */
static int fuse_permission(struct mnt_idmap *idmap,
			   struct inode *inode, int mask)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	bool refreshed = false;
	int err = 0;

	if (fuse_is_bad(inode))
		return -EIO;

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	/*
	 * If attributes are needed, refresh them before proceeding
	 */
	if (fc->default_permissions ||
	    ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
		struct fuse_inode *fi = get_fuse_inode(inode);
		u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;

		/* Refresh if the permission attrs were invalidated or timed out */
		if (perm_mask & READ_ONCE(fi->inval_mask) ||
		    time_before64(fi->i_time, get_jiffies_64())) {
			refreshed = true;

			err = fuse_perm_getattr(inode, mask);
			if (err)
				return err;
		}
	}

	if (fc->default_permissions) {
		err = generic_permission(idmap, inode, mask);

		/* If permission is denied, try to refresh file
		   attributes.  This is also needed, because the root
		   node will at first have no permissions */
		if (err == -EACCES && !refreshed) {
			err = fuse_perm_getattr(inode, mask);
			if (!err)
				err = generic_permission(idmap,
							 inode, mask);
		}

		/* Note: the opposite of the above test does not
		   exist.  So if permissions are revoked this won't be
		   noticed immediately, only after the attribute
		   timeout has expired */
	} else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
		/* Remote model: let the server decide access(2)/chdir */
		err = fuse_access(inode, mask);
	} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
		/* Execute is still checked locally against the mode bits */
		if (!(inode->i_mode & S_IXUGO)) {
			if (refreshed)
				return -EACCES;

			err = fuse_perm_getattr(inode, mask);
			if (!err && !(inode->i_mode & S_IXUGO))
				return -EACCES;
		}
	}
	return err;
}
1818
/*
 * Read a symlink target into @folio via FUSE_READLINK and NUL-terminate
 * it.  One byte of the folio is reserved for the terminator, and
 * out_argvar lets the server return fewer bytes than requested.
 */
static int fuse_readlink_folio(struct inode *inode, struct folio *folio)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_folio_desc desc = { .length = folio_size(folio) - 1 };
	struct fuse_args_pages ap = {
		.num_folios = 1,
		.folios = &folio,
		.descs = &desc,
	};
	char *link;
	ssize_t res;

	ap.args.opcode = FUSE_READLINK;
	ap.args.nodeid = get_node_id(inode);
	ap.args.out_pages = true;
	ap.args.out_argvar = true;
	ap.args.page_zeroing = true;
	ap.args.out_numargs = 1;
	ap.args.out_args[0].size = desc.length;
	res = fuse_simple_request(fm, &ap.args);

	/* Reading the link counts as an access on the server side */
	fuse_invalidate_atime(inode);

	if (res < 0)
		return res;

	/* res <= desc.length < folio size; anything else is a bug */
	if (WARN_ON(res >= PAGE_SIZE))
		return -EIO;

	link = folio_address(folio);
	link[res] = '\0';

	return 0;
}
1853
/*
 * ->get_link(): return the symlink target.  With cache_symlinks the
 * page cache is used (page_get_link_raw); otherwise a throwaway folio
 * is filled via FUSE_READLINK and released through the delayed call.
 * RCU-walk (@dentry == NULL) cannot issue a request, hence -ECHILD.
 */
static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
				 struct delayed_call *callback)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct folio *folio;
	int err;

	err = -EIO;
	if (fuse_is_bad(inode))
		goto out_err;

	if (fc->cache_symlinks)
		return page_get_link_raw(dentry, inode, callback);

	err = -ECHILD;
	if (!dentry)
		goto out_err;

	folio = folio_alloc(GFP_KERNEL, 0);
	err = -ENOMEM;
	if (!folio)
		goto out_err;

	err = fuse_readlink_folio(inode, folio);
	if (err) {
		folio_put(folio);
		goto out_err;
	}

	/* page_put_link() drops the folio when the walk is done */
	set_delayed_call(callback, page_put_link, folio);

	return folio_address(folio);

out_err:
	return ERR_PTR(err);
}
1890
/*
 * ->open() for directories: send FUSE_OPENDIR (isdir = true) and honour
 * the server's open flags on the resulting fuse_file.
 */
static int fuse_dir_open(struct inode *inode, struct file *file)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fm, get_node_id(inode), file, true);
	if (!err) {
		struct fuse_file *ff = file->private_data;

		/*
		 * Keep handling FOPEN_STREAM and FOPEN_NONSEEKABLE for
		 * directories for backward compatibility, though it's unlikely
		 * to be useful.
		 */
		if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE))
			nonseekable_open(inode, file);
		/* Unless the server wants the cache kept, drop readdir pages */
		if (!(ff->open_flags & FOPEN_KEEP_CACHE))
			invalidate_inode_pages2(inode->i_mapping);
	}

	return err;
}
1920
/*
 * ->release() for directories: common release path with isdir = true
 * (FUSE_RELEASEDIR).  Release errors are not reported to close(2).
 */
static int fuse_dir_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, true);

	return 0;
}
1927
/*
 * ->fsync() for directories via FUSE_FSYNCDIR.  An -ENOSYS reply
 * latches fc->no_fsyncdir so later calls succeed without a round trip.
 */
static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	if (fc->no_fsyncdir)
		return 0;

	/* Serialize against other modifications of the directory */
	inode_lock(inode);
	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR);
	if (err == -ENOSYS) {
		fc->no_fsyncdir = 1;
		err = 0;
	}
	inode_unlock(inode);

	return err;
}
1951
/*
 * ->unlocked_ioctl() for directories: forwarded to the server with the
 * FUSE_IOCTL_DIR flag.
 */
static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);

	/* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
	if (fc->minor < 18)
		return -ENOTTY;

	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR);
}
1963
/*
 * ->compat_ioctl() for directories: same as fuse_dir_ioctl() but the
 * FUSE_IOCTL_COMPAT flag tells the server the caller is 32-bit.
 */
static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host);

	/* FUSE_IOCTL_DIR only supported for API version >= 7.18 */
	if (fc->minor < 18)
		return -ENOTTY;

	return fuse_ioctl_common(file, cmd, arg,
				 FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR);
}
1975
/*
 * Decide whether a setattr should transmit mtime to the server.
 */
static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
{
	/* An explicitly supplied mtime (utimes-style) always goes out */
	if (ivalid & ATTR_MTIME_SET)
		return true;

	/* Likewise when the kernel's cached mtime is the official one */
	if (trust_local_mtime)
		return true;

	/*
	 * Truncations via open(O_TRUNC) or ftruncate() carry
	 * ATTR_OPEN/ATTR_FILE together with ATTR_SIZE; don't send
	 * mtime for those.  Everything else does update.
	 */
	return !((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)));
}
1993
/*
 * Translate a VFS iattr into the fuse_setattr_in wire format, setting
 * the corresponding FATTR_* valid bits for each field transmitted.
 */
static void iattr_to_fattr(struct mnt_idmap *idmap, struct fuse_conn *fc,
			   struct iattr *iattr, struct fuse_setattr_in *arg,
			   bool trust_local_cmtime)
{
	unsigned ivalid = iattr->ia_valid;

	if (ivalid & ATTR_MODE) {
		arg->valid |= FATTR_MODE;
		arg->mode = iattr->ia_mode;
	}

	if (ivalid & ATTR_UID) {
		/* Map through the mount idmap into the fs user namespace */
		kuid_t fsuid = from_vfsuid(idmap, fc->user_ns, iattr->ia_vfsuid);

		arg->valid |= FATTR_UID;
		arg->uid = from_kuid(fc->user_ns, fsuid);
	}

	if (ivalid & ATTR_GID) {
		kgid_t fsgid = from_vfsgid(idmap, fc->user_ns, iattr->ia_vfsgid);

		arg->valid |= FATTR_GID;
		arg->gid = from_kgid(fc->user_ns, fsgid);
	}

	if (ivalid & ATTR_SIZE) {
		arg->valid |= FATTR_SIZE;
		arg->size = iattr->ia_size;
	}

	if (ivalid & ATTR_ATIME) {
		arg->valid |= FATTR_ATIME;
		arg->atime = iattr->ia_atime.tv_sec;
		arg->atimensec = iattr->ia_atime.tv_nsec;
		/* No explicit time supplied: tell the server to use "now" */
		if (!(ivalid & ATTR_ATIME_SET))
			arg->valid |= FATTR_ATIME_NOW;
	}

	if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) {
		arg->valid |= FATTR_MTIME;
		arg->mtime = iattr->ia_mtime.tv_sec;
		arg->mtimensec = iattr->ia_mtime.tv_nsec;
		if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime)
			arg->valid |= FATTR_MTIME_NOW;
	}

	if ((ivalid & ATTR_CTIME) && trust_local_cmtime) {
		arg->valid |= FATTR_CTIME;
		arg->ctime = iattr->ia_ctime.tv_sec;
		arg->ctimensec = iattr->ia_ctime.tv_nsec;
	}
}
2039
2040 /*
2041 * Prevent concurrent writepages on inode
2042 *
2043 * This is done by adding a negative bias to the inode write counter
2044 * and waiting for all pending writes to finish.
2045 */
/*
 * Block new writepages on @inode by biasing fi->writectr with
 * FUSE_NOWRITE, then wait until all in-flight writes complete
 * (writectr drains down to exactly FUSE_NOWRITE).  The inode lock
 * must be held by the caller.
 */
void fuse_set_nowrite(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	BUG_ON(!inode_is_locked(inode));

	spin_lock(&fi->lock);
	/* A negative counter here would mean nowrite is already set */
	BUG_ON(fi->writectr < 0);
	fi->writectr += FUSE_NOWRITE;
	spin_unlock(&fi->lock);
	wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
}
2058
2059 /*
2060 * Allow writepages on inode
2061 *
2062 * Remove the bias from the writecounter and send any queued
2063 * writepages.
2064 */
/*
 * Remove the FUSE_NOWRITE bias and flush writepages that queued up
 * while writes were blocked.  Called with fi->lock held (see
 * fuse_release_nowrite(); other callers outside this view are assumed
 * to do the same — verify before changing).
 */
static void __fuse_release_nowrite(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Must only be called after fuse_set_nowrite() fully drained */
	BUG_ON(fi->writectr != FUSE_NOWRITE);
	fi->writectr = 0;
	fuse_flush_writepages(inode);
}
2073
/* Locked wrapper around __fuse_release_nowrite() */
void fuse_release_nowrite(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	__fuse_release_nowrite(inode);
	spin_unlock(&fi->lock);
}
2082
fuse_setattr_fill(struct fuse_conn * fc,struct fuse_args * args,struct inode * inode,struct fuse_setattr_in * inarg_p,struct fuse_attr_out * outarg_p)2083 static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
2084 struct inode *inode,
2085 struct fuse_setattr_in *inarg_p,
2086 struct fuse_attr_out *outarg_p)
2087 {
2088 args->opcode = FUSE_SETATTR;
2089 args->nodeid = get_node_id(inode);
2090 args->in_numargs = 1;
2091 args->in_args[0].size = sizeof(*inarg_p);
2092 args->in_args[0].value = inarg_p;
2093 args->out_numargs = 1;
2094 args->out_args[0].size = sizeof(*outarg_p);
2095 args->out_args[0].value = outarg_p;
2096 }
2097
/*
 * Flush inode->i_mtime (and ctime, when the protocol supports it) to
 * the server via a FUSE_SETATTR request.
 *
 * @inode: inode whose cached timestamps should be pushed out
 * @ff:    optional open file; if non-NULL its handle is attached so the
 *         server can associate the update with that open file
 *
 * Returns 0 on success or a negative errno from the request.
 */
int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	FUSE_ARGS(args);
	struct fuse_setattr_in inarg;
	struct fuse_attr_out outarg;

	/* memset (rather than an initializer) also zeroes struct padding */
	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));

	inarg.valid = FATTR_MTIME;
	inarg.mtime = inode_get_mtime_sec(inode);
	inarg.mtimensec = inode_get_mtime_nsec(inode);
	/* FATTR_CTIME is only understood by protocol minor versions >= 23 */
	if (fm->fc->minor >= 23) {
		inarg.valid |= FATTR_CTIME;
		inarg.ctime = inode_get_ctime_sec(inode);
		inarg.ctimensec = inode_get_ctime_nsec(inode);
	}
	if (ff) {
		inarg.valid |= FATTR_FH;
		inarg.fh = ff->fh;
	}
	fuse_setattr_fill(fm->fc, &args, inode, &inarg, &outarg);

	return fuse_simple_request(fm, &args);
}
2127
/*
 * Set attributes, and at the same time refresh them.
 *
 * Truncation is slightly complicated, because the 'truncate' request
 * may fail, in which case we don't want to touch the mapping.
 * vmtruncate() doesn't allow for this case, so do the rlimit checking
 * and the actual truncation by hand.
 */
int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		    struct iattr *attr, struct file *file)
{
	struct inode *inode = d_inode(dentry);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_conn *fc = fm->fc;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct address_space *mapping = inode->i_mapping;
	FUSE_ARGS(args);
	struct fuse_setattr_in inarg;
	struct fuse_attr_out outarg;
	bool is_truncate = false;
	/* writeback-cache mode: kernel owns size/times for regular files */
	bool is_wb = fc->writeback_cache && S_ISREG(inode->i_mode);
	loff_t oldsize;
	int err;
	bool trust_local_cmtime = is_wb;
	bool fault_blocked = false;
	u64 attr_version;

	/* Without default_permissions the server does the permission check */
	if (!fc->default_permissions)
		attr->ia_valid |= ATTR_FORCE;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		/* Size changes only make sense on regular files */
		if (WARN_ON(!S_ISREG(inode->i_mode)))
			return -EIO;
		is_truncate = true;
	}

	/* DAX: block page faults while the file size is changing */
	if (FUSE_IS_DAX(inode) && is_truncate) {
		filemap_invalidate_lock(mapping);
		fault_blocked = true;
		err = fuse_dax_break_layouts(inode, 0, -1);
		if (err) {
			filemap_invalidate_unlock(mapping);
			return err;
		}
	}

	if (attr->ia_valid & ATTR_OPEN) {
		/* This is coming from open(..., ... | O_TRUNC); */
		WARN_ON(!(attr->ia_valid & ATTR_SIZE));
		WARN_ON(attr->ia_size != 0);
		if (fc->atomic_o_trunc) {
			/*
			 * No need to send request to userspace, since actual
			 * truncation has already been done by OPEN. But still
			 * need to truncate page cache.
			 */
			i_size_write(inode, 0);
			truncate_pagecache(inode, 0);
			goto out;
		}
		file = NULL;
	}

	/* Flush dirty data/metadata before non-truncate SETATTR */
	if (is_wb &&
	    attr->ia_valid &
			(ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
			 ATTR_TIMES_SET)) {
		err = write_inode_now(inode, true);
		if (err)
			return err;

		/* Drain in-flight writepages before the attribute change */
		fuse_set_nowrite(inode);
		fuse_release_nowrite(inode);
	}

	if (is_truncate) {
		/* Block writepages for the duration of the truncate */
		fuse_set_nowrite(inode);
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
		if (trust_local_cmtime && attr->ia_size != inode->i_size)
			attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
	}

	/* memset zeroes padding too: inarg is copied out to userspace */
	memset(&inarg, 0, sizeof(inarg));
	memset(&outarg, 0, sizeof(outarg));
	iattr_to_fattr(idmap, fc, attr, &inarg, trust_local_cmtime);
	if (file) {
		struct fuse_file *ff = file->private_data;
		inarg.valid |= FATTR_FH;
		inarg.fh = ff->fh;
	}

	/* Kill suid/sgid for non-directory chown unconditionally */
	if (fc->handle_killpriv_v2 && !S_ISDIR(inode->i_mode) &&
	    attr->ia_valid & (ATTR_UID | ATTR_GID))
		inarg.valid |= FATTR_KILL_SUIDGID;

	if (attr->ia_valid & ATTR_SIZE) {
		/* For mandatory locking in truncate */
		inarg.valid |= FATTR_LOCKOWNER;
		inarg.lock_owner = fuse_lock_owner_id(fc, current->files);

		/* Kill suid/sgid for truncate only if no CAP_FSETID */
		if (fc->handle_killpriv_v2 && !capable(CAP_FSETID))
			inarg.valid |= FATTR_KILL_SUIDGID;
	}

	/* Snapshot version to detect concurrent attribute refreshes below */
	attr_version = fuse_get_attr_version(fm->fc);
	fuse_setattr_fill(fc, &args, inode, &inarg, &outarg);
	err = fuse_simple_request(fm, &args);
	if (err) {
		/* Interrupted: server state unknown, drop cached attrs */
		if (err == -EINTR)
			fuse_invalidate_attr(inode);
		goto error;
	}

	if (fuse_invalid_attr(&outarg.attr) ||
	    inode_wrong_type(inode, outarg.attr.mode)) {
		/* Server replied with inconsistent attrs: poison the inode */
		fuse_make_bad(inode);
		err = -EIO;
		goto error;
	}

	spin_lock(&fi->lock);
	/* the kernel maintains i_mtime locally */
	if (trust_local_cmtime) {
		if (attr->ia_valid & ATTR_MTIME)
			inode_set_mtime_to_ts(inode, attr->ia_mtime);
		if (attr->ia_valid & ATTR_CTIME)
			inode_set_ctime_to_ts(inode, attr->ia_ctime);
		/* FIXME: clear I_DIRTY_SYNC? */
	}

	if (fi->attr_version > attr_version) {
		/*
		 * Apply attributes, for example for fsnotify_change(), but set
		 * attribute timeout to zero.
		 */
		outarg.attr_valid = outarg.attr_valid_nsec = 0;
	}

	fuse_change_attributes_common(inode, &outarg.attr, NULL,
				      ATTR_TIMEOUT(&outarg),
				      fuse_get_cache_mask(inode), 0);
	oldsize = inode->i_size;
	/* see the comment in fuse_change_attributes() */
	if (!is_wb || is_truncate)
		i_size_write(inode, outarg.attr.size);

	if (is_truncate) {
		/* NOTE: this may release/reacquire fi->lock */
		__fuse_release_nowrite(inode);
	}
	spin_unlock(&fi->lock);

	/*
	 * Only call invalidate_inode_pages2() after removing
	 * FUSE_NOWRITE, otherwise fuse_launder_folio() would deadlock.
	 */
	if ((is_truncate || !is_wb) &&
	    S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
		truncate_pagecache(inode, outarg.attr.size);
		invalidate_inode_pages2(mapping);
	}

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
out:
	if (fault_blocked)
		filemap_invalidate_unlock(mapping);

	return 0;

error:
	/* Re-enable writepages before bailing out of a failed truncate */
	if (is_truncate)
		fuse_release_nowrite(inode);

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (fault_blocked)
		filemap_invalidate_unlock(mapping);
	return err;
}
2314
fuse_setattr(struct mnt_idmap * idmap,struct dentry * entry,struct iattr * attr)2315 static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry,
2316 struct iattr *attr)
2317 {
2318 struct inode *inode = d_inode(entry);
2319 struct fuse_conn *fc = get_fuse_conn(inode);
2320 struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
2321 int ret;
2322
2323 if (fuse_is_bad(inode))
2324 return -EIO;
2325
2326 if (!fuse_allow_current_process(get_fuse_conn(inode)))
2327 return -EACCES;
2328
2329 if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
2330 attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
2331 ATTR_MODE);
2332
2333 /*
2334 * The only sane way to reliably kill suid/sgid is to do it in
2335 * the userspace filesystem
2336 *
2337 * This should be done on write(), truncate() and chown().
2338 */
2339 if (!fc->handle_killpriv && !fc->handle_killpriv_v2) {
2340 /*
2341 * ia_mode calculation may have used stale i_mode.
2342 * Refresh and recalculate.
2343 */
2344 ret = fuse_do_getattr(idmap, inode, NULL, file);
2345 if (ret)
2346 return ret;
2347
2348 attr->ia_mode = inode->i_mode;
2349 if (inode->i_mode & S_ISUID) {
2350 attr->ia_valid |= ATTR_MODE;
2351 attr->ia_mode &= ~S_ISUID;
2352 }
2353 if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
2354 attr->ia_valid |= ATTR_MODE;
2355 attr->ia_mode &= ~S_ISGID;
2356 }
2357 }
2358 }
2359 if (!attr->ia_valid)
2360 return 0;
2361
2362 ret = fuse_do_setattr(idmap, entry, attr, file);
2363 if (!ret) {
2364 /*
2365 * If filesystem supports acls it may have updated acl xattrs in
2366 * the filesystem, so forget cached acls for the inode.
2367 */
2368 if (fc->posix_acl)
2369 forget_all_cached_acls(inode);
2370
2371 /* Directory mode changed, may need to revalidate access */
2372 if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE))
2373 fuse_invalidate_entry_cache(entry);
2374 }
2375 return ret;
2376 }
2377
/*
 * ->getattr() entry point for FUSE inodes.
 *
 * When the caller is not allowed to access the mount (allow_other not
 * set), an explicit request for *no* attributes still succeeds with
 * just st_dev filled in; any real request fails with -EACCES.
 */
static int fuse_getattr(struct mnt_idmap *idmap,
			const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);

	if (fuse_is_bad(inode))
		return -EIO;

	if (fuse_allow_current_process(get_fuse_conn(inode)))
		return fuse_update_get_attr(idmap, inode, NULL, stat,
					    request_mask, flags);

	if (request_mask)
		return -EACCES;

	/*
	 * User explicitly requested *nothing*: don't error out, report
	 * st_dev only.
	 */
	stat->result_mask = 0;
	stat->dev = inode->i_sb->s_dev;
	return 0;
}
2403
/* Inode operations for FUSE directories */
static const struct inode_operations fuse_dir_inode_operations = {
	.lookup = fuse_lookup,
	.mkdir = fuse_mkdir,
	.symlink = fuse_symlink,
	.unlink = fuse_unlink,
	.rmdir = fuse_rmdir,
	.rename = fuse_rename2,
	.link = fuse_link,
	.setattr = fuse_setattr,
	.create = fuse_create,
	.atomic_open = fuse_atomic_open,
	.tmpfile = fuse_tmpfile,
	.mknod = fuse_mknod,
	.permission = fuse_permission,
	.getattr = fuse_getattr,
	.listxattr = fuse_listxattr,
	.get_inode_acl = fuse_get_inode_acl,
	.get_acl = fuse_get_acl,
	.set_acl = fuse_set_acl,
	.fileattr_get = fuse_fileattr_get,
	.fileattr_set = fuse_fileattr_set,
};
2426
/* File operations for open FUSE directory handles */
static const struct file_operations fuse_dir_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
	.iterate_shared = fuse_readdir,
	.open = fuse_dir_open,
	.release = fuse_dir_release,
	.fsync = fuse_dir_fsync,
	.unlocked_ioctl = fuse_dir_ioctl,
	.compat_ioctl = fuse_dir_compat_ioctl,
};
2437
/* Inode operations shared by non-directory, non-symlink FUSE inodes */
static const struct inode_operations fuse_common_inode_operations = {
	.setattr = fuse_setattr,
	.permission = fuse_permission,
	.getattr = fuse_getattr,
	.listxattr = fuse_listxattr,
	.get_inode_acl = fuse_get_inode_acl,
	.get_acl = fuse_get_acl,
	.set_acl = fuse_set_acl,
	.fileattr_get = fuse_fileattr_get,
	.fileattr_set = fuse_fileattr_set,
};
2449
/* Inode operations for FUSE symlinks (no acl/fileattr hooks) */
static const struct inode_operations fuse_symlink_inode_operations = {
	.setattr = fuse_setattr,
	.get_link = fuse_get_link,
	.getattr = fuse_getattr,
	.listxattr = fuse_listxattr,
};
2456
/* Install the common (non-dir, non-symlink) inode operations */
void fuse_init_common(struct inode *inode)
{
	inode->i_op = &fuse_common_inode_operations;
}
2461
/* Install directory inode/file ops and reset the readdir cache state */
void fuse_init_dir(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	inode->i_op = &fuse_dir_inode_operations;
	inode->i_fop = &fuse_dir_operations;

	/* Start with a fresh, empty readdir cache */
	spin_lock_init(&fi->rdc.lock);
	fi->rdc.cached = false;
	fi->rdc.size = 0;
	fi->rdc.pos = 0;
	fi->rdc.version = 0;
}
2475
fuse_symlink_read_folio(struct file * null,struct folio * folio)2476 static int fuse_symlink_read_folio(struct file *null, struct folio *folio)
2477 {
2478 int err = fuse_readlink_folio(folio->mapping->host, folio);
2479
2480 if (!err)
2481 folio_mark_uptodate(folio);
2482
2483 folio_unlock(folio);
2484
2485 return err;
2486 }
2487
/* Symlink bodies are populated through the page cache via ->read_folio */
static const struct address_space_operations fuse_symlink_aops = {
	.read_folio = fuse_symlink_read_folio,
};
2491
/* Install symlink ops; link bodies are read through the page cache */
void fuse_init_symlink(struct inode *inode)
{
	inode->i_op = &fuse_symlink_inode_operations;
	inode->i_data.a_ops = &fuse_symlink_aops;
	/* Symlink pagecache must be kernel-addressable (no highmem) */
	inode_nohighmem(inode);
}
2498