1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2007-2009 Google Inc. and Amit Singh
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
9 * met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Google Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Copyright (C) 2005 Csaba Henk.
34 * All rights reserved.
35 *
36 * Copyright (c) 2019 The FreeBSD Foundation
37 *
38 * Portions of this software were developed by BFF Storage Systems, LLC under
39 * sponsorship from the FreeBSD Foundation.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 *
50 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * SUCH DAMAGE.
61 */
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/counter.h>
66 #include <sys/module.h>
67 #include <sys/errno.h>
68 #include <sys/kernel.h>
69 #include <sys/conf.h>
70 #include <sys/uio.h>
71 #include <sys/malloc.h>
72 #include <sys/queue.h>
73 #include <sys/lock.h>
74 #include <sys/mutex.h>
75 #include <sys/sdt.h>
76 #include <sys/sx.h>
77 #include <sys/proc.h>
78 #include <sys/mount.h>
79 #include <sys/vnode.h>
80 #include <sys/namei.h>
81 #include <sys/stat.h>
82 #include <sys/unistd.h>
83 #include <sys/filedesc.h>
84 #include <sys/file.h>
85 #include <sys/fcntl.h>
86 #include <sys/dirent.h>
87 #include <sys/bio.h>
88 #include <sys/buf.h>
89 #include <sys/sysctl.h>
90 #include <sys/priv.h>
91
92 #include "fuse.h"
93 #include "fuse_file.h"
94 #include "fuse_internal.h"
95 #include "fuse_io.h"
96 #include "fuse_ipc.h"
97 #include "fuse_node.h"
98 #include "fuse_file.h"
99
100 SDT_PROVIDER_DECLARE(fusefs);
101 /*
102 * Fuse trace probe:
103 * arg0: verbosity. Higher numbers give more verbose messages
104 * arg1: Textual message
105 */
106 SDT_PROBE_DEFINE2(fusefs, , internal, trace, "int", "char*");
107
108 #ifdef ZERO_PAD_INCOMPLETE_BUFS
109 static int isbzero(void *buf, size_t len);
110
111 #endif
112
113 counter_u64_t fuse_lookup_cache_hits;
114 counter_u64_t fuse_lookup_cache_misses;
115
116 SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_hits, CTLFLAG_RD,
117 &fuse_lookup_cache_hits, "number of positive cache hits in lookup");
118
119 SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_misses, CTLFLAG_RD,
120 &fuse_lookup_cache_misses, "number of cache misses in lookup");
121
122 int
fuse_internal_get_cached_vnode(struct mount * mp,ino_t ino,int flags,struct vnode ** vpp)123 fuse_internal_get_cached_vnode(struct mount* mp, ino_t ino, int flags,
124 struct vnode **vpp)
125 {
126 struct bintime now;
127 struct thread *td = curthread;
128 uint64_t nodeid = ino;
129 int error;
130
131 *vpp = NULL;
132
133 error = vfs_hash_get(mp, fuse_vnode_hash(nodeid), flags, td, vpp,
134 fuse_vnode_cmp, &nodeid);
135 if (error)
136 return error;
137 /*
138 * Check the entry cache timeout. We have to do this within fusefs
139 * instead of by using cache_enter_time/cache_lookup because those
140 * routines are only intended to work with pathnames, not inodes
141 */
142 if (*vpp != NULL) {
143 getbinuptime(&now);
144 if (bintime_cmp(&(VTOFUD(*vpp)->entry_cache_timeout), &now, >)){
145 counter_u64_add(fuse_lookup_cache_hits, 1);
146 return 0;
147 } else {
148 /* Entry cache timeout */
149 counter_u64_add(fuse_lookup_cache_misses, 1);
150 cache_purge(*vpp);
151 vput(*vpp);
152 *vpp = NULL;
153 }
154 }
155 return 0;
156 }
157
158 SDT_PROBE_DEFINE0(fusefs, , internal, access_vadmin);
159 /* Synchronously send a FUSE_ACCESS operation */
160 int
fuse_internal_access(struct vnode * vp,accmode_t mode,struct thread * td,struct ucred * cred)161 fuse_internal_access(struct vnode *vp,
162 accmode_t mode,
163 struct thread *td,
164 struct ucred *cred)
165 {
166 int err = 0;
167 uint32_t mask = F_OK;
168 int dataflags;
169 struct mount *mp;
170 struct fuse_dispatcher fdi;
171 struct fuse_access_in *fai;
172 struct fuse_data *data;
173
174 mp = vnode_mount(vp);
175
176 data = fuse_get_mpdata(mp);
177 dataflags = data->dataflags;
178
179 if (mode == 0)
180 return 0;
181
182 if (mode & VMODIFY_PERMS && vfs_isrdonly(mp)) {
183 switch (vp->v_type) {
184 case VDIR:
185 /* FALLTHROUGH */
186 case VLNK:
187 /* FALLTHROUGH */
188 case VREG:
189 return EROFS;
190 default:
191 break;
192 }
193 }
194
195 /* Unless explicitly permitted, deny everyone except the fs owner. */
196 if (!(dataflags & FSESS_DAEMON_CAN_SPY)) {
197 if (fuse_match_cred(data->daemoncred, cred))
198 return EPERM;
199 }
200
201 if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
202 struct vattr va;
203
204 fuse_internal_getattr(vp, &va, cred, td);
205 return vaccess(vp->v_type, va.va_mode, va.va_uid,
206 va.va_gid, mode, cred);
207 }
208
209 if (mode & VADMIN) {
210 /*
211 * The FUSE protocol doesn't have an equivalent of VADMIN, so
212 * it's a bug if we ever reach this point with that bit set.
213 */
214 SDT_PROBE0(fusefs, , internal, access_vadmin);
215 }
216
217 if (fsess_not_impl(mp, FUSE_ACCESS))
218 return 0;
219
220 if ((mode & (VWRITE | VAPPEND)) != 0)
221 mask |= W_OK;
222 if ((mode & VREAD) != 0)
223 mask |= R_OK;
224 if ((mode & VEXEC) != 0)
225 mask |= X_OK;
226
227 fdisp_init(&fdi, sizeof(*fai));
228 fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred);
229
230 fai = fdi.indata;
231 fai->mask = mask;
232
233 err = fdisp_wait_answ(&fdi);
234 fdisp_destroy(&fdi);
235
236 if (err == ENOSYS) {
237 fsess_set_notimpl(mp, FUSE_ACCESS);
238 err = 0;
239 }
240 return err;
241 }
242
243 /*
244 * Cache FUSE attributes from attr, in attribute cache associated with vnode
245 * 'vp'. Optionally, if argument 'vap' is not NULL, store a copy of the
246 * converted attributes there as well.
247 *
248 * If the nominal attribute cache TTL is zero, do not cache on the 'vp' (but do
249 * return the result to the caller).
250 */
251 void
fuse_internal_cache_attrs(struct vnode * vp,struct fuse_attr * attr,uint64_t attr_valid,uint32_t attr_valid_nsec,struct vattr * vap,bool from_server)252 fuse_internal_cache_attrs(struct vnode *vp, struct fuse_attr *attr,
253 uint64_t attr_valid, uint32_t attr_valid_nsec, struct vattr *vap,
254 bool from_server)
255 {
256 struct mount *mp;
257 struct fuse_vnode_data *fvdat;
258 struct fuse_data *data;
259 struct vattr *vp_cache_at;
260
261 mp = vnode_mount(vp);
262 fvdat = VTOFUD(vp);
263 data = fuse_get_mpdata(mp);
264
265 ASSERT_CACHED_ATTRS_LOCKED(vp);
266
267 fuse_validity_2_bintime(attr_valid, attr_valid_nsec,
268 &fvdat->attr_cache_timeout);
269
270 if (vnode_isreg(vp) &&
271 fvdat->cached_attrs.va_size != VNOVAL &&
272 fvdat->flag & FN_SIZECHANGE &&
273 attr->size != fvdat->cached_attrs.va_size)
274 {
275 if (data->cache_mode == FUSE_CACHE_WB)
276 {
277 const char *msg;
278
279 /*
280 * The server changed the file's size even though we're
281 * using writeback cacheing and and we have outstanding
282 * dirty writes! That's a server bug.
283 */
284 if (fuse_libabi_geq(data, 7, 23)) {
285 msg = "writeback cache incoherent! "
286 "To prevent data corruption, disable "
287 "the writeback cache according to your "
288 "FUSE server's documentation.";
289 } else {
290 msg = "writeback cache incoherent! "
291 "To prevent data corruption, disable "
292 "the writeback cache by setting "
293 "vfs.fusefs.data_cache_mode to 0 or 1.";
294 }
295 fuse_warn(data, FSESS_WARN_WB_CACHE_INCOHERENT, msg);
296 }
297 if (fuse_vnode_attr_cache_valid(vp) &&
298 data->cache_mode != FUSE_CACHE_UC)
299 {
300 /*
301 * The server changed the file's size even though we
302 * have it cached and our cache has not yet expired.
303 * That's a bug.
304 */
305 fuse_warn(data, FSESS_WARN_CACHE_INCOHERENT,
306 "cache incoherent! "
307 "To prevent "
308 "data corruption, disable the data cache "
309 "by mounting with -o direct_io, or as "
310 "directed otherwise by your FUSE server's "
311 "documentation.");
312 }
313 }
314
315 /* Fix our buffers if the filesize changed without us knowing */
316 if (vnode_isreg(vp) && attr->size != fvdat->cached_attrs.va_size) {
317 (void)fuse_vnode_setsize(vp, attr->size, from_server);
318 fvdat->cached_attrs.va_size = attr->size;
319 }
320
321 if (attr_valid > 0 || attr_valid_nsec > 0)
322 vp_cache_at = &(fvdat->cached_attrs);
323 else if (vap != NULL)
324 vp_cache_at = vap;
325 else
326 return;
327
328 vp_cache_at->va_fsid = mp->mnt_stat.f_fsid.val[0];
329 vp_cache_at->va_fileid = attr->ino;
330 vp_cache_at->va_mode = attr->mode & ~S_IFMT;
331 vp_cache_at->va_nlink = attr->nlink;
332 vp_cache_at->va_uid = attr->uid;
333 vp_cache_at->va_gid = attr->gid;
334 vp_cache_at->va_rdev = attr->rdev;
335 vp_cache_at->va_size = attr->size;
336 /* XXX on i386, seconds are truncated to 32 bits */
337 vp_cache_at->va_atime.tv_sec = attr->atime;
338 vp_cache_at->va_atime.tv_nsec = attr->atimensec;
339 vp_cache_at->va_mtime.tv_sec = attr->mtime;
340 vp_cache_at->va_mtime.tv_nsec = attr->mtimensec;
341 vp_cache_at->va_ctime.tv_sec = attr->ctime;
342 vp_cache_at->va_ctime.tv_nsec = attr->ctimensec;
343 if (fuse_libabi_geq(data, 7, 9) && attr->blksize > 0)
344 vp_cache_at->va_blocksize = attr->blksize;
345 else
346 vp_cache_at->va_blocksize = PAGE_SIZE;
347 vp_cache_at->va_type = IFTOVT(attr->mode);
348 vp_cache_at->va_bytes = attr->blocks * S_BLKSIZE;
349 vp_cache_at->va_flags = 0;
350
351 if (vap != vp_cache_at && vap != NULL)
352 memcpy(vap, vp_cache_at, sizeof(*vap));
353 }
354
355 /* fsync */
356
357 int
fuse_internal_fsync_callback(struct fuse_ticket * tick,struct uio * uio)358 fuse_internal_fsync_callback(struct fuse_ticket *tick, struct uio *uio)
359 {
360 if (tick->tk_aw_ohead.error == ENOSYS) {
361 fsess_set_notimpl(tick->tk_data->mp, fticket_opcode(tick));
362 }
363 return 0;
364 }
365
366 int
fuse_internal_fsync(struct vnode * vp,struct thread * td,int waitfor,bool datasync)367 fuse_internal_fsync(struct vnode *vp,
368 struct thread *td,
369 int waitfor,
370 bool datasync)
371 {
372 struct fuse_fsync_in *ffsi = NULL;
373 struct fuse_dispatcher fdi;
374 struct fuse_filehandle *fufh;
375 struct fuse_vnode_data *fvdat = VTOFUD(vp);
376 struct mount *mp = vnode_mount(vp);
377 int op = FUSE_FSYNC;
378 int err = 0;
379
380 if (fsess_not_impl(vnode_mount(vp),
381 (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) {
382 return 0;
383 }
384 if (vnode_isdir(vp))
385 op = FUSE_FSYNCDIR;
386
387 if (fsess_not_impl(mp, op))
388 return 0;
389
390 fdisp_init(&fdi, sizeof(*ffsi));
391 /*
392 * fsync every open file handle for this file, because we can't be sure
393 * which file handle the caller is really referring to.
394 */
395 LIST_FOREACH(fufh, &fvdat->handles, next) {
396 fdi.iosize = sizeof(*ffsi);
397 if (ffsi == NULL)
398 fdisp_make_vp(&fdi, op, vp, td, NULL);
399 else
400 fdisp_refresh_vp(&fdi, op, vp, td, NULL);
401 ffsi = fdi.indata;
402 ffsi->fh = fufh->fh_id;
403 ffsi->fsync_flags = 0;
404
405 if (datasync)
406 ffsi->fsync_flags = FUSE_FSYNC_FDATASYNC;
407
408 if (waitfor == MNT_WAIT) {
409 err = fdisp_wait_answ(&fdi);
410 } else {
411 fuse_insert_callback(fdi.tick,
412 fuse_internal_fsync_callback);
413 fuse_insert_message(fdi.tick, false);
414 }
415 if (err == ENOSYS) {
416 /* ENOSYS means "success, and don't call again" */
417 fsess_set_notimpl(mp, op);
418 err = 0;
419 break;
420 }
421 }
422 fdisp_destroy(&fdi);
423
424 return err;
425 }
426
427 /* Asynchronous invalidation */
428 SDT_PROBE_DEFINE3(fusefs, , internal, invalidate_entry,
429 "struct vnode*", "struct fuse_notify_inval_entry_out*", "char*");
430 int
fuse_internal_invalidate_entry(struct mount * mp,struct uio * uio)431 fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio)
432 {
433 struct fuse_notify_inval_entry_out fnieo;
434 struct componentname cn;
435 struct vnode *dvp, *vp;
436 char name[PATH_MAX];
437 int err;
438
439 if ((err = uiomove(&fnieo, sizeof(fnieo), uio)) != 0)
440 return (err);
441
442 if (fnieo.namelen >= sizeof(name))
443 return (EINVAL);
444
445 if ((err = uiomove(name, fnieo.namelen, uio)) != 0)
446 return (err);
447 name[fnieo.namelen] = '\0';
448 /* fusefs does not cache "." or ".." entries */
449 if (strncmp(name, ".", sizeof(".")) == 0 ||
450 strncmp(name, "..", sizeof("..")) == 0)
451 return (0);
452
453 if (fnieo.parent == FUSE_ROOT_ID)
454 err = VFS_ROOT(mp, LK_SHARED, &dvp);
455 else
456 err = fuse_internal_get_cached_vnode( mp, fnieo.parent,
457 LK_SHARED, &dvp);
458 SDT_PROBE3(fusefs, , internal, invalidate_entry, dvp, &fnieo, name);
459 /*
460 * If dvp is not in the cache, then it must've been reclaimed. And
461 * since fuse_vnop_reclaim does a cache_purge, name's entry must've
462 * been invalidated already. So we can safely return if dvp == NULL
463 */
464 if (err != 0 || dvp == NULL)
465 return (err);
466 /*
467 * XXX we can't check dvp's generation because the FUSE invalidate
468 * entry message doesn't include it. Worse case is that we invalidate
469 * an entry that didn't need to be invalidated.
470 */
471
472 cn.cn_nameiop = LOOKUP;
473 cn.cn_flags = 0; /* !MAKEENTRY means free cached entry */
474 cn.cn_cred = curthread->td_ucred;
475 cn.cn_lkflags = LK_SHARED;
476 cn.cn_pnbuf = NULL;
477 cn.cn_nameptr = name;
478 cn.cn_namelen = fnieo.namelen;
479 err = cache_lookup(dvp, &vp, &cn, NULL, NULL);
480 MPASS(err == 0);
481 CACHED_ATTR_LOCK(dvp);
482 fuse_vnode_clear_attr_cache(dvp);
483 CACHED_ATTR_UNLOCK(dvp);
484 vput(dvp);
485 return (0);
486 }
487
488 SDT_PROBE_DEFINE2(fusefs, , internal, invalidate_inode,
489 "struct vnode*", "struct fuse_notify_inval_inode_out *");
490 int
fuse_internal_invalidate_inode(struct mount * mp,struct uio * uio)491 fuse_internal_invalidate_inode(struct mount *mp, struct uio *uio)
492 {
493 struct fuse_notify_inval_inode_out fniio;
494 struct vnode *vp;
495 int err;
496
497 if ((err = uiomove(&fniio, sizeof(fniio), uio)) != 0)
498 return (err);
499
500 if (fniio.ino == FUSE_ROOT_ID)
501 err = VFS_ROOT(mp, LK_EXCLUSIVE, &vp);
502 else
503 err = fuse_internal_get_cached_vnode(mp, fniio.ino,
504 LK_EXCLUSIVE, &vp);
505 SDT_PROBE2(fusefs, , internal, invalidate_inode, vp, &fniio);
506 if (err != 0 || vp == NULL)
507 return (err);
508 /*
509 * XXX we can't check vp's generation because the FUSE invalidate
510 * entry message doesn't include it. Worse case is that we invalidate
511 * an inode that didn't need to be invalidated.
512 */
513
514 /*
515 * Flush and invalidate buffers if off >= 0. Technically we only need
516 * to flush and invalidate the range of offsets [off, off + len), but
517 * for simplicity's sake we do everything.
518 */
519 if (fniio.off >= 0)
520 fuse_io_invalbuf(vp, curthread);
521 fuse_vnode_clear_attr_cache(vp);
522 vput(vp);
523 return (0);
524 }
525
526 /* mknod */
527 int
fuse_internal_mknod(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,struct vattr * vap)528 fuse_internal_mknod(struct vnode *dvp, struct vnode **vpp,
529 struct componentname *cnp, struct vattr *vap)
530 {
531 struct fuse_data *data;
532 struct fuse_mknod_in fmni;
533 size_t insize;
534
535 data = fuse_get_mpdata(dvp->v_mount);
536
537 fmni.mode = MAKEIMODE(vap->va_type, vap->va_mode);
538 fmni.rdev = vap->va_rdev;
539 if (fuse_libabi_geq(data, 7, 12)) {
540 insize = sizeof(fmni);
541 fmni.umask = curthread->td_proc->p_pd->pd_cmask;
542 fmni.padding = 0;
543 } else {
544 insize = FUSE_COMPAT_MKNOD_IN_SIZE;
545 }
546 return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKNOD, &fmni,
547 insize, vap->va_type));
548 }
549
550 /* readdir */
551
552 int
fuse_internal_readdir(struct vnode * vp,struct uio * uio,struct fuse_filehandle * fufh,struct fuse_iov * cookediov,int * ncookies,uint64_t * cookies)553 fuse_internal_readdir(struct vnode *vp,
554 struct uio *uio,
555 struct fuse_filehandle *fufh,
556 struct fuse_iov *cookediov,
557 int *ncookies,
558 uint64_t *cookies)
559 {
560 int err = 0;
561 struct fuse_dispatcher fdi;
562 struct fuse_read_in *fri = NULL;
563
564 if (uio_resid(uio) == 0)
565 return 0;
566 fdisp_init(&fdi, 0);
567
568 /*
569 * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p
570 * I/O).
571 */
572 while (uio_resid(uio) > 0) {
573 fdi.iosize = sizeof(*fri);
574 fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL);
575 fri = fdi.indata;
576 fri->fh = fufh->fh_id;
577 fri->offset = uio_offset(uio);
578 fri->size = MIN(uio->uio_resid,
579 fuse_get_mpdata(vp->v_mount)->max_read);
580
581 if ((err = fdisp_wait_answ(&fdi)))
582 break;
583 if ((err = fuse_internal_readdir_processdata(uio, fri->size,
584 fdi.answ, fdi.iosize, cookediov, ncookies, &cookies)))
585 break;
586 }
587
588 fdisp_destroy(&fdi);
589 return ((err == -1) ? 0 : err);
590 }
591
592 /*
593 * Return -1 to indicate that this readdir is finished, 0 if it copied
594 * all the directory data read in and it may be possible to read more
595 * and greater than 0 for a failure.
596 */
597 int
fuse_internal_readdir_processdata(struct uio * uio,size_t reqsize,void * buf,size_t bufsize,struct fuse_iov * cookediov,int * ncookies,uint64_t ** cookiesp)598 fuse_internal_readdir_processdata(struct uio *uio,
599 size_t reqsize,
600 void *buf,
601 size_t bufsize,
602 struct fuse_iov *cookediov,
603 int *ncookies,
604 uint64_t **cookiesp)
605 {
606 int err = 0;
607 int oreclen;
608 size_t freclen;
609
610 struct dirent *de;
611 struct fuse_dirent *fudge;
612 uint64_t *cookies;
613
614 cookies = *cookiesp;
615 if (bufsize < FUSE_NAME_OFFSET)
616 return -1;
617 for (;;) {
618 if (bufsize < FUSE_NAME_OFFSET) {
619 err = -1;
620 break;
621 }
622 fudge = (struct fuse_dirent *)buf;
623 freclen = FUSE_DIRENT_SIZE(fudge);
624
625 if (bufsize < freclen) {
626 /*
627 * This indicates a partial directory entry at the
628 * end of the directory data.
629 */
630 err = -1;
631 break;
632 }
633 #ifdef ZERO_PAD_INCOMPLETE_BUFS
634 if (isbzero(buf, FUSE_NAME_OFFSET)) {
635 err = -1;
636 break;
637 }
638 #endif
639
640 if (!fudge->namelen || fudge->namelen > MAXNAMLEN) {
641 err = EINVAL;
642 break;
643 }
644 oreclen = GENERIC_DIRSIZ((struct pseudo_dirent *)
645 &fudge->namelen);
646
647 if (oreclen > uio_resid(uio)) {
648 /* Out of space for the dir so we are done. */
649 err = -1;
650 break;
651 }
652 fiov_adjust(cookediov, oreclen);
653 bzero(cookediov->base, oreclen);
654
655 de = (struct dirent *)cookediov->base;
656 de->d_fileno = fudge->ino;
657 de->d_off = fudge->off;
658 de->d_reclen = oreclen;
659 de->d_type = fudge->type;
660 de->d_namlen = fudge->namelen;
661 memcpy((char *)cookediov->base + sizeof(struct dirent) -
662 MAXNAMLEN - 1,
663 (char *)buf + FUSE_NAME_OFFSET, fudge->namelen);
664 dirent_terminate(de);
665
666 err = uiomove(cookediov->base, cookediov->len, uio);
667 if (err)
668 break;
669 if (cookies != NULL) {
670 if (*ncookies == 0) {
671 err = -1;
672 break;
673 }
674 *cookies = fudge->off;
675 cookies++;
676 (*ncookies)--;
677 }
678 buf = (char *)buf + freclen;
679 bufsize -= freclen;
680 uio_setoffset(uio, fudge->off);
681 }
682 *cookiesp = cookies;
683
684 return err;
685 }
686
687 /* remove */
688
689 int
fuse_internal_remove(struct vnode * dvp,struct vnode * vp,struct componentname * cnp,enum fuse_opcode op)690 fuse_internal_remove(struct vnode *dvp,
691 struct vnode *vp,
692 struct componentname *cnp,
693 enum fuse_opcode op)
694 {
695 struct fuse_dispatcher fdi;
696 nlink_t nlink;
697 int err = 0;
698
699 ASSERT_CACHED_ATTRS_LOCKED(vp);
700
701 fdisp_init(&fdi, cnp->cn_namelen + 1);
702 fdisp_make_vp(&fdi, op, dvp, curthread, cnp->cn_cred);
703
704 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
705 ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
706
707 err = fdisp_wait_answ(&fdi);
708 fdisp_destroy(&fdi);
709
710 if (err)
711 return (err);
712
713 /*
714 * Access the cached nlink even if the attr cached has expired. If
715 * it's inaccurate, the worst that will happen is:
716 * 1) We'll recycle the vnode even though the file has another link we
717 * don't know about, costing a bit of cpu time, or
718 * 2) We won't recycle the vnode even though all of its links are gone.
719 * It will linger around until vnlru reclaims it, costing a bit of
720 * temporary memory.
721 */
722 nlink = VTOFUD(vp)->cached_attrs.va_nlink--;
723
724 /*
725 * Purge the parent's attribute cache because the daemon
726 * should've updated its mtime and ctime.
727 */
728 fuse_vnode_clear_attr_cache(dvp);
729
730 /* NB: nlink could be zero if it was never cached */
731 if (nlink <= 1 || vnode_vtype(vp) == VDIR) {
732 fuse_internal_vnode_disappear(vp);
733 } else {
734 cache_purge(vp);
735 fuse_vnode_update(vp, FN_CTIMECHANGE);
736 }
737
738 return err;
739 }
740
741 /* rename */
742
743 int
fuse_internal_rename(struct vnode * fdvp,struct componentname * fcnp,struct vnode * tdvp,struct componentname * tcnp)744 fuse_internal_rename(struct vnode *fdvp,
745 struct componentname *fcnp,
746 struct vnode *tdvp,
747 struct componentname *tcnp)
748 {
749 struct fuse_dispatcher fdi;
750 struct fuse_rename_in *fri;
751 int err = 0;
752
753 fdisp_init(&fdi, sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 2);
754 fdisp_make_vp(&fdi, FUSE_RENAME, fdvp, curthread, tcnp->cn_cred);
755
756 fri = fdi.indata;
757 fri->newdir = VTOI(tdvp);
758 memcpy((char *)fdi.indata + sizeof(*fri), fcnp->cn_nameptr,
759 fcnp->cn_namelen);
760 ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen] = '\0';
761 memcpy((char *)fdi.indata + sizeof(*fri) + fcnp->cn_namelen + 1,
762 tcnp->cn_nameptr, tcnp->cn_namelen);
763 ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen +
764 tcnp->cn_namelen + 1] = '\0';
765
766 err = fdisp_wait_answ(&fdi);
767 fdisp_destroy(&fdi);
768 return err;
769 }
770
771 /* strategy */
772
773 /* entity creation */
774
775 void
fuse_internal_newentry_makerequest(struct mount * mp,uint64_t dnid,struct componentname * cnp,enum fuse_opcode op,void * buf,size_t bufsize,struct fuse_dispatcher * fdip)776 fuse_internal_newentry_makerequest(struct mount *mp,
777 uint64_t dnid,
778 struct componentname *cnp,
779 enum fuse_opcode op,
780 void *buf,
781 size_t bufsize,
782 struct fuse_dispatcher *fdip)
783 {
784 fdip->iosize = bufsize + cnp->cn_namelen + 1;
785
786 fdisp_make(fdip, op, mp, dnid, curthread, cnp->cn_cred);
787 memcpy(fdip->indata, buf, bufsize);
788 memcpy((char *)fdip->indata + bufsize, cnp->cn_nameptr, cnp->cn_namelen);
789 ((char *)fdip->indata)[bufsize + cnp->cn_namelen] = '\0';
790 }
791
792 int
fuse_internal_newentry_core(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,__enum_uint8 (vtype)vtyp,struct fuse_dispatcher * fdip)793 fuse_internal_newentry_core(struct vnode *dvp,
794 struct vnode **vpp,
795 struct componentname *cnp,
796 __enum_uint8(vtype) vtyp,
797 struct fuse_dispatcher *fdip)
798 {
799 int err = 0;
800 struct fuse_entry_out *feo;
801 struct mount *mp = vnode_mount(dvp);
802
803 if ((err = fdisp_wait_answ(fdip))) {
804 return err;
805 }
806 feo = fdip->answ;
807
808 if ((err = fuse_internal_checkentry(feo, vtyp))) {
809 return err;
810 }
811 err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vtyp);
812 if (err) {
813 fuse_internal_forget_send(mp, curthread, cnp->cn_cred,
814 feo->nodeid, 1);
815 return err;
816 }
817
818 /*
819 * Purge the parent's attribute cache because the daemon should've
820 * updated its mtime and ctime
821 */
822 fuse_vnode_clear_attr_cache(dvp);
823
824 fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid,
825 feo->attr_valid_nsec, NULL, true);
826
827 return err;
828 }
829
830 int
fuse_internal_newentry(struct vnode * dvp,struct vnode ** vpp,struct componentname * cnp,enum fuse_opcode op,void * buf,size_t bufsize,__enum_uint8 (vtype)vtype)831 fuse_internal_newentry(struct vnode *dvp,
832 struct vnode **vpp,
833 struct componentname *cnp,
834 enum fuse_opcode op,
835 void *buf,
836 size_t bufsize,
837 __enum_uint8(vtype) vtype)
838 {
839 int err;
840 struct fuse_dispatcher fdi;
841 struct mount *mp = vnode_mount(dvp);
842
843 fdisp_init(&fdi, 0);
844 fuse_internal_newentry_makerequest(mp, VTOI(dvp), cnp, op, buf,
845 bufsize, &fdi);
846 err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi);
847 fdisp_destroy(&fdi);
848
849 return err;
850 }
851
852 /* entity destruction */
853
854 int
fuse_internal_forget_callback(struct fuse_ticket * ftick,struct uio * uio)855 fuse_internal_forget_callback(struct fuse_ticket *ftick, struct uio *uio)
856 {
857 fuse_internal_forget_send(ftick->tk_data->mp, curthread, NULL,
858 ((struct fuse_in_header *)ftick->tk_ms_fiov.base)->nodeid, 1);
859
860 return 0;
861 }
862
863 void
fuse_internal_forget_send(struct mount * mp,struct thread * td,struct ucred * cred,uint64_t nodeid,uint64_t nlookup)864 fuse_internal_forget_send(struct mount *mp,
865 struct thread *td,
866 struct ucred *cred,
867 uint64_t nodeid,
868 uint64_t nlookup)
869 {
870
871 struct fuse_dispatcher fdi;
872 struct fuse_forget_in *ffi;
873
874 /*
875 * KASSERT(nlookup > 0, ("zero-times forget for vp #%llu",
876 * (long long unsigned) nodeid));
877 */
878
879 fdisp_init(&fdi, sizeof(*ffi));
880 fdisp_make(&fdi, FUSE_FORGET, mp, nodeid, td, cred);
881
882 ffi = fdi.indata;
883 ffi->nlookup = nlookup;
884
885 fuse_insert_message(fdi.tick, false);
886 fdisp_destroy(&fdi);
887 }
888
889 /* Fetch the vnode's attributes from the daemon*/
890 int
fuse_internal_do_getattr(struct vnode * vp,struct vattr * vap,struct ucred * cred,struct thread * td)891 fuse_internal_do_getattr(struct vnode *vp, struct vattr *vap,
892 struct ucred *cred, struct thread *td)
893 {
894 struct fuse_dispatcher fdi;
895 struct fuse_vnode_data *fvdat = VTOFUD(vp);
896 struct fuse_getattr_in *fgai;
897 struct fuse_attr_out *fao;
898 __enum_uint8(vtype) vtyp;
899 int err;
900
901 fdisp_init(&fdi, sizeof(*fgai));
902 fdisp_make_vp(&fdi, FUSE_GETATTR, vp, td, cred);
903 fgai = fdi.indata;
904 /*
905 * We could look up a file handle and set it in fgai->fh, but that
906 * involves extra runtime work and I'm unaware of any file systems that
907 * care.
908 */
909 fgai->getattr_flags = 0;
910 if ((err = fdisp_wait_answ(&fdi))) {
911 if (err == ENOENT)
912 fuse_internal_vnode_disappear(vp);
913 goto out;
914 }
915
916 fao = (struct fuse_attr_out *)fdi.answ;
917 vtyp = IFTOVT(fao->attr.mode);
918
919 CACHED_ATTR_LOCK(vp);
920 if (fvdat->flag & FN_SIZECHANGE)
921 fao->attr.size = fvdat->cached_attrs.va_size;
922 if (fvdat->flag & FN_ATIMECHANGE) {
923 fao->attr.atime = fvdat->cached_attrs.va_atime.tv_sec;
924 fao->attr.atimensec = fvdat->cached_attrs.va_atime.tv_nsec;
925 }
926 if (fvdat->flag & FN_CTIMECHANGE) {
927 fao->attr.ctime = fvdat->cached_attrs.va_ctime.tv_sec;
928 fao->attr.ctimensec = fvdat->cached_attrs.va_ctime.tv_nsec;
929 }
930 if (fvdat->flag & FN_MTIMECHANGE) {
931 fao->attr.mtime = fvdat->cached_attrs.va_mtime.tv_sec;
932 fao->attr.mtimensec = fvdat->cached_attrs.va_mtime.tv_nsec;
933 }
934
935 fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid,
936 fao->attr_valid_nsec, vap, true);
937
938 CACHED_ATTR_UNLOCK(vp);
939 if (vtyp != vnode_vtype(vp)) {
940 fuse_internal_vnode_disappear(vp);
941 err = ENOENT;
942 }
943
944 out:
945 fdisp_destroy(&fdi);
946 return err;
947 }
948
949 /* Read a vnode's attributes from cache or fetch them from the fuse daemon */
950 int
fuse_internal_getattr(struct vnode * vp,struct vattr * vap,struct ucred * cred,struct thread * td)951 fuse_internal_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred,
952 struct thread *td)
953 {
954 struct vattr *attrs;
955
956 CACHED_ATTR_LOCK(vp);
957 if ((attrs = VTOVA(vp)) != NULL) {
958 *vap = *attrs; /* struct copy */
959 CACHED_ATTR_UNLOCK(vp);
960 return 0;
961 } else
962 CACHED_ATTR_UNLOCK(vp);
963
964 return fuse_internal_do_getattr(vp, vap, cred, td);
965 }
966
967 void
fuse_internal_vnode_disappear(struct vnode * vp)968 fuse_internal_vnode_disappear(struct vnode *vp)
969 {
970 struct fuse_vnode_data *fvdat = VTOFUD(vp);
971
972 ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear");
973 fvdat->flag |= FN_REVOKED;
974 cache_purge(vp);
975 }
976
977 /* fuse start/stop */
978
979 SDT_PROBE_DEFINE2(fusefs, , internal, init_done,
980 "struct fuse_data*", "struct fuse_init_out*");
981 int
fuse_internal_init_callback(struct fuse_ticket * tick,struct uio * uio)982 fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio)
983 {
984 int err = 0;
985 struct fuse_data *data = tick->tk_data;
986 struct fuse_init_out *fiio = NULL;
987
988 if (fdata_get_dead(data))
989 goto out;
990
991 if ((err = tick->tk_aw_ohead.error)) {
992 goto out;
993 }
994 if ((err = fticket_pull(tick, uio))) {
995 goto out;
996 }
997 fiio = fticket_resp(tick)->base;
998
999 data->fuse_libabi_major = fiio->major;
1000 data->fuse_libabi_minor = fiio->minor;
1001 if (!fuse_libabi_geq(data, 7, 4)) {
1002 /*
1003 * With a little work we could support servers as old as 7.1.
1004 * But there would be little payoff.
1005 */
1006 SDT_PROBE2(fusefs, , internal, trace, 1,
1007 "userspace version too low");
1008 err = EPROTONOSUPPORT;
1009 goto out;
1010 }
1011
1012 if (fuse_libabi_geq(data, 7, 5)) {
1013 if (fticket_resp(tick)->len == sizeof(struct fuse_init_out) ||
1014 fticket_resp(tick)->len == FUSE_COMPAT_22_INIT_OUT_SIZE) {
1015 data->max_write = fiio->max_write;
1016 if (fiio->flags & FUSE_ASYNC_READ)
1017 data->dataflags |= FSESS_ASYNC_READ;
1018 if (fiio->flags & FUSE_POSIX_LOCKS)
1019 data->dataflags |= FSESS_POSIX_LOCKS;
1020 if (fiio->flags & FUSE_EXPORT_SUPPORT)
1021 data->dataflags |= FSESS_EXPORT_SUPPORT;
1022 /*
1023 * Don't bother to check FUSE_BIG_WRITES, because it's
1024 * redundant with max_write
1025 */
1026 /*
1027 * max_background and congestion_threshold are not
1028 * implemented
1029 */
1030 } else {
1031 err = EINVAL;
1032 }
1033 } else {
1034 /* Old fixed values */
1035 data->max_write = 4096;
1036 }
1037
1038 if (fuse_libabi_geq(data, 7, 6))
1039 data->max_readahead_blocks = fiio->max_readahead / maxbcachebuf;
1040
1041 if (!fuse_libabi_geq(data, 7, 7))
1042 fsess_set_notimpl(data->mp, FUSE_INTERRUPT);
1043
1044 if (!fuse_libabi_geq(data, 7, 8)) {
1045 fsess_set_notimpl(data->mp, FUSE_BMAP);
1046 fsess_set_notimpl(data->mp, FUSE_DESTROY);
1047 }
1048
1049 if (!fuse_libabi_geq(data, 7, 19)) {
1050 fsess_set_notimpl(data->mp, FUSE_FALLOCATE);
1051 }
1052
1053 if (fuse_libabi_geq(data, 7, 23) && fiio->time_gran >= 1 &&
1054 fiio->time_gran <= 1000000000)
1055 data->time_gran = fiio->time_gran;
1056 else
1057 data->time_gran = 1;
1058
1059 if (!fuse_libabi_geq(data, 7, 23))
1060 data->cache_mode = fuse_data_cache_mode;
1061 else if (fiio->flags & FUSE_WRITEBACK_CACHE)
1062 data->cache_mode = FUSE_CACHE_WB;
1063 else
1064 data->cache_mode = FUSE_CACHE_WT;
1065
1066 if (!fuse_libabi_geq(data, 7, 24))
1067 fsess_set_notimpl(data->mp, FUSE_LSEEK);
1068
1069 if (!fuse_libabi_geq(data, 7, 28))
1070 fsess_set_notimpl(data->mp, FUSE_COPY_FILE_RANGE);
1071
1072 if (fuse_libabi_geq(data, 7, 33) && (fiio->flags & FUSE_SETXATTR_EXT))
1073 data->dataflags |= FSESS_SETXATTR_EXT;
1074 out:
1075 if (err) {
1076 fdata_set_dead(data);
1077 }
1078 FUSE_LOCK();
1079 data->dataflags |= FSESS_INITED;
1080 SDT_PROBE2(fusefs, , internal, init_done, data, fiio);
1081 wakeup(&data->ticketer);
1082 FUSE_UNLOCK();
1083
1084 return 0;
1085 }
1086
/*
 * Build and asynchronously send the FUSE_INIT request that begins a session.
 * The reply is handled by fuse_internal_init_callback(), which is installed
 * here; this function does not wait for the answer.
 */
void
fuse_internal_send_init(struct fuse_data *data, struct thread *td)
{
	struct fuse_init_in *fiii;
	struct fuse_dispatcher fdi;

	fdisp_init(&fdi, sizeof(*fiii));
	fdisp_make(&fdi, FUSE_INIT, data->mp, 0, td, NULL);
	fiii = fdi.indata;
	fiii->major = FUSE_KERNEL_VERSION;
	fiii->minor = FUSE_KERNEL_MINOR_VERSION;
	/*
	 * fusefs currently reads ahead no more than one cache block at a time.
	 * See fuse_read_biobackend
	 */
	fiii->max_readahead = maxbcachebuf;
	/*
	 * Unsupported features:
	 * FUSE_FILE_OPS: No known FUSE server or client supports it
	 * FUSE_ATOMIC_O_TRUNC: our VFS cannot support it
	 * FUSE_DONT_MASK: unlike Linux, FreeBSD always applies the umask, even
	 *	when default ACLs are in use.
	 * FUSE_SPLICE_WRITE, FUSE_SPLICE_MOVE, FUSE_SPLICE_READ: FreeBSD
	 *	doesn't have splice(2).
	 * FUSE_FLOCK_LOCKS: not yet implemented
	 * FUSE_AUTO_INVAL_DATA: not yet implemented
	 * FUSE_DO_READDIRPLUS: not yet implemented
	 * FUSE_READDIRPLUS_AUTO: not yet implemented
	 * FUSE_ASYNC_DIO: not yet implemented
	 * FUSE_PARALLEL_DIROPS: not yet implemented
	 * FUSE_HANDLE_KILLPRIV: not yet implemented
	 * FUSE_POSIX_ACL: not yet implemented
	 * FUSE_ABORT_ERROR: not yet implemented
	 * FUSE_CACHE_SYMLINKS: not yet implemented
	 * FUSE_MAX_PAGES: not yet implemented
	 */
	fiii->flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_EXPORT_SUPPORT
		| FUSE_BIG_WRITES | FUSE_HAS_IOCTL_DIR | FUSE_WRITEBACK_CACHE
		| FUSE_NO_OPEN_SUPPORT | FUSE_NO_OPENDIR_SUPPORT
		| FUSE_SETXATTR_EXT;

	/* Handle the reply asynchronously; don't block mount(2). */
	fuse_insert_callback(fdi.tick, fuse_internal_init_callback);
	fuse_insert_message(fdi.tick, false);
	fdisp_destroy(&fdi);
}
1132
1133 /*
1134 * Send a FUSE_SETATTR operation with no permissions checks. If cred is NULL,
1135 * send the request with root credentials
1136 */
fuse_internal_setattr(struct vnode * vp,struct vattr * vap,struct thread * td,struct ucred * cred)1137 int fuse_internal_setattr(struct vnode *vp, struct vattr *vap,
1138 struct thread *td, struct ucred *cred)
1139 {
1140 struct fuse_vnode_data *fvdat;
1141 struct fuse_dispatcher fdi;
1142 struct fuse_setattr_in *fsai;
1143 struct mount *mp;
1144 pid_t pid = td->td_proc->p_pid;
1145 struct fuse_data *data;
1146 int err = 0;
1147 __enum_uint8(vtype) vtyp;
1148
1149 ASSERT_CACHED_ATTRS_LOCKED(vp);
1150
1151 mp = vnode_mount(vp);
1152 fvdat = VTOFUD(vp);
1153 data = fuse_get_mpdata(mp);
1154
1155 fdisp_init(&fdi, sizeof(*fsai));
1156 fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
1157 if (!cred) {
1158 fdi.finh->uid = 0;
1159 fdi.finh->gid = 0;
1160 }
1161 fsai = fdi.indata;
1162 fsai->valid = 0;
1163
1164 if (vap->va_uid != (uid_t)VNOVAL) {
1165 fsai->uid = vap->va_uid;
1166 fsai->valid |= FATTR_UID;
1167 }
1168 if (vap->va_gid != (gid_t)VNOVAL) {
1169 fsai->gid = vap->va_gid;
1170 fsai->valid |= FATTR_GID;
1171 }
1172 if (vap->va_size != VNOVAL) {
1173 struct fuse_filehandle *fufh = NULL;
1174
1175 /*Truncate to a new value. */
1176 fsai->size = vap->va_size;
1177 fsai->valid |= FATTR_SIZE;
1178
1179 fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid);
1180 if (fufh) {
1181 fsai->fh = fufh->fh_id;
1182 fsai->valid |= FATTR_FH;
1183 }
1184 VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
1185 }
1186 if (vap->va_atime.tv_sec != VNOVAL) {
1187 fsai->atime = vap->va_atime.tv_sec;
1188 fsai->atimensec = vap->va_atime.tv_nsec;
1189 fsai->valid |= FATTR_ATIME;
1190 if (vap->va_vaflags & VA_UTIMES_NULL)
1191 fsai->valid |= FATTR_ATIME_NOW;
1192 } else if (fvdat->flag & FN_ATIMECHANGE) {
1193 fsai->atime = fvdat->cached_attrs.va_atime.tv_sec;
1194 fsai->atimensec = fvdat->cached_attrs.va_atime.tv_nsec;
1195 fsai->valid |= FATTR_ATIME;
1196 }
1197 if (vap->va_mtime.tv_sec != VNOVAL) {
1198 fsai->mtime = vap->va_mtime.tv_sec;
1199 fsai->mtimensec = vap->va_mtime.tv_nsec;
1200 fsai->valid |= FATTR_MTIME;
1201 if (vap->va_vaflags & VA_UTIMES_NULL)
1202 fsai->valid |= FATTR_MTIME_NOW;
1203 } else if (fvdat->flag & FN_MTIMECHANGE) {
1204 fsai->mtime = fvdat->cached_attrs.va_mtime.tv_sec;
1205 fsai->mtimensec = fvdat->cached_attrs.va_mtime.tv_nsec;
1206 fsai->valid |= FATTR_MTIME;
1207 }
1208 if (fuse_libabi_geq(data, 7, 23) && fvdat->flag & FN_CTIMECHANGE) {
1209 fsai->ctime = fvdat->cached_attrs.va_ctime.tv_sec;
1210 fsai->ctimensec = fvdat->cached_attrs.va_ctime.tv_nsec;
1211 fsai->valid |= FATTR_CTIME;
1212 }
1213 if (vap->va_mode != (mode_t)VNOVAL) {
1214 fsai->mode = vap->va_mode & ALLPERMS;
1215 fsai->valid |= FATTR_MODE;
1216 }
1217 if (!fsai->valid) {
1218 goto out;
1219 }
1220
1221 if ((err = fdisp_wait_answ(&fdi)))
1222 goto out;
1223 vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);
1224
1225 if (vnode_vtype(vp) != vtyp) {
1226 if (vnode_vtype(vp) == VNON && vtyp != VNON) {
1227 SDT_PROBE2(fusefs, , internal, trace, 1, "FUSE: Dang! "
1228 "vnode_vtype is VNON and vtype isn't.");
1229 } else {
1230 /*
1231 * STALE vnode, ditch
1232 *
1233 * The vnode has changed its type "behind our back".
1234 * This probably means that the file got deleted and
1235 * recreated on the server, with the same inode.
1236 * There's nothing really we can do, so let us just
1237 * return ENOENT. After all, the entry must not have
1238 * existed in the recent past. If the user tries
1239 * again, it will work.
1240 */
1241 fuse_internal_vnode_disappear(vp);
1242 err = ENOENT;
1243 }
1244 }
1245 if (err == 0) {
1246 struct fuse_attr_out *fao = (struct fuse_attr_out*)fdi.answ;
1247 fuse_vnode_undirty_cached_timestamps(vp, true);
1248 fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid,
1249 fao->attr_valid_nsec, NULL, false);
1250 getnanouptime(&fvdat->last_local_modify);
1251 }
1252
1253 out:
1254 fdisp_destroy(&fdi);
1255 return err;
1256 }
1257
1258 /*
1259 * FreeBSD clears the SUID and SGID bits on any write by a non-root user.
1260 */
1261 void
fuse_internal_clear_suid_on_write(struct vnode * vp,struct ucred * cred,struct thread * td)1262 fuse_internal_clear_suid_on_write(struct vnode *vp, struct ucred *cred,
1263 struct thread *td)
1264 {
1265 struct fuse_data *data;
1266 struct mount *mp;
1267 struct vattr va;
1268 int dataflags;
1269
1270 mp = vnode_mount(vp);
1271 data = fuse_get_mpdata(mp);
1272 dataflags = data->dataflags;
1273
1274 ASSERT_VOP_LOCKED(vp, __func__);
1275
1276 if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
1277 if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) {
1278 fuse_internal_getattr(vp, &va, cred, td);
1279 if (va.va_mode & (S_ISUID | S_ISGID)) {
1280 mode_t mode = va.va_mode & ~(S_ISUID | S_ISGID);
1281 /* Clear all vattr fields except mode */
1282 vattr_null(&va);
1283 va.va_mode = mode;
1284
1285 /*
1286 * Ignore fuse_internal_setattr's return value,
1287 * because at this point the write operation has
1288 * already succeeded and we don't want to return
1289 * failing status for that.
1290 */
1291 (void)fuse_internal_setattr(vp, &va, td, NULL);
1292 }
1293 }
1294 }
1295 }
1296
1297 #ifdef ZERO_PAD_INCOMPLETE_BUFS
/*
 * Return 1 if every byte in buf[0..len) is zero, otherwise 0.
 *
 * Note: the loop index must be size_t, not int, to match len's type;
 * an int index would be a signed/unsigned comparison and could not
 * cover buffers larger than INT_MAX bytes.
 */
static int
isbzero(void *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (((char *)buf)[i])
			return (0);
	}

	return (1);
}
1310
1311 #endif
1312
1313 void
fuse_internal_init(void)1314 fuse_internal_init(void)
1315 {
1316 fuse_lookup_cache_misses = counter_u64_alloc(M_WAITOK);
1317 fuse_lookup_cache_hits = counter_u64_alloc(M_WAITOK);
1318 }
1319
1320 void
fuse_internal_destroy(void)1321 fuse_internal_destroy(void)
1322 {
1323 counter_u64_free(fuse_lookup_cache_hits);
1324 counter_u64_free(fuse_lookup_cache_misses);
1325 }
1326