xref: /src/sys/fs/tmpfs/tmpfs_vnops.c (revision 7aaec5f3faecf98e377c97e24dddb9c65f4b2e75)
1 /*	$NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause
5  *
6  * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
11  * 2005 program.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * tmpfs vnode interface.
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/dirent.h>
42 #include <sys/extattr.h>
43 #include <sys/fcntl.h>
44 #include <sys/file.h>
45 #include <sys/filio.h>
46 #include <sys/limits.h>
47 #include <sys/lockf.h>
48 #include <sys/lock.h>
49 #include <sys/mount.h>
50 #include <sys/namei.h>
51 #include <sys/priv.h>
52 #include <sys/proc.h>
53 #include <sys/rwlock.h>
54 #include <sys/sched.h>
55 #include <sys/smr.h>
56 #include <sys/stat.h>
57 #include <sys/sysctl.h>
58 #include <sys/unistd.h>
59 #include <sys/vnode.h>
60 #include <security/audit/audit.h>
61 #include <security/mac/mac_framework.h>
62 
63 #include <vm/vm.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_object.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_pager.h>
68 #include <vm/swap_pager.h>
69 
70 #include <fs/tmpfs/tmpfs_vnops.h>
71 #include <fs/tmpfs/tmpfs.h>
72 
SYSCTL_DECL(_vfs_tmpfs);
VFS_SMR_DECLARE;

/*
 * Counter exported read-only as vfs.tmpfs.rename_restarts; bumped by
 * tmpfs_rename_relock() each time the rename lock dance has to retry.
 */
static volatile int tmpfs_rename_restarts;
SYSCTL_INT(_vfs_tmpfs, OID_AUTO, rename_restarts, CTLFLAG_RD,
    __DEVOLATILE(int *, &tmpfs_rename_restarts), 0,
    "Times rename had to restart due to lock contention");

/* Malloc type for tmpfs extended-attribute bookkeeping. */
MALLOC_DEFINE(M_TMPFSEA, "tmpfs extattr", "tmpfs extattr structure");
82 
/*
 * Adapter passed to vn_vget_ino_gen(): resolves a tmpfs node (carried
 * in 'arg') to its vnode via tmpfs_alloc_vp().
 */
static int
tmpfs_vn_get_ino_alloc(struct mount *mp, void *arg, int lkflags,
    struct vnode **rvp)
{
	int error;

	error = tmpfs_alloc_vp(mp, arg, lkflags, rvp);
	return (error);
}
90 
/*
 * Core lookup: resolve the pathname component 'cnp' within directory
 * 'dvp', returning a locked, referenced vnode in *vpp on success.
 * Handles "..", ".", whiteouts, and the EJUSTRETURN protocol for
 * CREATE/RENAME (and whiteout DELETE) of a missing last component.
 */
static int
tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct tmpfs_dirent *de;
	struct tmpfs_node *dnode, *pnode;
	struct tmpfs_mount *tm;
	int error;

	/* Caller assumes responsibility for ensuring access (VEXEC). */
	dnode = VP_TO_TMPFS_DIR(dvp);
	*vpp = NULL;

	/* We cannot be requesting the parent directory of the root node. */
	MPASS(IMPLIES(dnode->tn_type == VDIR &&
	    dnode->tn_dir.tn_parent == dnode,
	    !(cnp->cn_flags & ISDOTDOT)));

	TMPFS_ASSERT_LOCKED(dnode);
	/* A NULL parent pointer means the directory has been removed. */
	if (dnode->tn_dir.tn_parent == NULL) {
		error = ENOENT;
		goto out;
	}
	if (cnp->cn_flags & ISDOTDOT) {
		tm = VFS_TO_TMPFS(dvp->v_mount);
		pnode = dnode->tn_dir.tn_parent;
		/*
		 * Hold a reference on the parent: vn_vget_ino_gen() may
		 * drop dvp's lock, during which the parent could
		 * otherwise go away.
		 */
		tmpfs_ref_node(pnode);
		error = vn_vget_ino_gen(dvp, tmpfs_vn_get_ino_alloc,
		    pnode, cnp->cn_lkflags, vpp);
		tmpfs_free_node(tm, pnode);
		if (error != 0)
			goto out;
	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		/* "." resolves to the directory itself. */
		vref(dvp);
		*vpp = dvp;
		error = 0;
	} else {
		de = tmpfs_dir_lookup(dnode, NULL, cnp);
		/* An entry with no node is a whiteout left by a union mount. */
		if (de != NULL && de->td_node == NULL)
			cnp->cn_flags |= ISWHITEOUT;
		if (de == NULL || de->td_node == NULL) {
			/*
			 * The entry was not found in the directory.
			 * This is OK if we are creating or renaming an
			 * entry and are working on the last component of
			 * the path name.
			 */
			if ((cnp->cn_flags & ISLASTCN) &&
			    (cnp->cn_nameiop == CREATE || \
			    cnp->cn_nameiop == RENAME ||
			    (cnp->cn_nameiop == DELETE &&
			    cnp->cn_flags & DOWHITEOUT &&
			    cnp->cn_flags & ISWHITEOUT))) {
				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
				    curthread);
				if (error != 0)
					goto out;

				/* Tell namei the caller may create the entry. */
				error = EJUSTRETURN;
			} else
				error = ENOENT;
		} else {
			struct tmpfs_node *tnode;

			/*
			 * The entry was found, so get its associated
			 * tmpfs_node.
			 */
			tnode = de->td_node;

			/*
			 * If we are not at the last path component and
			 * found a non-directory or non-link entry (which
			 * may itself be pointing to a directory), raise
			 * an error.
			 */
			if ((tnode->tn_type != VDIR &&
			    tnode->tn_type != VLNK) &&
			    !(cnp->cn_flags & ISLASTCN)) {
				error = ENOTDIR;
				goto out;
			}

			/*
			 * If we are deleting or renaming the entry, keep
			 * track of its tmpfs_dirent so that it can be
			 * easily deleted later.
			 */
			if ((cnp->cn_flags & ISLASTCN) &&
			    (cnp->cn_nameiop == DELETE ||
			    cnp->cn_nameiop == RENAME)) {
				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
				    curthread);
				if (error != 0)
					goto out;

				/* Allocate a new vnode on the matching entry. */
				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
				    cnp->cn_lkflags, vpp);
				if (error != 0)
					goto out;

				/*
				 * Sticky directory: removal/rename needs
				 * ownership (VADMIN) of either the
				 * directory or the entry itself.
				 */
				if ((dnode->tn_mode & S_ISTXT) &&
				  VOP_ACCESS(dvp, VADMIN, cnp->cn_cred,
				  curthread) && VOP_ACCESS(*vpp, VADMIN,
				  cnp->cn_cred, curthread)) {
					error = EPERM;
					vput(*vpp);
					*vpp = NULL;
					goto out;
				}
			} else {
				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
				    cnp->cn_lkflags, vpp);
				if (error != 0)
					goto out;
			}
		}
	}

	/*
	 * Store the result of this lookup in the cache.  Avoid this if the
	 * request was for creation, as it does not improve timings on
	 * empirical tests.
	 */
	if ((cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
		cache_enter(dvp, *vpp, cnp);

out:
#ifdef INVARIANTS
	/*
	 * If there were no errors, *vpp cannot be null and it must be
	 * locked.
	 */
	if (error == 0) {
		MPASS(*vpp != NULL);
		ASSERT_VOP_LOCKED(*vpp, __func__);
	} else {
		MPASS(*vpp == NULL);
	}
#endif

	return (error);
}
234 
235 static int
tmpfs_cached_lookup(struct vop_cachedlookup_args * v)236 tmpfs_cached_lookup(struct vop_cachedlookup_args *v)
237 {
238 
239 	return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
240 }
241 
242 static int
tmpfs_lookup(struct vop_lookup_args * v)243 tmpfs_lookup(struct vop_lookup_args *v)
244 {
245 	struct vnode *dvp = v->a_dvp;
246 	struct vnode **vpp = v->a_vpp;
247 	struct componentname *cnp = v->a_cnp;
248 	int error;
249 
250 	/* Check accessibility of requested node as a first step. */
251 	error = vn_dir_check_exec(dvp, cnp);
252 	if (error != 0)
253 		return (error);
254 
255 	return (tmpfs_lookup1(dvp, vpp, cnp));
256 }
257 
258 static int
tmpfs_create(struct vop_create_args * v)259 tmpfs_create(struct vop_create_args *v)
260 {
261 	struct vnode *dvp = v->a_dvp;
262 	struct vnode **vpp = v->a_vpp;
263 	struct componentname *cnp = v->a_cnp;
264 	struct vattr *vap = v->a_vap;
265 	int error;
266 
267 	MPASS(vap->va_type == VREG || vap->va_type == VSOCK);
268 
269 	error = tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
270 	if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
271 		cache_enter(dvp, *vpp, cnp);
272 	return (error);
273 }
274 
275 static int
tmpfs_mknod(struct vop_mknod_args * v)276 tmpfs_mknod(struct vop_mknod_args *v)
277 {
278 	struct vnode *dvp = v->a_dvp;
279 	struct vnode **vpp = v->a_vpp;
280 	struct componentname *cnp = v->a_cnp;
281 	struct vattr *vap = v->a_vap;
282 
283 	if (!VATTR_ISDEV(vap) && vap->va_type != VFIFO)
284 		return (EINVAL);
285 
286 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
287 }
288 
289 struct fileops tmpfs_fnops;
290 
/*
 * VOP_OPEN: refuse opens of fully unlinked nodes and write-only opens
 * of append-only files; for regular files, hook the tmpfs node into
 * the struct file so I/O can go through tmpfs_fnops.
 */
static int
tmpfs_open(struct vop_open_args *v)
{
	struct vnode *vp;
	struct tmpfs_node *node;
	struct file *fp;
	int error, mode;

	vp = v->a_vp;
	mode = v->a_mode;
	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * The file is still active but all its names have been removed
	 * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
	 * it is about to die.
	 */
	if (node->tn_links < 1)
		return (ENOENT);

	/* If the file is marked append-only, deny write requests. */
	if (node->tn_flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
		error = EPERM;
	else {
		error = 0;
		/* For regular files, the call below is nop. */
		KASSERT(vp->v_type != VREG || (node->tn_reg.tn_aobj->flags &
		    OBJ_DEAD) == 0, ("dead object"));
		vnode_create_vobject(vp, node->tn_size, v->a_td);
	}

	/*
	 * Stash the node in fp->f_data with an extra reference; the
	 * reference is released in tmpfs_fo_close().
	 */
	fp = v->a_fp;
	MPASS(fp == NULL || fp->f_data == NULL);
	if (error == 0 && fp != NULL && vp->v_type == VREG) {
		tmpfs_ref_node(node);
		finit_vnode(fp, mode, node, &tmpfs_fnops);
	}

	return (error);
}
331 
332 static int
tmpfs_close(struct vop_close_args * v)333 tmpfs_close(struct vop_close_args *v)
334 {
335 	struct vnode *vp = v->a_vp;
336 
337 	/* Update node times. */
338 	tmpfs_update(vp);
339 
340 	return (0);
341 }
342 
343 int
tmpfs_fo_close(struct file * fp,struct thread * td)344 tmpfs_fo_close(struct file *fp, struct thread *td)
345 {
346 	struct tmpfs_node *node;
347 
348 	node = fp->f_data;
349 	if (node != NULL) {
350 		MPASS(node->tn_type == VREG);
351 		tmpfs_free_node(node->tn_reg.tn_tmp, node);
352 	}
353 	return (vnops.fo_close(fp, td));
354 }
355 
356 /*
357  * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
358  * the comment above cache_fplookup for details.
359  */
int
tmpfs_fplookup_vexec(struct vop_fplookup_vexec_args *v)
{
	struct vnode *vp;
	struct tmpfs_node *node;
	struct ucred *cred;
	mode_t all_x, mode;

	vp = v->a_vp;
	node = VP_TO_TMPFS_NODE_SMR(vp);
	/* Vnode may be mid-reclamation; punt to the locked path. */
	if (__predict_false(node == NULL))
		return (EAGAIN);

	/* World-executable: grant without inspecting credentials. */
	all_x = S_IXUSR | S_IXGRP | S_IXOTH;
	mode = atomic_load_short(&node->tn_mode);
	if (__predict_true((mode & all_x) == all_x))
		return (0);

	cred = v->a_cred;
	return (vaccess_vexec_smr(mode, node->tn_uid, node->tn_gid, cred));
}
381 
382 static int
tmpfs_access_locked(struct vnode * vp,struct tmpfs_node * node,accmode_t accmode,struct ucred * cred)383 tmpfs_access_locked(struct vnode *vp, struct tmpfs_node *node,
384     accmode_t accmode, struct ucred *cred)
385 {
386 #ifdef INVARIANTS
387 	if (!mtx_owned(TMPFS_NODE_MTX(node))) {
388 		ASSERT_VOP_LOCKED(vp,
389 		    "tmpfs_access_locked needs locked vnode or node");
390 	}
391 #endif
392 
393 	if ((accmode & VWRITE) != 0 && (node->tn_flags & IMMUTABLE) != 0)
394 		return (EPERM);
395 	return (vaccess(vp->v_type, node->tn_mode, node->tn_uid, node->tn_gid,
396 	    accmode, cred));
397 }
398 
399 int
tmpfs_access(struct vop_access_args * v)400 tmpfs_access(struct vop_access_args *v)
401 {
402 	struct vnode *vp = v->a_vp;
403 	struct ucred *cred = v->a_cred;
404 	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
405 	mode_t all_x = S_IXUSR | S_IXGRP | S_IXOTH;
406 	accmode_t accmode = v->a_accmode;
407 
408 	/*
409 	 * Common case path lookup.
410 	 */
411 	if (__predict_true(accmode == VEXEC &&
412 	    (node->tn_mode & all_x) == all_x))
413 		return (0);
414 
415 	switch (vp->v_type) {
416 	case VDIR:
417 		/* FALLTHROUGH */
418 	case VLNK:
419 		/* FALLTHROUGH */
420 	case VREG:
421 		if ((accmode & VWRITE) != 0 &&
422 		    (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
423 			return (EROFS);
424 		break;
425 
426 	case VBLK:
427 		/* FALLTHROUGH */
428 	case VCHR:
429 		/* FALLTHROUGH */
430 	case VSOCK:
431 		/* FALLTHROUGH */
432 	case VFIFO:
433 		break;
434 
435 	default:
436 		return (EINVAL);
437 	}
438 
439 	return (tmpfs_access_locked(vp, node, accmode, cred));
440 }
441 
/*
 * VOP_STAT: fill *sb straight from the tmpfs node, avoiding the
 * generic VOP_GETATTR translation layer.
 */
int
tmpfs_stat(struct vop_stat_args *v)
{
	struct vnode *vp = v->a_vp;
	struct stat *sb = v->a_sb;
	struct tmpfs_node *node;
	int error;

	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update_getattr(vp);

	error = vop_stat_helper_pre(v);
	if (__predict_false(error))
		return (error);

	sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = node->tn_id;
	sb->st_mode = node->tn_mode | VTTOIF(vp->v_type);
	sb->st_nlink = node->tn_links;
	sb->st_uid = node->tn_uid;
	sb->st_gid = node->tn_gid;
	sb->st_rdev = VN_ISDEV(vp) ? node->tn_rdev : NODEV;
	sb->st_size = node->tn_size;
	sb->st_atim.tv_sec = node->tn_atime.tv_sec;
	sb->st_atim.tv_nsec = node->tn_atime.tv_nsec;
	sb->st_mtim.tv_sec = node->tn_mtime.tv_sec;
	sb->st_mtim.tv_nsec = node->tn_mtime.tv_nsec;
	sb->st_ctim.tv_sec = node->tn_ctime.tv_sec;
	sb->st_ctim.tv_nsec = node->tn_ctime.tv_nsec;
	sb->st_birthtim.tv_sec = node->tn_birthtime.tv_sec;
	sb->st_birthtim.tv_nsec = node->tn_birthtime.tv_nsec;
	sb->st_blksize = PAGE_SIZE;
	sb->st_flags = node->tn_flags;
	sb->st_gen = node->tn_gen;
	sb->st_filerev = 0;
	if (vp->v_type == VREG) {
#ifdef __ILP32__
		vm_object_t obj = node->tn_reg.tn_aobj;

		/*
		 * Handle torn read: on 32-bit platforms tn_pages cannot
		 * be loaded atomically, so bracket it with the object
		 * lock.
		 */
		VM_OBJECT_RLOCK(obj);
#endif
		sb->st_blocks = ptoa(node->tn_reg.tn_pages);
#ifdef __ILP32__
		VM_OBJECT_RUNLOCK(obj);
#endif
	} else {
		sb->st_blocks = node->tn_size;
	}
	sb->st_blocks /= S_BLKSIZE;
	return (vop_stat_helper_post(v, error));
}
495 
/*
 * VOP_GETATTR: copy the node's attributes into *vap.
 */
int
tmpfs_getattr(struct vop_getattr_args *v)
{
	struct vnode *vp = v->a_vp;
	struct vattr *vap = v->a_vap;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	/* Refresh node times before they are copied out. */
	tmpfs_update_getattr(vp);

	vap->va_type = vp->v_type;
	vap->va_mode = node->tn_mode;
	vap->va_nlink = node->tn_links;
	vap->va_uid = node->tn_uid;
	vap->va_gid = node->tn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = node->tn_id;
	vap->va_size = node->tn_size;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_atime = node->tn_atime;
	vap->va_mtime = node->tn_mtime;
	vap->va_ctime = node->tn_ctime;
	vap->va_birthtime = node->tn_birthtime;
	vap->va_gen = node->tn_gen;
	vap->va_flags = node->tn_flags;
	vap->va_rdev = VN_ISDEV(vp) ? node->tn_rdev : NODEV;
	if (vp->v_type == VREG) {
#ifdef __ILP32__
		vm_object_t obj = node->tn_reg.tn_aobj;

		/* Avoid a torn read of tn_pages on 32-bit platforms. */
		VM_OBJECT_RLOCK(obj);
#endif
		vap->va_bytes = ptoa(node->tn_reg.tn_pages);
#ifdef __ILP32__
		VM_OBJECT_RUNLOCK(obj);
#endif
	} else {
		vap->va_bytes = node->tn_size;
	}
	vap->va_filerev = 0;

	return (0);
}
540 
/*
 * VOP_SETATTR: apply the caller-supplied attributes to the node, one
 * group at a time via the tmpfs_ch*() helpers, stopping at the first
 * error.  Requests containing any unsettable attribute are rejected.
 */
int
tmpfs_setattr(struct vop_setattr_args *v)
{
	struct vnode *vp = v->a_vp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	struct thread *td = curthread;

	int error;

	ASSERT_VOP_IN_SEQC(vp);

	error = 0;

	/* Abort if any unsettable attribute is given. */
	if (vap->va_type != VNON ||
	    vap->va_nlink != VNOVAL ||
	    vap->va_fsid != VNOVAL ||
	    vap->va_fileid != VNOVAL ||
	    vap->va_blocksize != VNOVAL ||
	    vap->va_gen != VNOVAL ||
	    vap->va_rdev != VNOVAL ||
	    vap->va_bytes != VNOVAL)
		error = EINVAL;

	if (error == 0 && (vap->va_flags != VNOVAL))
		error = tmpfs_chflags(vp, vap->va_flags, cred, td);

	if (error == 0 && (vap->va_size != VNOVAL))
		error = tmpfs_chsize(vp, vap->va_size, cred, td);

	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, td);

	if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
		error = tmpfs_chmod(vp, vap->va_mode, cred, td);

	/* A timestamp is applied only when both of its fields were set. */
	if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
	    vap->va_atime.tv_nsec != VNOVAL) ||
	    (vap->va_mtime.tv_sec != VNOVAL &&
	    vap->va_mtime.tv_nsec != VNOVAL) ||
	    (vap->va_birthtime.tv_sec != VNOVAL &&
	    vap->va_birthtime.tv_nsec != VNOVAL)))
		error = tmpfs_chtimes(vp, vap, cred, td);

	/*
	 * Update the node times.  We give preference to the error codes
	 * generated by this function rather than the ones that may arise
	 * from tmpfs_update.
	 */
	tmpfs_update(vp);

	return (error);
}
595 
596 static int
tmpfs_read(struct vop_read_args * v)597 tmpfs_read(struct vop_read_args *v)
598 {
599 	struct vnode *vp;
600 	struct uio *uio;
601 	struct tmpfs_node *node;
602 
603 	vp = v->a_vp;
604 	if (vp->v_type != VREG)
605 		return (EISDIR);
606 	uio = v->a_uio;
607 	if (uio->uio_offset < 0)
608 		return (EINVAL);
609 	node = VP_TO_TMPFS_NODE(vp);
610 	tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
611 	return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
612 }
613 
/*
 * Lockless read path: serve the read from the backing VM object under
 * SMR protection, without taking the vnode lock.  Returns EJUSTRETURN
 * whenever the fast path cannot be used, so the caller falls back to
 * the locked tmpfs_read().
 */
static int
tmpfs_read_pgcache(struct vop_read_pgcache_args *v)
{
	struct vnode *vp;
	struct tmpfs_node *node;
	vm_object_t object;
	off_t size;
	int error;

	vp = v->a_vp;
	VNPASS((vn_irflag_read(vp) & VIRF_PGREAD) != 0, vp);

	if (v->a_uio->uio_offset < 0)
		return (EINVAL);

	error = EJUSTRETURN;
	vfs_smr_enter();

	/* The node may be gone if the vnode is being reclaimed. */
	node = VP_TO_TMPFS_NODE_SMR(vp);
	if (node == NULL)
		goto out_smr;
	MPASS(node->tn_type == VREG);
	MPASS(node->tn_refcount >= 1);
	object = node->tn_reg.tn_aobj;
	if (object == NULL)
		goto out_smr;

	MPASS(object->type == tmpfs_pager_type);
	MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
	    OBJ_SWAP);
	if (!VN_IS_DOOMED(vp)) {
		/* size cannot become shorter due to rangelock. */
		size = node->tn_size;
		tmpfs_set_accessed(node->tn_reg.tn_tmp, node);
		/*
		 * NOTE(review): uiomove_object() runs after leaving the
		 * SMR section; this relies on the object outliving the
		 * copy (node refcount asserted >= 1 above).
		 */
		vfs_smr_exit();
		error = uiomove_object(object, size, v->a_uio);
		return (error);
	}
out_smr:
	vfs_smr_exit();
	return (error);
}
656 
/*
 * VOP_WRITE for regular files: honors IO_APPEND and the file-size
 * limits, grows the backing object when writing past EOF, clears
 * setuid/setgid for unprivileged writers, and rolls the size back if
 * the copy fails.
 */
static int
tmpfs_write(struct vop_write_args *v)
{
	struct vnode *vp;
	struct uio *uio;
	struct tmpfs_node *node;
	off_t oldsize;
	ssize_t r;
	int error, ioflag;
	mode_t newmode;

	vp = v->a_vp;
	uio = v->a_uio;
	ioflag = v->a_ioflag;
	error = 0;
	node = VP_TO_TMPFS_NODE(vp);
	oldsize = node->tn_size;

	if (uio->uio_offset < 0 || vp->v_type != VREG)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	if (ioflag & IO_APPEND)
		uio->uio_offset = node->tn_size;
	/* Clamp the request to RLIMIT_FSIZE and the per-mount maximum. */
	error = vn_rlimit_fsizex(vp, uio, VFS_TO_TMPFS(vp->v_mount)->
	    tm_maxfilesize, &r, uio->uio_td);
	if (error != 0) {
		vn_rlimit_fsizex_res(uio, r);
		return (error);
	}

	/* Extend the file first if the write reaches past EOF. */
	if (uio->uio_offset + uio->uio_resid > node->tn_size) {
		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid,
		    FALSE);
		if (error != 0)
			goto out;
	}

	error = uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio);
	node->tn_status |= TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED;
	node->tn_accessed = true;
	/*
	 * Unprivileged writes clear the setuid/setgid bits; the mode
	 * store is bracketed by seqc so lockless lookup sees a
	 * consistent value.
	 */
	if (node->tn_mode & (S_ISUID | S_ISGID)) {
		if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID)) {
			newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
			vn_seqc_write_begin(vp);
			atomic_store_short(&node->tn_mode, newmode);
			vn_seqc_write_end(vp);
		}
	}
	/* Undo the extension if the copy itself failed. */
	if (error != 0)
		(void)tmpfs_reg_resize(vp, oldsize, TRUE);

out:
	MPASS(IMPLIES(error == 0, uio->uio_resid == 0));
	MPASS(IMPLIES(error != 0, oldsize == node->tn_size));

	vn_rlimit_fsizex_res(uio, r);
	return (error);
}
716 
717 static int
tmpfs_deallocate(struct vop_deallocate_args * v)718 tmpfs_deallocate(struct vop_deallocate_args *v)
719 {
720 	return (tmpfs_reg_punch_hole(v->a_vp, v->a_offset, v->a_len));
721 }
722 
723 static int
tmpfs_fsync(struct vop_fsync_args * v)724 tmpfs_fsync(struct vop_fsync_args *v)
725 {
726 	struct vnode *vp = v->a_vp;
727 
728 	tmpfs_check_mtime(vp);
729 	tmpfs_update(vp);
730 
731 	return (0);
732 }
733 
/*
 * VOP_REMOVE: delete the directory entry binding vp into dvp.  Only
 * the entry is freed here; the node lives on until its vnode is
 * reclaimed.
 */
static int
tmpfs_remove(struct vop_remove_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = v->a_vp;

	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;

	/* Directories must go through VOP_RMDIR instead. */
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);
	de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
	MPASS(de != NULL);

	/* Files marked as immutable or append-only cannot be deleted. */
	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
	    (dnode->tn_flags & APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the directory; as it is a file, we do not
	 * have to change the number of hard links of the directory. */
	tmpfs_dir_detach(dvp, de);
	/* Leave a whiteout behind if a union mount requested one. */
	if (v->a_cnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_whiteout_add(dvp, v->a_cnp);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	node->tn_status |= TMPFS_NODE_CHANGED;
	node->tn_accessed = true;
	error = 0;

out:
	return (error);
}
782 
/*
 * VOP_LINK: create a new name (hard link) for vp inside directory
 * tdvp, subject to the link-count limit and immutable/append flags.
 */
static int
tmpfs_link(struct vop_link_args *v)
{
	struct vnode *dvp = v->a_tdvp;
	struct vnode *vp = v->a_vp;
	struct componentname *cnp = v->a_cnp;

	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_node *node;

	MPASS(dvp != vp); /* XXX When can this be false? */
	node = VP_TO_TMPFS_NODE(vp);

	/* Ensure that we do not overflow the maximum number of links imposed
	 * by the system. */
	MPASS(node->tn_links <= TMPFS_LINK_MAX);
	if (node->tn_links == TMPFS_LINK_MAX) {
		error = EMLINK;
		goto out;
	}

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the node. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
	    cnp->cn_nameptr, cnp->cn_namelen, &de);
	if (error != 0)
		goto out;

	/*
	 * Insert the new directory entry into the appropriate directory,
	 * replacing any whiteout that was covering the name.
	 */
	if (cnp->cn_flags & ISWHITEOUT)
		tmpfs_dir_whiteout_remove(dvp, cnp);
	tmpfs_dir_attach(dvp, de);

	/* vp link count has changed, so update node times. */
	node->tn_status |= TMPFS_NODE_CHANGED;
	tmpfs_update(vp);

	error = 0;

out:
	return (error);
}
831 
832 /*
833  * We acquire all but fdvp locks using non-blocking acquisitions.  If we
834  * fail to acquire any lock in the path we will drop all held locks,
835  * acquire the new lock in a blocking fashion, and then release it and
836  * restart the rename.  This acquire/release step ensures that we do not
837  * spin on a lock waiting for release.  On error release all vnode locks
838  * and decrement references the way tmpfs_rename() would do.
839  */
static int
tmpfs_rename_relock(struct vnode *fdvp, struct vnode **fvpp,
    struct vnode *tdvp, struct vnode **tvpp,
    struct componentname *fcnp, struct componentname *tcnp)
{
	struct vnode *nvp;
	struct mount *mp;
	struct tmpfs_dirent *de;
	int error, restarts = 0;

	/* Caller holds tdvp (and possibly *tvpp) locked; start clean. */
	VOP_UNLOCK(tdvp);
	if (*tvpp != NULL && *tvpp != tdvp)
		VOP_UNLOCK(*tvpp);
	mp = fdvp->v_mount;

relock:
	restarts += 1;
	error = vn_lock(fdvp, LK_EXCLUSIVE);
	if (error)
		goto releout;
	if (vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		VOP_UNLOCK(fdvp);
		error = vn_lock(tdvp, LK_EXCLUSIVE);
		if (error)
			goto releout;
		/*
		 * Blocking acquire succeeded: drop it immediately (we
		 * only waited out the contention) and redo the whole
		 * sequence in the right order.
		 */
		VOP_UNLOCK(tdvp);
		goto relock;
	}
	/*
	 * Re-resolve fvp to be certain it still exists and fetch the
	 * correct vnode.
	 */
	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(fdvp), NULL, fcnp);
	if (de == NULL) {
		VOP_UNLOCK(fdvp);
		VOP_UNLOCK(tdvp);
		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
			error = EINVAL;
		else
			error = ENOENT;
		goto releout;
	}
	error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE | LK_NOWAIT, &nvp);
	if (error != 0) {
		VOP_UNLOCK(fdvp);
		VOP_UNLOCK(tdvp);
		if (error != EBUSY)
			goto releout;
		/* Contended: take it blocking, release, and restart. */
		error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE, &nvp);
		if (error != 0)
			goto releout;
		VOP_UNLOCK(nvp);
		/*
		 * Concurrent rename race.
		 */
		if (nvp == tdvp) {
			vrele(nvp);
			error = EINVAL;
			goto releout;
		}
		vrele(*fvpp);
		*fvpp = nvp;
		goto relock;
	}
	vrele(*fvpp);
	*fvpp = nvp;
	VOP_UNLOCK(*fvpp);
	/*
	 * Re-resolve tvp and acquire the vnode lock if present.
	 */
	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(tdvp), NULL, tcnp);
	/*
	 * If tvp disappeared we just carry on.
	 */
	if (de == NULL && *tvpp != NULL) {
		vrele(*tvpp);
		*tvpp = NULL;
	}
	/*
	 * Get the tvp ino if the lookup succeeded.  We may have to restart
	 * if the non-blocking acquire fails.
	 */
	if (de != NULL) {
		nvp = NULL;
		error = tmpfs_alloc_vp(mp, de->td_node,
		    LK_EXCLUSIVE | LK_NOWAIT, &nvp);
		if (*tvpp != NULL)
			vrele(*tvpp);
		*tvpp = nvp;
		if (error != 0) {
			VOP_UNLOCK(fdvp);
			VOP_UNLOCK(tdvp);
			if (error != EBUSY)
				goto releout;
			error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE,
			    &nvp);
			if (error != 0)
				goto releout;
			VOP_UNLOCK(nvp);
			/*
			 * fdvp contains fvp, thus tvp (=fdvp) is not empty.
			 */
			if (nvp == fdvp) {
				error = ENOTEMPTY;
				goto releout;
			}
			goto relock;
		}
	}
	/* Success: fdvp and tdvp locked, *fvpp/*tvpp referenced. */
	tmpfs_rename_restarts += restarts;

	return (0);

releout:
	/* Error: release everything the way tmpfs_rename() would. */
	vrele(fdvp);
	vrele(*fvpp);
	vrele(tdvp);
	if (*tvpp != NULL)
		vrele(*tvpp);
	tmpfs_rename_restarts += restarts;

	return (error);
}
964 
965 static int
tmpfs_rename(struct vop_rename_args * v)966 tmpfs_rename(struct vop_rename_args *v)
967 {
968 	struct vnode *fdvp = v->a_fdvp;
969 	struct vnode *fvp = v->a_fvp;
970 	struct componentname *fcnp = v->a_fcnp;
971 	struct vnode *tdvp = v->a_tdvp;
972 	struct vnode *tvp = v->a_tvp;
973 	struct componentname *tcnp = v->a_tcnp;
974 	char *newname;
975 	struct tmpfs_dirent *de;
976 	struct tmpfs_mount *tmp;
977 	struct tmpfs_node *fdnode;
978 	struct tmpfs_node *fnode;
979 	struct tmpfs_node *tnode;
980 	struct tmpfs_node *tdnode;
981 	int error;
982 	bool want_seqc_end;
983 
984 	want_seqc_end = false;
985 
986 	/*
987 	 * Disallow cross-device renames.
988 	 * XXX Why isn't this done by the caller?
989 	 */
990 	if (fvp->v_mount != tdvp->v_mount ||
991 	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
992 		error = EXDEV;
993 		goto out;
994 	}
995 
996 	if ((v->a_flags & ~(AT_RENAME_NOREPLACE)) != 0) {
997 		error = EOPNOTSUPP;
998 		goto out;
999 	}
1000 
1001 	/* If source and target are the same file, there is nothing to do. */
1002 	if (fvp == tvp) {
1003 		error = 0;
1004 		goto out;
1005 	}
1006 
1007 	/*
1008 	 * If we need to move the directory between entries, lock the
1009 	 * source so that we can safely operate on it.
1010 	 */
1011 	if (fdvp != tdvp && fdvp != tvp) {
1012 		if (vn_lock(fdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1013 			error = tmpfs_rename_relock(fdvp, &fvp, tdvp, &tvp,
1014 			    fcnp, tcnp);
1015 			if (error != 0)
1016 				return (error);
1017 			ASSERT_VOP_ELOCKED(fdvp,
1018 			    "tmpfs_rename: fdvp not locked");
1019 			ASSERT_VOP_ELOCKED(tdvp,
1020 			    "tmpfs_rename: tdvp not locked");
1021 			if (tvp != NULL) {
1022 				ASSERT_VOP_ELOCKED(tvp,
1023 				    "tmpfs_rename: tvp not locked");
1024 				if ((v->a_flags & AT_RENAME_NOREPLACE) != 0) {
1025 					error = EEXIST;
1026 					goto out_locked;
1027 				}
1028 			}
1029 			if (fvp == tvp) {
1030 				error = 0;
1031 				goto out_locked;
1032 			}
1033 		}
1034 	}
1035 
1036 	/*
1037 	 * Avoid manipulating '.' and '..' entries.
1038 	 */
1039 	if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1040 	    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.')) {
1041 		error = EINVAL;
1042 		goto out_locked;
1043 	}
1044 
1045 	if (tvp != NULL)
1046 		vn_seqc_write_begin(tvp);
1047 	vn_seqc_write_begin(tdvp);
1048 	vn_seqc_write_begin(fvp);
1049 	vn_seqc_write_begin(fdvp);
1050 	want_seqc_end = true;
1051 
1052 	tmp = VFS_TO_TMPFS(tdvp->v_mount);
1053 	tdnode = VP_TO_TMPFS_DIR(tdvp);
1054 	tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
1055 	fdnode = VP_TO_TMPFS_DIR(fdvp);
1056 	fnode = VP_TO_TMPFS_NODE(fvp);
1057 	de = tmpfs_dir_lookup(fdnode, fnode, fcnp);
1058 
1059 	/*
1060 	 * Entry can disappear before we lock fdvp.
1061 	 */
1062 	if (de == NULL) {
1063 		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1064 		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
1065 			error = EINVAL;
1066 		else
1067 			error = ENOENT;
1068 		goto out_locked;
1069 	}
1070 	MPASS(de->td_node == fnode);
1071 
1072 	/*
1073 	 * If re-naming a directory to another preexisting directory
1074 	 * ensure that the target directory is empty so that its
1075 	 * removal causes no side effects.
1076 	 * Kern_rename guarantees the destination to be a directory
1077 	 * if the source is one.
1078 	 */
1079 	if (tvp != NULL) {
1080 		MPASS(tnode != NULL);
1081 
1082 		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1083 		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
1084 			error = EPERM;
1085 			goto out_locked;
1086 		}
1087 
1088 		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
1089 			if (tnode->tn_size != 0 &&
1090 			    ((tcnp->cn_flags & IGNOREWHITEOUT) == 0 ||
1091 			    tnode->tn_size > tnode->tn_dir.tn_wht_size)) {
1092 				error = ENOTEMPTY;
1093 				goto out_locked;
1094 			}
1095 		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
1096 			error = ENOTDIR;
1097 			goto out_locked;
1098 		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
1099 			error = EISDIR;
1100 			goto out_locked;
1101 		} else {
1102 			MPASS(fnode->tn_type != VDIR &&
1103 				tnode->tn_type != VDIR);
1104 		}
1105 	}
1106 
1107 	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
1108 	    || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
1109 		error = EPERM;
1110 		goto out_locked;
1111 	}
1112 
1113 	/*
1114 	 * Ensure that we have enough memory to hold the new name, if it
1115 	 * has to be changed.
1116 	 */
1117 	if (fcnp->cn_namelen != tcnp->cn_namelen ||
1118 	    bcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fcnp->cn_namelen) != 0) {
1119 		newname = malloc(tcnp->cn_namelen, M_TMPFSNAME, M_WAITOK);
1120 	} else
1121 		newname = NULL;
1122 
1123 	/*
1124 	 * If the node is being moved to another directory, we have to do
1125 	 * the move.
1126 	 */
1127 	if (fdnode != tdnode) {
1128 		/*
1129 		 * In case we are moving a directory, we have to adjust its
1130 		 * parent to point to the new parent.
1131 		 */
1132 		if (de->td_node->tn_type == VDIR) {
1133 			struct tmpfs_node *n;
1134 
1135 			TMPFS_NODE_LOCK(fnode);
1136 			error = tmpfs_access_locked(fvp, fnode, VWRITE,
1137 			    tcnp->cn_cred);
1138 			TMPFS_NODE_UNLOCK(fnode);
1139 			if (error) {
1140 				if (newname != NULL)
1141 					free(newname, M_TMPFSNAME);
1142 				goto out_locked;
1143 			}
1144 
1145 			/*
1146 			 * Ensure the target directory is not a child of the
1147 			 * directory being moved.  Otherwise, we'd end up
1148 			 * with stale nodes.
1149 			 */
1150 			n = tdnode;
1151 			/*
1152 			 * TMPFS_LOCK guaranties that no nodes are freed while
1153 			 * traversing the list. Nodes can only be marked as
1154 			 * removed: tn_parent == NULL.
1155 			 */
1156 			TMPFS_LOCK(tmp);
1157 			TMPFS_NODE_LOCK(n);
1158 			while (n != n->tn_dir.tn_parent) {
1159 				struct tmpfs_node *parent;
1160 
1161 				if (n == fnode) {
1162 					TMPFS_NODE_UNLOCK(n);
1163 					TMPFS_UNLOCK(tmp);
1164 					error = EINVAL;
1165 					if (newname != NULL)
1166 						free(newname, M_TMPFSNAME);
1167 					goto out_locked;
1168 				}
1169 				parent = n->tn_dir.tn_parent;
1170 				TMPFS_NODE_UNLOCK(n);
1171 				if (parent == NULL) {
1172 					n = NULL;
1173 					break;
1174 				}
1175 				TMPFS_NODE_LOCK(parent);
1176 				if (parent->tn_dir.tn_parent == NULL) {
1177 					TMPFS_NODE_UNLOCK(parent);
1178 					n = NULL;
1179 					break;
1180 				}
1181 				n = parent;
1182 			}
1183 			TMPFS_UNLOCK(tmp);
1184 			if (n == NULL) {
1185 				error = EINVAL;
1186 				if (newname != NULL)
1187 					    free(newname, M_TMPFSNAME);
1188 				goto out_locked;
1189 			}
1190 			TMPFS_NODE_UNLOCK(n);
1191 
1192 			/* Adjust the parent pointer. */
1193 			TMPFS_VALIDATE_DIR(fnode);
1194 			TMPFS_NODE_LOCK(de->td_node);
1195 			de->td_node->tn_dir.tn_parent = tdnode;
1196 			TMPFS_NODE_UNLOCK(de->td_node);
1197 
1198 			/*
1199 			 * As a result of changing the target of the '..'
1200 			 * entry, the link count of the source and target
1201 			 * directories has to be adjusted.
1202 			 */
1203 			TMPFS_NODE_LOCK(tdnode);
1204 			TMPFS_ASSERT_LOCKED(tdnode);
1205 			tdnode->tn_links++;
1206 			TMPFS_NODE_UNLOCK(tdnode);
1207 
1208 			TMPFS_NODE_LOCK(fdnode);
1209 			TMPFS_ASSERT_LOCKED(fdnode);
1210 			fdnode->tn_links--;
1211 			TMPFS_NODE_UNLOCK(fdnode);
1212 		}
1213 	}
1214 
1215 	/*
1216 	 * Do the move: just remove the entry from the source directory
1217 	 * and insert it into the target one.
1218 	 */
1219 	tmpfs_dir_detach(fdvp, de);
1220 
1221 	if (fcnp->cn_flags & DOWHITEOUT)
1222 		tmpfs_dir_whiteout_add(fdvp, fcnp);
1223 	if (tcnp->cn_flags & ISWHITEOUT)
1224 		tmpfs_dir_whiteout_remove(tdvp, tcnp);
1225 
1226 	/*
1227 	 * If the name has changed, we need to make it effective by changing
1228 	 * it in the directory entry.
1229 	 */
1230 	if (newname != NULL) {
1231 		MPASS(tcnp->cn_namelen <= MAXNAMLEN);
1232 
1233 		free(de->ud.td_name, M_TMPFSNAME);
1234 		de->ud.td_name = newname;
1235 		tmpfs_dirent_init(de, tcnp->cn_nameptr, tcnp->cn_namelen);
1236 
1237 		fnode->tn_status |= TMPFS_NODE_CHANGED;
1238 		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1239 	}
1240 
1241 	/*
1242 	 * If we are overwriting an entry, we have to remove the old one
1243 	 * from the target directory.
1244 	 */
1245 	if (tvp != NULL) {
1246 		struct tmpfs_dirent *tde;
1247 
1248 		/* Remove the old entry from the target directory. */
1249 		tde = tmpfs_dir_lookup(tdnode, tnode, tcnp);
1250 		tmpfs_dir_detach(tdvp, tde);
1251 
1252 		/*
1253 		 * If we are overwriting a directory, per the ENOTEMPTY check
1254 		 * above it must either be empty or contain only whiteout
1255 		 * entries.  In the latter case (which can only happen if
1256 		 * IGNOREWHITEOUT was passed in tcnp->cn_flags), clear the
1257 		 * whiteout entries to avoid leaking memory.
1258 		 */
1259 		if (tnode->tn_type == VDIR && tnode->tn_size > 0)
1260 			tmpfs_dir_clear_whiteouts(tvp);
1261 
1262 		/* Update node's ctime because of possible hardlinks. */
1263 		tnode->tn_status |= TMPFS_NODE_CHANGED;
1264 		tmpfs_update(tvp);
1265 
1266 		/*
1267 		 * Free the directory entry we just deleted.  Note that the
1268 		 * node referred by it will not be removed until the vnode is
1269 		 * really reclaimed.
1270 		 */
1271 		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
1272 	}
1273 
1274 	tmpfs_dir_attach(tdvp, de);
1275 
1276 	if (tmpfs_use_nc(fvp)) {
1277 		cache_vop_rename(fdvp, fvp, tdvp, tvp, fcnp, tcnp);
1278 	}
1279 
1280 	error = 0;
1281 
1282 out_locked:
1283 	if (fdvp != tdvp && fdvp != tvp)
1284 		VOP_UNLOCK(fdvp);
1285 
1286 out:
1287 	if (want_seqc_end) {
1288 		if (tvp != NULL)
1289 			vn_seqc_write_end(tvp);
1290 		vn_seqc_write_end(tdvp);
1291 		vn_seqc_write_end(fvp);
1292 		vn_seqc_write_end(fdvp);
1293 	}
1294 
1295 	/*
1296 	 * Release target nodes.
1297 	 * XXX: I don't understand when tdvp can be the same as tvp, but
1298 	 * other code takes care of this...
1299 	 */
1300 	if (tdvp == tvp)
1301 		vrele(tdvp);
1302 	else
1303 		vput(tdvp);
1304 	if (tvp != NULL)
1305 		vput(tvp);
1306 
1307 	/* Release source nodes. */
1308 	vrele(fdvp);
1309 	vrele(fvp);
1310 
1311 	return (error);
1312 }
1313 
1314 static int
tmpfs_mkdir(struct vop_mkdir_args * v)1315 tmpfs_mkdir(struct vop_mkdir_args *v)
1316 {
1317 	struct vnode *dvp = v->a_dvp;
1318 	struct vnode **vpp = v->a_vpp;
1319 	struct componentname *cnp = v->a_cnp;
1320 	struct vattr *vap = v->a_vap;
1321 
1322 	MPASS(vap->va_type == VDIR);
1323 
1324 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
1325 }
1326 
/*
 * Remove the directory vp from dvp.  Both vnodes are locked by the
 * caller.  Only empty directories (or, with IGNOREWHITEOUT, ones
 * containing nothing but whiteout entries) may be removed.
 */
static int
tmpfs_rmdir(struct vop_rmdir_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = v->a_vp;
	struct componentname *cnp = v->a_cnp;

	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;

	tmp = VFS_TO_TMPFS(dvp->v_mount);
	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_DIR(vp);

	/*
	 * Directories with more than two non-whiteout entries ('.' and '..')
	 * cannot be removed.  With IGNOREWHITEOUT, a directory whose
	 * remaining size is entirely whiteout entries still counts as empty.
	 */
	if (node->tn_size != 0 &&
	    ((cnp->cn_flags & IGNOREWHITEOUT) == 0 ||
	    node->tn_size > node->tn_dir.tn_wht_size)) {
		error = ENOTEMPTY;
		goto out;
	}

	/* Check flags to see if we are allowed to remove the directory. */
	if ((dnode->tn_flags & APPEND)
	    || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}

	/* This invariant holds only if we are not trying to remove "..".
	 * We checked for that above so this is safe now. */
	MPASS(node->tn_dir.tn_parent == dnode);

	/* Get the directory entry associated with node (vp).  This was
	 * filled by tmpfs_lookup while looking up the entry. */
	de = tmpfs_dir_lookup(dnode, node, cnp);
	MPASS(TMPFS_DIRENT_MATCHES(de,
	    cnp->cn_nameptr,
	    cnp->cn_namelen));

	/* Detach the directory entry from the directory (dnode). */
	tmpfs_dir_detach(dvp, de);

	/*
	 * If we are removing a directory, per the ENOTEMPTY check above it
	 * must either be empty or contain only whiteout entries.  In the
	 * latter case (which can only happen if IGNOREWHITEOUT was passed
	 * in cnp->cn_flags), clear the whiteout entries to avoid leaking
	 * memory.
	 */
	if (node->tn_size > 0)
		tmpfs_dir_clear_whiteouts(vp);

	/* Leave a whiteout behind when requested (union mounts). */
	if (cnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_whiteout_add(dvp, cnp);

	/* No vnode should be allocated for this entry from this point */
	TMPFS_NODE_LOCK(node);
	node->tn_links--;
	/* Orphan the node; tn_parent == NULL marks it as removed. */
	node->tn_dir.tn_parent = NULL;
	node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
	node->tn_accessed = true;

	TMPFS_NODE_UNLOCK(node);

	/* The parent loses the link contributed by the child's '..'. */
	TMPFS_NODE_LOCK(dnode);
	dnode->tn_links--;
	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
	dnode->tn_accessed = true;
	TMPFS_NODE_UNLOCK(dnode);

	if (tmpfs_use_nc(dvp)) {
		cache_vop_rmdir(dvp, vp);
	}

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	/* Release the deleted vnode (will destroy the node, notify
	 * interested parties and clean it from the cache). */

	dnode->tn_status |= TMPFS_NODE_CHANGED;
	tmpfs_update(dvp);

	error = 0;

out:
	return (error);
}
1424 
1425 static int
tmpfs_symlink(struct vop_symlink_args * v)1426 tmpfs_symlink(struct vop_symlink_args *v)
1427 {
1428 	struct vnode *dvp = v->a_dvp;
1429 	struct vnode **vpp = v->a_vpp;
1430 	struct componentname *cnp = v->a_cnp;
1431 	struct vattr *vap = v->a_vap;
1432 	const char *target = v->a_target;
1433 
1434 #ifdef notyet /* XXX FreeBSD BUG: kern_symlink is not setting VLNK */
1435 	MPASS(vap->va_type == VLNK);
1436 #else
1437 	vap->va_type = VLNK;
1438 #endif
1439 
1440 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, target));
1441 }
1442 
/*
 * Read directory entries from vp into the caller's uio, optionally
 * producing seek cookies for NFS and compat consumers.
 */
static int
tmpfs_readdir(struct vop_readdir_args *va)
{
	struct vnode *vp;
	struct uio *uio;
	struct tmpfs_mount *tm;
	struct tmpfs_node *node;
	uint64_t **cookies;
	int *eofflag, *ncookies;
	ssize_t startresid;
	int error, maxcookies;

	vp = va->a_vp;
	uio = va->a_uio;
	eofflag = va->a_eofflag;
	cookies = va->a_cookies;
	ncookies = va->a_ncookies;

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR)
		return (ENOTDIR);

	maxcookies = 0;
	node = VP_TO_TMPFS_DIR(vp);
	tm = VFS_TO_TMPFS(vp->v_mount);

	/* Remember the residual so we can tell whether anything was copied. */
	startresid = uio->uio_resid;

	/* Allocate cookies for NFS and compat modules. */
	if (cookies != NULL && ncookies != NULL) {
		/* One cookie per dirent plus two for '.' and '..'. */
		maxcookies = howmany(node->tn_size,
		    sizeof(struct tmpfs_dirent)) + 2;
		*cookies = malloc(maxcookies * sizeof(**cookies), M_TEMP,
		    M_WAITOK);
		*ncookies = 0;
	}

	if (cookies == NULL)
		error = tmpfs_dir_getdents(tm, node, uio, 0, NULL, NULL);
	else
		error = tmpfs_dir_getdents(tm, node, uio, maxcookies, *cookies,
		    ncookies);

	/* Buffer was filled without hitting EOF. */
	if (error == EJUSTRETURN)
		error = (uio->uio_resid != startresid) ? 0 : EINVAL;

	/* On failure, release the cookie array instead of leaking it. */
	if (error != 0 && cookies != NULL && ncookies != NULL) {
		free(*cookies, M_TEMP);
		*cookies = NULL;
		*ncookies = 0;
	}

	/* EOF is signalled by the distinguished offset cookie. */
	if (eofflag != NULL)
		*eofflag =
		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);

	return (error);
}
1502 
1503 static int
tmpfs_readlink(struct vop_readlink_args * v)1504 tmpfs_readlink(struct vop_readlink_args *v)
1505 {
1506 	struct vnode *vp = v->a_vp;
1507 	struct uio *uio = v->a_uio;
1508 
1509 	int error;
1510 	struct tmpfs_node *node;
1511 
1512 	MPASS(uio->uio_offset == 0);
1513 	MPASS(vp->v_type == VLNK);
1514 
1515 	node = VP_TO_TMPFS_NODE(vp);
1516 
1517 	error = uiomove(node->tn_link_target, MIN(node->tn_size, uio->uio_resid),
1518 	    uio);
1519 	tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
1520 
1521 	return (error);
1522 }
1523 
/*
 * VOP_FPLOOKUP_SYMLINK routines are subject to special circumstances, see
 * the comment above cache_fplookup for details.
 *
 * Check tmpfs_alloc_node for tmpfs-specific synchronisation notes.
 */
static int
tmpfs_fplookup_symlink(struct vop_fplookup_symlink_args *v)
{
	struct vnode *vp;
	struct tmpfs_node *node;
	char *symlink;

	vp = v->a_vp;
	/* Lockless translation; a reclaimed vnode yields NULL. */
	node = VP_TO_TMPFS_NODE_SMR(vp);
	if (__predict_false(node == NULL))
		return (EAGAIN);
	/*
	 * tn_link_smr gates whether the target buffer may be read without
	 * locks; EAGAIN punts to the locked lookup path.  NOTE(review):
	 * the load ordering here appears deliberate (node, flag, pointer)
	 * — see tmpfs_alloc_node per the comment above.
	 */
	if (!atomic_load_char(&node->tn_link_smr))
		return (EAGAIN);
	symlink = atomic_load_ptr(&node->tn_link_target);
	if (symlink == NULL)
		return (EAGAIN);

	return (cache_symlink_resolve(v->a_fpl, symlink, node->tn_size));
}
1549 
1550 static int
tmpfs_inactive(struct vop_inactive_args * v)1551 tmpfs_inactive(struct vop_inactive_args *v)
1552 {
1553 	struct vnode *vp;
1554 	struct tmpfs_node *node;
1555 
1556 	vp = v->a_vp;
1557 	node = VP_TO_TMPFS_NODE(vp);
1558 	if (node->tn_links == 0)
1559 		vrecycle(vp);
1560 	else
1561 		tmpfs_check_mtime(vp);
1562 	return (0);
1563 }
1564 
1565 static int
tmpfs_need_inactive(struct vop_need_inactive_args * ap)1566 tmpfs_need_inactive(struct vop_need_inactive_args *ap)
1567 {
1568 	struct vnode *vp;
1569 	struct tmpfs_node *node;
1570 	struct vm_object *obj;
1571 
1572 	vp = ap->a_vp;
1573 	node = VP_TO_TMPFS_NODE(vp);
1574 	if (node->tn_links == 0)
1575 		goto need;
1576 	if (vp->v_type == VREG) {
1577 		obj = vp->v_object;
1578 		if (obj->generation != obj->cleangeneration)
1579 			goto need;
1580 	}
1581 	return (0);
1582 need:
1583 	return (1);
1584 }
1585 
/*
 * Disassociate the vnode from its tmpfs node, destroying the node's
 * data structures if the last link is gone.
 */
int
tmpfs_reclaim(struct vop_reclaim_args *v)
{
	struct vnode *vp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	bool unlock;

	vp = v->a_vp;
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);

	/* Drop the VM object backing a regular file. */
	if (vp->v_type == VREG)
		tmpfs_destroy_vobject(vp, node->tn_reg.tn_aobj);
	vp->v_object = NULL;

	TMPFS_LOCK(tmp);
	TMPFS_NODE_LOCK(node);
	/* Detach the vnode from the node; v_data is NULL afterwards
	 * (asserted below). */
	tmpfs_free_vp(vp);

	/*
	 * If the node referenced by this vnode was deleted by the user,
	 * we must free its associated data structures (now that the vnode
	 * is being reclaimed).
	 */
	unlock = true;
	if (node->tn_links == 0 &&
	    (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		/*
		 * When tmpfs_free_node_locked() returns true it has
		 * consumed (dropped) both locks, so skip unlocking.
		 */
		unlock = !tmpfs_free_node_locked(tmp, node, true);
	}

	if (unlock) {
		TMPFS_NODE_UNLOCK(node);
		TMPFS_UNLOCK(tmp);
	}

	MPASS(vp->v_data == NULL);
	return (0);
}
1626 
1627 int
tmpfs_print(struct vop_print_args * v)1628 tmpfs_print(struct vop_print_args *v)
1629 {
1630 	struct vnode *vp = v->a_vp;
1631 
1632 	struct tmpfs_node *node;
1633 
1634 	node = VP_TO_TMPFS_NODE(vp);
1635 
1636 	printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%lx, links %jd\n",
1637 	    node, node->tn_flags, (uintmax_t)node->tn_links);
1638 	printf("\tmode 0%o, owner %d, group %d, size %jd, status 0x%x\n",
1639 	    node->tn_mode, node->tn_uid, node->tn_gid,
1640 	    (intmax_t)node->tn_size, node->tn_status);
1641 
1642 	if (vp->v_type == VFIFO)
1643 		fifo_printinfo(vp);
1644 
1645 	printf("\n");
1646 
1647 	return (0);
1648 }
1649 
1650 int
tmpfs_pathconf(struct vop_pathconf_args * v)1651 tmpfs_pathconf(struct vop_pathconf_args *v)
1652 {
1653 	struct vnode *vp = v->a_vp;
1654 	int name = v->a_name;
1655 	long *retval = v->a_retval;
1656 
1657 	int error;
1658 
1659 	error = 0;
1660 
1661 	switch (name) {
1662 	case _PC_LINK_MAX:
1663 		*retval = TMPFS_LINK_MAX;
1664 		break;
1665 
1666 	case _PC_SYMLINK_MAX:
1667 		*retval = MAXPATHLEN;
1668 		break;
1669 
1670 	case _PC_NAME_MAX:
1671 		*retval = NAME_MAX;
1672 		break;
1673 
1674 	case _PC_PIPE_BUF:
1675 		if (vp->v_type == VDIR || vp->v_type == VFIFO)
1676 			*retval = PIPE_BUF;
1677 		else
1678 			error = EINVAL;
1679 		break;
1680 
1681 	case _PC_CHOWN_RESTRICTED:
1682 		*retval = 1;
1683 		break;
1684 
1685 	case _PC_NO_TRUNC:
1686 		*retval = 1;
1687 		break;
1688 
1689 	case _PC_SYNC_IO:
1690 		*retval = 1;
1691 		break;
1692 
1693 	case _PC_FILESIZEBITS:
1694 		*retval = 64;
1695 		break;
1696 
1697 	case _PC_MIN_HOLE_SIZE:
1698 		*retval = PAGE_SIZE;
1699 		break;
1700 
1701 	case _PC_HAS_HIDDENSYSTEM:
1702 		*retval = 1;
1703 		break;
1704 
1705 	default:
1706 		error = vop_stdpathconf(v);
1707 	}
1708 
1709 	return (error);
1710 }
1711 
1712 static int
tmpfs_vptofh(struct vop_vptofh_args * ap)1713 tmpfs_vptofh(struct vop_vptofh_args *ap)
1714 /*
1715 vop_vptofh {
1716 	IN struct vnode *a_vp;
1717 	IN struct fid *a_fhp;
1718 };
1719 */
1720 {
1721 	struct tmpfs_fid_data *const tfd = (struct tmpfs_fid_data *)ap->a_fhp;
1722 	struct tmpfs_node *node;
1723 	_Static_assert(sizeof(struct tmpfs_fid_data) <= sizeof(struct fid),
1724 	    "struct tmpfs_fid_data cannot be larger than struct fid");
1725 
1726 	node = VP_TO_TMPFS_NODE(ap->a_vp);
1727 	tfd->tfd_len = sizeof(*tfd);
1728 	tfd->tfd_gen = node->tn_gen;
1729 	tfd->tfd_id = node->tn_id;
1730 
1731 	return (0);
1732 }
1733 
1734 static int
tmpfs_whiteout(struct vop_whiteout_args * ap)1735 tmpfs_whiteout(struct vop_whiteout_args *ap)
1736 {
1737 	struct vnode *dvp = ap->a_dvp;
1738 	struct componentname *cnp = ap->a_cnp;
1739 	struct tmpfs_dirent *de;
1740 
1741 	switch (ap->a_flags) {
1742 	case LOOKUP:
1743 		return (0);
1744 	case CREATE:
1745 		de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
1746 		if (de != NULL)
1747 			return (de->td_node == NULL ? 0 : EEXIST);
1748 		return (tmpfs_dir_whiteout_add(dvp, cnp));
1749 	case DELETE:
1750 		tmpfs_dir_whiteout_remove(dvp, cnp);
1751 		return (0);
1752 	default:
1753 		panic("tmpfs_whiteout: unknown op");
1754 	}
1755 }
1756 
1757 static int
tmpfs_vptocnp_dir(struct tmpfs_node * tn,struct tmpfs_node * tnp,struct tmpfs_dirent ** pde)1758 tmpfs_vptocnp_dir(struct tmpfs_node *tn, struct tmpfs_node *tnp,
1759     struct tmpfs_dirent **pde)
1760 {
1761 	struct tmpfs_dir_cursor dc;
1762 	struct tmpfs_dirent *de;
1763 
1764 	for (de = tmpfs_dir_first(tnp, &dc); de != NULL;
1765 	     de = tmpfs_dir_next(tnp, &dc)) {
1766 		if (de->td_node == tn) {
1767 			*pde = de;
1768 			return (0);
1769 		}
1770 	}
1771 	return (ENOENT);
1772 }
1773 
/*
 * Helper for tmpfs_vptocnp(): instantiate a vnode for directory tnp,
 * find the entry naming tn in it, and prepend that name into the tail
 * of buf.  On success *dvp is returned referenced and unlocked.
 */
static int
tmpfs_vptocnp_fill(struct vnode *vp, struct tmpfs_node *tn,
    struct tmpfs_node *tnp, char *buf, size_t *buflen, struct vnode **dvp)
{
	struct tmpfs_dirent *de;
	int error, i;

	/* Get a locked vnode for the parent directory tnp. */
	error = vn_vget_ino_gen(vp, tmpfs_vn_get_ino_alloc, tnp, LK_SHARED,
	    dvp);
	if (error != 0)
		return (error);
	/* Find the entry in tnp that refers to tn. */
	error = tmpfs_vptocnp_dir(tn, tnp, &de);
	if (error == 0) {
		/* Copy the name into the tail of buf, ENOMEM if it
		 * does not fit. */
		i = *buflen;
		i -= de->td_namelen;
		if (i < 0) {
			error = ENOMEM;
		} else {
			bcopy(de->ud.td_name, buf + i, de->td_namelen);
			*buflen = i;
		}
	}
	if (error == 0) {
		/* Success: hand *dvp back unlocked but referenced. */
		if (vp != *dvp)
			VOP_UNLOCK(*dvp);
	} else {
		/* Failure: drop the reference (and lock, if distinct). */
		if (vp != *dvp)
			vput(*dvp);
		else
			vrele(vp);
	}
	return (error);
}
1807 
/*
 * Reverse lookup: find the name and parent directory of vp.  For a
 * directory the parent pointer is followed directly; for other nodes
 * every directory on the mount is scanned for an entry pointing at vp.
 */
static int
tmpfs_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp, **dvp;
	struct tmpfs_node *tn, *tnp, *tnp1;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tm;
	char *buf;
	size_t *buflen;
	int error;

	vp = ap->a_vp;
	dvp = ap->a_vpp;
	buf = ap->a_buf;
	buflen = ap->a_buflen;

	tm = VFS_TO_TMPFS(vp->v_mount);
	tn = VP_TO_TMPFS_NODE(vp);
	if (tn->tn_type == VDIR) {
		/* Directories know their parent; no scan needed. */
		tnp = tn->tn_dir.tn_parent;
		if (tnp == NULL)
			return (ENOENT);
		tmpfs_ref_node(tnp);
		error = tmpfs_vptocnp_fill(vp, tn, tn->tn_dir.tn_parent, buf,
		    buflen, dvp);
		tmpfs_free_node(tm, tnp);
		return (error);
	}
restart:
	TMPFS_LOCK(tm);
restart_locked:
	/* Scan all in-use nodes for a directory containing tn. */
	LIST_FOREACH_SAFE(tnp, &tm->tm_nodes_used, tn_entries, tnp1) {
		if (tnp->tn_type != VDIR)
			continue;
		TMPFS_NODE_LOCK(tnp);
		tmpfs_ref_node(tnp);

		/*
		 * tn_vnode cannot be instantiated while we hold the
		 * node lock, so the directory cannot be changed while
		 * we iterate over it.  Do this to avoid instantiating
		 * vnode for directories which cannot point to our
		 * node.
		 */
		error = tnp->tn_vnode == NULL ? tmpfs_vptocnp_dir(tn, tnp,
		    &de) : 0;

		if (error == 0) {
			/* Candidate parent; drop locks to instantiate it. */
			TMPFS_NODE_UNLOCK(tnp);
			TMPFS_UNLOCK(tm);
			error = tmpfs_vptocnp_fill(vp, tn, tnp, buf, buflen,
			    dvp);
			if (error == 0) {
				tmpfs_free_node(tm, tnp);
				return (0);
			}
			if (VN_IS_DOOMED(vp)) {
				/* vp was reclaimed while unlocked. */
				tmpfs_free_node(tm, tnp);
				return (ENOENT);
			}
			TMPFS_LOCK(tm);
			TMPFS_NODE_LOCK(tnp);
		}
		/*
		 * Drop our reference; if that freed the node the mount
		 * lock was dropped too, so restart the full scan.
		 */
		if (tmpfs_free_node_locked(tm, tnp, false)) {
			goto restart;
		} else {
			KASSERT(tnp->tn_refcount > 0,
			    ("node %p refcount zero", tnp));
			/*
			 * A detached node invalidates our list position;
			 * rescan under the still-held mount lock.
			 */
			if (tnp->tn_attached) {
				tnp1 = LIST_NEXT(tnp, tn_entries);
				TMPFS_NODE_UNLOCK(tnp);
			} else {
				TMPFS_NODE_UNLOCK(tnp);
				goto restart_locked;
			}
		}
	}
	TMPFS_UNLOCK(tm);
	return (ENOENT);
}
1888 
1889 void
tmpfs_extattr_free(struct tmpfs_extattr * ea)1890 tmpfs_extattr_free(struct tmpfs_extattr *ea)
1891 {
1892 	free(ea->ea_name, M_TMPFSEA);
1893 	free(ea->ea_value, M_TMPFSEA);
1894 	free(ea, M_TMPFSEA);
1895 }
1896 
1897 static bool
tmpfs_extattr_update_mem(struct tmpfs_mount * tmp,ssize_t size)1898 tmpfs_extattr_update_mem(struct tmpfs_mount *tmp, ssize_t size)
1899 {
1900 	TMPFS_LOCK(tmp);
1901 	if (size > 0 &&
1902 	    !tmpfs_pages_check_avail(tmp, howmany(size, PAGE_SIZE))) {
1903 		TMPFS_UNLOCK(tmp);
1904 		return (false);
1905 	}
1906 	if (tmp->tm_ea_memory_inuse + size > tmp->tm_ea_memory_max) {
1907 		TMPFS_UNLOCK(tmp);
1908 		return (false);
1909 	}
1910 	tmp->tm_ea_memory_inuse += size;
1911 	TMPFS_UNLOCK(tmp);
1912 	return (true);
1913 }
1914 
1915 static int
tmpfs_deleteextattr(struct vop_deleteextattr_args * ap)1916 tmpfs_deleteextattr(struct vop_deleteextattr_args *ap)
1917 {
1918 	struct vnode *vp = ap->a_vp;
1919 	struct tmpfs_mount *tmp;
1920 	struct tmpfs_node *node;
1921 	struct tmpfs_extattr *ea;
1922 	size_t namelen;
1923 	ssize_t diff;
1924 	int error;
1925 
1926 	node = VP_TO_TMPFS_NODE(vp);
1927 	tmp = VFS_TO_TMPFS(vp->v_mount);
1928 	if (VN_ISDEV(ap->a_vp))
1929 		return (EOPNOTSUPP);
1930 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1931 	    ap->a_cred, ap->a_td, VWRITE);
1932 	if (error != 0)
1933 		return (error);
1934 	if (ap->a_name == NULL || ap->a_name[0] == '\0')
1935 		return (EINVAL);
1936 	namelen = strlen(ap->a_name);
1937 	if (namelen > EXTATTR_MAXNAMELEN)
1938 		return (EINVAL);
1939 
1940 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1941 		if (ea->ea_namespace == ap->a_attrnamespace &&
1942 		    namelen == ea->ea_namelen &&
1943 		    memcmp(ap->a_name, ea->ea_name, namelen) == 0)
1944 			break;
1945 	}
1946 
1947 	if (ea == NULL)
1948 		return (ENOATTR);
1949 	LIST_REMOVE(ea, ea_extattrs);
1950 	diff = -(sizeof(struct tmpfs_extattr) + namelen + ea->ea_size);
1951 	tmpfs_extattr_update_mem(tmp, diff);
1952 	tmpfs_extattr_free(ea);
1953 	return (0);
1954 }
1955 
1956 static int
tmpfs_getextattr(struct vop_getextattr_args * ap)1957 tmpfs_getextattr(struct vop_getextattr_args *ap)
1958 {
1959 	struct vnode *vp = ap->a_vp;
1960 	struct tmpfs_node *node;
1961 	struct tmpfs_extattr *ea;
1962 	size_t namelen;
1963 	int error;
1964 
1965 	node = VP_TO_TMPFS_NODE(vp);
1966 	if (VN_ISDEV(ap->a_vp))
1967 		return (EOPNOTSUPP);
1968 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1969 	    ap->a_cred, ap->a_td, VREAD);
1970 	if (error != 0)
1971 		return (error);
1972 	if (ap->a_name == NULL || ap->a_name[0] == '\0')
1973 		return (EINVAL);
1974 	namelen = strlen(ap->a_name);
1975 	if (namelen > EXTATTR_MAXNAMELEN)
1976 		return (EINVAL);
1977 
1978 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1979 		if (ea->ea_namespace == ap->a_attrnamespace &&
1980 		    namelen == ea->ea_namelen &&
1981 		    memcmp(ap->a_name, ea->ea_name, namelen) == 0)
1982 			break;
1983 	}
1984 
1985 	if (ea == NULL)
1986 		return (ENOATTR);
1987 	if (ap->a_size != NULL)
1988 		*ap->a_size = ea->ea_size;
1989 	if (ap->a_uio != NULL && ea->ea_size != 0)
1990 		error = uiomove(ea->ea_value, ea->ea_size, ap->a_uio);
1991 	return (error);
1992 }
1993 
1994 static int
tmpfs_listextattr(struct vop_listextattr_args * ap)1995 tmpfs_listextattr(struct vop_listextattr_args *ap)
1996 {
1997 	struct vnode *vp = ap->a_vp;
1998 	struct tmpfs_node *node;
1999 	struct tmpfs_extattr *ea;
2000 	int error;
2001 
2002 	node = VP_TO_TMPFS_NODE(vp);
2003 	if (VN_ISDEV(ap->a_vp))
2004 		return (EOPNOTSUPP);
2005 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
2006 	    ap->a_cred, ap->a_td, VREAD);
2007 	if (error != 0)
2008 		return (error);
2009 	if (ap->a_size != NULL)
2010 		*ap->a_size = 0;
2011 
2012 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
2013 		if (ea->ea_namespace != ap->a_attrnamespace)
2014 			continue;
2015 		if (ap->a_size != NULL)
2016 			*ap->a_size += ea->ea_namelen + 1;
2017 		if (ap->a_uio != NULL) {
2018 			error = uiomove(&ea->ea_namelen, 1, ap->a_uio);
2019 			if (error != 0)
2020 				break;
2021 			error = uiomove(ea->ea_name, ea->ea_namelen, ap->a_uio);
2022 			if (error != 0)
2023 				break;
2024 		}
2025 	}
2026 
2027 	return (error);
2028 }
2029 
/*
 * Create or replace the named extended attribute on vp.  Memory is
 * accounted against the mount before allocation; replacement charges
 * only the net difference between the new and the old attribute.
 */
static int
tmpfs_setextattr(struct vop_setextattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	struct tmpfs_extattr *ea;
	struct tmpfs_extattr *new_ea;
	size_t attr_size;
	size_t namelen;
	ssize_t diff;
	int error;

	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);
	attr_size = ap->a_uio->uio_resid;
	diff = 0;
	/* Extended attributes are not supported on device special files. */
	if (VN_ISDEV(ap->a_vp))
		return (EOPNOTSUPP);
	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error != 0)
		return (error);
	if (ap->a_name == NULL || ap->a_name[0] == '\0')
		return (EINVAL);
	namelen = strlen(ap->a_name);
	if (namelen > EXTATTR_MAXNAMELEN)
		return (EINVAL);

	/*
	 * If an attribute with this name already exists, credit its
	 * memory so 'diff' becomes the net accounting change.
	 */
	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
		if (ea->ea_namespace == ap->a_attrnamespace &&
		    namelen == ea->ea_namelen &&
		    memcmp(ap->a_name, ea->ea_name, namelen) == 0) {
			diff -= sizeof(struct tmpfs_extattr) + ea->ea_namelen +
			    ea->ea_size;
			break;
		}
	}

	/* Charge the new attribute; fail with ENOSPC if over limit. */
	diff += sizeof(struct tmpfs_extattr) + namelen + attr_size;
	if (!tmpfs_extattr_update_mem(tmp, diff))
		return (ENOSPC);
	new_ea = malloc(sizeof(struct tmpfs_extattr), M_TMPFSEA, M_WAITOK);
	new_ea->ea_namespace = ap->a_attrnamespace;
	new_ea->ea_name = malloc(namelen, M_TMPFSEA, M_WAITOK);
	new_ea->ea_namelen = namelen;
	memcpy(new_ea->ea_name, ap->a_name, namelen);
	if (attr_size != 0) {
		new_ea->ea_value = malloc(attr_size, M_TMPFSEA, M_WAITOK);
		new_ea->ea_size = attr_size;
		error = uiomove(new_ea->ea_value, attr_size, ap->a_uio);
	} else {
		/* error is still 0 here from extattr_check_cred(). */
		new_ea->ea_value = NULL;
		new_ea->ea_size = 0;
	}
	if (error != 0) {
		/* Copy-in failed: undo the accounting and allocation. */
		tmpfs_extattr_update_mem(tmp, -diff);
		tmpfs_extattr_free(new_ea);
		return (error);
	}
	/* Replace the old attribute (if any) with the new one. */
	if (ea != NULL) {
		LIST_REMOVE(ea, ea_extattrs);
		tmpfs_extattr_free(ea);
	}
	LIST_INSERT_HEAD(&node->tn_extattrs, new_ea, ea_extattrs);
	return (0);
}
2097 
2098 static off_t
tmpfs_seek_data_locked(vm_object_t obj,off_t noff)2099 tmpfs_seek_data_locked(vm_object_t obj, off_t noff)
2100 {
2101 	vm_pindex_t p;
2102 
2103 	p = swap_pager_seek_data(obj, OFF_TO_IDX(noff));
2104 	if (p == OBJ_MAX_SIZE)
2105 		p = obj->size;
2106 	return (p == OFF_TO_IDX(noff) ? noff : IDX_TO_OFF(p));
2107 }
2108 
2109 static int
tmpfs_seek_clamp(struct tmpfs_node * tn,off_t * noff,bool seekdata)2110 tmpfs_seek_clamp(struct tmpfs_node *tn, off_t *noff, bool seekdata)
2111 {
2112 	if (*noff < tn->tn_size)
2113 		return (0);
2114 	if (seekdata)
2115 		return (ENXIO);
2116 	*noff = tn->tn_size;
2117 	return (0);
2118 }
2119 
2120 static off_t
tmpfs_seek_hole_locked(vm_object_t obj,off_t noff)2121 tmpfs_seek_hole_locked(vm_object_t obj, off_t noff)
2122 {
2123 
2124 	return (IDX_TO_OFF(swap_pager_seek_hole(obj, OFF_TO_IDX(noff))));
2125 }
2126 
2127 static int
tmpfs_seek_datahole(struct vnode * vp,off_t * off,bool seekdata)2128 tmpfs_seek_datahole(struct vnode *vp, off_t *off, bool seekdata)
2129 {
2130 	struct tmpfs_node *tn;
2131 	vm_object_t obj;
2132 	off_t noff;
2133 	int error;
2134 
2135 	if (vp->v_type != VREG)
2136 		return (ENOTTY);
2137 	tn = VP_TO_TMPFS_NODE(vp);
2138 	noff = *off;
2139 	if (noff < 0)
2140 		return (ENXIO);
2141 	error = tmpfs_seek_clamp(tn, &noff, seekdata);
2142 	if (error != 0)
2143 		return (error);
2144 	obj = tn->tn_reg.tn_aobj;
2145 
2146 	VM_OBJECT_RLOCK(obj);
2147 	noff = seekdata ? tmpfs_seek_data_locked(obj, noff) :
2148 	    tmpfs_seek_hole_locked(obj, noff);
2149 	VM_OBJECT_RUNLOCK(obj);
2150 
2151 	error = tmpfs_seek_clamp(tn, &noff, seekdata);
2152 	if (error == 0)
2153 		*off = noff;
2154 	return (error);
2155 }
2156 
2157 static int
tmpfs_ioctl(struct vop_ioctl_args * ap)2158 tmpfs_ioctl(struct vop_ioctl_args *ap)
2159 {
2160 	struct vnode *vp = ap->a_vp;
2161 	int error = 0;
2162 
2163 	switch (ap->a_command) {
2164 	case FIOSEEKDATA:
2165 	case FIOSEEKHOLE:
2166 		error = vn_lock(vp, LK_SHARED);
2167 		if (error != 0) {
2168 			error = EBADF;
2169 			break;
2170 		}
2171 		error = tmpfs_seek_datahole(vp, (off_t *)ap->a_data,
2172 		    ap->a_command == FIOSEEKDATA);
2173 		VOP_UNLOCK(vp);
2174 		break;
2175 	default:
2176 		error = ENOTTY;
2177 		break;
2178 	}
2179 	return (error);
2180 }
2181 
/*
 * Vnode operations vector used for files stored in a tmpfs file system.
 * Lookups are served from the namecache via vfs_cache_lookup(), with
 * tmpfs_cached_lookup() as the cache-miss fallback.
 */
struct vop_vector tmpfs_vnodeop_entries = {
	.vop_default =			&default_vnodeops,
	.vop_lookup =			vfs_cache_lookup,
	.vop_cachedlookup =		tmpfs_cached_lookup,
	.vop_create =			tmpfs_create,
	.vop_mknod =			tmpfs_mknod,
	.vop_open =			tmpfs_open,
	.vop_close =			tmpfs_close,
	/* Lockless (fast-path) lookup support. */
	.vop_fplookup_vexec =		tmpfs_fplookup_vexec,
	.vop_fplookup_symlink =		tmpfs_fplookup_symlink,
	.vop_access =			tmpfs_access,
	.vop_stat =			tmpfs_stat,
	.vop_getattr =			tmpfs_getattr,
	.vop_setattr =			tmpfs_setattr,
	.vop_read =			tmpfs_read,
	.vop_read_pgcache =		tmpfs_read_pgcache,
	.vop_write =			tmpfs_write,
	.vop_deallocate =		tmpfs_deallocate,
	.vop_fsync =			tmpfs_fsync,
	.vop_remove =			tmpfs_remove,
	.vop_link =			tmpfs_link,
	.vop_rename =			tmpfs_rename,
	.vop_mkdir =			tmpfs_mkdir,
	.vop_rmdir =			tmpfs_rmdir,
	.vop_symlink =			tmpfs_symlink,
	.vop_readdir =			tmpfs_readdir,
	.vop_readlink =			tmpfs_readlink,
	.vop_inactive =			tmpfs_inactive,
	.vop_need_inactive =		tmpfs_need_inactive,
	.vop_reclaim =			tmpfs_reclaim,
	.vop_print =			tmpfs_print,
	.vop_pathconf =			tmpfs_pathconf,
	.vop_vptofh =			tmpfs_vptofh,
	.vop_whiteout =			tmpfs_whiteout,
	/* No backing store device, so no block mapping. */
	.vop_bmap =			VOP_EOPNOTSUPP,
	.vop_vptocnp =			tmpfs_vptocnp,
	.vop_lock1 =			vop_lock,
	.vop_unlock = 			vop_unlock,
	.vop_islocked = 		vop_islocked,
	.vop_deleteextattr =		tmpfs_deleteextattr,
	.vop_getextattr =		tmpfs_getextattr,
	.vop_listextattr =		tmpfs_listextattr,
	.vop_setextattr =		tmpfs_setextattr,
	.vop_add_writecount =		vop_stdadd_writecount_nomsync,
	.vop_ioctl =			tmpfs_ioctl,
};
VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_entries);
2232 
/*
 * Same vector for mounts which do not use namecache: only the lookup
 * entry differs (direct tmpfs_lookup instead of vfs_cache_lookup);
 * everything else is inherited via vop_default.
 */
struct vop_vector tmpfs_vnodeop_nonc_entries = {
	.vop_default =			&tmpfs_vnodeop_entries,
	.vop_lookup =			tmpfs_lookup,
};
VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_nonc_entries);
2241