2 * Virtio 9p backend
10 * the COPYING file in the top-level directory.
15 * Not so fast! You might want to read the 9p developer docs first:
16 * https://wiki.qemu.org/Documentation/9p
26 #include "qemu/error-report.h"
28 #include "qemu/main-loop.h"
30 #include "virtio-9p.h"
31 #include "fsdev/qemu-fsdev.h"
32 #include "9p-xattr.h"
33 #include "9p-util.h"
45 Oread = 0x00,
46 Owrite = 0x01,
47 Ordwr = 0x02,
48 Oexec = 0x03,
49 Oexcl = 0x04,
50 Otrunc = 0x10,
51 Orexec = 0x20,
52 Orclose = 0x40,
53 Oappend = 0x80,
64 ret = pdu->s->transport->pdu_vmarshal(pdu, offset, fmt, ap); in pdu_marshal()
76 ret = pdu->s->transport->pdu_vunmarshal(pdu, offset, fmt, ap); in pdu_unmarshal()
84 int ret = 0; in omode_to_uflags()
156 for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) { in dotl_to_open_flags()
167 credp->fc_uid = -1; in cred_init()
168 credp->fc_gid = -1; in cred_init()
169 credp->fc_mode = -1; in cred_init()
170 credp->fc_rdev = -1; in cred_init()
192 path->data = NULL; in v9fs_path_init()
193 path->size = 0; in v9fs_path_init()
198 g_free(path->data); in v9fs_path_free()
199 path->data = NULL; in v9fs_path_free()
200 path->size = 0; in v9fs_path_free()
213 path->size = g_vasprintf(&path->data, fmt, ap) + 1; in v9fs_path_sprintf()
220 dst->size = src->size; in v9fs_path_copy()
221 dst->data = g_memdup(src->data, src->size); in v9fs_path_copy()
228 err = s->ops->name_to_path(&s->ctx, dirpath, name, path); in v9fs_name_to_path()
229 if (err < 0) { in v9fs_name_to_path()
230 err = -errno; in v9fs_name_to_path()
238 * E.g. "a/b" is an ancestor of "a/b/c" but not of "a/bc/d".
239 * As a special case, we treat s1 as an ancestor of s2 if they are the same!
243 if (!strncmp(s1->data, s2->data, s1->size - 1)) { in v9fs_path_is_ancestor()
244 if (s2->data[s1->size - 1] == '\0' || s2->data[s1->size - 1] == '/') { in v9fs_path_is_ancestor()
248 return 0; in v9fs_path_is_ancestor()
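/*
 * Editor's note, not from the original file: V9fsPath.size counts the
 * terminating NUL (see v9fs_path_sprintf() above, which stores the
 * g_vasprintf() result + 1), so size - 1 is the string length. For
 * s1 = "a/b" (size 4) and s2 = "a/b/c" the first three bytes match and
 * s2->data[3] == '/', so s1 is reported as an ancestor; for s2 = "a/bc/d"
 * the first three bytes also match, but s2->data[3] == 'c', so it is not.
 */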
253 return str->size; in v9fs_string_size()
257 * returns 0 if fid got re-opened, 1 if not, < 0 on error
259 static int coroutine_fn v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f) in v9fs_reopen_fid() argument
262 if (f->fid_type == P9_FID_FILE) { in v9fs_reopen_fid()
263 if (f->fs.fd == -1) { in v9fs_reopen_fid()
265 err = v9fs_co_open(pdu, f, f->open_flags); in v9fs_reopen_fid()
266 } while (err == -EINTR && !pdu->cancelled); in v9fs_reopen_fid()
268 } else if (f->fid_type == P9_FID_DIR) { in v9fs_reopen_fid()
269 if (f->fs.dir.stream == NULL) { in v9fs_reopen_fid()
271 err = v9fs_co_opendir(pdu, f); in v9fs_reopen_fid()
272 } while (err == -EINTR && !pdu->cancelled); in v9fs_reopen_fid()
281 V9fsFidState *f; in get_fid() local
282 V9fsState *s = pdu->s; in get_fid()
284 f = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid)); in get_fid()
285 if (f) { in get_fid()
286 BUG_ON(f->clunked); in get_fid()
292 f->ref++; in get_fid()
299 err = v9fs_reopen_fid(pdu, f); in get_fid()
300 if (err < 0) { in get_fid()
301 f->ref--; in get_fid()
308 f->flags |= FID_REFERENCED; in get_fid()
309 return f; in get_fid()
316 V9fsFidState *f; in alloc_fid() local
318 f = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid)); in alloc_fid()
319 if (f) { in alloc_fid()
321 BUG_ON(f->clunked); in alloc_fid()
324 f = g_new0(V9fsFidState, 1); in alloc_fid()
325 f->fid = fid; in alloc_fid()
326 f->fid_type = P9_FID_NONE; in alloc_fid()
327 f->ref = 1; in alloc_fid()
332 f->flags |= FID_REFERENCED; in alloc_fid()
333 g_hash_table_insert(s->fids, GINT_TO_POINTER(fid), f); in alloc_fid()
335 v9fs_readdir_init(s->proto_version, &f->fs.dir); in alloc_fid()
336 v9fs_readdir_init(s->proto_version, &f->fs_reclaim.dir); in alloc_fid()
338 return f; in alloc_fid()
343 int retval = 0; in v9fs_xattr_fid_clunk()
345 if (fidp->fs.xattr.xattrwalk_fid) { in v9fs_xattr_fid_clunk()
353 if (fidp->fs.xattr.len != fidp->fs.xattr.copied_len) { in v9fs_xattr_fid_clunk()
355 retval = -EINVAL; in v9fs_xattr_fid_clunk()
358 if (fidp->fs.xattr.len) { in v9fs_xattr_fid_clunk()
359 retval = v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name, in v9fs_xattr_fid_clunk()
360 fidp->fs.xattr.value, in v9fs_xattr_fid_clunk()
361 fidp->fs.xattr.len, in v9fs_xattr_fid_clunk()
362 fidp->fs.xattr.flags); in v9fs_xattr_fid_clunk()
364 retval = v9fs_co_lremovexattr(pdu, &fidp->path, &fidp->fs.xattr.name); in v9fs_xattr_fid_clunk()
367 v9fs_string_free(&fidp->fs.xattr.name); in v9fs_xattr_fid_clunk()
369 g_free(fidp->fs.xattr.value); in v9fs_xattr_fid_clunk()
375 int retval = 0; in free_fid()
377 if (fidp->fid_type == P9_FID_FILE) { in free_fid()
379 if (fidp->fs.fd != -1) { in free_fid()
380 retval = v9fs_co_close(pdu, &fidp->fs); in free_fid()
382 } else if (fidp->fid_type == P9_FID_DIR) { in free_fid()
383 if (fidp->fs.dir.stream != NULL) { in free_fid()
384 retval = v9fs_co_closedir(pdu, &fidp->fs); in free_fid()
386 } else if (fidp->fid_type == P9_FID_XATTR) { in free_fid()
389 v9fs_path_free(&fidp->path); in free_fid()
396 BUG_ON(!fidp->ref); in put_fid()
397 fidp->ref--; in put_fid()
401 if (!fidp->ref && fidp->clunked) { in put_fid()
402 if (fidp->fid == pdu->s->root_fid) { in put_fid()
409 migrate_del_blocker(&pdu->s->migration_blocker); in put_fid()
413 return 0; in put_fid()
421 fidp = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid)); in clunk_fid()
423 g_hash_table_remove(s->fids, GINT_TO_POINTER(fid)); in clunk_fid()
424 fidp->clunked = true; in clunk_fid()
432 int reclaim_count = 0; in v9fs_reclaim_fd()
433 V9fsState *s = pdu->s; in v9fs_reclaim_fd()
434 V9fsFidState *f; in v9fs_reclaim_fd() local
438 int nclosed = 0; in v9fs_reclaim_fd()
441 if (s->reclaiming) { in v9fs_reclaim_fd()
444 s->reclaiming = true; in v9fs_reclaim_fd()
446 g_hash_table_iter_init(&iter, s->fids); in v9fs_reclaim_fd()
452 while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &f)) { in v9fs_reclaim_fd()
457 if (f->ref || f->flags & FID_NON_RECLAIMABLE) { in v9fs_reclaim_fd()
461 * if it is a recently referenced fid in v9fs_reclaim_fd()
464 * in the next iteration. (a simple LRU without in v9fs_reclaim_fd()
467 if (f->flags & FID_REFERENCED) { in v9fs_reclaim_fd()
468 f->flags &= ~FID_REFERENCED; in v9fs_reclaim_fd()
474 if (f->fid_type == P9_FID_FILE) { in v9fs_reclaim_fd()
475 if (f->fs.fd != -1) { in v9fs_reclaim_fd()
478 * a clunk request won't free this fid in v9fs_reclaim_fd()
480 f->ref++; in v9fs_reclaim_fd()
481 QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next); in v9fs_reclaim_fd()
482 f->fs_reclaim.fd = f->fs.fd; in v9fs_reclaim_fd()
483 f->fs.fd = -1; in v9fs_reclaim_fd()
486 } else if (f->fid_type == P9_FID_DIR) { in v9fs_reclaim_fd()
487 if (f->fs.dir.stream != NULL) { in v9fs_reclaim_fd()
490 * a clunk request won't free this fid in v9fs_reclaim_fd()
492 f->ref++; in v9fs_reclaim_fd()
493 QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next); in v9fs_reclaim_fd()
494 f->fs_reclaim.dir.stream = f->fs.dir.stream; in v9fs_reclaim_fd()
495 f->fs.dir.stream = NULL; in v9fs_reclaim_fd()
504 * Close the picked FIDs altogether on a background I/O driver thread. Do in v9fs_reclaim_fd()
506 * thread <-> fs driver background thread) as low as possible. in v9fs_reclaim_fd()
509 QSLIST_FOREACH(f, &reclaim_list, reclaim_next) { in v9fs_reclaim_fd()
510 err = (f->fid_type == P9_FID_DIR) ? in v9fs_reclaim_fd()
511 s->ops->closedir(&s->ctx, &f->fs_reclaim) : in v9fs_reclaim_fd()
512 s->ops->close(&s->ctx, &f->fs_reclaim); in v9fs_reclaim_fd()
517 * unexpected case as FIDs were picked above by having a valid in v9fs_reclaim_fd()
520 error_report("9pfs: v9fs_reclaim_fd() WARNING: close() failed with EBADF"); in v9fs_reclaim_fd()
527 total_open_fd -= nclosed; in v9fs_reclaim_fd()
530 f = QSLIST_FIRST(&reclaim_list); in v9fs_reclaim_fd()
531 QSLIST_REMOVE(&reclaim_list, f, V9fsFidState, reclaim_next); in v9fs_reclaim_fd()
536 put_fid(pdu, f); in v9fs_reclaim_fd()
539 s->reclaiming = false; in v9fs_reclaim_fd()
543 * This is used when a path is removed from the directory tree. Any
549 int err = 0; in v9fs_mark_fids_unreclaim()
550 V9fsState *s = pdu->s; in v9fs_mark_fids_unreclaim()
562 g_hash_table_iter_init(&iter, s->fids); in v9fs_mark_fids_unreclaim()
571 if (fidp->path.size == path->size && in v9fs_mark_fids_unreclaim()
572 !memcmp(fidp->path.data, path->data, path->size)) { in v9fs_mark_fids_unreclaim()
574 * Ensure the fid survives a potential clunk request during in v9fs_mark_fids_unreclaim()
577 fidp->ref++; in v9fs_mark_fids_unreclaim()
578 fidp->flags |= FID_NON_RECLAIMABLE; in v9fs_mark_fids_unreclaim()
583 for (i = 0; i < to_reopen->len; i++) { in v9fs_mark_fids_unreclaim()
587 if (err < 0) { in v9fs_mark_fids_unreclaim()
592 for (i = 0; i < to_reopen->len; i++) { in v9fs_mark_fids_unreclaim()
600 V9fsState *s = pdu->s; in virtfs_reset()
604 * Get a list of all the values (fid states) in the table, which in virtfs_reset()
607 g_autoptr(GList) fids = g_hash_table_get_values(s->fids); in virtfs_reset()
610 g_hash_table_steal_all(s->fids); in virtfs_reset()
617 for (freeing = fids; freeing; freeing = freeing->next) { in virtfs_reset()
618 fidp = freeing->data; in virtfs_reset()
619 fidp->ref++; in virtfs_reset()
620 fidp->clunked = true; in virtfs_reset()
625 #define P9_QID_TYPE_DIR 0x80
626 #define P9_QID_TYPE_SYMLINK 0x02
628 #define P9_STAT_MODE_DIR 0x80000000
629 #define P9_STAT_MODE_APPEND 0x40000000
630 #define P9_STAT_MODE_EXCL 0x20000000
631 #define P9_STAT_MODE_MOUNT 0x10000000
632 #define P9_STAT_MODE_AUTH 0x08000000
633 #define P9_STAT_MODE_TMP 0x04000000
634 #define P9_STAT_MODE_SYMLINK 0x02000000
635 #define P9_STAT_MODE_LINK 0x01000000
636 #define P9_STAT_MODE_DEVICE 0x00800000
637 #define P9_STAT_MODE_NAMED_PIPE 0x00200000
638 #define P9_STAT_MODE_SOCKET 0x00100000
639 #define P9_STAT_MODE_SETUID 0x00080000
640 #define P9_STAT_MODE_SETGID 0x00040000
641 #define P9_STAT_MODE_SETVTX 0x00010000
650 /* Mirrors all bits of a byte. So e.g. binary 10100000 would become 00000101. */
653 return (byte * 0x0202020202ULL & 0x010884422010ULL) % 1023; in mirror8bit()
656 /* Same as mirror8bit() just for a 64 bit data type instead for a byte. */
659 return ((uint64_t)mirror8bit(value & 0xff) << 56) | in mirror64bit()
660 ((uint64_t)mirror8bit((value >> 8) & 0xff) << 48) | in mirror64bit()
661 ((uint64_t)mirror8bit((value >> 16) & 0xff) << 40) | in mirror64bit()
662 ((uint64_t)mirror8bit((value >> 24) & 0xff) << 32) | in mirror64bit()
663 ((uint64_t)mirror8bit((value >> 32) & 0xff) << 24) | in mirror64bit()
664 ((uint64_t)mirror8bit((value >> 40) & 0xff) << 16) | in mirror64bit()
665 ((uint64_t)mirror8bit((value >> 48) & 0xff) << 8) | in mirror64bit()
666 ((uint64_t)mirror8bit((value >> 56) & 0xff)); in mirror64bit()
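/*
 * Editor's illustration, not part of the original file: a self-contained
 * check of the multiply/mask/modulo trick used by mirror8bit() above. The
 * helper names below are hypothetical; only the formula is taken from the
 * fragment above.
 */
#include <assert.h>
#include <stdint.h>

static uint8_t example_mirror8bit(uint8_t byte)
{
    /* same constant-based bit-reversal trick as mirror8bit() above */
    return (byte * 0x0202020202ULL & 0x010884422010ULL) % 1023;
}

static void example_mirror8bit_check(void)
{
    /* binary 10100000 becomes 00000101, as the comment above states */
    assert(example_mirror8bit(0xA0) == 0x05);
    /* mirroring twice restores the original byte */
    assert(example_mirror8bit(example_mirror8bit(0x6B)) == 0x6B);
}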
678 * In practice that means: a good value for k depends on the expected number
679 * of devices to be exposed by one export. For a small number of devices k
680 * should be small, for a large number of devices k might be increased
681 * instead. The default of k=0 should be fine for most users though.
683 * IMPORTANT: In case this ever becomes a runtime parameter, the value of
687 #define EXP_GOLOMB_K 0
690 * expGolombEncode() - Exponential Golomb algorithm for arbitrary k
691 * (including k=0).
701 * "prefix-free". The latter means the generated prefixes can be prepended
705 * This is a minor adjustment to the original Exp. Golomb algorithm in the
710 const uint64_t value = n + (1 << k) - 1; in expGolombEncode()
715 .bits = bits + MAX((bits - 1 - k), 0) in expGolombEncode()
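/*
 * Editor's sketch, not part of the original file: how the two visible
 * fragments of expGolombEncode() fit together. It assumes "bits" is the
 * minimal binary width of "value"; that intermediate step is not shown in
 * this excerpt, and the helper name below is hypothetical.
 */
#include <stdint.h>

static int example_exp_golomb_len(uint64_t n, int k)
{
    const uint64_t value = n + (1ULL << k) - 1;   /* as in the fragment above */
    int bits = 0;

    for (uint64_t v = value; v; v >>= 1) {
        bits++;                                   /* minimal width of value */
    }
    /* total codeword length: the value bits plus (bits - 1 - k) prefix bits */
    return bits + (bits - 1 - k > 0 ? bits - 1 - k : 0);
}
/*
 * With the default k = 0 this yields e.g. 1 bit for n = 1, 3 bits for
 * n = 2 and n = 3, and 5 bits for n = 4..7: small indices get short affixes.
 */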
720 * invertAffix() - Converts a suffix into a prefix, or a prefix into a suffix.
725 * respectively the mathematical "prefix-free" or "suffix-free" property
728 * If a passed prefix is suitable to create unique numbers, then the
736 (affix->type == AffixType_Suffix) ? in invertAffix()
739 mirror64bit(affix->value) >> in invertAffix()
740 ((sizeof(affix->value) * 8) - affix->bits), in invertAffix()
741 .bits = affix->bits in invertAffix()
746 * affixForIndex() - Generates suffix numbers with the "suffix-free" property.
751 * This is just a wrapper function on top of the Exp. Golomb algorithm.
777 return e1->dev == e2->dev; in qpd_cmp_func()
783 return e1->dev == e2->dev && e1->ino_prefix == e2->ino_prefix; in qpp_cmp_func()
789 return e1->dev == e2->dev && e1->ino == e2->ino; in qpf_cmp_func()
799 if (!ht || !ht->map) { in qp_table_destroy()
827 * number mapping on guest level. Since a device may end up having multiple
828 * entries in qpp_table, each entry most probably with a different suffix
830 * "agree" about a fix amount of bits (per device) to be always used for
842 val = qht_lookup(&pdu->s->qpd_table, &lookup, hash); in qid_inode_prefix_hash_bits()
846 affix = affixForIndex(pdu->s->qp_affix_next); in qid_inode_prefix_hash_bits()
847 val->prefix_bits = affix.bits; in qid_inode_prefix_hash_bits()
848 qht_insert(&pdu->s->qpd_table, val, hash, NULL); in qid_inode_prefix_hash_bits()
849 pdu->s->qp_ndevices++; in qid_inode_prefix_hash_bits()
851 return val->prefix_bits; in qid_inode_prefix_hash_bits()
855 * Slow / full mapping host inode nr -> guest inode nr.
857 * This function performs a slower and much more costly remapping of an
859 * number on guest. For every (dev, inode) combination on host a new
863 * This is just a "last resort" fallback solution if the much faster/cheaper
874 .dev = stbuf->st_dev, in qid_path_fullmap()
875 .ino = stbuf->st_ino in qid_path_fullmap()
880 val = qht_lookup(&pdu->s->qpf_table, &lookup, hash); in qid_path_fullmap()
883 if (pdu->s->qp_fullpath_next == 0) { in qid_path_fullmap()
886 "9p: No more prefixes available for remapping inodes from " in qid_path_fullmap()
889 return -ENFILE; in qid_path_fullmap()
897 1ULL << (sizeof(pdu->s->qp_affix_next) * 8) in qid_path_fullmap()
899 val->path = (pdu->s->qp_fullpath_next++ << affix.bits) | affix.value; in qid_path_fullmap()
900 pdu->s->qp_fullpath_next &= ((1ULL << (64 - affix.bits)) - 1); in qid_path_fullmap()
901 qht_insert(&pdu->s->qpf_table, val, hash, NULL); in qid_path_fullmap()
904 *path = val->path; in qid_path_fullmap()
905 return 0; in qid_path_fullmap()
909 * Quick mapping host inode nr -> guest inode nr.
914 * happen if the 9p export contains more than 1 exported file system (or
916 * files would have different device nrs, all files exported by 9p would
917 * share the same device nr on guest (the device nr of the virtual 9p device
921 * inode number from host, shifting the result upwards and then assigning a
927 * and track a very limited amount of suffixes in practice due to that.
935 * (i.e. over fixed size ones) utilizes the fact that in practice only a very
937 * less than 2 dozen devices per 9p export), so in practice we need to chop
945 * numbers on guest will be much smaller & human friendly. ;-)
950 const int ino_hash_bits = qid_inode_prefix_hash_bits(pdu, stbuf->st_dev); in qid_path_suffixmap()
952 .dev = stbuf->st_dev, in qid_path_suffixmap()
953 .ino_prefix = (uint16_t) (stbuf->st_ino >> (64 - ino_hash_bits)) in qid_path_suffixmap()
957 val = qht_lookup(&pdu->s->qpp_table, &lookup, hash); in qid_path_suffixmap()
960 if (pdu->s->qp_affix_next == 0) { in qid_path_suffixmap()
963 "9p: Potential degraded performance of inode remapping" in qid_path_suffixmap()
965 return -ENFILE; in qid_path_suffixmap()
972 val->qp_affix_index = pdu->s->qp_affix_next++; in qid_path_suffixmap()
973 val->qp_affix = affixForIndex(val->qp_affix_index); in qid_path_suffixmap()
974 qht_insert(&pdu->s->qpp_table, val, hash, NULL); in qid_path_suffixmap()
977 *path = (stbuf->st_ino << val->qp_affix.bits) | val->qp_affix.value; in qid_path_suffixmap()
978 return 0; in qid_path_suffixmap()
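/*
 * Editor's illustration, not part of the original file: the guest inode
 * number produced above is the host inode number shifted up by the device's
 * affix width, with the per-device affix in the low bits. The helper name
 * and the example affix below are hypothetical.
 */
#include <stdint.h>

static uint64_t example_guest_ino(uint64_t host_ino, int affix_bits,
                                  uint64_t affix_value)
{
    /* same composition as "*path = (st_ino << bits) | value" above */
    return (host_ino << affix_bits) | affix_value;
}
/*
 * e.g. example_guest_ino(0x1234, 2, 0x1) == 0x48D1; two devices with
 * different (suffix-free) affixes can therefore never map to the same guest
 * inode number.
 */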
986 if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) { in stat_to_qid()
988 err = qid_path_suffixmap(pdu, stbuf, &qidp->path); in stat_to_qid()
989 if (err == -ENFILE) { in stat_to_qid()
991 err = qid_path_fullmap(pdu, stbuf, &qidp->path); in stat_to_qid()
997 if (pdu->s->dev_id != stbuf->st_dev) { in stat_to_qid()
998 if (pdu->s->ctx.export_flags & V9FS_FORBID_MULTIDEVS) { in stat_to_qid()
1000 "9p: Multiple devices detected in same VirtFS export. " in stat_to_qid()
1005 return -ENODEV; in stat_to_qid()
1008 "9p: Multiple devices detected in same VirtFS export, " in stat_to_qid()
1010 "misbehaviours on guest! You should either use a " in stat_to_qid()
1016 memset(&qidp->path, 0, sizeof(qidp->path)); in stat_to_qid()
1017 size = MIN(sizeof(stbuf->st_ino), sizeof(qidp->path)); in stat_to_qid()
1018 memcpy(&qidp->path, &stbuf->st_ino, size); in stat_to_qid()
1021 qidp->version = stbuf->st_mtime ^ (stbuf->st_size << 8); in stat_to_qid()
1022 qidp->type = 0; in stat_to_qid()
1023 if (S_ISDIR(stbuf->st_mode)) { in stat_to_qid()
1024 qidp->type |= P9_QID_TYPE_DIR; in stat_to_qid()
1026 if (S_ISLNK(stbuf->st_mode)) { in stat_to_qid()
1027 qidp->type |= P9_QID_TYPE_SYMLINK; in stat_to_qid()
1030 return 0; in stat_to_qid()
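/*
 * Editor's note, not from the original file: the qid's version field above
 * is derived from mtime and size so that it changes whenever the file is
 * modified; 9p clients typically use it to decide whether cached data for
 * the file is still valid. The type byte carries the directory/symlink
 * flags defined further above.
 */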
1037 if (!QLIST_EMPTY(&s->free_list)) { in pdu_alloc()
1038 pdu = QLIST_FIRST(&s->free_list); in pdu_alloc()
1040 QLIST_INSERT_HEAD(&s->active_list, pdu, next); in pdu_alloc()
1047 V9fsState *s = pdu->s; in pdu_free()
1049 g_assert(!pdu->cancelled); in pdu_free()
1051 QLIST_INSERT_HEAD(&s->free_list, pdu, next); in pdu_free()
1056 int8_t id = pdu->id + 1; /* Response */ in pdu_complete()
1057 V9fsState *s = pdu->s; in pdu_complete()
1061 * The 9p spec requires that successfully cancelled pdus receive no reply. in pdu_complete()
1062 * Sending a reply would confuse clients because they would in pdu_complete()
1064 * rather than a consequence of the cancellation. However, if in pdu_complete()
1068 * that assumes passing a non-error here will mean a successful in pdu_complete()
1071 bool discard = pdu->cancelled && len == -EINTR; in pdu_complete()
1073 trace_v9fs_rcancel(pdu->tag, pdu->id); in pdu_complete()
1074 pdu->size = 0; in pdu_complete()
1078 if (len < 0) { in pdu_complete()
1079 int err = -len; in pdu_complete()
1082 if (s->proto_version != V9FS_PROTO_2000L) { in pdu_complete()
1089 if (ret < 0) { in pdu_complete()
1099 if (ret < 0) { in pdu_complete()
1104 if (s->proto_version == V9FS_PROTO_2000L) { in pdu_complete()
1107 trace_v9fs_rerror(pdu->tag, pdu->id, err); /* Trace ERROR */ in pdu_complete()
1111 if (pdu_marshal(pdu, 0, "dbw", (int32_t)len, id, pdu->tag) < 0) { in pdu_complete()
1116 pdu->size = len; in pdu_complete()
1117 pdu->id = id; in pdu_complete()
1120 pdu->s->transport->push_and_notify(pdu); in pdu_complete()
1123 if (!qemu_co_queue_next(&pdu->complete)) { in pdu_complete()
1147 if (extension->size && extension->data[0] == 'c') { in v9mode_to_mode()
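/*
 * Editor's note, not from the original file: in 9P2000.u the "extension"
 * string of a device file encodes the device as e.g. "c 4 1" (char device,
 * major 4, minor 1) or "b 8 0" (block device), the same format produced by
 * stat_to_v9stat() further below, which is why testing the first byte for
 * 'c' distinguishes character from block devices here.
 */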
1173 if (stat->type == -1 && in donttouch_stat()
1174 stat->dev == -1 && in donttouch_stat()
1175 stat->qid.type == 0xff && in donttouch_stat()
1176 stat->qid.version == (uint32_t) -1 && in donttouch_stat()
1177 stat->qid.path == (uint64_t) -1 && in donttouch_stat()
1178 stat->mode == -1 && in donttouch_stat()
1179 stat->atime == -1 && in donttouch_stat()
1180 stat->mtime == -1 && in donttouch_stat()
1181 stat->length == -1 && in donttouch_stat()
1182 !stat->name.size && in donttouch_stat()
1183 !stat->uid.size && in donttouch_stat()
1184 !stat->gid.size && in donttouch_stat()
1185 !stat->muid.size && in donttouch_stat()
1186 stat->n_uid == -1 && in donttouch_stat()
1187 stat->n_gid == -1 && in donttouch_stat()
1188 stat->n_muid == -1) { in donttouch_stat()
1192 return 0; in donttouch_stat()
1197 v9fs_string_init(&stat->name); in v9fs_stat_init()
1198 v9fs_string_init(&stat->uid); in v9fs_stat_init()
1199 v9fs_string_init(&stat->gid); in v9fs_stat_init()
1200 v9fs_string_init(&stat->muid); in v9fs_stat_init()
1201 v9fs_string_init(&stat->extension); in v9fs_stat_init()
1206 v9fs_string_free(&stat->name); in v9fs_stat_free()
1207 v9fs_string_free(&stat->uid); in v9fs_stat_free()
1208 v9fs_string_free(&stat->gid); in v9fs_stat_free()
1209 v9fs_string_free(&stat->muid); in v9fs_stat_free()
1210 v9fs_string_free(&stat->extension); in v9fs_stat_free()
1217 mode = stbuf->st_mode & 0777; in stat_to_v9mode()
1218 if (S_ISDIR(stbuf->st_mode)) { in stat_to_v9mode()
1222 if (S_ISLNK(stbuf->st_mode)) { in stat_to_v9mode()
1226 if (S_ISSOCK(stbuf->st_mode)) { in stat_to_v9mode()
1230 if (S_ISFIFO(stbuf->st_mode)) { in stat_to_v9mode()
1234 if (S_ISBLK(stbuf->st_mode) || S_ISCHR(stbuf->st_mode)) { in stat_to_v9mode()
1238 if (stbuf->st_mode & S_ISUID) { in stat_to_v9mode()
1242 if (stbuf->st_mode & S_ISGID) { in stat_to_v9mode()
1246 if (stbuf->st_mode & S_ISVTX) { in stat_to_v9mode()
1260 memset(v9stat, 0, sizeof(*v9stat)); in stat_to_v9stat()
1262 err = stat_to_qid(pdu, stbuf, &v9stat->qid); in stat_to_v9stat()
1263 if (err < 0) { in stat_to_v9stat()
1266 v9stat->mode = stat_to_v9mode(stbuf); in stat_to_v9stat()
1267 v9stat->atime = stbuf->st_atime; in stat_to_v9stat()
1268 v9stat->mtime = stbuf->st_mtime; in stat_to_v9stat()
1269 v9stat->length = stbuf->st_size; in stat_to_v9stat()
1271 v9fs_string_free(&v9stat->uid); in stat_to_v9stat()
1272 v9fs_string_free(&v9stat->gid); in stat_to_v9stat()
1273 v9fs_string_free(&v9stat->muid); in stat_to_v9stat()
1275 v9stat->n_uid = stbuf->st_uid; in stat_to_v9stat()
1276 v9stat->n_gid = stbuf->st_gid; in stat_to_v9stat()
1277 v9stat->n_muid = 0; in stat_to_v9stat()
1279 v9fs_string_free(&v9stat->extension); in stat_to_v9stat()
1281 if (v9stat->mode & P9_STAT_MODE_SYMLINK) { in stat_to_v9stat()
1282 err = v9fs_co_readlink(pdu, path, &v9stat->extension); in stat_to_v9stat()
1283 if (err < 0) { in stat_to_v9stat()
1286 } else if (v9stat->mode & P9_STAT_MODE_DEVICE) { in stat_to_v9stat()
1287 v9fs_string_sprintf(&v9stat->extension, "%c %u %u", in stat_to_v9stat()
1288 S_ISCHR(stbuf->st_mode) ? 'c' : 'b', in stat_to_v9stat()
1289 major(stbuf->st_rdev), minor(stbuf->st_rdev)); in stat_to_v9stat()
1290 } else if (S_ISDIR(stbuf->st_mode) || S_ISREG(stbuf->st_mode)) { in stat_to_v9stat()
1291 v9fs_string_sprintf(&v9stat->extension, "%s %lu", in stat_to_v9stat()
1292 "HARDLINKCOUNT", (unsigned long)stbuf->st_nlink); in stat_to_v9stat()
1295 v9fs_string_sprintf(&v9stat->name, "%s", basename); in stat_to_v9stat()
1297 v9stat->size = 61 + in stat_to_v9stat()
1298 v9fs_string_size(&v9stat->name) + in stat_to_v9stat()
1299 v9fs_string_size(&v9stat->uid) + in stat_to_v9stat()
1300 v9fs_string_size(&v9stat->gid) + in stat_to_v9stat()
1301 v9fs_string_size(&v9stat->muid) + in stat_to_v9stat()
1302 v9fs_string_size(&v9stat->extension); in stat_to_v9stat()
1303 return 0; in stat_to_v9stat()
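/*
 * Editor's note, not from the original file: the constant 61 above is the
 * fixed-width portion of a 9P2000.u stat record, assuming the usual wire
 * layout: type[2] dev[4] qid[13] mode[4] atime[4] mtime[4] length[8]
 * (39 bytes), plus the five 2-byte length prefixes of the name, uid, gid,
 * muid and extension strings (10 bytes), plus n_uid[4] n_gid[4] n_muid[4]
 * (12 bytes); 39 + 10 + 12 = 61. The v9fs_string_size() terms then add the
 * variable-length string payloads on top.
 */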
1306 #define P9_STATS_MODE 0x00000001ULL
1307 #define P9_STATS_NLINK 0x00000002ULL
1308 #define P9_STATS_UID 0x00000004ULL
1309 #define P9_STATS_GID 0x00000008ULL
1310 #define P9_STATS_RDEV 0x00000010ULL
1311 #define P9_STATS_ATIME 0x00000020ULL
1312 #define P9_STATS_MTIME 0x00000040ULL
1313 #define P9_STATS_CTIME 0x00000080ULL
1314 #define P9_STATS_INO 0x00000100ULL
1315 #define P9_STATS_SIZE 0x00000200ULL
1316 #define P9_STATS_BLOCKS 0x00000400ULL
1318 #define P9_STATS_BTIME 0x00000800ULL
1319 #define P9_STATS_GEN 0x00001000ULL
1320 #define P9_STATS_DATA_VERSION 0x00002000ULL
1322 #define P9_STATS_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */
1323 #define P9_STATS_ALL 0x00003fffULL /* Mask for All fields above */
1327 * blksize_to_iounit() - Block size exposed to 9p client.
1330 * @pdu: 9p client request
1334 * 9p client (guest OS side). The value returned suggests an "optimum" block
1335 * size for 9p I/O, i.e. to maximize performance.
1339 int32_t iounit = 0; in blksize_to_iounit()
1340 V9fsState *s = pdu->s; in blksize_to_iounit()
1344 * as well as less than (client msize - P9_IOHDRSZ) in blksize_to_iounit()
1347 iounit = QEMU_ALIGN_DOWN(s->msize - P9_IOHDRSZ, blksize); in blksize_to_iounit()
1350 iounit = s->msize - P9_IOHDRSZ; in blksize_to_iounit()
1357 return blksize_to_iounit(pdu, stbuf->st_blksize); in stat_to_iounit()
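/*
 * Editor's sketch, not part of the original file: the effect of the two
 * visible assignments in blksize_to_iounit() above. The surrounding
 * conditions are not shown in this excerpt and are assumed to be "blksize
 * is usable" and "the aligned value came out non-zero"; the helper name and
 * the iohdrsz parameter (standing in for P9_IOHDRSZ, whose value is not
 * visible here) are hypothetical.
 */
#include <stdint.h>

static int32_t example_iounit(int32_t msize, int32_t blksize, int32_t iohdrsz)
{
    int32_t iounit = 0;

    if (blksize > 0) {
        /* largest multiple of blksize not exceeding (msize - iohdrsz) */
        iounit = (msize - iohdrsz) / blksize * blksize;
    }
    if (iounit <= 0) {
        /* fall back to everything that fits into one 9p message */
        iounit = msize - iohdrsz;
    }
    return iounit;
}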
1363 memset(v9lstat, 0, sizeof(*v9lstat)); in stat_to_v9stat_dotl()
1365 v9lstat->st_mode = stbuf->st_mode; in stat_to_v9stat_dotl()
1366 v9lstat->st_nlink = stbuf->st_nlink; in stat_to_v9stat_dotl()
1367 v9lstat->st_uid = stbuf->st_uid; in stat_to_v9stat_dotl()
1368 v9lstat->st_gid = stbuf->st_gid; in stat_to_v9stat_dotl()
1369 v9lstat->st_rdev = host_dev_to_dotl_dev(stbuf->st_rdev); in stat_to_v9stat_dotl()
1370 v9lstat->st_size = stbuf->st_size; in stat_to_v9stat_dotl()
1371 v9lstat->st_blksize = stat_to_iounit(pdu, stbuf); in stat_to_v9stat_dotl()
1372 v9lstat->st_blocks = stbuf->st_blocks; in stat_to_v9stat_dotl()
1373 v9lstat->st_atime_sec = stbuf->st_atime; in stat_to_v9stat_dotl()
1374 v9lstat->st_mtime_sec = stbuf->st_mtime; in stat_to_v9stat_dotl()
1375 v9lstat->st_ctime_sec = stbuf->st_ctime; in stat_to_v9stat_dotl()
1377 v9lstat->st_atime_nsec = stbuf->st_atimespec.tv_nsec; in stat_to_v9stat_dotl()
1378 v9lstat->st_mtime_nsec = stbuf->st_mtimespec.tv_nsec; in stat_to_v9stat_dotl()
1379 v9lstat->st_ctime_nsec = stbuf->st_ctimespec.tv_nsec; in stat_to_v9stat_dotl()
1381 v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec; in stat_to_v9stat_dotl()
1382 v9lstat->st_mtime_nsec = stbuf->st_mtim.tv_nsec; in stat_to_v9stat_dotl()
1383 v9lstat->st_ctime_nsec = stbuf->st_ctim.tv_nsec; in stat_to_v9stat_dotl()
1386 v9lstat->st_result_mask = P9_STATS_BASIC; in stat_to_v9stat_dotl()
1388 return stat_to_qid(pdu, stbuf, &v9lstat->qid); in stat_to_v9stat_dotl()
1396 for (i = 0; i < cnt; i++) { in print_sg()
1411 v9fs_path_sprintf(dst, "%s%s", src->data, str.data + len); in v9fs_fix_path()
1417 return ctx->export_flags & V9FS_RDONLY; in is_ro_export()
1424 V9fsState *s = pdu->s; in v9fs_version()
1429 err = pdu_unmarshal(pdu, offset, "ds", &s->msize, &version); in v9fs_version()
1430 if (err < 0) { in v9fs_version()
1433 trace_v9fs_version(pdu->tag, pdu->id, s->msize, version.data); in v9fs_version()
1437 if (!strcmp(version.data, "9P2000.u")) { in v9fs_version()
1438 s->proto_version = V9FS_PROTO_2000U; in v9fs_version()
1439 } else if (!strcmp(version.data, "9P2000.L")) { in v9fs_version()
1440 s->proto_version = V9FS_PROTO_2000L; in v9fs_version()
1447 if (s->msize < P9_MIN_MSIZE) { in v9fs_version()
1448 err = -EMSGSIZE; in v9fs_version()
1450 "9pfs: Client requested msize < minimum msize (" in v9fs_version()
1457 if (s->msize <= 8192 && !(s->ctx.export_flags & V9FS_NO_PERF_WARN)) { in v9fs_version()
1459 "9p: degraded performance: a reasonable high msize should be " in v9fs_version()
1461 "https://wiki.qemu.org/Documentation/9psetup#msize for details." in v9fs_version()
1466 err = pdu_marshal(pdu, offset, "ds", s->msize, &version); in v9fs_version()
1467 if (err < 0) { in v9fs_version()
1471 trace_v9fs_version_return(pdu->tag, pdu->id, s->msize, version.data); in v9fs_version()
1480 V9fsState *s = pdu->s; in v9fs_attach()
1493 if (err < 0) { in v9fs_attach()
1496 trace_v9fs_attach(pdu->tag, pdu->id, fid, afid, uname.data, aname.data); in v9fs_attach()
1500 err = -EINVAL; in v9fs_attach()
1503 fidp->uid = n_uname; in v9fs_attach()
1504 err = v9fs_co_name_to_path(pdu, NULL, "/", &fidp->path); in v9fs_attach()
1505 if (err < 0) { in v9fs_attach()
1506 err = -EINVAL; in v9fs_attach()
1510 err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); in v9fs_attach()
1511 if (err < 0) { in v9fs_attach()
1512 err = -EINVAL; in v9fs_attach()
1517 if (err < 0) { in v9fs_attach()
1518 err = -EINVAL; in v9fs_attach()
1527 if (!s->migration_blocker) { in v9fs_attach()
1528 error_setg(&s->migration_blocker, in v9fs_attach()
1530 s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag); in v9fs_attach()
1531 err = migrate_add_blocker(&s->migration_blocker, NULL); in v9fs_attach()
1532 if (err < 0) { in v9fs_attach()
1536 s->root_fid = fid; in v9fs_attach()
1540 if (err < 0) { in v9fs_attach()
1546 memcpy(&s->root_st, &stbuf, sizeof(stbuf)); in v9fs_attach()
1547 trace_v9fs_attach_return(pdu->tag, pdu->id, in v9fs_attach()
1561 ssize_t err = 0; in v9fs_stat()
1569 if (err < 0) { in v9fs_stat()
1572 trace_v9fs_stat(pdu->tag, pdu->id, fid); in v9fs_stat()
1576 err = -ENOENT; in v9fs_stat()
1579 err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); in v9fs_stat()
1580 if (err < 0) { in v9fs_stat()
1583 basename = g_path_get_basename(fidp->path.data); in v9fs_stat()
1584 err = stat_to_v9stat(pdu, &fidp->path, basename, &stbuf, &v9stat); in v9fs_stat()
1586 if (err < 0) { in v9fs_stat()
1589 err = pdu_marshal(pdu, offset, "wS", 0, &v9stat); in v9fs_stat()
1590 if (err < 0) { in v9fs_stat()
1594 trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode, in v9fs_stat()
1606 return s->ops->has_valid_file_handle(fidp->fid_type, &fidp->fs); in fid_has_valid_file_handle()
1613 ssize_t retval = 0; in v9fs_getattr()
1621 if (retval < 0) { in v9fs_getattr()
1624 trace_v9fs_getattr(pdu->tag, pdu->id, fid, request_mask); in v9fs_getattr()
1628 retval = -ENOENT; in v9fs_getattr()
1631 if (fid_has_valid_file_handle(pdu->s, fidp)) { in v9fs_getattr()
1634 retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf); in v9fs_getattr()
1636 if (retval < 0) { in v9fs_getattr()
1640 if (retval < 0) { in v9fs_getattr()
1646 retval = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, &v9stat_dotl); in v9fs_getattr()
1648 case 0: in v9fs_getattr()
1652 case -EINTR: in v9fs_getattr()
1660 retval = pdu_marshal(pdu, offset, "A", &v9stat_dotl); in v9fs_getattr()
1661 if (retval < 0) { in v9fs_getattr()
1665 trace_v9fs_getattr_return(pdu->tag, pdu->id, v9stat_dotl.st_result_mask, in v9fs_getattr()
1675 #define P9_ATTR_MODE (1 << 0)
1689 int err = 0; in v9fs_setattr()
1697 if (err < 0) { in v9fs_setattr()
1701 trace_v9fs_setattr(pdu->tag, pdu->id, fid, in v9fs_setattr()
1707 err = -EINVAL; in v9fs_setattr()
1711 err = v9fs_co_chmod(pdu, &fidp->path, v9iattr.mode); in v9fs_setattr()
1712 if (err < 0) { in v9fs_setattr()
1720 times[0].tv_sec = v9iattr.atime_sec; in v9fs_setattr()
1721 times[0].tv_nsec = v9iattr.atime_nsec; in v9fs_setattr()
1723 times[0].tv_nsec = UTIME_NOW; in v9fs_setattr()
1726 times[0].tv_nsec = UTIME_OMIT; in v9fs_setattr()
1738 if (fid_has_valid_file_handle(pdu->s, fidp)) { in v9fs_setattr()
1741 err = v9fs_co_utimensat(pdu, &fidp->path, times); in v9fs_setattr()
1743 if (err < 0) { in v9fs_setattr()
1749 * chown(-1,-1) to update the ctime of the file in v9fs_setattr()
1755 v9iattr.uid = -1; in v9fs_setattr()
1758 v9iattr.gid = -1; in v9fs_setattr()
1760 err = v9fs_co_chown(pdu, &fidp->path, v9iattr.uid, in v9fs_setattr()
1762 if (err < 0) { in v9fs_setattr()
1767 if (fid_has_valid_file_handle(pdu->s, fidp)) { in v9fs_setattr()
1770 err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size); in v9fs_setattr()
1772 if (err < 0) { in v9fs_setattr()
1777 trace_v9fs_setattr_return(pdu->tag, pdu->id); in v9fs_setattr()
1791 if (err < 0) { in v9fs_walk_marshal()
1795 for (i = 0; i < nwnames; i++) { in v9fs_walk_marshal()
1797 if (err < 0) { in v9fs_walk_marshal()
1810 static bool same_stat_id(const struct stat *a, const struct stat *b) in same_stat_id() argument
1812 return a->st_dev == b->st_dev && a->st_ino == b->st_ino; in same_stat_id()
1816 * Returns a (newly allocated) comma-separated string representation of the
1824 for (size_t i = 0; i < nwnames; ++i) { in trace_v9fs_walk_wnames()
1834 int i, err = 0, any_err = 0; in v9fs_walk()
1847 V9fsState *s = pdu->s; in v9fs_walk()
1851 if (err < 0) { in v9fs_walk()
1858 err = -EINVAL; in v9fs_walk()
1866 for (i = 0; i < nwnames; i++) { in v9fs_walk()
1868 if (err < 0) { in v9fs_walk()
1872 err = -ENOENT; in v9fs_walk()
1879 trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, in v9fs_walk()
1883 trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, ""); in v9fs_walk()
1888 err = -ENOENT; in v9fs_walk()
1896 * Needed to handle request with nwnames == 0 in v9fs_walk()
1898 v9fs_path_copy(&dpath, &fidp->path); in v9fs_walk()
1899 v9fs_path_copy(&path, &fidp->path); in v9fs_walk()
1907 nwalked = 0; in v9fs_walk()
1909 any_err |= err = -EINTR; in v9fs_walk()
1912 err = s->ops->lstat(&s->ctx, &dpath, &fidst); in v9fs_walk()
1913 if (err < 0) { in v9fs_walk()
1914 any_err |= err = -errno; in v9fs_walk()
1920 any_err |= err = -EINTR; in v9fs_walk()
1923 if (!same_stat_id(&pdu->s->root_st, &stbuf) || in v9fs_walk()
1926 err = s->ops->name_to_path(&s->ctx, &dpath, in v9fs_walk()
1929 if (err < 0) { in v9fs_walk()
1930 any_err |= err = -errno; in v9fs_walk()
1934 any_err |= err = -EINTR; in v9fs_walk()
1937 err = s->ops->lstat(&s->ctx, &pathes[nwalked], &stbuf); in v9fs_walk()
1938 if (err < 0) { in v9fs_walk()
1939 any_err |= err = -errno; in v9fs_walk()
1950 * NOTE: -EINTR is an exception where we deviate from the protocol spec in v9fs_walk()
1951 * and simply send a (R)Lerror response instead of bothering to assemble in v9fs_walk()
1952 * a (reduced) Rwalk response, because -EINTR is always the result of a in v9fs_walk()
1953 * Tflush request, so the client would no longer wait for a response in this in v9fs_walk()
1956 if ((err < 0 && !nwalked) || err == -EINTR) { in v9fs_walk()
1961 if (err < 0 && !nwalked) { in v9fs_walk()
1967 v9fs_path_copy(&dpath, &fidp->path); in v9fs_walk()
1968 v9fs_path_copy(&path, &fidp->path); in v9fs_walk()
1970 for (name_idx = 0; name_idx < nwalked; name_idx++) { in v9fs_walk()
1971 if (!same_stat_id(&pdu->s->root_st, &stbuf) || in v9fs_walk()
1976 if (err < 0) { in v9fs_walk()
1984 if (any_err < 0) { in v9fs_walk()
1994 if (fidp->fid_type != P9_FID_NONE) { in v9fs_walk()
1995 err = -EINVAL; in v9fs_walk()
1999 v9fs_path_copy(&fidp->path, &path); in v9fs_walk()
2004 err = -EINVAL; in v9fs_walk()
2007 newfidp->uid = fidp->uid; in v9fs_walk()
2008 v9fs_path_copy(&newfidp->path, &path); in v9fs_walk()
2012 trace_v9fs_walk_return(pdu->tag, pdu->id, name_idx, qids); in v9fs_walk()
2022 trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, "<?>"); in v9fs_walk()
2033 return blksize_to_iounit(pdu, (err >= 0) ? stbuf.f_bsize : 0); in get_iounit()
2042 int iounit = 0; in v9fs_open()
2043 ssize_t err = 0; in v9fs_open()
2048 V9fsState *s = pdu->s; in v9fs_open()
2051 if (s->proto_version == V9FS_PROTO_2000L) { in v9fs_open()
2058 if (err < 0) { in v9fs_open()
2063 (s->proto_version == V9FS_PROTO_2000L) ? in v9fs_open()
2066 trace_v9fs_open(pdu->tag, pdu->id, fid, mode, trace_oflags); in v9fs_open()
2071 err = -ENOENT; in v9fs_open()
2074 if (fidp->fid_type != P9_FID_NONE) { in v9fs_open()
2075 err = -EINVAL; in v9fs_open()
2079 err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); in v9fs_open()
2080 if (err < 0) { in v9fs_open()
2084 if (err < 0) { in v9fs_open()
2089 if (err < 0) { in v9fs_open()
2092 fidp->fid_type = P9_FID_DIR; in v9fs_open()
2093 err = pdu_marshal(pdu, offset, "Qd", &qid, 0); in v9fs_open()
2094 if (err < 0) { in v9fs_open()
2099 if (s->proto_version == V9FS_PROTO_2000L) { in v9fs_open()
2104 if (is_ro_export(&s->ctx)) { in v9fs_open()
2107 err = -EROFS; in v9fs_open()
2112 if (err < 0) { in v9fs_open()
2115 fidp->fid_type = P9_FID_FILE; in v9fs_open()
2116 fidp->open_flags = flags; in v9fs_open()
2122 fidp->flags |= FID_NON_RECLAIMABLE; in v9fs_open()
2124 iounit = get_iounit(pdu, &fidp->path); in v9fs_open()
2126 if (err < 0) { in v9fs_open()
2131 trace_v9fs_open_return(pdu->tag, pdu->id, in v9fs_open()
2143 ssize_t err = 0; in v9fs_lcreate()
2155 if (err < 0) { in v9fs_lcreate()
2158 trace_v9fs_lcreate(pdu->tag, pdu->id, dfid, flags, mode, gid); in v9fs_lcreate()
2161 err = -ENOENT; in v9fs_lcreate()
2166 err = -EEXIST; in v9fs_lcreate()
2172 err = -ENOENT; in v9fs_lcreate()
2175 if (fidp->fid_type != P9_FID_NONE) { in v9fs_lcreate()
2176 err = -EINVAL; in v9fs_lcreate()
2180 flags = get_dotl_openflags(pdu->s, flags); in v9fs_lcreate()
2183 if (err < 0) { in v9fs_lcreate()
2186 fidp->fid_type = P9_FID_FILE; in v9fs_lcreate()
2187 fidp->open_flags = flags; in v9fs_lcreate()
2193 fidp->flags |= FID_NON_RECLAIMABLE; in v9fs_lcreate()
2195 iounit = get_iounit(pdu, &fidp->path); in v9fs_lcreate()
2197 if (err < 0) { in v9fs_lcreate()
2201 if (err < 0) { in v9fs_lcreate()
2205 trace_v9fs_lcreate_return(pdu->tag, pdu->id, in v9fs_lcreate()
2224 if (err < 0) { in v9fs_fsync()
2227 trace_v9fs_fsync(pdu->tag, pdu->id, fid, datasync); in v9fs_fsync()
2231 err = -ENOENT; in v9fs_fsync()
2250 V9fsState *s = pdu->s; in v9fs_clunk()
2253 if (err < 0) { in v9fs_clunk()
2256 trace_v9fs_clunk(pdu->tag, pdu->id, fid); in v9fs_clunk()
2260 err = -ENOENT; in v9fs_clunk()
2267 fidp->ref++; in v9fs_clunk()
2277 * Create a QEMUIOVector for a sub-region of PDU iovecs
2282 * @is_write: true - write, false - read
2284 * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
2296 pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip); in v9fs_init_qiov_from_pdu()
2298 pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip); in v9fs_init_qiov_from_pdu()
2314 if (fidp->fs.xattr.len < off) { in v9fs_xattr_read()
2315 read_count = 0; in v9fs_xattr_read()
2317 read_count = fidp->fs.xattr.len - off; in v9fs_xattr_read()
2323 if (err < 0) { in v9fs_xattr_read()
2329 err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0, in v9fs_xattr_read()
2330 ((char *)fidp->fs.xattr.value) + off, in v9fs_xattr_read()
2333 if (err < 0) { in v9fs_xattr_read()
2346 int len, err = 0; in v9fs_do_readdir_with_stat()
2347 int32_t count = 0; in v9fs_do_readdir_with_stat()
2354 if (saved_dir_pos < 0) { in v9fs_do_readdir_with_stat()
2361 v9fs_readdir_lock(&fidp->fs.dir); in v9fs_do_readdir_with_stat()
2367 err = v9fs_co_name_to_path(pdu, &fidp->path, dent->d_name, &path); in v9fs_do_readdir_with_stat()
2368 if (err < 0) { in v9fs_do_readdir_with_stat()
2372 if (err < 0) { in v9fs_do_readdir_with_stat()
2375 err = stat_to_v9stat(pdu, &path, dent->d_name, &stbuf, &v9stat); in v9fs_do_readdir_with_stat()
2376 if (err < 0) { in v9fs_do_readdir_with_stat()
2380 v9fs_readdir_unlock(&fidp->fs.dir); in v9fs_do_readdir_with_stat()
2392 v9fs_readdir_unlock(&fidp->fs.dir); in v9fs_do_readdir_with_stat()
2394 if (len < 0) { in v9fs_do_readdir_with_stat()
2406 v9fs_readdir_unlock(&fidp->fs.dir); in v9fs_do_readdir_with_stat()
2409 if (err < 0) { in v9fs_do_readdir_with_stat()
2419 ssize_t err = 0; in v9fs_read()
2420 int32_t count = 0; in v9fs_read()
2425 V9fsState *s = pdu->s; in v9fs_read()
2428 if (err < 0) { in v9fs_read()
2431 trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count); in v9fs_read()
2435 err = -EINVAL; in v9fs_read()
2438 if (fidp->fid_type == P9_FID_DIR) { in v9fs_read()
2439 if (s->proto_version != V9FS_PROTO_2000U) { in v9fs_read()
2441 "9p: bad client: T_read request on directory only expected " in v9fs_read()
2442 "with 9P2000.u protocol version" in v9fs_read()
2444 err = -EOPNOTSUPP; in v9fs_read()
2447 if (off == 0) { in v9fs_read()
2451 if (count < 0) { in v9fs_read()
2456 if (err < 0) { in v9fs_read()
2460 } else if (fidp->fid_type == P9_FID_FILE) { in v9fs_read()
2469 qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count); in v9fs_read()
2470 if (0) { in v9fs_read()
2476 if (len >= 0) { in v9fs_read()
2480 } while (len == -EINTR && !pdu->cancelled); in v9fs_read()
2481 if (len < 0) { in v9fs_read()
2486 } while (count < max_count && len > 0); in v9fs_read()
2488 if (err < 0) { in v9fs_read()
2495 } else if (fidp->fid_type == P9_FID_XATTR) { in v9fs_read()
2498 err = -EINVAL; in v9fs_read()
2500 trace_v9fs_read_return(pdu->tag, pdu->id, count, err); in v9fs_read()
2508 * v9fs_readdir_response_size() - Returns size required in Rreaddir response
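/*
 * Editor's note, not from the original file: in 9P2000.L each Rreaddir
 * entry is encoded as qid[13] offset[8] type[1] name[s], so the size needed
 * per entry works out to 13 + 8 + 1 + 2 + strlen(name) bytes, the 2 being
 * the string's length prefix. This is presumably what the helper above
 * computes for the "count" budget handled in v9fs_do_readdir() below.
 */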
2528 next = e->next; in v9fs_free_dirents()
2529 g_free(e->dent); in v9fs_free_dirents()
2530 g_free(e->st); in v9fs_free_dirents()
2541 int len, err = 0; in v9fs_do_readdir()
2542 int32_t count = 0; in v9fs_do_readdir()
2551 * enabled we have to make a full stat for each directory entry in v9fs_do_readdir()
2553 const bool dostat = pdu->s->ctx.export_flags & V9FS_REMAP_INODES; in v9fs_do_readdir()
2556 * Fetch all required directory entries altogether on a background IO in v9fs_do_readdir()
2563 if (count < 0) { in v9fs_do_readdir()
2565 count = 0; in v9fs_do_readdir()
2568 count = 0; in v9fs_do_readdir()
2570 for (struct V9fsDirEnt *e = entries; e; e = e->next) { in v9fs_do_readdir()
2571 dent = e->dent; in v9fs_do_readdir()
2573 if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) { in v9fs_do_readdir()
2574 st = e->st; in v9fs_do_readdir()
2575 /* e->st should never be NULL, but just to be sure */ in v9fs_do_readdir()
2577 err = -1; in v9fs_do_readdir()
2583 if (err < 0) { in v9fs_do_readdir()
2592 * is that no multi-device export detection of stat_to_qid() in v9fs_do_readdir()
2597 size = MIN(sizeof(dent->d_ino), sizeof(qid.path)); in v9fs_do_readdir()
2598 memcpy(&qid.path, &dent->d_ino, size); in v9fs_do_readdir()
2600 qid.type = 0; in v9fs_do_readdir()
2601 qid.version = 0; in v9fs_do_readdir()
2606 v9fs_string_sprintf(&name, "%s", dent->d_name); in v9fs_do_readdir()
2611 dent->d_type, &name); in v9fs_do_readdir()
2615 if (len < 0) { in v9fs_do_readdir()
2625 if (err < 0) { in v9fs_do_readdir()
2635 ssize_t retval = 0; in v9fs_readdir()
2641 V9fsState *s = pdu->s; in v9fs_readdir()
2645 if (retval < 0) { in v9fs_readdir()
2648 trace_v9fs_readdir(pdu->tag, pdu->id, fid, initial_offset, max_count); in v9fs_readdir()
2650 /* Enough space for a R_readdir header: size[4] Rreaddir tag[2] count[4] */ in v9fs_readdir()
2651 if (max_count > s->msize - 11) { in v9fs_readdir()
2652 max_count = s->msize - 11; in v9fs_readdir()
2654 "9p: bad client: T_readdir with count > msize - 11" in v9fs_readdir()
2660 retval = -EINVAL; in v9fs_readdir()
2663 if (fidp->fid_type != P9_FID_DIR) { in v9fs_readdir()
2664 warn_report_once("9p: bad client: T_readdir on non-directory stream"); in v9fs_readdir()
2665 retval = -ENOTDIR; in v9fs_readdir()
2668 if (!fidp->fs.dir.stream) { in v9fs_readdir()
2669 retval = -EINVAL; in v9fs_readdir()
2672 if (s->proto_version != V9FS_PROTO_2000L) { in v9fs_readdir()
2674 "9p: bad client: T_readdir request only expected with 9P2000.L " in v9fs_readdir()
2677 retval = -EOPNOTSUPP; in v9fs_readdir()
2681 if (count < 0) { in v9fs_readdir()
2686 if (retval < 0) { in v9fs_readdir()
2690 trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval); in v9fs_readdir()
2702 ssize_t err = 0; in v9fs_xattr_write()
2707 if (fidp->fs.xattr.len < off) { in v9fs_xattr_write()
2708 return -ENOSPC; in v9fs_xattr_write()
2710 write_count = fidp->fs.xattr.len - off; in v9fs_xattr_write()
2715 if (err < 0) { in v9fs_xattr_write()
2719 fidp->fs.xattr.copied_len += write_count; in v9fs_xattr_write()
2723 for (i = 0; i < cnt; i++) { in v9fs_xattr_write()
2729 memcpy((char *)fidp->fs.xattr.value + off, sg[i].iov_base, to_copy); in v9fs_xattr_write()
2730 /* updating vs->off since we are not using below */ in v9fs_xattr_write()
2732 write_count -= to_copy; in v9fs_xattr_write()
2744 int32_t len = 0; in v9fs_write()
2745 int32_t total = 0; in v9fs_write()
2749 V9fsState *s = pdu->s; in v9fs_write()
2754 if (err < 0) { in v9fs_write()
2760 trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov); in v9fs_write()
2764 err = -EINVAL; in v9fs_write()
2767 if (fidp->fid_type == P9_FID_FILE) { in v9fs_write()
2768 if (fidp->fs.fd == -1) { in v9fs_write()
2769 err = -EINVAL; in v9fs_write()
2772 } else if (fidp->fid_type == P9_FID_XATTR) { in v9fs_write()
2780 err = -EINVAL; in v9fs_write()
2786 qemu_iovec_concat(&qiov, &qiov_full, total, qiov_full.size - total); in v9fs_write()
2787 if (0) { in v9fs_write()
2793 if (len >= 0) { in v9fs_write()
2797 } while (len == -EINTR && !pdu->cancelled); in v9fs_write()
2798 if (len < 0) { in v9fs_write()
2803 } while (total < count && len > 0); in v9fs_write()
2807 if (err < 0) { in v9fs_write()
2811 trace_v9fs_write_return(pdu->tag, pdu->id, total, err); in v9fs_write()
2824 int err = 0; in v9fs_create()
2836 V9fsState *s = pdu->s; in v9fs_create()
2843 if (err < 0) { in v9fs_create()
2846 trace_v9fs_create(pdu->tag, pdu->id, fid, name.data, perm, mode); in v9fs_create()
2849 err = -ENOENT; in v9fs_create()
2854 err = -EEXIST; in v9fs_create()
2860 err = -EINVAL; in v9fs_create()
2863 if (fidp->fid_type != P9_FID_NONE) { in v9fs_create()
2864 err = -EINVAL; in v9fs_create()
2869 fidp->uid, -1, &stbuf); in v9fs_create()
2870 if (err < 0) { in v9fs_create()
2873 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); in v9fs_create()
2874 if (err < 0) { in v9fs_create()
2878 v9fs_path_copy(&fidp->path, &path); in v9fs_create()
2881 if (err < 0) { in v9fs_create()
2884 fidp->fid_type = P9_FID_DIR; in v9fs_create()
2887 extension.data, -1 , &stbuf); in v9fs_create()
2888 if (err < 0) { in v9fs_create()
2891 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); in v9fs_create()
2892 if (err < 0) { in v9fs_create()
2896 v9fs_path_copy(&fidp->path, &path); in v9fs_create()
2902 err = -EINVAL; in v9fs_create()
2907 if (err < 0) { in v9fs_create()
2910 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); in v9fs_create()
2911 if (err < 0) { in v9fs_create()
2912 fidp->fid_type = P9_FID_NONE; in v9fs_create()
2916 v9fs_path_copy(&fidp->path, &path); in v9fs_create()
2918 err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); in v9fs_create()
2919 if (err < 0) { in v9fs_create()
2920 fidp->fid_type = P9_FID_NONE; in v9fs_create()
2926 mode_t nmode = 0; in v9fs_create()
2929 err = -errno; in v9fs_create()
2941 err = -EIO; in v9fs_create()
2946 err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, in v9fs_create()
2948 if (err < 0) { in v9fs_create()
2951 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); in v9fs_create()
2952 if (err < 0) { in v9fs_create()
2956 v9fs_path_copy(&fidp->path, &path); in v9fs_create()
2959 err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, in v9fs_create()
2960 0, S_IFIFO | (perm & 0777), &stbuf); in v9fs_create()
2961 if (err < 0) { in v9fs_create()
2964 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); in v9fs_create()
2965 if (err < 0) { in v9fs_create()
2969 v9fs_path_copy(&fidp->path, &path); in v9fs_create()
2972 err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, in v9fs_create()
2973 0, S_IFSOCK | (perm & 0777), &stbuf); in v9fs_create()
2974 if (err < 0) { in v9fs_create()
2977 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); in v9fs_create()
2978 if (err < 0) { in v9fs_create()
2982 v9fs_path_copy(&fidp->path, &path); in v9fs_create()
2985 err = v9fs_co_open2(pdu, fidp, &name, -1, in v9fs_create()
2987 if (err < 0) { in v9fs_create()
2990 fidp->fid_type = P9_FID_FILE; in v9fs_create()
2991 fidp->open_flags = omode_to_uflags(mode); in v9fs_create()
2992 if (fidp->open_flags & O_EXCL) { in v9fs_create()
2997 fidp->flags |= FID_NON_RECLAIMABLE; in v9fs_create()
3000 iounit = get_iounit(pdu, &fidp->path); in v9fs_create()
3002 if (err < 0) { in v9fs_create()
3006 if (err < 0) { in v9fs_create()
3010 trace_v9fs_create_return(pdu->tag, pdu->id, in v9fs_create()
3030 int err = 0; in v9fs_symlink()
3037 if (err < 0) { in v9fs_symlink()
3040 trace_v9fs_symlink(pdu->tag, pdu->id, dfid, name.data, symname.data, gid); in v9fs_symlink()
3043 err = -ENOENT; in v9fs_symlink()
3048 err = -EEXIST; in v9fs_symlink()
3054 err = -EINVAL; in v9fs_symlink()
3058 if (err < 0) { in v9fs_symlink()
3062 if (err < 0) { in v9fs_symlink()
3066 if (err < 0) { in v9fs_symlink()
3070 trace_v9fs_symlink_return(pdu->tag, pdu->id, in v9fs_symlink()
3087 V9fsState *s = pdu->s; in v9fs_flush()
3090 if (err < 0) { in v9fs_flush()
3094 trace_v9fs_flush(pdu->tag, pdu->id, tag); in v9fs_flush()
3096 if (pdu->tag == tag) { in v9fs_flush()
3097 warn_report("the guest sent a self-referencing 9P flush request"); in v9fs_flush()
3099 QLIST_FOREACH(cancel_pdu, &s->active_list, next) { in v9fs_flush()
3100 if (cancel_pdu->tag == tag) { in v9fs_flush()
3106 cancel_pdu->cancelled = 1; in v9fs_flush()
3110 qemu_co_queue_wait(&cancel_pdu->complete, NULL); in v9fs_flush()
3111 if (!qemu_co_queue_next(&cancel_pdu->complete)) { in v9fs_flush()
3112 cancel_pdu->cancelled = 0; in v9fs_flush()
3126 int err = 0; in v9fs_link()
3130 if (err < 0) { in v9fs_link()
3133 trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data); in v9fs_link()
3136 err = -ENOENT; in v9fs_link()
3141 err = -EEXIST; in v9fs_link()
3147 err = -ENOENT; in v9fs_link()
3153 err = -ENOENT; in v9fs_link()
3172 int err = 0; in v9fs_remove()
3178 if (err < 0) { in v9fs_remove()
3181 trace_v9fs_remove(pdu->tag, pdu->id, fid); in v9fs_remove()
3185 err = -EINVAL; in v9fs_remove()
3189 if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) { in v9fs_remove()
3190 err = -EOPNOTSUPP; in v9fs_remove()
3197 err = v9fs_mark_fids_unreclaim(pdu, &fidp->path); in v9fs_remove()
3198 if (err < 0) { in v9fs_remove()
3201 err = v9fs_co_remove(pdu, &fidp->path); in v9fs_remove()
3207 clunk_fid(pdu->s, fidp->fid); in v9fs_remove()
3215 int err = 0; in v9fs_unlinkat()
3217 int32_t dfid, flags, rflags = 0; in v9fs_unlinkat()
3225 if (err < 0) { in v9fs_unlinkat()
3230 err = -ENOENT; in v9fs_unlinkat()
3235 err = -EINVAL; in v9fs_unlinkat()
3240 err = -ENOTEMPTY; in v9fs_unlinkat()
3245 err = -EINVAL; in v9fs_unlinkat()
3255 err = -EINVAL; in v9fs_unlinkat()
3263 err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path); in v9fs_unlinkat()
3264 if (err < 0) { in v9fs_unlinkat()
3268 if (err < 0) { in v9fs_unlinkat()
3271 err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, rflags); in v9fs_unlinkat()
3289 int err = 0; in v9fs_complete_rename()
3292 V9fsState *s = pdu->s; in v9fs_complete_rename()
3298 if (newdirfid != -1) { in v9fs_complete_rename()
3301 return -ENOENT; in v9fs_complete_rename()
3303 if (fidp->fid_type != P9_FID_NONE) { in v9fs_complete_rename()
3304 err = -EINVAL; in v9fs_complete_rename()
3307 err = v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path); in v9fs_complete_rename()
3308 if (err < 0) { in v9fs_complete_rename()
3312 char *dir_name = g_path_get_dirname(fidp->path.data); in v9fs_complete_rename()
3319 err = v9fs_co_name_to_path(pdu, &dir_path, name->data, &new_path); in v9fs_complete_rename()
3321 if (err < 0) { in v9fs_complete_rename()
3325 err = v9fs_co_rename(pdu, &fidp->path, &new_path); in v9fs_complete_rename()
3326 if (err < 0) { in v9fs_complete_rename()
3334 g_hash_table_iter_init(&iter, s->fids); in v9fs_complete_rename()
3336 if (v9fs_path_is_ancestor(&fidp->path, &tfidp->path)) { in v9fs_complete_rename()
3338 v9fs_fix_path(&tfidp->path, &new_path, strlen(fidp->path.data)); in v9fs_complete_rename()
3353 ssize_t err = 0; in v9fs_rename()
3359 V9fsState *s = pdu->s; in v9fs_rename()
3363 if (err < 0) { in v9fs_rename()
3368 err = -ENOENT; in v9fs_rename()
3373 err = -EISDIR; in v9fs_rename()
3379 err = -ENOENT; in v9fs_rename()
3382 if (fidp->fid_type != P9_FID_NONE) { in v9fs_rename()
3383 err = -EINVAL; in v9fs_rename()
3387 if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) { in v9fs_rename()
3388 err = -EOPNOTSUPP; in v9fs_rename()
3411 V9fsState *s = pdu->s; in v9fs_fix_fid_paths()
3418 err = v9fs_co_name_to_path(pdu, olddir, old_name->data, &oldpath); in v9fs_fix_fid_paths()
3419 if (err < 0) { in v9fs_fix_fid_paths()
3422 err = v9fs_co_name_to_path(pdu, newdir, new_name->data, &newpath); in v9fs_fix_fid_paths()
3423 if (err < 0) { in v9fs_fix_fid_paths()
3431 g_hash_table_iter_init(&iter, s->fids); in v9fs_fix_fid_paths()
3433 if (v9fs_path_is_ancestor(&oldpath, &tfidp->path)) { in v9fs_fix_fid_paths()
3435 v9fs_fix_path(&tfidp->path, &newpath, strlen(oldpath.data)); in v9fs_fix_fid_paths()
3449 int err = 0; in v9fs_complete_renameat()
3450 V9fsState *s = pdu->s; in v9fs_complete_renameat()
3455 err = -ENOENT; in v9fs_complete_renameat()
3458 if (newdirfid != -1) { in v9fs_complete_renameat()
3461 err = -ENOENT; in v9fs_complete_renameat()
3468 err = v9fs_co_renameat(pdu, &olddirfidp->path, old_name, in v9fs_complete_renameat()
3469 &newdirfidp->path, new_name); in v9fs_complete_renameat()
3470 if (err < 0) { in v9fs_complete_renameat()
3473 if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { in v9fs_complete_renameat()
3475 err = v9fs_fix_fid_paths(pdu, &olddirfidp->path, old_name, in v9fs_complete_renameat()
3476 &newdirfidp->path, new_name); in v9fs_complete_renameat()
3490 ssize_t err = 0; in v9fs_renameat()
3493 V9fsState *s = pdu->s; in v9fs_renameat()
3501 if (err < 0) { in v9fs_renameat()
3506 err = -ENOENT; in v9fs_renameat()
3512 err = -EISDIR; in v9fs_renameat()
3533 int err = 0; in v9fs_wstat()
3540 V9fsState *s = pdu->s; in v9fs_wstat()
3544 if (err < 0) { in v9fs_wstat()
3547 trace_v9fs_wstat(pdu->tag, pdu->id, fid, in v9fs_wstat()
3552 err = -EINVAL; in v9fs_wstat()
3557 err = v9fs_co_fsync(pdu, fidp, 0); in v9fs_wstat()
3560 if (v9stat.mode != -1) { in v9fs_wstat()
3562 err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); in v9fs_wstat()
3563 if (err < 0) { in v9fs_wstat()
3570 err = -EIO; in v9fs_wstat()
3573 err = v9fs_co_chmod(pdu, &fidp->path, in v9fs_wstat()
3576 if (err < 0) { in v9fs_wstat()
3580 if (v9stat.mtime != -1 || v9stat.atime != -1) { in v9fs_wstat()
3582 if (v9stat.atime != -1) { in v9fs_wstat()
3583 times[0].tv_sec = v9stat.atime; in v9fs_wstat()
3584 times[0].tv_nsec = 0; in v9fs_wstat()
3586 times[0].tv_nsec = UTIME_OMIT; in v9fs_wstat()
3588 if (v9stat.mtime != -1) { in v9fs_wstat()
3590 times[1].tv_nsec = 0; in v9fs_wstat()
3594 err = v9fs_co_utimensat(pdu, &fidp->path, times); in v9fs_wstat()
3595 if (err < 0) { in v9fs_wstat()
3599 if (v9stat.n_gid != -1 || v9stat.n_uid != -1) { in v9fs_wstat()
3600 err = v9fs_co_chown(pdu, &fidp->path, v9stat.n_uid, v9stat.n_gid); in v9fs_wstat()
3601 if (err < 0) { in v9fs_wstat()
3605 if (v9stat.name.size != 0) { in v9fs_wstat()
3607 err = v9fs_complete_rename(pdu, fidp, -1, &v9stat.name); in v9fs_wstat()
3609 if (err < 0) { in v9fs_wstat()
3613 if (v9stat.length != -1) { in v9fs_wstat()
3614 err = v9fs_co_truncate(pdu, &fidp->path, v9stat.length); in v9fs_wstat()
3615 if (err < 0) { in v9fs_wstat()
3645 bsize_factor = (s->msize - P9_IOHDRSZ) / stbuf->f_bsize; in v9fs_fill_statfs()
3649 f_type = stbuf->f_type; in v9fs_fill_statfs()
3650 f_bsize = stbuf->f_bsize; in v9fs_fill_statfs()
3657 f_blocks = stbuf->f_blocks / bsize_factor; in v9fs_fill_statfs()
3658 f_bfree = stbuf->f_bfree / bsize_factor; in v9fs_fill_statfs()
3659 f_bavail = stbuf->f_bavail / bsize_factor; in v9fs_fill_statfs()
3660 f_files = stbuf->f_files; in v9fs_fill_statfs()
3661 f_ffree = stbuf->f_ffree; in v9fs_fill_statfs()
3663 fsid_val = (unsigned int)stbuf->f_fsid.val[0] | in v9fs_fill_statfs()
3664 (unsigned long long)stbuf->f_fsid.val[1] << 32; in v9fs_fill_statfs()
3667 fsid_val = (unsigned int) stbuf->f_fsid.__val[0] | in v9fs_fill_statfs()
3668 (unsigned long long)stbuf->f_fsid.__val[1] << 32; in v9fs_fill_statfs()
3669 f_namelen = stbuf->f_namelen; in v9fs_fill_statfs()
3681 ssize_t retval = 0; in v9fs_statfs()
3686 V9fsState *s = pdu->s; in v9fs_statfs()
3689 if (retval < 0) { in v9fs_statfs()
3694 retval = -ENOENT; in v9fs_statfs()
3697 retval = v9fs_co_statfs(pdu, &fidp->path, &stbuf); in v9fs_statfs()
3698 if (retval < 0) { in v9fs_statfs()
3702 if (retval < 0) { in v9fs_statfs()
3719 int err = 0; in v9fs_mknod()
3730 if (err < 0) { in v9fs_mknod()
3733 trace_v9fs_mknod(pdu->tag, pdu->id, fid, mode, major, minor); in v9fs_mknod()
3736 err = -ENOENT; in v9fs_mknod()
3741 err = -EEXIST; in v9fs_mknod()
3747 err = -ENOENT; in v9fs_mknod()
3750 err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, gid, in v9fs_mknod()
3752 if (err < 0) { in v9fs_mknod()
3756 if (err < 0) { in v9fs_mknod()
3760 if (err < 0) { in v9fs_mknod()
3764 trace_v9fs_mknod_return(pdu->tag, pdu->id, in v9fs_mknod()
3775 * Server side handling of locking code is very simple, because 9p server in
3778 * do anything in the QEMU 9p server side lock code path.
3779 * So when a TLOCK request comes, always return success
3787 int32_t fid, err = 0; in v9fs_lock()
3794 if (err < 0) { in v9fs_lock()
3797 trace_v9fs_lock(pdu->tag, pdu->id, fid, in v9fs_lock()
3803 err = -EINVAL; in v9fs_lock()
3808 err = -ENOENT; in v9fs_lock()
3812 if (err < 0) { in v9fs_lock()
3816 if (err < 0) { in v9fs_lock()
3820 trace_v9fs_lock_return(pdu->tag, pdu->id, P9_LOCK_SUCCESS); in v9fs_lock()
3829 * When a TGETLOCK request comes, always return success because all lock
3838 int32_t fid, err = 0; in v9fs_getlock()
3845 if (err < 0) { in v9fs_getlock()
3848 trace_v9fs_getlock(pdu->tag, pdu->id, fid, in v9fs_getlock()
3853 err = -ENOENT; in v9fs_getlock()
3857 if (err < 0) { in v9fs_getlock()
3864 if (err < 0) { in v9fs_getlock()
3868 trace_v9fs_getlock_return(pdu->tag, pdu->id, glock.type, glock.start, in v9fs_getlock()
3888 int err = 0; in v9fs_mkdir()
3892 if (err < 0) { in v9fs_mkdir()
3895 trace_v9fs_mkdir(pdu->tag, pdu->id, fid, name.data, mode, gid); in v9fs_mkdir()
3898 err = -ENOENT; in v9fs_mkdir()
3903 err = -EEXIST; in v9fs_mkdir()
3909 err = -ENOENT; in v9fs_mkdir()
3912 err = v9fs_co_mkdir(pdu, fidp, &name, mode, fidp->uid, gid, &stbuf); in v9fs_mkdir()
3913 if (err < 0) { in v9fs_mkdir()
3917 if (err < 0) { in v9fs_mkdir()
3921 if (err < 0) { in v9fs_mkdir()
3925 trace_v9fs_mkdir_return(pdu->tag, pdu->id, in v9fs_mkdir()
3938 ssize_t err = 0; in v9fs_xattrwalk()
3944 V9fsState *s = pdu->s; in v9fs_xattrwalk()
3948 if (err < 0) { in v9fs_xattrwalk()
3951 trace_v9fs_xattrwalk(pdu->tag, pdu->id, fid, newfid, name.data); in v9fs_xattrwalk()
3955 err = -ENOENT; in v9fs_xattrwalk()
3960 err = -EINVAL; in v9fs_xattrwalk()
3963 v9fs_path_copy(&xattr_fidp->path, &file_fidp->path); in v9fs_xattrwalk()
3968 size = v9fs_co_llistxattr(pdu, &xattr_fidp->path, NULL, 0); in v9fs_xattrwalk()
3969 if (size < 0) { in v9fs_xattrwalk()
3971 clunk_fid(s, xattr_fidp->fid); in v9fs_xattrwalk()
3977 xattr_fidp->fs.xattr.len = size; in v9fs_xattrwalk()
3978 xattr_fidp->fid_type = P9_FID_XATTR; in v9fs_xattrwalk()
3979 xattr_fidp->fs.xattr.xattrwalk_fid = true; in v9fs_xattrwalk()
3980 xattr_fidp->fs.xattr.value = g_malloc0(size); in v9fs_xattrwalk()
3982 err = v9fs_co_llistxattr(pdu, &xattr_fidp->path, in v9fs_xattrwalk()
3983 xattr_fidp->fs.xattr.value, in v9fs_xattrwalk()
3984 xattr_fidp->fs.xattr.len); in v9fs_xattrwalk()
3985 if (err < 0) { in v9fs_xattrwalk()
3986 clunk_fid(s, xattr_fidp->fid); in v9fs_xattrwalk()
3991 if (err < 0) { in v9fs_xattrwalk()
4000 size = v9fs_co_lgetxattr(pdu, &xattr_fidp->path, in v9fs_xattrwalk()
4001 &name, NULL, 0); in v9fs_xattrwalk()
4002 if (size < 0) { in v9fs_xattrwalk()
4004 clunk_fid(s, xattr_fidp->fid); in v9fs_xattrwalk()
4010 xattr_fidp->fs.xattr.len = size; in v9fs_xattrwalk()
4011 xattr_fidp->fid_type = P9_FID_XATTR; in v9fs_xattrwalk()
4012 xattr_fidp->fs.xattr.xattrwalk_fid = true; in v9fs_xattrwalk()
4013 xattr_fidp->fs.xattr.value = g_malloc0(size); in v9fs_xattrwalk()
4015 err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path, in v9fs_xattrwalk()
4016 &name, xattr_fidp->fs.xattr.value, in v9fs_xattrwalk()
4017 xattr_fidp->fs.xattr.len); in v9fs_xattrwalk()
4018 if (err < 0) { in v9fs_xattrwalk()
4019 clunk_fid(s, xattr_fidp->fid); in v9fs_xattrwalk()
4024 if (err < 0) { in v9fs_xattrwalk()
4029 trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size); in v9fs_xattrwalk()
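/*
 * Hedged sketch (plain syscalls, not QEMU's v9fs_co_* coroutine wrappers) of
 * the two-pass pattern used in v9fs_xattrwalk() above: call llistxattr() with
 * a NULL buffer to learn the size, allocate, then fetch. QEMU clunks the fid
 * and fails the request if either call errors out in between.
 */
#include <sys/types.h>
#include <sys/xattr.h>
#include <stdlib.h>

static char *list_xattrs(const char *path, ssize_t *len_out)
{
    ssize_t size = llistxattr(path, NULL, 0);    /* pass 1: size probe */
    if (size < 0) {
        return NULL;
    }
    char *buf = calloc(1, size ? size : 1);
    if (!buf) {
        return NULL;
    }
    ssize_t len = llistxattr(path, buf, size);   /* pass 2: actual fetch */
    if (len < 0) {
        free(buf);
        return NULL;
    }
    *len_out = len;
    return buf;    /* caller frees; names are NUL-separated in the buffer */
}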
4045  * Darwin doesn't seem to define a maximum xattr size in its user space headers, so a
4050  * preliminary cap is used; it only works because it is a reflection of the limit seen by Linux guests.
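/*
 * Hedged sketch of the per-host cap that the err = -E2BIG check further down
 * relies on: Linux exposes XATTR_SIZE_MAX (64 KiB) in its headers, while
 * Darwin does not, so a matching figure has to be hard-coded. The macro name
 * and the fallback value here are assumptions, not a quote of QEMU's macro.
 */
#include <errno.h>
#if defined(__linux__)
#include <linux/limits.h>                    /* XATTR_SIZE_MAX */
#define EXAMPLE_XATTR_SIZE_MAX XATTR_SIZE_MAX
#else
#define EXAMPLE_XATTR_SIZE_MAX 65536         /* mirror the Linux guest limit */
#endif

/* usage mirroring the size check in v9fs_xattrcreate() below */
static int example_check_xattr_size(unsigned long long size)
{
    return size > EXAMPLE_XATTR_SIZE_MAX ? -E2BIG : 0;
}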
4060 int flags, rflags = 0; in v9fs_xattrcreate()
4063 ssize_t err = 0; in v9fs_xattrcreate()
4072 if (err < 0) { in v9fs_xattrcreate()
4075 trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags); in v9fs_xattrcreate()
4078 err = -EINVAL; in v9fs_xattrcreate()
4091 err = -E2BIG; in v9fs_xattrcreate()
4097 err = -EINVAL; in v9fs_xattrcreate()
4100 if (file_fidp->fid_type != P9_FID_NONE) { in v9fs_xattrcreate()
4101 err = -EINVAL; in v9fs_xattrcreate()
4107 xattr_fidp->fid_type = P9_FID_XATTR; in v9fs_xattrcreate()
4108 xattr_fidp->fs.xattr.copied_len = 0; in v9fs_xattrcreate()
4109 xattr_fidp->fs.xattr.xattrwalk_fid = false; in v9fs_xattrcreate()
4110 xattr_fidp->fs.xattr.len = size; in v9fs_xattrcreate()
4111 xattr_fidp->fs.xattr.flags = rflags; in v9fs_xattrcreate()
4112 v9fs_string_init(&xattr_fidp->fs.xattr.name); in v9fs_xattrcreate()
4113 v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name); in v9fs_xattrcreate()
4114 xattr_fidp->fs.xattr.value = g_malloc0(size); in v9fs_xattrcreate()
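/*
 * Hedged sketch (not QEMU's types): v9fs_xattrcreate() above only stages the
 * value in a memory buffer attached to the fid; subsequent writes against the
 * fid are expected to fill this buffer, and the actual setxattr is presumably
 * deferred until the fid is clunked with copied_len == len.
 */
#include <stdint.h>
#include <string.h>

struct example_xattr_stage {
    uint64_t len;          /* total size announced by TXATTRCREATE */
    uint64_t copied_len;   /* bytes received from TWRITEs so far */
    void    *value;        /* staging buffer, g_malloc0(size) above */
};

static int example_stage_write(struct example_xattr_stage *x,
                               const void *buf, uint64_t off, uint64_t count)
{
    if (off > x->len || count > x->len - off) {
        return -1;         /* client wrote past the announced size */
    }
    memcpy((char *)x->value + off, buf, count);
    if (off + count > x->copied_len) {
        x->copied_len = off + count;
    }
    return 0;
}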
4129 int err = 0; in v9fs_readlink()
4133 if (err < 0) { in v9fs_readlink()
4136 trace_v9fs_readlink(pdu->tag, pdu->id, fid); in v9fs_readlink()
4139 err = -ENOENT; in v9fs_readlink()
4144 err = v9fs_co_readlink(pdu, &fidp->path, &target); in v9fs_readlink()
4145 if (err < 0) { in v9fs_readlink()
4149 if (err < 0) { in v9fs_readlink()
4154 trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data); in v9fs_readlink()
4186 #if 0
4202 pdu_complete(pdu, -EOPNOTSUPP); in v9fs_op_not_supp()
4208 pdu_complete(pdu, -EROFS); in v9fs_fs_ro()
4213 switch (pdu->id) { in is_read_only_op()
4234 return 0; in is_read_only_op()
4242 V9fsState *s = pdu->s; in pdu_submit()
4244 pdu->size = le32_to_cpu(hdr->size_le); in pdu_submit()
4245 pdu->id = hdr->id; in pdu_submit()
4246 pdu->tag = le16_to_cpu(hdr->tag_le); in pdu_submit()
4248 if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) || in pdu_submit()
4249 (pdu_co_handlers[pdu->id] == NULL)) { in pdu_submit()
4251 } else if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) { in pdu_submit()
4254 handler = pdu_co_handlers[pdu->id]; in pdu_submit()
4257 qemu_co_queue_init(&pdu->complete); in pdu_submit()
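/*
 * Hedged sketch of the dispatch shape used by pdu_submit() above: unknown
 * request ids get EOPNOTSUPP, mutating ids on a read-only export get EROFS,
 * and everything else runs its coroutine handler. The op ids and the
 * read-only set below are illustrative, not the full 9P handler table.
 */
#include <errno.h>
#include <stdbool.h>

typedef void (*ex_handler_t)(void *req);

static void ex_read(void *req)  { (void)req; /* serve a TREAD */ }
static void ex_write(void *req) { (void)req; /* serve a TWRITE */ }

enum { EX_TREAD = 116, EX_TWRITE = 118, EX_MAX = 128 };   /* assumed ids */

static ex_handler_t ex_handlers[EX_MAX] = {
    [EX_TREAD]  = ex_read,
    [EX_TWRITE] = ex_write,
};

static bool ex_is_read_only_op(unsigned id)
{
    return id == EX_TREAD;    /* QEMU whitelists many more read-only ids */
}

static int ex_dispatch(unsigned id, bool ro_export, void *req)
{
    if (id >= EX_MAX || !ex_handlers[id]) {
        return -EOPNOTSUPP;   /* mirrors v9fs_op_not_supp() */
    }
    if (ro_export && !ex_is_read_only_op(id)) {
        return -EROFS;        /* mirrors v9fs_fs_ro() */
    }
    ex_handlers[id](req);
    return 0;
}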
4262 /* Returns 0 on success, 1 on failure. */
4273 assert(!s->transport); in v9fs_device_realize_common()
4274 s->transport = t; in v9fs_device_realize_common()
4277 QLIST_INIT(&s->free_list); in v9fs_device_realize_common()
4278 QLIST_INIT(&s->active_list); in v9fs_device_realize_common()
4279 for (i = 0; i < MAX_REQ; i++) { in v9fs_device_realize_common()
4280 QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next); in v9fs_device_realize_common()
4281 s->pdus[i].s = s; in v9fs_device_realize_common()
4282 s->pdus[i].idx = i; in v9fs_device_realize_common()
4287 fse = get_fsdev_fsentry(s->fsconf.fsdev_id); in v9fs_device_realize_common()
4290 /* We don't have an fsdev identified by fsdev_id */ in v9fs_device_realize_common()
4291 error_setg(errp, "9pfs device couldn't find fsdev with the " in v9fs_device_realize_common()
4293 s->fsconf.fsdev_id ? s->fsconf.fsdev_id : "NULL"); in v9fs_device_realize_common()
4297 if (!s->fsconf.tag) { in v9fs_device_realize_common()
4298 /* we haven't specified a mount_tag */ in v9fs_device_realize_common()
4300 s->fsconf.fsdev_id); in v9fs_device_realize_common()
4304 s->ctx.export_flags = fse->export_flags; in v9fs_device_realize_common()
4305 s->ctx.fs_root = g_strdup(fse->path); in v9fs_device_realize_common()
4306 s->ctx.exops.get_st_gen = NULL; in v9fs_device_realize_common()
4307 len = strlen(s->fsconf.tag); in v9fs_device_realize_common()
4308 if (len > MAX_TAG_LEN - 1) { in v9fs_device_realize_common()
4310 "maximum (%d bytes)", s->fsconf.tag, len, MAX_TAG_LEN - 1); in v9fs_device_realize_common()
4314 s->tag = g_strdup(s->fsconf.tag); in v9fs_device_realize_common()
4315 s->ctx.uid = -1; in v9fs_device_realize_common()
4317 s->ops = fse->ops; in v9fs_device_realize_common()
4319 s->ctx.fmode = fse->fmode; in v9fs_device_realize_common()
4320 s->ctx.dmode = fse->dmode; in v9fs_device_realize_common()
4322 s->fids = g_hash_table_new(NULL, NULL); in v9fs_device_realize_common()
4323 qemu_co_rwlock_init(&s->rename_lock); in v9fs_device_realize_common()
4325 if (s->ops->init(&s->ctx, errp) < 0) { in v9fs_device_realize_common()
4327 s->fsconf.fsdev_id); in v9fs_device_realize_common()
4334 * use co-routines here. in v9fs_device_realize_common()
4336 if (s->ops->name_to_path(&s->ctx, NULL, "/", &path) < 0) { in v9fs_device_realize_common()
4341 if (s->ops->lstat(&s->ctx, &path, &stat)) { in v9fs_device_realize_common()
4342 error_setg(errp, "share path %s does not exist", fse->path); in v9fs_device_realize_common()
4345 error_setg(errp, "share path %s is not a directory", fse->path); in v9fs_device_realize_common()
4349 s->dev_id = stat.st_dev; in v9fs_device_realize_common()
4353 qpd_table_init(&s->qpd_table); in v9fs_device_realize_common()
4355 qpf_table_init(&s->qpf_table); in v9fs_device_realize_common()
4357 qpp_table_init(&s->qpp_table); in v9fs_device_realize_common()
4358 s->qp_ndevices = 0; in v9fs_device_realize_common()
4359 s->qp_affix_next = 1; /* reserve 0 to detect overflow */ in v9fs_device_realize_common()
4360 s->qp_fullpath_next = 1; in v9fs_device_realize_common()
4362 s->ctx.fst = &fse->fst; in v9fs_device_realize_common()
4363 fsdev_throttle_init(s->ctx.fst); in v9fs_device_realize_common()
4365 s->reclaiming = false; in v9fs_device_realize_common()
4367 rc = 0; in v9fs_device_realize_common()
4378 if (s->ops && s->ops->cleanup) { in v9fs_device_unrealize_common()
4379 s->ops->cleanup(&s->ctx); in v9fs_device_unrealize_common()
4381 if (s->ctx.fst) { in v9fs_device_unrealize_common()
4382 fsdev_throttle_cleanup(s->ctx.fst); in v9fs_device_unrealize_common()
4384 if (s->fids) { in v9fs_device_unrealize_common()
4385 g_hash_table_destroy(s->fids); in v9fs_device_unrealize_common()
4386 s->fids = NULL; in v9fs_device_unrealize_common()
4388 g_free(s->tag); in v9fs_device_unrealize_common()
4389 qp_table_destroy(&s->qpd_table); in v9fs_device_unrealize_common()
4390 qp_table_destroy(&s->qpp_table); in v9fs_device_unrealize_common()
4391 qp_table_destroy(&s->qpf_table); in v9fs_device_unrealize_common()
4392 g_free(s->ctx.fs_root); in v9fs_device_unrealize_common()
4404 virtfs_reset(&data->pdu); in virtfs_co_reset()
4405 data->done = true; in virtfs_co_reset()
4413 while (!QLIST_EMPTY(&s->active_list)) { in v9fs_reset()
4428 if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) { in v9fs_set_fd_limit()
4432 open_fd_hw = rlim.rlim_cur - MIN(400, rlim.rlim_cur / 3); in v9fs_set_fd_limit()
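/*
 * Hedged sketch of the headroom calculation in v9fs_set_fd_limit() above:
 * keep back the smaller of 400 fds or one third of the soft RLIMIT_NOFILE
 * limit for everything that is not a 9p fid. For example, with a soft limit
 * of 1024 the threshold becomes 1024 - MIN(400, 341) = 683 open fds.
 */
#include <sys/resource.h>
#include <stdio.h>

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

int main(void)
{
    struct rlimit rlim;

    if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {
        perror("getrlimit");
        return 1;
    }
    rlim_t open_fd_hw = rlim.rlim_cur - MIN(400, rlim.rlim_cur / 3);
    printf("soft limit %llu -> high-water mark of %llu open fds\n",
           (unsigned long long)rlim.rlim_cur, (unsigned long long)open_fd_hw);
    return 0;
}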