xref: /qemu/hw/9pfs/9p.c (revision d64db833d6e3cbe9ea5f36342480f920f3675cea)
1 /*
2  * Virtio 9p backend
3  *
4  * Copyright IBM, Corp. 2010
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  */
13 
14 /*
15  * Not so fast! You might want to read the 9p developer docs first:
16  * https://wiki.qemu.org/Documentation/9p
17  */
18 
19 #include "qemu/osdep.h"
20 #ifdef CONFIG_LINUX
21 #include <linux/limits.h>
22 #endif
23 #include <glib/gprintf.h>
24 #include "hw/virtio/virtio.h"
25 #include "qapi/error.h"
26 #include "qemu/error-report.h"
27 #include "qemu/iov.h"
28 #include "qemu/main-loop.h"
29 #include "qemu/sockets.h"
30 #include "virtio-9p.h"
31 #include "fsdev/qemu-fsdev.h"
32 #include "9p-xattr.h"
33 #include "9p-util.h"
34 #include "coth.h"
35 #include "trace.h"
36 #include "migration/blocker.h"
37 #include "qemu/xxhash.h"
38 #include <math.h>
39 
40 int open_fd_hw;
41 int total_open_fd;
42 static int open_fd_rc;
43 
44 enum {
45     Oread   = 0x00,
46     Owrite  = 0x01,
47     Ordwr   = 0x02,
48     Oexec   = 0x03,
49     Oexcl   = 0x04,
50     Otrunc  = 0x10,
51     Orexec  = 0x20,
52     Orclose = 0x40,
53     Oappend = 0x80,
54 };
55 
56 P9ARRAY_DEFINE_TYPE(V9fsPath, v9fs_path_free);
57 
58 static ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
59 {
60     ssize_t ret;
61     va_list ap;
62 
63     va_start(ap, fmt);
64     ret = pdu->s->transport->pdu_vmarshal(pdu, offset, fmt, ap);
65     va_end(ap);
66 
67     return ret;
68 }
69 
70 static ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
71 {
72     ssize_t ret;
73     va_list ap;
74 
75     va_start(ap, fmt);
76     ret = pdu->s->transport->pdu_vunmarshal(pdu, offset, fmt, ap);
77     va_end(ap);
78 
79     return ret;
80 }
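
/*
 * Format characters used with pdu_marshal()/pdu_unmarshal() throughout this
 * file, as can be inferred from their call sites below: 'b' = 8 bit, 'w' =
 * 16 bit, 'd' = 32 bit and 'q' = 64 bit integers, 's' = V9fsString,
 * 'Q' = V9fsQID, 'S' = V9fsStat, 'A' = V9fsStatDotl, 'I' = V9fsIattr. The
 * actual wire encoding is implemented by the transport's vmarshal/vunmarshal
 * hooks used above.
 */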
81 
82 static int omode_to_uflags(int8_t mode)
83 {
84     int ret = 0;
85 
86     switch (mode & 3) {
87     case Oread:
88         ret = O_RDONLY;
89         break;
90     case Ordwr:
91         ret = O_RDWR;
92         break;
93     case Owrite:
94         ret = O_WRONLY;
95         break;
96     case Oexec:
97         ret = O_RDONLY;
98         break;
99     }
100 
101     if (mode & Otrunc) {
102         ret |= O_TRUNC;
103     }
104 
105     if (mode & Oappend) {
106         ret |= O_APPEND;
107     }
108 
109     if (mode & Oexcl) {
110         ret |= O_EXCL;
111     }
112 
113     return ret;
114 }
115 
116 typedef struct DotlOpenflagMap {
117     int dotl_flag;
118     int open_flag;
119 } DotlOpenflagMap;
120 
121 static int dotl_to_open_flags(int flags)
122 {
123     int i;
124     /*
125      * P9_DOTL_READONLY, P9_DOTL_WRONLY and P9_DOTL_NOACCESS use the same
126      * bit values as the host access mode bits (O_ACCMODE)
127      */
128     int oflags = flags & O_ACCMODE;
129 
130     DotlOpenflagMap dotl_oflag_map[] = {
131         { P9_DOTL_CREATE, O_CREAT },
132         { P9_DOTL_EXCL, O_EXCL },
133         { P9_DOTL_NOCTTY, O_NOCTTY },
134         { P9_DOTL_TRUNC, O_TRUNC },
135         { P9_DOTL_APPEND, O_APPEND },
136         { P9_DOTL_NONBLOCK, O_NONBLOCK },
137         { P9_DOTL_DSYNC, O_DSYNC },
138         { P9_DOTL_FASYNC, FASYNC },
139 #ifndef CONFIG_DARWIN
140         { P9_DOTL_NOATIME, O_NOATIME },
141         /*
142          * On Darwin, we could map to F_NOCACHE, which is
143          * similar, but doesn't quite have the same
144          * semantics. However, we don't support O_DIRECT
145          * even on Linux at the moment, so we just ignore
146          * it here.
147          */
148         { P9_DOTL_DIRECT, O_DIRECT },
149 #endif
150         { P9_DOTL_LARGEFILE, O_LARGEFILE },
151         { P9_DOTL_DIRECTORY, O_DIRECTORY },
152         { P9_DOTL_NOFOLLOW, O_NOFOLLOW },
153         { P9_DOTL_SYNC, O_SYNC },
154     };
155 
156     for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
157         if (flags & dotl_oflag_map[i].dotl_flag) {
158             oflags |= dotl_oflag_map[i].open_flag;
159         }
160     }
161 
162     return oflags;
163 }
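
/*
 * For example, a client opening a file write-only with the CREATE and TRUNC
 * dotl bits set gets O_WRONLY | O_CREAT | O_TRUNC back from this function:
 * the access mode passes through via O_ACCMODE, the remaining bits are
 * translated by the table above. Note that get_dotl_openflags() below
 * additionally strips O_CREAT, O_NOCTTY and O_ASYNC.
 */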
164 
165 void cred_init(FsCred *credp)
166 {
167     credp->fc_uid = -1;
168     credp->fc_gid = -1;
169     credp->fc_mode = -1;
170     credp->fc_rdev = -1;
171 }
172 
173 static int get_dotl_openflags(V9fsState *s, int oflags)
174 {
175     int flags;
176     /*
177      * Filter the client open flags
178      */
179     flags = dotl_to_open_flags(oflags);
180     flags &= ~(O_NOCTTY | O_ASYNC | O_CREAT);
181 #ifndef CONFIG_DARWIN
182     /*
183      * Ignore direct disk access hint until the server supports it.
184      */
185     flags &= ~O_DIRECT;
186 #endif
187     return flags;
188 }
189 
190 void v9fs_path_init(V9fsPath *path)
191 {
192     path->data = NULL;
193     path->size = 0;
194 }
195 
196 void v9fs_path_free(V9fsPath *path)
197 {
198     g_free(path->data);
199     path->data = NULL;
200     path->size = 0;
201 }
202 
203 
204 void G_GNUC_PRINTF(2, 3)
205 v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...)
206 {
207     va_list ap;
208 
209     v9fs_path_free(path);
210 
211     va_start(ap, fmt);
212     /* Bump the size to include the terminating NUL character */
213     path->size = g_vasprintf(&path->data, fmt, ap) + 1;
214     va_end(ap);
215 }
216 
217 void v9fs_path_copy(V9fsPath *dst, const V9fsPath *src)
218 {
219     v9fs_path_free(dst);
220     dst->size = src->size;
221     dst->data = g_memdup(src->data, src->size);
222 }
223 
224 int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
225                       const char *name, V9fsPath *path)
226 {
227     int err;
228     err = s->ops->name_to_path(&s->ctx, dirpath, name, path);
229     if (err < 0) {
230         err = -errno;
231     }
232     return err;
233 }
234 
235 /*
236  * Return TRUE if s1 is an ancestor of s2.
237  *
238  * E.g. "a/b" is an ancestor of "a/b/c" but not of "a/bc/d".
239  * As a special case, We treat s1 as ancestor of s2 if they are same!
240  */
241 static int v9fs_path_is_ancestor(V9fsPath *s1, V9fsPath *s2)
242 {
243     if (!strncmp(s1->data, s2->data, s1->size - 1)) {
244         if (s2->data[s1->size - 1] == '\0' || s2->data[s1->size - 1] == '/') {
245             return 1;
246         }
247     }
248     return 0;
249 }
250 
251 static size_t v9fs_string_size(V9fsString *str)
252 {
253     return str->size;
254 }
255 
256 /*
257  * returns 0 if fid got re-opened, 1 if not, < 0 on error
258  */
259 static int coroutine_fn v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
260 {
261     int err = 1;
262     if (f->fid_type == P9_FID_FILE) {
263         if (f->fs.fd == -1) {
264             do {
265                 err = v9fs_co_open(pdu, f, f->open_flags);
266             } while (err == -EINTR && !pdu->cancelled);
267         }
268     } else if (f->fid_type == P9_FID_DIR) {
269         if (f->fs.dir.stream == NULL) {
270             do {
271                 err = v9fs_co_opendir(pdu, f);
272             } while (err == -EINTR && !pdu->cancelled);
273         }
274     }
275     return err;
276 }
277 
278 static V9fsFidState *coroutine_fn get_fid(V9fsPDU *pdu, int32_t fid)
279 {
280     int err;
281     V9fsFidState *f;
282     V9fsState *s = pdu->s;
283 
284     f = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid));
285     if (f) {
286         BUG_ON(f->clunked);
287         /*
288          * Update the fid ref upfront so that
289          * we don't get reclaimed when we yield
290          * in open later.
291          */
292         f->ref++;
293         /*
294          * check whether we need to reopen the
295          * file. We might have closed the fd
296          * while trying to free up some file
297          * descriptors.
298          */
299         err = v9fs_reopen_fid(pdu, f);
300         if (err < 0) {
301             f->ref--;
302             return NULL;
303         }
304         /*
305          * Mark the fid as referenced so that the LRU
306          * reclaim won't close the file descriptor
307          */
308         f->flags |= FID_REFERENCED;
309         return f;
310     }
311     return NULL;
312 }
313 
314 static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid)
315 {
316     V9fsFidState *f;
317 
318     f = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid));
319     if (f) {
320         /* If fid is already there return NULL */
321         BUG_ON(f->clunked);
322         return NULL;
323     }
324     f = g_new0(V9fsFidState, 1);
325     f->fid = fid;
326     f->fid_type = P9_FID_NONE;
327     f->ref = 1;
328     /*
329      * Mark the fid as referenced so that the LRU
330      * reclaim won't close the file descriptor
331      */
332     f->flags |= FID_REFERENCED;
333     g_hash_table_insert(s->fids, GINT_TO_POINTER(fid), f);
334 
335     v9fs_readdir_init(s->proto_version, &f->fs.dir);
336     v9fs_readdir_init(s->proto_version, &f->fs_reclaim.dir);
337 
338     return f;
339 }
340 
341 static int coroutine_fn v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
342 {
343     int retval = 0;
344 
345     if (fidp->fs.xattr.xattrwalk_fid) {
346         /* getxattr/listxattr fid */
347         goto free_value;
348     }
349     /*
350      * if this is fid for setxattr. clunk should
351      * result in setxattr localcall
352      */
353     if (fidp->fs.xattr.len != fidp->fs.xattr.copied_len) {
354         /* clunk after partial write */
355         retval = -EINVAL;
356         goto free_out;
357     }
358     if (fidp->fs.xattr.len) {
359         retval = v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name,
360                                    fidp->fs.xattr.value,
361                                    fidp->fs.xattr.len,
362                                    fidp->fs.xattr.flags);
363     } else {
364         retval = v9fs_co_lremovexattr(pdu, &fidp->path, &fidp->fs.xattr.name);
365     }
366 free_out:
367     v9fs_string_free(&fidp->fs.xattr.name);
368 free_value:
369     g_free(fidp->fs.xattr.value);
370     return retval;
371 }
372 
373 static int coroutine_fn free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
374 {
375     int retval = 0;
376 
377     if (fidp->fid_type == P9_FID_FILE) {
378         /* If we reclaimed the fd no need to close */
379         if (fidp->fs.fd != -1) {
380             retval = v9fs_co_close(pdu, &fidp->fs);
381         }
382     } else if (fidp->fid_type == P9_FID_DIR) {
383         if (fidp->fs.dir.stream != NULL) {
384             retval = v9fs_co_closedir(pdu, &fidp->fs);
385         }
386     } else if (fidp->fid_type == P9_FID_XATTR) {
387         retval = v9fs_xattr_fid_clunk(pdu, fidp);
388     }
389     v9fs_path_free(&fidp->path);
390     g_free(fidp);
391     return retval;
392 }
393 
394 static int coroutine_fn put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
395 {
396     BUG_ON(!fidp->ref);
397     fidp->ref--;
398     /*
399      * Don't free the fid if it is in reclaim list
400      */
401     if (!fidp->ref && fidp->clunked) {
402         if (fidp->fid == pdu->s->root_fid) {
403             /*
404              * if the clunked fid is root fid then we
405              * have unmounted the fs on the client side.
406              * delete the migration blocker. Ideally, this
407              * should be hooked to transport close notification
408              */
409             migrate_del_blocker(&pdu->s->migration_blocker);
410         }
411         return free_fid(pdu, fidp);
412     }
413     return 0;
414 }
415 
416 static V9fsFidState *clunk_fid(V9fsState *s, int32_t fid)
417 {
418     V9fsFidState *fidp;
419 
420     /* TODO: Use g_hash_table_steal_extended() instead? */
421     fidp = g_hash_table_lookup(s->fids, GINT_TO_POINTER(fid));
422     if (fidp) {
423         g_hash_table_remove(s->fids, GINT_TO_POINTER(fid));
424         fidp->clunked = true;
425         return fidp;
426     }
427     return NULL;
428 }
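
/*
 * Summary of the fid life cycle implemented above: alloc_fid() creates a fid
 * with ref = 1; each request operating on an existing fid brackets its work
 * with get_fid()/put_fid(); clunk_fid() merely detaches the fid from the
 * table and marks it clunked; the underlying resources are finally released
 * by free_fid() once the last reference is dropped in put_fid().
 */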
429 
430 void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu)
431 {
432     int reclaim_count = 0;
433     V9fsState *s = pdu->s;
434     V9fsFidState *f;
435     GHashTableIter iter;
436     gpointer fid;
437 
438     g_hash_table_iter_init(&iter, s->fids);
439 
440     QSLIST_HEAD(, V9fsFidState) reclaim_list =
441         QSLIST_HEAD_INITIALIZER(reclaim_list);
442 
443     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &f)) {
444         /*
445          * Fids marked non-reclaimable (e.g. because their path was
446          * unlinked) cannot be reclaimed; skip them. Also skip fids
447          * currently being operated on (non-zero ref count).
448          */
449         if (f->ref || f->flags & FID_NON_RECLAIMABLE) {
450             continue;
451         }
452         /*
453          * If it is a recently referenced fid, leave it
454          * untouched and just clear the reference bit; we
455          * will come back to it in a later iteration (a
456          * simple LRU approximation without moving list
457          * elements around).
458          */
459         if (f->flags & FID_REFERENCED) {
460             f->flags &= ~FID_REFERENCED;
461             continue;
462         }
463         /*
464          * Add fids to reclaim list.
465          */
466         if (f->fid_type == P9_FID_FILE) {
467             if (f->fs.fd != -1) {
468                 /*
469                  * Up the reference count so that
470                  * a clunk request won't free this fid
471                  */
472                 f->ref++;
473                 QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next);
474                 f->fs_reclaim.fd = f->fs.fd;
475                 f->fs.fd = -1;
476                 reclaim_count++;
477             }
478         } else if (f->fid_type == P9_FID_DIR) {
479             if (f->fs.dir.stream != NULL) {
480                 /*
481                  * Up the reference count so that
482                  * a clunk request won't free this fid
483                  */
484                 f->ref++;
485                 QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next);
486                 f->fs_reclaim.dir.stream = f->fs.dir.stream;
487                 f->fs.dir.stream = NULL;
488                 reclaim_count++;
489             }
490         }
491         if (reclaim_count >= open_fd_rc) {
492             break;
493         }
494     }
495     /*
496      * Now close the fids on the reclaim list, and free them if
497      * they are already clunked.
498      */
499     while (!QSLIST_EMPTY(&reclaim_list)) {
500         f = QSLIST_FIRST(&reclaim_list);
501         QSLIST_REMOVE(&reclaim_list, f, V9fsFidState, reclaim_next);
502         if (f->fid_type == P9_FID_FILE) {
503             v9fs_co_close(pdu, &f->fs_reclaim);
504         } else if (f->fid_type == P9_FID_DIR) {
505             v9fs_co_closedir(pdu, &f->fs_reclaim);
506         }
507         /*
508          * Now drop the fid reference, free it
509          * if clunked.
510          */
511         put_fid(pdu, f);
512     }
513 }
514 
515 /*
516  * This is used when a path is removed from the directory tree. Any
517  * fids that still reference it must not be closed from then on, since
518  * they cannot be reopened.
519  */
520 static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
521 {
522     int err = 0;
523     V9fsState *s = pdu->s;
524     V9fsFidState *fidp;
525     gpointer fid;
526     GHashTableIter iter;
527     /*
528      * The most common case is probably that we have exactly one
529      * fid for the given path, so preallocate exactly one.
530      */
531     g_autoptr(GArray) to_reopen = g_array_sized_new(FALSE, FALSE,
532             sizeof(V9fsFidState *), 1);
533     gint i;
534 
535     g_hash_table_iter_init(&iter, s->fids);
536 
537     /*
538      * We iterate over the fid table looking for the entries we need
539      * to reopen, and store them in to_reopen. This is because
540      * v9fs_reopen_fid() and put_fid() yield, which allows the fid table
541      * to be modified in the meantime and would invalidate our iterator.
542      */
543     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &fidp)) {
544         if (fidp->path.size == path->size &&
545             !memcmp(fidp->path.data, path->data, path->size)) {
546             /*
547              * Ensure the fid survives a potential clunk request during
548              * v9fs_reopen_fid or put_fid.
549              */
550             fidp->ref++;
551             fidp->flags |= FID_NON_RECLAIMABLE;
552             g_array_append_val(to_reopen, fidp);
553         }
554     }
555 
556     for (i = 0; i < to_reopen->len; i++) {
557         fidp = g_array_index(to_reopen, V9fsFidState*, i);
558         /* reopen the file/dir if already closed */
559         err = v9fs_reopen_fid(pdu, fidp);
560         if (err < 0) {
561             break;
562         }
563     }
564 
565     for (i = 0; i < to_reopen->len; i++) {
566         put_fid(pdu, g_array_index(to_reopen, V9fsFidState*, i));
567     }
568     return err;
569 }
570 
571 static void coroutine_fn virtfs_reset(V9fsPDU *pdu)
572 {
573     V9fsState *s = pdu->s;
574     V9fsFidState *fidp;
575     GList *freeing;
576     /*
577      * Get a list of all the values (fid states) in the table, which
578      * we then...
579      */
580     g_autoptr(GList) fids = g_hash_table_get_values(s->fids);
581 
582     /* ... remove from the table, taking over ownership. */
583     g_hash_table_steal_all(s->fids);
584 
585     /*
586      * This allows us to release our references to them asynchronously without
587      * iterating over the hash table and risking iterator invalidation
588      * through concurrent modifications.
589      */
590     for (freeing = fids; freeing; freeing = freeing->next) {
591         fidp = freeing->data;
592         fidp->ref++;
593         fidp->clunked = true;
594         put_fid(pdu, fidp);
595     }
596 }
597 
598 #define P9_QID_TYPE_DIR         0x80
599 #define P9_QID_TYPE_SYMLINK     0x02
600 
601 #define P9_STAT_MODE_DIR        0x80000000
602 #define P9_STAT_MODE_APPEND     0x40000000
603 #define P9_STAT_MODE_EXCL       0x20000000
604 #define P9_STAT_MODE_MOUNT      0x10000000
605 #define P9_STAT_MODE_AUTH       0x08000000
606 #define P9_STAT_MODE_TMP        0x04000000
607 #define P9_STAT_MODE_SYMLINK    0x02000000
608 #define P9_STAT_MODE_LINK       0x01000000
609 #define P9_STAT_MODE_DEVICE     0x00800000
610 #define P9_STAT_MODE_NAMED_PIPE 0x00200000
611 #define P9_STAT_MODE_SOCKET     0x00100000
612 #define P9_STAT_MODE_SETUID     0x00080000
613 #define P9_STAT_MODE_SETGID     0x00040000
614 #define P9_STAT_MODE_SETVTX     0x00010000
615 
616 #define P9_STAT_MODE_TYPE_BITS (P9_STAT_MODE_DIR |          \
617                                 P9_STAT_MODE_SYMLINK |      \
618                                 P9_STAT_MODE_LINK |         \
619                                 P9_STAT_MODE_DEVICE |       \
620                                 P9_STAT_MODE_NAMED_PIPE |   \
621                                 P9_STAT_MODE_SOCKET)
622 
623 /* Mirrors all bits of a byte. So e.g. binary 10100000 would become 00000101. */
624 static inline uint8_t mirror8bit(uint8_t byte)
625 {
626     return (byte * 0x0202020202ULL & 0x010884422010ULL) % 1023;
627 }
628 
629 /* Same as mirror8bit(), just for a 64 bit data type instead of a byte. */
630 static inline uint64_t mirror64bit(uint64_t value)
631 {
632     return ((uint64_t)mirror8bit(value         & 0xff) << 56) |
633            ((uint64_t)mirror8bit((value >> 8)  & 0xff) << 48) |
634            ((uint64_t)mirror8bit((value >> 16) & 0xff) << 40) |
635            ((uint64_t)mirror8bit((value >> 24) & 0xff) << 32) |
636            ((uint64_t)mirror8bit((value >> 32) & 0xff) << 24) |
637            ((uint64_t)mirror8bit((value >> 40) & 0xff) << 16) |
638            ((uint64_t)mirror8bit((value >> 48) & 0xff) << 8)  |
639            ((uint64_t)mirror8bit((value >> 56) & 0xff));
640 }
641 
642 /*
643  * Parameter k for the Exponential Golomb algorithm to be used.
644  *
645  * The smaller this value, the smaller the minimum bit count of the
646  * generated Exp. Golomb affixes will be (at the lowest index), at the
647  * price of a higher maximum bit count of generated affixes (at the
648  * highest index). Likewise, increasing this parameter yields a smaller
649  * maximum bit count at the price of a higher minimum bit count.
650  *
651  * In practice that means: a good value for k depends on the expected
652  * number of devices to be exposed by one export. For a small number of
653  * devices k should be small; for a large number of devices k might be
654  * increased instead. The default of k=0 should be fine for most users.
655  *
656  * IMPORTANT: In case this ever becomes a runtime parameter, the value of
657  * k must not change as long as the guest is still running, because that
658  * would cause completely different inode numbers to be generated on the guest.
659  */
660 #define EXP_GOLOMB_K    0
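
/*
 * Illustration of the trade-off described above, derived from the formula in
 * expGolombEncode() below: with k = 0 the affix for index 1 is 1 bit long,
 * indices 2..3 get 3 bits, 4..7 get 5 bits, 8..15 get 7 bits, and so on;
 * with k = 1 indices 1..2 get 2 bits, 3..6 get 4 bits, 7..14 get 6 bits, etc.
 */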
661 
662 /**
663  * expGolombEncode() - Exponential Golomb algorithm for arbitrary k
664  *                     (including k=0).
665  *
666  * @n: natural number (or index) of the prefix to be generated
667  *     (1, 2, 3, ...)
668  * @k: parameter k of Exp. Golomb algorithm to be used
669  *     (see comment on EXP_GOLOMB_K macro for details about k)
670  * Return: prefix for given @n and @k
671  *
672  * The Exponential Golomb algorithm generates prefixes (NOT suffixes!)
673  * with growing length and with the mathematical property of being
674  * "prefix-free". The latter means the generated prefixes can be prepended
675  * in front of arbitrary numbers and the resulting concatenated numbers are
676  * guaranteed to be always unique.
677  *
678  * This is a minor adjustment to the original Exp. Golomb algorithm in the
679  * sense that the lowest allowed index (@n) starts at 1, not at zero.
680  */
681 static VariLenAffix expGolombEncode(uint64_t n, int k)
682 {
683     const uint64_t value = n + (1 << k) - 1;
684     const int bits = (int) log2(value) + 1;
685     return (VariLenAffix) {
686         .type = AffixType_Prefix,
687         .value = value,
688         .bits = bits + MAX((bits - 1 - k), 0)
689     };
690 }
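
/*
 * Worked example for the default k = 0: expGolombEncode(1, 0) yields
 * { .value = 1, .bits = 1 }, expGolombEncode(2, 0) yields { .value = 2,
 * .bits = 3 }, expGolombEncode(3, 0) yields { .value = 3, .bits = 3 } and
 * expGolombEncode(4, 0) yields { .value = 4, .bits = 5 }, i.e. the bit
 * patterns 1, 010, 011 and 00100, none of which is a prefix of another.
 */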
691 
692 /**
693  * invertAffix() - Converts a suffix into a prefix, or a prefix into a suffix.
694  * @affix: either suffix or prefix to be inverted
695  * Return: inversion of passed @affix
696  *
697  * Simply mirror all bits of the affix value in order to preserve the
698  * mathematical "prefix-free" or "suffix-free" property, respectively,
699  * after the conversion.
700  *
701  * If a passed prefix is suitable to create unique numbers, then the
702  * returned suffix is suitable to create unique numbers as well (and vice
703  * versa).
704  */
705 static VariLenAffix invertAffix(const VariLenAffix *affix)
706 {
707     return (VariLenAffix) {
708         .type =
709             (affix->type == AffixType_Suffix) ?
710                 AffixType_Prefix : AffixType_Suffix,
711         .value =
712             mirror64bit(affix->value) >>
713             ((sizeof(affix->value) * 8) - affix->bits),
714         .bits = affix->bits
715     };
716 }
717 
718 /**
719  * affixForIndex() - Generates suffix numbers with "suffix-free" property.
720  * @index: natural number (or index) of the suffix to be generated
721  *         (1, 2, 3, ...)
722  * Return: Suffix suitable to assemble unique number.
723  *
724  * This is just a wrapper function on top of the Exp. Golomb algorithm.
725  *
726  * Since the Exp. Golomb algorithm generates prefixes, but we need suffixes,
727  * this function converts the Exp. Golomb prefixes into appropriate suffixes
728  * which are still suitable for generating unique numbers.
729  */
730 static VariLenAffix affixForIndex(uint64_t index)
731 {
732     VariLenAffix prefix;
733     prefix = expGolombEncode(index, EXP_GOLOMB_K);
734     return invertAffix(&prefix); /* convert prefix to suffix */
735 }
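
/*
 * Continuing the example above: affixForIndex(1) yields the 1-bit suffix 1,
 * affixForIndex(2) the 3-bit suffix 010, affixForIndex(3) the 3-bit suffix
 * 110 and affixForIndex(4) the 5-bit suffix 00100. qid_path_suffixmap()
 * below appends such a suffix to the (shifted) host inode number; since no
 * suffix ends in another suffix, inode numbers built with different affixes
 * can never collide.
 */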
736 
737 static uint32_t qpp_hash(QppEntry e)
738 {
739     return qemu_xxhash4(e.ino_prefix, e.dev);
740 }
741 
742 static uint32_t qpf_hash(QpfEntry e)
743 {
744     return qemu_xxhash4(e.ino, e.dev);
745 }
746 
747 static bool qpd_cmp_func(const void *obj, const void *userp)
748 {
749     const QpdEntry *e1 = obj, *e2 = userp;
750     return e1->dev == e2->dev;
751 }
752 
753 static bool qpp_cmp_func(const void *obj, const void *userp)
754 {
755     const QppEntry *e1 = obj, *e2 = userp;
756     return e1->dev == e2->dev && e1->ino_prefix == e2->ino_prefix;
757 }
758 
759 static bool qpf_cmp_func(const void *obj, const void *userp)
760 {
761     const QpfEntry *e1 = obj, *e2 = userp;
762     return e1->dev == e2->dev && e1->ino == e2->ino;
763 }
764 
765 static void qp_table_remove(void *p, uint32_t h, void *up)
766 {
767     g_free(p);
768 }
769 
770 static void qp_table_destroy(struct qht *ht)
771 {
772     if (!ht || !ht->map) {
773         return;
774     }
775     qht_iter(ht, qp_table_remove, NULL);
776     qht_destroy(ht);
777 }
778 
779 static void qpd_table_init(struct qht *ht)
780 {
781     qht_init(ht, qpd_cmp_func, 1, QHT_MODE_AUTO_RESIZE);
782 }
783 
784 static void qpp_table_init(struct qht *ht)
785 {
786     qht_init(ht, qpp_cmp_func, 1, QHT_MODE_AUTO_RESIZE);
787 }
788 
789 static void qpf_table_init(struct qht *ht)
790 {
791     qht_init(ht, qpf_cmp_func, 1 << 16, QHT_MODE_AUTO_RESIZE);
792 }
793 
794 /*
795  * Returns how many (high end) bits of inode numbers of the passed fs
796  * device shall be used (in combination with the device number) to
797  * generate hash values for qpp_table entries.
798  *
799  * This function is required if variable length suffixes are used for inode
800  * number mapping on guest level. Since a device may end up having multiple
801  * entries in qpp_table, each entry most probably with a different suffix
802  * length, we need this function in conjunction with qpd_table to "agree"
803  * on a fixed number of bits (per device) that is always used for
804  * generating the hash values used to access qpp_table, so that accesses
805  * to qpp_table behave consistently.
806  */
807 static int qid_inode_prefix_hash_bits(V9fsPDU *pdu, dev_t dev)
808 {
809     QpdEntry lookup = {
810         .dev = dev
811     }, *val;
812     uint32_t hash = dev;
813     VariLenAffix affix;
814 
815     val = qht_lookup(&pdu->s->qpd_table, &lookup, hash);
816     if (!val) {
817         val = g_new0(QpdEntry, 1);
818         *val = lookup;
819         affix = affixForIndex(pdu->s->qp_affix_next);
820         val->prefix_bits = affix.bits;
821         qht_insert(&pdu->s->qpd_table, val, hash, NULL);
822         pdu->s->qp_ndevices++;
823     }
824     return val->prefix_bits;
825 }
826 
827 /*
828  * Slow / full mapping host inode nr -> guest inode nr.
829  *
830  * This function performs a slower and much more costly remapping of an
831  * original file inode number on host to an appropriate different inode
832  * number on guest. For every (dev, inode) combination on host a new
833  * sequential number is generated, cached and exposed as inode number on
834  * guest.
835  *
836  * This is just a "last resort" fallback solution if the much faster/cheaper
837  * qid_path_suffixmap() failed. In practice this slow / full mapping is not
838  * expected ever to be used at all though.
839  *
840  * See qid_path_suffixmap() for details
841  *
842  */
843 static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf,
844                             uint64_t *path)
845 {
846     QpfEntry lookup = {
847         .dev = stbuf->st_dev,
848         .ino = stbuf->st_ino
849     }, *val;
850     uint32_t hash = qpf_hash(lookup);
851     VariLenAffix affix;
852 
853     val = qht_lookup(&pdu->s->qpf_table, &lookup, hash);
854 
855     if (!val) {
856         if (pdu->s->qp_fullpath_next == 0) {
857             /* no more files can be mapped :'( */
858             error_report_once(
859                 "9p: No more prefixes available for remapping inodes from "
860                 "host to guest."
861             );
862             return -ENFILE;
863         }
864 
865         val = g_new0(QpfEntry, 1);
866         *val = lookup;
867 
868         /* new unique inode and device combo */
869         affix = affixForIndex(
870             1ULL << (sizeof(pdu->s->qp_affix_next) * 8)
871         );
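        /*
         * Note: this is the affix for index 1 << (bit width of
         * qp_affix_next), i.e. one past the largest index that
         * qid_path_suffixmap() can ever hand out, so the suffix shared by
         * all full-mapped inodes is never used by suffix-mapped inodes and
         * (thanks to the suffix-free property) the two ranges cannot
         * collide.
         */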
872         val->path = (pdu->s->qp_fullpath_next++ << affix.bits) | affix.value;
873         pdu->s->qp_fullpath_next &= ((1ULL << (64 - affix.bits)) - 1);
874         qht_insert(&pdu->s->qpf_table, val, hash, NULL);
875     }
876 
877     *path = val->path;
878     return 0;
879 }
880 
881 /*
882  * Quick mapping host inode nr -> guest inode nr.
883  *
884  * This function performs quick remapping of an original file inode number
885  * on host to an appropriate different inode number on guest. This remapping
886  * of inodes is required to avoid inode nr collisions on guest which would
887  * happen if the 9p export contains more than 1 exported file system (or
888  * more than 1 file system data set), because unlike on host level where the
889  * files would have different device nrs, all files exported by 9p would
890  * share the same device nr on guest (the device nr of the virtual 9p device
891  * that is).
892  *
893  * Inode remapping is performed by chopping off high end bits of the original
894  * inode number from host, shifting the result upwards and then assigning a
895  * generated suffix number for the low end bits, where the same suffix number
896  * will be shared by all inodes with the same device id AND the same high end
897  * bits that have been chopped off. That approach utilizes the fact that inode
898  * numbers very likely share the same high end bits (i.e. due to their common
899  * sequential generation by file systems) and hence we only have to generate
900  * and track a very limited number of suffixes in practice.
901  *
902  * We generate variable size suffixes for that purpose. The 1st generated
903  * suffix will only have 1 bit and hence we only need to chop off 1 bit from
904  * the original inode number. The subsequent suffixes being generated will
905  * grow in (bit) size subsequently, i.e. the 2nd and 3rd suffix being
906  * generated will have 3 bits and hence we have to chop off 3 bits from their
907  * original inodes, and so on. This approach of using variable length
908  * suffixes (rather than fixed size ones) exploits the fact that in
909  * practice only a very limited number of devices are shared by the same
910  * export (typically less than two dozen devices per 9p export). We
911  * therefore need to chop off fewer bits than with fixed size prefixes,
912  * while still being able to add new devices below the host's export
913  * directory at runtime without rebooting or reconfiguring the guest. And
914  * because only very few of the original high end bits are chopped off
915  * this way, the total number of suffixes we need to generate is smaller
916  * than with fixed size prefixes, which also improves the performance of
917  * the inode remapping algorithm and has the nice side effect that the
918  * inode numbers on the guest will be much smaller & human friendly. ;-)
919  */
920 static int qid_path_suffixmap(V9fsPDU *pdu, const struct stat *stbuf,
921                               uint64_t *path)
922 {
923     const int ino_hash_bits = qid_inode_prefix_hash_bits(pdu, stbuf->st_dev);
924     QppEntry lookup = {
925         .dev = stbuf->st_dev,
926         .ino_prefix = (uint16_t) (stbuf->st_ino >> (64 - ino_hash_bits))
927     }, *val;
928     uint32_t hash = qpp_hash(lookup);
929 
930     val = qht_lookup(&pdu->s->qpp_table, &lookup, hash);
931 
932     if (!val) {
933         if (pdu->s->qp_affix_next == 0) {
934             /* we ran out of affixes */
935             warn_report_once(
936                 "9p: Potential degraded performance of inode remapping"
937             );
938             return -ENFILE;
939         }
940 
941         val = g_new0(QppEntry, 1);
942         *val = lookup;
943 
944         /* new unique inode affix and device combo */
945         val->qp_affix_index = pdu->s->qp_affix_next++;
946         val->qp_affix = affixForIndex(val->qp_affix_index);
947         qht_insert(&pdu->s->qpp_table, val, hash, NULL);
948     }
949     /* assuming generated affix to be suffix type, not prefix */
950     *path = (stbuf->st_ino << val->qp_affix.bits) | val->qp_affix.value;
951     return 0;
952 }
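
/*
 * Worked example, assuming a fresh table and qp_affix_next starting at 1:
 * for the first device seen, qid_inode_prefix_hash_bits() records a 1-bit
 * ino prefix (the affix for index 1 is 1 bit wide), so all inodes on that
 * device sharing the same topmost bit share one QppEntry. That entry gets
 * affix index 1, i.e. the 1-bit suffix value 1, and each such host inode is
 * exposed to the guest as (host_ino << 1) | 1.
 */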
953 
954 static int stat_to_qid(V9fsPDU *pdu, const struct stat *stbuf, V9fsQID *qidp)
955 {
956     int err;
957     size_t size;
958 
959     if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) {
960         /* map inode+device to qid path (fast path) */
961         err = qid_path_suffixmap(pdu, stbuf, &qidp->path);
962         if (err == -ENFILE) {
963             /* fast path didn't work, fall back to full map */
964             err = qid_path_fullmap(pdu, stbuf, &qidp->path);
965         }
966         if (err) {
967             return err;
968         }
969     } else {
970         if (pdu->s->dev_id != stbuf->st_dev) {
971             if (pdu->s->ctx.export_flags & V9FS_FORBID_MULTIDEVS) {
972                 error_report_once(
973                     "9p: Multiple devices detected in same VirtFS export. "
974                     "Access of guest to additional devices is (partly) "
975                     "denied due to virtfs option 'multidevs=forbid' being "
976                     "effective."
977                 );
978                 return -ENODEV;
979             } else {
980                 warn_report_once(
981                     "9p: Multiple devices detected in same VirtFS export, "
982                     "which might lead to file ID collisions and severe "
983                     "misbehaviours on guest! You should either use a "
984                     "separate export for each device shared from host or "
985                     "use virtfs option 'multidevs=remap'!"
986                 );
987             }
988         }
989         memset(&qidp->path, 0, sizeof(qidp->path));
990         size = MIN(sizeof(stbuf->st_ino), sizeof(qidp->path));
991         memcpy(&qidp->path, &stbuf->st_ino, size);
992     }
993 
994     qidp->version = stbuf->st_mtime ^ (stbuf->st_size << 8);
995     qidp->type = 0;
996     if (S_ISDIR(stbuf->st_mode)) {
997         qidp->type |= P9_QID_TYPE_DIR;
998     }
999     if (S_ISLNK(stbuf->st_mode)) {
1000         qidp->type |= P9_QID_TYPE_SYMLINK;
1001     }
1002 
1003     return 0;
1004 }
1005 
1006 V9fsPDU *pdu_alloc(V9fsState *s)
1007 {
1008     V9fsPDU *pdu = NULL;
1009 
1010     if (!QLIST_EMPTY(&s->free_list)) {
1011         pdu = QLIST_FIRST(&s->free_list);
1012         QLIST_REMOVE(pdu, next);
1013         QLIST_INSERT_HEAD(&s->active_list, pdu, next);
1014     }
1015     return pdu;
1016 }
1017 
1018 void pdu_free(V9fsPDU *pdu)
1019 {
1020     V9fsState *s = pdu->s;
1021 
1022     g_assert(!pdu->cancelled);
1023     QLIST_REMOVE(pdu, next);
1024     QLIST_INSERT_HEAD(&s->free_list, pdu, next);
1025 }
1026 
1027 static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len)
1028 {
1029     int8_t id = pdu->id + 1; /* Response */
1030     V9fsState *s = pdu->s;
1031     int ret;
1032 
1033     /*
1034      * The 9p spec requires that successfully cancelled pdus receive no reply.
1035      * Sending a reply would confuse clients because they would
1036      * assume that any EINTR is the actual result of the operation,
1037      * rather than a consequence of the cancellation. However, if
1038      * the operation completed (successfully or with an error other
1039      * than one caused by cancellation), we do send out that reply, both
1040      * for efficiency and to avoid confusing the rest of the state machine
1041      * that assumes passing a non-error here will mean a successful
1042      * transmission of the reply.
1043      */
1044     bool discard = pdu->cancelled && len == -EINTR;
1045     if (discard) {
1046         trace_v9fs_rcancel(pdu->tag, pdu->id);
1047         pdu->size = 0;
1048         goto out_notify;
1049     }
1050 
1051     if (len < 0) {
1052         int err = -len;
1053         len = 7;
1054 
1055         if (s->proto_version != V9FS_PROTO_2000L) {
1056             V9fsString str;
1057 
1058             str.data = strerror(err);
1059             str.size = strlen(str.data);
1060 
1061             ret = pdu_marshal(pdu, len, "s", &str);
1062             if (ret < 0) {
1063                 goto out_notify;
1064             }
1065             len += ret;
1066             id = P9_RERROR;
1067         } else {
1068             err = errno_to_dotl(err);
1069         }
1070 
1071         ret = pdu_marshal(pdu, len, "d", err);
1072         if (ret < 0) {
1073             goto out_notify;
1074         }
1075         len += ret;
1076 
1077         if (s->proto_version == V9FS_PROTO_2000L) {
1078             id = P9_RLERROR;
1079         }
1080         trace_v9fs_rerror(pdu->tag, pdu->id, err); /* Trace ERROR */
1081     }
1082 
1083     /* fill out the header */
1084     if (pdu_marshal(pdu, 0, "dbw", (int32_t)len, id, pdu->tag) < 0) {
1085         goto out_notify;
1086     }
1087 
1088     /* keep these in sync */
1089     pdu->size = len;
1090     pdu->id = id;
1091 
1092 out_notify:
1093     pdu->s->transport->push_and_notify(pdu);
1094 
1095     /* Now wakeup anybody waiting in flush for this request */
1096     if (!qemu_co_queue_next(&pdu->complete)) {
1097         pdu_free(pdu);
1098     }
1099 }
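
/*
 * The "dbw" triple marshalled above forms the standard 7 byte 9p message
 * header: size[4] + type[1] + tag[2]. This is also why the request handlers
 * in this file start (un)marshalling their payload at offset 7.
 */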
1100 
1101 static mode_t v9mode_to_mode(uint32_t mode, V9fsString *extension)
1102 {
1103     mode_t ret;
1104 
1105     ret = mode & 0777;
1106     if (mode & P9_STAT_MODE_DIR) {
1107         ret |= S_IFDIR;
1108     }
1109 
1110     if (mode & P9_STAT_MODE_SYMLINK) {
1111         ret |= S_IFLNK;
1112     }
1113     if (mode & P9_STAT_MODE_SOCKET) {
1114         ret |= S_IFSOCK;
1115     }
1116     if (mode & P9_STAT_MODE_NAMED_PIPE) {
1117         ret |= S_IFIFO;
1118     }
1119     if (mode & P9_STAT_MODE_DEVICE) {
1120         if (extension->size && extension->data[0] == 'c') {
1121             ret |= S_IFCHR;
1122         } else {
1123             ret |= S_IFBLK;
1124         }
1125     }
1126 
1127     if (!(ret & ~0777)) {
1128         ret |= S_IFREG;
1129     }
1130 
1131     if (mode & P9_STAT_MODE_SETUID) {
1132         ret |= S_ISUID;
1133     }
1134     if (mode & P9_STAT_MODE_SETGID) {
1135         ret |= S_ISGID;
1136     }
1137     if (mode & P9_STAT_MODE_SETVTX) {
1138         ret |= S_ISVTX;
1139     }
1140 
1141     return ret;
1142 }
1143 
1144 static int donttouch_stat(V9fsStat *stat)
1145 {
1146     if (stat->type == -1 &&
1147         stat->dev == -1 &&
1148         stat->qid.type == 0xff &&
1149         stat->qid.version == (uint32_t) -1 &&
1150         stat->qid.path == (uint64_t) -1 &&
1151         stat->mode == -1 &&
1152         stat->atime == -1 &&
1153         stat->mtime == -1 &&
1154         stat->length == -1 &&
1155         !stat->name.size &&
1156         !stat->uid.size &&
1157         !stat->gid.size &&
1158         !stat->muid.size &&
1159         stat->n_uid == -1 &&
1160         stat->n_gid == -1 &&
1161         stat->n_muid == -1) {
1162         return 1;
1163     }
1164 
1165     return 0;
1166 }
1167 
1168 static void v9fs_stat_init(V9fsStat *stat)
1169 {
1170     v9fs_string_init(&stat->name);
1171     v9fs_string_init(&stat->uid);
1172     v9fs_string_init(&stat->gid);
1173     v9fs_string_init(&stat->muid);
1174     v9fs_string_init(&stat->extension);
1175 }
1176 
1177 static void v9fs_stat_free(V9fsStat *stat)
1178 {
1179     v9fs_string_free(&stat->name);
1180     v9fs_string_free(&stat->uid);
1181     v9fs_string_free(&stat->gid);
1182     v9fs_string_free(&stat->muid);
1183     v9fs_string_free(&stat->extension);
1184 }
1185 
1186 static uint32_t stat_to_v9mode(const struct stat *stbuf)
1187 {
1188     uint32_t mode;
1189 
1190     mode = stbuf->st_mode & 0777;
1191     if (S_ISDIR(stbuf->st_mode)) {
1192         mode |= P9_STAT_MODE_DIR;
1193     }
1194 
1195     if (S_ISLNK(stbuf->st_mode)) {
1196         mode |= P9_STAT_MODE_SYMLINK;
1197     }
1198 
1199     if (S_ISSOCK(stbuf->st_mode)) {
1200         mode |= P9_STAT_MODE_SOCKET;
1201     }
1202 
1203     if (S_ISFIFO(stbuf->st_mode)) {
1204         mode |= P9_STAT_MODE_NAMED_PIPE;
1205     }
1206 
1207     if (S_ISBLK(stbuf->st_mode) || S_ISCHR(stbuf->st_mode)) {
1208         mode |= P9_STAT_MODE_DEVICE;
1209     }
1210 
1211     if (stbuf->st_mode & S_ISUID) {
1212         mode |= P9_STAT_MODE_SETUID;
1213     }
1214 
1215     if (stbuf->st_mode & S_ISGID) {
1216         mode |= P9_STAT_MODE_SETGID;
1217     }
1218 
1219     if (stbuf->st_mode & S_ISVTX) {
1220         mode |= P9_STAT_MODE_SETVTX;
1221     }
1222 
1223     return mode;
1224 }
1225 
1226 static int coroutine_fn stat_to_v9stat(V9fsPDU *pdu, V9fsPath *path,
1227                                        const char *basename,
1228                                        const struct stat *stbuf,
1229                                        V9fsStat *v9stat)
1230 {
1231     int err;
1232 
1233     memset(v9stat, 0, sizeof(*v9stat));
1234 
1235     err = stat_to_qid(pdu, stbuf, &v9stat->qid);
1236     if (err < 0) {
1237         return err;
1238     }
1239     v9stat->mode = stat_to_v9mode(stbuf);
1240     v9stat->atime = stbuf->st_atime;
1241     v9stat->mtime = stbuf->st_mtime;
1242     v9stat->length = stbuf->st_size;
1243 
1244     v9fs_string_free(&v9stat->uid);
1245     v9fs_string_free(&v9stat->gid);
1246     v9fs_string_free(&v9stat->muid);
1247 
1248     v9stat->n_uid = stbuf->st_uid;
1249     v9stat->n_gid = stbuf->st_gid;
1250     v9stat->n_muid = 0;
1251 
1252     v9fs_string_free(&v9stat->extension);
1253 
1254     if (v9stat->mode & P9_STAT_MODE_SYMLINK) {
1255         err = v9fs_co_readlink(pdu, path, &v9stat->extension);
1256         if (err < 0) {
1257             return err;
1258         }
1259     } else if (v9stat->mode & P9_STAT_MODE_DEVICE) {
1260         v9fs_string_sprintf(&v9stat->extension, "%c %u %u",
1261                 S_ISCHR(stbuf->st_mode) ? 'c' : 'b',
1262                 major(stbuf->st_rdev), minor(stbuf->st_rdev));
1263     } else if (S_ISDIR(stbuf->st_mode) || S_ISREG(stbuf->st_mode)) {
1264         v9fs_string_sprintf(&v9stat->extension, "%s %lu",
1265                 "HARDLINKCOUNT", (unsigned long)stbuf->st_nlink);
1266     }
1267 
1268     v9fs_string_sprintf(&v9stat->name, "%s", basename);
1269 
1270     v9stat->size = 61 +
1271         v9fs_string_size(&v9stat->name) +
1272         v9fs_string_size(&v9stat->uid) +
1273         v9fs_string_size(&v9stat->gid) +
1274         v9fs_string_size(&v9stat->muid) +
1275         v9fs_string_size(&v9stat->extension);
1276     return 0;
1277 }
1278 
1279 #define P9_STATS_MODE          0x00000001ULL
1280 #define P9_STATS_NLINK         0x00000002ULL
1281 #define P9_STATS_UID           0x00000004ULL
1282 #define P9_STATS_GID           0x00000008ULL
1283 #define P9_STATS_RDEV          0x00000010ULL
1284 #define P9_STATS_ATIME         0x00000020ULL
1285 #define P9_STATS_MTIME         0x00000040ULL
1286 #define P9_STATS_CTIME         0x00000080ULL
1287 #define P9_STATS_INO           0x00000100ULL
1288 #define P9_STATS_SIZE          0x00000200ULL
1289 #define P9_STATS_BLOCKS        0x00000400ULL
1290 
1291 #define P9_STATS_BTIME         0x00000800ULL
1292 #define P9_STATS_GEN           0x00001000ULL
1293 #define P9_STATS_DATA_VERSION  0x00002000ULL
1294 
1295 #define P9_STATS_BASIC         0x000007ffULL /* Mask for fields up to BLOCKS */
1296 #define P9_STATS_ALL           0x00003fffULL /* Mask for All fields above */
1297 
1298 
1299 /**
1300  * blksize_to_iounit() - Block size exposed to 9p client.
1301  * @pdu: 9p client request
1302  * @blksize: host filesystem's block size
1303  *
1304  * Return: block size
1305  *
1306  * Convert host filesystem's block size into an appropriate block size for
1307  * 9p client (guest OS side). The value returned suggests an "optimum" block
1308  * size for 9p I/O, i.e. to maximize performance.
1309  */
1310 static int32_t blksize_to_iounit(const V9fsPDU *pdu, int32_t blksize)
1311 {
1312     int32_t iounit = 0;
1313     V9fsState *s = pdu->s;
1314 
1315     /*
1316      * iounit should be a multiple of blksize (the host filesystem block
1317      * size) and must not exceed (client msize - P9_IOHDRSZ).
1318      */
1319     if (blksize) {
1320         iounit = QEMU_ALIGN_DOWN(s->msize - P9_IOHDRSZ, blksize);
1321     }
1322     if (!iounit) {
1323         iounit = s->msize - P9_IOHDRSZ;
1324     }
1325     return iounit;
1326 }
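
/*
 * For example, with a client msize of 128 KiB and a host block size of
 * 4 KiB this returns QEMU_ALIGN_DOWN(131072 - P9_IOHDRSZ, 4096) = 126976
 * bytes, i.e. 31 complete 4 KiB blocks, because subtracting the few header
 * bytes first makes the 32nd block incomplete.
 */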
1327 
1328 static int32_t stat_to_iounit(const V9fsPDU *pdu, const struct stat *stbuf)
1329 {
1330     return blksize_to_iounit(pdu, stbuf->st_blksize);
1331 }
1332 
1333 static int stat_to_v9stat_dotl(V9fsPDU *pdu, const struct stat *stbuf,
1334                                 V9fsStatDotl *v9lstat)
1335 {
1336     memset(v9lstat, 0, sizeof(*v9lstat));
1337 
1338     v9lstat->st_mode = stbuf->st_mode;
1339     v9lstat->st_nlink = stbuf->st_nlink;
1340     v9lstat->st_uid = stbuf->st_uid;
1341     v9lstat->st_gid = stbuf->st_gid;
1342     v9lstat->st_rdev = host_dev_to_dotl_dev(stbuf->st_rdev);
1343     v9lstat->st_size = stbuf->st_size;
1344     v9lstat->st_blksize = stat_to_iounit(pdu, stbuf);
1345     v9lstat->st_blocks = stbuf->st_blocks;
1346     v9lstat->st_atime_sec = stbuf->st_atime;
1347     v9lstat->st_mtime_sec = stbuf->st_mtime;
1348     v9lstat->st_ctime_sec = stbuf->st_ctime;
1349 #ifdef CONFIG_DARWIN
1350     v9lstat->st_atime_nsec = stbuf->st_atimespec.tv_nsec;
1351     v9lstat->st_mtime_nsec = stbuf->st_mtimespec.tv_nsec;
1352     v9lstat->st_ctime_nsec = stbuf->st_ctimespec.tv_nsec;
1353 #else
1354     v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec;
1355     v9lstat->st_mtime_nsec = stbuf->st_mtim.tv_nsec;
1356     v9lstat->st_ctime_nsec = stbuf->st_ctim.tv_nsec;
1357 #endif
1358     /* Currently we only support BASIC fields in stat */
1359     v9lstat->st_result_mask = P9_STATS_BASIC;
1360 
1361     return stat_to_qid(pdu, stbuf, &v9lstat->qid);
1362 }
1363 
1364 static void print_sg(struct iovec *sg, int cnt)
1365 {
1366     int i;
1367 
1368     printf("sg[%d]: {", cnt);
1369     for (i = 0; i < cnt; i++) {
1370         if (i) {
1371             printf(", ");
1372         }
1373         printf("(%p, %zd)", sg[i].iov_base, sg[i].iov_len);
1374     }
1375     printf("}\n");
1376 }
1377 
1378 /* This is called only for path name based fids */
1379 static void v9fs_fix_path(V9fsPath *dst, V9fsPath *src, int len)
1380 {
1381     V9fsPath str;
1382     v9fs_path_init(&str);
1383     v9fs_path_copy(&str, dst);
1384     v9fs_path_sprintf(dst, "%s%s", src->data, str.data + len);
1385     v9fs_path_free(&str);
1386 }
1387 
1388 static inline bool is_ro_export(FsContext *ctx)
1389 {
1390     return ctx->export_flags & V9FS_RDONLY;
1391 }
1392 
1393 static void coroutine_fn v9fs_version(void *opaque)
1394 {
1395     ssize_t err;
1396     V9fsPDU *pdu = opaque;
1397     V9fsState *s = pdu->s;
1398     V9fsString version;
1399     size_t offset = 7;
1400 
1401     v9fs_string_init(&version);
1402     err = pdu_unmarshal(pdu, offset, "ds", &s->msize, &version);
1403     if (err < 0) {
1404         goto out;
1405     }
1406     trace_v9fs_version(pdu->tag, pdu->id, s->msize, version.data);
1407 
1408     virtfs_reset(pdu);
1409 
1410     if (!strcmp(version.data, "9P2000.u")) {
1411         s->proto_version = V9FS_PROTO_2000U;
1412     } else if (!strcmp(version.data, "9P2000.L")) {
1413         s->proto_version = V9FS_PROTO_2000L;
1414     } else {
1415         v9fs_string_sprintf(&version, "unknown");
1416         /* skip min. msize check, reporting invalid version has priority */
1417         goto marshal;
1418     }
1419 
1420     if (s->msize < P9_MIN_MSIZE) {
1421         err = -EMSGSIZE;
1422         error_report(
1423             "9pfs: Client requested msize < minimum msize ("
1424             stringify(P9_MIN_MSIZE) ") supported by this server."
1425         );
1426         goto out;
1427     }
1428 
1429     /* 8192 is the default msize of Linux clients */
1430     if (s->msize <= 8192 && !(s->ctx.export_flags & V9FS_NO_PERF_WARN)) {
1431         warn_report_once(
1432             "9p: degraded performance: a reasonably high msize should be "
1433             "chosen on client/guest side (chosen msize is <= 8192). See "
1434             "https://wiki.qemu.org/Documentation/9psetup#msize for details."
1435         );
1436     }
1437 
1438 marshal:
1439     err = pdu_marshal(pdu, offset, "ds", s->msize, &version);
1440     if (err < 0) {
1441         goto out;
1442     }
1443     err += offset;
1444     trace_v9fs_version_return(pdu->tag, pdu->id, s->msize, version.data);
1445 out:
1446     pdu_complete(pdu, err);
1447     v9fs_string_free(&version);
1448 }
1449 
1450 static void coroutine_fn v9fs_attach(void *opaque)
1451 {
1452     V9fsPDU *pdu = opaque;
1453     V9fsState *s = pdu->s;
1454     int32_t fid, afid, n_uname;
1455     V9fsString uname, aname;
1456     V9fsFidState *fidp;
1457     size_t offset = 7;
1458     V9fsQID qid;
1459     ssize_t err;
1460     struct stat stbuf;
1461 
1462     v9fs_string_init(&uname);
1463     v9fs_string_init(&aname);
1464     err = pdu_unmarshal(pdu, offset, "ddssd", &fid,
1465                         &afid, &uname, &aname, &n_uname);
1466     if (err < 0) {
1467         goto out_nofid;
1468     }
1469     trace_v9fs_attach(pdu->tag, pdu->id, fid, afid, uname.data, aname.data);
1470 
1471     fidp = alloc_fid(s, fid);
1472     if (fidp == NULL) {
1473         err = -EINVAL;
1474         goto out_nofid;
1475     }
1476     fidp->uid = n_uname;
1477     err = v9fs_co_name_to_path(pdu, NULL, "/", &fidp->path);
1478     if (err < 0) {
1479         err = -EINVAL;
1480         clunk_fid(s, fid);
1481         goto out;
1482     }
1483     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1484     if (err < 0) {
1485         err = -EINVAL;
1486         clunk_fid(s, fid);
1487         goto out;
1488     }
1489     err = stat_to_qid(pdu, &stbuf, &qid);
1490     if (err < 0) {
1491         err = -EINVAL;
1492         clunk_fid(s, fid);
1493         goto out;
1494     }
1495 
1496     /*
1497      * Disable migration if we haven't done so already.
1498      * attach could get called multiple times for the same export.
1499      */
1500     if (!s->migration_blocker) {
1501         error_setg(&s->migration_blocker,
1502                    "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'",
1503                    s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
1504         err = migrate_add_blocker(&s->migration_blocker, NULL);
1505         if (err < 0) {
1506             clunk_fid(s, fid);
1507             goto out;
1508         }
1509         s->root_fid = fid;
1510     }
1511 
1512     err = pdu_marshal(pdu, offset, "Q", &qid);
1513     if (err < 0) {
1514         clunk_fid(s, fid);
1515         goto out;
1516     }
1517     err += offset;
1518 
1519     memcpy(&s->root_st, &stbuf, sizeof(stbuf));
1520     trace_v9fs_attach_return(pdu->tag, pdu->id,
1521                              qid.type, qid.version, qid.path);
1522 out:
1523     put_fid(pdu, fidp);
1524 out_nofid:
1525     pdu_complete(pdu, err);
1526     v9fs_string_free(&uname);
1527     v9fs_string_free(&aname);
1528 }
1529 
1530 static void coroutine_fn v9fs_stat(void *opaque)
1531 {
1532     int32_t fid;
1533     V9fsStat v9stat;
1534     ssize_t err = 0;
1535     size_t offset = 7;
1536     struct stat stbuf;
1537     V9fsFidState *fidp;
1538     V9fsPDU *pdu = opaque;
1539     char *basename;
1540 
1541     err = pdu_unmarshal(pdu, offset, "d", &fid);
1542     if (err < 0) {
1543         goto out_nofid;
1544     }
1545     trace_v9fs_stat(pdu->tag, pdu->id, fid);
1546 
1547     fidp = get_fid(pdu, fid);
1548     if (fidp == NULL) {
1549         err = -ENOENT;
1550         goto out_nofid;
1551     }
1552     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1553     if (err < 0) {
1554         goto out;
1555     }
1556     basename = g_path_get_basename(fidp->path.data);
1557     err = stat_to_v9stat(pdu, &fidp->path, basename, &stbuf, &v9stat);
1558     g_free(basename);
1559     if (err < 0) {
1560         goto out;
1561     }
1562     err = pdu_marshal(pdu, offset, "wS", 0, &v9stat);
1563     if (err < 0) {
1564         v9fs_stat_free(&v9stat);
1565         goto out;
1566     }
1567     trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode,
1568                            v9stat.atime, v9stat.mtime, v9stat.length);
1569     err += offset;
1570     v9fs_stat_free(&v9stat);
1571 out:
1572     put_fid(pdu, fidp);
1573 out_nofid:
1574     pdu_complete(pdu, err);
1575 }
1576 
1577 static void coroutine_fn v9fs_getattr(void *opaque)
1578 {
1579     int32_t fid;
1580     size_t offset = 7;
1581     ssize_t retval = 0;
1582     struct stat stbuf;
1583     V9fsFidState *fidp;
1584     uint64_t request_mask;
1585     V9fsStatDotl v9stat_dotl;
1586     V9fsPDU *pdu = opaque;
1587 
1588     retval = pdu_unmarshal(pdu, offset, "dq", &fid, &request_mask);
1589     if (retval < 0) {
1590         goto out_nofid;
1591     }
1592     trace_v9fs_getattr(pdu->tag, pdu->id, fid, request_mask);
1593 
1594     fidp = get_fid(pdu, fid);
1595     if (fidp == NULL) {
1596         retval = -ENOENT;
1597         goto out_nofid;
1598     }
1599     if ((fidp->fid_type == P9_FID_FILE && fidp->fs.fd != -1) ||
1600         (fidp->fid_type == P9_FID_DIR && fidp->fs.dir.stream))
1601     {
1602         retval = v9fs_co_fstat(pdu, fidp, &stbuf);
1603     } else {
1604         retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1605     }
1606     if (retval < 0) {
1607         goto out;
1608     }
1609     retval = stat_to_v9stat_dotl(pdu, &stbuf, &v9stat_dotl);
1610     if (retval < 0) {
1611         goto out;
1612     }
1613 
1614     /* Fill st_gen if requested and supported by the underlying fs */
1615     if (request_mask & P9_STATS_GEN) {
1616         retval = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, &v9stat_dotl);
1617         switch (retval) {
1618         case 0:
1619             /* we have valid st_gen: update result mask */
1620             v9stat_dotl.st_result_mask |= P9_STATS_GEN;
1621             break;
1622         case -EINTR:
1623             /* request cancelled, e.g. by Tflush */
1624             goto out;
1625         default:
1626             /* failed to get st_gen: not fatal, ignore */
1627             break;
1628         }
1629     }
1630     retval = pdu_marshal(pdu, offset, "A", &v9stat_dotl);
1631     if (retval < 0) {
1632         goto out;
1633     }
1634     retval += offset;
1635     trace_v9fs_getattr_return(pdu->tag, pdu->id, v9stat_dotl.st_result_mask,
1636                               v9stat_dotl.st_mode, v9stat_dotl.st_uid,
1637                               v9stat_dotl.st_gid);
1638 out:
1639     put_fid(pdu, fidp);
1640 out_nofid:
1641     pdu_complete(pdu, retval);
1642 }
1643 
1644 /* Attribute flags */
1645 #define P9_ATTR_MODE       (1 << 0)
1646 #define P9_ATTR_UID        (1 << 1)
1647 #define P9_ATTR_GID        (1 << 2)
1648 #define P9_ATTR_SIZE       (1 << 3)
1649 #define P9_ATTR_ATIME      (1 << 4)
1650 #define P9_ATTR_MTIME      (1 << 5)
1651 #define P9_ATTR_CTIME      (1 << 6)
1652 #define P9_ATTR_ATIME_SET  (1 << 7)
1653 #define P9_ATTR_MTIME_SET  (1 << 8)
1654 
1655 #define P9_ATTR_MASK    127
1656 
1657 static void coroutine_fn v9fs_setattr(void *opaque)
1658 {
1659     int err = 0;
1660     int32_t fid;
1661     V9fsFidState *fidp;
1662     size_t offset = 7;
1663     V9fsIattr v9iattr;
1664     V9fsPDU *pdu = opaque;
1665 
1666     err = pdu_unmarshal(pdu, offset, "dI", &fid, &v9iattr);
1667     if (err < 0) {
1668         goto out_nofid;
1669     }
1670 
1671     trace_v9fs_setattr(pdu->tag, pdu->id, fid,
1672                        v9iattr.valid, v9iattr.mode, v9iattr.uid, v9iattr.gid,
1673                        v9iattr.size, v9iattr.atime_sec, v9iattr.mtime_sec);
1674 
1675     fidp = get_fid(pdu, fid);
1676     if (fidp == NULL) {
1677         err = -EINVAL;
1678         goto out_nofid;
1679     }
1680     if (v9iattr.valid & P9_ATTR_MODE) {
1681         err = v9fs_co_chmod(pdu, &fidp->path, v9iattr.mode);
1682         if (err < 0) {
1683             goto out;
1684         }
1685     }
1686     if (v9iattr.valid & (P9_ATTR_ATIME | P9_ATTR_MTIME)) {
1687         struct timespec times[2];
1688         if (v9iattr.valid & P9_ATTR_ATIME) {
1689             if (v9iattr.valid & P9_ATTR_ATIME_SET) {
1690                 times[0].tv_sec = v9iattr.atime_sec;
1691                 times[0].tv_nsec = v9iattr.atime_nsec;
1692             } else {
1693                 times[0].tv_nsec = UTIME_NOW;
1694             }
1695         } else {
1696             times[0].tv_nsec = UTIME_OMIT;
1697         }
1698         if (v9iattr.valid & P9_ATTR_MTIME) {
1699             if (v9iattr.valid & P9_ATTR_MTIME_SET) {
1700                 times[1].tv_sec = v9iattr.mtime_sec;
1701                 times[1].tv_nsec = v9iattr.mtime_nsec;
1702             } else {
1703                 times[1].tv_nsec = UTIME_NOW;
1704             }
1705         } else {
1706             times[1].tv_nsec = UTIME_OMIT;
1707         }
1708         err = v9fs_co_utimensat(pdu, &fidp->path, times);
1709         if (err < 0) {
1710             goto out;
1711         }
1712     }
1713     /*
1714      * If the only valid entry in iattr is ctime, we can call
1715      * chown(-1, -1) to update the ctime of the file.
1716      */
1717     if ((v9iattr.valid & (P9_ATTR_UID | P9_ATTR_GID)) ||
1718         ((v9iattr.valid & P9_ATTR_CTIME)
1719          && !((v9iattr.valid & P9_ATTR_MASK) & ~P9_ATTR_CTIME))) {
1720         if (!(v9iattr.valid & P9_ATTR_UID)) {
1721             v9iattr.uid = -1;
1722         }
1723         if (!(v9iattr.valid & P9_ATTR_GID)) {
1724             v9iattr.gid = -1;
1725         }
1726         err = v9fs_co_chown(pdu, &fidp->path, v9iattr.uid,
1727                             v9iattr.gid);
1728         if (err < 0) {
1729             goto out;
1730         }
1731     }
1732     if (v9iattr.valid & (P9_ATTR_SIZE)) {
1733         err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size);
1734         if (err < 0) {
1735             goto out;
1736         }
1737     }
1738     err = offset;
1739     trace_v9fs_setattr_return(pdu->tag, pdu->id);
1740 out:
1741     put_fid(pdu, fidp);
1742 out_nofid:
1743     pdu_complete(pdu, err);
1744 }
1745 
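     /*
      * Marshal the Rwalk payload: nwqid[2] followed by nwqid qid[13] entries.
      * Returns the total response size so far (the 7-byte header offset
      * included) or a negative error.
      */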
1746 static int v9fs_walk_marshal(V9fsPDU *pdu, uint16_t nwnames, V9fsQID *qids)
1747 {
1748     int i;
1749     ssize_t err;
1750     size_t offset = 7;
1751 
1752     err = pdu_marshal(pdu, offset, "w", nwnames);
1753     if (err < 0) {
1754         return err;
1755     }
1756     offset += err;
1757     for (i = 0; i < nwnames; i++) {
1758         err = pdu_marshal(pdu, offset, "Q", &qids[i]);
1759         if (err < 0) {
1760             return err;
1761         }
1762         offset += err;
1763     }
1764     return offset;
1765 }
1766 
1767 static bool name_is_illegal(const char *name)
1768 {
1769     return !*name || strchr(name, '/') != NULL;
1770 }
1771 
1772 static bool same_stat_id(const struct stat *a, const struct stat *b)
1773 {
1774     return a->st_dev == b->st_dev && a->st_ino == b->st_ino;
1775 }
1776 
1777 /*
1778  * Returns a (newly allocated) comma-separated string representation of the
1779  * passed array, for logging (tracing) purposes in the "v9fs_walk" trace event.
1780  *
1781  * It is the caller's responsibility to free the returned string.
1782  */
1783 static char *trace_v9fs_walk_wnames(V9fsString *wnames, size_t nwnames)
1784 {
1785     g_autofree char **arr = g_malloc0_n(nwnames + 1, sizeof(char *));
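         /*
          * Only the pointer array itself is auto-freed here; the strings it
          * points to are owned by the caller's wnames[] and must not be freed.
          */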
1786     for (size_t i = 0; i < nwnames; ++i) {
1787         arr[i] = wnames[i].data;
1788     }
1789     return g_strjoinv(", ", arr);
1790 }
1791 
1792 static void coroutine_fn v9fs_walk(void *opaque)
1793 {
1794     int name_idx, nwalked;
1795     g_autofree V9fsQID *qids = NULL;
1796     int i, err = 0, any_err = 0;
1797     V9fsPath dpath, path;
1798     P9ARRAY_REF(V9fsPath) pathes = NULL;
1799     uint16_t nwnames;
1800     struct stat stbuf, fidst;
1801     g_autofree struct stat *stbufs = NULL;
1802     size_t offset = 7;
1803     int32_t fid, newfid;
1804     P9ARRAY_REF(V9fsString) wnames = NULL;
1805     g_autofree char *trace_wnames = NULL;
1806     V9fsFidState *fidp;
1807     V9fsFidState *newfidp = NULL;
1808     V9fsPDU *pdu = opaque;
1809     V9fsState *s = pdu->s;
1810     V9fsQID qid;
1811 
1812     err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames);
1813     if (err < 0) {
1814         pdu_complete(pdu, err);
1815         return;
1816     }
1817     offset += err;
1818 
1819     if (nwnames > P9_MAXWELEM) {
1820         err = -EINVAL;
1821         goto out_nofid_nownames;
1822     }
1823     if (nwnames) {
1824         P9ARRAY_NEW(V9fsString, wnames, nwnames);
1825         qids   = g_new0(V9fsQID, nwnames);
1826         stbufs = g_new0(struct stat, nwnames);
1827         P9ARRAY_NEW(V9fsPath, pathes, nwnames);
1828         for (i = 0; i < nwnames; i++) {
1829             err = pdu_unmarshal(pdu, offset, "s", &wnames[i]);
1830             if (err < 0) {
1831                 goto out_nofid_nownames;
1832             }
1833             if (name_is_illegal(wnames[i].data)) {
1834                 err = -ENOENT;
1835                 goto out_nofid_nownames;
1836             }
1837             offset += err;
1838         }
1839         if (trace_event_get_state_backends(TRACE_V9FS_WALK)) {
1840             trace_wnames = trace_v9fs_walk_wnames(wnames, nwnames);
1841             trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames,
1842                             trace_wnames);
1843         }
1844     } else {
1845         trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, "");
1846     }
1847 
1848     fidp = get_fid(pdu, fid);
1849     if (fidp == NULL) {
1850         err = -ENOENT;
1851         goto out_nofid;
1852     }
1853 
1854     v9fs_path_init(&dpath);
1855     v9fs_path_init(&path);
1856     /*
1857      * Both dpath and path initially point to fidp's path.
1858      * This is needed to handle requests with nwnames == 0.
1859      */
1860     v9fs_path_copy(&dpath, &fidp->path);
1861     v9fs_path_copy(&path, &fidp->path);
1862 
1863     /*
1864      * To keep latency (i.e. overall execution time for processing this
1865      * Twalk client request) as small as possible, run all the required fs
1866      * driver code altogether inside the following block.
1867      */
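         /*
          * Note: errors are additionally accumulated in any_err (besides err)
          * so that, if a failure happens after the first successfully walked
          * element, a partial Rwalk covering the elements walked so far can
          * still be sent instead of an Rlerror, as the protocol expects.
          */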
1868     v9fs_co_run_in_worker({
1869         nwalked = 0;
1870         if (v9fs_request_cancelled(pdu)) {
1871             any_err |= err = -EINTR;
1872             break;
1873         }
1874         err = s->ops->lstat(&s->ctx, &dpath, &fidst);
1875         if (err < 0) {
1876             any_err |= err = -errno;
1877             break;
1878         }
1879         stbuf = fidst;
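             /*
              * Walk each path element below; note that ".." at the export
              * root is deliberately not resolved (the same_stat_id() check),
              * so a client cannot walk above the exported directory tree.
              */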
1880         for (; nwalked < nwnames; nwalked++) {
1881             if (v9fs_request_cancelled(pdu)) {
1882                 any_err |= err = -EINTR;
1883                 break;
1884             }
1885             if (!same_stat_id(&pdu->s->root_st, &stbuf) ||
1886                 strcmp("..", wnames[nwalked].data))
1887             {
1888                 err = s->ops->name_to_path(&s->ctx, &dpath,
1889                                            wnames[nwalked].data,
1890                                            &pathes[nwalked]);
1891                 if (err < 0) {
1892                     any_err |= err = -errno;
1893                     break;
1894                 }
1895                 if (v9fs_request_cancelled(pdu)) {
1896                     any_err |= err = -EINTR;
1897                     break;
1898                 }
1899                 err = s->ops->lstat(&s->ctx, &pathes[nwalked], &stbuf);
1900                 if (err < 0) {
1901                     any_err |= err = -errno;
1902                     break;
1903                 }
1904                 stbufs[nwalked] = stbuf;
1905                 v9fs_path_copy(&dpath, &pathes[nwalked]);
1906             }
1907         }
1908     });
1909     /*
1910      * Handle all the rest of this Twalk request on the main thread ...
1911      *
1912      * NOTE: -EINTR is an exception where we deviate from the protocol spec
1913      * and simply send an Rlerror response instead of bothering to assemble
1914      * a (reduced) Rwalk response, because -EINTR is always the result of a
1915      * Tflush request, so the client would no longer wait for a response in
1916      * this case anyway.
1917      */
1918     if ((err < 0 && !nwalked) || err == -EINTR) {
1919         goto out;
1920     }
1921 
1922     any_err |= err = stat_to_qid(pdu, &fidst, &qid);
1923     if (err < 0 && !nwalked) {
1924         goto out;
1925     }
1926     stbuf = fidst;
1927 
1928     /* reset dpath and path */
1929     v9fs_path_copy(&dpath, &fidp->path);
1930     v9fs_path_copy(&path, &fidp->path);
1931 
1932     for (name_idx = 0; name_idx < nwalked; name_idx++) {
1933         if (!same_stat_id(&pdu->s->root_st, &stbuf) ||
1934             strcmp("..", wnames[name_idx].data))
1935         {
1936             stbuf = stbufs[name_idx];
1937             any_err |= err = stat_to_qid(pdu, &stbuf, &qid);
1938             if (err < 0) {
1939                 break;
1940             }
1941             v9fs_path_copy(&path, &pathes[name_idx]);
1942             v9fs_path_copy(&dpath, &path);
1943         }
1944         memcpy(&qids[name_idx], &qid, sizeof(qid));
1945     }
1946     if (any_err < 0) {
1947         if (!name_idx) {
1948             /* don't send any QIDs, send Rlerror instead */
1949             goto out;
1950         } else {
1951             /* send QIDs (not Rlerror), but fid MUST remain unaffected */
1952             goto send_qids;
1953         }
1954     }
1955     if (fid == newfid) {
1956         if (fidp->fid_type != P9_FID_NONE) {
1957             err = -EINVAL;
1958             goto out;
1959         }
1960         v9fs_path_write_lock(s);
1961         v9fs_path_copy(&fidp->path, &path);
1962         v9fs_path_unlock(s);
1963     } else {
1964         newfidp = alloc_fid(s, newfid);
1965         if (newfidp == NULL) {
1966             err = -EINVAL;
1967             goto out;
1968         }
1969         newfidp->uid = fidp->uid;
1970         v9fs_path_copy(&newfidp->path, &path);
1971     }
1972 send_qids:
1973     err = v9fs_walk_marshal(pdu, name_idx, qids);
1974     trace_v9fs_walk_return(pdu->tag, pdu->id, name_idx, qids);
1975 out:
1976     put_fid(pdu, fidp);
1977     if (newfidp) {
1978         put_fid(pdu, newfidp);
1979     }
1980     v9fs_path_free(&dpath);
1981     v9fs_path_free(&path);
1982     goto out_pdu_complete;
1983 out_nofid_nownames:
1984     trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, "<?>");
1985 out_nofid:
1986 out_pdu_complete:
1987     pdu_complete(pdu, err);
1988 }
1989 
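     /*
      * Derive the iounit advertised to the client from the host filesystem's
      * preferred block size; if statfs fails, 0 is passed to
      * blksize_to_iounit(), which presumably substitutes an msize-based
      * default.
      */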
1990 static int32_t coroutine_fn get_iounit(V9fsPDU *pdu, V9fsPath *path)
1991 {
1992     struct statfs stbuf;
1993     int err = v9fs_co_statfs(pdu, path, &stbuf);
1994 
1995     return blksize_to_iounit(pdu, (err >= 0) ? stbuf.f_bsize : 0);
1996 }
1997 
1998 static void coroutine_fn v9fs_open(void *opaque)
1999 {
2000     int flags;
2001     int32_t fid;
2002     int32_t mode;
2003     V9fsQID qid;
2004     int iounit = 0;
2005     ssize_t err = 0;
2006     size_t offset = 7;
2007     struct stat stbuf;
2008     V9fsFidState *fidp;
2009     V9fsPDU *pdu = opaque;
2010     V9fsState *s = pdu->s;
2011     g_autofree char *trace_oflags = NULL;
2012 
2013     if (s->proto_version == V9FS_PROTO_2000L) {
2014         err = pdu_unmarshal(pdu, offset, "dd", &fid, &mode);
2015     } else {
2016         uint8_t modebyte;
2017         err = pdu_unmarshal(pdu, offset, "db", &fid, &modebyte);
2018         mode = modebyte;
2019     }
2020     if (err < 0) {
2021         goto out_nofid;
2022     }
2023     if (trace_event_get_state_backends(TRACE_V9FS_OPEN)) {
2024         trace_oflags = qemu_open_flags_tostr(
2025             (s->proto_version == V9FS_PROTO_2000L) ?
2026                 dotl_to_open_flags(mode) : omode_to_uflags(mode)
2027         );
2028         trace_v9fs_open(pdu->tag, pdu->id, fid, mode, trace_oflags);
2029     }
2030 
2031     fidp = get_fid(pdu, fid);
2032     if (fidp == NULL) {
2033         err = -ENOENT;
2034         goto out_nofid;
2035     }
2036     if (fidp->fid_type != P9_FID_NONE) {
2037         err = -EINVAL;
2038         goto out;
2039     }
2040 
2041     err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
2042     if (err < 0) {
2043         goto out;
2044     }
2045     err = stat_to_qid(pdu, &stbuf, &qid);
2046     if (err < 0) {
2047         goto out;
2048     }
2049     if (S_ISDIR(stbuf.st_mode)) {
2050         err = v9fs_co_opendir(pdu, fidp);
2051         if (err < 0) {
2052             goto out;
2053         }
2054         fidp->fid_type = P9_FID_DIR;
2055         err = pdu_marshal(pdu, offset, "Qd", &qid, 0);
2056         if (err < 0) {
2057             goto out;
2058         }
2059         err += offset;
2060     } else {
2061         if (s->proto_version == V9FS_PROTO_2000L) {
2062             flags = get_dotl_openflags(s, mode);
2063         } else {
2064             flags = omode_to_uflags(mode);
2065         }
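             /* On a read-only export, refuse any open mode that implies writing. */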
2066         if (is_ro_export(&s->ctx)) {
2067             if (mode & O_WRONLY || mode & O_RDWR ||
2068                 mode & O_APPEND || mode & O_TRUNC) {
2069                 err = -EROFS;
2070                 goto out;
2071             }
2072         }
2073         err = v9fs_co_open(pdu, fidp, flags);
2074         if (err < 0) {
2075             goto out;
2076         }
2077         fidp->fid_type = P9_FID_FILE;
2078         fidp->open_flags = flags;
2079         if (flags & O_EXCL) {
2080             /*
2081              * We let the host file system do the O_EXCL check.
2082              * We should not reclaim such an fd.
2083              */
2084             fidp->flags |= FID_NON_RECLAIMABLE;
2085         }
2086         iounit = get_iounit(pdu, &fidp->path);
2087         err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2088         if (err < 0) {
2089             goto out;
2090         }
2091         err += offset;
2092     }
2093     trace_v9fs_open_return(pdu->tag, pdu->id,
2094                            qid.type, qid.version, qid.path, iounit);
2095 out:
2096     put_fid(pdu, fidp);
2097 out_nofid:
2098     pdu_complete(pdu, err);
2099 }
2100 
2101 static void coroutine_fn v9fs_lcreate(void *opaque)
2102 {
2103     int32_t dfid, flags, mode;
2104     gid_t gid;
2105     ssize_t err = 0;
2106     ssize_t offset = 7;
2107     V9fsString name;
2108     V9fsFidState *fidp;
2109     struct stat stbuf;
2110     V9fsQID qid;
2111     int32_t iounit;
2112     V9fsPDU *pdu = opaque;
2113 
2114     v9fs_string_init(&name);
2115     err = pdu_unmarshal(pdu, offset, "dsddd", &dfid,
2116                         &name, &flags, &mode, &gid);
2117     if (err < 0) {
2118         goto out_nofid;
2119     }
2120     trace_v9fs_lcreate(pdu->tag, pdu->id, dfid, flags, mode, gid);
2121 
2122     if (name_is_illegal(name.data)) {
2123         err = -ENOENT;
2124         goto out_nofid;
2125     }
2126 
2127     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
2128         err = -EEXIST;
2129         goto out_nofid;
2130     }
2131 
2132     fidp = get_fid(pdu, dfid);
2133     if (fidp == NULL) {
2134         err = -ENOENT;
2135         goto out_nofid;
2136     }
2137     if (fidp->fid_type != P9_FID_NONE) {
2138         err = -EINVAL;
2139         goto out;
2140     }
2141 
2142     flags = get_dotl_openflags(pdu->s, flags);
2143     err = v9fs_co_open2(pdu, fidp, &name, gid,
2144                         flags | O_CREAT, mode, &stbuf);
2145     if (err < 0) {
2146         goto out;
2147     }
2148     fidp->fid_type = P9_FID_FILE;
2149     fidp->open_flags = flags;
2150     if (flags & O_EXCL) {
2151         /*
2152              * We let the host file system do the O_EXCL check.
2153              * We should not reclaim such an fd.
2154          */
2155         fidp->flags |= FID_NON_RECLAIMABLE;
2156     }
2157     iounit = get_iounit(pdu, &fidp->path);
2158     err = stat_to_qid(pdu, &stbuf, &qid);
2159     if (err < 0) {
2160         goto out;
2161     }
2162     err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2163     if (err < 0) {
2164         goto out;
2165     }
2166     err += offset;
2167     trace_v9fs_lcreate_return(pdu->tag, pdu->id,
2168                               qid.type, qid.version, qid.path, iounit);
2169 out:
2170     put_fid(pdu, fidp);
2171 out_nofid:
2172     pdu_complete(pdu, err);
2173     v9fs_string_free(&name);
2174 }
2175 
2176 static void coroutine_fn v9fs_fsync(void *opaque)
2177 {
2178     int err;
2179     int32_t fid;
2180     int datasync;
2181     size_t offset = 7;
2182     V9fsFidState *fidp;
2183     V9fsPDU *pdu = opaque;
2184 
2185     err = pdu_unmarshal(pdu, offset, "dd", &fid, &datasync);
2186     if (err < 0) {
2187         goto out_nofid;
2188     }
2189     trace_v9fs_fsync(pdu->tag, pdu->id, fid, datasync);
2190 
2191     fidp = get_fid(pdu, fid);
2192     if (fidp == NULL) {
2193         err = -ENOENT;
2194         goto out_nofid;
2195     }
2196     err = v9fs_co_fsync(pdu, fidp, datasync);
2197     if (!err) {
2198         err = offset;
2199     }
2200     put_fid(pdu, fidp);
2201 out_nofid:
2202     pdu_complete(pdu, err);
2203 }
2204 
2205 static void coroutine_fn v9fs_clunk(void *opaque)
2206 {
2207     int err;
2208     int32_t fid;
2209     size_t offset = 7;
2210     V9fsFidState *fidp;
2211     V9fsPDU *pdu = opaque;
2212     V9fsState *s = pdu->s;
2213 
2214     err = pdu_unmarshal(pdu, offset, "d", &fid);
2215     if (err < 0) {
2216         goto out_nofid;
2217     }
2218     trace_v9fs_clunk(pdu->tag, pdu->id, fid);
2219 
2220     fidp = clunk_fid(s, fid);
2221     if (fidp == NULL) {
2222         err = -ENOENT;
2223         goto out_nofid;
2224     }
2225     /*
2226      * Bump the ref so that put_fid will
2227      * free the fid.
2228      */
2229     fidp->ref++;
2230     err = put_fid(pdu, fidp);
2231     if (!err) {
2232         err = offset;
2233     }
2234 out_nofid:
2235     pdu_complete(pdu, err);
2236 }
2237 
2238 /*
2239  * Create a QEMUIOVector for a sub-region of PDU iovecs
2240  *
2241  * @qiov:       uninitialized QEMUIOVector
2242  * @skip:       number of bytes to skip from beginning of PDU
2243  * @size:       number of bytes to include
2244  * @is_write:   true - write, false - read
2245  *
2246  * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
2247  * with qemu_iovec_destroy().
2248  */
2249 static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
2250                                     size_t skip, size_t size,
2251                                     bool is_write)
2252 {
2253     QEMUIOVector elem;
2254     struct iovec *iov;
2255     unsigned int niov;
2256 
2257     if (is_write) {
2258         pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
2259     } else {
2260         pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
2261     }
2262 
2263     qemu_iovec_init_external(&elem, iov, niov);
2264     qemu_iovec_init(qiov, niov);
2265     qemu_iovec_concat(qiov, &elem, skip, size);
2266 }
2267 
2268 static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
2269                            uint64_t off, uint32_t max_count)
2270 {
2271     ssize_t err;
2272     size_t offset = 7;
2273     uint64_t read_count;
2274     QEMUIOVector qiov_full;
2275 
2276     if (fidp->fs.xattr.len < off) {
2277         read_count = 0;
2278     } else {
2279         read_count = fidp->fs.xattr.len - off;
2280     }
2281     if (read_count > max_count) {
2282         read_count = max_count;
2283     }
2284     err = pdu_marshal(pdu, offset, "d", read_count);
2285     if (err < 0) {
2286         return err;
2287     }
2288     offset += err;
2289 
2290     v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
2291     err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
2292                     ((char *)fidp->fs.xattr.value) + off,
2293                     read_count);
2294     qemu_iovec_destroy(&qiov_full);
2295     if (err < 0) {
2296         return err;
2297     }
2298     offset += err;
2299     return offset;
2300 }
2301 
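     /*
      * Legacy directory read path, used when a 9P2000.u client issues Tread
      * on a directory fid: each entry is marshalled as a stat record ("S")
      * until max_count would be exceeded. Returns the number of bytes
      * produced or a negative error.
      */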
2302 static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
2303                                                   V9fsFidState *fidp,
2304                                                   uint32_t max_count)
2305 {
2306     V9fsPath path;
2307     V9fsStat v9stat;
2308     int len, err = 0;
2309     int32_t count = 0;
2310     struct stat stbuf;
2311     off_t saved_dir_pos;
2312     struct dirent *dent;
2313 
2314     /* save the directory position */
2315     saved_dir_pos = v9fs_co_telldir(pdu, fidp);
2316     if (saved_dir_pos < 0) {
2317         return saved_dir_pos;
2318     }
2319 
2320     while (1) {
2321         v9fs_path_init(&path);
2322 
2323         v9fs_readdir_lock(&fidp->fs.dir);
2324 
2325         err = v9fs_co_readdir(pdu, fidp, &dent);
2326         if (err || !dent) {
2327             break;
2328         }
2329         err = v9fs_co_name_to_path(pdu, &fidp->path, dent->d_name, &path);
2330         if (err < 0) {
2331             break;
2332         }
2333         err = v9fs_co_lstat(pdu, &path, &stbuf);
2334         if (err < 0) {
2335             break;
2336         }
2337         err = stat_to_v9stat(pdu, &path, dent->d_name, &stbuf, &v9stat);
2338         if (err < 0) {
2339             break;
2340         }
2341         if ((count + v9stat.size + 2) > max_count) {
2342             v9fs_readdir_unlock(&fidp->fs.dir);
2343 
2344             /* Ran out of buffer. Set dir back to old position and return */
2345             v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
2346             v9fs_stat_free(&v9stat);
2347             v9fs_path_free(&path);
2348             return count;
2349         }
2350 
2351         /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
2352         len = pdu_marshal(pdu, 11 + count, "S", &v9stat);
2353 
2354         v9fs_readdir_unlock(&fidp->fs.dir);
2355 
2356         if (len < 0) {
2357             v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
2358             v9fs_stat_free(&v9stat);
2359             v9fs_path_free(&path);
2360             return len;
2361         }
2362         count += len;
2363         v9fs_stat_free(&v9stat);
2364         v9fs_path_free(&path);
2365         saved_dir_pos = qemu_dirent_off(dent);
2366     }
2367 
2368     v9fs_readdir_unlock(&fidp->fs.dir);
2369 
2370     v9fs_path_free(&path);
2371     if (err < 0) {
2372         return err;
2373     }
2374     return count;
2375 }
2376 
2377 static void coroutine_fn v9fs_read(void *opaque)
2378 {
2379     int32_t fid;
2380     uint64_t off;
2381     ssize_t err = 0;
2382     int32_t count = 0;
2383     size_t offset = 7;
2384     uint32_t max_count;
2385     V9fsFidState *fidp;
2386     V9fsPDU *pdu = opaque;
2387     V9fsState *s = pdu->s;
2388 
2389     err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &max_count);
2390     if (err < 0) {
2391         goto out_nofid;
2392     }
2393     trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count);
2394 
2395     fidp = get_fid(pdu, fid);
2396     if (fidp == NULL) {
2397         err = -EINVAL;
2398         goto out_nofid;
2399     }
2400     if (fidp->fid_type == P9_FID_DIR) {
2401         if (s->proto_version != V9FS_PROTO_2000U) {
2402             warn_report_once(
2403                 "9p: bad client: T_read request on directory only expected "
2404                 "with 9P2000.u protocol version"
2405             );
2406             err = -EOPNOTSUPP;
2407             goto out;
2408         }
2409         if (off == 0) {
2410             v9fs_co_rewinddir(pdu, fidp);
2411         }
2412         count = v9fs_do_readdir_with_stat(pdu, fidp, max_count);
2413         if (count < 0) {
2414             err = count;
2415             goto out;
2416         }
2417         err = pdu_marshal(pdu, offset, "d", count);
2418         if (err < 0) {
2419             goto out;
2420         }
2421         err += offset + count;
2422     } else if (fidp->fid_type == P9_FID_FILE) {
2423         QEMUIOVector qiov_full;
2424         QEMUIOVector qiov;
2425         int32_t len;
2426 
2427         v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
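             /*
              * skip = offset + 4: leave room in the response for the 4-byte
              * count field, which is only marshalled (at 'offset') once the
              * read has finished and the actual byte count is known.
              */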
2428         qemu_iovec_init(&qiov, qiov_full.niov);
2429         do {
2430             qemu_iovec_reset(&qiov);
2431             qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
2432             if (0) {
2433                 print_sg(qiov.iov, qiov.niov);
2434             }
2435             /* Loop in case of EINTR */
2436             do {
2437                 len = v9fs_co_preadv(pdu, fidp, qiov.iov, qiov.niov, off);
2438                 if (len >= 0) {
2439                     off   += len;
2440                     count += len;
2441                 }
2442             } while (len == -EINTR && !pdu->cancelled);
2443             if (len < 0) {
2444                 /* I/O error, return the error */
2445                 err = len;
2446                 goto out_free_iovec;
2447             }
2448         } while (count < max_count && len > 0);
2449         err = pdu_marshal(pdu, offset, "d", count);
2450         if (err < 0) {
2451             goto out_free_iovec;
2452         }
2453         err += offset + count;
2454 out_free_iovec:
2455         qemu_iovec_destroy(&qiov);
2456         qemu_iovec_destroy(&qiov_full);
2457     } else if (fidp->fid_type == P9_FID_XATTR) {
2458         err = v9fs_xattr_read(s, pdu, fidp, off, max_count);
2459     } else {
2460         err = -EINVAL;
2461     }
2462     trace_v9fs_read_return(pdu->tag, pdu->id, count, err);
2463 out:
2464     put_fid(pdu, fidp);
2465 out_nofid:
2466     pdu_complete(pdu, err);
2467 }
2468 
2469 /**
2470  * v9fs_readdir_response_size() - Returns size required in Rreaddir response
2471  * for the passed dirent @name.
2472  *
2473  * @name: directory entry's name (i.e. file name, directory name)
2474  * Return: required size in bytes
2475  */
2476 size_t v9fs_readdir_response_size(V9fsString *name)
2477 {
2478     /*
2479      * Size of each dirent on the wire: size of qid (13) + size of offset (8) +
2480      * size of type (1) + size of name.size (2) + strlen(name.data)
2481      */
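         /* e.g. a 3-character name such as "foo": 13 + 8 + 1 + 2 + 3 = 27 bytes */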
2482     return 24 + v9fs_string_size(name);
2483 }
2484 
2485 static void v9fs_free_dirents(struct V9fsDirEnt *e)
2486 {
2487     struct V9fsDirEnt *next = NULL;
2488 
2489     for (; e; e = next) {
2490         next = e->next;
2491         g_free(e->dent);
2492         g_free(e->st);
2493         g_free(e);
2494     }
2495 }
2496 
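     /*
      * Builds the Rreaddir payload for a 9P2000.L T_readdir request: fetches
      * the directory entries (see below) and marshals each of them as a
      * qid[13] offset[8] type[1] name[s] record. Returns the number of
      * payload bytes marshalled, or a negative error.
      */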
2497 static int coroutine_fn v9fs_do_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
2498                                         off_t offset, int32_t max_count)
2499 {
2500     size_t size;
2501     V9fsQID qid;
2502     V9fsString name;
2503     int len, err = 0;
2504     int32_t count = 0;
2505     off_t off;
2506     struct dirent *dent;
2507     struct stat *st;
2508     struct V9fsDirEnt *entries = NULL;
2509 
2510     /*
2511      * inode remapping requires the device id, which in turn might be
2512      * different for different directory entries, so if inode remapping is
2513      * enabled we have to make a full stat for each directory entry
2514      */
2515     const bool dostat = pdu->s->ctx.export_flags & V9FS_REMAP_INODES;
2516 
2517     /*
2518      * Fetch all required directory entries in one go on a background IO
2519      * thread from the fs driver. We don't want to do that for each entry
2520      * individually, because hopping between threads (this main IO thread
2521      * and the background IO driver thread) would add up to a huge latency.
2522      */
2523     count = v9fs_co_readdir_many(pdu, fidp, &entries, offset, max_count,
2524                                  dostat);
2525     if (count < 0) {
2526         err = count;
2527         count = 0;
2528         goto out;
2529     }
2530     count = 0;
2531 
2532     for (struct V9fsDirEnt *e = entries; e; e = e->next) {
2533         dent = e->dent;
2534 
2535         if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) {
2536             st = e->st;
2537             /* e->st should never be NULL, but just to be sure */
2538             if (!st) {
2539                 err = -1;
2540                 break;
2541             }
2542 
2543             /* remap inode */
2544             err = stat_to_qid(pdu, st, &qid);
2545             if (err < 0) {
2546                 break;
2547             }
2548         } else {
2549             /*
2550              * Fill in just the path field of the qid because the client
2551              * uses only that. Filling the entire qid structure would
2552              * require a stat of each dirent found, which is expensive;
2553              * for that reason we don't call stat_to_qid() here. The only
2554              * drawback is that stat_to_qid()'s multi-device export
2555              * detection is not performed and reported as an error to the
2556              * user here. But the user would get that error anyway when
2557              * accessing those files/dirs through other means.
2558              */
2559             size = MIN(sizeof(dent->d_ino), sizeof(qid.path));
2560             memcpy(&qid.path, &dent->d_ino, size);
2561             /* Fill the other fields with dummy values */
2562             qid.type = 0;
2563             qid.version = 0;
2564         }
2565 
2566         off = qemu_dirent_off(dent);
2567         v9fs_string_init(&name);
2568         v9fs_string_sprintf(&name, "%s", dent->d_name);
2569 
2570         /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
2571         len = pdu_marshal(pdu, 11 + count, "Qqbs",
2572                           &qid, off,
2573                           dent->d_type, &name);
2574 
2575         v9fs_string_free(&name);
2576 
2577         if (len < 0) {
2578             err = len;
2579             break;
2580         }
2581 
2582         count += len;
2583     }
2584 
2585 out:
2586     v9fs_free_dirents(entries);
2587     if (err < 0) {
2588         return err;
2589     }
2590     return count;
2591 }
2592 
2593 static void coroutine_fn v9fs_readdir(void *opaque)
2594 {
2595     int32_t fid;
2596     V9fsFidState *fidp;
2597     ssize_t retval = 0;
2598     size_t offset = 7;
2599     uint64_t initial_offset;
2600     int32_t count;
2601     uint32_t max_count;
2602     V9fsPDU *pdu = opaque;
2603     V9fsState *s = pdu->s;
2604 
2605     retval = pdu_unmarshal(pdu, offset, "dqd", &fid,
2606                            &initial_offset, &max_count);
2607     if (retval < 0) {
2608         goto out_nofid;
2609     }
2610     trace_v9fs_readdir(pdu->tag, pdu->id, fid, initial_offset, max_count);
2611 
2612     /* Enough space for an R_readdir header: size[4] type[1] tag[2] count[4] = 11 */
2613     if (max_count > s->msize - 11) {
2614         max_count = s->msize - 11;
2615         warn_report_once(
2616             "9p: bad client: T_readdir with count > msize - 11"
2617         );
2618     }
2619 
2620     fidp = get_fid(pdu, fid);
2621     if (fidp == NULL) {
2622         retval = -EINVAL;
2623         goto out_nofid;
2624     }
2625     if (fidp->fid_type != P9_FID_DIR) {
2626         warn_report_once("9p: bad client: T_readdir on non-directory stream");
2627         retval = -ENOTDIR;
2628         goto out;
2629     }
2630     if (!fidp->fs.dir.stream) {
2631         retval = -EINVAL;
2632         goto out;
2633     }
2634     if (s->proto_version != V9FS_PROTO_2000L) {
2635         warn_report_once(
2636             "9p: bad client: T_readdir request only expected with 9P2000.L "
2637             "protocol version"
2638         );
2639         retval = -EOPNOTSUPP;
2640         goto out;
2641     }
2642     count = v9fs_do_readdir(pdu, fidp, (off_t) initial_offset, max_count);
2643     if (count < 0) {
2644         retval = count;
2645         goto out;
2646     }
2647     retval = pdu_marshal(pdu, offset, "d", count);
2648     if (retval < 0) {
2649         goto out;
2650     }
2651     retval += count + offset;
2652     trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval);
2653 out:
2654     put_fid(pdu, fidp);
2655 out_nofid:
2656     pdu_complete(pdu, retval);
2657 }
2658 
2659 static int v9fs_xattr_write(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
2660                             uint64_t off, uint32_t count,
2661                             struct iovec *sg, int cnt)
2662 {
2663     int i, to_copy;
2664     ssize_t err = 0;
2665     uint64_t write_count;
2666     size_t offset = 7;
2667 
2668 
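         /*
          * A Twrite on an xattr fid only copies the data into the in-memory
          * buffer fidp->fs.xattr.value here; the actual host setxattr is
          * presumably performed later, when the fid is clunked.
          */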
2669     if (fidp->fs.xattr.len < off) {
2670         return -ENOSPC;
2671     }
2672     write_count = fidp->fs.xattr.len - off;
2673     if (write_count > count) {
2674         write_count = count;
2675     }
2676     err = pdu_marshal(pdu, offset, "d", write_count);
2677     if (err < 0) {
2678         return err;
2679     }
2680     err += offset;
2681     fidp->fs.xattr.copied_len += write_count;
2682     /*
2683      * Now copy the content from the sg list
2684      */
2685     for (i = 0; i < cnt; i++) {
2686         if (write_count > sg[i].iov_len) {
2687             to_copy = sg[i].iov_len;
2688         } else {
2689             to_copy = write_count;
2690         }
2691         memcpy((char *)fidp->fs.xattr.value + off, sg[i].iov_base, to_copy);
2692         /* advance the destination offset; write_count tracks bytes still to copy */
2693         off += to_copy;
2694         write_count -= to_copy;
2695     }
2696 
2697     return err;
2698 }
2699 
2700 static void coroutine_fn v9fs_write(void *opaque)
2701 {
2702     ssize_t err;
2703     int32_t fid;
2704     uint64_t off;
2705     uint32_t count;
2706     int32_t len = 0;
2707     int32_t total = 0;
2708     size_t offset = 7;
2709     V9fsFidState *fidp;
2710     V9fsPDU *pdu = opaque;
2711     V9fsState *s = pdu->s;
2712     QEMUIOVector qiov_full;
2713     QEMUIOVector qiov;
2714 
2715     err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &count);
2716     if (err < 0) {
2717         pdu_complete(pdu, err);
2718         return;
2719     }
2720     offset += err;
2721     v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
2722     trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
2723 
2724     fidp = get_fid(pdu, fid);
2725     if (fidp == NULL) {
2726         err = -EINVAL;
2727         goto out_nofid;
2728     }
2729     if (fidp->fid_type == P9_FID_FILE) {
2730         if (fidp->fs.fd == -1) {
2731             err = -EINVAL;
2732             goto out;
2733         }
2734     } else if (fidp->fid_type == P9_FID_XATTR) {
2735         /*
2736          * setxattr operation
2737          */
2738         err = v9fs_xattr_write(s, pdu, fidp, off, count,
2739                                qiov_full.iov, qiov_full.niov);
2740         goto out;
2741     } else {
2742         err = -EINVAL;
2743         goto out;
2744     }
2745     qemu_iovec_init(&qiov, qiov_full.niov);
2746     do {
2747         qemu_iovec_reset(&qiov);
2748         qemu_iovec_concat(&qiov, &qiov_full, total, qiov_full.size - total);
2749         if (0) {
2750             print_sg(qiov.iov, qiov.niov);
2751         }
2752         /* Loop in case of EINTR */
2753         do {
2754             len = v9fs_co_pwritev(pdu, fidp, qiov.iov, qiov.niov, off);
2755             if (len >= 0) {
2756                 off   += len;
2757                 total += len;
2758             }
2759         } while (len == -EINTR && !pdu->cancelled);
2760         if (len < 0) {
2761             /* IO error return the error */
2762             /* I/O error, return the error */
2763             goto out_qiov;
2764         }
2765     } while (total < count && len > 0);
2766 
2767     offset = 7;
2768     err = pdu_marshal(pdu, offset, "d", total);
2769     if (err < 0) {
2770         goto out_qiov;
2771     }
2772     err += offset;
2773     trace_v9fs_write_return(pdu->tag, pdu->id, total, err);
2774 out_qiov:
2775     qemu_iovec_destroy(&qiov);
2776 out:
2777     put_fid(pdu, fidp);
2778 out_nofid:
2779     qemu_iovec_destroy(&qiov_full);
2780     pdu_complete(pdu, err);
2781 }
2782 
2783 static void coroutine_fn v9fs_create(void *opaque)
2784 {
2785     int32_t fid;
2786     int err = 0;
2787     size_t offset = 7;
2788     V9fsFidState *fidp;
2789     V9fsQID qid;
2790     int32_t perm;
2791     int8_t mode;
2792     V9fsPath path;
2793     struct stat stbuf;
2794     V9fsString name;
2795     V9fsString extension;
2796     int iounit;
2797     V9fsPDU *pdu = opaque;
2798     V9fsState *s = pdu->s;
2799 
2800     v9fs_path_init(&path);
2801     v9fs_string_init(&name);
2802     v9fs_string_init(&extension);
2803     err = pdu_unmarshal(pdu, offset, "dsdbs", &fid, &name,
2804                         &perm, &mode, &extension);
2805     if (err < 0) {
2806         goto out_nofid;
2807     }
2808     trace_v9fs_create(pdu->tag, pdu->id, fid, name.data, perm, mode);
2809 
2810     if (name_is_illegal(name.data)) {
2811         err = -ENOENT;
2812         goto out_nofid;
2813     }
2814 
2815     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
2816         err = -EEXIST;
2817         goto out_nofid;
2818     }
2819 
2820     fidp = get_fid(pdu, fid);
2821     if (fidp == NULL) {
2822         err = -EINVAL;
2823         goto out_nofid;
2824     }
2825     if (fidp->fid_type != P9_FID_NONE) {
2826         err = -EINVAL;
2827         goto out;
2828     }
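         /*
          * 9P2000.u Tcreate: the perm bits select the kind of object to
          * create (directory, symlink, hard link, device node, named pipe or
          * socket), falling through to a regular file create otherwise.
          */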
2829     if (perm & P9_STAT_MODE_DIR) {
2830         err = v9fs_co_mkdir(pdu, fidp, &name, perm & 0777,
2831                             fidp->uid, -1, &stbuf);
2832         if (err < 0) {
2833             goto out;
2834         }
2835         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2836         if (err < 0) {
2837             goto out;
2838         }
2839         v9fs_path_write_lock(s);
2840         v9fs_path_copy(&fidp->path, &path);
2841         v9fs_path_unlock(s);
2842         err = v9fs_co_opendir(pdu, fidp);
2843         if (err < 0) {
2844             goto out;
2845         }
2846         fidp->fid_type = P9_FID_DIR;
2847     } else if (perm & P9_STAT_MODE_SYMLINK) {
2848         err = v9fs_co_symlink(pdu, fidp, &name,
2849                               extension.data, -1 , &stbuf);
2850         if (err < 0) {
2851             goto out;
2852         }
2853         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2854         if (err < 0) {
2855             goto out;
2856         }
2857         v9fs_path_write_lock(s);
2858         v9fs_path_copy(&fidp->path, &path);
2859         v9fs_path_unlock(s);
2860     } else if (perm & P9_STAT_MODE_LINK) {
2861         int32_t ofid = atoi(extension.data);
2862         V9fsFidState *ofidp = get_fid(pdu, ofid);
2863         if (ofidp == NULL) {
2864             err = -EINVAL;
2865             goto out;
2866         }
2867         err = v9fs_co_link(pdu, ofidp, fidp, &name);
2868         put_fid(pdu, ofidp);
2869         if (err < 0) {
2870             goto out;
2871         }
2872         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2873         if (err < 0) {
2874             fidp->fid_type = P9_FID_NONE;
2875             goto out;
2876         }
2877         v9fs_path_write_lock(s);
2878         v9fs_path_copy(&fidp->path, &path);
2879         v9fs_path_unlock(s);
2880         err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
2881         if (err < 0) {
2882             fidp->fid_type = P9_FID_NONE;
2883             goto out;
2884         }
2885     } else if (perm & P9_STAT_MODE_DEVICE) {
2886         char ctype;
2887         uint32_t major, minor;
2888         mode_t nmode = 0;
2889 
2890         if (sscanf(extension.data, "%c %u %u", &ctype, &major, &minor) != 3) {
2891             err = -errno;
2892             goto out;
2893         }
2894 
2895         switch (ctype) {
2896         case 'c':
2897             nmode = S_IFCHR;
2898             break;
2899         case 'b':
2900             nmode = S_IFBLK;
2901             break;
2902         default:
2903             err = -EIO;
2904             goto out;
2905         }
2906 
2907         nmode |= perm & 0777;
2908         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2909                             makedev(major, minor), nmode, &stbuf);
2910         if (err < 0) {
2911             goto out;
2912         }
2913         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2914         if (err < 0) {
2915             goto out;
2916         }
2917         v9fs_path_write_lock(s);
2918         v9fs_path_copy(&fidp->path, &path);
2919         v9fs_path_unlock(s);
2920     } else if (perm & P9_STAT_MODE_NAMED_PIPE) {
2921         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2922                             0, S_IFIFO | (perm & 0777), &stbuf);
2923         if (err < 0) {
2924             goto out;
2925         }
2926         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2927         if (err < 0) {
2928             goto out;
2929         }
2930         v9fs_path_write_lock(s);
2931         v9fs_path_copy(&fidp->path, &path);
2932         v9fs_path_unlock(s);
2933     } else if (perm & P9_STAT_MODE_SOCKET) {
2934         err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2935                             0, S_IFSOCK | (perm & 0777), &stbuf);
2936         if (err < 0) {
2937             goto out;
2938         }
2939         err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2940         if (err < 0) {
2941             goto out;
2942         }
2943         v9fs_path_write_lock(s);
2944         v9fs_path_copy(&fidp->path, &path);
2945         v9fs_path_unlock(s);
2946     } else {
2947         err = v9fs_co_open2(pdu, fidp, &name, -1,
2948                             omode_to_uflags(mode) | O_CREAT, perm, &stbuf);
2949         if (err < 0) {
2950             goto out;
2951         }
2952         fidp->fid_type = P9_FID_FILE;
2953         fidp->open_flags = omode_to_uflags(mode);
2954         if (fidp->open_flags & O_EXCL) {
2955             /*
2956              * We let the host file system do the O_EXCL check.
2957              * We should not reclaim such an fd.
2958              */
2959             fidp->flags |= FID_NON_RECLAIMABLE;
2960         }
2961     }
2962     iounit = get_iounit(pdu, &fidp->path);
2963     err = stat_to_qid(pdu, &stbuf, &qid);
2964     if (err < 0) {
2965         goto out;
2966     }
2967     err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2968     if (err < 0) {
2969         goto out;
2970     }
2971     err += offset;
2972     trace_v9fs_create_return(pdu->tag, pdu->id,
2973                              qid.type, qid.version, qid.path, iounit);
2974 out:
2975     put_fid(pdu, fidp);
2976 out_nofid:
2977     pdu_complete(pdu, err);
2978     v9fs_string_free(&name);
2979     v9fs_string_free(&extension);
2980     v9fs_path_free(&path);
2981 }
2982 
2983 static void coroutine_fn v9fs_symlink(void *opaque)
2984 {
2985     V9fsPDU *pdu = opaque;
2986     V9fsString name;
2987     V9fsString symname;
2988     V9fsFidState *dfidp;
2989     V9fsQID qid;
2990     struct stat stbuf;
2991     int32_t dfid;
2992     int err = 0;
2993     gid_t gid;
2994     size_t offset = 7;
2995 
2996     v9fs_string_init(&name);
2997     v9fs_string_init(&symname);
2998     err = pdu_unmarshal(pdu, offset, "dssd", &dfid, &name, &symname, &gid);
2999     if (err < 0) {
3000         goto out_nofid;
3001     }
3002     trace_v9fs_symlink(pdu->tag, pdu->id, dfid, name.data, symname.data, gid);
3003 
3004     if (name_is_illegal(name.data)) {
3005         err = -ENOENT;
3006         goto out_nofid;
3007     }
3008 
3009     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3010         err = -EEXIST;
3011         goto out_nofid;
3012     }
3013 
3014     dfidp = get_fid(pdu, dfid);
3015     if (dfidp == NULL) {
3016         err = -EINVAL;
3017         goto out_nofid;
3018     }
3019     err = v9fs_co_symlink(pdu, dfidp, &name, symname.data, gid, &stbuf);
3020     if (err < 0) {
3021         goto out;
3022     }
3023     err = stat_to_qid(pdu, &stbuf, &qid);
3024     if (err < 0) {
3025         goto out;
3026     }
3027     err = pdu_marshal(pdu, offset, "Q", &qid);
3028     if (err < 0) {
3029         goto out;
3030     }
3031     err += offset;
3032     trace_v9fs_symlink_return(pdu->tag, pdu->id,
3033                               qid.type, qid.version, qid.path);
3034 out:
3035     put_fid(pdu, dfidp);
3036 out_nofid:
3037     pdu_complete(pdu, err);
3038     v9fs_string_free(&name);
3039     v9fs_string_free(&symname);
3040 }
3041 
3042 static void coroutine_fn v9fs_flush(void *opaque)
3043 {
3044     ssize_t err;
3045     int16_t tag;
3046     size_t offset = 7;
3047     V9fsPDU *cancel_pdu = NULL;
3048     V9fsPDU *pdu = opaque;
3049     V9fsState *s = pdu->s;
3050 
3051     err = pdu_unmarshal(pdu, offset, "w", &tag);
3052     if (err < 0) {
3053         pdu_complete(pdu, err);
3054         return;
3055     }
3056     trace_v9fs_flush(pdu->tag, pdu->id, tag);
3057 
3058     if (pdu->tag == tag) {
3059         warn_report("the guest sent a self-referencing 9P flush request");
3060     } else {
3061         QLIST_FOREACH(cancel_pdu, &s->active_list, next) {
3062             if (cancel_pdu->tag == tag) {
3063                 break;
3064             }
3065         }
3066     }
3067     if (cancel_pdu) {
3068         cancel_pdu->cancelled = 1;
3069         /*
3070          * Wait for pdu to complete.
3071          */
3072         qemu_co_queue_wait(&cancel_pdu->complete, NULL);
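             /*
              * If waking the queue finds no other flush coroutine still
              * waiting on the cancelled PDU, we are its last waiter and must
              * release the PDU ourselves.
              */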
3073         if (!qemu_co_queue_next(&cancel_pdu->complete)) {
3074             cancel_pdu->cancelled = 0;
3075             pdu_free(cancel_pdu);
3076         }
3077     }
3078     pdu_complete(pdu, 7);
3079 }
3080 
3081 static void coroutine_fn v9fs_link(void *opaque)
3082 {
3083     V9fsPDU *pdu = opaque;
3084     int32_t dfid, oldfid;
3085     V9fsFidState *dfidp, *oldfidp;
3086     V9fsString name;
3087     size_t offset = 7;
3088     int err = 0;
3089 
3090     v9fs_string_init(&name);
3091     err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name);
3092     if (err < 0) {
3093         goto out_nofid;
3094     }
3095     trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data);
3096 
3097     if (name_is_illegal(name.data)) {
3098         err = -ENOENT;
3099         goto out_nofid;
3100     }
3101 
3102     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3103         err = -EEXIST;
3104         goto out_nofid;
3105     }
3106 
3107     dfidp = get_fid(pdu, dfid);
3108     if (dfidp == NULL) {
3109         err = -ENOENT;
3110         goto out_nofid;
3111     }
3112 
3113     oldfidp = get_fid(pdu, oldfid);
3114     if (oldfidp == NULL) {
3115         err = -ENOENT;
3116         goto out;
3117     }
3118     err = v9fs_co_link(pdu, oldfidp, dfidp, &name);
3119     if (!err) {
3120         err = offset;
3121     }
3122     put_fid(pdu, oldfidp);
3123 out:
3124     put_fid(pdu, dfidp);
3125 out_nofid:
3126     v9fs_string_free(&name);
3127     pdu_complete(pdu, err);
3128 }
3129 
3130 /* Only works with path name based fid */
3131 static void coroutine_fn v9fs_remove(void *opaque)
3132 {
3133     int32_t fid;
3134     int err = 0;
3135     size_t offset = 7;
3136     V9fsFidState *fidp;
3137     V9fsPDU *pdu = opaque;
3138 
3139     err = pdu_unmarshal(pdu, offset, "d", &fid);
3140     if (err < 0) {
3141         goto out_nofid;
3142     }
3143     trace_v9fs_remove(pdu->tag, pdu->id, fid);
3144 
3145     fidp = get_fid(pdu, fid);
3146     if (fidp == NULL) {
3147         err = -EINVAL;
3148         goto out_nofid;
3149     }
3150     /* if the fs driver is not path based, return EOPNOTSUPP */
3151     if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
3152         err = -EOPNOTSUPP;
3153         goto out_err;
3154     }
3155     /*
3156      * If the file is unlinked, we cannot reopen
3157      * it later, so don't reclaim the fd.
3158      */
3159     err = v9fs_mark_fids_unreclaim(pdu, &fidp->path);
3160     if (err < 0) {
3161         goto out_err;
3162     }
3163     err = v9fs_co_remove(pdu, &fidp->path);
3164     if (!err) {
3165         err = offset;
3166     }
3167 out_err:
3168     /* For TREMOVE we need to clunk the fid even on failed remove */
3169     clunk_fid(pdu->s, fidp->fid);
3170     put_fid(pdu, fidp);
3171 out_nofid:
3172     pdu_complete(pdu, err);
3173 }
3174 
3175 static void coroutine_fn v9fs_unlinkat(void *opaque)
3176 {
3177     int err = 0;
3178     V9fsString name;
3179     int32_t dfid, flags, rflags = 0;
3180     size_t offset = 7;
3181     V9fsPath path;
3182     V9fsFidState *dfidp;
3183     V9fsPDU *pdu = opaque;
3184 
3185     v9fs_string_init(&name);
3186     err = pdu_unmarshal(pdu, offset, "dsd", &dfid, &name, &flags);
3187     if (err < 0) {
3188         goto out_nofid;
3189     }
3190 
3191     if (name_is_illegal(name.data)) {
3192         err = -ENOENT;
3193         goto out_nofid;
3194     }
3195 
3196     if (!strcmp(".", name.data)) {
3197         err = -EINVAL;
3198         goto out_nofid;
3199     }
3200 
3201     if (!strcmp("..", name.data)) {
3202         err = -ENOTEMPTY;
3203         goto out_nofid;
3204     }
3205 
3206     if (flags & ~P9_DOTL_AT_REMOVEDIR) {
3207         err = -EINVAL;
3208         goto out_nofid;
3209     }
3210 
3211     if (flags & P9_DOTL_AT_REMOVEDIR) {
3212         rflags |= AT_REMOVEDIR;
3213     }
3214 
3215     dfidp = get_fid(pdu, dfid);
3216     if (dfidp == NULL) {
3217         err = -EINVAL;
3218         goto out_nofid;
3219     }
3220     /*
3221      * If the file is unlinked, we cannot reopen
3222      * it later, so don't reclaim the fd.
3223      */
3224     v9fs_path_init(&path);
3225     err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path);
3226     if (err < 0) {
3227         goto out_err;
3228     }
3229     err = v9fs_mark_fids_unreclaim(pdu, &path);
3230     if (err < 0) {
3231         goto out_err;
3232     }
3233     err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, rflags);
3234     if (!err) {
3235         err = offset;
3236     }
3237 out_err:
3238     put_fid(pdu, dfidp);
3239     v9fs_path_free(&path);
3240 out_nofid:
3241     pdu_complete(pdu, err);
3242     v9fs_string_free(&name);
3243 }
3244 
3245 
3246 /* Only works with path name based fid */
3247 static int coroutine_fn v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
3248                                              int32_t newdirfid,
3249                                              V9fsString *name)
3250 {
3251     int err = 0;
3252     V9fsPath new_path;
3253     V9fsFidState *tfidp;
3254     V9fsState *s = pdu->s;
3255     V9fsFidState *dirfidp = NULL;
3256     GHashTableIter iter;
3257     gpointer fid;
3258 
3259     v9fs_path_init(&new_path);
3260     if (newdirfid != -1) {
3261         dirfidp = get_fid(pdu, newdirfid);
3262         if (dirfidp == NULL) {
3263             return -ENOENT;
3264         }
3265         if (fidp->fid_type != P9_FID_NONE) {
3266             err = -EINVAL;
3267             goto out;
3268         }
3269         err = v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path);
3270         if (err < 0) {
3271             goto out;
3272         }
3273     } else {
3274         char *dir_name = g_path_get_dirname(fidp->path.data);
3275         V9fsPath dir_path;
3276 
3277         v9fs_path_init(&dir_path);
3278         v9fs_path_sprintf(&dir_path, "%s", dir_name);
3279         g_free(dir_name);
3280 
3281         err = v9fs_co_name_to_path(pdu, &dir_path, name->data, &new_path);
3282         v9fs_path_free(&dir_path);
3283         if (err < 0) {
3284             goto out;
3285         }
3286     }
3287     err = v9fs_co_rename(pdu, &fidp->path, &new_path);
3288     if (err < 0) {
3289         goto out;
3290     }
3291 
3292     /*
3293      * Fix up fids pointing to the old name so that
3294      * they start pointing to the new name
3295      */
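         /*
          * For example, renaming directory /a/b to /a/c also rewrites a fid
          * that currently points at /a/b/d so that it points at /a/c/d.
          */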
3296     g_hash_table_iter_init(&iter, s->fids);
3297     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &tfidp)) {
3298         if (v9fs_path_is_ancestor(&fidp->path, &tfidp->path)) {
3299             /* replace the name */
3300             v9fs_fix_path(&tfidp->path, &new_path, strlen(fidp->path.data));
3301         }
3302     }
3303 out:
3304     if (dirfidp) {
3305         put_fid(pdu, dirfidp);
3306     }
3307     v9fs_path_free(&new_path);
3308     return err;
3309 }
3310 
3311 /* Only works with path name based fid */
3312 static void coroutine_fn v9fs_rename(void *opaque)
3313 {
3314     int32_t fid;
3315     ssize_t err = 0;
3316     size_t offset = 7;
3317     V9fsString name;
3318     int32_t newdirfid;
3319     V9fsFidState *fidp;
3320     V9fsPDU *pdu = opaque;
3321     V9fsState *s = pdu->s;
3322 
3323     v9fs_string_init(&name);
3324     err = pdu_unmarshal(pdu, offset, "dds", &fid, &newdirfid, &name);
3325     if (err < 0) {
3326         goto out_nofid;
3327     }
3328 
3329     if (name_is_illegal(name.data)) {
3330         err = -ENOENT;
3331         goto out_nofid;
3332     }
3333 
3334     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3335         err = -EISDIR;
3336         goto out_nofid;
3337     }
3338 
3339     fidp = get_fid(pdu, fid);
3340     if (fidp == NULL) {
3341         err = -ENOENT;
3342         goto out_nofid;
3343     }
3344     if (fidp->fid_type != P9_FID_NONE) {
3345         err = -EINVAL;
3346         goto out;
3347     }
3348     /* if the fs driver is not path based, return EOPNOTSUPP */
3349     if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
3350         err = -EOPNOTSUPP;
3351         goto out;
3352     }
3353     v9fs_path_write_lock(s);
3354     err = v9fs_complete_rename(pdu, fidp, newdirfid, &name);
3355     v9fs_path_unlock(s);
3356     if (!err) {
3357         err = offset;
3358     }
3359 out:
3360     put_fid(pdu, fidp);
3361 out_nofid:
3362     pdu_complete(pdu, err);
3363     v9fs_string_free(&name);
3364 }
3365 
3366 static int coroutine_fn v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
3367                                            V9fsString *old_name,
3368                                            V9fsPath *newdir,
3369                                            V9fsString *new_name)
3370 {
3371     V9fsFidState *tfidp;
3372     V9fsPath oldpath, newpath;
3373     V9fsState *s = pdu->s;
3374     int err;
3375     GHashTableIter iter;
3376     gpointer fid;
3377 
3378     v9fs_path_init(&oldpath);
3379     v9fs_path_init(&newpath);
3380     err = v9fs_co_name_to_path(pdu, olddir, old_name->data, &oldpath);
3381     if (err < 0) {
3382         goto out;
3383     }
3384     err = v9fs_co_name_to_path(pdu, newdir, new_name->data, &newpath);
3385     if (err < 0) {
3386         goto out;
3387     }
3388 
3389     /*
3390      * Fix up fids pointing to the old name so that
3391      * they start pointing to the new name
3392      */
3393     g_hash_table_iter_init(&iter, s->fids);
3394     while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &tfidp)) {
3395         if (v9fs_path_is_ancestor(&oldpath, &tfidp->path)) {
3396             /* replace the name */
3397             v9fs_fix_path(&tfidp->path, &newpath, strlen(oldpath.data));
3398         }
3399     }
3400 out:
3401     v9fs_path_free(&oldpath);
3402     v9fs_path_free(&newpath);
3403     return err;
3404 }
3405 
3406 static int coroutine_fn v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid,
3407                                                V9fsString *old_name,
3408                                                int32_t newdirfid,
3409                                                V9fsString *new_name)
3410 {
3411     int err = 0;
3412     V9fsState *s = pdu->s;
3413     V9fsFidState *newdirfidp = NULL, *olddirfidp = NULL;
3414 
3415     olddirfidp = get_fid(pdu, olddirfid);
3416     if (olddirfidp == NULL) {
3417         err = -ENOENT;
3418         goto out;
3419     }
3420     if (newdirfid != -1) {
3421         newdirfidp = get_fid(pdu, newdirfid);
3422         if (newdirfidp == NULL) {
3423             err = -ENOENT;
3424             goto out;
3425         }
3426     } else {
3427         newdirfidp = get_fid(pdu, olddirfid);
3428     }
3429 
3430     err = v9fs_co_renameat(pdu, &olddirfidp->path, old_name,
3431                            &newdirfidp->path, new_name);
3432     if (err < 0) {
3433         goto out;
3434     }
3435     if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) {
3436         /* Only for path-based fids do we need to do the fixup below */
3437         err = v9fs_fix_fid_paths(pdu, &olddirfidp->path, old_name,
3438                                  &newdirfidp->path, new_name);
3439     }
3440 out:
3441     if (olddirfidp) {
3442         put_fid(pdu, olddirfidp);
3443     }
3444     if (newdirfidp) {
3445         put_fid(pdu, newdirfidp);
3446     }
3447     return err;
3448 }
3449 
3450 static void coroutine_fn v9fs_renameat(void *opaque)
3451 {
3452     ssize_t err = 0;
3453     size_t offset = 7;
3454     V9fsPDU *pdu = opaque;
3455     V9fsState *s = pdu->s;
3456     int32_t olddirfid, newdirfid;
3457     V9fsString old_name, new_name;
3458 
3459     v9fs_string_init(&old_name);
3460     v9fs_string_init(&new_name);
3461     err = pdu_unmarshal(pdu, offset, "dsds", &olddirfid,
3462                         &old_name, &newdirfid, &new_name);
3463     if (err < 0) {
3464         goto out_err;
3465     }
3466 
3467     if (name_is_illegal(old_name.data) || name_is_illegal(new_name.data)) {
3468         err = -ENOENT;
3469         goto out_err;
3470     }
3471 
3472     if (!strcmp(".", old_name.data) || !strcmp("..", old_name.data) ||
3473         !strcmp(".", new_name.data) || !strcmp("..", new_name.data)) {
3474         err = -EISDIR;
3475         goto out_err;
3476     }
3477 
3478     v9fs_path_write_lock(s);
3479     err = v9fs_complete_renameat(pdu, olddirfid,
3480                                  &old_name, newdirfid, &new_name);
3481     v9fs_path_unlock(s);
3482     if (!err) {
3483         err = offset;
3484     }
3485 
3486 out_err:
3487     pdu_complete(pdu, err);
3488     v9fs_string_free(&old_name);
3489     v9fs_string_free(&new_name);
3490 }
3491 
3492 static void coroutine_fn v9fs_wstat(void *opaque)
3493 {
3494     int32_t fid;
3495     int err = 0;
3496     int16_t unused;
3497     V9fsStat v9stat;
3498     size_t offset = 7;
3499     struct stat stbuf;
3500     V9fsFidState *fidp;
3501     V9fsPDU *pdu = opaque;
3502     V9fsState *s = pdu->s;
3503 
3504     v9fs_stat_init(&v9stat);
3505     err = pdu_unmarshal(pdu, offset, "dwS", &fid, &unused, &v9stat);
3506     if (err < 0) {
3507         goto out_nofid;
3508     }
3509     trace_v9fs_wstat(pdu->tag, pdu->id, fid,
3510                      v9stat.mode, v9stat.atime, v9stat.mtime);
3511 
3512     fidp = get_fid(pdu, fid);
3513     if (fidp == NULL) {
3514         err = -EINVAL;
3515         goto out_nofid;
3516     }
3517     /* do we need to sync the file? */
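    /*
     * Per the 9P spec, a Twstat in which every field holds its "don't touch"
     * value carries no attribute changes and may be interpreted as a request
     * to commit the file's contents to stable storage, which is why it is
     * mapped to an fsync below.
     */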
3518     if (donttouch_stat(&v9stat)) {
3519         err = v9fs_co_fsync(pdu, fidp, 0);
3520         goto out;
3521     }
3522     if (v9stat.mode != -1) {
3523         uint32_t v9_mode;
3524         err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
3525         if (err < 0) {
3526             goto out;
3527         }
3528         v9_mode = stat_to_v9mode(&stbuf);
3529         if ((v9stat.mode & P9_STAT_MODE_TYPE_BITS) !=
3530             (v9_mode & P9_STAT_MODE_TYPE_BITS)) {
3531             /* Attempting to change the type */
3532             err = -EIO;
3533             goto out;
3534         }
3535         err = v9fs_co_chmod(pdu, &fidp->path,
3536                             v9mode_to_mode(v9stat.mode,
3537                                            &v9stat.extension));
3538         if (err < 0) {
3539             goto out;
3540         }
3541     }
3542     if (v9stat.mtime != -1 || v9stat.atime != -1) {
3543         struct timespec times[2];
3544         if (v9stat.atime != -1) {
3545             times[0].tv_sec = v9stat.atime;
3546             times[0].tv_nsec = 0;
3547         } else {
3548             times[0].tv_nsec = UTIME_OMIT;
3549         }
3550         if (v9stat.mtime != -1) {
3551             times[1].tv_sec = v9stat.mtime;
3552             times[1].tv_nsec = 0;
3553         } else {
3554             times[1].tv_nsec = UTIME_OMIT;
3555         }
3556         err = v9fs_co_utimensat(pdu, &fidp->path, times);
3557         if (err < 0) {
3558             goto out;
3559         }
3560     }
3561     if (v9stat.n_gid != -1 || v9stat.n_uid != -1) {
3562         err = v9fs_co_chown(pdu, &fidp->path, v9stat.n_uid, v9stat.n_gid);
3563         if (err < 0) {
3564             goto out;
3565         }
3566     }
3567     if (v9stat.name.size != 0) {
3568         v9fs_path_write_lock(s);
3569         err = v9fs_complete_rename(pdu, fidp, -1, &v9stat.name);
3570         v9fs_path_unlock(s);
3571         if (err < 0) {
3572             goto out;
3573         }
3574     }
3575     if (v9stat.length != -1) {
3576         err = v9fs_co_truncate(pdu, &fidp->path, v9stat.length);
3577         if (err < 0) {
3578             goto out;
3579         }
3580     }
3581     err = offset;
3582 out:
3583     put_fid(pdu, fidp);
3584 out_nofid:
3585     v9fs_stat_free(&v9stat);
3586     pdu_complete(pdu, err);
3587 }
3588 
3589 static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf)
3590 {
3591     uint32_t f_type;
3592     uint32_t f_bsize;
3593     uint64_t f_blocks;
3594     uint64_t f_bfree;
3595     uint64_t f_bavail;
3596     uint64_t f_files;
3597     uint64_t f_ffree;
3598     uint64_t fsid_val;
3599     uint32_t f_namelen;
3600     size_t offset = 7;
3601     int32_t bsize_factor;
3602 
3603     /*
3604      * compute bsize factor based on host file system block size
3605      * and client msize
3606      */
3607     bsize_factor = (s->msize - P9_IOHDRSZ) / stbuf->f_bsize;
3608     if (!bsize_factor) {
3609         bsize_factor = 1;
3610     }
3611     f_type  = stbuf->f_type;
3612     f_bsize = stbuf->f_bsize;
3613     f_bsize *= bsize_factor;
3614     /*
3615      * f_bsize is adjusted (multiplied) by the bsize factor, so we need to
3616      * adjust (divide) the number of blocks, free blocks and available
3617      * blocks by the same factor
3618      */
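    /*
     * Worked example (hypothetical values): with msize = 128 KiB and a host
     * f_bsize of 4096, bsize_factor = (131072 - P9_IOHDRSZ) / 4096 = 31
     * (taking P9_IOHDRSZ as 24), so the client sees f_bsize = 126976 while
     * f_blocks/f_bfree/f_bavail shrink by the same factor of 31, keeping the
     * reported capacity roughly unchanged.
     */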
3619     f_blocks = stbuf->f_blocks / bsize_factor;
3620     f_bfree  = stbuf->f_bfree / bsize_factor;
3621     f_bavail = stbuf->f_bavail / bsize_factor;
3622     f_files  = stbuf->f_files;
3623     f_ffree  = stbuf->f_ffree;
3624 #ifdef CONFIG_DARWIN
3625     fsid_val = (unsigned int)stbuf->f_fsid.val[0] |
3626                (unsigned long long)stbuf->f_fsid.val[1] << 32;
3627     f_namelen = NAME_MAX;
3628 #else
3629     fsid_val = (unsigned int) stbuf->f_fsid.__val[0] |
3630                (unsigned long long)stbuf->f_fsid.__val[1] << 32;
3631     f_namelen = stbuf->f_namelen;
3632 #endif
3633 
3634     return pdu_marshal(pdu, offset, "ddqqqqqqd",
3635                        f_type, f_bsize, f_blocks, f_bfree,
3636                        f_bavail, f_files, f_ffree,
3637                        fsid_val, f_namelen);
3638 }
3639 
3640 static void coroutine_fn v9fs_statfs(void *opaque)
3641 {
3642     int32_t fid;
3643     ssize_t retval = 0;
3644     size_t offset = 7;
3645     V9fsFidState *fidp;
3646     struct statfs stbuf;
3647     V9fsPDU *pdu = opaque;
3648     V9fsState *s = pdu->s;
3649 
3650     retval = pdu_unmarshal(pdu, offset, "d", &fid);
3651     if (retval < 0) {
3652         goto out_nofid;
3653     }
3654     fidp = get_fid(pdu, fid);
3655     if (fidp == NULL) {
3656         retval = -ENOENT;
3657         goto out_nofid;
3658     }
3659     retval = v9fs_co_statfs(pdu, &fidp->path, &stbuf);
3660     if (retval < 0) {
3661         goto out;
3662     }
3663     retval = v9fs_fill_statfs(s, pdu, &stbuf);
3664     if (retval < 0) {
3665         goto out;
3666     }
3667     retval += offset;
3668 out:
3669     put_fid(pdu, fidp);
3670 out_nofid:
3671     pdu_complete(pdu, retval);
3672 }
3673 
3674 static void coroutine_fn v9fs_mknod(void *opaque)
3675 {
3676 
3677     int mode;
3678     gid_t gid;
3679     int32_t fid;
3680     V9fsQID qid;
3681     int err = 0;
3682     int major, minor;
3683     size_t offset = 7;
3684     V9fsString name;
3685     struct stat stbuf;
3686     V9fsFidState *fidp;
3687     V9fsPDU *pdu = opaque;
3688 
3689     v9fs_string_init(&name);
3690     err = pdu_unmarshal(pdu, offset, "dsdddd", &fid, &name, &mode,
3691                         &major, &minor, &gid);
3692     if (err < 0) {
3693         goto out_nofid;
3694     }
3695     trace_v9fs_mknod(pdu->tag, pdu->id, fid, mode, major, minor);
3696 
3697     if (name_is_illegal(name.data)) {
3698         err = -ENOENT;
3699         goto out_nofid;
3700     }
3701 
3702     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3703         err = -EEXIST;
3704         goto out_nofid;
3705     }
3706 
3707     fidp = get_fid(pdu, fid);
3708     if (fidp == NULL) {
3709         err = -ENOENT;
3710         goto out_nofid;
3711     }
3712     err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, gid,
3713                         makedev(major, minor), mode, &stbuf);
3714     if (err < 0) {
3715         goto out;
3716     }
3717     err = stat_to_qid(pdu, &stbuf, &qid);
3718     if (err < 0) {
3719         goto out;
3720     }
3721     err = pdu_marshal(pdu, offset, "Q", &qid);
3722     if (err < 0) {
3723         goto out;
3724     }
3725     err += offset;
3726     trace_v9fs_mknod_return(pdu->tag, pdu->id,
3727                             qid.type, qid.version, qid.path);
3728 out:
3729     put_fid(pdu, fidp);
3730 out_nofid:
3731     pdu_complete(pdu, err);
3732     v9fs_string_free(&name);
3733 }
3734 
3735 /*
3736  * Implement POSIX byte range locking.
3737  * Server side handling of locking is very simple, because the 9p server in
3738  * QEMU can handle only one client. Most of the lock handling (conflict
3739  * detection, merging, etc.) is done by the client's VFS layer itself, so
3740  * there is nothing to do in the QEMU 9p server side lock code path.
3741  * So when a TLOCK request comes in, always return success.
3742  */
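/*
 * For contrast, a server that enforced the lock on the host side would have
 * to translate the request into something like the purely illustrative POSIX
 * call sketched below; QEMU deliberately skips this, because the guest's VFS
 * already arbitrates between its own processes and no other client shares
 * the file:
 *
 *     struct flock fl = {
 *         .l_type   = (flock.type == P9_LOCK_TYPE_RDLCK) ? F_RDLCK : F_WRLCK,
 *         .l_whence = SEEK_SET,
 *         .l_start  = flock.start,
 *         .l_len    = flock.length,
 *     };
 *     fcntl(fidp->fs.fd, F_SETLK, &fl);
 */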
3743 static void coroutine_fn v9fs_lock(void *opaque)
3744 {
3745     V9fsFlock flock;
3746     size_t offset = 7;
3747     struct stat stbuf;
3748     V9fsFidState *fidp;
3749     int32_t fid, err = 0;
3750     V9fsPDU *pdu = opaque;
3751 
3752     v9fs_string_init(&flock.client_id);
3753     err = pdu_unmarshal(pdu, offset, "dbdqqds", &fid, &flock.type,
3754                         &flock.flags, &flock.start, &flock.length,
3755                         &flock.proc_id, &flock.client_id);
3756     if (err < 0) {
3757         goto out_nofid;
3758     }
3759     trace_v9fs_lock(pdu->tag, pdu->id, fid,
3760                     flock.type, flock.start, flock.length);
3761 
3762 
3763     /* Only the block flag is supported for now, and it is currently ignored */
3764     if (flock.flags & ~P9_LOCK_FLAGS_BLOCK) {
3765         err = -EINVAL;
3766         goto out_nofid;
3767     }
3768     fidp = get_fid(pdu, fid);
3769     if (fidp == NULL) {
3770         err = -ENOENT;
3771         goto out_nofid;
3772     }
3773     err = v9fs_co_fstat(pdu, fidp, &stbuf);
3774     if (err < 0) {
3775         goto out;
3776     }
3777     err = pdu_marshal(pdu, offset, "b", P9_LOCK_SUCCESS);
3778     if (err < 0) {
3779         goto out;
3780     }
3781     err += offset;
3782     trace_v9fs_lock_return(pdu->tag, pdu->id, P9_LOCK_SUCCESS);
3783 out:
3784     put_fid(pdu, fidp);
3785 out_nofid:
3786     pdu_complete(pdu, err);
3787     v9fs_string_free(&flock.client_id);
3788 }
3789 
3790 /*
3791  * When a TGETLOCK request comes in, always return success because all lock
3792  * handling is done by the client's VFS layer.
3793  */
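/*
 * Concretely, the Rgetlock reply below always reports P9_LOCK_TYPE_UNLCK,
 * i.e. "no conflicting lock is held", so the client's own POSIX lock
 * bookkeeping proceeds as if the test had succeeded.
 */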
3794 static void coroutine_fn v9fs_getlock(void *opaque)
3795 {
3796     size_t offset = 7;
3797     struct stat stbuf;
3798     V9fsFidState *fidp;
3799     V9fsGetlock glock;
3800     int32_t fid, err = 0;
3801     V9fsPDU *pdu = opaque;
3802 
3803     v9fs_string_init(&glock.client_id);
3804     err = pdu_unmarshal(pdu, offset, "dbqqds", &fid, &glock.type,
3805                         &glock.start, &glock.length, &glock.proc_id,
3806                         &glock.client_id);
3807     if (err < 0) {
3808         goto out_nofid;
3809     }
3810     trace_v9fs_getlock(pdu->tag, pdu->id, fid,
3811                        glock.type, glock.start, glock.length);
3812 
3813     fidp = get_fid(pdu, fid);
3814     if (fidp == NULL) {
3815         err = -ENOENT;
3816         goto out_nofid;
3817     }
3818     err = v9fs_co_fstat(pdu, fidp, &stbuf);
3819     if (err < 0) {
3820         goto out;
3821     }
3822     glock.type = P9_LOCK_TYPE_UNLCK;
3823     err = pdu_marshal(pdu, offset, "bqqds", glock.type,
3824                           glock.start, glock.length, glock.proc_id,
3825                           &glock.client_id);
3826     if (err < 0) {
3827         goto out;
3828     }
3829     err += offset;
3830     trace_v9fs_getlock_return(pdu->tag, pdu->id, glock.type, glock.start,
3831                               glock.length, glock.proc_id);
3832 out:
3833     put_fid(pdu, fidp);
3834 out_nofid:
3835     pdu_complete(pdu, err);
3836     v9fs_string_free(&glock.client_id);
3837 }
3838 
3839 static void coroutine_fn v9fs_mkdir(void *opaque)
3840 {
3841     V9fsPDU *pdu = opaque;
3842     size_t offset = 7;
3843     int32_t fid;
3844     struct stat stbuf;
3845     V9fsQID qid;
3846     V9fsString name;
3847     V9fsFidState *fidp;
3848     gid_t gid;
3849     int mode;
3850     int err = 0;
3851 
3852     v9fs_string_init(&name);
3853     err = pdu_unmarshal(pdu, offset, "dsdd", &fid, &name, &mode, &gid);
3854     if (err < 0) {
3855         goto out_nofid;
3856     }
3857     trace_v9fs_mkdir(pdu->tag, pdu->id, fid, name.data, mode, gid);
3858 
3859     if (name_is_illegal(name.data)) {
3860         err = -ENOENT;
3861         goto out_nofid;
3862     }
3863 
3864     if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
3865         err = -EEXIST;
3866         goto out_nofid;
3867     }
3868 
3869     fidp = get_fid(pdu, fid);
3870     if (fidp == NULL) {
3871         err = -ENOENT;
3872         goto out_nofid;
3873     }
3874     err = v9fs_co_mkdir(pdu, fidp, &name, mode, fidp->uid, gid, &stbuf);
3875     if (err < 0) {
3876         goto out;
3877     }
3878     err = stat_to_qid(pdu, &stbuf, &qid);
3879     if (err < 0) {
3880         goto out;
3881     }
3882     err = pdu_marshal(pdu, offset, "Q", &qid);
3883     if (err < 0) {
3884         goto out;
3885     }
3886     err += offset;
3887     trace_v9fs_mkdir_return(pdu->tag, pdu->id,
3888                             qid.type, qid.version, qid.path, err);
3889 out:
3890     put_fid(pdu, fidp);
3891 out_nofid:
3892     pdu_complete(pdu, err);
3893     v9fs_string_free(&name);
3894 }
3895 
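/*
 * TXATTRWALK clones an existing file fid into a dedicated xattr fid. An
 * empty attribute name means "list all attribute names" (llistxattr), while
 * a non-empty name selects a single attribute (lgetxattr). In both cases the
 * size is queried first, the whole value is read eagerly into
 * fs.xattr.value, and the returned size tells the client how much it may
 * subsequently fetch from the new fid with TREAD.
 *
 * A typical exchange (illustrative values only):
 *     TXATTRWALK fid=1 newfid=2 name="user.comment"  ->  RXATTRWALK size=11
 *     TREAD      fid=2 offset=0 count=11             ->  RREAD "hello world"
 */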
3896 static void coroutine_fn v9fs_xattrwalk(void *opaque)
3897 {
3898     int64_t size;
3899     V9fsString name;
3900     ssize_t err = 0;
3901     size_t offset = 7;
3902     int32_t fid, newfid;
3903     V9fsFidState *file_fidp;
3904     V9fsFidState *xattr_fidp = NULL;
3905     V9fsPDU *pdu = opaque;
3906     V9fsState *s = pdu->s;
3907 
3908     v9fs_string_init(&name);
3909     err = pdu_unmarshal(pdu, offset, "dds", &fid, &newfid, &name);
3910     if (err < 0) {
3911         goto out_nofid;
3912     }
3913     trace_v9fs_xattrwalk(pdu->tag, pdu->id, fid, newfid, name.data);
3914 
3915     file_fidp = get_fid(pdu, fid);
3916     if (file_fidp == NULL) {
3917         err = -ENOENT;
3918         goto out_nofid;
3919     }
3920     xattr_fidp = alloc_fid(s, newfid);
3921     if (xattr_fidp == NULL) {
3922         err = -EINVAL;
3923         goto out;
3924     }
3925     v9fs_path_copy(&xattr_fidp->path, &file_fidp->path);
3926     if (!v9fs_string_size(&name)) {
3927         /*
3928          * listxattr request. Get the size first
3929          */
3930         size = v9fs_co_llistxattr(pdu, &xattr_fidp->path, NULL, 0);
3931         if (size < 0) {
3932             err = size;
3933             clunk_fid(s, xattr_fidp->fid);
3934             goto out;
3935         }
3936         /*
3937          * Read the xattr value
3938          */
3939         xattr_fidp->fs.xattr.len = size;
3940         xattr_fidp->fid_type = P9_FID_XATTR;
3941         xattr_fidp->fs.xattr.xattrwalk_fid = true;
3942         xattr_fidp->fs.xattr.value = g_malloc0(size);
3943         if (size) {
3944             err = v9fs_co_llistxattr(pdu, &xattr_fidp->path,
3945                                      xattr_fidp->fs.xattr.value,
3946                                      xattr_fidp->fs.xattr.len);
3947             if (err < 0) {
3948                 clunk_fid(s, xattr_fidp->fid);
3949                 goto out;
3950             }
3951         }
3952         err = pdu_marshal(pdu, offset, "q", size);
3953         if (err < 0) {
3954             goto out;
3955         }
3956         err += offset;
3957     } else {
3958         /*
3959          * specific xattr fid. We check for xattr
3960          * presence and also collect the xattr size
3961          */
3962         size = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
3963                                  &name, NULL, 0);
3964         if (size < 0) {
3965             err = size;
3966             clunk_fid(s, xattr_fidp->fid);
3967             goto out;
3968         }
3969         /*
3970          * Read the xattr value
3971          */
3972         xattr_fidp->fs.xattr.len = size;
3973         xattr_fidp->fid_type = P9_FID_XATTR;
3974         xattr_fidp->fs.xattr.xattrwalk_fid = true;
3975         xattr_fidp->fs.xattr.value = g_malloc0(size);
3976         if (size) {
3977             err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
3978                                     &name, xattr_fidp->fs.xattr.value,
3979                                     xattr_fidp->fs.xattr.len);
3980             if (err < 0) {
3981                 clunk_fid(s, xattr_fidp->fid);
3982                 goto out;
3983             }
3984         }
3985         err = pdu_marshal(pdu, offset, "q", size);
3986         if (err < 0) {
3987             goto out;
3988         }
3989         err += offset;
3990     }
3991     trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size);
3992 out:
3993     put_fid(pdu, file_fidp);
3994     if (xattr_fidp) {
3995         put_fid(pdu, xattr_fidp);
3996     }
3997 out_nofid:
3998     pdu_complete(pdu, err);
3999     v9fs_string_free(&name);
4000 }
4001 
4002 #if defined(CONFIG_LINUX)
4003 /* Currently, only Linux has XATTR_SIZE_MAX */
4004 #define P9_XATTR_SIZE_MAX XATTR_SIZE_MAX
4005 #elif defined(CONFIG_DARWIN)
4006 /*
4007  * Darwin doesn't seem to define a maximum xattr size in its user
4008  * space headers, so manually cap it at 64k here.
4009  *
4010  * Having no limit at all can lead to QEMU crashing during large g_malloc()
4011  * calls. Because QEMU does not currently support macOS guests, this
4012  * preliminary 64k cap works only because it mirrors the limit imposed on
4013  * Linux guests.
4014  */
4015 #define P9_XATTR_SIZE_MAX 65536
4016 #else
4017 #error Missing definition for P9_XATTR_SIZE_MAX for this host system
4018 #endif
4019 
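/*
 * TXATTRCREATE is the write-side counterpart of TXATTRWALK: it repurposes
 * the given file fid as an xattr fid, records the attribute name, flags and
 * declared size, and allocates a staging buffer of that size. The attribute
 * data itself arrives through subsequent TWRITEs into the buffer, and the
 * host-side setxattr is performed when the fid is finally clunked.
 */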
4020 static void coroutine_fn v9fs_xattrcreate(void *opaque)
4021 {
4022     int flags, rflags = 0;
4023     int32_t fid;
4024     uint64_t size;
4025     ssize_t err = 0;
4026     V9fsString name;
4027     size_t offset = 7;
4028     V9fsFidState *file_fidp;
4029     V9fsFidState *xattr_fidp;
4030     V9fsPDU *pdu = opaque;
4031 
4032     v9fs_string_init(&name);
4033     err = pdu_unmarshal(pdu, offset, "dsqd", &fid, &name, &size, &flags);
4034     if (err < 0) {
4035         goto out_nofid;
4036     }
4037     trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags);
4038 
4039     if (flags & ~(P9_XATTR_CREATE | P9_XATTR_REPLACE)) {
4040         err = -EINVAL;
4041         goto out_nofid;
4042     }
4043 
4044     if (flags & P9_XATTR_CREATE) {
4045         rflags |= XATTR_CREATE;
4046     }
4047 
4048     if (flags & P9_XATTR_REPLACE) {
4049         rflags |= XATTR_REPLACE;
4050     }
4051 
4052     if (size > P9_XATTR_SIZE_MAX) {
4053         err = -E2BIG;
4054         goto out_nofid;
4055     }
4056 
4057     file_fidp = get_fid(pdu, fid);
4058     if (file_fidp == NULL) {
4059         err = -EINVAL;
4060         goto out_nofid;
4061     }
4062     if (file_fidp->fid_type != P9_FID_NONE) {
4063         err = -EINVAL;
4064         goto out_put_fid;
4065     }
4066 
4067     /* Make the file fid point to xattr */
4068     xattr_fidp = file_fidp;
4069     xattr_fidp->fid_type = P9_FID_XATTR;
4070     xattr_fidp->fs.xattr.copied_len = 0;
4071     xattr_fidp->fs.xattr.xattrwalk_fid = false;
4072     xattr_fidp->fs.xattr.len = size;
4073     xattr_fidp->fs.xattr.flags = rflags;
4074     v9fs_string_init(&xattr_fidp->fs.xattr.name);
4075     v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name);
4076     xattr_fidp->fs.xattr.value = g_malloc0(size);
4077     err = offset;
4078 out_put_fid:
4079     put_fid(pdu, file_fidp);
4080 out_nofid:
4081     pdu_complete(pdu, err);
4082     v9fs_string_free(&name);
4083 }
4084 
4085 static void coroutine_fn v9fs_readlink(void *opaque)
4086 {
4087     V9fsPDU *pdu = opaque;
4088     size_t offset = 7;
4089     V9fsString target;
4090     int32_t fid;
4091     int err = 0;
4092     V9fsFidState *fidp;
4093 
4094     err = pdu_unmarshal(pdu, offset, "d", &fid);
4095     if (err < 0) {
4096         goto out_nofid;
4097     }
4098     trace_v9fs_readlink(pdu->tag, pdu->id, fid);
4099     fidp = get_fid(pdu, fid);
4100     if (fidp == NULL) {
4101         err = -ENOENT;
4102         goto out_nofid;
4103     }
4104 
4105     v9fs_string_init(&target);
4106     err = v9fs_co_readlink(pdu, &fidp->path, &target);
4107     if (err < 0) {
4108         goto out;
4109     }
4110     err = pdu_marshal(pdu, offset, "s", &target);
4111     if (err < 0) {
4112         v9fs_string_free(&target);
4113         goto out;
4114     }
4115     err += offset;
4116     trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data);
4117     v9fs_string_free(&target);
4118 out:
4119     put_fid(pdu, fidp);
4120 out_nofid:
4121     pdu_complete(pdu, err);
4122 }
4123 
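/*
 * Dispatch table, indexed by the 9P request type byte from the message
 * header. Requests whose slot is absent (NULL) are answered with
 * -EOPNOTSUPP by pdu_submit() below; on read-only exports, requests that
 * may modify the filesystem are answered with -EROFS instead.
 */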
4124 static CoroutineEntry *pdu_co_handlers[] = {
4125     [P9_TREADDIR] = v9fs_readdir,
4126     [P9_TSTATFS] = v9fs_statfs,
4127     [P9_TGETATTR] = v9fs_getattr,
4128     [P9_TSETATTR] = v9fs_setattr,
4129     [P9_TXATTRWALK] = v9fs_xattrwalk,
4130     [P9_TXATTRCREATE] = v9fs_xattrcreate,
4131     [P9_TMKNOD] = v9fs_mknod,
4132     [P9_TRENAME] = v9fs_rename,
4133     [P9_TLOCK] = v9fs_lock,
4134     [P9_TGETLOCK] = v9fs_getlock,
4135     [P9_TRENAMEAT] = v9fs_renameat,
4136     [P9_TREADLINK] = v9fs_readlink,
4137     [P9_TUNLINKAT] = v9fs_unlinkat,
4138     [P9_TMKDIR] = v9fs_mkdir,
4139     [P9_TVERSION] = v9fs_version,
4140     [P9_TLOPEN] = v9fs_open,
4141     [P9_TATTACH] = v9fs_attach,
4142     [P9_TSTAT] = v9fs_stat,
4143     [P9_TWALK] = v9fs_walk,
4144     [P9_TCLUNK] = v9fs_clunk,
4145     [P9_TFSYNC] = v9fs_fsync,
4146     [P9_TOPEN] = v9fs_open,
4147     [P9_TREAD] = v9fs_read,
4148 #if 0
4149     [P9_TAUTH] = v9fs_auth,
4150 #endif
4151     [P9_TFLUSH] = v9fs_flush,
4152     [P9_TLINK] = v9fs_link,
4153     [P9_TSYMLINK] = v9fs_symlink,
4154     [P9_TCREATE] = v9fs_create,
4155     [P9_TLCREATE] = v9fs_lcreate,
4156     [P9_TWRITE] = v9fs_write,
4157     [P9_TWSTAT] = v9fs_wstat,
4158     [P9_TREMOVE] = v9fs_remove,
4159 };
4160 
4161 static void coroutine_fn v9fs_op_not_supp(void *opaque)
4162 {
4163     V9fsPDU *pdu = opaque;
4164     pdu_complete(pdu, -EOPNOTSUPP);
4165 }
4166 
4167 static void coroutine_fn v9fs_fs_ro(void *opaque)
4168 {
4169     V9fsPDU *pdu = opaque;
4170     pdu_complete(pdu, -EROFS);
4171 }
4172 
4173 static inline bool is_read_only_op(V9fsPDU *pdu)
4174 {
4175     switch (pdu->id) {
4176     case P9_TREADDIR:
4177     case P9_TSTATFS:
4178     case P9_TGETATTR:
4179     case P9_TXATTRWALK:
4180     case P9_TLOCK:
4181     case P9_TGETLOCK:
4182     case P9_TREADLINK:
4183     case P9_TVERSION:
4184     case P9_TLOPEN:
4185     case P9_TATTACH:
4186     case P9_TSTAT:
4187     case P9_TWALK:
4188     case P9_TCLUNK:
4189     case P9_TFSYNC:
4190     case P9_TOPEN:
4191     case P9_TREAD:
4192     case P9_TAUTH:
4193     case P9_TFLUSH:
4194         return true;
4195     default:
4196         return false;
4197     }
4198 }
4199 
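/*
 * Illustrative effect of the checks below on a read-only export: a TREADDIR
 * or TGETATTR is dispatched to its handler as usual, whereas a TMKDIR or
 * TWRITE is answered by v9fs_fs_ro() with -EROFS before its handler ever
 * runs.
 */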
4200 void pdu_submit(V9fsPDU *pdu, P9MsgHeader *hdr)
4201 {
4202     Coroutine *co;
4203     CoroutineEntry *handler;
4204     V9fsState *s = pdu->s;
4205 
4206     pdu->size = le32_to_cpu(hdr->size_le);
4207     pdu->id = hdr->id;
4208     pdu->tag = le16_to_cpu(hdr->tag_le);
4209 
4210     if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) ||
4211         (pdu_co_handlers[pdu->id] == NULL)) {
4212         handler = v9fs_op_not_supp;
4213     } else if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) {
4214         handler = v9fs_fs_ro;
4215     } else {
4216         handler = pdu_co_handlers[pdu->id];
4217     }
4218 
4219     qemu_co_queue_init(&pdu->complete);
4220     co = qemu_coroutine_create(handler, pdu);
4221     qemu_coroutine_enter(co);
4222 }
4223 
4224 /* Returns 0 on success, 1 on failure. */
4225 int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t,
4226                                Error **errp)
4227 {
4228     ERRP_GUARD();
4229     int i, len;
4230     struct stat stat;
4231     FsDriverEntry *fse;
4232     V9fsPath path;
4233     int rc = 1;
4234 
4235     assert(!s->transport);
4236     s->transport = t;
4237 
4238     /* initialize pdu allocator */
4239     QLIST_INIT(&s->free_list);
4240     QLIST_INIT(&s->active_list);
4241     for (i = 0; i < MAX_REQ; i++) {
4242         QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
4243         s->pdus[i].s = s;
4244         s->pdus[i].idx = i;
4245     }
4246 
4247     v9fs_path_init(&path);
4248 
4249     fse = get_fsdev_fsentry(s->fsconf.fsdev_id);
4250 
4251     if (!fse) {
4252         /* We don't have an fsdev identified by fsdev_id */
4253         error_setg(errp, "9pfs device couldn't find fsdev with the "
4254                    "id = %s",
4255                    s->fsconf.fsdev_id ? s->fsconf.fsdev_id : "NULL");
4256         goto out;
4257     }
4258 
4259     if (!s->fsconf.tag) {
4260         /* no mount_tag was specified for this fsdev */
4261         error_setg(errp, "fsdev with id %s needs mount_tag arguments",
4262                    s->fsconf.fsdev_id);
4263         goto out;
4264     }
4265 
4266     s->ctx.export_flags = fse->export_flags;
4267     s->ctx.fs_root = g_strdup(fse->path);
4268     s->ctx.exops.get_st_gen = NULL;
4269     len = strlen(s->fsconf.tag);
4270     if (len > MAX_TAG_LEN - 1) {
4271         error_setg(errp, "mount tag '%s' (%d bytes) is longer than "
4272                    "maximum (%d bytes)", s->fsconf.tag, len, MAX_TAG_LEN - 1);
4273         goto out;
4274     }
4275 
4276     s->tag = g_strdup(s->fsconf.tag);
4277     s->ctx.uid = -1;
4278 
4279     s->ops = fse->ops;
4280 
4281     s->ctx.fmode = fse->fmode;
4282     s->ctx.dmode = fse->dmode;
4283 
4284     s->fids = g_hash_table_new(NULL, NULL);
4285     qemu_co_rwlock_init(&s->rename_lock);
4286 
4287     if (s->ops->init(&s->ctx, errp) < 0) {
4288         error_prepend(errp, "cannot initialize fsdev '%s': ",
4289                       s->fsconf.fsdev_id);
4290         goto out;
4291     }
4292 
4293     /*
4294      * Check details of the export path; we need to use the fs driver
4295      * callback to do that. Since we are in the init path, we don't
4296      * use coroutines here.
4297      */
4298     if (s->ops->name_to_path(&s->ctx, NULL, "/", &path) < 0) {
4299         error_setg(errp,
4300                    "error in converting name to path %s", strerror(errno));
4301         goto out;
4302     }
4303     if (s->ops->lstat(&s->ctx, &path, &stat)) {
4304         error_setg(errp, "share path %s does not exist", fse->path);
4305         goto out;
4306     } else if (!S_ISDIR(stat.st_mode)) {
4307         error_setg(errp, "share path %s is not a directory", fse->path);
4308         goto out;
4309     }
4310 
4311     s->dev_id = stat.st_dev;
4312 
4313     /* init inode remapping: */
4314     /* hash table for variable length inode suffixes */
4315     qpd_table_init(&s->qpd_table);
4316     /* hash table for slow/full inode remapping (most users won't need it) */
4317     qpf_table_init(&s->qpf_table);
4318     /* hash table for quick inode remapping */
4319     qpp_table_init(&s->qpp_table);
4320     s->qp_ndevices = 0;
4321     s->qp_affix_next = 1; /* reserve 0 to detect overflow */
4322     s->qp_fullpath_next = 1;
4323 
4324     s->ctx.fst = &fse->fst;
4325     fsdev_throttle_init(s->ctx.fst);
4326 
4327     rc = 0;
4328 out:
4329     if (rc) {
4330         v9fs_device_unrealize_common(s);
4331     }
4332     v9fs_path_free(&path);
4333     return rc;
4334 }
4335 
4336 void v9fs_device_unrealize_common(V9fsState *s)
4337 {
4338     if (s->ops && s->ops->cleanup) {
4339         s->ops->cleanup(&s->ctx);
4340     }
4341     if (s->ctx.fst) {
4342         fsdev_throttle_cleanup(s->ctx.fst);
4343     }
4344     if (s->fids) {
4345         g_hash_table_destroy(s->fids);
4346         s->fids = NULL;
4347     }
4348     g_free(s->tag);
4349     qp_table_destroy(&s->qpd_table);
4350     qp_table_destroy(&s->qpp_table);
4351     qp_table_destroy(&s->qpf_table);
4352     g_free(s->ctx.fs_root);
4353 }
4354 
4355 typedef struct VirtfsCoResetData {
4356     V9fsPDU pdu;
4357     bool done;
4358 } VirtfsCoResetData;
4359 
4360 static void coroutine_fn virtfs_co_reset(void *opaque)
4361 {
4362     VirtfsCoResetData *data = opaque;
4363 
4364     virtfs_reset(&data->pdu);
4365     data->done = true;
4366 }
4367 
4368 void v9fs_reset(V9fsState *s)
4369 {
4370     VirtfsCoResetData data = { .pdu = { .s = s }, .done = false };
4371     Coroutine *co;
4372 
4373     while (!QLIST_EMPTY(&s->active_list)) {
4374         aio_poll(qemu_get_aio_context(), true);
4375     }
4376 
4377     co = qemu_coroutine_create(virtfs_co_reset, &data);
4378     qemu_coroutine_enter(co);
4379 
4380     while (!data.done) {
4381         aio_poll(qemu_get_aio_context(), true);
4382     }
4383 }
4384 
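/*
 * Keep some headroom below RLIMIT_NOFILE for the rest of QEMU. Worked
 * example with a hypothetical limit of rlim_cur = 1024:
 *     open_fd_hw = 1024 - MIN(400, 1024 / 3) = 1024 - 341 = 683
 *     open_fd_rc = 1024 / 2 = 512
 * The 9p code starts reclaiming cached host fds once total_open_fd exceeds
 * open_fd_hw, and open_fd_rc bounds how many fds one reclaim pass releases.
 */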
4385 static void __attribute__((__constructor__)) v9fs_set_fd_limit(void)
4386 {
4387     struct rlimit rlim;
4388     if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {
4389         error_report("Failed to get the resource limit");
4390         exit(1);
4391     }
4392     open_fd_hw = rlim.rlim_cur - MIN(400, rlim.rlim_cur / 3);
4393     open_fd_rc = rlim.rlim_cur / 2;
4394 }
4395