xref: /qemu/hw/virtio/vhost.c (revision aa94d52142f674c7abe638f9cfb19bd89a99f154)
1 /*
2  * vhost support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/hw.h"
20 #include "qemu/atomic.h"
21 #include "qemu/range.h"
22 #include "qemu/error-report.h"
23 #include "qemu/memfd.h"
24 #include <linux/vhost.h>
25 #include "exec/address-spaces.h"
26 #include "hw/virtio/virtio-bus.h"
27 #include "hw/virtio/virtio-access.h"
28 #include "migration/migration.h"
29 
30 /* enabled until disconnected backend stabilizes */
31 #define _VHOST_DEBUG 1
32 
33 #ifdef _VHOST_DEBUG
34 #define VHOST_OPS_DEBUG(fmt, ...) \
35     do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
36                       strerror(errno), errno); } while (0)
37 #else
38 #define VHOST_OPS_DEBUG(fmt, ...) \
39     do { } while (0)
40 #endif
41 
42 static struct vhost_log *vhost_log;
43 static struct vhost_log *vhost_log_shm;
44 
45 static unsigned int used_memslots;
46 static QLIST_HEAD(, vhost_dev) vhost_devices =
47     QLIST_HEAD_INITIALIZER(vhost_devices);
48 
49 bool vhost_has_free_slot(void)
50 {
51     unsigned int slots_limit = ~0U;
52     struct vhost_dev *hdev;
53 
54     QLIST_FOREACH(hdev, &vhost_devices, entry) {
55         unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
56         slots_limit = MIN(slots_limit, r);
57     }
58     return slots_limit > used_memslots;
59 }
60 
61 static void vhost_dev_sync_region(struct vhost_dev *dev,
62                                   MemoryRegionSection *section,
63                                   uint64_t mfirst, uint64_t mlast,
64                                   uint64_t rfirst, uint64_t rlast)
65 {
66     vhost_log_chunk_t *log = dev->log->log;
67 
68     uint64_t start = MAX(mfirst, rfirst);
69     uint64_t end = MIN(mlast, rlast);
70     vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
71     vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
72     uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;
73 
74     if (end < start) {
75         return;
76     }
77     assert(end / VHOST_LOG_CHUNK < dev->log_size);
78     assert(start / VHOST_LOG_CHUNK < dev->log_size);
79 
80     for (; from < to; ++from) {
81         vhost_log_chunk_t log;
82         /* We first check with non-atomic: much cheaper,
83          * and we expect non-dirty to be the common case. */
84         if (!*from) {
85             addr += VHOST_LOG_CHUNK;
86             continue;
87         }
88         /* Data must be read atomically. We don't really need barrier semantics
89          * but it's easier to use atomic_* than roll our own. */
90         log = atomic_xchg(from, 0);
91         while (log) {
92             int bit = ctzl(log);
93             hwaddr page_addr;
94             hwaddr section_offset;
95             hwaddr mr_offset;
96             page_addr = addr + bit * VHOST_LOG_PAGE;
97             section_offset = page_addr - section->offset_within_address_space;
98             mr_offset = section_offset + section->offset_within_region;
99             memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
100             log &= ~(0x1ull << bit);
101         }
102         addr += VHOST_LOG_CHUNK;
103     }
104 }
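/*
 * Editor's sketch (not part of the original file, compiled out): how one
 * log chunk decodes into dirty page addresses, mirroring the loop above.
 * Per hw/virtio/vhost.h, a vhost_log_chunk_t is a bitmap in which bit N
 * marks guest page (chunk_base + N * VHOST_LOG_PAGE) as dirty, so each
 * chunk covers VHOST_LOG_CHUNK bytes of guest memory.
 */
#if 0
static void example_decode_chunk(uint64_t chunk_base, vhost_log_chunk_t chunk)
{
    /* e.g. chunk == 0x5 marks the pages at chunk_base and
     * chunk_base + 2 * VHOST_LOG_PAGE as dirty */
    while (chunk) {
        int bit = ctzl(chunk);          /* lowest remaining dirty page */
        uint64_t page = chunk_base + (uint64_t)bit * VHOST_LOG_PAGE;
        printf("dirty page at 0x%" PRIx64 "\n", page);
        chunk &= chunk - 1;             /* clear the lowest set bit */
    }
}
#endif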
105 
106 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
107                                    MemoryRegionSection *section,
108                                    hwaddr first,
109                                    hwaddr last)
110 {
111     int i;
112     hwaddr start_addr;
113     hwaddr end_addr;
114 
115     if (!dev->log_enabled || !dev->started) {
116         return 0;
117     }
118     start_addr = section->offset_within_address_space;
119     end_addr = range_get_last(start_addr, int128_get64(section->size));
120     start_addr = MAX(first, start_addr);
121     end_addr = MIN(last, end_addr);
122 
123     for (i = 0; i < dev->mem->nregions; ++i) {
124         struct vhost_memory_region *reg = dev->mem->regions + i;
125         vhost_dev_sync_region(dev, section, start_addr, end_addr,
126                               reg->guest_phys_addr,
127                               range_get_last(reg->guest_phys_addr,
128                                              reg->memory_size));
129     }
130     for (i = 0; i < dev->nvqs; ++i) {
131         struct vhost_virtqueue *vq = dev->vqs + i;
132         vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
133                               range_get_last(vq->used_phys, vq->used_size));
134     }
135     return 0;
136 }
137 
138 static void vhost_log_sync(MemoryListener *listener,
139                           MemoryRegionSection *section)
140 {
141     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
142                                          memory_listener);
143     vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
144 }
145 
146 static void vhost_log_sync_range(struct vhost_dev *dev,
147                                  hwaddr first, hwaddr last)
148 {
149     int i;
150     /* FIXME: this is N^2 in number of sections */
151     for (i = 0; i < dev->n_mem_sections; ++i) {
152         MemoryRegionSection *section = &dev->mem_sections[i];
153         vhost_sync_dirty_bitmap(dev, section, first, last);
154     }
155 }
156 
157 /* Assign/unassign. Keep an unsorted array of non-overlapping
158  * memory regions in dev->mem. */
159 static void vhost_dev_unassign_memory(struct vhost_dev *dev,
160                                       uint64_t start_addr,
161                                       uint64_t size)
162 {
163     int from, to, n = dev->mem->nregions;
164     /* Track overlapping/split regions for sanity checking. */
165     int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;
166 
167     for (from = 0, to = 0; from < n; ++from, ++to) {
168         struct vhost_memory_region *reg = dev->mem->regions + to;
169         uint64_t reglast;
170         uint64_t memlast;
171         uint64_t change;
172 
173         /* clone old region */
174         if (to != from) {
175             memcpy(reg, dev->mem->regions + from, sizeof *reg);
176         }
177 
178         /* No overlap is simple */
179         if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
180                             start_addr, size)) {
181             continue;
182         }
183 
184         /* A split only happens if the supplied region
185          * is in the middle of an existing one. Thus it cannot
186          * overlap with any other existing region. */
187         assert(!split);
188 
189         reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
190         memlast = range_get_last(start_addr, size);
191 
192         /* Remove whole region */
193         if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
194             --dev->mem->nregions;
195             --to;
196             ++overlap_middle;
197             continue;
198         }
199 
200         /* Shrink region */
201         if (memlast >= reglast) {
202             reg->memory_size = start_addr - reg->guest_phys_addr;
203             assert(reg->memory_size);
204             assert(!overlap_end);
205             ++overlap_end;
206             continue;
207         }
208 
209         /* Shift region */
210         if (start_addr <= reg->guest_phys_addr) {
211             change = memlast + 1 - reg->guest_phys_addr;
212             reg->memory_size -= change;
213             reg->guest_phys_addr += change;
214             reg->userspace_addr += change;
215             assert(reg->memory_size);
216             assert(!overlap_start);
217             ++overlap_start;
218             continue;
219         }
220 
221         /* This only happens if the supplied region
222          * is in the middle of an existing one. Thus it cannot
223          * overlap with any other existing region. */
224         assert(!overlap_start);
225         assert(!overlap_end);
226         assert(!overlap_middle);
227         /* Split region: shrink first part, shift second part. */
228         memcpy(dev->mem->regions + n, reg, sizeof *reg);
229         reg->memory_size = start_addr - reg->guest_phys_addr;
230         assert(reg->memory_size);
231         change = memlast + 1 - reg->guest_phys_addr;
232         reg = dev->mem->regions + n;
233         reg->memory_size -= change;
234         assert(reg->memory_size);
235         reg->guest_phys_addr += change;
236         reg->userspace_addr += change;
237         /* Never add more than 1 region */
238         assert(dev->mem->nregions == n);
239         ++dev->mem->nregions;
240         ++split;
241     }
242 }
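/*
 * Editor's worked example (hypothetical values, compiled out): a minimal
 * standalone model of the "split" case above.  Removing [0x2000, 0x3000)
 * from a region covering [0x1000, 0x5000) must leave [0x1000, 0x2000)
 * and [0x3000, 0x5000), with userspace_addr shifted by the same amount
 * as guest_phys_addr.
 */
#if 0
#include <assert.h>
#include <stdint.h>

struct mini_region { uint64_t gpa, size, uva; };

static void mini_split(struct mini_region r, uint64_t start, uint64_t size,
                       struct mini_region out[2])
{
    uint64_t change = start + size - r.gpa; /* memlast + 1 - guest_phys_addr */

    out[0] = r;
    out[0].size = start - r.gpa;            /* first part: shrink */
    out[1] = r;                             /* second part: shift */
    out[1].size -= change;
    out[1].gpa += change;
    out[1].uva += change;
}

int main(void)
{
    struct mini_region out[2];

    mini_split((struct mini_region){ 0x1000, 0x4000, 0x700000 },
               0x2000, 0x1000, out);
    assert(out[0].gpa == 0x1000 && out[0].size == 0x1000);
    assert(out[1].gpa == 0x3000 && out[1].size == 0x2000);
    assert(out[1].uva == 0x702000);
    return 0;
}
#endif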
243 
244 /* Called after unassign, so no regions overlap the given range. */
245 static void vhost_dev_assign_memory(struct vhost_dev *dev,
246                                     uint64_t start_addr,
247                                     uint64_t size,
248                                     uint64_t uaddr)
249 {
250     int from, to;
251     struct vhost_memory_region *merged = NULL;
252     for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
253         struct vhost_memory_region *reg = dev->mem->regions + to;
254         uint64_t prlast, urlast;
255         uint64_t pmlast, umlast;
256         uint64_t s, e, u;
257 
258         /* clone old region */
259         if (to != from) {
260             memcpy(reg, dev->mem->regions + from, sizeof *reg);
261         }
262         prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
263         pmlast = range_get_last(start_addr, size);
264         urlast = range_get_last(reg->userspace_addr, reg->memory_size);
265         umlast = range_get_last(uaddr, size);
266 
267         /* check for overlapping regions: should never happen. */
268         assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
269         /* Not an adjacent or overlapping region - do not merge. */
270         if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
271             (pmlast + 1 != reg->guest_phys_addr ||
272              umlast + 1 != reg->userspace_addr)) {
273             continue;
274         }
275 
276         if (dev->vhost_ops->vhost_backend_can_merge &&
277             !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
278                                                      reg->userspace_addr,
279                                                      reg->memory_size)) {
280             continue;
281         }
282 
283         if (merged) {
284             --to;
285             assert(to >= 0);
286         } else {
287             merged = reg;
288         }
289         u = MIN(uaddr, reg->userspace_addr);
290         s = MIN(start_addr, reg->guest_phys_addr);
291         e = MAX(pmlast, prlast);
292         uaddr = merged->userspace_addr = u;
293         start_addr = merged->guest_phys_addr = s;
294         size = merged->memory_size = e - s + 1;
295         assert(merged->memory_size);
296     }
297 
298     if (!merged) {
299         struct vhost_memory_region *reg = dev->mem->regions + to;
300         memset(reg, 0, sizeof *reg);
301         reg->memory_size = size;
302         assert(reg->memory_size);
303         reg->guest_phys_addr = start_addr;
304         reg->userspace_addr = uaddr;
305         ++to;
306     }
307     assert(to <= dev->mem->nregions + 1);
308     dev->mem->nregions = to;
309 }
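/*
 * Editor's note (compiled out sketch): two regions merge only when they
 * are adjacent (or overlap) in *both* address spaces; guest-physical
 * contiguity alone is not enough, because the merged region must still
 * be describable by a single (guest_phys_addr, userspace_addr,
 * memory_size) triple.  One direction of the adjacency test:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool mini_mergeable(uint64_t gpa_a, uint64_t uva_a, uint64_t size_a,
                           uint64_t gpa_b, uint64_t uva_b)
{
    /* does b start exactly where a ends, in both address spaces? */
    return gpa_a + size_a == gpa_b && uva_a + size_a == uva_b;
}
#endif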
310 
311 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
312 {
313     uint64_t log_size = 0;
314     int i;
315     for (i = 0; i < dev->mem->nregions; ++i) {
316         struct vhost_memory_region *reg = dev->mem->regions + i;
317         uint64_t last = range_get_last(reg->guest_phys_addr,
318                                        reg->memory_size);
319         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
320     }
321     for (i = 0; i < dev->nvqs; ++i) {
322         struct vhost_virtqueue *vq = dev->vqs + i;
323         uint64_t last = vq->used_phys + vq->used_size - 1;
324         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
325     }
326     return log_size;
327 }
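/*
 * Editor's worked example: on a 64-bit host a vhost_log_chunk_t covers
 * 64 pages of 0x1000 bytes, so VHOST_LOG_CHUNK == 0x40000.  A region
 * whose last byte sits at guest address 0xFFFFFFFF therefore needs
 *     0xFFFFFFFF / 0x40000 + 1 == 0x4000
 * chunks, i.e. logging a 4GiB guest takes a 128KiB log
 * (0x4000 chunks of 8 bytes each).
 */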
328 
329 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
330 {
331     struct vhost_log *log;
332     uint64_t logsize = size * sizeof(*(log->log));
333     int fd = -1;
334 
335     log = g_new0(struct vhost_log, 1);
336     if (share) {
337         log->log = qemu_memfd_alloc("vhost-log", logsize,
338                                     F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
339                                     &fd);
340         memset(log->log, 0, logsize);
341     } else {
342         log->log = g_malloc0(logsize);
343     }
344 
345     log->size = size;
346     log->refcnt = 1;
347     log->fd = fd;
348 
349     return log;
350 }
351 
352 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
353 {
354     struct vhost_log *log = share ? vhost_log_shm : vhost_log;
355 
356     if (!log || log->size != size) {
357         log = vhost_log_alloc(size, share);
358         if (share) {
359             vhost_log_shm = log;
360         } else {
361             vhost_log = log;
362         }
363     } else {
364         ++log->refcnt;
365     }
366 
367     return log;
368 }
369 
370 static void vhost_log_put(struct vhost_dev *dev, bool sync)
371 {
372     struct vhost_log *log = dev->log;
373 
374     if (!log) {
375         return;
376     }
379 
380     --log->refcnt;
381     if (log->refcnt == 0) {
382         /* Sync only the range covered by the old log */
383         if (dev->log_size && sync) {
384             vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
385         }
386 
387         if (vhost_log == log) {
388             g_free(log->log);
389             vhost_log = NULL;
390         } else if (vhost_log_shm == log) {
391             qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
392                             log->fd);
393             vhost_log_shm = NULL;
394         }
395 
396         g_free(log);
397     }
398 
399     dev->log = NULL;
400     dev->log_size = 0;
401 }
399 
400 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
401 {
402     return dev->vhost_ops->vhost_requires_shm_log &&
403            dev->vhost_ops->vhost_requires_shm_log(dev);
404 }
405 
406 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
407 {
408     struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
409     uint64_t log_base = (uintptr_t)log->log;
410     int r;
411 
412     /* Inform the backend of log switching; this must be done before
413        releasing the current log, to ensure no logging is lost. */
414     r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
415     if (r < 0) {
416         VHOST_OPS_DEBUG("vhost_set_log_base failed");
417     }
418 
419     vhost_log_put(dev, true);
420     dev->log = log;
421     dev->log_size = size;
422 }
423 
424 
425 static int vhost_verify_ring_part_mapping(void *part,
426                                           uint64_t part_addr,
427                                           uint64_t part_size,
428                                           uint64_t start_addr,
429                                           uint64_t size)
430 {
431     hwaddr l;
432     void *p;
433     int r = 0;
434 
435     if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
436         return 0;
437     }
438     l = part_size;
439     p = cpu_physical_memory_map(part_addr, &l, 1);
440     if (!p || l != part_size) {
441         r = -ENOMEM;
442     } else if (p != part) {
443         r = -EBUSY;
444     }
445     if (p) {
446         cpu_physical_memory_unmap(p, l, 0, 0);
447     }
448     return r;
448 }
449 
450 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
451                                       uint64_t start_addr,
452                                       uint64_t size)
453 {
454     int i, j;
455     int r = 0;
456     const char *part_name[] = {
457         "descriptor table",
458         "available ring",
459         "used ring"
460     };
461 
462     for (i = 0; i < dev->nvqs; ++i) {
463         struct vhost_virtqueue *vq = dev->vqs + i;
464 
465         j = 0;
466         r = vhost_verify_ring_part_mapping(vq->desc, vq->desc_phys,
467                                            vq->desc_size, start_addr, size);
468         if (r) {
469             break;
470         }
471 
472         j++;
473         r = vhost_verify_ring_part_mapping(vq->avail, vq->avail_phys,
474                                            vq->avail_size, start_addr, size);
475         if (r) {
476             break;
477         }
478 
479         j++;
480         r = vhost_verify_ring_part_mapping(vq->used, vq->used_phys,
481                                            vq->used_size, start_addr, size);
482         if (r) {
483             break;
484         }
485     }
486 
487     if (r == -ENOMEM) {
488         error_report("Unable to map %s for ring %d", part_name[j], i);
489     } else if (r == -EBUSY) {
490         error_report("%s relocated for ring %d", part_name[j], i);
491     }
492     return r;
493 }
494 
495 static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
496                                                       uint64_t start_addr,
497                                                       uint64_t size)
498 {
499     int i, n = dev->mem->nregions;
500     for (i = 0; i < n; ++i) {
501         struct vhost_memory_region *reg = dev->mem->regions + i;
502         if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
503                            start_addr, size)) {
504             return reg;
505         }
506     }
507     return NULL;
508 }
509 
510 static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
511                                  uint64_t start_addr,
512                                  uint64_t size,
513                                  uint64_t uaddr)
514 {
515     struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
516     uint64_t reglast;
517     uint64_t memlast;
518 
519     if (!reg) {
520         return true;
521     }
522 
523     reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
524     memlast = range_get_last(start_addr, size);
525 
526     /* Need to extend region? */
527     if (start_addr < reg->guest_phys_addr || memlast > reglast) {
528         return true;
529     }
530     /* userspace_addr changed? */
531     return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
532 }
533 
534 static void vhost_set_memory(MemoryListener *listener,
535                              MemoryRegionSection *section,
536                              bool add)
537 {
538     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
539                                          memory_listener);
540     hwaddr start_addr = section->offset_within_address_space;
541     ram_addr_t size = int128_get64(section->size);
542     bool log_dirty =
543         memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
544     int s = offsetof(struct vhost_memory, regions) +
545         (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
546     void *ram;
547 
548     dev->mem = g_realloc(dev->mem, s);
549 
550     if (log_dirty) {
551         add = false;
552     }
553 
554     assert(size);
555 
556     /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
557     ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
558     if (add) {
559         if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
560             /* Region exists with same address. Nothing to do. */
561             return;
562         }
563     } else {
564         if (!vhost_dev_find_reg(dev, start_addr, size)) {
565             /* Removing region that we don't access. Nothing to do. */
566             return;
567         }
568     }
569 
570     /* Remove old mapping for this memory, if any. */
571     vhost_dev_unassign_memory(dev, start_addr, size);
572     if (add) {
573         /* Add the given mapping, merging adjacent regions if any */
574         vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
575     }
578     dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
579     dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
580     dev->memory_changed = true;
581     used_memslots = dev->mem->nregions;
582 }
583 
584 static bool vhost_section(MemoryRegionSection *section)
585 {
586     return memory_region_is_ram(section->mr);
587 }
588 
589 static void vhost_begin(MemoryListener *listener)
590 {
591     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
592                                          memory_listener);
593     dev->mem_changed_end_addr = 0;
594     dev->mem_changed_start_addr = -1;
595 }
596 
597 static void vhost_commit(MemoryListener *listener)
598 {
599     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
600                                          memory_listener);
601     hwaddr start_addr = 0;
602     ram_addr_t size = 0;
603     uint64_t log_size;
604     int r;
605 
606     if (!dev->memory_changed) {
607         return;
608     }
609     if (!dev->started) {
610         return;
611     }
612     if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
613         return;
614     }
615 
616     /* dev->started was checked above, so verify mappings unconditionally */
617     start_addr = dev->mem_changed_start_addr;
618     size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;
619 
620     r = vhost_verify_ring_mappings(dev, start_addr, size);
621     assert(r >= 0);
623 
624     if (!dev->log_enabled) {
625         r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
626         if (r < 0) {
627             VHOST_OPS_DEBUG("vhost_set_mem_table failed");
628         }
629         dev->memory_changed = false;
630         return;
631     }
632     log_size = vhost_get_log_size(dev);
633     /* We allocate an extra 4K bytes to log,
634      * to reduce the number of reallocations. */
635 #define VHOST_LOG_BUFFER (0x1000 / sizeof(vhost_log_chunk_t))
636     /* To log more, must increase log size before table update. */
637     if (dev->log_size < log_size) {
638         vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
639     }
640     r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
641     if (r < 0) {
642         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
643     }
644     /* To log less, can only decrease log size after table update. */
645     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
646         vhost_dev_log_resize(dev, log_size);
647     }
648     dev->memory_changed = false;
649 }
650 
651 static void vhost_region_add(MemoryListener *listener,
652                              MemoryRegionSection *section)
653 {
654     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
655                                          memory_listener);
656 
657     if (!vhost_section(section)) {
658         return;
659     }
660 
661     ++dev->n_mem_sections;
662     dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
663                                 dev->n_mem_sections);
664     dev->mem_sections[dev->n_mem_sections - 1] = *section;
665     memory_region_ref(section->mr);
666     vhost_set_memory(listener, section, true);
667 }
668 
669 static void vhost_region_del(MemoryListener *listener,
670                              MemoryRegionSection *section)
671 {
672     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
673                                          memory_listener);
674     int i;
675 
676     if (!vhost_section(section)) {
677         return;
678     }
679 
680     vhost_set_memory(listener, section, false);
681     memory_region_unref(section->mr);
682     for (i = 0; i < dev->n_mem_sections; ++i) {
683         if (dev->mem_sections[i].offset_within_address_space
684             == section->offset_within_address_space) {
685             --dev->n_mem_sections;
686             memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
687                     (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
688             break;
689         }
690     }
691 }
692 
693 static void vhost_region_nop(MemoryListener *listener,
694                              MemoryRegionSection *section)
695 {
696 }
697 
698 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
699                                     struct vhost_virtqueue *vq,
700                                     unsigned idx, bool enable_log)
701 {
702     struct vhost_vring_addr addr = {
703         .index = idx,
704         .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
705         .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
706         .used_user_addr = (uint64_t)(unsigned long)vq->used,
707         .log_guest_addr = vq->used_phys,
708         .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
709     };
710     int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
711     if (r < 0) {
712         VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
713         return -errno;
714     }
715     return 0;
716 }
717 
718 static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
719 {
720     uint64_t features = dev->acked_features;
721     int r;
722     if (enable_log) {
723         features |= 0x1ULL << VHOST_F_LOG_ALL;
724     }
725     r = dev->vhost_ops->vhost_set_features(dev, features);
726     if (r < 0) {
727         VHOST_OPS_DEBUG("vhost_set_features failed");
728     }
729     return r < 0 ? -errno : 0;
730 }
731 
732 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
733 {
734     int r, i, idx;
735     r = vhost_dev_set_features(dev, enable_log);
736     if (r < 0) {
737         goto err_features;
738     }
739     for (i = 0; i < dev->nvqs; ++i) {
740         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
741         r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
742                                      enable_log);
743         if (r < 0) {
744             goto err_vq;
745         }
746     }
747     return 0;
748 err_vq:
749     for (; i >= 0; --i) {
750         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
751         vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
752                                  dev->log_enabled);
753     }
754     vhost_dev_set_features(dev, dev->log_enabled);
755 err_features:
756     return r;
757 }
758 
759 static int vhost_migration_log(MemoryListener *listener, int enable)
760 {
761     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
762                                          memory_listener);
763     int r;
764     if (!!enable == dev->log_enabled) {
765         return 0;
766     }
767     if (!dev->started) {
768         dev->log_enabled = enable;
769         return 0;
770     }
771     if (!enable) {
772         r = vhost_dev_set_log(dev, false);
773         if (r < 0) {
774             return r;
775         }
776         vhost_log_put(dev, false);
777     } else {
778         vhost_dev_log_resize(dev, vhost_get_log_size(dev));
779         r = vhost_dev_set_log(dev, true);
780         if (r < 0) {
781             return r;
782         }
783     }
784     dev->log_enabled = enable;
785     return 0;
786 }
787 
788 static void vhost_log_global_start(MemoryListener *listener)
789 {
790     int r;
791 
792     r = vhost_migration_log(listener, true);
793     if (r < 0) {
794         abort();
795     }
796 }
797 
798 static void vhost_log_global_stop(MemoryListener *listener)
799 {
800     int r;
801 
802     r = vhost_migration_log(listener, false);
803     if (r < 0) {
804         abort();
805     }
806 }
807 
808 static void vhost_log_start(MemoryListener *listener,
809                             MemoryRegionSection *section,
810                             int old, int new)
811 {
812     /* FIXME: implement */
813 }
814 
815 static void vhost_log_stop(MemoryListener *listener,
816                            MemoryRegionSection *section,
817                            int old, int new)
818 {
819     /* FIXME: implement */
820 }
821 
822 /* The vhost driver natively knows how to handle the vrings of non
823  * cross-endian legacy devices and modern devices. Only legacy devices
824  * exposed to a bi-endian guest may require the vhost driver to use a
825  * specific endianness.
826  */
827 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
828 {
829     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
830         return false;
831     }
832 #ifdef HOST_WORDS_BIGENDIAN
833     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
834 #else
835     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
836 #endif
837 }
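/*
 * Editor's sketch (compiled out): the check above reduces to "legacy
 * device whose ring endianness differs from the host's", since modern
 * (VIRTIO_F_VERSION_1) rings are always little-endian while legacy
 * rings are guest-native.
 */
#if 0
#include <stdbool.h>

static bool mini_needs_vring_endian(bool host_big_endian,
                                    bool device_big_endian,
                                    bool version_1)
{
    if (version_1) {
        return false;               /* modern rings: always little-endian */
    }
    return host_big_endian != device_big_endian;
}
#endif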
838 
839 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
840                                                    bool is_big_endian,
841                                                    int vhost_vq_index)
842 {
843     struct vhost_vring_state s = {
844         .index = vhost_vq_index,
845         .num = is_big_endian
846     };
847 
848     if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
849         return 0;
850     }
851 
852     VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
853     if (errno == ENOTTY) {
854         error_report("vhost does not support cross-endian");
855         return -ENOSYS;
856     }
857 
858     return -errno;
859 }
860 
861 static int vhost_virtqueue_start(struct vhost_dev *dev,
862                                 struct VirtIODevice *vdev,
863                                 struct vhost_virtqueue *vq,
864                                 unsigned idx)
865 {
866     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
867     VirtioBusState *vbus = VIRTIO_BUS(qbus);
868     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
869     hwaddr s, l, a;
870     int r;
871     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
872     struct vhost_vring_file file = {
873         .index = vhost_vq_index
874     };
875     struct vhost_vring_state state = {
876         .index = vhost_vq_index
877     };
878     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
879 
880 
881     vq->num = state.num = virtio_queue_get_num(vdev, idx);
882     r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
883     if (r) {
884         VHOST_OPS_DEBUG("vhost_set_vring_num failed");
885         return -errno;
886     }
887 
888     state.num = virtio_queue_get_last_avail_idx(vdev, idx);
889     r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
890     if (r) {
891         VHOST_OPS_DEBUG("vhost_set_vring_base failed");
892         return -errno;
893     }
894 
895     if (vhost_needs_vring_endian(vdev)) {
896         r = vhost_virtqueue_set_vring_endian_legacy(dev,
897                                                     virtio_is_big_endian(vdev),
898                                                     vhost_vq_index);
899         if (r) {
900             return -errno;
901         }
902     }
903 
904     vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
905     vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
906     vq->desc = cpu_physical_memory_map(a, &l, 0);
907     if (!vq->desc || l != s) {
908         r = -ENOMEM;
909         goto fail_alloc_desc;
910     }
911     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
912     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
913     vq->avail = cpu_physical_memory_map(a, &l, 0);
914     if (!vq->avail || l != s) {
915         r = -ENOMEM;
916         goto fail_alloc_avail;
917     }
918     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
919     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
920     vq->used = cpu_physical_memory_map(a, &l, 1);
921     if (!vq->used || l != s) {
922         r = -ENOMEM;
923         goto fail_alloc_used;
924     }
925 
926     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
927     if (r < 0) {
928         r = -errno;
929         goto fail_alloc;
930     }
931 
932     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
933     r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
934     if (r) {
935         VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
936         r = -errno;
937         goto fail_kick;
938     }
939 
940     /* Clear and discard previous events if any. */
941     event_notifier_test_and_clear(&vq->masked_notifier);
942 
943     /* Init vring in unmasked state, unless guest_notifier_mask
944      * will do it later.
945      */
946     if (!vdev->use_guest_notifier_mask) {
947         /* TODO: check and handle errors. */
948         vhost_virtqueue_mask(dev, vdev, idx, false);
949     }
950 
951     if (k->query_guest_notifiers &&
952         k->query_guest_notifiers(qbus->parent) &&
953         virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
954         file.fd = -1;
955         r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
956         if (r) {
957             goto fail_vector;
958         }
959     }
960 
961     return 0;
962 
963 fail_vector:
964 fail_kick:
965 fail_alloc:
966     cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
967                               0, 0);
968 fail_alloc_used:
969     cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
970                               0, 0);
971 fail_alloc_avail:
972     cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
973                               0, 0);
974 fail_alloc_desc:
975     return r;
976 }
977 
978 static void vhost_virtqueue_stop(struct vhost_dev *dev,
979                                     struct VirtIODevice *vdev,
980                                     struct vhost_virtqueue *vq,
981                                     unsigned idx)
982 {
983     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
984     struct vhost_vring_state state = {
985         .index = vhost_vq_index,
986     };
987     int r;
988 
989     r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
990     if (r < 0) {
991         VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
992     } else {
993         virtio_queue_set_last_avail_idx(vdev, idx, state.num);
994     }
995     virtio_queue_invalidate_signalled_used(vdev, idx);
996     virtio_queue_update_used_idx(vdev, idx);
997 
998     /* In the cross-endian case, we need to reset the vring endianness
999      * back to native, as legacy devices expect it by default.
1000      */
1001     if (vhost_needs_vring_endian(vdev)) {
1002         vhost_virtqueue_set_vring_endian_legacy(dev,
1003                                                 !virtio_is_big_endian(vdev),
1004                                                 vhost_vq_index);
1005     }
1006 
1007     cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
1008                               1, virtio_queue_get_used_size(vdev, idx));
1009     cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
1010                               0, virtio_queue_get_avail_size(vdev, idx));
1011     cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
1012                               0, virtio_queue_get_desc_size(vdev, idx));
1013 }
1014 
1015 static void vhost_eventfd_add(MemoryListener *listener,
1016                               MemoryRegionSection *section,
1017                               bool match_data, uint64_t data, EventNotifier *e)
1018 {
1019 }
1020 
1021 static void vhost_eventfd_del(MemoryListener *listener,
1022                               MemoryRegionSection *section,
1023                               bool match_data, uint64_t data, EventNotifier *e)
1024 {
1025 }
1026 
1027 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1028                                                 int n, uint32_t timeout)
1029 {
1030     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1031     struct vhost_vring_state state = {
1032         .index = vhost_vq_index,
1033         .num = timeout,
1034     };
1035     int r;
1036 
1037     if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1038         return -EINVAL;
1039     }
1040 
1041     r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1042     if (r) {
1043         VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1044         return r;
1045     }
1046 
1047     return 0;
1048 }
1049 
1050 static int vhost_virtqueue_init(struct vhost_dev *dev,
1051                                 struct vhost_virtqueue *vq, int n)
1052 {
1053     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1054     struct vhost_vring_file file = {
1055         .index = vhost_vq_index,
1056     };
1057     int r = event_notifier_init(&vq->masked_notifier, 0);
1058     if (r < 0) {
1059         return r;
1060     }
1061 
1062     file.fd = event_notifier_get_fd(&vq->masked_notifier);
1063     r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1064     if (r) {
1065         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1066         r = -errno;
1067         goto fail_call;
1068     }
1069     return 0;
1070 fail_call:
1071     event_notifier_cleanup(&vq->masked_notifier);
1072     return r;
1073 }
1074 
1075 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1076 {
1077     event_notifier_cleanup(&vq->masked_notifier);
1078 }
1079 
1080 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1081                    VhostBackendType backend_type, uint32_t busyloop_timeout)
1082 {
1083     uint64_t features;
1084     int i, r, n_initialized_vqs = 0;
1085 
1086     hdev->migration_blocker = NULL;
1087 
1088     r = vhost_set_backend_type(hdev, backend_type);
1089     assert(r >= 0);
1090 
1091     r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1092     if (r < 0) {
1093         goto fail;
1094     }
1095 
1096     if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1097         error_report("vhost backend memory slots limit is less"
1098                 " than the current number of present memory slots");
1099         r = -1;
1100         goto fail;
1101     }
1102 
1103     r = hdev->vhost_ops->vhost_set_owner(hdev);
1104     if (r < 0) {
1105         VHOST_OPS_DEBUG("vhost_set_owner failed");
1106         goto fail;
1107     }
1108 
1109     r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1110     if (r < 0) {
1111         VHOST_OPS_DEBUG("vhost_get_features failed");
1112         goto fail;
1113     }
1114 
1115     for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1116         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1117         if (r < 0) {
1118             goto fail;
1119         }
1120     }
1121 
1122     if (busyloop_timeout) {
1123         for (i = 0; i < hdev->nvqs; ++i) {
1124             r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1125                                                      busyloop_timeout);
1126             if (r < 0) {
1127                 goto fail_busyloop;
1128             }
1129         }
1130     }
1131 
1132     hdev->features = features;
1133 
1134     hdev->memory_listener = (MemoryListener) {
1135         .begin = vhost_begin,
1136         .commit = vhost_commit,
1137         .region_add = vhost_region_add,
1138         .region_del = vhost_region_del,
1139         .region_nop = vhost_region_nop,
1140         .log_start = vhost_log_start,
1141         .log_stop = vhost_log_stop,
1142         .log_sync = vhost_log_sync,
1143         .log_global_start = vhost_log_global_start,
1144         .log_global_stop = vhost_log_global_stop,
1145         .eventfd_add = vhost_eventfd_add,
1146         .eventfd_del = vhost_eventfd_del,
1147         .priority = 10
1148     };
1149 
1150     if (hdev->migration_blocker == NULL) {
1151         if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1152             error_setg(&hdev->migration_blocker,
1153                        "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1154         } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
1155             error_setg(&hdev->migration_blocker,
1156                        "Migration disabled: failed to allocate shared memory");
1157         }
1158     }
1159 
1160     if (hdev->migration_blocker != NULL) {
1161         migrate_add_blocker(hdev->migration_blocker);
1162     }
1163 
1164     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1165     hdev->n_mem_sections = 0;
1166     hdev->mem_sections = NULL;
1167     hdev->log = NULL;
1168     hdev->log_size = 0;
1169     hdev->log_enabled = false;
1170     hdev->started = false;
1171     hdev->memory_changed = false;
1172     memory_listener_register(&hdev->memory_listener, &address_space_memory);
1173     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1174     return 0;
1175 
1176 fail_busyloop:
1177     while (--i >= 0) {
1178         vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1179     }
1180 fail:
1181     hdev->nvqs = n_initialized_vqs;
1182     vhost_dev_cleanup(hdev);
1183     return r;
1184 }
1185 
1186 void vhost_dev_cleanup(struct vhost_dev *hdev)
1187 {
1188     int i;
1189 
1190     for (i = 0; i < hdev->nvqs; ++i) {
1191         vhost_virtqueue_cleanup(hdev->vqs + i);
1192     }
1193     if (hdev->mem) {
1194         /* those are only safe after successful init */
1195         memory_listener_unregister(&hdev->memory_listener);
1196         QLIST_REMOVE(hdev, entry);
1197     }
1198     if (hdev->migration_blocker) {
1199         migrate_del_blocker(hdev->migration_blocker);
1200         error_free(hdev->migration_blocker);
1201     }
1202     g_free(hdev->mem);
1203     g_free(hdev->mem_sections);
1204     if (hdev->vhost_ops) {
1205         hdev->vhost_ops->vhost_backend_cleanup(hdev);
1206     }
1207     assert(!hdev->log);
1208 
1209     memset(hdev, 0, sizeof(struct vhost_dev));
1210 }
1211 
1212 /* Stop processing guest IO notifications in qemu.
1213  * Start processing them in vhost in kernel.
1214  */
1215 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1216 {
1217     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1218     int i, r, e;
1219 
1220     /* We will pass the notifiers to the kernel, make sure that QEMU
1221      * doesn't interfere.
1222      */
1223     r = virtio_device_grab_ioeventfd(vdev);
1224     if (r < 0) {
1225         error_report("binding does not support host notifiers");
1226         goto fail;
1227     }
1228 
1229     for (i = 0; i < hdev->nvqs; ++i) {
1230         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1231                                          true);
1232         if (r < 0) {
1233             error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1234             goto fail_vq;
1235         }
1236     }
1237 
1238     return 0;
1239 fail_vq:
1240     while (--i >= 0) {
1241         e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1242                                          false);
1243         if (e < 0) {
1244             error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
1245         }
1246         assert(e >= 0);
1247     }
1248     virtio_device_release_ioeventfd(vdev);
1249 fail:
1250     return r;
1251 }
1252 
1253 /* Stop processing guest IO notifications in vhost.
1254  * Start processing them in qemu.
1255  * This might actually run the qemu handlers right away,
1256  * so virtio in qemu must be completely setup when this is called.
1257  */
1258 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1259 {
1260     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1261     int i, r;
1262 
1263     for (i = 0; i < hdev->nvqs; ++i) {
1264         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1265                                          false);
1266         if (r < 0) {
1267             error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1268         }
1269         assert(r >= 0);
1270     }
1271     virtio_device_release_ioeventfd(vdev);
1272 }
1273 
1274 /* Test and clear event pending status.
1275  * Should be called after unmask to avoid losing events.
1276  */
1277 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1278 {
1279     struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1280     assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1281     return event_notifier_test_and_clear(&vq->masked_notifier);
1282 }
1283 
1284 /* Mask/unmask events from this vq. */
1285 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1286                          bool mask)
1287 {
1288     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1289     int r, index = n - hdev->vq_index;
1290     struct vhost_vring_file file;
1291 
1292     /* should only be called after backend is connected */
1293     assert(hdev->vhost_ops);
1294 
1295     if (mask) {
1296         assert(vdev->use_guest_notifier_mask);
1297         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1298     } else {
1299         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1300     }
1301 
1302     file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1303     r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1304     if (r < 0) {
1305         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1306     }
1307 }
1308 
1309 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1310                             uint64_t features)
1311 {
1312     const int *bit = feature_bits;
1313     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1314         uint64_t bit_mask = (1ULL << *bit);
1315         if (!(hdev->features & bit_mask)) {
1316             features &= ~bit_mask;
1317         }
1318         bit++;
1319     }
1320     return features;
1321 }
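/*
 * Editor's usage sketch (compiled out; array contents are illustrative):
 * callers such as hw/net/vhost_net.c pass a VHOST_INVALID_FEATURE_BIT
 * terminated array naming the feature bits their backend is allowed to
 * negotiate; vhost_get_features() clears any listed bit the backend did
 * not advertise and passes everything else through untouched.
 */
#if 0
static const int example_feature_bits[] = {
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VHOST_INVALID_FEATURE_BIT
};

static uint64_t example_filter_features(struct vhost_dev *hdev,
                                        uint64_t guest_features)
{
    return vhost_get_features(hdev, example_feature_bits, guest_features);
}
#endif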
1322 
1323 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1324                         uint64_t features)
1325 {
1326     const int *bit = feature_bits;
1327     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1328         uint64_t bit_mask = (1ULL << *bit);
1329         if (features & bit_mask) {
1330             hdev->acked_features |= bit_mask;
1331         }
1332         bit++;
1333     }
1334 }
1335 
1336 /* Host notifiers must be enabled at this point. */
1337 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1338 {
1339     int i, r;
1340 
1341     /* should only be called after backend is connected */
1342     assert(hdev->vhost_ops);
1343 
1344     hdev->started = true;
1345 
1346     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1347     if (r < 0) {
1348         goto fail_features;
1349     }
1350     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1351     if (r < 0) {
1352         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1353         r = -errno;
1354         goto fail_mem;
1355     }
1356     for (i = 0; i < hdev->nvqs; ++i) {
1357         r = vhost_virtqueue_start(hdev,
1358                                   vdev,
1359                                   hdev->vqs + i,
1360                                   hdev->vq_index + i);
1361         if (r < 0) {
1362             goto fail_vq;
1363         }
1364     }
1365 
1366     if (hdev->log_enabled) {
1367         uint64_t log_base;
1368 
1369         hdev->log_size = vhost_get_log_size(hdev);
1370         hdev->log = vhost_log_get(hdev->log_size,
1371                                   vhost_dev_log_is_shared(hdev));
1372         log_base = (uintptr_t)hdev->log->log;
1373         r = hdev->vhost_ops->vhost_set_log_base(hdev,
1374                                                 hdev->log_size ? log_base : 0,
1375                                                 hdev->log);
1376         if (r < 0) {
1377             VHOST_OPS_DEBUG("vhost_set_log_base failed");
1378             r = -errno;
1379             goto fail_log;
1380         }
1381     }
1382 
1383     return 0;
1384 fail_log:
1385     vhost_log_put(hdev, false);
1386 fail_vq:
1387     while (--i >= 0) {
1388         vhost_virtqueue_stop(hdev,
1389                              vdev,
1390                              hdev->vqs + i,
1391                              hdev->vq_index + i);
1392     }
1393     i = hdev->nvqs;
1394 fail_mem:
1395 fail_features:
1396 
1397     hdev->started = false;
1398     return r;
1399 }
1400 
1401 /* Host notifiers must be enabled at this point. */
1402 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1403 {
1404     int i;
1405 
1406     /* should only be called after backend is connected */
1407     assert(hdev->vhost_ops);
1408 
1409     for (i = 0; i < hdev->nvqs; ++i) {
1410         vhost_virtqueue_stop(hdev,
1411                              vdev,
1412                              hdev->vqs + i,
1413                              hdev->vq_index + i);
1414     }
1415 
1416     vhost_log_put(hdev, true);
1417     hdev->started = false;
1418 }
1419 
1420 int vhost_net_set_backend(struct vhost_dev *hdev,
1421                           struct vhost_vring_file *file)
1422 {
1423     if (hdev->vhost_ops->vhost_net_set_backend) {
1424         return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1425     }
1426 
1427     return -1;
1428 }
1429