/* xref: /qemu/hw/virtio/vhost.c (revision f1f9e6c5961ffb36fd4a81cd7edcded7bfad2ab2) */
/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif
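
/*
 * Usage sketch: wrap backend calls that set errno on failure, e.g.
 *
 *     r = dev->vhost_ops->vhost_set_owner(dev);
 *     if (r < 0) {
 *         VHOST_OPS_DEBUG("vhost_set_owner failed");
 *     }
 *
 * With _VHOST_DEBUG defined this reports the message together with
 * strerror(errno); otherwise it compiles to nothing.
 */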

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

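/*
 * The dirty log is an array of vhost_log_chunk_t; with the definitions
 * in hw/virtio/vhost.h (64-bit chunks, VHOST_LOG_PAGE = 0x1000), each
 * bit covers one 4KiB page of guest memory and each chunk covers
 * VHOST_LOG_CHUNK = 64 * 0x1000 = 256KiB.  For example, bit 3 set in
 * the chunk for addr 0x40000 marks the page at
 * 0x40000 + 3 * 0x1000 = 0x43000 as dirty.
 */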
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
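
/*
 * Worked example for vhost_dev_unassign_memory(): removing
 * [0x2000, 0x3000) from an existing region [0x1000, 0x5000) takes the
 * split path above and leaves two regions, [0x1000, 0x2000) and
 * [0x3000, 0x5000); removing [0x4000, 0x6000) from the same region
 * takes the shrink path instead and leaves [0x1000, 0x4000).
 */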

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
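
/*
 * Worked example for vhost_dev_assign_memory(): a new mapping is merged
 * with an existing region only when both the guest-physical and the
 * userspace ranges are exactly adjacent (and the backend, if it
 * implements vhost_backend_can_merge, allows it).  Adding
 * GPA [0x2000, 0x3000) at uaddr 0x7f0000002000 next to a region
 * covering GPA [0x1000, 0x2000) at uaddr 0x7f0000001000 yields a single
 * region for GPA [0x1000, 0x3000); if either range is discontiguous, a
 * new region entry is appended instead.
 */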

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
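
/*
 * Worked example for vhost_get_log_size(), assuming
 * VHOST_LOG_CHUNK = 0x40000 (256KiB of guest memory per chunk): a
 * single region covering [0, 1MiB) ends at 0xfffff, so
 * log_size = 0xfffff / 0x40000 + 1 = 4 chunks, i.e. 32 bytes of log.
 * Note the result is in chunks, not bytes.
 */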

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    /* Only clear these after the final sync above, which still needs
     * dev->log_size to know how much of the old log to flush. */
    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}
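/*
 * Check that a ring part vhost is using still maps where it did before:
 * returns 0 if the part lies outside the changed range or is unchanged,
 * -ENOMEM if it can no longer be mapped at its full size, and -EBUSY if
 * it now maps to a different host address.
 */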
static int vhost_verify_ring_part_mapping(void *part,
                                          uint64_t part_addr,
                                          uint64_t part_size,
                                          uint64_t start_addr,
                                          uint64_t size)
{
    hwaddr l;
    void *p;
    int r = 0;

    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
        return 0;
    }
    l = part_size;
    p = cpu_physical_memory_map(part_addr, &l, 1);
    if (!p || l != part_size) {
        r = -ENOMEM;
    }
    if (p != part) {
        r = -EBUSY;
    }
    cpu_physical_memory_unmap(p, l, 0, 0);
    return r;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    /* Stop at the first part that fails, so i and j identify it below. */
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        j = 0;
        r = vhost_verify_ring_part_mapping(vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

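/*
 * Add (add == true) or remove a section from the vhost memory table.
 * A section with dirty logging enabled for a client other than
 * migration (e.g. VGA) is treated as a removal: the backend would
 * write to it without updating those bitmaps, so such memory must not
 * be handed to vhost.
 */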
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

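/*
 * How the listener callbacks below fit together: log_global_start fires
 * when migration begins; via vhost_migration_log() it allocates a log
 * sized by vhost_get_log_size() and re-programs features and ring
 * addresses with VHOST_F_LOG_ALL set.  While migration runs, log_sync
 * (vhost_log_sync above) folds the backend's write log into QEMU's
 * dirty bitmap.  log_global_stop undoes all of this when migration
 * completes or is cancelled.
 */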
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}
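
/* Example: a big-endian guest (e.g. ppc64) driving a legacy virtio
 * device on a little-endian x86-64 host makes the check above return
 * true, and the ring endianness must then be set explicitly via
 * vhost_virtqueue_set_vring_endian_legacy() below. */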

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

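/*
 * Bring one virtqueue up in the backend: program the ring size and the
 * last-avail index, fix up legacy cross-endian handling if needed, map
 * the descriptor, avail and used rings into QEMU's address space, hand
 * their addresses to the backend, wire up the kick eventfd, and, for
 * queues without an MSI-X vector, disable the call eventfd.
 */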
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness
     * back to native, which is what legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;

    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

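/*
 * Typical lifecycle, as driven by a caller such as vhost_net (a sketch,
 * not a contract):
 *
 *     vhost_dev_init(hdev, opaque, backend_type, busyloop_timeout);
 *     vhost_dev_enable_notifiers(hdev, vdev);
 *     vhost_dev_start(hdev, vdev);
 *     ...device runs in the vhost backend...
 *     vhost_dev_stop(hdev, vdev);
 *     vhost_dev_disable_notifiers(hdev, vdev);
 *     vhost_dev_cleanup(hdev);
 */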
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;

    if (!k->ioeventfd_assign) {
        error_report("binding does not support host notifiers");
        r = -ENOSYS;
        goto fail;
    }

    virtio_device_stop_ioeventfd(vdev);
    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
    }
    virtio_device_start_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
    virtio_device_start_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}
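
/*
 * While a queue is masked, the backend's call eventfd is redirected to
 * vq->masked_notifier rather than the guest notifier, so interrupts are
 * parked instead of being delivered; vhost_virtqueue_pending() above
 * then reports (and clears) whether anything arrived while masked.
 */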

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

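/*
 * Usage sketch for the two helpers above; the exact bit table belongs
 * to the caller (vhost_net, for instance, keeps one along these lines):
 *
 *     static const int feature_bits[] = {
 *         VIRTIO_F_NOTIFY_ON_EMPTY,
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VIRTIO_RING_F_EVENT_IDX,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *
 *     features = vhost_get_features(hdev, feature_bits, features);
 *     ...
 *     vhost_ack_features(hdev, feature_bits, features);
 */
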
1347 
1348 /* Host notifiers must be enabled at this point. */
1349 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1350 {
1351     int i, r;
1352 
1353     /* should only be called after backend is connected */
1354     assert(hdev->vhost_ops);
1355 
1356     hdev->started = true;
1357 
1358     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1359     if (r < 0) {
1360         goto fail_features;
1361     }
1362     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1363     if (r < 0) {
1364         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1365         r = -errno;
1366         goto fail_mem;
1367     }
1368     for (i = 0; i < hdev->nvqs; ++i) {
1369         r = vhost_virtqueue_start(hdev,
1370                                   vdev,
1371                                   hdev->vqs + i,
1372                                   hdev->vq_index + i);
1373         if (r < 0) {
1374             goto fail_vq;
1375         }
1376     }
1377 
1378     if (hdev->log_enabled) {
1379         uint64_t log_base;
1380 
1381         hdev->log_size = vhost_get_log_size(hdev);
1382         hdev->log = vhost_log_get(hdev->log_size,
1383                                   vhost_dev_log_is_shared(hdev));
1384         log_base = (uintptr_t)hdev->log->log;
1385         r = hdev->vhost_ops->vhost_set_log_base(hdev,
1386                                                 hdev->log_size ? log_base : 0,
1387                                                 hdev->log);
1388         if (r < 0) {
1389             VHOST_OPS_DEBUG("vhost_set_log_base failed");
1390             r = -errno;
1391             goto fail_log;
1392         }
1393     }
1394 
1395     return 0;
1396 fail_log:
1397     vhost_log_put(hdev, false);
1398 fail_vq:
1399     while (--i >= 0) {
1400         vhost_virtqueue_stop(hdev,
1401                              vdev,
1402                              hdev->vqs + i,
1403                              hdev->vq_index + i);
1404     }
1405     i = hdev->nvqs;
1406 fail_mem:
1407 fail_features:
1408 
1409     hdev->started = false;
1410     return r;
1411 }

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}
1441