xref: /qemu/hw/virtio/vhost.c (revision 61b01bbc6c27f06f4732aedcb6554e135f41b760)
1 /*
2  * vhost support
3  *
4  * Copyright Red Hat, Inc. 2010
5  *
6  * Authors:
7  *  Michael S. Tsirkin <mst@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/hw.h"
20 #include "qemu/atomic.h"
21 #include "qemu/range.h"
22 #include "qemu/error-report.h"
23 #include "qemu/memfd.h"
24 #include <linux/vhost.h>
25 #include "exec/address-spaces.h"
26 #include "hw/virtio/virtio-bus.h"
27 #include "hw/virtio/virtio-access.h"
28 #include "migration/blocker.h"
29 #include "sysemu/dma.h"
30 #include "trace.h"
31 
32 /* enabled until disconnected backend stabilizes */
33 #define _VHOST_DEBUG 1
34 
35 #ifdef _VHOST_DEBUG
36 #define VHOST_OPS_DEBUG(fmt, ...) \
37     do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
38                       strerror(errno), errno); } while (0)
39 #else
40 #define VHOST_OPS_DEBUG(fmt, ...) \
41     do { } while (0)
42 #endif
43 
44 static struct vhost_log *vhost_log;
45 static struct vhost_log *vhost_log_shm;
46 
47 static unsigned int used_memslots;
48 static QLIST_HEAD(, vhost_dev) vhost_devices =
49     QLIST_HEAD_INITIALIZER(vhost_devices);
50 
51 bool vhost_has_free_slot(void)
52 {
53     unsigned int slots_limit = ~0U;
54     struct vhost_dev *hdev;
55 
56     QLIST_FOREACH(hdev, &vhost_devices, entry) {
57         unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
58         slots_limit = MIN(slots_limit, r);
59     }
60     return slots_limit > used_memslots;
61 }
62 
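/* Scan the dirty log for the intersection of [mfirst, mlast] (the
 * section's guest-physical range) and [rfirst, rlast] (a memory region
 * or used ring).  Each non-zero chunk is atomically fetched and cleared,
 * and every set bit marks one VHOST_LOG_PAGE-sized page dirty in the
 * section's MemoryRegion.
 */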
63 static void vhost_dev_sync_region(struct vhost_dev *dev,
64                                   MemoryRegionSection *section,
65                                   uint64_t mfirst, uint64_t mlast,
66                                   uint64_t rfirst, uint64_t rlast)
67 {
68     vhost_log_chunk_t *log = dev->log->log;
69 
70     uint64_t start = MAX(mfirst, rfirst);
71     uint64_t end = MIN(mlast, rlast);
72     vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
73     vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
74     uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);
75 
76     if (end < start) {
77         return;
78     }
79     assert(end / VHOST_LOG_CHUNK < dev->log_size);
80     assert(start / VHOST_LOG_CHUNK < dev->log_size);
81 
82     for (; from < to; ++from) {
83         vhost_log_chunk_t log;
84         /* We first check with non-atomic: much cheaper,
85          * and we expect non-dirty to be the common case. */
86         if (!*from) {
87             addr += VHOST_LOG_CHUNK;
88             continue;
89         }
90         /* Data must be read atomically. We don't really need barrier semantics
91          * but it's easier to use atomic_* than roll our own. */
92         log = atomic_xchg(from, 0);
93         while (log) {
94             int bit = ctzl(log);
95             hwaddr page_addr;
96             hwaddr section_offset;
97             hwaddr mr_offset;
98             page_addr = addr + bit * VHOST_LOG_PAGE;
99             section_offset = page_addr - section->offset_within_address_space;
100             mr_offset = section_offset + section->offset_within_region;
101             memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
102             log &= ~(0x1ull << bit);
103         }
104         addr += VHOST_LOG_CHUNK;
105     }
106 }
107 
108 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
109                                    MemoryRegionSection *section,
110                                    hwaddr first,
111                                    hwaddr last)
112 {
113     int i;
114     hwaddr start_addr;
115     hwaddr end_addr;
116 
117     if (!dev->log_enabled || !dev->started) {
118         return 0;
119     }
120     start_addr = section->offset_within_address_space;
121     end_addr = range_get_last(start_addr, int128_get64(section->size));
122     start_addr = MAX(first, start_addr);
123     end_addr = MIN(last, end_addr);
124 
125     for (i = 0; i < dev->mem->nregions; ++i) {
126         struct vhost_memory_region *reg = dev->mem->regions + i;
127         vhost_dev_sync_region(dev, section, start_addr, end_addr,
128                               reg->guest_phys_addr,
129                               range_get_last(reg->guest_phys_addr,
130                                              reg->memory_size));
131     }
132     for (i = 0; i < dev->nvqs; ++i) {
133         struct vhost_virtqueue *vq = dev->vqs + i;
134         vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
135                               range_get_last(vq->used_phys, vq->used_size));
136     }
137     return 0;
138 }
139 
140 static void vhost_log_sync(MemoryListener *listener,
141                           MemoryRegionSection *section)
142 {
143     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
144                                          memory_listener);
145     vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
146 }
147 
148 static void vhost_log_sync_range(struct vhost_dev *dev,
149                                  hwaddr first, hwaddr last)
150 {
151     int i;
152     /* FIXME: this is N^2 in number of sections */
153     for (i = 0; i < dev->n_mem_sections; ++i) {
154         MemoryRegionSection *section = &dev->mem_sections[i];
155         vhost_sync_dirty_bitmap(dev, section, first, last);
156     }
157 }
158 
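/* Number of vhost_log_chunk_t entries the dirty log needs to cover the
 * highest guest-physical address of any memory region or used ring.
 */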
159 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
160 {
161     uint64_t log_size = 0;
162     int i;
163     for (i = 0; i < dev->mem->nregions; ++i) {
164         struct vhost_memory_region *reg = dev->mem->regions + i;
165         uint64_t last = range_get_last(reg->guest_phys_addr,
166                                        reg->memory_size);
167         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
168     }
169     for (i = 0; i < dev->nvqs; ++i) {
170         struct vhost_virtqueue *vq = dev->vqs + i;
171         uint64_t last = vq->used_phys + vq->used_size - 1;
172         log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
173     }
174     return log_size;
175 }
176 
177 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
178 {
179     Error *err = NULL;
180     struct vhost_log *log;
181     uint64_t logsize = size * sizeof(*(log->log));
182     int fd = -1;
183 
184     log = g_new0(struct vhost_log, 1);
185     if (share) {
186         log->log = qemu_memfd_alloc("vhost-log", logsize,
187                                     F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
188                                     &fd, &err);
189         if (err) {
190             error_report_err(err);
191             g_free(log);
192             return NULL;
193         }
194         memset(log->log, 0, logsize);
195     } else {
196         log->log = g_malloc0(logsize);
197     }
198 
199     log->size = size;
200     log->refcnt = 1;
201     log->fd = fd;
202 
203     return log;
204 }
205 
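/* Return the global dirty log (memfd-backed if 'share' is set, anonymous
 * otherwise), reusing the cached one when it already has the requested
 * size.  The caller owns one reference, dropped with vhost_log_put().
 */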
206 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
207 {
208     struct vhost_log *log = share ? vhost_log_shm : vhost_log;
209 
210     if (!log || log->size != size) {
211         log = vhost_log_alloc(size, share);
212         if (share) {
213             vhost_log_shm = log;
214         } else {
215             vhost_log = log;
216         }
217     } else {
218         ++log->refcnt;
219     }
220 
221     return log;
222 }
223 
224 static void vhost_log_put(struct vhost_dev *dev, bool sync)
225 {
226     struct vhost_log *log = dev->log;
227 
228     if (!log) {
229         return;
230     }
231 
232     --log->refcnt;
233     if (log->refcnt == 0) {
234         /* Sync only the range covered by the old log */
235         if (dev->log_size && sync) {
236             vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
237         }
238 
239         if (vhost_log == log) {
240             g_free(log->log);
241             vhost_log = NULL;
242         } else if (vhost_log_shm == log) {
243             qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
244                             log->fd);
245             vhost_log_shm = NULL;
246         }
247 
248         g_free(log);
249     }
250 
251     dev->log = NULL;
252     dev->log_size = 0;
253 }
254 
255 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
256 {
257     return dev->vhost_ops->vhost_requires_shm_log &&
258            dev->vhost_ops->vhost_requires_shm_log(dev);
259 }
260 
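/* Switch the device to a dirty log of the given size: the backend is told
 * about the new log base first, then the reference to the old log is
 * dropped (syncing any bits it still holds).
 */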
261 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
262 {
263     struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
264     uint64_t log_base = (uintptr_t)log->log;
265     int r;
266 
267     /* inform the backend of the log switch; this must be done before
268        releasing the current log, to ensure no logging is lost */
269     r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
270     if (r < 0) {
271         VHOST_OPS_DEBUG("vhost_set_log_base failed");
272     }
273 
274     vhost_log_put(dev, true);
275     dev->log = log;
276     dev->log_size = size;
277 }
278 
279 static int vhost_dev_has_iommu(struct vhost_dev *dev)
280 {
281     VirtIODevice *vdev = dev->vdev;
282 
283     return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
284 }
285 
286 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
287                               hwaddr *plen, int is_write)
288 {
289     if (!vhost_dev_has_iommu(dev)) {
290         return cpu_physical_memory_map(addr, plen, is_write);
291     } else {
292         return (void *)(uintptr_t)addr;
293     }
294 }
295 
296 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
297                                hwaddr len, int is_write,
298                                hwaddr access_len)
299 {
300     if (!vhost_dev_has_iommu(dev)) {
301         cpu_physical_memory_unmap(buffer, len, is_write, access_len);
302     }
303 }
304 
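/* Check one ring part against a region mapping.  Returns 0 if the ring
 * does not overlap the region or is still mapped at the same host address,
 * -ENOMEM if the ring extends beyond the region, and -EBUSY if the ring's
 * host address has changed.
 */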
305 static int vhost_verify_ring_part_mapping(void *ring_hva,
306                                           uint64_t ring_gpa,
307                                           uint64_t ring_size,
308                                           void *reg_hva,
309                                           uint64_t reg_gpa,
310                                           uint64_t reg_size)
311 {
312     uint64_t hva_ring_offset;
313     uint64_t ring_last = range_get_last(ring_gpa, ring_size);
314     uint64_t reg_last = range_get_last(reg_gpa, reg_size);
315 
316     if (ring_last < reg_gpa || ring_gpa > reg_last) {
317         return 0;
318     }
319     /* check that the whole ring is mapped */
320     if (ring_last > reg_last) {
321         return -ENOMEM;
322     }
323     /* check that ring's MemoryRegion wasn't replaced */
324     hva_ring_offset = ring_gpa - reg_gpa;
325     if (ring_hva != reg_hva + hva_ring_offset) {
326         return -EBUSY;
327     }
328 
329     return 0;
330 }
331 
332 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
333                                       void *reg_hva,
334                                       uint64_t reg_gpa,
335                                       uint64_t reg_size)
336 {
337     int i, j;
338     int r = 0;
339     const char *part_name[] = {
340         "descriptor table",
341         "available ring",
342         "used ring"
343     };
344 
345     for (i = 0; i < dev->nvqs; ++i) {
346         struct vhost_virtqueue *vq = dev->vqs + i;
347 
348         j = 0;
349         r = vhost_verify_ring_part_mapping(
350                 vq->desc, vq->desc_phys, vq->desc_size,
351                 reg_hva, reg_gpa, reg_size);
352         if (r) {
353             break;
354         }
355 
356         j++;
357         r = vhost_verify_ring_part_mapping(
358                 vq->avail, vq->avail_phys, vq->avail_size,
359                 reg_hva, reg_gpa, reg_size);
360         if (r) {
361             break;
362         }
363 
364         j++;
365         r = vhost_verify_ring_part_mapping(
366                 vq->used, vq->used_phys, vq->used_size,
367                 reg_hva, reg_gpa, reg_size);
368         if (r) {
369             break;
370         }
371     }
372 
373     if (r == -ENOMEM) {
374         error_report("Unable to map %s for ring %d", part_name[j], i);
375     } else if (r == -EBUSY) {
376         error_report("%s relocated for ring %d", part_name[j], i);
377     }
378     return r;
379 }
380 
381 static bool vhost_section(MemoryRegionSection *section)
382 {
383     bool result;
384     bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
385                      ~(1 << DIRTY_MEMORY_MIGRATION);
386     result = memory_region_is_ram(section->mr) &&
387         !memory_region_is_rom(section->mr);
388 
389     /* Vhost doesn't handle any block which is doing dirty-tracking other
390      * than migration; this typically fires on VGA areas.
391      */
392     result &= !log_dirty;
393 
394     trace_vhost_section(section->mr->name, result);
395     return result;
396 }
397 
398 static void vhost_begin(MemoryListener *listener)
399 {
400     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
401                                          memory_listener);
402     dev->tmp_sections = NULL;
403     dev->n_tmp_sections = 0;
404 }
405 
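/* The listener has finished a walk of the address space: adopt the
 * sections gathered in tmp_sections, rebuild the vhost_memory region table
 * from them and, if anything changed while the device is running, verify
 * the ring mappings and push the new table to the backend (growing the
 * dirty log first when logging is enabled).
 */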
406 static void vhost_commit(MemoryListener *listener)
407 {
408     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
409                                          memory_listener);
410     MemoryRegionSection *old_sections;
411     int n_old_sections;
412     uint64_t log_size;
413     size_t regions_size;
414     int r;
415     int i;
416     bool changed = false;
417 
418     /* Note we can be called before the device is started, but then
419      * starting the device calls set_mem_table, so we need to have
420      * built the data structures.
421      */
422     old_sections = dev->mem_sections;
423     n_old_sections = dev->n_mem_sections;
424     dev->mem_sections = dev->tmp_sections;
425     dev->n_mem_sections = dev->n_tmp_sections;
426 
427     if (dev->n_mem_sections != n_old_sections) {
428         changed = true;
429     } else {
430         /* Same size, let's check the contents */
431         changed = n_old_sections && memcmp(dev->mem_sections, old_sections,
432                          n_old_sections * sizeof(old_sections[0])) != 0;
433     }
434 
435     trace_vhost_commit(dev->started, changed);
436     if (!changed) {
437         goto out;
438     }
439 
440     /* Rebuild the regions list from the new sections list */
441     regions_size = offsetof(struct vhost_memory, regions) +
442                        dev->n_mem_sections * sizeof dev->mem->regions[0];
443     dev->mem = g_realloc(dev->mem, regions_size);
444     dev->mem->nregions = dev->n_mem_sections;
445     used_memslots = dev->mem->nregions;
446     for (i = 0; i < dev->n_mem_sections; i++) {
447         struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
448         struct MemoryRegionSection *mrs = dev->mem_sections + i;
449 
450         cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
451         cur_vmr->memory_size     = int128_get64(mrs->size);
452         cur_vmr->userspace_addr  =
453             (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
454             mrs->offset_within_region;
455         cur_vmr->flags_padding   = 0;
456     }
457 
458     if (!dev->started) {
459         goto out;
460     }
461 
462     for (i = 0; i < dev->mem->nregions; i++) {
463         if (vhost_verify_ring_mappings(dev,
464                        (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
465                        dev->mem->regions[i].guest_phys_addr,
466                        dev->mem->regions[i].memory_size)) {
467             error_report("Verify ring failure on region %d", i);
468             abort();
469         }
470     }
471 
472     if (!dev->log_enabled) {
473         r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
474         if (r < 0) {
475             VHOST_OPS_DEBUG("vhost_set_mem_table failed");
476         }
477         goto out;
478     }
479     log_size = vhost_get_log_size(dev);
480     /* We allocate an extra 4K bytes of log,
481      * to reduce the number of reallocations. */
482 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
483     /* To log more, must increase log size before table update. */
484     if (dev->log_size < log_size) {
485         vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
486     }
487     r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
488     if (r < 0) {
489         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
490     }
491     /* To log less, can only decrease log size after table update. */
492     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
493         vhost_dev_log_resize(dev, log_size);
494     }
495 
496 out:
497     /* Deref the old list of sections; this must happen _after_ the
498      * vhost_set_mem_table to ensure the client isn't still using the
499      * section we're about to unref.
500      */
501     while (n_old_sections--) {
502         memory_region_unref(old_sections[n_old_sections].mr);
503     }
504     g_free(old_sections);
505     return;
506 }
507 
508 /* Add the section data to the tmp_sections array.
509  * This relies on the listener calling us in memory-address order,
510  * and on being called for every region (via the _add and _nop
511  * methods), so that abutting neighbours can be joined.
512  */
513 static void vhost_region_add_section(struct vhost_dev *dev,
514                                      MemoryRegionSection *section)
515 {
516     bool need_add = true;
517     uint64_t mrs_size = int128_get64(section->size);
518     uint64_t mrs_gpa = section->offset_within_address_space;
519     uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
520                          section->offset_within_region;
521 
522     trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
523                                    mrs_host);
524 
525     if (dev->n_tmp_sections) {
526         /* Since we already have at least one section, let's see if
527          * this extends it; since we're scanning in order, we only
528          * have to look at the last one, and the FlatView that calls
529          * us shouldn't have overlaps.
530          */
531         MemoryRegionSection *prev_sec = dev->tmp_sections +
532                                                (dev->n_tmp_sections - 1);
533         uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
534         uint64_t prev_size = int128_get64(prev_sec->size);
535         uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
536         uint64_t prev_host_start =
537                         (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
538                         prev_sec->offset_within_region;
539         uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);
540 
541         if (prev_gpa_end + 1 == mrs_gpa &&
542             prev_host_end + 1 == mrs_host &&
543             section->mr == prev_sec->mr &&
544             (!dev->vhost_ops->vhost_backend_can_merge ||
545                 dev->vhost_ops->vhost_backend_can_merge(dev,
546                     mrs_host, mrs_size,
547                     prev_host_start, prev_size))) {
548             /* The two sections abut */
549             need_add = false;
550             prev_sec->size = int128_add(prev_sec->size, section->size);
551             trace_vhost_region_add_section_abut(section->mr->name,
552                                                 mrs_size + prev_size);
553         }
554     }
555 
556     if (need_add) {
557         ++dev->n_tmp_sections;
558         dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
559                                     dev->n_tmp_sections);
560         dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
561         /* The flatview isn't stable and we don't use it; setting it to
562          * NULL means we can memcmp the list.
563          */
564         dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
565         memory_region_ref(section->mr);
566     }
567 }
568 
569 /* Used for both add and nop callbacks */
570 static void vhost_region_addnop(MemoryListener *listener,
571                                 MemoryRegionSection *section)
572 {
573     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
574                                          memory_listener);
575 
576     if (!vhost_section(section)) {
577         return;
578     }
579     vhost_region_add_section(dev, section);
580 }
581 
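/* An IOMMU mapping covered by this notifier was torn down: invalidate the
 * corresponding range in the backend's device IOTLB.
 */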
582 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
583 {
584     struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
585     struct vhost_dev *hdev = iommu->hdev;
586     hwaddr iova = iotlb->iova + iommu->iommu_offset;
587 
588     if (vhost_backend_invalidate_device_iotlb(hdev, iova,
589                                               iotlb->addr_mask + 1)) {
590         error_report("Failed to invalidate device iotlb");
591     }
592 }
593 
594 static void vhost_iommu_region_add(MemoryListener *listener,
595                                    MemoryRegionSection *section)
596 {
597     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
598                                          iommu_listener);
599     struct vhost_iommu *iommu;
600     Int128 end;
601 
602     if (!memory_region_is_iommu(section->mr)) {
603         return;
604     }
605 
606     iommu = g_malloc0(sizeof(*iommu));
607     end = int128_add(int128_make64(section->offset_within_region),
608                      section->size);
609     end = int128_sub(end, int128_one());
610     iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
611                         IOMMU_NOTIFIER_UNMAP,
612                         section->offset_within_region,
613                         int128_get64(end));
614     iommu->mr = section->mr;
615     iommu->iommu_offset = section->offset_within_address_space -
616                           section->offset_within_region;
617     iommu->hdev = dev;
618     memory_region_register_iommu_notifier(section->mr, &iommu->n);
619     QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
620     /* TODO: can replay help performance here? */
621 }
622 
623 static void vhost_iommu_region_del(MemoryListener *listener,
624                                    MemoryRegionSection *section)
625 {
626     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
627                                          iommu_listener);
628     struct vhost_iommu *iommu;
629 
630     if (!memory_region_is_iommu(section->mr)) {
631         return;
632     }
633 
634     QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
635         if (iommu->mr == section->mr &&
636             iommu->n.start == section->offset_within_region) {
637             memory_region_unregister_iommu_notifier(iommu->mr,
638                                                     &iommu->n);
639             QLIST_REMOVE(iommu, iommu_next);
640             g_free(iommu);
641             break;
642         }
643     }
644 }
645 
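/* Program the addresses of one virtqueue's descriptor, available and used
 * rings into the backend.  log_guest_addr carries the guest-physical
 * address of the used ring, which the backend needs for dirty logging
 * when VHOST_VRING_F_LOG is set.
 */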
646 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
647                                     struct vhost_virtqueue *vq,
648                                     unsigned idx, bool enable_log)
649 {
650     struct vhost_vring_addr addr = {
651         .index = idx,
652         .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
653         .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
654         .used_user_addr = (uint64_t)(unsigned long)vq->used,
655         .log_guest_addr = vq->used_phys,
656         .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
657     };
658     int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
659     if (r < 0) {
660         VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
661         return -errno;
662     }
663     return 0;
664 }
665 
666 static int vhost_dev_set_features(struct vhost_dev *dev,
667                                   bool enable_log)
668 {
669     uint64_t features = dev->acked_features;
670     int r;
671     if (enable_log) {
672         features |= 0x1ULL << VHOST_F_LOG_ALL;
673     }
674     r = dev->vhost_ops->vhost_set_features(dev, features);
675     if (r < 0) {
676         VHOST_OPS_DEBUG("vhost_set_features failed");
677     }
678     return r < 0 ? -errno : 0;
679 }
680 
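/* Enable or disable dirty logging: renegotiate the feature set with or
 * without VHOST_F_LOG_ALL and reprogram every vring's addresses with the
 * matching log flag, rolling back to the previous state on failure.
 */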
681 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
682 {
683     int r, i, idx;
684     r = vhost_dev_set_features(dev, enable_log);
685     if (r < 0) {
686         goto err_features;
687     }
688     for (i = 0; i < dev->nvqs; ++i) {
689         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
690         r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
691                                      enable_log);
692         if (r < 0) {
693             goto err_vq;
694         }
695     }
696     return 0;
697 err_vq:
698     for (; i >= 0; --i) {
699         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
700         vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
701                                  dev->log_enabled);
702     }
703     vhost_dev_set_features(dev, dev->log_enabled);
704 err_features:
705     return r;
706 }
707 
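/* Toggle dirty logging on behalf of log_global_start/stop.  If the device
 * is not running, only the flag is recorded; otherwise the log is
 * allocated (or released) and the backend reconfigured.
 */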
708 static int vhost_migration_log(MemoryListener *listener, int enable)
709 {
710     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
711                                          memory_listener);
712     int r;
713     if (!!enable == dev->log_enabled) {
714         return 0;
715     }
716     if (!dev->started) {
717         dev->log_enabled = enable;
718         return 0;
719     }
720     if (!enable) {
721         r = vhost_dev_set_log(dev, false);
722         if (r < 0) {
723             return r;
724         }
725         vhost_log_put(dev, false);
726     } else {
727         vhost_dev_log_resize(dev, vhost_get_log_size(dev));
728         r = vhost_dev_set_log(dev, true);
729         if (r < 0) {
730             return r;
731         }
732     }
733     dev->log_enabled = enable;
734     return 0;
735 }
736 
737 static void vhost_log_global_start(MemoryListener *listener)
738 {
739     int r;
740 
741     r = vhost_migration_log(listener, true);
742     if (r < 0) {
743         abort();
744     }
745 }
746 
747 static void vhost_log_global_stop(MemoryListener *listener)
748 {
749     int r;
750 
751     r = vhost_migration_log(listener, false);
752     if (r < 0) {
753         abort();
754     }
755 }
756 
757 static void vhost_log_start(MemoryListener *listener,
758                             MemoryRegionSection *section,
759                             int old, int new)
760 {
761     /* FIXME: implement */
762 }
763 
764 static void vhost_log_stop(MemoryListener *listener,
765                            MemoryRegionSection *section,
766                            int old, int new)
767 {
768     /* FIXME: implement */
769 }
770 
771 /* The vhost driver natively knows how to handle the vrings of
772  * non-cross-endian legacy devices and modern devices. Only legacy devices
773  * exposed to a bi-endian guest may require the vhost driver to use a
774  * specific endianness.
775  */
776 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
777 {
778     if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
779         return false;
780     }
781 #ifdef HOST_WORDS_BIGENDIAN
782     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
783 #else
784     return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
785 #endif
786 }
787 
788 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
789                                                    bool is_big_endian,
790                                                    int vhost_vq_index)
791 {
792     struct vhost_vring_state s = {
793         .index = vhost_vq_index,
794         .num = is_big_endian
795     };
796 
797     if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
798         return 0;
799     }
800 
801     VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
802     if (errno == ENOTTY) {
803         error_report("vhost does not support cross-endian");
804         return -ENOSYS;
805     }
806 
807     return -errno;
808 }
809 
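/* Translate a guest-physical address into the backend's userspace address
 * using the region table.  *len is set to the remaining length of the
 * containing region.  Returns -EFAULT if no region covers gpa.
 */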
810 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
811                                       uint64_t gpa, uint64_t *uaddr,
812                                       uint64_t *len)
813 {
814     int i;
815 
816     for (i = 0; i < hdev->mem->nregions; i++) {
817         struct vhost_memory_region *reg = hdev->mem->regions + i;
818 
819         if (gpa >= reg->guest_phys_addr &&
820             reg->guest_phys_addr + reg->memory_size > gpa) {
821             *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
822             *len = reg->guest_phys_addr + reg->memory_size - gpa;
823             return 0;
824         }
825     }
826 
827     return -EFAULT;
828 }
829 
830 int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
831 {
832     IOMMUTLBEntry iotlb;
833     uint64_t uaddr, len;
834     int ret = -EFAULT;
835 
836     rcu_read_lock();
837 
838     iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
839                                           iova, write);
840     if (iotlb.target_as != NULL) {
841         ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
842                                          &uaddr, &len);
843         if (ret) {
844             error_report("Failed to look up the translated address "
845                          "%"PRIx64, iotlb.translated_addr);
846             goto out;
847         }
848 
849         len = MIN(iotlb.addr_mask + 1, len);
850         iova = iova & ~iotlb.addr_mask;
851 
852         ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
853                                                 len, iotlb.perm);
854         if (ret) {
855             error_report("Failed to update device iotlb");
856             goto out;
857         }
858     }
859 out:
860     rcu_read_unlock();
861 
862     return ret;
863 }
864 
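/* Hand one virtqueue over to the backend: program its size and last-avail
 * index, fix up vring endianness for cross-endian legacy guests, map and
 * program the ring addresses, and wire up the kick and call eventfds.
 */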
865 static int vhost_virtqueue_start(struct vhost_dev *dev,
866                                 struct VirtIODevice *vdev,
867                                 struct vhost_virtqueue *vq,
868                                 unsigned idx)
869 {
870     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
871     VirtioBusState *vbus = VIRTIO_BUS(qbus);
872     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
873     hwaddr s, l, a;
874     int r;
875     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
876     struct vhost_vring_file file = {
877         .index = vhost_vq_index
878     };
879     struct vhost_vring_state state = {
880         .index = vhost_vq_index
881     };
882     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
883 
884 
885     vq->num = state.num = virtio_queue_get_num(vdev, idx);
886     r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
887     if (r) {
888         VHOST_OPS_DEBUG("vhost_set_vring_num failed");
889         return -errno;
890     }
891 
892     state.num = virtio_queue_get_last_avail_idx(vdev, idx);
893     r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
894     if (r) {
895         VHOST_OPS_DEBUG("vhost_set_vring_base failed");
896         return -errno;
897     }
898 
899     if (vhost_needs_vring_endian(vdev)) {
900         r = vhost_virtqueue_set_vring_endian_legacy(dev,
901                                                     virtio_is_big_endian(vdev),
902                                                     vhost_vq_index);
903         if (r) {
904             return -errno;
905         }
906     }
907 
908     vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
909     vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
910     vq->desc = vhost_memory_map(dev, a, &l, 0);
911     if (!vq->desc || l != s) {
912         r = -ENOMEM;
913         goto fail_alloc_desc;
914     }
915     vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
916     vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
917     vq->avail = vhost_memory_map(dev, a, &l, 0);
918     if (!vq->avail || l != s) {
919         r = -ENOMEM;
920         goto fail_alloc_avail;
921     }
922     vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
923     vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
924     vq->used = vhost_memory_map(dev, a, &l, 1);
925     if (!vq->used || l != s) {
926         r = -ENOMEM;
927         goto fail_alloc_used;
928     }
929 
930     r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
931     if (r < 0) {
932         r = -errno;
933         goto fail_alloc;
934     }
935 
936     file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
937     r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
938     if (r) {
939         VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
940         r = -errno;
941         goto fail_kick;
942     }
943 
944     /* Clear and discard previous events if any. */
945     event_notifier_test_and_clear(&vq->masked_notifier);
946 
947     /* Init vring in unmasked state, unless guest_notifier_mask
948      * will do it later.
949      */
950     if (!vdev->use_guest_notifier_mask) {
951         /* TODO: check and handle errors. */
952         vhost_virtqueue_mask(dev, vdev, idx, false);
953     }
954 
955     if (k->query_guest_notifiers &&
956         k->query_guest_notifiers(qbus->parent) &&
957         virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
958         file.fd = -1;
959         r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
960         if (r) {
961             goto fail_vector;
962         }
963     }
964 
965     return 0;
966 
967 fail_vector:
968 fail_kick:
969 fail_alloc:
970     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
971                        0, 0);
972 fail_alloc_used:
973     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
974                        0, 0);
975 fail_alloc_avail:
976     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
977                        0, 0);
978 fail_alloc_desc:
979     return r;
980 }
981 
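/* Take one virtqueue back from the backend: read back the last-avail index
 * (falling back to the device's used index if the backend is gone),
 * restore native vring endianness for legacy cross-endian setups, and
 * unmap the rings.
 */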
982 static void vhost_virtqueue_stop(struct vhost_dev *dev,
983                                     struct VirtIODevice *vdev,
984                                     struct vhost_virtqueue *vq,
985                                     unsigned idx)
986 {
987     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
988     struct vhost_vring_state state = {
989         .index = vhost_vq_index,
990     };
991     int r;
992 
993     r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
994     if (r < 0) {
995         VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
996         /* Connection to the backend is broken, so let's sync internal
997          * last avail idx to the device used idx.
998          */
999         virtio_queue_restore_last_avail_idx(vdev, idx);
1000     } else {
1001         virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1002     }
1003     virtio_queue_invalidate_signalled_used(vdev, idx);
1004     virtio_queue_update_used_idx(vdev, idx);
1005 
1006     /* In the cross-endian case, we need to reset the vring endianness to
1007      * native, as legacy devices expect by default.
1008      */
1009     if (vhost_needs_vring_endian(vdev)) {
1010         vhost_virtqueue_set_vring_endian_legacy(dev,
1011                                                 !virtio_is_big_endian(vdev),
1012                                                 vhost_vq_index);
1013     }
1014 
1015     vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1016                        1, virtio_queue_get_used_size(vdev, idx));
1017     vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1018                        0, virtio_queue_get_avail_size(vdev, idx));
1019     vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1020                        0, virtio_queue_get_desc_size(vdev, idx));
1021 }
1022 
1023 static void vhost_eventfd_add(MemoryListener *listener,
1024                               MemoryRegionSection *section,
1025                               bool match_data, uint64_t data, EventNotifier *e)
1026 {
1027 }
1028 
1029 static void vhost_eventfd_del(MemoryListener *listener,
1030                               MemoryRegionSection *section,
1031                               bool match_data, uint64_t data, EventNotifier *e)
1032 {
1033 }
1034 
1035 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1036                                                 int n, uint32_t timeout)
1037 {
1038     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1039     struct vhost_vring_state state = {
1040         .index = vhost_vq_index,
1041         .num = timeout,
1042     };
1043     int r;
1044 
1045     if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1046         return -EINVAL;
1047     }
1048 
1049     r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1050     if (r) {
1051         VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1052         return r;
1053     }
1054 
1055     return 0;
1056 }
1057 
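/* One-time virtqueue setup: create the masked notifier and install it as
 * the initial call eventfd.
 */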
1058 static int vhost_virtqueue_init(struct vhost_dev *dev,
1059                                 struct vhost_virtqueue *vq, int n)
1060 {
1061     int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1062     struct vhost_vring_file file = {
1063         .index = vhost_vq_index,
1064     };
1065     int r = event_notifier_init(&vq->masked_notifier, 0);
1066     if (r < 0) {
1067         return r;
1068     }
1069 
1070     file.fd = event_notifier_get_fd(&vq->masked_notifier);
1071     r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1072     if (r) {
1073         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1074         r = -errno;
1075         goto fail_call;
1076     }
1077 
1078     vq->dev = dev;
1079 
1080     return 0;
1081 fail_call:
1082     event_notifier_cleanup(&vq->masked_notifier);
1083     return r;
1084 }
1085 
1086 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1087 {
1088     event_notifier_cleanup(&vq->masked_notifier);
1089 }
1090 
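/* Initialise a vhost device: bind the backend, take ownership, query its
 * features, initialise every virtqueue, set up the memory listeners and,
 * when dirty logging cannot work, install a migration blocker.
 */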
1091 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1092                    VhostBackendType backend_type, uint32_t busyloop_timeout)
1093 {
1094     uint64_t features;
1095     int i, r, n_initialized_vqs = 0;
1096     Error *local_err = NULL;
1097 
1098     hdev->vdev = NULL;
1099     hdev->migration_blocker = NULL;
1100 
1101     r = vhost_set_backend_type(hdev, backend_type);
1102     assert(r >= 0);
1103 
1104     r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1105     if (r < 0) {
1106         goto fail;
1107     }
1108 
1109     if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1110         error_report("vhost backend memory slots limit is less"
1111                 " than the current number of present memory slots");
1112         r = -1;
1113         goto fail;
1114     }
1115 
1116     r = hdev->vhost_ops->vhost_set_owner(hdev);
1117     if (r < 0) {
1118         VHOST_OPS_DEBUG("vhost_set_owner failed");
1119         goto fail;
1120     }
1121 
1122     r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1123     if (r < 0) {
1124         VHOST_OPS_DEBUG("vhost_get_features failed");
1125         goto fail;
1126     }
1127 
1128     for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1129         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1130         if (r < 0) {
1131             goto fail;
1132         }
1133     }
1134 
1135     if (busyloop_timeout) {
1136         for (i = 0; i < hdev->nvqs; ++i) {
1137             r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1138                                                      busyloop_timeout);
1139             if (r < 0) {
1140                 goto fail_busyloop;
1141             }
1142         }
1143     }
1144 
1145     hdev->features = features;
1146 
1147     hdev->memory_listener = (MemoryListener) {
1148         .begin = vhost_begin,
1149         .commit = vhost_commit,
1150         .region_add = vhost_region_addnop,
1151         .region_nop = vhost_region_addnop,
1152         .log_start = vhost_log_start,
1153         .log_stop = vhost_log_stop,
1154         .log_sync = vhost_log_sync,
1155         .log_global_start = vhost_log_global_start,
1156         .log_global_stop = vhost_log_global_stop,
1157         .eventfd_add = vhost_eventfd_add,
1158         .eventfd_del = vhost_eventfd_del,
1159         .priority = 10
1160     };
1161 
1162     hdev->iommu_listener = (MemoryListener) {
1163         .region_add = vhost_iommu_region_add,
1164         .region_del = vhost_iommu_region_del,
1165     };
1166 
1167     if (hdev->migration_blocker == NULL) {
1168         if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1169             error_setg(&hdev->migration_blocker,
1170                        "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1171         } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
1172             error_setg(&hdev->migration_blocker,
1173                        "Migration disabled: failed to allocate shared memory");
1174         }
1175     }
1176 
1177     if (hdev->migration_blocker != NULL) {
1178         r = migrate_add_blocker(hdev->migration_blocker, &local_err);
1179         if (local_err) {
1180             error_report_err(local_err);
1181             error_free(hdev->migration_blocker);
1182             goto fail_busyloop;
1183         }
1184     }
1185 
1186     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1187     hdev->n_mem_sections = 0;
1188     hdev->mem_sections = NULL;
1189     hdev->log = NULL;
1190     hdev->log_size = 0;
1191     hdev->log_enabled = false;
1192     hdev->started = false;
1193     memory_listener_register(&hdev->memory_listener, &address_space_memory);
1194     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1195     return 0;
1196 
1197 fail_busyloop:
1198     while (--i >= 0) {
1199         vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1200     }
1201 fail:
1202     hdev->nvqs = n_initialized_vqs;
1203     vhost_dev_cleanup(hdev);
1204     return r;
1205 }
1206 
1207 void vhost_dev_cleanup(struct vhost_dev *hdev)
1208 {
1209     int i;
1210 
1211     for (i = 0; i < hdev->nvqs; ++i) {
1212         vhost_virtqueue_cleanup(hdev->vqs + i);
1213     }
1214     if (hdev->mem) {
1215         /* those are only safe after successful init */
1216         memory_listener_unregister(&hdev->memory_listener);
1217         QLIST_REMOVE(hdev, entry);
1218     }
1219     if (hdev->migration_blocker) {
1220         migrate_del_blocker(hdev->migration_blocker);
1221         error_free(hdev->migration_blocker);
1222     }
1223     g_free(hdev->mem);
1224     g_free(hdev->mem_sections);
1225     if (hdev->vhost_ops) {
1226         hdev->vhost_ops->vhost_backend_cleanup(hdev);
1227     }
1228     assert(!hdev->log);
1229 
1230     memset(hdev, 0, sizeof(struct vhost_dev));
1231 }
1232 
1233 /* Stop processing guest IO notifications in qemu.
1234  * Start processing them in the vhost backend.
1235  */
1236 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1237 {
1238     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1239     int i, r, e;
1240 
1241     /* We will pass the notifiers to the kernel, make sure that QEMU
1242      * doesn't interfere.
1243      */
1244     r = virtio_device_grab_ioeventfd(vdev);
1245     if (r < 0) {
1246         error_report("binding does not support host notifiers");
1247         goto fail;
1248     }
1249 
1250     for (i = 0; i < hdev->nvqs; ++i) {
1251         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1252                                          true);
1253         if (r < 0) {
1254             error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1255             goto fail_vq;
1256         }
1257     }
1258 
1259     return 0;
1260 fail_vq:
1261     while (--i >= 0) {
1262         e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1263                                          false);
1264         if (e < 0) {
1265             error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
1266         }
1267         assert (e >= 0);
1268         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1269     }
1270     virtio_device_release_ioeventfd(vdev);
1271 fail:
1272     return r;
1273 }
1274 
1275 /* Stop processing guest IO notifications in vhost.
1276  * Start processing them in qemu.
1277  * This might actually run the qemu handlers right away,
1278  * so virtio in qemu must be completely setup when this is called.
1279  */
1280 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1281 {
1282     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1283     int i, r;
1284 
1285     for (i = 0; i < hdev->nvqs; ++i) {
1286         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1287                                          false);
1288         if (r < 0) {
1289             error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1290         }
1291         assert (r >= 0);
1292         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1293     }
1294     virtio_device_release_ioeventfd(vdev);
1295 }
1296 
1297 /* Test and clear event pending status.
1298  * Should be called after unmask to avoid losing events.
1299  */
1300 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1301 {
1302     struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1303     assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1304     return event_notifier_test_and_clear(&vq->masked_notifier);
1305 }
1306 
1307 /* Mask/unmask events from this vq. */
1308 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1309                          bool mask)
1310 {
1311     struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1312     int r, index = n - hdev->vq_index;
1313     struct vhost_vring_file file;
1314 
1315     /* should only be called after backend is connected */
1316     assert(hdev->vhost_ops);
1317 
1318     if (mask) {
1319         assert(vdev->use_guest_notifier_mask);
1320         file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1321     } else {
1322         file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1323     }
1324 
1325     file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1326     r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1327     if (r < 0) {
1328         VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1329     }
1330 }
1331 
1332 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1333                             uint64_t features)
1334 {
1335     const int *bit = feature_bits;
1336     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1337         uint64_t bit_mask = (1ULL << *bit);
1338         if (!(hdev->features & bit_mask)) {
1339             features &= ~bit_mask;
1340         }
1341         bit++;
1342     }
1343     return features;
1344 }
1345 
1346 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1347                         uint64_t features)
1348 {
1349     const int *bit = feature_bits;
1350     while (*bit != VHOST_INVALID_FEATURE_BIT) {
1351         uint64_t bit_mask = (1ULL << *bit);
1352         if (features & bit_mask) {
1353             hdev->acked_features |= bit_mask;
1354         }
1355         bit++;
1356     }
1357 }
1358 
1359 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
1360                          uint32_t config_len)
1361 {
1362     assert(hdev->vhost_ops);
1363 
1364     if (hdev->vhost_ops->vhost_get_config) {
1365         return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
1366     }
1367 
1368     return -1;
1369 }
1370 
1371 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
1372                          uint32_t offset, uint32_t size, uint32_t flags)
1373 {
1374     assert(hdev->vhost_ops);
1375 
1376     if (hdev->vhost_ops->vhost_set_config) {
1377         return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
1378                                                  size, flags);
1379     }
1380 
1381     return -1;
1382 }
1383 
1384 void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
1385                                    const VhostDevConfigOps *ops)
1386 {
1387     assert(hdev->vhost_ops);
1388     hdev->config_ops = ops;
1389 }
1390 
1391 /* Host notifiers must be enabled at this point. */
1392 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1393 {
1394     int i, r;
1395 
1396     /* should only be called after backend is connected */
1397     assert(hdev->vhost_ops);
1398 
1399     hdev->started = true;
1400     hdev->vdev = vdev;
1401 
1402     r = vhost_dev_set_features(hdev, hdev->log_enabled);
1403     if (r < 0) {
1404         goto fail_features;
1405     }
1406 
1407     if (vhost_dev_has_iommu(hdev)) {
1408         memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1409     }
1410 
1411     r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1412     if (r < 0) {
1413         VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1414         r = -errno;
1415         goto fail_mem;
1416     }
1417     for (i = 0; i < hdev->nvqs; ++i) {
1418         r = vhost_virtqueue_start(hdev,
1419                                   vdev,
1420                                   hdev->vqs + i,
1421                                   hdev->vq_index + i);
1422         if (r < 0) {
1423             goto fail_vq;
1424         }
1425     }
1426 
1427     if (hdev->log_enabled) {
1428         uint64_t log_base;
1429 
1430         hdev->log_size = vhost_get_log_size(hdev);
1431         hdev->log = vhost_log_get(hdev->log_size,
1432                                   vhost_dev_log_is_shared(hdev));
1433         log_base = (uintptr_t)hdev->log->log;
1434         r = hdev->vhost_ops->vhost_set_log_base(hdev,
1435                                                 hdev->log_size ? log_base : 0,
1436                                                 hdev->log);
1437         if (r < 0) {
1438             VHOST_OPS_DEBUG("vhost_set_log_base failed");
1439             r = -errno;
1440             goto fail_log;
1441         }
1442     }
1443 
1444     if (vhost_dev_has_iommu(hdev)) {
1445         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1446 
1447         /* Update used ring information for IOTLB to work correctly;
1448          * the vhost-kernel code requires this. */
1449         for (i = 0; i < hdev->nvqs; ++i) {
1450             struct vhost_virtqueue *vq = hdev->vqs + i;
1451             vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1452         }
1453     }
1454     return 0;
1455 fail_log:
1456     vhost_log_put(hdev, false);
1457 fail_vq:
1458     while (--i >= 0) {
1459         vhost_virtqueue_stop(hdev,
1460                              vdev,
1461                              hdev->vqs + i,
1462                              hdev->vq_index + i);
1463     }
1464     i = hdev->nvqs;
1465 
1466 fail_mem:
1467 fail_features:
1468 
1469     hdev->started = false;
1470     return r;
1471 }
1472 
1473 /* Host notifiers must be enabled at this point. */
1474 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1475 {
1476     int i;
1477 
1478     /* should only be called after backend is connected */
1479     assert(hdev->vhost_ops);
1480 
1481     for (i = 0; i < hdev->nvqs; ++i) {
1482         vhost_virtqueue_stop(hdev,
1483                              vdev,
1484                              hdev->vqs + i,
1485                              hdev->vq_index + i);
1486     }
1487 
1488     if (vhost_dev_has_iommu(hdev)) {
1489         hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1490         memory_listener_unregister(&hdev->iommu_listener);
1491     }
1492     vhost_log_put(hdev, true);
1493     hdev->started = false;
1494     hdev->vdev = NULL;
1495 }
1496 
1497 int vhost_net_set_backend(struct vhost_dev *hdev,
1498                           struct vhost_vring_file *file)
1499 {
1500     if (hdev->vhost_ops->vhost_net_set_backend) {
1501         return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1502     }
1503 
1504     return -1;
1505 }
1506