xref: /qemu/hw/virtio/vhost.c (revision a84e937649f09d372e677b3978a933f1207513a2)
/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"
#include "trace.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

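/* Check whether every registered vhost backend still has room for one
 * more memory slot: take the smallest per-backend memslot limit and
 * compare it against the number of slots currently in use. */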
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

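/* Sync the dirty-page log with QEMU's bitmap for the overlap of the
 * section range [mfirst, mlast] and the region range [rfirst, rlast].
 * Each non-zero log chunk is consumed with an atomic exchange and every
 * set bit is reported through memory_region_set_dirty(). */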
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

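/* Sync dirty pages for one memory section, clamped to [first, last].
 * Both the memory regions and the used rings are scanned, since vhost
 * logs used-ring writes by guest physical address as well. Does nothing
 * unless the device is started with logging enabled. */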
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

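/* Compute the dirty log size, in chunks, needed to cover the highest
 * guest physical address of any memory region or used ring. */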
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

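/* Allocate a dirty log of 'size' chunks. When 'share' is set the log
 * lives in a sealed memfd so that it can be handed to an external
 * backend (e.g. a vhost-user backend); otherwise plain heap memory is
 * used. */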
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

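/* Return the global log (shared or private) if it already has the
 * requested size, bumping its refcount; otherwise allocate a new one
 * and make it the global log. */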
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

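/* Drop the device's reference on its log. When the last reference goes
 * away, optionally sync the range covered by the old log before freeing
 * it and clearing the matching global pointer. */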
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

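/* Switch the device to a new log of the given size. The backend is told
 * about the new log before the old one is released, so no dirty-page
 * information can be lost in between. */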
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* Inform the backend of the log switch; this must be done before
     * releasing the current log, to ensure no logging is lost. */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

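/* Check one ring part (descriptor table, avail or used ring) against a
 * region: returns 0 if they don't overlap or if the part is still fully
 * mapped at the same host address, -ENOMEM if the part is no longer
 * entirely inside the region, and -EBUSY if its host address moved. */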
static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (vq->desc_phys == 0) {
            continue;
        }

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static bool vhost_section(MemoryRegionSection *section)
{
    bool result;
    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
                     ~(1 << DIRTY_MEMORY_MIGRATION);
    result = memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);

    /* Vhost doesn't handle any block which is doing dirty-tracking other
     * than migration; this typically fires on VGA areas.
     */
    result &= !log_dirty;

    trace_vhost_section(section->mr->name, result);
    return result;
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

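/* Commit the section list collected by vhost_begin()/vhost_region_addnop()
 * into dev->mem_sections and rebuild dev->mem from it. If something
 * actually changed and the device is running, verify the ring mappings
 * and push the new memory table (resizing the log first when needed). */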
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    size_t regions_size;
    int r;
    int i;
    bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have
     * built the data structures.
     */
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (dev->n_mem_sections != n_old_sections) {
        changed = true;
    } else {
        /* Same size, let's check the contents */
        changed = n_old_sections && memcmp(dev->mem_sections, old_sections,
                         n_old_sections * sizeof(old_sections[0])) != 0;
    }

    trace_vhost_commit(dev->started, changed);
    if (!changed) {
        goto out;
    }

    /* Rebuild the regions list from the new sections list */
    regions_size = offsetof(struct vhost_memory, regions) +
                       dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;
    used_memslots = dev->mem->nregions;
    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;

        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size     = int128_get64(mrs->size);
        cur_vmr->userspace_addr  =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding   = 0;
    }

    if (!dev->started) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
    return;
}

/* Adds the section data to the tmp_sections structure.
 * It relies on the listener calling us in memory-address order
 * and for each region (via the _add and _nop methods) in order
 * to join neighbours.
 */
static void vhost_region_add_section(struct vhost_dev *dev,
                                     MemoryRegionSection *section)
{
    bool need_add = true;
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    RAMBlock *mrs_rb = section->mr->ram_block;
    size_t mrs_page = qemu_ram_pagesize(mrs_rb);

    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
                                   mrs_host);

    /* Round the section to its page size */
    /* First align the start down to a page boundary */
    uint64_t alignage = mrs_host & (mrs_page - 1);
    if (alignage) {
        mrs_host -= alignage;
        mrs_size += alignage;
        mrs_gpa  -= alignage;
    }
    /* Now align the size up to a page boundary */
    alignage = mrs_size & (mrs_page - 1);
    if (alignage) {
        mrs_size += mrs_page - alignage;
    }
    trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa, mrs_size,
                                           mrs_host);

    if (dev->n_tmp_sections) {
        /* Since we already have at least one section, let's see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                               (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
        uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
        uint64_t prev_host_start =
                        (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
                        prev_sec->offset_within_region;
        uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);

        if (mrs_gpa <= (prev_gpa_end + 1)) {
            /* OK, looks like overlapping/intersecting - it's possible that
             * the rounding to page sizes has made them overlap, but they should
             * match up in the same RAMBlock if they do.
             */
            if (mrs_gpa < prev_gpa_start) {
                error_report("%s:Section rounded to %"PRIx64
                             " prior to previous %"PRIx64,
                             __func__, mrs_gpa, prev_gpa_start);
                /* A way to cleanly fail here would be better */
                return;
            }
            /* Offset from the start of the previous GPA to this GPA */
            size_t offset = mrs_gpa - prev_gpa_start;

            if (prev_host_start + offset == mrs_host &&
                section->mr == prev_sec->mr &&
                (!dev->vhost_ops->vhost_backend_can_merge ||
                 dev->vhost_ops->vhost_backend_can_merge(dev,
                    mrs_host, mrs_size,
                    prev_host_start, prev_size))) {
                uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
                need_add = false;
                prev_sec->offset_within_address_space =
                    MIN(prev_gpa_start, mrs_gpa);
                prev_sec->offset_within_region =
                    MIN(prev_host_start, mrs_host) -
                    (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
                prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
                                               mrs_host));
                trace_vhost_region_add_section_merge(section->mr->name,
                                        int128_get64(prev_sec->size),
                                        prev_sec->offset_within_address_space,
                                        prev_sec->offset_within_region);
            } else {
                /* adjoining regions are fine, but overlapping ones with
                 * different blocks/offsets shouldn't happen
                 */
                if (mrs_gpa != prev_gpa_end + 1) {
                    error_report("%s: Overlapping but not coherent sections "
                                 "at %"PRIx64,
                                 __func__, mrs_gpa);
                    return;
                }
            }
        }
    }

    if (need_add) {
        ++dev->n_tmp_sections;
        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                    dev->n_tmp_sections);
        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it; setting it to
         * NULL means we can memcmp the list.
         */
        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
        memory_region_ref(section->mr);
    }
}

/* Used for both add and nop callbacks */
static void vhost_region_addnop(MemoryListener *listener,
                                MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }
    vhost_region_add_section(dev, section);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
645         error_report("Fail to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

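/* Tell the backend where the descriptor table, avail and used rings of
 * one virtqueue live in user (QEMU) address space, and whether writes
 * to the used ring must be logged. */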
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

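/* Enable or disable dirty logging for the whole device: renegotiate
 * features with VHOST_F_LOG_ALL toggled, then update every vring's
 * addresses with the matching log flag. On failure, roll the already
 * updated vrings back to the current dev->log_enabled state. */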
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

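/* React to migration starting or stopping dirty tracking. On a running
 * device, enabling allocates a suitably sized log before logging is
 * turned on; disabling turns logging off before the log is released. */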
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of
 * non-cross-endian legacy devices and modern devices. Only legacy
 * devices exposed to a bi-endian guest may require the vhost driver
 * to use a specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

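/* Handle an IOTLB miss reported by the backend: translate the IOVA
 * through the device's DMA address space, map the result to a vhost
 * userspace address, and push the entry back with
 * vhost_backend_update_device_iotlb(). */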
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
899             error_report("Fail to lookup the translated address "
900                          "%"PRIx64, iotlb.translated_addr);
901             goto out;
902         }
903 
904         len = MIN(iotlb.addr_mask + 1, len);
905         iova = iova & ~iotlb.addr_mask;
906 
907         ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
908                                                 len, iotlb.perm);
909         if (ret) {
910             error_report("Fail to update device iotlb");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}

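/* Bring one virtqueue up in the backend: program its size and base
 * index, fix up vring endianness for cross-endian legacy guests if
 * needed, map the descriptor/avail/used rings and pass their addresses
 * down, set the kick eventfd, and detach the call eventfd when guest
 * notifiers are in use but the queue has no interrupt vector. */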
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Queue might not be ready for start */
        return 0;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

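/* Tear one virtqueue down: fetch the last avail index back from the
 * backend (falling back to the device's used idx if the backend is
 * gone), restore native vring endianness for cross-endian legacy
 * guests, and unmap the rings. */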
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;
    int a;

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Don't stop a virtqueue that might not have been started */
        return;
    }

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness
     * back to native, as legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

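/* One-time initialisation of a vhost device: attach the backend, take
 * ownership, query features, set up the per-virtqueue notifiers and
 * optional busy-loop timeouts, install the memory listeners, and
 * register a migration blocker when dirty logging isn't available. */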
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_addnop,
        .region_nop = vhost_region_addnop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                " than current number of present memory slots");
        r = -1;
        if (busyloop_timeout) {
            goto fail_busyloop;
        } else {
            goto fail;
        }
    }

    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in the kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel; make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
1337             error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
        }
        assert(e >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

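/* Mask out of 'features' every bit from 'feature_bits' that the backend
 * did not advertise, so the guest is only offered what vhost supports. */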
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}

int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}

void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    hdev->config_ops = ops;
}

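/* Start the device: negotiate features (with logging if enabled), push
 * the memory table, start every virtqueue, hand the backend its dirty
 * log, and set up the IOTLB callback when the device sits behind an
 * IOMMU. */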
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}
1577