xref: /qemu/hw/virtio/vhost.c (revision 48d7c97577498657f9ccbcbf1f990fdb4b79501f)
/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
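
/* Worked example of the chunk arithmetic above, assuming the usual
 * definitions of VHOST_LOG_PAGE (4 KiB) and a 64-bit vhost_log_chunk_t,
 * so one chunk covers 64 pages = 256 KiB of guest memory: a dirty write
 * at guest address 0x42000 sets bit (0x42000 % 0x40000) / 0x1000 = 2 in
 * chunk 0x42000 / 0x40000 = 1, and the loop above turns that bit back
 * into page address 0x40000 + 2 * 0x1000 = 0x42000. (Sizes are
 * illustrative; the authoritative values live in hw/virtio/vhost.h.)
 */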

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* A split only happens if the supplied region is in the middle
         * of an existing one. Thus it cannot overlap with any other
         * existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if the supplied region is in the middle
         * of an existing one. Thus it cannot overlap with any other
         * existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
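
/* Illustrative sketch of the split case above (addresses hypothetical):
 * unassigning [0x2000, 0x2fff] from an existing region covering
 * [0x1000, 0x4fff] shrinks the original to [0x1000, 0x1fff] and appends
 * a new region [0x3000, 0x4fff], with userspace_addr advanced by the
 * same 0x2000 so the GPA->HVA mapping of the tail is preserved.
 */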

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
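
/* Example of the sizing rule above, again assuming 256 KiB chunks: a
 * region whose last byte sits at GPA 0x1_0000_0000 - 1 needs
 * 0xffffffff / 0x40000 + 1 = 0x4000 chunks, i.e. a 128 KiB log of
 * 64-bit words covering the first 4 GiB of guest memory.
 */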

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}
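
/* Note on the caching above: at most one anonymous and one shared
 * (memfd-backed) log exist at a time, so vhost devices that agree on
 * the size share a single reference-counted log rather than each
 * allocating their own.
 */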

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* Inform the backend of the log switch; this must be done before
     * releasing the current log, to ensure no logging is lost. */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}
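
/* Summary of the return values above: 0 means the ring part either does
 * not intersect this region or is fully and identically mapped in it,
 * -ENOMEM means the ring part straddles the region's end, and -EBUSY
 * means the GPA range is still there but now maps to a different HVA.
 */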

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    int r;
    int i;

    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (!dev->memory_changed) {
        goto out;
    }
    if (!dev->started) {
        goto out;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes of log to reduce
     * the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
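    /* The ordering around vhost_set_mem_table() below is deliberate: the
     * backend may start logging into the new layout as soon as the table
     * is updated, so the log must already be large enough beforehand,
     * while shrinking is only safe once the backend has stopped writing
     * past the new end. The VHOST_LOG_BUFFER slack keeps small memory
     * layout changes from resizing the log on every transaction.
     */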
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
    return;
}

/* Adds the section data to the tmp_section structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to
 * join neighbours.
 */
static void vhost_region_add_section(struct vhost_dev *dev,
                                     MemoryRegionSection *section)
{
    bool need_add = true;
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;

    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
                                   mrs_host);

    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
                        ~(1 << DIRTY_MEMORY_MIGRATION);
    if (log_dirty) {
        return;
    }

    if (dev->n_tmp_sections) {
        /* Since we already have at least one section, let's see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                               (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
        uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
        uint64_t prev_host_start =
                        (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
                        prev_sec->offset_within_region;
        uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);

        if (prev_gpa_end + 1 == mrs_gpa &&
            prev_host_end + 1 == mrs_host &&
            section->mr == prev_sec->mr &&
            (!dev->vhost_ops->vhost_backend_can_merge ||
                dev->vhost_ops->vhost_backend_can_merge(dev,
                    mrs_host, mrs_size,
                    prev_host_start, prev_size))) {
            /* The two sections abut */
            need_add = false;
            prev_sec->size = int128_add(prev_sec->size, section->size);
            trace_vhost_region_add_section_abut(section->mr->name,
                                                mrs_size + prev_size);
        }
    }

    if (need_add) {
        ++dev->n_tmp_sections;
        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                    dev->n_tmp_sections);
        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it, making it NULL
         * means we can memcmp the list.
         */
        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
        memory_region_ref(section->mr);
    }
}
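
/* Merging sketch for the routine above (addresses hypothetical): two
 * sections covering GPA [0x0, 0x9ffff] and [0xa0000, 0xfffff] that are
 * also contiguous in host memory and come from the same MemoryRegion
 * collapse into a single [0x0, 0xfffff] entry, so the table handed to
 * the backend stays as small as the flat address map allows.
 */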

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }
    vhost_region_add_section(dev, section);

    vhost_set_memory(listener, section, true);
}

/* Called on regions that have not changed */
static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    vhost_region_add_section(dev, section);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device IOTLB");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}
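
/* Concrete case for the check above: a legacy (pre-VIRTIO_F_VERSION_1)
 * device driven by a big-endian guest on a little-endian host has
 * big-endian vrings, so the kernel's native little-endian accessors
 * would misread them and the VHOST_SET_VRING_ENDIAN fixup below is
 * needed; a modern VERSION_1 device is always little-endian and needs
 * no fixup.
 */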

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Failed to update device IOTLB");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}

static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness
     * back to native, as legacy devices expect it by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                " than the current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}
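
/* Typical lifecycle, as a sketch for readers (callers live elsewhere,
 * e.g. vhost_net): vhost_dev_init() pairs with vhost_dev_cleanup(), and
 * in between each start cycle runs vhost_dev_enable_notifiers(),
 * vhost_dev_start(), then vhost_dev_stop() and
 * vhost_dev_disable_notifiers() on the way back down.
 */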

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}
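
/* Usage sketch for the helper above (hypothetical caller): backends
 * describe the feature bits they let vhost negotiate as an array
 * terminated by VHOST_INVALID_FEATURE_BIT, e.g.:
 *
 *     static const int example_feature_bits[] = {
 *         VIRTIO_F_VERSION_1,
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *     features = vhost_get_features(hdev, example_feature_bits, features);
 *
 * vhost_ack_features() below walks the same array in the opposite
 * direction when recording what the guest actually accepted.
 */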

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}

int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}

void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    assert(hdev->vhost_ops);
    hdev->config_ops = ops;
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update the used ring information for the IOTLB to work
         * correctly; the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}